xref: /xnu-12377.1.9/osfmk/arm64/pcb.c (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea)
1 /*
2  * Copyright (c) 2007-2023 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <debug.h>
30 
31 #include <types.h>
32 
33 #include <mach/mach_types.h>
34 #include <mach/thread_status.h>
35 #include <mach/vm_types.h>
36 
37 #include <kern/kern_types.h>
38 #include <kern/task.h>
39 #include <kern/thread.h>
40 #include <kern/misc_protos.h>
41 #include <kern/mach_param.h>
42 #include <kern/spl.h>
43 #include <kern/machine.h>
44 #include <kern/kpc.h>
45 #include <kern/monotonic.h>
46 
47 #include <machine/atomic.h>
48 #include <arm64/proc_reg.h>
49 #include <arm64/machine_machdep.h>
50 #include <arm/cpu_data_internal.h>
51 #include <arm/machdep_call.h>
52 #include <arm/misc_protos.h>
53 #include <arm/cpuid.h>
54 #include <arm/cpu_capabilities_public.h>
55 
56 #include <vm/vm_map_xnu.h>
57 #include <vm/vm_protos.h>
58 
59 #include <sys/kdebug.h>
60 
61 
62 #include <san/kcov_stksz.h>
63 
64 #include <IOKit/IOBSD.h>
65 
66 #include <pexpert/arm64/apple_arm64_cpu.h>
67 #include <pexpert/pexpert.h>
68 
69 // fixme: rdar://114299113 tracks resolving the supportlib issue with hwtrace features
70 
71 extern int debug_task;
72 
73 /* zone for debug_state area */
74 ZONE_DEFINE_TYPE(ads_zone, "arm debug state", arm_debug_state_t, ZC_NONE);
75 ZONE_DEFINE_TYPE(user_ss_zone, "user save state", arm_context_t, ZC_NONE);
76 
77 #if HAS_ARM_FEAT_SME
78 static SECURITY_READ_ONLY_LATE(uint16_t) sme_svl_b;
79 /* zone for arm_sme_saved_state_t allocations */
80 static SECURITY_READ_ONLY_LATE(zone_t) sme_ss_zone;
81 #endif
82 
/*
 * Snapshot the matrix-extension state of the current CPU.
 *
 * On SME-capable builds, records whether the CPU implements SME and, if
 * so, whether ZA storage is currently enabled (SVCR.ZA).  On other
 * builds the argument is untouched.
 */
void
arm_get_matrix_cpu_state(struct arm_matrix_cpu_state *cpu_state)
{
#if HAS_ARM_FEAT_SME
	const bool sme_present = arm_sme_version() > 0;

	cpu_state->have_sme = sme_present;
	cpu_state->za_is_enabled = sme_present &&
	    ((__builtin_arm_rsr64("SVCR") & SVCR_ZA) != 0);
#else
#pragma unused(cpu_state)
#endif /* HAS_ARM_FEAT_SME */
}
99 
/*
 * Routine: consider_machine_collect
 *
 * Machine-dependent memory reclamation hook: on arm64 this just asks the
 * pmap layer to garbage-collect (pmap_gc).
 */
void
consider_machine_collect(void)
{
	pmap_gc();
}
109 
/*
 * Routine: consider_machine_adjust
 *
 * Machine-dependent adjustment hook; nothing to do on this platform.
 */
void
consider_machine_adjust(void)
{
}
118 
119 
120 
121 #if HAS_ARM_FEAT_SME
122 static inline bool
machine_thread_has_valid_za(const arm_sme_saved_state_t * _Nullable sme_ss)123 machine_thread_has_valid_za(const arm_sme_saved_state_t *_Nullable sme_ss)
124 {
125 	return sme_ss && (sme_ss->svcr & SVCR_ZA);
126 }
127 
128 arm_sme_saved_state_t *
machine_thread_get_sme_state(thread_t thread)129 machine_thread_get_sme_state(thread_t thread)
130 {
131 	arm_state_hdr_t *hdr = thread->machine.umatrix_hdr;
132 	if (hdr) {
133 		assert(hdr->flavor == ARM_SME_SAVED_STATE);
134 		return thread->machine.usme;
135 	}
136 
137 	return NULL;
138 }
139 
/*
 * Save the current CPU's SME context into the outgoing thread.
 *
 * old        - thread being switched out
 * old_sme_ss - old's SME saved state (may be NULL if it has none)
 * cpu_state  - snapshot from arm_get_matrix_cpu_state()
 */
static void
machine_save_sme_context(thread_t old, arm_sme_saved_state_t *old_sme_ss, const struct arm_matrix_cpu_state *cpu_state)
{
	/*
	 * Note: we're deliberately not saving old_sme_ss->svcr, since it
	 * already happened on kernel entry.  Likewise we're not restoring the
	 * SM bit from new_sme_ss->svcr, since we don't want streaming SVE mode
	 * active while we're in kernel space; we'll put it back on kernel exit.
	 */

	old->machine.tpidr2_el0 = __builtin_arm_rsr64("TPIDR2_EL0");


	/*
	 * ZA/ZT0 contents only need saving while ZA storage is live on this
	 * CPU.  NOTE(review): za_is_enabled appears to imply old_sme_ss is
	 * non-NULL here — confirm against callers before relying on it.
	 */
	if (cpu_state->za_is_enabled) {
		arm_save_sme_za_zt0(&old_sme_ss->context, old_sme_ss->svl_b);
	}
}
157 
/*
 * Restore the incoming thread's SME context onto the current CPU.
 *
 * new        - thread being switched in
 * new_sme_ss - new's SME saved state, or NULL if it has none
 * cpu_state  - snapshot from arm_get_matrix_cpu_state()
 */
static void
machine_restore_sme_context(thread_t new, const arm_sme_saved_state_t *new_sme_ss, const struct arm_matrix_cpu_state *cpu_state)
{
	__builtin_arm_wsr64("TPIDR2_EL0", new->machine.tpidr2_el0);

	if (new_sme_ss) {
		if (machine_thread_has_valid_za(new_sme_ss)) {
			/* Enable ZA storage before loading it, if not already on. */
			if (!cpu_state->za_is_enabled) {
				asm volatile ("smstart za");
			}
			arm_load_sme_za_zt0(&new_sme_ss->context, new_sme_ss->svl_b);
		} else if (cpu_state->za_is_enabled) {
			/* Incoming thread has no valid ZA; drop the stale contents. */
			asm volatile ("smstop za");
		}

		/* Thread owns SME state, so let it use SME from EL0 untrapped. */
		arm_sme_trap_at_el0(false);
	}
}
176 
/*
 * Disable SME for a thread with no SME saved state: turn off ZA storage
 * if it is active, and re-arm the EL0 SME trap so a future userspace use
 * faults and can allocate state lazily.
 */
static void
machine_disable_sme_context(const struct arm_matrix_cpu_state *cpu_state)
{
	if (cpu_state->za_is_enabled) {
		asm volatile ("smstop za");
	}

	arm_sme_trap_at_el0(true);
}
186 #endif /* HAS_ARM_FEAT_SME */
187 
188 
189 #if HAVE_MACHINE_THREAD_MATRIX_STATE
190 static void
machine_switch_matrix_context(thread_t old,thread_t new)191 machine_switch_matrix_context(thread_t old, thread_t new)
192 {
193 	struct arm_matrix_cpu_state cpu_state;
194 	arm_get_matrix_cpu_state(&cpu_state);
195 
196 
197 #if HAS_ARM_FEAT_SME
198 	arm_sme_saved_state_t *old_sme_ss = machine_thread_get_sme_state(old);
199 	const arm_sme_saved_state_t *new_sme_ss = machine_thread_get_sme_state(new);
200 
201 	if (cpu_state.have_sme) {
202 		machine_save_sme_context(old, old_sme_ss, &cpu_state);
203 	}
204 #endif /* HAS_ARM_FEAT_SME */
205 
206 
207 #if HAS_ARM_FEAT_SME
208 	if (cpu_state.have_sme && !new_sme_ss) {
209 		machine_disable_sme_context(&cpu_state);
210 	}
211 #endif /* HAS_ARM_FEAT_SME */
212 
213 
214 #if HAS_ARM_FEAT_SME
215 	if (cpu_state.have_sme) {
216 		machine_restore_sme_context(new, new_sme_ss, &cpu_state);
217 	}
218 #endif /* HAS_ARM_FEAT_SME */
219 
220 
221 }
222 
223 
224 #endif /* HAVE_MACHINE_THREAD_MATRIX_STATE */
225 
226 
227 
228 
/*
 * Transfer per-CPU ownership from `old` to `new` during a context
 * switch: `new` inherits old's cpu_data pointer and pcpu base/cpu-number
 * cookie, and `old` is marked off-core (CpuDatap == NULL, see
 * machine_thread_on_core()).
 */
static inline void
machine_thread_switch_cpu_data(thread_t old, thread_t new)
{
	/*
	 * We build with -fno-strict-aliasing, so the load through temporaries
	 * is required so that this generates a single load / store pair.
	 */
	cpu_data_t *datap = old->machine.CpuDatap;
	vm_offset_t base  = old->machine.pcpu_data_base_and_cpu_number;

	/* TODO: Should this be ordered? */

	old->machine.CpuDatap = NULL;
	old->machine.pcpu_data_base_and_cpu_number = 0;

	new->machine.CpuDatap = datap;
	new->machine.pcpu_data_base_and_cpu_number = base;
}
247 
248 /**
249  * routine: machine_switch_pmap_and_extended_context
250  *
251  * Helper function used by machine_switch_context and machine_stack_handoff to switch the
252  * extended context and switch the pmap if necessary.
253  *
254  */
255 
static inline void
machine_switch_pmap_and_extended_context(thread_t old, thread_t new)
{
	pmap_t new_pmap;




	/* Swap any matrix (SME) register context first. */
#if HAVE_MACHINE_THREAD_MATRIX_STATE
	machine_switch_matrix_context(old, new);
#endif




	new_pmap = new->map->pmap;
	bool pmap_changed = old->map->pmap != new_pmap;
	bool sec_override_changed =
	    false;

	if (pmap_changed || sec_override_changed) {
		/* pmap_switch() updates TTBR0 and issues its own DSB. */
		pmap_switch(new_pmap, new);
	} else {
		/*
		 * If the thread is preempted while performing cache or TLB maintenance,
		 * it may be migrated to a different CPU between the completion of the relevant
		 * maintenance instruction and the synchronizing DSB.   ARM requires that the
		 * synchronizing DSB must be issued *on the PE that issued the maintenance instruction*
		 * in order to guarantee completion of the instruction and visibility of its effects.
		 * Issue DSB here to enforce that guarantee.  We only do this for the case in which
		 * the pmap isn't changing, as we expect pmap_switch() to issue DSB when it updates
		 * TTBR0.  Note also that cache maintenance may be performed in userspace, so we
		 * cannot further limit this operation e.g. by setting a per-thread flag to indicate
		 * a pending kernel TLB or cache maintenance instruction.
		 */
		__builtin_arm_dsb(DSB_ISH);

		/*
		 * An ISB is needed for similar userspace reasons to the DSB above. Unlike the DSB
		 * case, the context synchronization needs to happen on the CPU the 'old' thread will
		 * later be scheduled on. We can rely on the fact that when 'old' is later scheduled,
		 * whatever thread it is replacing will go through this function as 'old' and will
		 * issue this ISB on its behalf.
		 */
		arm_context_switch_requires_sync();
	}


	/* Hand off per-CPU data ownership from old to new. */
	machine_thread_switch_cpu_data(old, new);
}
307 
308 /*
309  * Routine: machine_switch_context
310  *
311  */
thread_t
machine_switch_context(thread_t old,
    thread_continue_t continuation,
    thread_t new)
{
	thread_t retval;

#if __ARM_PAN_AVAILABLE__
	/* PAN must never be disabled while running kernel code. */
	if (__improbable(__builtin_arm_rsr("pan") == 0)) {
		panic("context switch with PAN disabled");
	}
#endif

/* Debug tracing hook; expands to nothing (the kprintf is commented out). */
#define machine_switch_context_kprintf(x...) \
	/* kprintf("machine_switch_context: " x) */

	if (old == new) {
		panic("machine_switch_context");
	}

#if CONFIG_CPU_COUNTERS
	kpc_off_cpu(old);
#endif /* CONFIG_CPU_COUNTERS */

	/* Switch pmap, matrix context, and per-CPU data before the stack switch. */
	machine_switch_pmap_and_extended_context(old, new);

	machine_switch_context_kprintf("old= %x contination = %x new = %x\n", old, continuation, new);

	/* Switch_context (assembly) performs the actual register/stack switch. */
	retval = Switch_context(old, continuation, new);
	assert(retval != NULL);

	return retval;
}
345 
/*
 * Returns TRUE if `thread` currently owns a CPU; CpuDatap is non-NULL
 * exactly while on-core (see machine_thread_switch_cpu_data()).
 */
boolean_t
machine_thread_on_core(thread_t thread)
{
	return thread->machine.CpuDatap != NULL;
}
351 
/*
 * Like machine_thread_on_core(), but tolerates a `thread` pointer that
 * may reference freed memory, by reading CpuDatap with a fault-tolerant
 * kernel-to-kernel copy.  Must be called with preemption disabled.
 */
boolean_t
machine_thread_on_core_allow_invalid(thread_t thread)
{
	#define _copyin_fn      _copyin_atomic64

	extern int _copyin_fn(const char *src, uint64_t *dst);
	uint64_t addr;

	/*
	 * Utilize that the thread zone is sequestered which means
	 * that this kernel-to-kernel copyin can't read data
	 * from anything but a thread, zeroed or freed memory.
	 */
	assert(get_preemption_level() > 0);
	if (thread == THREAD_NULL) {
		return false;
	}
	thread_require(thread);
	/* Non-zero return means the read faulted; treat as off-core. */
	if (_copyin_fn((void *)&thread->machine.CpuDatap, &addr) == 0) {
		return addr != 0;
	}
	return false;

#undef _copyin_fn
}
377 
378 
379 /*
380  * Routine: machine_thread_create
381  *
382  */
void
machine_thread_create(thread_t thread, task_t task, bool first_thread)
{
/* Debug tracing hook; expands to nothing (the kprintf is commented out). */
#define machine_thread_create_kprintf(x...) \
	/* kprintf("machine_thread_create: " x) */

	machine_thread_create_kprintf("thread = %x\n", thread);

	if (!first_thread) {
		thread->machine.CpuDatap = (cpu_data_t *)0;
		// setting this offset will cause trying to use it to panic
		thread->machine.pcpu_data_base_and_cpu_number =
		    ml_make_pcpu_base_and_cpu_number(VM_MIN_KERNEL_ADDRESS, 0);
	}
	thread->machine.arm_machine_flags = 0;
	thread->machine.preemption_count = 0;
	thread->machine.cthread_self = 0;
	thread->machine.kpcb = NULL;
	thread->machine.exception_trace_code = 0;
#if defined(HAS_APPLE_PAC)
	/* Inherit the task's pointer-auth keys and user-JOP policy. */
	thread->machine.rop_pid = task->rop_pid;
	thread->machine.jop_pid = task->jop_pid;
	if (task->disable_user_jop) {
		thread->machine.arm_machine_flags |= ARM_MACHINE_THREAD_DISABLE_USER_JOP;
	}
#endif




	if (task != kernel_task) {
		/* If this isn't a kernel thread, we'll have userspace state. */
		arm_context_t *contextData = zalloc_flags(user_ss_zone,
		    Z_WAITOK | Z_NOFAIL);

#if __has_feature(ptrauth_calls)
		/*
		 * Publish the save-state pointers with interrupts masked so the
		 * zone_require() check and the stores cannot be interleaved with
		 * anything that could tamper with them.
		 */
		uint64_t intr = ml_pac_safe_interrupts_disable();
		zone_require(user_ss_zone, contextData);
#endif
		thread->machine.contextData = contextData;
		thread->machine.upcb = &contextData->ss;
		thread->machine.uNeon = &contextData->ns;
#if __has_feature(ptrauth_calls)
		ml_pac_safe_interrupts_restore(intr);
#endif

		/* Tag the save-state flavors to match the task's register width. */
		if (task_has_64Bit_data(task)) {
			thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
			thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
			thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
			thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;

		} else {
			thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
			thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
			thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
			thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		}
	} else {
		/* Kernel threads carry no userspace save state. */
		thread->machine.upcb = NULL;
		thread->machine.uNeon = NULL;
		thread->machine.contextData = NULL;
	}

#if HAVE_MACHINE_THREAD_MATRIX_STATE
	/* Matrix (SME) state is allocated lazily, on first use. */
	thread->machine.umatrix_hdr = NULL;
#endif


#if HAS_ARM_FEAT_SME
	thread->machine.tpidr2_el0 = 0;
#endif

	bzero(&thread->machine.perfctrl_state, sizeof(thread->machine.perfctrl_state));
	machine_thread_state_initialize(thread);
}
459 
460 /*
461  * Routine: machine_thread_process_signature
462  *
463  * Called to allow code signature dependent adjustments to the thread
464  * state. Note that this is usually called twice for the main thread:
465  * Once at thread creation by thread_create, when the signature is
466  * potentially not attached yet (which is usually the case for the
467  * first/main thread of a task), and once after the task's signature
468  * has actually been attached.
469  *
470  */
kern_return_t
machine_thread_process_signature(thread_t __unused thread, task_t __unused task)
{
	kern_return_t result = KERN_SUCCESS;

	/*
	 * Reset to default state.
	 *
	 * In general, this function must not assume anything about the
	 * previous signature dependent thread state.
	 *
	 * At least at the time of writing this, threads don't transition
	 * to different code signatures, so each thread this function
	 * operates on is "fresh" in the sense that
	 * machine_thread_process_signature() has either not even been
	 * called on it yet, or only been called as part of thread
	 * creation when there was no signature yet.
	 *
	 * But for easier reasoning, and to prevent future bugs, this
	 * function should always recalculate all signature-dependent
	 * thread state, as if the signature could actually change from an
	 * actual signature to another.
	 */
#if !__ARM_KERNEL_PROTECT__
	thread->machine.arm_machine_flags &= ~(ARM_MACHINE_THREAD_PRESERVE_X18);
#endif /* !__ARM_KERNEL_PROTECT__ */
	thread->machine.arm_machine_flags &= ~(ARM_MACHINE_THREAD_USES_1GHZ_TIMBASE);

	/*
	 * Set signature dependent state.
	 */
	if (task != kernel_task && task_has_64Bit_data(task)) {
#if !__ARM_KERNEL_PROTECT__
#if CONFIG_ROSETTA
		if (task_is_translated(task)) {
			/* Note that for x86_64 translation specifically, the
			 * context switch path implicitly switches x18 regardless
			 * of this flag. */
			thread->machine.arm_machine_flags |= ARM_MACHINE_THREAD_PRESERVE_X18;
		}
#endif /* CONFIG_ROSETTA */

		if (task->preserve_x18) {
			thread->machine.arm_machine_flags |= ARM_MACHINE_THREAD_PRESERVE_X18;
		}
#endif /* !__ARM_KERNEL_PROTECT__ */

		if (task->uses_1ghz_timebase) {
			thread->machine.arm_machine_flags |= ARM_MACHINE_THREAD_USES_1GHZ_TIMBASE;
		}
	} else {
		/* Kernel and 32-bit-data threads get the fixed defaults below. */
#if !__ARM_KERNEL_PROTECT__
		/*
		 * For informational value only, context switch only trashes
		 * x18 for user threads.  (Except for devices with
		 * __ARM_KERNEL_PROTECT__, which make real destructive use of
		 * x18.)
		 */
		thread->machine.arm_machine_flags |= ARM_MACHINE_THREAD_PRESERVE_X18;
#endif /* !__ARM_KERNEL_PROTECT__ */
		thread->machine.arm_machine_flags |= ARM_MACHINE_THREAD_USES_1GHZ_TIMBASE;
	}

	/**
	 * Make sure the machine flags are observed before the thread becomes available
	 * to run in user mode, especially in the posix_spawn() path.
	 */
	os_atomic_thread_fence(release);
	return result;
}
541 
542 /*
543  * Routine: machine_thread_destroy
544  *
545  */
/*
 * Routine: machine_thread_destroy
 *
 * Tear down a thread's machine-dependent state: user save state, SME
 * state (if any), and its reference on the shared debug state.
 */
void
machine_thread_destroy(thread_t thread)
{
	arm_context_t *thread_user_ss;

	if (thread->machine.contextData) {
		/* Disassociate the user save state from the thread before we free it. */
		thread_user_ss = thread->machine.contextData;
		thread->machine.upcb = NULL;
		thread->machine.uNeon = NULL;
		thread->machine.contextData = NULL;

#if HAS_ARM_FEAT_SME
		machine_thread_sme_state_free(thread);
#endif

		zfree(user_ss_zone, thread_user_ss);
	}

	if (thread->machine.DebugData != NULL) {
		/* If this CPU is still pointing at our debug state, clear it. */
		if (thread->machine.DebugData == getCpuDatap()->cpu_user_debug) {
			arm_debug_set(NULL);
		}

		/* Debug state is refcounted; free only on the last release. */
		if (os_ref_release(&thread->machine.DebugData->ref) == 0) {
			zfree(ads_zone, thread->machine.DebugData);
		}
	}
}
575 
576 
577 #if HAS_ARM_FEAT_SME
578 static arm_sme_saved_state_t *
zalloc_sme_saved_state(void)579 zalloc_sme_saved_state(void)
580 {
581 	arm_sme_saved_state_t *sme_ss = zalloc_flags(sme_ss_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
582 	sme_ss->hdr.flavor = ARM_SME_SAVED_STATE;
583 	sme_ss->hdr.count = arm_sme_saved_state_count(sme_svl_b);
584 	sme_ss->svl_b = sme_svl_b;
585 	return sme_ss;
586 }
587 
/*
 * Allocate SME saved state for `thread` and disable the EL0 SME trap.
 * Panics if the thread already has SME state.  Requires SME hardware.
 */
kern_return_t
machine_thread_sme_state_alloc(thread_t thread)
{
	assert(arm_sme_version());


	if (thread->machine.usme) {
		panic("thread %p already has SME saved state %p",
		    thread, thread->machine.usme);
	}

	arm_sme_saved_state_t *sme_ss = zalloc_sme_saved_state();
	/*
	 * Disable preemption so the trap change and the publishing of the
	 * saved state happen on the same CPU without interleaving.
	 */
	disable_preemption();

	arm_sme_trap_at_el0(false);
	/* Synchronize the trap-control update before the state is visible. */
	__builtin_arm_isb(ISB_SY);
	thread->machine.usme = sme_ss;

	enable_preemption();

	return KERN_SUCCESS;
}
610 
611 void
machine_thread_sme_state_free(thread_t thread)612 machine_thread_sme_state_free(thread_t thread)
613 {
614 	arm_sme_saved_state_t *sme_ss = machine_thread_get_sme_state(thread);
615 
616 	if (sme_ss) {
617 		thread->machine.usme = NULL;
618 		zfree(sme_ss_zone, sme_ss);
619 	}
620 }
621 
/*
 * Duplicate `src_sme_ss` (the current thread's SME state) into a fresh
 * allocation owned by `target`.  Z/P registers are copied from the saved
 * area; ZA/ZT0 are captured from the live CPU registers, since the saved
 * copies may be stale.
 */
static void
machine_thread_sme_state_dup(const arm_sme_saved_state_t *src_sme_ss, thread_t target)
{
	arm_sme_saved_state_t *sme_ss = zalloc_sme_saved_state();
	/* SVL is fixed at boot, so source and copy must agree. */
	assert(sme_ss->svl_b == src_sme_ss->svl_b);

	arm_sme_context_t *context = &sme_ss->context;
	uint16_t svl_b = sme_ss->svl_b;

	sme_ss->svcr = src_sme_ss->svcr;
	/* Z and P are saved on kernel entry.  ZA and ZT0 may be stale. */
	if (sme_ss->svcr & SVCR_SM) {
		const arm_sme_context_t *src_context = &src_sme_ss->context;
		memcpy(arm_sme_z(context), const_arm_sme_z(src_context), arm_sme_z_size(svl_b));
		memcpy(arm_sme_p(context, svl_b), const_arm_sme_p(src_context, svl_b), arm_sme_p_size(svl_b));
	}
	if (sme_ss->svcr & SVCR_ZA) {
		/* Capture live ZA/ZT0 directly from the CPU into the copy. */
		arm_save_sme_za_zt0(context, svl_b);
	}

	target->machine.usme = sme_ss;
}
644 #endif /* HAS_ARM_FEAT_SME */
645 
646 #if HAVE_MACHINE_THREAD_MATRIX_STATE
647 void
machine_thread_matrix_state_dup(thread_t target)648 machine_thread_matrix_state_dup(thread_t target)
649 {
650 	assert(!target->machine.umatrix_hdr);
651 	thread_t thread = current_thread();
652 
653 #if HAS_ARM_FEAT_SME
654 	const arm_sme_saved_state_t *sme_ss = machine_thread_get_sme_state(thread);
655 	if (sme_ss) {
656 		machine_thread_sme_state_dup(sme_ss, target);
657 		return;
658 	}
659 #endif
660 
661 }
662 #endif /* HAVE_MACHINE_THREAD_MATRIX_STATE */
663 
664 /*
665  * Routine: machine_thread_init
666  *
667  */
/*
 * Routine: machine_thread_init
 *
 * One-time machine-dependent thread subsystem setup.  When SME is
 * present, records the streaming vector length and creates the zone
 * backing SME saved-state allocations.
 */
void
machine_thread_init(void)
{
#if HAS_ARM_FEAT_SME
	if (arm_sme_version() != 0) {
		sme_svl_b = arm_sme_svl_b();

		vm_size_t ss_size =
		    arm_sme_saved_state_count(sme_svl_b) * sizeof(unsigned int);
		sme_ss_zone = zone_create_ext("SME saved state", ss_size,
		    ZC_NONE, ZONE_ID_ANY, NULL);
	}
#endif
}
679 
680 /*
681  * Routine:	machine_thread_template_init
682  *
683  */
void
machine_thread_template_init(thread_t __unused thr_template)
{
	/* Nothing to do on this platform; no per-template machine state. */
}
689 
690 /*
691  * Routine: get_useraddr
692  *
693  */
694 user_addr_t
get_useraddr()695 get_useraddr()
696 {
697 	return get_saved_state_pc(current_thread()->machine.upcb);
698 }
699 
700 /*
701  * Routine: machine_stack_detach
702  *
703  */
/*
 * Routine: machine_stack_detach
 *
 * Detach and return the thread's kernel stack, clearing the thread's
 * stack bookkeeping (kernel_stack, kstackptr).
 */
vm_offset_t
machine_stack_detach(thread_t thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	stack = thread->kernel_stack;
#if CONFIG_STKSZ
	/* Remember the detached stack for kcov stack-size accounting. */
	kcov_stksz_set_thread_stack(thread, stack);
#endif
	thread->kernel_stack = 0;
	thread->machine.kstackptr = NULL;

	return stack;
}
721 
722 
723 /*
724  * Routine: machine_stack_attach
725  *
726  */
/*
 * Routine: machine_stack_attach
 *
 * Attach `stack` as the thread's kernel stack and initialize the kernel
 * saved state at its top so the thread resumes in thread_continue.
 */
void
machine_stack_attach(thread_t thread,
    vm_offset_t stack)
{
	struct arm_kernel_context *context;
	struct arm_kernel_saved_state *savestate;
	struct arm_kernel_neon_saved_state *neon_savestate;
	uint32_t current_el;

/* Debug tracing hook; expands to nothing (the kprintf is commented out). */
#define machine_stack_attach_kprintf(x...) \
	/* kprintf("machine_stack_attach: " x) */

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	thread->kernel_stack = stack;
#if CONFIG_STKSZ
	kcov_stksz_set_thread_stack(thread, 0);
#endif
	/* thread_kernel_state lives at the top of the kernel stack. */
	void *kstackptr = (void *)(stack + kernel_stack_size - sizeof(struct thread_kernel_state));
	thread->machine.kstackptr = kstackptr;
	thread_initialize_kernel_state(thread);

	machine_stack_attach_kprintf("kstackptr: %lx\n", (vm_address_t)kstackptr);

	current_el = (uint32_t) __builtin_arm_rsr64("CurrentEL");
	context = &((thread_kernel_state_t) thread->machine.kstackptr)->machine;
	savestate = &context->ss;
	savestate->fp = 0;
	savestate->sp = (uint64_t)kstackptr;
	savestate->pc_was_in_userspace = false;
#if defined(HAS_APPLE_PAC)
	/* Sign the initial kernel stack saved state */
	uint64_t intr = ml_pac_safe_interrupts_disable();
	/*
	 * x17 = &thread_continue, x16 = saved sp (the PAC modifier);
	 * pacia1716 signs x17 with key IA and modifier x16, and the signed
	 * pointer is stored as the saved lr.
	 */
	asm volatile (
                "adrp	x17, _thread_continue@page"             "\n"
                "add	x17, x17, _thread_continue@pageoff"     "\n"
                "ldr	x16, [%[ss], %[SS64_SP]]"               "\n"
                "pacia1716"                                     "\n"
                "str	x17, [%[ss], %[SS64_LR]]"               "\n"
                :
                : [ss]                  "r"(&context->ss),
                  [SS64_SP]             "i"(offsetof(struct arm_kernel_saved_state, sp)),
                  [SS64_LR]             "i"(offsetof(struct arm_kernel_saved_state, lr))
                : "x16", "x17"
        );
	ml_pac_safe_interrupts_restore(intr);
#else
	savestate->lr = (uintptr_t)thread_continue;
#endif /* defined(HAS_APPLE_PAC) */
	neon_savestate = &context->ns;
	neon_savestate->fpcr = FPCR_DEFAULT;
	machine_stack_attach_kprintf("thread = %p pc = %llx, sp = %llx\n", thread, savestate->lr, savestate->sp);
}
781 
782 
783 /*
784  * Routine: machine_stack_handoff
785  *
786  */
/*
 * Routine: machine_stack_handoff
 *
 * Hand the current kernel stack directly from `old` to `new` (stack
 * handoff fast path), then switch pmap/extended context and make `new`
 * the current thread.
 */
void
machine_stack_handoff(thread_t old,
    thread_t new)
{
	vm_offset_t  stack;

#if __ARM_PAN_AVAILABLE__
	/* PAN must never be disabled while running kernel code. */
	if (__improbable(__builtin_arm_rsr("pan") == 0)) {
		panic("stack handoff with PAN disabled");
	}
#endif

#if CONFIG_CPU_COUNTERS
	kpc_off_cpu(old);
#endif /* CONFIG_CPU_COUNTERS */

	stack = machine_stack_detach(old);
#if CONFIG_STKSZ
	kcov_stksz_set_thread_stack(new, 0);
#endif
	new->kernel_stack = stack;
	void *kstackptr = (void *)(stack + kernel_stack_size - sizeof(struct thread_kernel_state));
	new->machine.kstackptr = kstackptr;
	/* If old was running on its reserved stack, swap reservations too. */
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
#if KASAN_TBI
		kasan_unpoison_stack(old->reserved_stack, kernel_stack_size);
#endif /* KASAN_TBI */
		new->reserved_stack = stack;
	}

	machine_switch_pmap_and_extended_context(old, new);

	machine_set_current_thread(new);
	thread_initialize_kernel_state(new);
}
824 
825 
826 /*
827  * Routine: call_continuation
828  *
829  */
/*
 * Routine: call_continuation
 *
 * Thin wrapper around the assembly trampoline Call_continuation, which
 * invokes `continuation(parameter, wresult)` on a fresh kernel stack.
 */
void
call_continuation(thread_continue_t continuation,
    void *parameter,
    wait_result_t wresult,
    boolean_t enable_interrupts)
{
/* Debug tracing hook; expands to nothing (the kprintf is commented out). */
#define call_continuation_kprintf(x...) \
	/* kprintf("call_continuation_kprintf:" x) */

	call_continuation_kprintf("thread = %p continuation = %p, stack = %lx\n",
	    current_thread(), continuation, current_thread()->machine.kstackptr);
	Call_continuation(continuation, parameter, wresult, enable_interrupts);
}
843 
/*
 * Helpers for programming the self-hosted debug breakpoint (DBGB*) and
 * watchpoint (DBGW*) registers.  The *CRn control-register variants
 * also OR the written value into `accum`, letting the caller detect
 * whether any control register was programmed with a non-zero value.
 */
#define SET_DBGBCRn(n, value, accum) \
	__asm__ volatile( \
	        "msr DBGBCR" #n "_EL1, %[val]\n" \
	        "orr %[result], %[result], %[val]\n" \
	        : [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGBVRn(n, value) \
	__asm__ volatile("msr DBGBVR" #n "_EL1, %0" : : "r"(value))

#define SET_DBGWCRn(n, value, accum) \
	__asm__ volatile( \
	        "msr DBGWCR" #n "_EL1, %[val]\n" \
	        "orr %[result], %[result], %[val]\n" \
	        : [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGWVRn(n, value) \
	__asm__ volatile("msr DBGWVR" #n "_EL1, %0" : : "r"(value))
861 
862 void
arm_debug_set32(arm_debug_state_t * debug_state)863 arm_debug_set32(arm_debug_state_t *debug_state)
864 {
865 	struct cpu_data *  cpu_data_ptr;
866 	arm_debug_info_t * debug_info    = arm_debug_info();
867 	boolean_t          intr;
868 	arm_debug_state_t  off_state;
869 	arm_debug_state_t  *cpu_debug;
870 	uint64_t           all_ctrls = 0;
871 
872 	// Non-developers should never need to have hardware break/watchpoints
873 	// set on their phones.
874 	extern bool developer_mode_state(void);
875 	if (!developer_mode_state()) {
876 		return;
877 	}
878 
879 	intr = ml_set_interrupts_enabled(FALSE);
880 	cpu_data_ptr = getCpuDatap();
881 	cpu_debug = cpu_data_ptr->cpu_user_debug;
882 
883 	/*
884 	 * Retain and set new per-cpu state.
885 	 * Reference count does not matter when turning off debug state.
886 	 */
887 	if (debug_state == NULL) {
888 		bzero(&off_state, sizeof(off_state));
889 		cpu_data_ptr->cpu_user_debug = NULL;
890 		debug_state = &off_state;
891 	} else {
892 		os_ref_retain(&debug_state->ref);
893 		cpu_data_ptr->cpu_user_debug = debug_state;
894 	}
895 
896 	/* Release previous debug state. */
897 	if (cpu_debug != NULL) {
898 		if (os_ref_release(&cpu_debug->ref) == 0) {
899 			zfree(ads_zone, cpu_debug);
900 		}
901 	}
902 
903 	switch (debug_info->num_breakpoint_pairs) {
904 	case 16:
905 		SET_DBGBVRn(15, (uint64_t)debug_state->uds.ds32.bvr[15]);
906 		SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds32.bcr[15], all_ctrls);
907 		OS_FALLTHROUGH;
908 	case 15:
909 		SET_DBGBVRn(14, (uint64_t)debug_state->uds.ds32.bvr[14]);
910 		SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds32.bcr[14], all_ctrls);
911 		OS_FALLTHROUGH;
912 	case 14:
913 		SET_DBGBVRn(13, (uint64_t)debug_state->uds.ds32.bvr[13]);
914 		SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds32.bcr[13], all_ctrls);
915 		OS_FALLTHROUGH;
916 	case 13:
917 		SET_DBGBVRn(12, (uint64_t)debug_state->uds.ds32.bvr[12]);
918 		SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds32.bcr[12], all_ctrls);
919 		OS_FALLTHROUGH;
920 	case 12:
921 		SET_DBGBVRn(11, (uint64_t)debug_state->uds.ds32.bvr[11]);
922 		SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds32.bcr[11], all_ctrls);
923 		OS_FALLTHROUGH;
924 	case 11:
925 		SET_DBGBVRn(10, (uint64_t)debug_state->uds.ds32.bvr[10]);
926 		SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds32.bcr[10], all_ctrls);
927 		OS_FALLTHROUGH;
928 	case 10:
929 		SET_DBGBVRn(9, (uint64_t)debug_state->uds.ds32.bvr[9]);
930 		SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds32.bcr[9], all_ctrls);
931 		OS_FALLTHROUGH;
932 	case 9:
933 		SET_DBGBVRn(8, (uint64_t)debug_state->uds.ds32.bvr[8]);
934 		SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds32.bcr[8], all_ctrls);
935 		OS_FALLTHROUGH;
936 	case 8:
937 		SET_DBGBVRn(7, (uint64_t)debug_state->uds.ds32.bvr[7]);
938 		SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds32.bcr[7], all_ctrls);
939 		OS_FALLTHROUGH;
940 	case 7:
941 		SET_DBGBVRn(6, (uint64_t)debug_state->uds.ds32.bvr[6]);
942 		SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds32.bcr[6], all_ctrls);
943 		OS_FALLTHROUGH;
944 	case 6:
945 		SET_DBGBVRn(5, (uint64_t)debug_state->uds.ds32.bvr[5]);
946 		SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds32.bcr[5], all_ctrls);
947 		OS_FALLTHROUGH;
948 	case 5:
949 		SET_DBGBVRn(4, (uint64_t)debug_state->uds.ds32.bvr[4]);
950 		SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds32.bcr[4], all_ctrls);
951 		OS_FALLTHROUGH;
952 	case 4:
953 		SET_DBGBVRn(3, (uint64_t)debug_state->uds.ds32.bvr[3]);
954 		SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds32.bcr[3], all_ctrls);
955 		OS_FALLTHROUGH;
956 	case 3:
957 		SET_DBGBVRn(2, (uint64_t)debug_state->uds.ds32.bvr[2]);
958 		SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds32.bcr[2], all_ctrls);
959 		OS_FALLTHROUGH;
960 	case 2:
961 		SET_DBGBVRn(1, (uint64_t)debug_state->uds.ds32.bvr[1]);
962 		SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds32.bcr[1], all_ctrls);
963 		OS_FALLTHROUGH;
964 	case 1:
965 		SET_DBGBVRn(0, (uint64_t)debug_state->uds.ds32.bvr[0]);
966 		SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds32.bcr[0], all_ctrls);
967 		OS_FALLTHROUGH;
968 	default:
969 		break;
970 	}
971 
972 	switch (debug_info->num_watchpoint_pairs) {
973 	case 16:
974 		SET_DBGWVRn(15, (uint64_t)debug_state->uds.ds32.wvr[15]);
975 		SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds32.wcr[15], all_ctrls);
976 		OS_FALLTHROUGH;
977 	case 15:
978 		SET_DBGWVRn(14, (uint64_t)debug_state->uds.ds32.wvr[14]);
979 		SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds32.wcr[14], all_ctrls);
980 		OS_FALLTHROUGH;
981 	case 14:
982 		SET_DBGWVRn(13, (uint64_t)debug_state->uds.ds32.wvr[13]);
983 		SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds32.wcr[13], all_ctrls);
984 		OS_FALLTHROUGH;
985 	case 13:
986 		SET_DBGWVRn(12, (uint64_t)debug_state->uds.ds32.wvr[12]);
987 		SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds32.wcr[12], all_ctrls);
988 		OS_FALLTHROUGH;
989 	case 12:
990 		SET_DBGWVRn(11, (uint64_t)debug_state->uds.ds32.wvr[11]);
991 		SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds32.wcr[11], all_ctrls);
992 		OS_FALLTHROUGH;
993 	case 11:
994 		SET_DBGWVRn(10, (uint64_t)debug_state->uds.ds32.wvr[10]);
995 		SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds32.wcr[10], all_ctrls);
996 		OS_FALLTHROUGH;
997 	case 10:
998 		SET_DBGWVRn(9, (uint64_t)debug_state->uds.ds32.wvr[9]);
999 		SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds32.wcr[9], all_ctrls);
1000 		OS_FALLTHROUGH;
1001 	case 9:
1002 		SET_DBGWVRn(8, (uint64_t)debug_state->uds.ds32.wvr[8]);
1003 		SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds32.wcr[8], all_ctrls);
1004 		OS_FALLTHROUGH;
1005 	case 8:
1006 		SET_DBGWVRn(7, (uint64_t)debug_state->uds.ds32.wvr[7]);
1007 		SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds32.wcr[7], all_ctrls);
1008 		OS_FALLTHROUGH;
1009 	case 7:
1010 		SET_DBGWVRn(6, (uint64_t)debug_state->uds.ds32.wvr[6]);
1011 		SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds32.wcr[6], all_ctrls);
1012 		OS_FALLTHROUGH;
1013 	case 6:
1014 		SET_DBGWVRn(5, (uint64_t)debug_state->uds.ds32.wvr[5]);
1015 		SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds32.wcr[5], all_ctrls);
1016 		OS_FALLTHROUGH;
1017 	case 5:
1018 		SET_DBGWVRn(4, (uint64_t)debug_state->uds.ds32.wvr[4]);
1019 		SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds32.wcr[4], all_ctrls);
1020 		OS_FALLTHROUGH;
1021 	case 4:
1022 		SET_DBGWVRn(3, (uint64_t)debug_state->uds.ds32.wvr[3]);
1023 		SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds32.wcr[3], all_ctrls);
1024 		OS_FALLTHROUGH;
1025 	case 3:
1026 		SET_DBGWVRn(2, (uint64_t)debug_state->uds.ds32.wvr[2]);
1027 		SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds32.wcr[2], all_ctrls);
1028 		OS_FALLTHROUGH;
1029 	case 2:
1030 		SET_DBGWVRn(1, (uint64_t)debug_state->uds.ds32.wvr[1]);
1031 		SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds32.wcr[1], all_ctrls);
1032 		OS_FALLTHROUGH;
1033 	case 1:
1034 		SET_DBGWVRn(0, (uint64_t)debug_state->uds.ds32.wvr[0]);
1035 		SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds32.wcr[0], all_ctrls);
1036 		OS_FALLTHROUGH;
1037 	default:
1038 		break;
1039 	}
1040 
1041 #if defined(CONFIG_KERNEL_INTEGRITY)
1042 	if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
1043 		panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
1044 	}
1045 #endif
1046 
1047 	/*
1048 	 * Breakpoint/Watchpoint Enable
1049 	 */
1050 	if (all_ctrls != 0) {
1051 		update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
1052 	} else {
1053 		update_mdscr(0x8000, 0);
1054 	}
1055 
1056 	/*
1057 	 * Software debug single step enable
1058 	 */
1059 	if (debug_state->uds.ds32.mdscr_el1 & 0x1) {
1060 		update_mdscr(0, 1); // MDSCR_EL1[SS]
1061 
1062 		mask_user_saved_state_cpsr(current_thread()->machine.upcb, PSR64_SS, 0);
1063 	} else {
1064 		update_mdscr(0x1, 0);
1065 	}
1066 
1067 	__builtin_arm_isb(ISB_SY);
1068 	(void) ml_set_interrupts_enabled(intr);
1069 }
1070 
/*
 * arm_debug_set64
 *
 * Program the current CPU's hardware debug registers (breakpoint and
 * watchpoint value/control pairs plus the MDSCR_EL1 enables) from a
 * 64-bit-flavor user debug state.  Passing NULL disables all debug
 * state on this CPU.
 *
 * Runs with interrupts disabled and finishes with an ISB so the system
 * register writes take effect before returning.
 */
void
arm_debug_set64(arm_debug_state_t *debug_state)
{
	struct cpu_data *  cpu_data_ptr;
	arm_debug_info_t * debug_info    = arm_debug_info();
	boolean_t          intr;
	arm_debug_state_t  off_state;     /* zero-filled state used when disabling */
	arm_debug_state_t  *cpu_debug;    /* previously installed per-cpu state */
	uint64_t           all_ctrls = 0; /* OR of every BCR/WCR value written below */

	// Non-developers should never need to have hardware break/watchpoints
	// set on their phones.
	extern bool developer_mode_state(void);
	if (!developer_mode_state()) {
		return;
	}

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();
	cpu_debug = cpu_data_ptr->cpu_user_debug;

	/*
	 * Retain and set new per-cpu state.
	 * Reference count does not matter when turning off debug state.
	 */
	if (debug_state == NULL) {
		/* Substitute an all-zero state so the switches below clear every register. */
		bzero(&off_state, sizeof(off_state));
		cpu_data_ptr->cpu_user_debug = NULL;
		debug_state = &off_state;
	} else {
		os_ref_retain(&debug_state->ref);
		cpu_data_ptr->cpu_user_debug = debug_state;
	}

	/* Release previous debug state. */
	if (cpu_debug != NULL) {
		if (os_ref_release(&cpu_debug->ref) == 0) {
			zfree(ads_zone, cpu_debug);
		}
	}

	/*
	 * Each case programs one breakpoint value/control pair and falls
	 * through, so entering at num_breakpoint_pairs writes exactly pairs
	 * [0, num_breakpoint_pairs).  The SET_DBGBCRn macro also accumulates
	 * the control value into all_ctrls.
	 */
	switch (debug_info->num_breakpoint_pairs) {
	case 16:
		SET_DBGBVRn(15, debug_state->uds.ds64.bvr[15]);
		SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds64.bcr[15], all_ctrls);
		OS_FALLTHROUGH;
	case 15:
		SET_DBGBVRn(14, debug_state->uds.ds64.bvr[14]);
		SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds64.bcr[14], all_ctrls);
		OS_FALLTHROUGH;
	case 14:
		SET_DBGBVRn(13, debug_state->uds.ds64.bvr[13]);
		SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds64.bcr[13], all_ctrls);
		OS_FALLTHROUGH;
	case 13:
		SET_DBGBVRn(12, debug_state->uds.ds64.bvr[12]);
		SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds64.bcr[12], all_ctrls);
		OS_FALLTHROUGH;
	case 12:
		SET_DBGBVRn(11, debug_state->uds.ds64.bvr[11]);
		SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds64.bcr[11], all_ctrls);
		OS_FALLTHROUGH;
	case 11:
		SET_DBGBVRn(10, debug_state->uds.ds64.bvr[10]);
		SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds64.bcr[10], all_ctrls);
		OS_FALLTHROUGH;
	case 10:
		SET_DBGBVRn(9, debug_state->uds.ds64.bvr[9]);
		SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds64.bcr[9], all_ctrls);
		OS_FALLTHROUGH;
	case 9:
		SET_DBGBVRn(8, debug_state->uds.ds64.bvr[8]);
		SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds64.bcr[8], all_ctrls);
		OS_FALLTHROUGH;
	case 8:
		SET_DBGBVRn(7, debug_state->uds.ds64.bvr[7]);
		SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds64.bcr[7], all_ctrls);
		OS_FALLTHROUGH;
	case 7:
		SET_DBGBVRn(6, debug_state->uds.ds64.bvr[6]);
		SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds64.bcr[6], all_ctrls);
		OS_FALLTHROUGH;
	case 6:
		SET_DBGBVRn(5, debug_state->uds.ds64.bvr[5]);
		SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds64.bcr[5], all_ctrls);
		OS_FALLTHROUGH;
	case 5:
		SET_DBGBVRn(4, debug_state->uds.ds64.bvr[4]);
		SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds64.bcr[4], all_ctrls);
		OS_FALLTHROUGH;
	case 4:
		SET_DBGBVRn(3, debug_state->uds.ds64.bvr[3]);
		SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds64.bcr[3], all_ctrls);
		OS_FALLTHROUGH;
	case 3:
		SET_DBGBVRn(2, debug_state->uds.ds64.bvr[2]);
		SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds64.bcr[2], all_ctrls);
		OS_FALLTHROUGH;
	case 2:
		SET_DBGBVRn(1, debug_state->uds.ds64.bvr[1]);
		SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds64.bcr[1], all_ctrls);
		OS_FALLTHROUGH;
	case 1:
		SET_DBGBVRn(0, debug_state->uds.ds64.bvr[0]);
		SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds64.bcr[0], all_ctrls);
		OS_FALLTHROUGH;
	default:
		break;
	}

	/* Same descending-fallthrough pattern for the watchpoint pairs. */
	switch (debug_info->num_watchpoint_pairs) {
	case 16:
		SET_DBGWVRn(15, debug_state->uds.ds64.wvr[15]);
		SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds64.wcr[15], all_ctrls);
		OS_FALLTHROUGH;
	case 15:
		SET_DBGWVRn(14, debug_state->uds.ds64.wvr[14]);
		SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds64.wcr[14], all_ctrls);
		OS_FALLTHROUGH;
	case 14:
		SET_DBGWVRn(13, debug_state->uds.ds64.wvr[13]);
		SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds64.wcr[13], all_ctrls);
		OS_FALLTHROUGH;
	case 13:
		SET_DBGWVRn(12, debug_state->uds.ds64.wvr[12]);
		SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds64.wcr[12], all_ctrls);
		OS_FALLTHROUGH;
	case 12:
		SET_DBGWVRn(11, debug_state->uds.ds64.wvr[11]);
		SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds64.wcr[11], all_ctrls);
		OS_FALLTHROUGH;
	case 11:
		SET_DBGWVRn(10, debug_state->uds.ds64.wvr[10]);
		SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds64.wcr[10], all_ctrls);
		OS_FALLTHROUGH;
	case 10:
		SET_DBGWVRn(9, debug_state->uds.ds64.wvr[9]);
		SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds64.wcr[9], all_ctrls);
		OS_FALLTHROUGH;
	case 9:
		SET_DBGWVRn(8, debug_state->uds.ds64.wvr[8]);
		SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds64.wcr[8], all_ctrls);
		OS_FALLTHROUGH;
	case 8:
		SET_DBGWVRn(7, debug_state->uds.ds64.wvr[7]);
		SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds64.wcr[7], all_ctrls);
		OS_FALLTHROUGH;
	case 7:
		SET_DBGWVRn(6, debug_state->uds.ds64.wvr[6]);
		SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds64.wcr[6], all_ctrls);
		OS_FALLTHROUGH;
	case 6:
		SET_DBGWVRn(5, debug_state->uds.ds64.wvr[5]);
		SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds64.wcr[5], all_ctrls);
		OS_FALLTHROUGH;
	case 5:
		SET_DBGWVRn(4, debug_state->uds.ds64.wvr[4]);
		SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds64.wcr[4], all_ctrls);
		OS_FALLTHROUGH;
	case 4:
		SET_DBGWVRn(3, debug_state->uds.ds64.wvr[3]);
		SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds64.wcr[3], all_ctrls);
		OS_FALLTHROUGH;
	case 3:
		SET_DBGWVRn(2, debug_state->uds.ds64.wvr[2]);
		SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds64.wcr[2], all_ctrls);
		OS_FALLTHROUGH;
	case 2:
		SET_DBGWVRn(1, debug_state->uds.ds64.wvr[1]);
		SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds64.wcr[1], all_ctrls);
		OS_FALLTHROUGH;
	case 1:
		SET_DBGWVRn(0, debug_state->uds.ds64.wvr[0]);
		SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds64.wcr[0], all_ctrls);
		OS_FALLTHROUGH;
	default:
		break;
	}

#if defined(CONFIG_KERNEL_INTEGRITY)
	/* Refuse any control value that would arm a breakpoint/watchpoint in EL1+. */
	if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
		panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
	}
#endif

	/*
	 * Breakpoint/Watchpoint Enable
	 */
	if (all_ctrls != 0) {
		update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
	} else {
		update_mdscr(0x8000, 0);
	}

	/*
	 * Software debug single step enable
	 */
	if (debug_state->uds.ds64.mdscr_el1 & 0x1) {

		update_mdscr(0, 1); // MDSCR_EL1[SS]

		/* Also set PSR64_SS in the thread's saved CPSR so the step fires on return to user. */
		mask_user_saved_state_cpsr(current_thread()->machine.upcb, PSR64_SS, 0);
	} else {
		update_mdscr(0x1, 0);
	}

	__builtin_arm_isb(ISB_SY);
	(void) ml_set_interrupts_enabled(intr);
}
1280 
1281 void
arm_debug_set(arm_debug_state_t * debug_state)1282 arm_debug_set(arm_debug_state_t *debug_state)
1283 {
1284 	if (debug_state) {
1285 		switch (debug_state->dsh.flavor) {
1286 		case ARM_DEBUG_STATE32:
1287 			arm_debug_set32(debug_state);
1288 			break;
1289 		case ARM_DEBUG_STATE64:
1290 			arm_debug_set64(debug_state);
1291 			break;
1292 		default:
1293 			panic("arm_debug_set");
1294 			break;
1295 		}
1296 	} else {
1297 		if (thread_is_64bit_data(current_thread())) {
1298 			arm_debug_set64(debug_state);
1299 		} else {
1300 			arm_debug_set32(debug_state);
1301 		}
1302 	}
1303 }
1304 
1305 #define VM_MAX_ADDRESS32          ((vm_address_t) 0x80000000)
1306 boolean_t
debug_legacy_state_is_valid(arm_legacy_debug_state_t * debug_state)1307 debug_legacy_state_is_valid(arm_legacy_debug_state_t *debug_state)
1308 {
1309 	arm_debug_info_t *debug_info = arm_debug_info();
1310 	uint32_t i;
1311 	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
1312 		if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i]) {
1313 			return FALSE;
1314 		}
1315 	}
1316 
1317 	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
1318 		if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i]) {
1319 			return FALSE;
1320 		}
1321 	}
1322 	return TRUE;
1323 }
1324 
1325 boolean_t
debug_state_is_valid32(arm_debug_state32_t * debug_state)1326 debug_state_is_valid32(arm_debug_state32_t *debug_state)
1327 {
1328 	arm_debug_info_t *debug_info = arm_debug_info();
1329 	uint32_t i;
1330 	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
1331 		if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i]) {
1332 			return FALSE;
1333 		}
1334 	}
1335 
1336 	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
1337 		if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i]) {
1338 			return FALSE;
1339 		}
1340 	}
1341 	return TRUE;
1342 }
1343 
1344 boolean_t
debug_state_is_valid64(arm_debug_state64_t * debug_state)1345 debug_state_is_valid64(arm_debug_state64_t *debug_state)
1346 {
1347 	arm_debug_info_t *debug_info = arm_debug_info();
1348 	uint32_t i;
1349 	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
1350 		if (0 != debug_state->bcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->bvr[i]) {
1351 			return FALSE;
1352 		}
1353 	}
1354 
1355 	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
1356 		if (0 != debug_state->wcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->wvr[i]) {
1357 			return FALSE;
1358 		}
1359 	}
1360 	return TRUE;
1361 }
1362 
1363 /*
1364  * Duplicate one arm_debug_state_t to another.  "all" parameter
1365  * is ignored in the case of ARM -- Is this the right assumption?
1366  */
1367 void
copy_legacy_debug_state(arm_legacy_debug_state_t * src,arm_legacy_debug_state_t * target,__unused boolean_t all)1368 copy_legacy_debug_state(arm_legacy_debug_state_t * src,
1369     arm_legacy_debug_state_t * target,
1370     __unused boolean_t         all)
1371 {
1372 	bcopy(src, target, sizeof(arm_legacy_debug_state_t));
1373 }
1374 
1375 void
copy_debug_state32(arm_debug_state32_t * src,arm_debug_state32_t * target,__unused boolean_t all)1376 copy_debug_state32(arm_debug_state32_t * src,
1377     arm_debug_state32_t * target,
1378     __unused boolean_t    all)
1379 {
1380 	bcopy(src, target, sizeof(arm_debug_state32_t));
1381 }
1382 
1383 void
copy_debug_state64(arm_debug_state64_t * src,arm_debug_state64_t * target,__unused boolean_t all)1384 copy_debug_state64(arm_debug_state64_t * src,
1385     arm_debug_state64_t * target,
1386     __unused boolean_t    all)
1387 {
1388 	bcopy(src, target, sizeof(arm_debug_state64_t));
1389 }
1390 
1391 kern_return_t
machine_thread_set_tsd_base(thread_t thread,mach_vm_offset_t tsd_base)1392 machine_thread_set_tsd_base(thread_t         thread,
1393     mach_vm_offset_t tsd_base)
1394 {
1395 	if (get_threadtask(thread) == kernel_task) {
1396 		return KERN_INVALID_ARGUMENT;
1397 	}
1398 
1399 	if (thread_is_64bit_addr(thread)) {
1400 		if (tsd_base > vm_map_max(thread->map)) {
1401 			tsd_base = 0ULL;
1402 		}
1403 	} else {
1404 		if (tsd_base > UINT32_MAX) {
1405 			tsd_base = 0ULL;
1406 		}
1407 	}
1408 
1409 	thread->machine.cthread_self = tsd_base;
1410 
1411 	/* For current thread, make the TSD base active immediately */
1412 	if (thread == current_thread()) {
1413 		mp_disable_preemption();
1414 		set_tpidrro(tsd_base);
1415 		mp_enable_preemption();
1416 	}
1417 
1418 	return KERN_SUCCESS;
1419 }
1420 
/*
 * machine_tecs: intentionally a no-op on arm64; the thread argument is
 * unused.
 */
void
machine_tecs(__unused thread_t thr)
{
}
1425 
/*
 * machine_csv: unconditionally reports 0 on arm64 for any cpuvn_e query
 * (NOTE(review): presumably a CPU-vulnerability status probe -- confirm
 * against the cpuvn_e declaration).
 */
int
machine_csv(__unused cpuvn_e cve)
{
	return 0;
}
1431 
1432 void
arm_context_switch_requires_sync()1433 arm_context_switch_requires_sync()
1434 {
1435 	current_cpu_datap()->sync_on_cswitch = 1;
1436 }
1437 
1438 void
arm_context_switch_sync()1439 arm_context_switch_sync()
1440 {
1441 	if (__improbable(current_cpu_datap()->sync_on_cswitch != 0)) {
1442 		__builtin_arm_isb(ISB_SY);
1443 		current_cpu_datap()->sync_on_cswitch = 0;
1444 	}
1445 }
1446 
#if __has_feature(ptrauth_calls)
/*
 * arm_user_jop_disabled: user-space pointer authentication (JOP
 * protection) is never disabled in this configuration; always FALSE.
 */
boolean_t
arm_user_jop_disabled(void)
{
	return FALSE;
}
#endif /* __has_feature(ptrauth_calls) */
1454