/* xref: /xnu-11215.41.3/osfmk/arm64/pcb.c (revision 33de042d024d46de5ff4e89f2471de6608e37fa4) */
/*
 * Copyright (c) 2007-2023 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <debug.h>

#include <types.h>

#include <mach/mach_types.h>
#include <mach/thread_status.h>
#include <mach/vm_types.h>

#include <kern/kern_types.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>
#include <kern/mach_param.h>
#include <kern/spl.h>
#include <kern/machine.h>
#include <kern/kpc.h>
#include <kern/monotonic.h>

#include <machine/atomic.h>
#include <arm64/proc_reg.h>
#include <arm64/machine_machdep.h>
#include <arm/cpu_data_internal.h>
#include <arm/machdep_call.h>
#include <arm/misc_protos.h>
#include <arm/cpuid.h>
#include <arm/cpu_capabilities_public.h>

#include <vm/vm_map_xnu.h>
#include <vm/vm_protos.h>

#include <sys/kdebug.h>


#include <san/kcov_stksz.h>

#include <IOKit/IOBSD.h>

#include <pexpert/arm64/apple_arm64_cpu.h>
#include <pexpert/pexpert.h>

// fixme: rdar://114299113 tracks resolving the supportlib issue with hwtrace features

extern int debug_task;

/* zone for debug_state area */
ZONE_DEFINE_TYPE(ads_zone, "arm debug state", arm_debug_state_t, ZC_NONE);
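/* zone for user save state (arm_context_t) allocations */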
ZONE_DEFINE_TYPE(user_ss_zone, "user save state", arm_context_t, ZC_NONE);

#if HAS_ARM_FEAT_SME
static SECURITY_READ_ONLY_LATE(uint16_t) sme_svl_b;
/* zone for arm_sme_saved_state_t allocations */
static SECURITY_READ_ONLY_LATE(zone_t) sme_ss_zone;
#endif

#if HAVE_MACHINE_THREAD_MATRIX_STATE
struct arm_matrix_cpu_state {
#if HAS_ARM_FEAT_SME
	bool have_sme;
	bool za_is_enabled;
#endif
};

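/*
 * Snapshot the parts of this CPU's matrix-coprocessor state that the
 * context-switch path cares about: currently just whether SME is present
 * and whether the ZA storage is enabled in SVCR.
 */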
static void
machine_get_matrix_cpu_state(struct arm_matrix_cpu_state *cpu_state)
{
#if HAS_ARM_FEAT_SME
	cpu_state->have_sme = arm_sme_version() > 0;
	if (cpu_state->have_sme) {
		cpu_state->za_is_enabled = !!(__builtin_arm_rsr64("SVCR") & SVCR_ZA);
	} else {
		cpu_state->za_is_enabled = false;
	}
#endif /* HAS_ARM_FEAT_SME */
}
#endif /* HAVE_MACHINE_THREAD_MATRIX_STATE */

/*
 * Routine: consider_machine_collect
 *
 */
void
consider_machine_collect(void)
{
	pmap_gc();
}

/*
 * Routine: consider_machine_adjust
 *
 */
void
consider_machine_adjust(void)
{
}



#if HAS_ARM_FEAT_SME
static inline bool
machine_thread_has_valid_za(const arm_sme_saved_state_t *_Nullable sme_ss)
{
	return sme_ss && (sme_ss->svcr & SVCR_ZA);
}

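/*
 * Return the thread's saved SME state, or NULL if the thread has never
 * allocated matrix state.
 */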
arm_sme_saved_state_t *
machine_thread_get_sme_state(thread_t thread)
{
	arm_state_hdr_t *hdr = thread->machine.umatrix_hdr;
	if (hdr) {
		assert(hdr->flavor == ARM_SME_SAVED_STATE);
		return thread->machine.usme;
	}

	return NULL;
}

static void
machine_save_sme_context(thread_t old, arm_sme_saved_state_t *old_sme_ss, const struct arm_matrix_cpu_state *cpu_state)
{
	/*
	 * Note: we're deliberately not saving old_sme_ss->svcr, since it
	 * already happened on kernel entry.  Likewise we're not restoring the
	 * SM bit from new_sme_ss->svcr, since we don't want streaming SVE mode
	 * active while we're in kernel space; we'll put it back on kernel exit.
	 */

	old->machine.tpidr2_el0 = __builtin_arm_rsr64("TPIDR2_EL0");


	if (cpu_state->za_is_enabled) {
		arm_save_sme_za_zt0(&old_sme_ss->context, old_sme_ss->svl_b);
	}
}

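/*
 * Restore the incoming thread's SME context.  TPIDR2_EL0 is always
 * restored; ZA/ZT0 storage is reloaded (enabling ZA first if needed) only
 * when the saved SVCR says it was live, and EL0 SME traps are lifted for
 * any thread that owns SME state.
 */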
static void
machine_restore_sme_context(thread_t new, const arm_sme_saved_state_t *new_sme_ss, const struct arm_matrix_cpu_state *cpu_state)
{
	__builtin_arm_wsr64("TPIDR2_EL0", new->machine.tpidr2_el0);

	if (new_sme_ss) {
		if (machine_thread_has_valid_za(new_sme_ss)) {
			if (!cpu_state->za_is_enabled) {
				asm volatile ("smstart za");
			}
			arm_load_sme_za_zt0(&new_sme_ss->context, new_sme_ss->svl_b);
		} else if (cpu_state->za_is_enabled) {
			asm volatile ("smstop za");
		}

		arm_sme_trap_at_el0(false);
	}
}

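/*
 * Take SME away for a thread with no SME state: turn off ZA storage if it
 * is live, and re-arm the EL0 SME trap so any future use faults into the
 * kernel.
 */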
static void
machine_disable_sme_context(const struct arm_matrix_cpu_state *cpu_state)
{
	if (cpu_state->za_is_enabled) {
		asm volatile ("smstop za");
	}

	arm_sme_trap_at_el0(true);
}
#endif /* HAS_ARM_FEAT_SME */


#if HAVE_MACHINE_THREAD_MATRIX_STATE
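/*
 * Switch per-thread matrix (SME) context on context switch or stack
 * handoff: save the outgoing thread's state, then either restore the
 * incoming thread's state or disable the unit if it has none.
 */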
static void
machine_switch_matrix_context(thread_t old, thread_t new)
{
	struct arm_matrix_cpu_state cpu_state;
	machine_get_matrix_cpu_state(&cpu_state);


#if HAS_ARM_FEAT_SME
	arm_sme_saved_state_t *old_sme_ss = machine_thread_get_sme_state(old);
	const arm_sme_saved_state_t *new_sme_ss = machine_thread_get_sme_state(new);

	if (cpu_state.have_sme) {
		machine_save_sme_context(old, old_sme_ss, &cpu_state);
	}
#endif /* HAS_ARM_FEAT_SME */


#if HAS_ARM_FEAT_SME
	if (cpu_state.have_sme && !new_sme_ss) {
		machine_disable_sme_context(&cpu_state);
	}
#endif /* HAS_ARM_FEAT_SME */


#if HAS_ARM_FEAT_SME
	if (cpu_state.have_sme) {
		machine_restore_sme_context(new, new_sme_ss, &cpu_state);
	}
#endif /* HAS_ARM_FEAT_SME */


}


#endif /* HAVE_MACHINE_THREAD_MATRIX_STATE */




static inline void
machine_thread_switch_cpu_data(thread_t old, thread_t new)
{
	/*
	 * We build with -fno-strict-aliasing, so the load through temporaries
	 * is required so that this generates a single load / store pair.
	 */
	cpu_data_t *datap = old->machine.CpuDatap;
	vm_offset_t base  = old->machine.pcpu_data_base_and_cpu_number;

	/* TODO: Should this be ordered? */

	old->machine.CpuDatap = NULL;
	old->machine.pcpu_data_base_and_cpu_number = 0;

	new->machine.CpuDatap = datap;
	new->machine.pcpu_data_base_and_cpu_number = base;
}

/**
 * routine: machine_switch_pmap_and_extended_context
 *
 * Helper function used by machine_switch_context and machine_stack_handoff to switch the
 * extended context and switch the pmap if necessary.
 *
 */

static inline void
machine_switch_pmap_and_extended_context(thread_t old, thread_t new)
{
	pmap_t new_pmap;




#if HAVE_MACHINE_THREAD_MATRIX_STATE
	machine_switch_matrix_context(old, new);
#endif




	new_pmap = new->map->pmap;
	if (old->map->pmap != new_pmap) {
		pmap_switch(new_pmap);
	} else {
		/*
		 * If the thread is preempted while performing cache or TLB maintenance,
		 * it may be migrated to a different CPU between the completion of the relevant
		 * maintenance instruction and the synchronizing DSB.  ARM requires that the
		 * synchronizing DSB must be issued *on the PE that issued the maintenance instruction*
		 * in order to guarantee completion of the instruction and visibility of its effects.
		 * Issue DSB here to enforce that guarantee.  We only do this for the case in which
		 * the pmap isn't changing, as we expect pmap_switch() to issue DSB when it updates
		 * TTBR0.  Note also that cache maintenance may be performed in userspace, so we
		 * cannot further limit this operation e.g. by setting a per-thread flag to indicate
		 * a pending kernel TLB or cache maintenance instruction.
		 */
		__builtin_arm_dsb(DSB_ISH);
	}


	machine_thread_switch_cpu_data(old, new);
}

/*
 * Routine: machine_switch_context
 *
 */
thread_t
machine_switch_context(thread_t old,
    thread_continue_t continuation,
    thread_t new)
{
	thread_t retval;

#if __ARM_PAN_AVAILABLE__
	if (__improbable(__builtin_arm_rsr("pan") == 0)) {
		panic("context switch with PAN disabled");
	}
#endif

#define machine_switch_context_kprintf(x...) \
	/* kprintf("machine_switch_context: " x) */

	if (old == new) {
		panic("machine_switch_context");
	}

#if CONFIG_CPU_COUNTERS
	kpc_off_cpu(old);
#endif /* CONFIG_CPU_COUNTERS */

	machine_switch_pmap_and_extended_context(old, new);

	machine_switch_context_kprintf("old = %x continuation = %x new = %x\n", old, continuation, new);

	retval = Switch_context(old, continuation, new);
	assert(retval != NULL);

	return retval;
}

boolean_t
machine_thread_on_core(thread_t thread)
{
	return thread->machine.CpuDatap != NULL;
}

boolean_t
machine_thread_on_core_allow_invalid(thread_t thread)
{
	extern int _copyin_atomic64(const char *src, uint64_t *dst);
	uint64_t addr;

	/*
	 * Rely on the thread zone being sequestered, which means that this
	 * kernel-to-kernel copyin can't read data from anything but a thread,
	 * zeroed memory, or freed memory.
	 */
	assert(get_preemption_level() > 0);
	thread = pgz_decode_allow_invalid(thread, ZONE_ID_THREAD);
	if (thread == THREAD_NULL) {
		return false;
	}
	thread_require(thread);
	if (_copyin_atomic64((void *)&thread->machine.CpuDatap, &addr) == 0) {
		return addr != 0;
	}
	return false;
}


/*
 * Routine: machine_thread_create
 *
 */
void
machine_thread_create(thread_t thread, task_t task, bool first_thread)
{
#define machine_thread_create_kprintf(x...) \
	/* kprintf("machine_thread_create: " x) */

	machine_thread_create_kprintf("thread = %x\n", thread);

	if (!first_thread) {
		thread->machine.CpuDatap = (cpu_data_t *)0;
		// setting this offset will cause any attempt to use it to panic
		thread->machine.pcpu_data_base_and_cpu_number =
		    ml_make_pcpu_base_and_cpu_number(VM_MIN_KERNEL_ADDRESS, 0);
	}
	thread->machine.arm_machine_flags = 0;
	thread->machine.preemption_count = 0;
	thread->machine.cthread_self = 0;
	thread->machine.kpcb = NULL;
	thread->machine.exception_trace_code = 0;
#if defined(HAS_APPLE_PAC)
	thread->machine.rop_pid = task->rop_pid;
	thread->machine.jop_pid = task->jop_pid;
	if (task->disable_user_jop) {
		thread->machine.arm_machine_flags |= ARM_MACHINE_THREAD_DISABLE_USER_JOP;
	}
#endif




	if (task != kernel_task) {
		/* If this isn't a kernel thread, we'll have userspace state. */
		arm_context_t *contextData = zalloc_flags(user_ss_zone,
		    Z_WAITOK | Z_NOFAIL);

#if __has_feature(ptrauth_calls)
		uint64_t intr = ml_pac_safe_interrupts_disable();
		zone_require(user_ss_zone, contextData);
#endif
		thread->machine.contextData = contextData;
		thread->machine.upcb = &contextData->ss;
		thread->machine.uNeon = &contextData->ns;
#if __has_feature(ptrauth_calls)
		ml_pac_safe_interrupts_restore(intr);
#endif

		if (task_has_64Bit_data(task)) {
			thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
			thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
			thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
			thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
		} else {
			thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
			thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
			thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
			thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		}
	} else {
		thread->machine.upcb = NULL;
		thread->machine.uNeon = NULL;
		thread->machine.contextData = NULL;
	}

#if HAVE_MACHINE_THREAD_MATRIX_STATE
	thread->machine.umatrix_hdr = NULL;
#endif


#if HAS_ARM_FEAT_SME
	thread->machine.tpidr2_el0 = 0;
#endif

	bzero(&thread->machine.perfctrl_state, sizeof(thread->machine.perfctrl_state));
	machine_thread_state_initialize(thread);
}

/*
 * Routine: machine_thread_process_signature
 *
 * Called to allow code signature dependent adjustments to the thread
 * state. Note that this is usually called twice for the main thread:
 * Once at thread creation by thread_create, when the signature is
 * potentially not attached yet (which is usually the case for the
 * first/main thread of a task), and once after the task's signature
 * has actually been attached.
 *
 */
kern_return_t
machine_thread_process_signature(thread_t __unused thread, task_t __unused task)
{
	kern_return_t result = KERN_SUCCESS;

	/*
	 * Reset to default state.
	 *
	 * In general, this function must not assume anything about the
	 * previous signature dependent thread state.
	 *
	 * At least at the time of writing this, threads don't transition
	 * to different code signatures, so each thread this function
	 * operates on is "fresh" in the sense that
	 * machine_thread_process_signature() has either not even been
	 * called on it yet, or only been called as part of thread
	 * creation when there was no signature yet.
	 *
	 * But for easier reasoning, and to prevent future bugs, this
	 * function should always recalculate all signature-dependent
	 * thread state, as if the signature could actually change from
	 * one signature to another.
	 */
#if !__ARM_KERNEL_PROTECT__
	thread->machine.arm_machine_flags &= ~(ARM_MACHINE_THREAD_PRESERVE_X18);
#endif /* !__ARM_KERNEL_PROTECT__ */
	thread->machine.arm_machine_flags &= ~(ARM_MACHINE_THREAD_USES_1GHZ_TIMBASE);

	/*
	 * Set signature dependent state.
	 */
	if (task != kernel_task && task_has_64Bit_data(task)) {
#if !__ARM_KERNEL_PROTECT__
#if CONFIG_ROSETTA
		if (task_is_translated(task)) {
			/* Note that for x86_64 translation specifically, the
			 * context switch path implicitly switches x18 regardless
			 * of this flag. */
			thread->machine.arm_machine_flags |= ARM_MACHINE_THREAD_PRESERVE_X18;
		}
#endif /* CONFIG_ROSETTA */

		if (task->preserve_x18) {
			thread->machine.arm_machine_flags |= ARM_MACHINE_THREAD_PRESERVE_X18;
		}
#endif /* !__ARM_KERNEL_PROTECT__ */

		if (task->uses_1ghz_timebase) {
			thread->machine.arm_machine_flags |= ARM_MACHINE_THREAD_USES_1GHZ_TIMBASE;
		}
	} else {
#if !__ARM_KERNEL_PROTECT__
		/*
		 * For informational value only, context switch only trashes
		 * x18 for user threads.  (Except for devices with
		 * __ARM_KERNEL_PROTECT__, which make real destructive use of
		 * x18.)
		 */
		thread->machine.arm_machine_flags |= ARM_MACHINE_THREAD_PRESERVE_X18;
#endif /* !__ARM_KERNEL_PROTECT__ */
		thread->machine.arm_machine_flags |= ARM_MACHINE_THREAD_USES_1GHZ_TIMBASE;
	}

	/**
	 * Make sure the machine flags are observed before the thread becomes available
	 * to run in user mode, especially in the posix_spawn() path.
	 */
	os_atomic_thread_fence(release);
	return result;
}

/*
 * Routine: machine_thread_destroy
 *
 */
void
machine_thread_destroy(thread_t thread)
{
	arm_context_t *thread_user_ss;

	if (thread->machine.contextData) {
		/* Disassociate the user save state from the thread before we free it. */
		thread_user_ss = thread->machine.contextData;
		thread->machine.upcb = NULL;
		thread->machine.uNeon = NULL;
		thread->machine.contextData = NULL;

#if HAS_ARM_FEAT_SME
		machine_thread_sme_state_free(thread);
#endif

		zfree(user_ss_zone, thread_user_ss);
	}

	if (thread->machine.DebugData != NULL) {
		if (thread->machine.DebugData == getCpuDatap()->cpu_user_debug) {
			arm_debug_set(NULL);
		}

		if (os_ref_release(&thread->machine.DebugData->ref) == 0) {
			zfree(ads_zone, thread->machine.DebugData);
		}
	}
}


#if HAS_ARM_FEAT_SME
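/* Allocate and minimally initialize an SME saved-state area at the boot-time SVL. */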
static arm_sme_saved_state_t *
zalloc_sme_saved_state(void)
{
	arm_sme_saved_state_t *sme_ss = zalloc_flags(sme_ss_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	sme_ss->hdr.flavor = ARM_SME_SAVED_STATE;
	sme_ss->hdr.count = arm_sme_saved_state_count(sme_svl_b);
	sme_ss->svl_b = sme_svl_b;
	return sme_ss;
}

kern_return_t
machine_thread_sme_state_alloc(thread_t thread)
{
	assert(arm_sme_version());


	if (thread->machine.usme) {
		panic("thread %p already has SME saved state %p",
		    thread, thread->machine.usme);
	}

	arm_sme_saved_state_t *sme_ss = zalloc_sme_saved_state();
	disable_preemption();

	arm_sme_trap_at_el0(false);
	__builtin_arm_isb(ISB_SY);
	thread->machine.usme = sme_ss;

	enable_preemption();

	return KERN_SUCCESS;
}

void
machine_thread_sme_state_free(thread_t thread)
{
	arm_sme_saved_state_t *sme_ss = machine_thread_get_sme_state(thread);

	if (sme_ss) {
		thread->machine.usme = NULL;
		zfree(sme_ss_zone, sme_ss);
	}
}

static void
machine_thread_sme_state_dup(const arm_sme_saved_state_t *src_sme_ss, thread_t target)
{
	arm_sme_saved_state_t *sme_ss = zalloc_sme_saved_state();
	assert(sme_ss->svl_b == src_sme_ss->svl_b);

	arm_sme_context_t *context = &sme_ss->context;
	uint16_t svl_b = sme_ss->svl_b;

	sme_ss->svcr = src_sme_ss->svcr;
	/* Z and P are saved on kernel entry.  ZA and ZT0 may be stale. */
	if (sme_ss->svcr & SVCR_SM) {
		const arm_sme_context_t *src_context = &src_sme_ss->context;
		memcpy(arm_sme_z(context), const_arm_sme_z(src_context), arm_sme_z_size(svl_b));
		memcpy(arm_sme_p(context, svl_b), const_arm_sme_p(src_context, svl_b), arm_sme_p_size(svl_b));
	}
	if (sme_ss->svcr & SVCR_ZA) {
		arm_save_sme_za_zt0(context, svl_b);
	}

	target->machine.usme = sme_ss;
}
#endif /* HAS_ARM_FEAT_SME */

#if HAVE_MACHINE_THREAD_MATRIX_STATE
void
machine_thread_matrix_state_dup(thread_t target)
{
	assert(!target->machine.umatrix_hdr);
	thread_t thread = current_thread();

#if HAS_ARM_FEAT_SME
	const arm_sme_saved_state_t *sme_ss = machine_thread_get_sme_state(thread);
	if (sme_ss) {
		machine_thread_sme_state_dup(sme_ss, target);
		return;
	}
#endif

}
#endif /* HAVE_MACHINE_THREAD_MATRIX_STATE */

/*
 * Routine: machine_thread_init
 *
 */
void
machine_thread_init(void)
{

#if HAS_ARM_FEAT_SME
	if (arm_sme_version()) {
		sme_svl_b = arm_sme_svl_b();
		vm_size_t size = arm_sme_saved_state_count(sme_svl_b) * sizeof(unsigned int);
		sme_ss_zone = zone_create_ext("SME saved state", size, ZC_NONE, ZONE_ID_ANY, NULL);
	}
#endif
}

/*
 * Routine:	machine_thread_template_init
 *
 */
void
machine_thread_template_init(thread_t __unused thr_template)
{
	/* Nothing to do on this platform. */
}

/*
 * Routine: get_useraddr
 *
 */
user_addr_t
get_useraddr()
{
	return get_saved_state_pc(current_thread()->machine.upcb);
}

/*
 * Routine: machine_stack_detach
 *
 */
vm_offset_t
machine_stack_detach(thread_t thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	stack = thread->kernel_stack;
#if CONFIG_STKSZ
	kcov_stksz_set_thread_stack(thread, stack);
#endif
	thread->kernel_stack = 0;
	thread->machine.kstackptr = NULL;

	return stack;
}


/*
 * Routine: machine_stack_attach
 *
 */
void
machine_stack_attach(thread_t thread,
    vm_offset_t stack)
{
	struct arm_kernel_context *context;
	struct arm_kernel_saved_state *savestate;
	struct arm_kernel_neon_saved_state *neon_savestate;
	uint32_t current_el;

#define machine_stack_attach_kprintf(x...) \
	/* kprintf("machine_stack_attach: " x) */

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	thread->kernel_stack = stack;
#if CONFIG_STKSZ
	kcov_stksz_set_thread_stack(thread, 0);
#endif
	void *kstackptr = (void *)(stack + kernel_stack_size - sizeof(struct thread_kernel_state));
	thread->machine.kstackptr = kstackptr;
	thread_initialize_kernel_state(thread);

	machine_stack_attach_kprintf("kstackptr: %lx\n", (vm_address_t)kstackptr);

	current_el = (uint32_t) __builtin_arm_rsr64("CurrentEL");
	context = &((thread_kernel_state_t) thread->machine.kstackptr)->machine;
	savestate = &context->ss;
	savestate->fp = 0;
	savestate->sp = (uint64_t)kstackptr;
	savestate->pc_was_in_userspace = false;
#if defined(HAS_APPLE_PAC)
	/* Sign the initial kernel stack saved state */
	uint64_t intr = ml_pac_safe_interrupts_disable();
	asm volatile (
                "adrp	x17, _thread_continue@page"             "\n"
                "add	x17, x17, _thread_continue@pageoff"     "\n"
                "ldr	x16, [%[ss], %[SS64_SP]]"               "\n"
                "pacia1716"                                     "\n"
                "str	x17, [%[ss], %[SS64_LR]]"               "\n"
                :
                : [ss]                  "r"(&context->ss),
                  [SS64_SP]             "i"(offsetof(struct arm_kernel_saved_state, sp)),
                  [SS64_LR]             "i"(offsetof(struct arm_kernel_saved_state, lr))
                : "x16", "x17"
        );
	ml_pac_safe_interrupts_restore(intr);
#else
	savestate->lr = (uintptr_t)thread_continue;
#endif /* defined(HAS_APPLE_PAC) */
	neon_savestate = &context->ns;
	neon_savestate->fpcr = FPCR_DEFAULT;
	machine_stack_attach_kprintf("thread = %p pc = %llx, sp = %llx\n", thread, savestate->lr, savestate->sp);
}


/*
 * Routine: machine_stack_handoff
 *
 */
void
machine_stack_handoff(thread_t old,
    thread_t new)
{
	vm_offset_t  stack;

#if __ARM_PAN_AVAILABLE__
	if (__improbable(__builtin_arm_rsr("pan") == 0)) {
		panic("stack handoff with PAN disabled");
	}
#endif

#if CONFIG_CPU_COUNTERS
	kpc_off_cpu(old);
#endif /* CONFIG_CPU_COUNTERS */

	stack = machine_stack_detach(old);
#if CONFIG_STKSZ
	kcov_stksz_set_thread_stack(new, 0);
#endif
	new->kernel_stack = stack;
	void *kstackptr = (void *)(stack + kernel_stack_size - sizeof(struct thread_kernel_state));
	new->machine.kstackptr = kstackptr;
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
#if KASAN_TBI
		kasan_unpoison_stack(old->reserved_stack, kernel_stack_size);
#endif /* KASAN_TBI */
		new->reserved_stack = stack;
	}

	machine_switch_pmap_and_extended_context(old, new);

	machine_set_current_thread(new);
	thread_initialize_kernel_state(new);
}


/*
 * Routine: call_continuation
 *
 */
void
call_continuation(thread_continue_t continuation,
    void *parameter,
    wait_result_t wresult,
    boolean_t enable_interrupts)
{
#define call_continuation_kprintf(x...) \
	/* kprintf("call_continuation_kprintf:" x) */

	call_continuation_kprintf("thread = %p continuation = %p, stack = %lx\n",
	    current_thread(), continuation, current_thread()->machine.kstackptr);
	Call_continuation(continuation, parameter, wresult, enable_interrupts);
}

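/*
 * Helpers for programming the self-hosted debug breakpoint/watchpoint
 * registers.  The control-register variants also OR the value written into
 * an accumulator so callers can tell whether any breakpoint or watchpoint
 * ended up enabled.
 */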
#define SET_DBGBCRn(n, value, accum) \
	__asm__ volatile( \
	        "msr DBGBCR" #n "_EL1, %[val]\n" \
	        "orr %[result], %[result], %[val]\n" \
	        : [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGBVRn(n, value) \
	__asm__ volatile("msr DBGBVR" #n "_EL1, %0" : : "r"(value))

#define SET_DBGWCRn(n, value, accum) \
	__asm__ volatile( \
	        "msr DBGWCR" #n "_EL1, %[val]\n" \
	        "orr %[result], %[result], %[val]\n" \
	        : [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGWVRn(n, value) \
	__asm__ volatile("msr DBGWVR" #n "_EL1, %0" : : "r"(value))

void
arm_debug_set32(arm_debug_state_t *debug_state)
{
	struct cpu_data *  cpu_data_ptr;
	arm_debug_info_t * debug_info    = arm_debug_info();
	boolean_t          intr;
	arm_debug_state_t  off_state;
	arm_debug_state_t  *cpu_debug;
	uint64_t           all_ctrls = 0;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();
	cpu_debug = cpu_data_ptr->cpu_user_debug;

	/*
	 * Retain and set new per-cpu state.
	 * Reference count does not matter when turning off debug state.
	 */
	if (debug_state == NULL) {
		bzero(&off_state, sizeof(off_state));
		cpu_data_ptr->cpu_user_debug = NULL;
		debug_state = &off_state;
	} else {
		os_ref_retain(&debug_state->ref);
		cpu_data_ptr->cpu_user_debug = debug_state;
	}

	/* Release previous debug state. */
	if (cpu_debug != NULL) {
		if (os_ref_release(&cpu_debug->ref) == 0) {
			zfree(ads_zone, cpu_debug);
		}
	}

	switch (debug_info->num_breakpoint_pairs) {
	case 16:
		SET_DBGBVRn(15, (uint64_t)debug_state->uds.ds32.bvr[15]);
		SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds32.bcr[15], all_ctrls);
		OS_FALLTHROUGH;
	case 15:
		SET_DBGBVRn(14, (uint64_t)debug_state->uds.ds32.bvr[14]);
		SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds32.bcr[14], all_ctrls);
		OS_FALLTHROUGH;
	case 14:
		SET_DBGBVRn(13, (uint64_t)debug_state->uds.ds32.bvr[13]);
		SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds32.bcr[13], all_ctrls);
		OS_FALLTHROUGH;
	case 13:
		SET_DBGBVRn(12, (uint64_t)debug_state->uds.ds32.bvr[12]);
		SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds32.bcr[12], all_ctrls);
		OS_FALLTHROUGH;
	case 12:
		SET_DBGBVRn(11, (uint64_t)debug_state->uds.ds32.bvr[11]);
		SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds32.bcr[11], all_ctrls);
		OS_FALLTHROUGH;
	case 11:
		SET_DBGBVRn(10, (uint64_t)debug_state->uds.ds32.bvr[10]);
		SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds32.bcr[10], all_ctrls);
		OS_FALLTHROUGH;
	case 10:
		SET_DBGBVRn(9, (uint64_t)debug_state->uds.ds32.bvr[9]);
		SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds32.bcr[9], all_ctrls);
		OS_FALLTHROUGH;
	case 9:
		SET_DBGBVRn(8, (uint64_t)debug_state->uds.ds32.bvr[8]);
		SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds32.bcr[8], all_ctrls);
		OS_FALLTHROUGH;
	case 8:
		SET_DBGBVRn(7, (uint64_t)debug_state->uds.ds32.bvr[7]);
		SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds32.bcr[7], all_ctrls);
		OS_FALLTHROUGH;
	case 7:
		SET_DBGBVRn(6, (uint64_t)debug_state->uds.ds32.bvr[6]);
		SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds32.bcr[6], all_ctrls);
		OS_FALLTHROUGH;
	case 6:
		SET_DBGBVRn(5, (uint64_t)debug_state->uds.ds32.bvr[5]);
		SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds32.bcr[5], all_ctrls);
		OS_FALLTHROUGH;
	case 5:
		SET_DBGBVRn(4, (uint64_t)debug_state->uds.ds32.bvr[4]);
		SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds32.bcr[4], all_ctrls);
		OS_FALLTHROUGH;
	case 4:
		SET_DBGBVRn(3, (uint64_t)debug_state->uds.ds32.bvr[3]);
		SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds32.bcr[3], all_ctrls);
		OS_FALLTHROUGH;
	case 3:
		SET_DBGBVRn(2, (uint64_t)debug_state->uds.ds32.bvr[2]);
		SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds32.bcr[2], all_ctrls);
		OS_FALLTHROUGH;
	case 2:
		SET_DBGBVRn(1, (uint64_t)debug_state->uds.ds32.bvr[1]);
		SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds32.bcr[1], all_ctrls);
		OS_FALLTHROUGH;
	case 1:
		SET_DBGBVRn(0, (uint64_t)debug_state->uds.ds32.bvr[0]);
		SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds32.bcr[0], all_ctrls);
		OS_FALLTHROUGH;
	default:
		break;
	}

	switch (debug_info->num_watchpoint_pairs) {
	case 16:
		SET_DBGWVRn(15, (uint64_t)debug_state->uds.ds32.wvr[15]);
		SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds32.wcr[15], all_ctrls);
		OS_FALLTHROUGH;
	case 15:
		SET_DBGWVRn(14, (uint64_t)debug_state->uds.ds32.wvr[14]);
		SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds32.wcr[14], all_ctrls);
		OS_FALLTHROUGH;
	case 14:
		SET_DBGWVRn(13, (uint64_t)debug_state->uds.ds32.wvr[13]);
		SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds32.wcr[13], all_ctrls);
		OS_FALLTHROUGH;
	case 13:
		SET_DBGWVRn(12, (uint64_t)debug_state->uds.ds32.wvr[12]);
		SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds32.wcr[12], all_ctrls);
		OS_FALLTHROUGH;
	case 12:
		SET_DBGWVRn(11, (uint64_t)debug_state->uds.ds32.wvr[11]);
		SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds32.wcr[11], all_ctrls);
		OS_FALLTHROUGH;
	case 11:
		SET_DBGWVRn(10, (uint64_t)debug_state->uds.ds32.wvr[10]);
		SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds32.wcr[10], all_ctrls);
		OS_FALLTHROUGH;
	case 10:
		SET_DBGWVRn(9, (uint64_t)debug_state->uds.ds32.wvr[9]);
		SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds32.wcr[9], all_ctrls);
		OS_FALLTHROUGH;
	case 9:
		SET_DBGWVRn(8, (uint64_t)debug_state->uds.ds32.wvr[8]);
		SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds32.wcr[8], all_ctrls);
		OS_FALLTHROUGH;
	case 8:
		SET_DBGWVRn(7, (uint64_t)debug_state->uds.ds32.wvr[7]);
		SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds32.wcr[7], all_ctrls);
		OS_FALLTHROUGH;
	case 7:
		SET_DBGWVRn(6, (uint64_t)debug_state->uds.ds32.wvr[6]);
		SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds32.wcr[6], all_ctrls);
		OS_FALLTHROUGH;
	case 6:
		SET_DBGWVRn(5, (uint64_t)debug_state->uds.ds32.wvr[5]);
		SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds32.wcr[5], all_ctrls);
		OS_FALLTHROUGH;
	case 5:
		SET_DBGWVRn(4, (uint64_t)debug_state->uds.ds32.wvr[4]);
		SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds32.wcr[4], all_ctrls);
		OS_FALLTHROUGH;
	case 4:
		SET_DBGWVRn(3, (uint64_t)debug_state->uds.ds32.wvr[3]);
		SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds32.wcr[3], all_ctrls);
		OS_FALLTHROUGH;
	case 3:
		SET_DBGWVRn(2, (uint64_t)debug_state->uds.ds32.wvr[2]);
		SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds32.wcr[2], all_ctrls);
		OS_FALLTHROUGH;
	case 2:
		SET_DBGWVRn(1, (uint64_t)debug_state->uds.ds32.wvr[1]);
		SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds32.wcr[1], all_ctrls);
		OS_FALLTHROUGH;
	case 1:
		SET_DBGWVRn(0, (uint64_t)debug_state->uds.ds32.wvr[0]);
		SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds32.wcr[0], all_ctrls);
		OS_FALLTHROUGH;
	default:
		break;
	}

#if defined(CONFIG_KERNEL_INTEGRITY)
	if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
		panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
	}
#endif

	/*
	 * Breakpoint/Watchpoint Enable
	 */
	if (all_ctrls != 0) {
		update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
	} else {
		update_mdscr(0x8000, 0);
	}

	/*
	 * Software debug single step enable
	 */
	if (debug_state->uds.ds32.mdscr_el1 & 0x1) {
		update_mdscr(0, 1); // MDSCR_EL1[SS]

		mask_user_saved_state_cpsr(current_thread()->machine.upcb, PSR64_SS, 0);
	} else {
		update_mdscr(0x1, 0);
	}

	__builtin_arm_isb(ISB_SY);
	(void) ml_set_interrupts_enabled(intr);
}

void
arm_debug_set64(arm_debug_state_t *debug_state)
{
	struct cpu_data *  cpu_data_ptr;
	arm_debug_info_t * debug_info    = arm_debug_info();
	boolean_t          intr;
	arm_debug_state_t  off_state;
	arm_debug_state_t  *cpu_debug;
	uint64_t           all_ctrls = 0;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();
	cpu_debug = cpu_data_ptr->cpu_user_debug;

	/*
	 * Retain and set new per-cpu state.
	 * Reference count does not matter when turning off debug state.
	 */
	if (debug_state == NULL) {
		bzero(&off_state, sizeof(off_state));
		cpu_data_ptr->cpu_user_debug = NULL;
		debug_state = &off_state;
	} else {
		os_ref_retain(&debug_state->ref);
		cpu_data_ptr->cpu_user_debug = debug_state;
	}

	/* Release previous debug state. */
	if (cpu_debug != NULL) {
		if (os_ref_release(&cpu_debug->ref) == 0) {
			zfree(ads_zone, cpu_debug);
		}
	}

	switch (debug_info->num_breakpoint_pairs) {
	case 16:
		SET_DBGBVRn(15, debug_state->uds.ds64.bvr[15]);
		SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds64.bcr[15], all_ctrls);
		OS_FALLTHROUGH;
	case 15:
		SET_DBGBVRn(14, debug_state->uds.ds64.bvr[14]);
		SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds64.bcr[14], all_ctrls);
		OS_FALLTHROUGH;
	case 14:
		SET_DBGBVRn(13, debug_state->uds.ds64.bvr[13]);
		SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds64.bcr[13], all_ctrls);
		OS_FALLTHROUGH;
	case 13:
		SET_DBGBVRn(12, debug_state->uds.ds64.bvr[12]);
		SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds64.bcr[12], all_ctrls);
		OS_FALLTHROUGH;
	case 12:
		SET_DBGBVRn(11, debug_state->uds.ds64.bvr[11]);
		SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds64.bcr[11], all_ctrls);
		OS_FALLTHROUGH;
	case 11:
		SET_DBGBVRn(10, debug_state->uds.ds64.bvr[10]);
		SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds64.bcr[10], all_ctrls);
		OS_FALLTHROUGH;
	case 10:
		SET_DBGBVRn(9, debug_state->uds.ds64.bvr[9]);
		SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds64.bcr[9], all_ctrls);
		OS_FALLTHROUGH;
	case 9:
		SET_DBGBVRn(8, debug_state->uds.ds64.bvr[8]);
		SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds64.bcr[8], all_ctrls);
		OS_FALLTHROUGH;
	case 8:
		SET_DBGBVRn(7, debug_state->uds.ds64.bvr[7]);
		SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds64.bcr[7], all_ctrls);
		OS_FALLTHROUGH;
	case 7:
		SET_DBGBVRn(6, debug_state->uds.ds64.bvr[6]);
		SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds64.bcr[6], all_ctrls);
		OS_FALLTHROUGH;
	case 6:
		SET_DBGBVRn(5, debug_state->uds.ds64.bvr[5]);
		SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds64.bcr[5], all_ctrls);
		OS_FALLTHROUGH;
	case 5:
		SET_DBGBVRn(4, debug_state->uds.ds64.bvr[4]);
		SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds64.bcr[4], all_ctrls);
		OS_FALLTHROUGH;
	case 4:
		SET_DBGBVRn(3, debug_state->uds.ds64.bvr[3]);
		SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds64.bcr[3], all_ctrls);
		OS_FALLTHROUGH;
	case 3:
		SET_DBGBVRn(2, debug_state->uds.ds64.bvr[2]);
		SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds64.bcr[2], all_ctrls);
		OS_FALLTHROUGH;
	case 2:
		SET_DBGBVRn(1, debug_state->uds.ds64.bvr[1]);
		SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds64.bcr[1], all_ctrls);
		OS_FALLTHROUGH;
	case 1:
		SET_DBGBVRn(0, debug_state->uds.ds64.bvr[0]);
		SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds64.bcr[0], all_ctrls);
		OS_FALLTHROUGH;
	default:
		break;
	}

	switch (debug_info->num_watchpoint_pairs) {
	case 16:
		SET_DBGWVRn(15, debug_state->uds.ds64.wvr[15]);
		SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds64.wcr[15], all_ctrls);
		OS_FALLTHROUGH;
	case 15:
		SET_DBGWVRn(14, debug_state->uds.ds64.wvr[14]);
		SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds64.wcr[14], all_ctrls);
		OS_FALLTHROUGH;
	case 14:
		SET_DBGWVRn(13, debug_state->uds.ds64.wvr[13]);
		SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds64.wcr[13], all_ctrls);
		OS_FALLTHROUGH;
	case 13:
		SET_DBGWVRn(12, debug_state->uds.ds64.wvr[12]);
		SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds64.wcr[12], all_ctrls);
		OS_FALLTHROUGH;
	case 12:
		SET_DBGWVRn(11, debug_state->uds.ds64.wvr[11]);
		SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds64.wcr[11], all_ctrls);
		OS_FALLTHROUGH;
	case 11:
		SET_DBGWVRn(10, debug_state->uds.ds64.wvr[10]);
		SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds64.wcr[10], all_ctrls);
		OS_FALLTHROUGH;
	case 10:
		SET_DBGWVRn(9, debug_state->uds.ds64.wvr[9]);
		SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds64.wcr[9], all_ctrls);
		OS_FALLTHROUGH;
	case 9:
		SET_DBGWVRn(8, debug_state->uds.ds64.wvr[8]);
		SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds64.wcr[8], all_ctrls);
		OS_FALLTHROUGH;
	case 8:
		SET_DBGWVRn(7, debug_state->uds.ds64.wvr[7]);
		SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds64.wcr[7], all_ctrls);
		OS_FALLTHROUGH;
	case 7:
		SET_DBGWVRn(6, debug_state->uds.ds64.wvr[6]);
		SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds64.wcr[6], all_ctrls);
		OS_FALLTHROUGH;
	case 6:
		SET_DBGWVRn(5, debug_state->uds.ds64.wvr[5]);
		SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds64.wcr[5], all_ctrls);
		OS_FALLTHROUGH;
	case 5:
		SET_DBGWVRn(4, debug_state->uds.ds64.wvr[4]);
		SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds64.wcr[4], all_ctrls);
		OS_FALLTHROUGH;
	case 4:
		SET_DBGWVRn(3, debug_state->uds.ds64.wvr[3]);
		SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds64.wcr[3], all_ctrls);
		OS_FALLTHROUGH;
	case 3:
		SET_DBGWVRn(2, debug_state->uds.ds64.wvr[2]);
		SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds64.wcr[2], all_ctrls);
		OS_FALLTHROUGH;
	case 2:
		SET_DBGWVRn(1, debug_state->uds.ds64.wvr[1]);
		SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds64.wcr[1], all_ctrls);
		OS_FALLTHROUGH;
	case 1:
		SET_DBGWVRn(0, debug_state->uds.ds64.wvr[0]);
		SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds64.wcr[0], all_ctrls);
		OS_FALLTHROUGH;
	default:
		break;
	}

#if defined(CONFIG_KERNEL_INTEGRITY)
	if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
		panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
	}
#endif

	/*
	 * Breakpoint/Watchpoint Enable
	 */
	if (all_ctrls != 0) {
		update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
	} else {
		update_mdscr(0x8000, 0);
	}

	/*
	 * Software debug single step enable
	 */
	if (debug_state->uds.ds64.mdscr_el1 & 0x1) {
		update_mdscr(0, 1); // MDSCR_EL1[SS]

		mask_user_saved_state_cpsr(current_thread()->machine.upcb, PSR64_SS, 0);
	} else {
		update_mdscr(0x1, 0);
	}

	__builtin_arm_isb(ISB_SY);
	(void) ml_set_interrupts_enabled(intr);
}

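/*
 * Load a user debug state onto the current CPU, dispatching on the state's
 * flavor; a NULL state turns debug off, using the current thread's data
 * width to pick the loader.
 */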
void
arm_debug_set(arm_debug_state_t *debug_state)
{
	if (debug_state) {
		switch (debug_state->dsh.flavor) {
		case ARM_DEBUG_STATE32:
			arm_debug_set32(debug_state);
			break;
		case ARM_DEBUG_STATE64:
			arm_debug_set64(debug_state);
			break;
		default:
			panic("arm_debug_set");
			break;
		}
	} else {
		if (thread_is_64bit_data(current_thread())) {
			arm_debug_set64(debug_state);
		} else {
			arm_debug_set32(debug_state);
		}
	}
}

#define VM_MAX_ADDRESS32          ((vm_address_t) 0x80000000)
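/*
 * Validity checks: reject any enabled breakpoint or watchpoint whose
 * address lies outside the user address range for the state's width.
 */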
boolean_t
debug_legacy_state_is_valid(arm_legacy_debug_state_t *debug_state)
{
	arm_debug_info_t *debug_info = arm_debug_info();
	uint32_t i;
	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
		if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i]) {
			return FALSE;
		}
	}

	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
		if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i]) {
			return FALSE;
		}
	}
	return TRUE;
}

boolean_t
debug_state_is_valid32(arm_debug_state32_t *debug_state)
{
	arm_debug_info_t *debug_info = arm_debug_info();
	uint32_t i;
	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
		if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i]) {
			return FALSE;
		}
	}

	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
		if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i]) {
			return FALSE;
		}
	}
	return TRUE;
}

boolean_t
debug_state_is_valid64(arm_debug_state64_t *debug_state)
{
	arm_debug_info_t *debug_info = arm_debug_info();
	uint32_t i;
	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
		if (0 != debug_state->bcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->bvr[i]) {
			return FALSE;
		}
	}

	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
		if (0 != debug_state->wcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->wvr[i]) {
			return FALSE;
		}
	}
	return TRUE;
}

/*
 * Duplicate one arm_debug_state_t to another.  "all" parameter
 * is ignored in the case of ARM -- Is this the right assumption?
 */
void
copy_legacy_debug_state(arm_legacy_debug_state_t * src,
    arm_legacy_debug_state_t * target,
    __unused boolean_t         all)
{
	bcopy(src, target, sizeof(arm_legacy_debug_state_t));
}

void
copy_debug_state32(arm_debug_state32_t * src,
    arm_debug_state32_t * target,
    __unused boolean_t    all)
{
	bcopy(src, target, sizeof(arm_debug_state32_t));
}

void
copy_debug_state64(arm_debug_state64_t * src,
    arm_debug_state64_t * target,
    __unused boolean_t    all)
{
	bcopy(src, target, sizeof(arm_debug_state64_t));
}

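/*
 * Set the thread's TSD (TLS) base.  An out-of-range base is silently
 * replaced with 0; for the current thread the new base takes effect
 * immediately via set_tpidrro().
 */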
kern_return_t
machine_thread_set_tsd_base(thread_t         thread,
    mach_vm_offset_t tsd_base)
{
	if (get_threadtask(thread) == kernel_task) {
		return KERN_INVALID_ARGUMENT;
	}

	if (thread_is_64bit_addr(thread)) {
		if (tsd_base > vm_map_max(thread->map)) {
			tsd_base = 0ULL;
		}
	} else {
		if (tsd_base > UINT32_MAX) {
			tsd_base = 0ULL;
		}
	}

	thread->machine.cthread_self = tsd_base;

	/* For current thread, make the TSD base active immediately */
	if (thread == current_thread()) {
		mp_disable_preemption();
		set_tpidrro(tsd_base);
		mp_enable_preemption();
	}

	return KERN_SUCCESS;
}

void
machine_tecs(__unused thread_t thr)
{
}

int
machine_csv(__unused cpuvn_e cve)
{
	return 0;
}

#if __ARM_ARCH_8_5__
void
arm_context_switch_requires_sync()
{
	current_cpu_datap()->sync_on_cswitch = 1;
}
#endif

#if __has_feature(ptrauth_calls)
boolean_t
arm_user_jop_disabled(void)
{
	return FALSE;
}
#endif /* __has_feature(ptrauth_calls) */
1423