xref: /xnu-10063.141.1/osfmk/arm64/pcb.c (revision d8b80295118ef25ac3a784134bcf95cd8e88109f)
1 /*
2  * Copyright (c) 2007-2023 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <debug.h>
30 
31 #include <types.h>
32 
33 #include <mach/mach_types.h>
34 #include <mach/thread_status.h>
35 #include <mach/vm_types.h>
36 
37 #include <kern/kern_types.h>
38 #include <kern/task.h>
39 #include <kern/thread.h>
40 #include <kern/misc_protos.h>
41 #include <kern/mach_param.h>
42 #include <kern/spl.h>
43 #include <kern/machine.h>
44 #include <kern/kpc.h>
45 #include <kern/monotonic.h>
46 
47 #include <machine/atomic.h>
48 #include <arm64/proc_reg.h>
49 #include <arm64/machine_machdep.h>
50 #include <arm/cpu_data_internal.h>
51 #include <arm/machdep_call.h>
52 #include <arm/misc_protos.h>
53 #include <arm/cpuid.h>
54 
55 #include <vm/vm_map.h>
56 #include <vm/vm_protos.h>
57 
58 #include <sys/kdebug.h>
59 
60 
61 #include <san/kcov_stksz.h>
62 
63 #include <IOKit/IOBSD.h>
64 
65 #include <pexpert/pexpert.h>
66 
67 extern int debug_task;
68 
/* zone for debug_state area (arm_debug_state_t; refcounted via os_ref — see arm_debug_set32) */
ZONE_DEFINE_TYPE(ads_zone, "arm debug state", arm_debug_state_t, ZC_NONE);
/* zone for the per-user-thread save-state area (arm_context_t: upcb + uNeon) */
ZONE_DEFINE_TYPE(user_ss_zone, "user save state", arm_context_t, ZC_NONE);
72 
73 
74 
75 /*
76  * Routine: consider_machine_collect
77  *
78  */
void
consider_machine_collect(void)
{
	/* Machine-dependent reclamation hook: ask the pmap layer to free what it can. */
	pmap_gc();
}
84 
85 /*
86  * Routine: consider_machine_adjust
87  *
88  */
void
consider_machine_adjust(void)
{
	/* Nothing to adjust on this platform. */
}
93 
94 
95 
96 
97 
98 
99 
100 
/*
 * Hand the per-CPU data pointers off from the outgoing thread to the
 * incoming one.  Afterwards machine_thread_on_core(old) reports false
 * and machine_thread_on_core(new) reports true.
 */
static inline void
machine_thread_switch_cpu_data(thread_t old, thread_t new)
{
	/*
	 * We build with -fno-strict-aliasing, so the load through temporaries
	 * is required so that this generates a single load / store pair.
	 */
	cpu_data_t *datap = old->machine.CpuDatap;
	vm_offset_t base  = old->machine.pcpu_data_base;

	/* TODO: Should this be ordered? */

	/* Mark the outgoing thread off-core... */
	old->machine.CpuDatap = NULL;
	old->machine.pcpu_data_base = 0;

	/* ...then attach this CPU's data to the incoming thread. */
	new->machine.CpuDatap = datap;
	new->machine.pcpu_data_base = base;
}
119 
120 /**
121  * routine: machine_switch_pmap_and_extended_context
122  *
123  * Helper function used by machine_switch_context and machine_stack_handoff to switch the
124  * extended context and switch the pmap if necessary.
125  *
126  */
127 
static inline void
machine_switch_pmap_and_extended_context(thread_t old, thread_t new)
{
	pmap_t new_pmap;

	new_pmap = new->map->pmap;
	if (old->map->pmap != new_pmap) {
		/* Address space changes: pmap_switch() reprograms TTBR0 (and issues DSB). */
		pmap_switch(new_pmap);
	} else {
		/*
		 * If the thread is preempted while performing cache or TLB maintenance,
		 * it may be migrated to a different CPU between the completion of the relevant
		 * maintenance instruction and the synchronizing DSB.   ARM requires that the
		 * synchronizing DSB must be issued *on the PE that issued the maintenance instruction*
		 * in order to guarantee completion of the instruction and visibility of its effects.
		 * Issue DSB here to enforce that guarantee.  We only do this for the case in which
		 * the pmap isn't changing, as we expect pmap_switch() to issue DSB when it updates
		 * TTBR0.  Note also that cache maintenance may be performed in userspace, so we
		 * cannot further limit this operation e.g. by setting a per-thread flag to indicate
		 * a pending kernel TLB or cache maintenance instruction.
		 */
		__builtin_arm_dsb(DSB_ISH);
	}

	/* Finally, move the per-CPU data pointers from 'old' to 'new'. */
	machine_thread_switch_cpu_data(old, new);
}
161 
162 /*
163  * Routine: machine_switch_context
164  *
165  */
166 thread_t
machine_switch_context(thread_t old,thread_continue_t continuation,thread_t new)167 machine_switch_context(thread_t old,
168     thread_continue_t continuation,
169     thread_t new)
170 {
171 	thread_t retval;
172 
173 #if __ARM_PAN_AVAILABLE__
174 	if (__improbable(__builtin_arm_rsr("pan") == 0)) {
175 		panic("context switch with PAN disabled");
176 	}
177 #endif
178 
179 #define machine_switch_context_kprintf(x...) \
180 	/* kprintf("machine_switch_context: " x) */
181 
182 	if (old == new) {
183 		panic("machine_switch_context");
184 	}
185 
186 #if CONFIG_CPU_COUNTERS
187 	kpc_off_cpu(old);
188 #endif /* CONFIG_CPU_COUNTERS */
189 
190 	machine_switch_pmap_and_extended_context(old, new);
191 
192 	machine_switch_context_kprintf("old= %x contination = %x new = %x\n", old, continuation, new);
193 
194 	retval = Switch_context(old, continuation, new);
195 	assert(retval != NULL);
196 
197 	return retval;
198 }
199 
200 boolean_t
machine_thread_on_core(thread_t thread)201 machine_thread_on_core(thread_t thread)
202 {
203 	return thread->machine.CpuDatap != NULL;
204 }
205 
/*
 * Like machine_thread_on_core(), but tolerates a stale or invalid
 * 'thread' pointer.  Caller must have preemption disabled.
 */
boolean_t
machine_thread_on_core_allow_invalid(thread_t thread)
{
	extern int _copyin_atomic64(const char *src, uint64_t *dst);
	uint64_t addr;

	/*
	 * Utilize that the thread zone is sequestered which means
	 * that this kernel-to-kernel copyin can't read data
	 * from anything but a thread, zeroed or freed memory.
	 */
	assert(get_preemption_level() > 0);
	thread = pgz_decode_allow_invalid(thread, ZONE_ID_THREAD);
	if (thread == THREAD_NULL) {
		return false;
	}
	thread_require(thread);
	/* Fault-tolerant read of CpuDatap; non-zero means "on core". */
	if (_copyin_atomic64((void *)&thread->machine.CpuDatap, &addr) == 0) {
		return addr != 0;
	}
	/* Copyin faulted: the memory no longer looks like a live thread. */
	return false;
}
228 
229 
230 /*
231  * Routine: machine_thread_create
232  *
233  */
/*
 * Set up the machine-dependent portion of a newly created thread.
 *
 * thread:       the thread being initialized
 * task:         owning task; kernel_task threads get no user save state
 * first_thread: when true, CpuDatap/pcpu_data_base are left untouched
 *               (presumably already valid for the bootstrap thread —
 *               confirm against the caller)
 */
void
machine_thread_create(thread_t thread, task_t task, bool first_thread)
{
#define machine_thread_create_kprintf(x...) \
	/* kprintf("machine_thread_create: " x) */

	machine_thread_create_kprintf("thread = %x\n", thread);

	if (!first_thread) {
		thread->machine.CpuDatap = (cpu_data_t *)0;
		// setting this offset will cause trying to use it to panic
		thread->machine.pcpu_data_base = (vm_offset_t)VM_MIN_KERNEL_ADDRESS;
	}
	thread->machine.arm_machine_flags = 0;
	thread->machine.preemption_count = 0;
	thread->machine.cthread_self = 0;
	thread->machine.kpcb = NULL;
	thread->machine.exception_trace_code = 0;
#if defined(HAS_APPLE_PAC)
	/* Copy the task's PAC ROP/JOP identifiers and user-JOP setting. */
	thread->machine.rop_pid = task->rop_pid;
	thread->machine.jop_pid = task->jop_pid;
	if (task->disable_user_jop) {
		thread->machine.arm_machine_flags |= ARM_MACHINE_THREAD_DISABLE_USER_JOP;
	}
#endif

	if (task != kernel_task) {
		/* If this isn't a kernel thread, we'll have userspace state. */
		arm_context_t *contextData = zalloc_flags(user_ss_zone,
		    Z_WAITOK | Z_NOFAIL);

#if __has_feature(ptrauth_calls)
		/* Publish the (signed) state pointers atomically w.r.t. PAC-unsafe interrupts. */
		uint64_t intr = ml_pac_safe_interrupts_disable();
		zone_require(user_ss_zone, contextData);
#endif
		thread->machine.contextData = contextData;
		thread->machine.upcb = &contextData->ss;
		thread->machine.uNeon = &contextData->ns;
#if __has_feature(ptrauth_calls)
		ml_pac_safe_interrupts_restore(intr);
#endif

		/* Tag the save areas with the flavor matching the task's ABI. */
		if (task_has_64Bit_data(task)) {
			thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
			thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
			thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
			thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;

		} else {
			thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
			thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
			thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
			thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		}
	} else {
		/* Kernel threads carry no user save state. */
		thread->machine.upcb = NULL;
		thread->machine.uNeon = NULL;
		thread->machine.contextData = NULL;
	}

	bzero(&thread->machine.perfctrl_state, sizeof(thread->machine.perfctrl_state));
	machine_thread_state_initialize(thread);
}
303 
304 /*
305  * Routine: machine_thread_process_signature
306  *
307  * Called to allow code signature dependent adjustments to the thread
308  * state. Note that this is usually called twice for the main thread:
309  * Once at thread creation by thread_create, when the signature is
310  * potentially not attached yet (which is usually the case for the
311  * first/main thread of a task), and once after the task's signature
312  * has actually been attached.
313  *
314  */
kern_return_t
machine_thread_process_signature(thread_t __unused thread, task_t __unused task)
{
	kern_return_t result = KERN_SUCCESS;

	/*
	 * Reset to default state.
	 *
	 * In general, this function must not assume anything about the
	 * previous signature dependent thread state.
	 *
	 * At least at the time of writing this, threads don't transition
	 * to different code signatures, so each thread this function
	 * operates on is "fresh" in the sense that
	 * machine_thread_process_signature() has either not even been
	 * called on it yet, or only been called as part of thread
	 * creation when there was no signature yet.
	 *
	 * But for easier reasoning, and to prevent future bugs, this
	 * function should always recalculate all signature-dependent
	 * thread state, as if the signature could actually change from an
	 * actual signature to another.
	 */
#if !__ARM_KERNEL_PROTECT__
	thread->machine.arm_machine_flags &= ~(ARM_MACHINE_THREAD_PRESERVE_X18);
#endif /* !__ARM_KERNEL_PROTECT__ */


	/*
	 * Set signature dependent state.
	 *
	 * NOTE: the `else` arm below lives inside the !__ARM_KERNEL_PROTECT__
	 * region, so with __ARM_KERNEL_PROTECT__ defined this collapses to an
	 * empty if-body — the braces still balance.
	 */
	if (task != kernel_task && task_has_64Bit_data(task)) {
#if !__ARM_KERNEL_PROTECT__
#if CONFIG_ROSETTA
		if (task_is_translated(task)) {
			/* Note that for x86_64 translation specifically, the
			 * context switch path implicitly switches x18 regardless
			 * of this flag. */
			thread->machine.arm_machine_flags |= ARM_MACHINE_THREAD_PRESERVE_X18;
		}
#endif /* CONFIG_ROSETTA */

		if (task->preserve_x18) {
			thread->machine.arm_machine_flags |= ARM_MACHINE_THREAD_PRESERVE_X18;
		}
	} else {
		/*
		 * For informational value only, context switch only trashes
		 * x18 for user threads.  (Except for devices with
		 * __ARM_KERNEL_PROTECT__, which make real destructive use of
		 * x18.)
		 */
		thread->machine.arm_machine_flags |= ARM_MACHINE_THREAD_PRESERVE_X18;
#endif /* !__ARM_KERNEL_PROTECT__ */
	}

	return result;
}
373 
374 /*
375  * Routine: machine_thread_destroy
376  *
377  */
/*
 * Tear down the machine-dependent state of a dying thread: free its
 * user save state and drop its reference on any debug state.
 */
void
machine_thread_destroy(thread_t thread)
{
	arm_context_t *thread_user_ss;

	if (thread->machine.contextData) {
		/* Disassociate the user save state from the thread before we free it. */
		thread_user_ss = thread->machine.contextData;
		thread->machine.upcb = NULL;
		thread->machine.uNeon = NULL;
		thread->machine.contextData = NULL;


		zfree(user_ss_zone, thread_user_ss);
	}

	if (thread->machine.DebugData != NULL) {
		/* If this CPU still points at the dying thread's debug state, disable it. */
		if (thread->machine.DebugData == getCpuDatap()->cpu_user_debug) {
			arm_debug_set(NULL);
		}

		/* Drop the thread's reference; free on last release. */
		if (os_ref_release(&thread->machine.DebugData->ref) == 0) {
			zfree(ads_zone, thread->machine.DebugData);
		}
	}
}
404 
405 
406 
407 
408 /*
409  * Routine: machine_thread_init
410  *
411  */
void
machine_thread_init(void)
{
	/* No machine-dependent thread-layer setup is required on this platform. */
}
417 
418 /*
419  * Routine:	machine_thread_template_init
420  *
421  */
/* Machine-dependent hook for initializing a thread template; unused on arm64. */
void
machine_thread_template_init(thread_t __unused thr_template)
{
	/* Nothing to do on this platform. */
}
427 
428 /*
429  * Routine: get_useraddr
430  *
431  */
432 user_addr_t
get_useraddr()433 get_useraddr()
434 {
435 	return get_saved_state_pc(current_thread()->machine.upcb);
436 }
437 
438 /*
439  * Routine: machine_stack_detach
440  *
441  */
/*
 * Detach and return the thread's kernel stack so the caller can hand
 * it to another thread or release it.
 */
vm_offset_t
machine_stack_detach(thread_t thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	stack = thread->kernel_stack;
#if CONFIG_STKSZ
	/* Record the detached stack for kcov stack-size instrumentation. */
	kcov_stksz_set_thread_stack(thread, stack);
#endif
	thread->kernel_stack = 0;
	thread->machine.kstackptr = NULL;

	return stack;
}
459 
460 
461 /*
462  * Routine: machine_stack_attach
463  *
464  */
/*
 * Attach a kernel stack to a thread and initialize the saved kernel
 * context at its top so the thread resumes in thread_continue().
 */
void
machine_stack_attach(thread_t thread,
    vm_offset_t stack)
{
	struct arm_kernel_context *context;
	struct arm_kernel_saved_state *savestate;
	struct arm_kernel_neon_saved_state *neon_savestate;
	uint32_t current_el;

#define machine_stack_attach_kprintf(x...) \
	/* kprintf("machine_stack_attach: " x) */

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	thread->kernel_stack = stack;
#if CONFIG_STKSZ
	kcov_stksz_set_thread_stack(thread, 0);
#endif
	/* thread_kernel_state lives at the very top of the stack. */
	void *kstackptr = (void *)(stack + kernel_stack_size - sizeof(struct thread_kernel_state));
	thread->machine.kstackptr = kstackptr;
	thread_initialize_kernel_state(thread);

	machine_stack_attach_kprintf("kstackptr: %lx\n", (vm_address_t)kstackptr);

	/* NOTE(review): current_el is read here but otherwise unused below. */
	current_el = (uint32_t) __builtin_arm_rsr64("CurrentEL");
	context = &((thread_kernel_state_t) thread->machine.kstackptr)->machine;
	savestate = &context->ss;
	savestate->fp = 0;
	savestate->sp = (uint64_t)kstackptr;
	savestate->pc_was_in_userspace = false;
#if defined(HAS_APPLE_PAC)
	/* Sign the initial kernel stack saved state */
	uint64_t intr = ml_pac_safe_interrupts_disable();
	/*
	 * pacia1716 signs x17 (= &thread_continue) using x16 (= the saved
	 * sp) as the modifier, then the signed value is stored as lr.
	 */
	asm volatile (
                "adrp	x17, _thread_continue@page"             "\n"
                "add	x17, x17, _thread_continue@pageoff"     "\n"
                "ldr	x16, [%[ss], %[SS64_SP]]"               "\n"
                "pacia1716"                                     "\n"
                "str	x17, [%[ss], %[SS64_LR]]"               "\n"
                :
                : [ss]                  "r"(&context->ss),
                  [SS64_SP]             "i"(offsetof(struct arm_kernel_saved_state, sp)),
                  [SS64_LR]             "i"(offsetof(struct arm_kernel_saved_state, lr))
                : "x16", "x17"
        );
	ml_pac_safe_interrupts_restore(intr);
#else
	savestate->lr = (uintptr_t)thread_continue;
#endif /* defined(HAS_APPLE_PAC) */
	neon_savestate = &context->ns;
	neon_savestate->fpcr = FPCR_DEFAULT;
	machine_stack_attach_kprintf("thread = %p pc = %llx, sp = %llx\n", thread, savestate->lr, savestate->sp);
}
519 
520 
521 /*
522  * Routine: machine_stack_handoff
523  *
524  */
/*
 * Hand 'old's kernel stack directly to 'new' (no Switch_context),
 * switching pmap / per-CPU state and making 'new' the current thread.
 */
void
machine_stack_handoff(thread_t old,
    thread_t new)
{
	vm_offset_t  stack;

#if __ARM_PAN_AVAILABLE__
	/* PAN must remain enabled while running in the kernel. */
	if (__improbable(__builtin_arm_rsr("pan") == 0)) {
		panic("stack handoff with PAN disabled");
	}
#endif

#if CONFIG_CPU_COUNTERS
	kpc_off_cpu(old);
#endif /* CONFIG_CPU_COUNTERS */

	/* Detach the stack from 'old' and attach it to 'new'. */
	stack = machine_stack_detach(old);
#if CONFIG_STKSZ
	kcov_stksz_set_thread_stack(new, 0);
#endif
	new->kernel_stack = stack;
	void *kstackptr = (void *)(stack + kernel_stack_size - sizeof(struct thread_kernel_state));
	new->machine.kstackptr = kstackptr;
	/* If 'old' held the reserved stack, swap reserves so each still has one. */
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
#if KASAN_TBI
		kasan_unpoison_stack(old->reserved_stack, kernel_stack_size);
#endif /* KASAN_TBI */
		new->reserved_stack = stack;
	}

	machine_switch_pmap_and_extended_context(old, new);

	machine_set_current_thread(new);
	thread_initialize_kernel_state(new);
}
562 
563 
564 /*
565  * Routine: call_continuation
566  *
567  */
/*
 * Run 'continuation(parameter, wresult)' via the Call_continuation
 * assembly helper, optionally re-enabling interrupts first.
 */
void
call_continuation(thread_continue_t continuation,
    void *parameter,
    wait_result_t wresult,
    boolean_t enable_interrupts)
{
#define call_continuation_kprintf(x...) \
	/* kprintf("call_continuation_kprintf:" x) */

	call_continuation_kprintf("thread = %p continuation = %p, stack = %lx\n",
	    current_thread(), continuation, current_thread()->machine.kstackptr);
	Call_continuation(continuation, parameter, wresult, enable_interrupts);
}
581 
/*
 * Write breakpoint control register n, OR-ing the written value into
 * 'accum' so callers can tell whether any control register was
 * non-zero (i.e. whether debug needs to be enabled at all).
 * The register number must be a literal, hence per-n macros.
 */
#define SET_DBGBCRn(n, value, accum) \
	__asm__ volatile( \
	        "msr DBGBCR" #n "_EL1, %[val]\n" \
	        "orr %[result], %[result], %[val]\n" \
	        : [result] "+r"(accum) : [val] "r"((value)))

/* Write breakpoint value (address) register n. */
#define SET_DBGBVRn(n, value) \
	__asm__ volatile("msr DBGBVR" #n "_EL1, %0" : : "r"(value))

/* Write watchpoint control register n, OR-ing the value into 'accum'. */
#define SET_DBGWCRn(n, value, accum) \
	__asm__ volatile( \
	        "msr DBGWCR" #n "_EL1, %[val]\n" \
	        "orr %[result], %[result], %[val]\n" \
	        : [result] "+r"(accum) : [val] "r"((value)))

/* Write watchpoint value (address) register n. */
#define SET_DBGWVRn(n, value) \
	__asm__ volatile("msr DBGWVR" #n "_EL1, %0" : : "r"(value))
599 
/*
 * Program this CPU's hardware debug registers from a 32-bit-flavor
 * debug state (breakpoints, watchpoints, software single-step).
 * NULL disables all debug state.  The new state is retained and
 * installed as the CPU's cpu_user_debug; the previous per-CPU state
 * is released (and freed on last reference).
 */
void
arm_debug_set32(arm_debug_state_t *debug_state)
{
	struct cpu_data *  cpu_data_ptr;
	arm_debug_info_t * debug_info    = arm_debug_info();
	boolean_t          intr;
	arm_debug_state_t  off_state;
	arm_debug_state_t  *cpu_debug;
	uint64_t           all_ctrls = 0; /* OR of every control register written */

	/* Debug registers are per-CPU; keep interrupts off while programming. */
	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();
	cpu_debug = cpu_data_ptr->cpu_user_debug;

	/*
	 * Retain and set new per-cpu state.
	 * Reference count does not matter when turning off debug state.
	 */
	if (debug_state == NULL) {
		/* All-zero state disables every breakpoint/watchpoint below. */
		bzero(&off_state, sizeof(off_state));
		cpu_data_ptr->cpu_user_debug = NULL;
		debug_state = &off_state;
	} else {
		os_ref_retain(&debug_state->ref);
		cpu_data_ptr->cpu_user_debug = debug_state;
	}

	/* Release previous debug state. */
	if (cpu_debug != NULL) {
		if (os_ref_release(&cpu_debug->ref) == 0) {
			zfree(ads_zone, cpu_debug);
		}
	}

	/*
	 * Program value/control pairs from the highest implemented pair on
	 * down; each case falls through so every implemented pair is
	 * written.  (MSR needs a literal register name, hence the unroll.)
	 */
	switch (debug_info->num_breakpoint_pairs) {
	case 16:
		SET_DBGBVRn(15, (uint64_t)debug_state->uds.ds32.bvr[15]);
		SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds32.bcr[15], all_ctrls);
		OS_FALLTHROUGH;
	case 15:
		SET_DBGBVRn(14, (uint64_t)debug_state->uds.ds32.bvr[14]);
		SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds32.bcr[14], all_ctrls);
		OS_FALLTHROUGH;
	case 14:
		SET_DBGBVRn(13, (uint64_t)debug_state->uds.ds32.bvr[13]);
		SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds32.bcr[13], all_ctrls);
		OS_FALLTHROUGH;
	case 13:
		SET_DBGBVRn(12, (uint64_t)debug_state->uds.ds32.bvr[12]);
		SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds32.bcr[12], all_ctrls);
		OS_FALLTHROUGH;
	case 12:
		SET_DBGBVRn(11, (uint64_t)debug_state->uds.ds32.bvr[11]);
		SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds32.bcr[11], all_ctrls);
		OS_FALLTHROUGH;
	case 11:
		SET_DBGBVRn(10, (uint64_t)debug_state->uds.ds32.bvr[10]);
		SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds32.bcr[10], all_ctrls);
		OS_FALLTHROUGH;
	case 10:
		SET_DBGBVRn(9, (uint64_t)debug_state->uds.ds32.bvr[9]);
		SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds32.bcr[9], all_ctrls);
		OS_FALLTHROUGH;
	case 9:
		SET_DBGBVRn(8, (uint64_t)debug_state->uds.ds32.bvr[8]);
		SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds32.bcr[8], all_ctrls);
		OS_FALLTHROUGH;
	case 8:
		SET_DBGBVRn(7, (uint64_t)debug_state->uds.ds32.bvr[7]);
		SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds32.bcr[7], all_ctrls);
		OS_FALLTHROUGH;
	case 7:
		SET_DBGBVRn(6, (uint64_t)debug_state->uds.ds32.bvr[6]);
		SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds32.bcr[6], all_ctrls);
		OS_FALLTHROUGH;
	case 6:
		SET_DBGBVRn(5, (uint64_t)debug_state->uds.ds32.bvr[5]);
		SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds32.bcr[5], all_ctrls);
		OS_FALLTHROUGH;
	case 5:
		SET_DBGBVRn(4, (uint64_t)debug_state->uds.ds32.bvr[4]);
		SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds32.bcr[4], all_ctrls);
		OS_FALLTHROUGH;
	case 4:
		SET_DBGBVRn(3, (uint64_t)debug_state->uds.ds32.bvr[3]);
		SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds32.bcr[3], all_ctrls);
		OS_FALLTHROUGH;
	case 3:
		SET_DBGBVRn(2, (uint64_t)debug_state->uds.ds32.bvr[2]);
		SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds32.bcr[2], all_ctrls);
		OS_FALLTHROUGH;
	case 2:
		SET_DBGBVRn(1, (uint64_t)debug_state->uds.ds32.bvr[1]);
		SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds32.bcr[1], all_ctrls);
		OS_FALLTHROUGH;
	case 1:
		SET_DBGBVRn(0, (uint64_t)debug_state->uds.ds32.bvr[0]);
		SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds32.bcr[0], all_ctrls);
		OS_FALLTHROUGH;
	default:
		break;
	}

	/* Same pattern for the watchpoint value/control pairs. */
	switch (debug_info->num_watchpoint_pairs) {
	case 16:
		SET_DBGWVRn(15, (uint64_t)debug_state->uds.ds32.wvr[15]);
		SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds32.wcr[15], all_ctrls);
		OS_FALLTHROUGH;
	case 15:
		SET_DBGWVRn(14, (uint64_t)debug_state->uds.ds32.wvr[14]);
		SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds32.wcr[14], all_ctrls);
		OS_FALLTHROUGH;
	case 14:
		SET_DBGWVRn(13, (uint64_t)debug_state->uds.ds32.wvr[13]);
		SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds32.wcr[13], all_ctrls);
		OS_FALLTHROUGH;
	case 13:
		SET_DBGWVRn(12, (uint64_t)debug_state->uds.ds32.wvr[12]);
		SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds32.wcr[12], all_ctrls);
		OS_FALLTHROUGH;
	case 12:
		SET_DBGWVRn(11, (uint64_t)debug_state->uds.ds32.wvr[11]);
		SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds32.wcr[11], all_ctrls);
		OS_FALLTHROUGH;
	case 11:
		SET_DBGWVRn(10, (uint64_t)debug_state->uds.ds32.wvr[10]);
		SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds32.wcr[10], all_ctrls);
		OS_FALLTHROUGH;
	case 10:
		SET_DBGWVRn(9, (uint64_t)debug_state->uds.ds32.wvr[9]);
		SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds32.wcr[9], all_ctrls);
		OS_FALLTHROUGH;
	case 9:
		SET_DBGWVRn(8, (uint64_t)debug_state->uds.ds32.wvr[8]);
		SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds32.wcr[8], all_ctrls);
		OS_FALLTHROUGH;
	case 8:
		SET_DBGWVRn(7, (uint64_t)debug_state->uds.ds32.wvr[7]);
		SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds32.wcr[7], all_ctrls);
		OS_FALLTHROUGH;
	case 7:
		SET_DBGWVRn(6, (uint64_t)debug_state->uds.ds32.wvr[6]);
		SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds32.wcr[6], all_ctrls);
		OS_FALLTHROUGH;
	case 6:
		SET_DBGWVRn(5, (uint64_t)debug_state->uds.ds32.wvr[5]);
		SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds32.wcr[5], all_ctrls);
		OS_FALLTHROUGH;
	case 5:
		SET_DBGWVRn(4, (uint64_t)debug_state->uds.ds32.wvr[4]);
		SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds32.wcr[4], all_ctrls);
		OS_FALLTHROUGH;
	case 4:
		SET_DBGWVRn(3, (uint64_t)debug_state->uds.ds32.wvr[3]);
		SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds32.wcr[3], all_ctrls);
		OS_FALLTHROUGH;
	case 3:
		SET_DBGWVRn(2, (uint64_t)debug_state->uds.ds32.wvr[2]);
		SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds32.wcr[2], all_ctrls);
		OS_FALLTHROUGH;
	case 2:
		SET_DBGWVRn(1, (uint64_t)debug_state->uds.ds32.wvr[1]);
		SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds32.wcr[1], all_ctrls);
		OS_FALLTHROUGH;
	case 1:
		SET_DBGWVRn(0, (uint64_t)debug_state->uds.ds32.wvr[0]);
		SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds32.wcr[0], all_ctrls);
		OS_FALLTHROUGH;
	default:
		break;
	}

#if defined(CONFIG_KERNEL_INTEGRITY)
	/* Refuse control values that would target EL1 or higher. */
	if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
		panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
	}
#endif

	/*
	 * Breakpoint/Watchpoint Enable
	 */
	if (all_ctrls != 0) {
		update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
	} else {
		update_mdscr(0x8000, 0);
	}

	/*
	 * Software debug single step enable
	 */
	if (debug_state->uds.ds32.mdscr_el1 & 0x1) {
		update_mdscr(0, 1); // MDSCR_EL1[SS]

		/* Also set the SS bit in the thread's saved user CPSR. */
		mask_user_saved_state_cpsr(current_thread()->machine.upcb, PSR64_SS, 0);
	} else {
		update_mdscr(0x1, 0);
	}

	/* Make the debug register writes visible before returning. */
	__builtin_arm_isb(ISB_SY);
	(void) ml_set_interrupts_enabled(intr);
}
801 
802 void
arm_debug_set64(arm_debug_state_t * debug_state)803 arm_debug_set64(arm_debug_state_t *debug_state)
804 {
805 	struct cpu_data *  cpu_data_ptr;
806 	arm_debug_info_t * debug_info    = arm_debug_info();
807 	boolean_t          intr;
808 	arm_debug_state_t  off_state;
809 	arm_debug_state_t  *cpu_debug;
810 	uint64_t           all_ctrls = 0;
811 
812 	intr = ml_set_interrupts_enabled(FALSE);
813 	cpu_data_ptr = getCpuDatap();
814 	cpu_debug = cpu_data_ptr->cpu_user_debug;
815 
816 	/*
817 	 * Retain and set new per-cpu state.
818 	 * Reference count does not matter when turning off debug state.
819 	 */
820 	if (debug_state == NULL) {
821 		bzero(&off_state, sizeof(off_state));
822 		cpu_data_ptr->cpu_user_debug = NULL;
823 		debug_state = &off_state;
824 	} else {
825 		os_ref_retain(&debug_state->ref);
826 		cpu_data_ptr->cpu_user_debug = debug_state;
827 	}
828 
829 	/* Release previous debug state. */
830 	if (cpu_debug != NULL) {
831 		if (os_ref_release(&cpu_debug->ref) == 0) {
832 			zfree(ads_zone, cpu_debug);
833 		}
834 	}
835 
836 	switch (debug_info->num_breakpoint_pairs) {
837 	case 16:
838 		SET_DBGBVRn(15, debug_state->uds.ds64.bvr[15]);
839 		SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds64.bcr[15], all_ctrls);
840 		OS_FALLTHROUGH;
841 	case 15:
842 		SET_DBGBVRn(14, debug_state->uds.ds64.bvr[14]);
843 		SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds64.bcr[14], all_ctrls);
844 		OS_FALLTHROUGH;
845 	case 14:
846 		SET_DBGBVRn(13, debug_state->uds.ds64.bvr[13]);
847 		SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds64.bcr[13], all_ctrls);
848 		OS_FALLTHROUGH;
849 	case 13:
850 		SET_DBGBVRn(12, debug_state->uds.ds64.bvr[12]);
851 		SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds64.bcr[12], all_ctrls);
852 		OS_FALLTHROUGH;
853 	case 12:
854 		SET_DBGBVRn(11, debug_state->uds.ds64.bvr[11]);
855 		SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds64.bcr[11], all_ctrls);
856 		OS_FALLTHROUGH;
857 	case 11:
858 		SET_DBGBVRn(10, debug_state->uds.ds64.bvr[10]);
859 		SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds64.bcr[10], all_ctrls);
860 		OS_FALLTHROUGH;
861 	case 10:
862 		SET_DBGBVRn(9, debug_state->uds.ds64.bvr[9]);
863 		SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds64.bcr[9], all_ctrls);
864 		OS_FALLTHROUGH;
865 	case 9:
866 		SET_DBGBVRn(8, debug_state->uds.ds64.bvr[8]);
867 		SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds64.bcr[8], all_ctrls);
868 		OS_FALLTHROUGH;
869 	case 8:
870 		SET_DBGBVRn(7, debug_state->uds.ds64.bvr[7]);
871 		SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds64.bcr[7], all_ctrls);
872 		OS_FALLTHROUGH;
873 	case 7:
874 		SET_DBGBVRn(6, debug_state->uds.ds64.bvr[6]);
875 		SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds64.bcr[6], all_ctrls);
876 		OS_FALLTHROUGH;
877 	case 6:
878 		SET_DBGBVRn(5, debug_state->uds.ds64.bvr[5]);
879 		SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds64.bcr[5], all_ctrls);
880 		OS_FALLTHROUGH;
881 	case 5:
882 		SET_DBGBVRn(4, debug_state->uds.ds64.bvr[4]);
883 		SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds64.bcr[4], all_ctrls);
884 		OS_FALLTHROUGH;
885 	case 4:
886 		SET_DBGBVRn(3, debug_state->uds.ds64.bvr[3]);
887 		SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds64.bcr[3], all_ctrls);
888 		OS_FALLTHROUGH;
889 	case 3:
890 		SET_DBGBVRn(2, debug_state->uds.ds64.bvr[2]);
891 		SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds64.bcr[2], all_ctrls);
892 		OS_FALLTHROUGH;
893 	case 2:
894 		SET_DBGBVRn(1, debug_state->uds.ds64.bvr[1]);
895 		SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds64.bcr[1], all_ctrls);
896 		OS_FALLTHROUGH;
897 	case 1:
898 		SET_DBGBVRn(0, debug_state->uds.ds64.bvr[0]);
899 		SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds64.bcr[0], all_ctrls);
900 		OS_FALLTHROUGH;
901 	default:
902 		break;
903 	}
904 
905 	switch (debug_info->num_watchpoint_pairs) {
906 	case 16:
907 		SET_DBGWVRn(15, debug_state->uds.ds64.wvr[15]);
908 		SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds64.wcr[15], all_ctrls);
909 		OS_FALLTHROUGH;
910 	case 15:
911 		SET_DBGWVRn(14, debug_state->uds.ds64.wvr[14]);
912 		SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds64.wcr[14], all_ctrls);
913 		OS_FALLTHROUGH;
914 	case 14:
915 		SET_DBGWVRn(13, debug_state->uds.ds64.wvr[13]);
916 		SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds64.wcr[13], all_ctrls);
917 		OS_FALLTHROUGH;
918 	case 13:
919 		SET_DBGWVRn(12, debug_state->uds.ds64.wvr[12]);
920 		SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds64.wcr[12], all_ctrls);
921 		OS_FALLTHROUGH;
922 	case 12:
923 		SET_DBGWVRn(11, debug_state->uds.ds64.wvr[11]);
924 		SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds64.wcr[11], all_ctrls);
925 		OS_FALLTHROUGH;
926 	case 11:
927 		SET_DBGWVRn(10, debug_state->uds.ds64.wvr[10]);
928 		SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds64.wcr[10], all_ctrls);
929 		OS_FALLTHROUGH;
930 	case 10:
931 		SET_DBGWVRn(9, debug_state->uds.ds64.wvr[9]);
932 		SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds64.wcr[9], all_ctrls);
933 		OS_FALLTHROUGH;
934 	case 9:
935 		SET_DBGWVRn(8, debug_state->uds.ds64.wvr[8]);
936 		SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds64.wcr[8], all_ctrls);
937 		OS_FALLTHROUGH;
938 	case 8:
939 		SET_DBGWVRn(7, debug_state->uds.ds64.wvr[7]);
940 		SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds64.wcr[7], all_ctrls);
941 		OS_FALLTHROUGH;
942 	case 7:
943 		SET_DBGWVRn(6, debug_state->uds.ds64.wvr[6]);
944 		SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds64.wcr[6], all_ctrls);
945 		OS_FALLTHROUGH;
946 	case 6:
947 		SET_DBGWVRn(5, debug_state->uds.ds64.wvr[5]);
948 		SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds64.wcr[5], all_ctrls);
949 		OS_FALLTHROUGH;
950 	case 5:
951 		SET_DBGWVRn(4, debug_state->uds.ds64.wvr[4]);
952 		SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds64.wcr[4], all_ctrls);
953 		OS_FALLTHROUGH;
954 	case 4:
955 		SET_DBGWVRn(3, debug_state->uds.ds64.wvr[3]);
956 		SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds64.wcr[3], all_ctrls);
957 		OS_FALLTHROUGH;
958 	case 3:
959 		SET_DBGWVRn(2, debug_state->uds.ds64.wvr[2]);
960 		SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds64.wcr[2], all_ctrls);
961 		OS_FALLTHROUGH;
962 	case 2:
963 		SET_DBGWVRn(1, debug_state->uds.ds64.wvr[1]);
964 		SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds64.wcr[1], all_ctrls);
965 		OS_FALLTHROUGH;
966 	case 1:
967 		SET_DBGWVRn(0, debug_state->uds.ds64.wvr[0]);
968 		SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds64.wcr[0], all_ctrls);
969 		OS_FALLTHROUGH;
970 	default:
971 		break;
972 	}
973 
974 #if defined(CONFIG_KERNEL_INTEGRITY)
975 	if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
976 		panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
977 	}
978 #endif
979 
980 	/*
981 	 * Breakpoint/Watchpoint Enable
982 	 */
983 	if (all_ctrls != 0) {
984 		update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
985 	} else {
986 		update_mdscr(0x8000, 0);
987 	}
988 
989 	/*
990 	 * Software debug single step enable
991 	 */
992 	if (debug_state->uds.ds64.mdscr_el1 & 0x1) {
993 
994 		update_mdscr(0, 1); // MDSCR_EL1[SS]
995 
996 		mask_user_saved_state_cpsr(current_thread()->machine.upcb, PSR64_SS, 0);
997 	} else {
998 		update_mdscr(0x1, 0);
999 	}
1000 
1001 	__builtin_arm_isb(ISB_SY);
1002 	(void) ml_set_interrupts_enabled(intr);
1003 }
1004 
1005 void
arm_debug_set(arm_debug_state_t * debug_state)1006 arm_debug_set(arm_debug_state_t *debug_state)
1007 {
1008 	if (debug_state) {
1009 		switch (debug_state->dsh.flavor) {
1010 		case ARM_DEBUG_STATE32:
1011 			arm_debug_set32(debug_state);
1012 			break;
1013 		case ARM_DEBUG_STATE64:
1014 			arm_debug_set64(debug_state);
1015 			break;
1016 		default:
1017 			panic("arm_debug_set");
1018 			break;
1019 		}
1020 	} else {
1021 		if (thread_is_64bit_data(current_thread())) {
1022 			arm_debug_set64(debug_state);
1023 		} else {
1024 			arm_debug_set32(debug_state);
1025 		}
1026 	}
1027 }
1028 
1029 #define VM_MAX_ADDRESS32          ((vm_address_t) 0x80000000)
1030 boolean_t
debug_legacy_state_is_valid(arm_legacy_debug_state_t * debug_state)1031 debug_legacy_state_is_valid(arm_legacy_debug_state_t *debug_state)
1032 {
1033 	arm_debug_info_t *debug_info = arm_debug_info();
1034 	uint32_t i;
1035 	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
1036 		if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i]) {
1037 			return FALSE;
1038 		}
1039 	}
1040 
1041 	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
1042 		if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i]) {
1043 			return FALSE;
1044 		}
1045 	}
1046 	return TRUE;
1047 }
1048 
1049 boolean_t
debug_state_is_valid32(arm_debug_state32_t * debug_state)1050 debug_state_is_valid32(arm_debug_state32_t *debug_state)
1051 {
1052 	arm_debug_info_t *debug_info = arm_debug_info();
1053 	uint32_t i;
1054 	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
1055 		if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i]) {
1056 			return FALSE;
1057 		}
1058 	}
1059 
1060 	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
1061 		if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i]) {
1062 			return FALSE;
1063 		}
1064 	}
1065 	return TRUE;
1066 }
1067 
1068 boolean_t
debug_state_is_valid64(arm_debug_state64_t * debug_state)1069 debug_state_is_valid64(arm_debug_state64_t *debug_state)
1070 {
1071 	arm_debug_info_t *debug_info = arm_debug_info();
1072 	uint32_t i;
1073 	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
1074 		if (0 != debug_state->bcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->bvr[i]) {
1075 			return FALSE;
1076 		}
1077 	}
1078 
1079 	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
1080 		if (0 != debug_state->wcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->wvr[i]) {
1081 			return FALSE;
1082 		}
1083 	}
1084 	return TRUE;
1085 }
1086 
1087 /*
1088  * Duplicate one arm_debug_state_t to another.  "all" parameter
1089  * is ignored in the case of ARM -- Is this the right assumption?
1090  */
1091 void
copy_legacy_debug_state(arm_legacy_debug_state_t * src,arm_legacy_debug_state_t * target,__unused boolean_t all)1092 copy_legacy_debug_state(arm_legacy_debug_state_t * src,
1093     arm_legacy_debug_state_t * target,
1094     __unused boolean_t         all)
1095 {
1096 	bcopy(src, target, sizeof(arm_legacy_debug_state_t));
1097 }
1098 
1099 void
copy_debug_state32(arm_debug_state32_t * src,arm_debug_state32_t * target,__unused boolean_t all)1100 copy_debug_state32(arm_debug_state32_t * src,
1101     arm_debug_state32_t * target,
1102     __unused boolean_t    all)
1103 {
1104 	bcopy(src, target, sizeof(arm_debug_state32_t));
1105 }
1106 
1107 void
copy_debug_state64(arm_debug_state64_t * src,arm_debug_state64_t * target,__unused boolean_t all)1108 copy_debug_state64(arm_debug_state64_t * src,
1109     arm_debug_state64_t * target,
1110     __unused boolean_t    all)
1111 {
1112 	bcopy(src, target, sizeof(arm_debug_state64_t));
1113 }
1114 
1115 kern_return_t
machine_thread_set_tsd_base(thread_t thread,mach_vm_offset_t tsd_base)1116 machine_thread_set_tsd_base(thread_t         thread,
1117     mach_vm_offset_t tsd_base)
1118 {
1119 	if (get_threadtask(thread) == kernel_task) {
1120 		return KERN_INVALID_ARGUMENT;
1121 	}
1122 
1123 	if (thread_is_64bit_addr(thread)) {
1124 		if (tsd_base > vm_map_max(thread->map)) {
1125 			tsd_base = 0ULL;
1126 		}
1127 	} else {
1128 		if (tsd_base > UINT32_MAX) {
1129 			tsd_base = 0ULL;
1130 		}
1131 	}
1132 
1133 	thread->machine.cthread_self = tsd_base;
1134 
1135 	/* For current thread, make the TSD base active immediately */
1136 	if (thread == current_thread()) {
1137 		mp_disable_preemption();
1138 		set_tpidrro(tsd_base);
1139 		mp_enable_preemption();
1140 	}
1141 
1142 	return KERN_SUCCESS;
1143 }
1144 
1145 void
machine_tecs(__unused thread_t thr)1146 machine_tecs(__unused thread_t thr)
1147 {
1148 }
1149 
1150 int
machine_csv(__unused cpuvn_e cve)1151 machine_csv(__unused cpuvn_e cve)
1152 {
1153 	return 0;
1154 }
1155 
#if __ARM_ARCH_8_5__
/*
 * Flag the current CPU so that the next context switch performs an
 * explicit synchronization (per-CPU sync_on_cswitch flag).
 *
 * Fix: the definition used a pre-C23 empty parameter list `()`, which
 * declares a function with unspecified parameters; `(void)` is the
 * correct prototyped form for a zero-argument function.
 */
void
arm_context_switch_requires_sync(void)
{
	current_cpu_datap()->sync_on_cswitch = 1;
}
#endif
1163 
1164 #if __has_feature(ptrauth_calls)
1165 boolean_t
arm_user_jop_disabled(void)1166 arm_user_jop_disabled(void)
1167 {
1168 	return FALSE;
1169 }
1170 #endif /* __has_feature(ptrauth_calls) */
1171