xref: /xnu-8796.121.2/osfmk/arm64/pcb.c (revision c54f35ca767986246321eb901baf8f5ff7923f6a)
1 /*
2  * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <debug.h>
30 
31 #include <types.h>
32 
33 #include <mach/mach_types.h>
34 #include <mach/thread_status.h>
35 #include <mach/vm_types.h>
36 
37 #include <kern/kern_types.h>
38 #include <kern/task.h>
39 #include <kern/thread.h>
40 #include <kern/misc_protos.h>
41 #include <kern/mach_param.h>
42 #include <kern/spl.h>
43 #include <kern/machine.h>
44 #include <kern/kpc.h>
45 
46 #if MONOTONIC
47 #include <kern/monotonic.h>
48 #endif /* MONOTONIC */
49 
50 #include <machine/atomic.h>
51 #include <arm64/proc_reg.h>
52 #include <arm64/machine_machdep.h>
53 #include <arm/cpu_data_internal.h>
54 #include <arm/machdep_call.h>
55 #include <arm/misc_protos.h>
56 #include <arm/cpuid.h>
57 
58 #include <vm/vm_map.h>
59 #include <vm/vm_protos.h>
60 
61 #include <sys/kdebug.h>
62 
63 
64 #include <san/kcov_stksz.h>
65 
66 #include <IOKit/IOBSD.h>
67 
extern int debug_task;

/* zone for debug_state area (refcounted arm_debug_state_t; see arm_debug_set32/64) */
ZONE_DEFINE_TYPE(ads_zone, "arm debug state", arm_debug_state_t, ZC_NONE);
/* zone for per-thread userspace register/NEON save state (arm_context_t) */
ZONE_DEFINE_TYPE(user_ss_zone, "user save state", arm_context_t, ZC_NONE);
73 
74 /*
75  * Routine: consider_machine_collect
76  *
77  */
void
consider_machine_collect(void)
{
	/* Ask the pmap layer to garbage-collect unused page-table pages. */
	pmap_gc();
}
83 
84 /*
85  * Routine: consider_machine_adjust
86  *
87  */
void
consider_machine_adjust(void)
{
	/* No machine-specific adjustment work on this platform. */
}
92 
93 
94 
95 
96 
97 
/*
 * Transfer ownership of the per-CPU data pointers from the outgoing
 * thread ('old') to the incoming thread ('new') during a context switch
 * or stack handoff.  The outgoing thread's fields are cleared so that it
 * no longer appears to be on core (see machine_thread_on_core()).
 */
static inline void
machine_thread_switch_cpu_data(thread_t old, thread_t new)
{
	/*
	 * We build with -fno-strict-aliasing, so the load through temporaries
	 * is required so that this generates a single load / store pair.
	 */
	cpu_data_t *datap = old->machine.CpuDatap;
	vm_offset_t base  = old->machine.pcpu_data_base;

	/* TODO: Should this be ordered? */

	old->machine.CpuDatap = NULL;
	old->machine.pcpu_data_base = 0;

	new->machine.CpuDatap = datap;
	new->machine.pcpu_data_base = base;
}
116 
117 /**
118  * routine: machine_switch_pmap_and_extended_context
119  *
120  * Helper function used by machine_switch_context and machine_stack_handoff to switch the
121  * extended context and switch the pmap if necessary.
122  *
123  */
124 
static inline void
machine_switch_pmap_and_extended_context(thread_t old, thread_t new)
{
	pmap_t new_pmap;

	new_pmap = new->map->pmap;
	if (old->map->pmap != new_pmap) {
		/* Address space changes: pmap_switch() reprograms TTBR0 (and issues DSB). */
		pmap_switch(new_pmap);
	} else {
		/*
		 * If the thread is preempted while performing cache or TLB maintenance,
		 * it may be migrated to a different CPU between the completion of the relevant
		 * maintenance instruction and the synchronizing DSB.   ARM requires that the
		 * synchronizing DSB must be issued *on the PE that issued the maintenance instruction*
		 * in order to guarantee completion of the instruction and visibility of its effects.
		 * Issue DSB here to enforce that guarantee.  We only do this for the case in which
		 * the pmap isn't changing, as we expect pmap_switch() to issue DSB when it updates
		 * TTBR0.  Note also that cache maintenance may be performed in userspace, so we
		 * cannot further limit this operation e.g. by setting a per-thread flag to indicate
		 * a pending kernel TLB or cache maintenance instruction.
		 */
		__builtin_arm_dsb(DSB_ISH);
	}

	/* Hand the per-CPU data pointers over to the incoming thread. */
	machine_thread_switch_cpu_data(old, new);
}
158 
159 /*
160  * Routine: machine_switch_context
161  *
162  */
163 thread_t
machine_switch_context(thread_t old,thread_continue_t continuation,thread_t new)164 machine_switch_context(thread_t old,
165     thread_continue_t continuation,
166     thread_t new)
167 {
168 	thread_t retval;
169 
170 #if __ARM_PAN_AVAILABLE__
171 	if (__improbable(__builtin_arm_rsr("pan") == 0)) {
172 		panic("context switch with PAN disabled");
173 	}
174 #endif
175 
176 #define machine_switch_context_kprintf(x...) \
177 	/* kprintf("machine_switch_context: " x) */
178 
179 	if (old == new) {
180 		panic("machine_switch_context");
181 	}
182 
183 	kpc_off_cpu(old);
184 
185 	machine_switch_pmap_and_extended_context(old, new);
186 
187 	machine_switch_context_kprintf("old= %x contination = %x new = %x\n", old, continuation, new);
188 
189 	retval = Switch_context(old, continuation, new);
190 	assert(retval != NULL);
191 
192 	return retval;
193 }
194 
195 boolean_t
machine_thread_on_core(thread_t thread)196 machine_thread_on_core(thread_t thread)
197 {
198 	return thread->machine.CpuDatap != NULL;
199 }
200 
boolean_t
machine_thread_on_core_allow_invalid(thread_t thread)
{
	extern int _copyin_atomic64(const char *src, uint64_t *dst);
	uint64_t addr;

	/*
	 * Utilize that the thread zone is sequestered which means
	 * that this kernel-to-kernel copyin can't read data
	 * from anything but a thread, zeroed or freed memory.
	 */
	assert(get_preemption_level() > 0);
	/* Strip PGZ quarantine encoding; yields THREAD_NULL if not a thread address. */
	thread = pgz_decode_allow_invalid(thread, ZONE_ID_THREAD);
	if (thread == THREAD_NULL) {
		return false;
	}
	thread_require(thread);
	/*
	 * Fault-tolerant atomic read: the thread may be concurrently freed,
	 * in which case the copyin fails and we report "not on core".
	 */
	if (_copyin_atomic64((void *)&thread->machine.CpuDatap, &addr) == 0) {
		return addr != 0;
	}
	return false;
}
223 
224 
225 /*
226  * Routine: machine_thread_create
227  *
228  */
void
machine_thread_create(thread_t thread, task_t task, bool first_thread)
{
#define machine_thread_create_kprintf(x...) \
	/* kprintf("machine_thread_create: " x) */

	machine_thread_create_kprintf("thread = %x\n", thread);

	if (!first_thread) {
		thread->machine.CpuDatap = (cpu_data_t *)0;
		// setting this offset will cause trying to use it to panic
		thread->machine.pcpu_data_base = (vm_offset_t)VM_MIN_KERNEL_ADDRESS;
	}
	thread->machine.arm_machine_flags = 0;
	thread->machine.preemption_count = 0;
	thread->machine.cthread_self = 0;
	thread->machine.kpcb = NULL;
	thread->machine.exception_trace_code = 0;
#if defined(HAS_APPLE_PAC)
	/* Inherit the task's pointer-authentication keys/policy. */
	thread->machine.rop_pid = task->rop_pid;
	thread->machine.jop_pid = task->jop_pid;
	if (task->disable_user_jop) {
		thread->machine.arm_machine_flags |= ARM_MACHINE_THREAD_DISABLE_USER_JOP;
	}
#endif

	if (task != kernel_task) {
		/* If this isn't a kernel thread, we'll have userspace state. */
		thread->machine.contextData = zalloc_flags(user_ss_zone,
		    Z_WAITOK | Z_NOFAIL);

		thread->machine.upcb = &thread->machine.contextData->ss;
		thread->machine.uNeon = &thread->machine.contextData->ns;

		/* Tag the save areas with the flavor matching the task's data model. */
		if (task_has_64Bit_data(task)) {
			thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
			thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
			thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
			thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;

		} else {
			thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
			thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
			thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
			thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		}
	} else {
		/* Kernel threads carry no userspace save state. */
		thread->machine.upcb = NULL;
		thread->machine.uNeon = NULL;
		thread->machine.contextData = NULL;
	}

	bzero(&thread->machine.perfctrl_state, sizeof(thread->machine.perfctrl_state));
	machine_thread_state_initialize(thread);
}
289 
290 /*
291  * Routine: machine_thread_process_signature
292  *
293  * Called to allow code signature dependent adjustments to the thread
294  * state. Note that this is usually called twice for the main thread:
295  * Once at thread creation by thread_create, when the signature is
296  * potentially not attached yet (which is usually the case for the
297  * first/main thread of a task), and once after the task's signature
298  * has actually been attached.
299  *
300  */
kern_return_t
machine_thread_process_signature(thread_t __unused thread, task_t __unused task)
{
	kern_return_t result = KERN_SUCCESS;

	/*
	 * Reset to default state.
	 *
	 * In general, this function must not assume anything about the
	 * previous signature dependent thread state.
	 *
	 * At least at the time of writing this, threads don't transition
	 * to different code signatures, so each thread this function
	 * operates on is "fresh" in the sense that
	 * machine_thread_process_signature() has either not even been
	 * called on it yet, or only been called as part of thread
	 * creation when there was no signature yet.
	 *
	 * But for easier reasoning, and to prevent future bugs, this
	 * function should always recalculate all signature-dependent
	 * thread state, as if the signature could actually change from an
	 * actual signature to another.
	 */
#if !__ARM_KERNEL_PROTECT__
	thread->machine.arm_machine_flags &= ~(ARM_MACHINE_THREAD_PRESERVE_X18);
#endif /* !__ARM_KERNEL_PROTECT__ */


	/*
	 * Set signature dependent state.
	 *
	 * NOTE: under __ARM_KERNEL_PROTECT__ the body below (including the
	 * else arm) is compiled out entirely, leaving an empty if-statement.
	 */
	if (task != kernel_task && task_has_64Bit_data(task)) {
#if !__ARM_KERNEL_PROTECT__
#if CONFIG_ROSETTA
		if (task_is_translated(task)) {
			/* Note that for x86_64 translation specifically, the
			 * context switch path implicitly switches x18 regardless
			 * of this flag. */
			thread->machine.arm_machine_flags |= ARM_MACHINE_THREAD_PRESERVE_X18;
		}
#endif /* CONFIG_ROSETTA */

		if (task->preserve_x18) {
			thread->machine.arm_machine_flags |= ARM_MACHINE_THREAD_PRESERVE_X18;
		}
	} else {
		/*
		 * For informational value only, context switch only trashes
		 * x18 for user threads.  (Except for devices with
		 * __ARM_KERNEL_PROTECT__, which make real destructive use of
		 * x18.)
		 */
		thread->machine.arm_machine_flags |= ARM_MACHINE_THREAD_PRESERVE_X18;
#endif /* !__ARM_KERNEL_PROTECT__ */
	}

	return result;
}
359 
360 /*
361  * Routine: machine_thread_destroy
362  *
363  */
364 void
machine_thread_destroy(thread_t thread)365 machine_thread_destroy(thread_t thread)
366 {
367 	arm_context_t *thread_user_ss;
368 
369 	if (thread->machine.contextData) {
370 		/* Disassociate the user save state from the thread before we free it. */
371 		thread_user_ss = thread->machine.contextData;
372 		thread->machine.upcb = NULL;
373 		thread->machine.uNeon = NULL;
374 		thread->machine.contextData = NULL;
375 
376 
377 		zfree(user_ss_zone, thread_user_ss);
378 	}
379 
380 	if (thread->machine.DebugData != NULL) {
381 		if (thread->machine.DebugData == getCpuDatap()->cpu_user_debug) {
382 			arm_debug_set(NULL);
383 		}
384 
385 		if (os_ref_release(&thread->machine.DebugData->ref) == 0) {
386 			zfree(ads_zone, thread->machine.DebugData);
387 		}
388 	}
389 }
390 
391 
392 /*
393  * Routine: machine_thread_init
394  *
395  */
void
machine_thread_init(void)
{
	/* No machine-specific thread subsystem initialization on this platform. */
}
400 
401 /*
402  * Routine:	machine_thread_template_init
403  *
404  */
void
machine_thread_template_init(thread_t __unused thr_template)
{
	/* Nothing to do on this platform. */
}
410 
411 /*
412  * Routine: get_useraddr
413  *
414  */
415 user_addr_t
get_useraddr()416 get_useraddr()
417 {
418 	return get_saved_state_pc(current_thread()->machine.upcb);
419 }
420 
421 /*
422  * Routine: machine_stack_detach
423  *
424  */
425 vm_offset_t
machine_stack_detach(thread_t thread)426 machine_stack_detach(thread_t thread)
427 {
428 	vm_offset_t stack;
429 
430 	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
431 	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);
432 
433 	stack = thread->kernel_stack;
434 #if CONFIG_STKSZ
435 	kcov_stksz_set_thread_stack(thread, stack);
436 #endif
437 	thread->kernel_stack = 0;
438 	thread->machine.kstackptr = 0;
439 
440 	return stack;
441 }
442 
443 
444 /*
445  * Routine: machine_stack_attach
446  *
447  */
void
machine_stack_attach(thread_t thread,
    vm_offset_t stack)
{
	struct arm_kernel_context *context;
	struct arm_kernel_saved_state *savestate;
	struct arm_kernel_neon_saved_state *neon_savestate;
	uint32_t current_el;

#define machine_stack_attach_kprintf(x...) \
	/* kprintf("machine_stack_attach: " x) */

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	thread->kernel_stack = stack;
#if CONFIG_STKSZ
	kcov_stksz_set_thread_stack(thread, 0);
#endif
	/* Kernel stack pointer starts just below the thread_kernel_state at the top. */
	thread->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
	thread_initialize_kernel_state(thread);

	machine_stack_attach_kprintf("kstackptr: %lx\n", (vm_address_t)thread->machine.kstackptr);

	current_el = (uint32_t) __builtin_arm_rsr64("CurrentEL");
	context = &((thread_kernel_state_t) thread->machine.kstackptr)->machine;
	savestate = &context->ss;
	savestate->fp = 0;
	savestate->sp = thread->machine.kstackptr;
	savestate->pc_was_in_userspace = false;
#if defined(HAS_APPLE_PAC)
	/* Sign the initial kernel stack saved state */
	uint64_t intr = ml_pac_safe_interrupts_disable();
	/*
	 * Compute the address of thread_continue, PAC-sign it (pacia1716:
	 * x17 signed with modifier x16 = the saved SP), and store it as the
	 * saved LR.  Done in asm so the raw pointer never sits in memory.
	 */
	asm volatile (
                "adrp	x17, _thread_continue@page"             "\n"
                "add	x17, x17, _thread_continue@pageoff"     "\n"
                "ldr	x16, [%[ss], %[SS64_SP]]"               "\n"
                "pacia1716"                                     "\n"
                "str	x17, [%[ss], %[SS64_LR]]"               "\n"
                :
                : [ss]                  "r"(&context->ss),
                  [SS64_SP]             "i"(offsetof(struct arm_kernel_saved_state, sp)),
                  [SS64_LR]             "i"(offsetof(struct arm_kernel_saved_state, lr))
                : "x16", "x17"
        );
	ml_pac_safe_interrupts_restore(intr);
#else
	/* First switch-to will "return" into thread_continue. */
	savestate->lr = (uintptr_t)thread_continue;
#endif /* defined(HAS_APPLE_PAC) */
	neon_savestate = &context->ns;
	neon_savestate->fpcr = FPCR_DEFAULT;
	machine_stack_attach_kprintf("thread = %p pc = %llx, sp = %llx\n", thread, savestate->lr, savestate->sp);
}
501 
502 
503 /*
504  * Routine: machine_stack_handoff
505  *
506  */
void
machine_stack_handoff(thread_t old,
    thread_t new)
{
	vm_offset_t  stack;

#if __ARM_PAN_AVAILABLE__
	if (__improbable(__builtin_arm_rsr("pan") == 0)) {
		panic("stack handoff with PAN disabled");
	}
#endif

	kpc_off_cpu(old);

	/* Move the kernel stack from the outgoing thread directly to the new one. */
	stack = machine_stack_detach(old);
#if CONFIG_STKSZ
	kcov_stksz_set_thread_stack(new, 0);
#endif
	new->kernel_stack = stack;
	new->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
	/* If we handed over the reserved stack, swap reserved-stack ownership too. */
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
#if KASAN_TBI
		kasan_unpoison_stack(old->reserved_stack, kernel_stack_size);
#endif /* KASAN_TBI */
		new->reserved_stack = stack;
	}

	machine_switch_pmap_and_extended_context(old, new);

	machine_set_current_thread(new);
	thread_initialize_kernel_state(new);
}
541 
542 
543 /*
544  * Routine: call_continuation
545  *
546  */
void
call_continuation(thread_continue_t continuation,
    void *parameter,
    wait_result_t wresult,
    boolean_t enable_interrupts)
{
#define call_continuation_kprintf(x...) \
	/* kprintf("call_continuation_kprintf:" x) */

	call_continuation_kprintf("thread = %p continuation = %p, stack = %lx\n",
	    current_thread(), continuation, current_thread()->machine.kstackptr);
	/* Hand off to the assembly trampoline that invokes the continuation. */
	Call_continuation(continuation, parameter, wresult, enable_interrupts);
}
560 
/*
 * Helpers for writing the self-hosted debug breakpoint/watchpoint
 * registers.  'n' must be a literal index (it is pasted into the msr
 * operand).  The *CRn (control) variants also OR the written value into
 * 'accum' so the caller can tell whether any control register is non-zero.
 */
#define SET_DBGBCRn(n, value, accum) \
	__asm__ volatile( \
	        "msr DBGBCR" #n "_EL1, %[val]\n" \
	        "orr %[result], %[result], %[val]\n" \
	        : [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGBVRn(n, value) \
	__asm__ volatile("msr DBGBVR" #n "_EL1, %0" : : "r"(value))

#define SET_DBGWCRn(n, value, accum) \
	__asm__ volatile( \
	        "msr DBGWCR" #n "_EL1, %[val]\n" \
	        "orr %[result], %[result], %[val]\n" \
	        : [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGWVRn(n, value) \
	__asm__ volatile("msr DBGWVR" #n "_EL1, %0" : : "r"(value))
578 
/*
 * Load a 32-bit flavor debug state into the EL1 debug registers and make
 * it this CPU's active user debug state.  A NULL debug_state disables all
 * hardware breakpoints and watchpoints on this CPU.
 */
void
arm_debug_set32(arm_debug_state_t *debug_state)
{
	struct cpu_data *  cpu_data_ptr;
	arm_debug_info_t * debug_info    = arm_debug_info();
	boolean_t          intr;
	arm_debug_state_t  off_state;
	arm_debug_state_t  *cpu_debug;
	uint64_t           all_ctrls = 0;  /* OR of every control value written */

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();
	cpu_debug = cpu_data_ptr->cpu_user_debug;

	/*
	 * Retain and set new per-cpu state.
	 * Reference count does not matter when turning off debug state.
	 */
	if (debug_state == NULL) {
		/* Write a zeroed local state to clear every register. */
		bzero(&off_state, sizeof(off_state));
		cpu_data_ptr->cpu_user_debug = NULL;
		debug_state = &off_state;
	} else {
		os_ref_retain(&debug_state->ref);
		cpu_data_ptr->cpu_user_debug = debug_state;
	}

	/* Release previous debug state. */
	if (cpu_debug != NULL) {
		if (os_ref_release(&cpu_debug->ref) == 0) {
			zfree(ads_zone, cpu_debug);
		}
	}

	/*
	 * Program only as many breakpoint pairs as the hardware implements,
	 * falling through from the highest implemented index down to 0.
	 */
	switch (debug_info->num_breakpoint_pairs) {
	case 16:
		SET_DBGBVRn(15, (uint64_t)debug_state->uds.ds32.bvr[15]);
		SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds32.bcr[15], all_ctrls);
		OS_FALLTHROUGH;
	case 15:
		SET_DBGBVRn(14, (uint64_t)debug_state->uds.ds32.bvr[14]);
		SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds32.bcr[14], all_ctrls);
		OS_FALLTHROUGH;
	case 14:
		SET_DBGBVRn(13, (uint64_t)debug_state->uds.ds32.bvr[13]);
		SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds32.bcr[13], all_ctrls);
		OS_FALLTHROUGH;
	case 13:
		SET_DBGBVRn(12, (uint64_t)debug_state->uds.ds32.bvr[12]);
		SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds32.bcr[12], all_ctrls);
		OS_FALLTHROUGH;
	case 12:
		SET_DBGBVRn(11, (uint64_t)debug_state->uds.ds32.bvr[11]);
		SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds32.bcr[11], all_ctrls);
		OS_FALLTHROUGH;
	case 11:
		SET_DBGBVRn(10, (uint64_t)debug_state->uds.ds32.bvr[10]);
		SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds32.bcr[10], all_ctrls);
		OS_FALLTHROUGH;
	case 10:
		SET_DBGBVRn(9, (uint64_t)debug_state->uds.ds32.bvr[9]);
		SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds32.bcr[9], all_ctrls);
		OS_FALLTHROUGH;
	case 9:
		SET_DBGBVRn(8, (uint64_t)debug_state->uds.ds32.bvr[8]);
		SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds32.bcr[8], all_ctrls);
		OS_FALLTHROUGH;
	case 8:
		SET_DBGBVRn(7, (uint64_t)debug_state->uds.ds32.bvr[7]);
		SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds32.bcr[7], all_ctrls);
		OS_FALLTHROUGH;
	case 7:
		SET_DBGBVRn(6, (uint64_t)debug_state->uds.ds32.bvr[6]);
		SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds32.bcr[6], all_ctrls);
		OS_FALLTHROUGH;
	case 6:
		SET_DBGBVRn(5, (uint64_t)debug_state->uds.ds32.bvr[5]);
		SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds32.bcr[5], all_ctrls);
		OS_FALLTHROUGH;
	case 5:
		SET_DBGBVRn(4, (uint64_t)debug_state->uds.ds32.bvr[4]);
		SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds32.bcr[4], all_ctrls);
		OS_FALLTHROUGH;
	case 4:
		SET_DBGBVRn(3, (uint64_t)debug_state->uds.ds32.bvr[3]);
		SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds32.bcr[3], all_ctrls);
		OS_FALLTHROUGH;
	case 3:
		SET_DBGBVRn(2, (uint64_t)debug_state->uds.ds32.bvr[2]);
		SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds32.bcr[2], all_ctrls);
		OS_FALLTHROUGH;
	case 2:
		SET_DBGBVRn(1, (uint64_t)debug_state->uds.ds32.bvr[1]);
		SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds32.bcr[1], all_ctrls);
		OS_FALLTHROUGH;
	case 1:
		SET_DBGBVRn(0, (uint64_t)debug_state->uds.ds32.bvr[0]);
		SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds32.bcr[0], all_ctrls);
		OS_FALLTHROUGH;
	default:
		break;
	}

	/* Same pattern for the implemented watchpoint pairs. */
	switch (debug_info->num_watchpoint_pairs) {
	case 16:
		SET_DBGWVRn(15, (uint64_t)debug_state->uds.ds32.wvr[15]);
		SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds32.wcr[15], all_ctrls);
		OS_FALLTHROUGH;
	case 15:
		SET_DBGWVRn(14, (uint64_t)debug_state->uds.ds32.wvr[14]);
		SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds32.wcr[14], all_ctrls);
		OS_FALLTHROUGH;
	case 14:
		SET_DBGWVRn(13, (uint64_t)debug_state->uds.ds32.wvr[13]);
		SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds32.wcr[13], all_ctrls);
		OS_FALLTHROUGH;
	case 13:
		SET_DBGWVRn(12, (uint64_t)debug_state->uds.ds32.wvr[12]);
		SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds32.wcr[12], all_ctrls);
		OS_FALLTHROUGH;
	case 12:
		SET_DBGWVRn(11, (uint64_t)debug_state->uds.ds32.wvr[11]);
		SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds32.wcr[11], all_ctrls);
		OS_FALLTHROUGH;
	case 11:
		SET_DBGWVRn(10, (uint64_t)debug_state->uds.ds32.wvr[10]);
		SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds32.wcr[10], all_ctrls);
		OS_FALLTHROUGH;
	case 10:
		SET_DBGWVRn(9, (uint64_t)debug_state->uds.ds32.wvr[9]);
		SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds32.wcr[9], all_ctrls);
		OS_FALLTHROUGH;
	case 9:
		SET_DBGWVRn(8, (uint64_t)debug_state->uds.ds32.wvr[8]);
		SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds32.wcr[8], all_ctrls);
		OS_FALLTHROUGH;
	case 8:
		SET_DBGWVRn(7, (uint64_t)debug_state->uds.ds32.wvr[7]);
		SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds32.wcr[7], all_ctrls);
		OS_FALLTHROUGH;
	case 7:
		SET_DBGWVRn(6, (uint64_t)debug_state->uds.ds32.wvr[6]);
		SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds32.wcr[6], all_ctrls);
		OS_FALLTHROUGH;
	case 6:
		SET_DBGWVRn(5, (uint64_t)debug_state->uds.ds32.wvr[5]);
		SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds32.wcr[5], all_ctrls);
		OS_FALLTHROUGH;
	case 5:
		SET_DBGWVRn(4, (uint64_t)debug_state->uds.ds32.wvr[4]);
		SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds32.wcr[4], all_ctrls);
		OS_FALLTHROUGH;
	case 4:
		SET_DBGWVRn(3, (uint64_t)debug_state->uds.ds32.wvr[3]);
		SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds32.wcr[3], all_ctrls);
		OS_FALLTHROUGH;
	case 3:
		SET_DBGWVRn(2, (uint64_t)debug_state->uds.ds32.wvr[2]);
		SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds32.wcr[2], all_ctrls);
		OS_FALLTHROUGH;
	case 2:
		SET_DBGWVRn(1, (uint64_t)debug_state->uds.ds32.wvr[1]);
		SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds32.wcr[1], all_ctrls);
		OS_FALLTHROUGH;
	case 1:
		SET_DBGWVRn(0, (uint64_t)debug_state->uds.ds32.wvr[0]);
		SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds32.wcr[0], all_ctrls);
		OS_FALLTHROUGH;
	default:
		break;
	}

#if defined(CONFIG_KERNEL_INTEGRITY)
	/* Refuse control values that would enable EL1+ (privileged) debug. */
	if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
		panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
	}
#endif

	/*
	 * Breakpoint/Watchpoint Enable
	 */
	if (all_ctrls != 0) {
		update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
	} else {
		update_mdscr(0x8000, 0);
	}

	/*
	 * Software debug single step enable
	 */
	if (debug_state->uds.ds32.mdscr_el1 & 0x1) {
		update_mdscr(0x8000, 1); // ~MDE | SS : no brk/watch while single stepping (which we've set)

		mask_saved_state_cpsr(current_thread()->machine.upcb, PSR64_SS, 0);
	} else {
		update_mdscr(0x1, 0);
	}

	/* Ensure all register writes are visible before re-enabling interrupts. */
	__builtin_arm_isb(ISB_SY);
	(void) ml_set_interrupts_enabled(intr);
}
780 
/*
 * Load a 64-bit flavor debug state into the EL1 debug registers and make
 * it this CPU's active user debug state.  A NULL debug_state disables all
 * hardware breakpoints and watchpoints on this CPU.
 */
void
arm_debug_set64(arm_debug_state_t *debug_state)
{
	struct cpu_data *  cpu_data_ptr;
	arm_debug_info_t * debug_info    = arm_debug_info();
	boolean_t          intr;
	arm_debug_state_t  off_state;
	arm_debug_state_t  *cpu_debug;
	uint64_t           all_ctrls = 0;  /* OR of every control value written */

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();
	cpu_debug = cpu_data_ptr->cpu_user_debug;

	/*
	 * Retain and set new per-cpu state.
	 * Reference count does not matter when turning off debug state.
	 */
	if (debug_state == NULL) {
		/* Write a zeroed local state to clear every register. */
		bzero(&off_state, sizeof(off_state));
		cpu_data_ptr->cpu_user_debug = NULL;
		debug_state = &off_state;
	} else {
		os_ref_retain(&debug_state->ref);
		cpu_data_ptr->cpu_user_debug = debug_state;
	}

	/* Release previous debug state. */
	if (cpu_debug != NULL) {
		if (os_ref_release(&cpu_debug->ref) == 0) {
			zfree(ads_zone, cpu_debug);
		}
	}

	/*
	 * Program only as many breakpoint pairs as the hardware implements,
	 * falling through from the highest implemented index down to 0.
	 */
	switch (debug_info->num_breakpoint_pairs) {
	case 16:
		SET_DBGBVRn(15, debug_state->uds.ds64.bvr[15]);
		SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds64.bcr[15], all_ctrls);
		OS_FALLTHROUGH;
	case 15:
		SET_DBGBVRn(14, debug_state->uds.ds64.bvr[14]);
		SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds64.bcr[14], all_ctrls);
		OS_FALLTHROUGH;
	case 14:
		SET_DBGBVRn(13, debug_state->uds.ds64.bvr[13]);
		SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds64.bcr[13], all_ctrls);
		OS_FALLTHROUGH;
	case 13:
		SET_DBGBVRn(12, debug_state->uds.ds64.bvr[12]);
		SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds64.bcr[12], all_ctrls);
		OS_FALLTHROUGH;
	case 12:
		SET_DBGBVRn(11, debug_state->uds.ds64.bvr[11]);
		SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds64.bcr[11], all_ctrls);
		OS_FALLTHROUGH;
	case 11:
		SET_DBGBVRn(10, debug_state->uds.ds64.bvr[10]);
		SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds64.bcr[10], all_ctrls);
		OS_FALLTHROUGH;
	case 10:
		SET_DBGBVRn(9, debug_state->uds.ds64.bvr[9]);
		SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds64.bcr[9], all_ctrls);
		OS_FALLTHROUGH;
	case 9:
		SET_DBGBVRn(8, debug_state->uds.ds64.bvr[8]);
		SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds64.bcr[8], all_ctrls);
		OS_FALLTHROUGH;
	case 8:
		SET_DBGBVRn(7, debug_state->uds.ds64.bvr[7]);
		SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds64.bcr[7], all_ctrls);
		OS_FALLTHROUGH;
	case 7:
		SET_DBGBVRn(6, debug_state->uds.ds64.bvr[6]);
		SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds64.bcr[6], all_ctrls);
		OS_FALLTHROUGH;
	case 6:
		SET_DBGBVRn(5, debug_state->uds.ds64.bvr[5]);
		SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds64.bcr[5], all_ctrls);
		OS_FALLTHROUGH;
	case 5:
		SET_DBGBVRn(4, debug_state->uds.ds64.bvr[4]);
		SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds64.bcr[4], all_ctrls);
		OS_FALLTHROUGH;
	case 4:
		SET_DBGBVRn(3, debug_state->uds.ds64.bvr[3]);
		SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds64.bcr[3], all_ctrls);
		OS_FALLTHROUGH;
	case 3:
		SET_DBGBVRn(2, debug_state->uds.ds64.bvr[2]);
		SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds64.bcr[2], all_ctrls);
		OS_FALLTHROUGH;
	case 2:
		SET_DBGBVRn(1, debug_state->uds.ds64.bvr[1]);
		SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds64.bcr[1], all_ctrls);
		OS_FALLTHROUGH;
	case 1:
		SET_DBGBVRn(0, debug_state->uds.ds64.bvr[0]);
		SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds64.bcr[0], all_ctrls);
		OS_FALLTHROUGH;
	default:
		break;
	}

	/* Same pattern for the implemented watchpoint pairs. */
	switch (debug_info->num_watchpoint_pairs) {
	case 16:
		SET_DBGWVRn(15, debug_state->uds.ds64.wvr[15]);
		SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds64.wcr[15], all_ctrls);
		OS_FALLTHROUGH;
	case 15:
		SET_DBGWVRn(14, debug_state->uds.ds64.wvr[14]);
		SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds64.wcr[14], all_ctrls);
		OS_FALLTHROUGH;
	case 14:
		SET_DBGWVRn(13, debug_state->uds.ds64.wvr[13]);
		SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds64.wcr[13], all_ctrls);
		OS_FALLTHROUGH;
	case 13:
		SET_DBGWVRn(12, debug_state->uds.ds64.wvr[12]);
		SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds64.wcr[12], all_ctrls);
		OS_FALLTHROUGH;
	case 12:
		SET_DBGWVRn(11, debug_state->uds.ds64.wvr[11]);
		SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds64.wcr[11], all_ctrls);
		OS_FALLTHROUGH;
	case 11:
		SET_DBGWVRn(10, debug_state->uds.ds64.wvr[10]);
		SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds64.wcr[10], all_ctrls);
		OS_FALLTHROUGH;
	case 10:
		SET_DBGWVRn(9, debug_state->uds.ds64.wvr[9]);
		SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds64.wcr[9], all_ctrls);
		OS_FALLTHROUGH;
	case 9:
		SET_DBGWVRn(8, debug_state->uds.ds64.wvr[8]);
		SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds64.wcr[8], all_ctrls);
		OS_FALLTHROUGH;
	case 8:
		SET_DBGWVRn(7, debug_state->uds.ds64.wvr[7]);
		SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds64.wcr[7], all_ctrls);
		OS_FALLTHROUGH;
	case 7:
		SET_DBGWVRn(6, debug_state->uds.ds64.wvr[6]);
		SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds64.wcr[6], all_ctrls);
		OS_FALLTHROUGH;
	case 6:
		SET_DBGWVRn(5, debug_state->uds.ds64.wvr[5]);
		SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds64.wcr[5], all_ctrls);
		OS_FALLTHROUGH;
	case 5:
		SET_DBGWVRn(4, debug_state->uds.ds64.wvr[4]);
		SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds64.wcr[4], all_ctrls);
		OS_FALLTHROUGH;
	case 4:
		SET_DBGWVRn(3, debug_state->uds.ds64.wvr[3]);
		SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds64.wcr[3], all_ctrls);
		OS_FALLTHROUGH;
	case 3:
		SET_DBGWVRn(2, debug_state->uds.ds64.wvr[2]);
		SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds64.wcr[2], all_ctrls);
		OS_FALLTHROUGH;
	case 2:
		SET_DBGWVRn(1, debug_state->uds.ds64.wvr[1]);
		SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds64.wcr[1], all_ctrls);
		OS_FALLTHROUGH;
	case 1:
		SET_DBGWVRn(0, debug_state->uds.ds64.wvr[0]);
		SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds64.wcr[0], all_ctrls);
		OS_FALLTHROUGH;
	default:
		break;
	}

#if defined(CONFIG_KERNEL_INTEGRITY)
	/* Refuse control values that would enable EL1+ (privileged) debug. */
	if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
		panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
	}
#endif

	/*
	 * Breakpoint/Watchpoint Enable
	 */
	if (all_ctrls != 0) {
		update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
	} else {
		update_mdscr(0x8000, 0);
	}

	/*
	 * Software debug single step enable
	 */
	if (debug_state->uds.ds64.mdscr_el1 & 0x1) {
		update_mdscr(0x8000, 1); // ~MDE | SS : no brk/watch while single stepping (which we've set)

		mask_saved_state_cpsr(current_thread()->machine.upcb, PSR64_SS, 0);
	} else {
		update_mdscr(0x1, 0);
	}

	/* Ensure all register writes are visible before re-enabling interrupts. */
	__builtin_arm_isb(ISB_SY);
	(void) ml_set_interrupts_enabled(intr);
}
982 
983 void
arm_debug_set(arm_debug_state_t * debug_state)984 arm_debug_set(arm_debug_state_t *debug_state)
985 {
986 	if (debug_state) {
987 		switch (debug_state->dsh.flavor) {
988 		case ARM_DEBUG_STATE32:
989 			arm_debug_set32(debug_state);
990 			break;
991 		case ARM_DEBUG_STATE64:
992 			arm_debug_set64(debug_state);
993 			break;
994 		default:
995 			panic("arm_debug_set");
996 			break;
997 		}
998 	} else {
999 		if (thread_is_64bit_data(current_thread())) {
1000 			arm_debug_set64(debug_state);
1001 		} else {
1002 			arm_debug_set32(debug_state);
1003 		}
1004 	}
1005 }
1006 
1007 #define VM_MAX_ADDRESS32          ((vm_address_t) 0x80000000)
1008 boolean_t
debug_legacy_state_is_valid(arm_legacy_debug_state_t * debug_state)1009 debug_legacy_state_is_valid(arm_legacy_debug_state_t *debug_state)
1010 {
1011 	arm_debug_info_t *debug_info = arm_debug_info();
1012 	uint32_t i;
1013 	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
1014 		if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i]) {
1015 			return FALSE;
1016 		}
1017 	}
1018 
1019 	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
1020 		if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i]) {
1021 			return FALSE;
1022 		}
1023 	}
1024 	return TRUE;
1025 }
1026 
1027 boolean_t
debug_state_is_valid32(arm_debug_state32_t * debug_state)1028 debug_state_is_valid32(arm_debug_state32_t *debug_state)
1029 {
1030 	arm_debug_info_t *debug_info = arm_debug_info();
1031 	uint32_t i;
1032 	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
1033 		if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i]) {
1034 			return FALSE;
1035 		}
1036 	}
1037 
1038 	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
1039 		if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i]) {
1040 			return FALSE;
1041 		}
1042 	}
1043 	return TRUE;
1044 }
1045 
1046 boolean_t
debug_state_is_valid64(arm_debug_state64_t * debug_state)1047 debug_state_is_valid64(arm_debug_state64_t *debug_state)
1048 {
1049 	arm_debug_info_t *debug_info = arm_debug_info();
1050 	uint32_t i;
1051 	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
1052 		if (0 != debug_state->bcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->bvr[i]) {
1053 			return FALSE;
1054 		}
1055 	}
1056 
1057 	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
1058 		if (0 != debug_state->wcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->wvr[i]) {
1059 			return FALSE;
1060 		}
1061 	}
1062 	return TRUE;
1063 }
1064 
1065 /*
1066  * Duplicate one arm_debug_state_t to another.  "all" parameter
1067  * is ignored in the case of ARM -- Is this the right assumption?
1068  */
1069 void
copy_legacy_debug_state(arm_legacy_debug_state_t * src,arm_legacy_debug_state_t * target,__unused boolean_t all)1070 copy_legacy_debug_state(arm_legacy_debug_state_t * src,
1071     arm_legacy_debug_state_t * target,
1072     __unused boolean_t         all)
1073 {
1074 	bcopy(src, target, sizeof(arm_legacy_debug_state_t));
1075 }
1076 
1077 void
copy_debug_state32(arm_debug_state32_t * src,arm_debug_state32_t * target,__unused boolean_t all)1078 copy_debug_state32(arm_debug_state32_t * src,
1079     arm_debug_state32_t * target,
1080     __unused boolean_t    all)
1081 {
1082 	bcopy(src, target, sizeof(arm_debug_state32_t));
1083 }
1084 
1085 void
copy_debug_state64(arm_debug_state64_t * src,arm_debug_state64_t * target,__unused boolean_t all)1086 copy_debug_state64(arm_debug_state64_t * src,
1087     arm_debug_state64_t * target,
1088     __unused boolean_t    all)
1089 {
1090 	bcopy(src, target, sizeof(arm_debug_state64_t));
1091 }
1092 
1093 kern_return_t
machine_thread_set_tsd_base(thread_t thread,mach_vm_offset_t tsd_base)1094 machine_thread_set_tsd_base(thread_t         thread,
1095     mach_vm_offset_t tsd_base)
1096 {
1097 	if (get_threadtask(thread) == kernel_task) {
1098 		return KERN_INVALID_ARGUMENT;
1099 	}
1100 
1101 	if (thread_is_64bit_addr(thread)) {
1102 		if (tsd_base > vm_map_max(thread->map)) {
1103 			tsd_base = 0ULL;
1104 		}
1105 	} else {
1106 		if (tsd_base > UINT32_MAX) {
1107 			tsd_base = 0ULL;
1108 		}
1109 	}
1110 
1111 	thread->machine.cthread_self = tsd_base;
1112 
1113 	/* For current thread, make the TSD base active immediately */
1114 	if (thread == current_thread()) {
1115 		mp_disable_preemption();
1116 		set_tpidrro(tsd_base);
1117 		mp_enable_preemption();
1118 	}
1119 
1120 	return KERN_SUCCESS;
1121 }
1122 
/*
 * machine_tecs
 * Machine-layer per-thread hook; intentionally a no-op in this
 * configuration (the thread argument is unused).
 */
void
machine_tecs(__unused thread_t thr)
{
	/* Intentionally empty: no per-thread work required here. */
}
1127 
/*
 * machine_csv
 * Machine-layer query taking a cpuvn_e value; always returns 0 in this
 * configuration.
 * NOTE(review): presumably 0 means "not affected / nothing to report" for
 * the given CPU-vulnerability enum — confirm against machine_csv callers.
 */
int
machine_csv(__unused cpuvn_e cve)
{
	return 0;
}
1133 
1134 #if __ARM_ARCH_8_5__
1135 void
arm_context_switch_requires_sync()1136 arm_context_switch_requires_sync()
1137 {
1138 	current_cpu_datap()->sync_on_cswitch = 1;
1139 }
1140 #endif
1141 
1142 #if __has_feature(ptrauth_calls)
/*
 * arm_user_jop_disabled
 * Reports whether user-space JOP protection (pointer authentication; this
 * function only exists when ptrauth_calls is available) has been disabled.
 * Always FALSE in this configuration.
 */
boolean_t
arm_user_jop_disabled(void)
{
	return FALSE;
}
1148 #endif /* __has_feature(ptrauth_calls) */
1149