xref: /xnu-8020.140.41/osfmk/arm64/pcb.c (revision 27b03b360a988dfd3dfdf34262bb0042026747cc)
1 /*
2  * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <debug.h>
30 
31 #include <types.h>
32 
33 #include <mach/mach_types.h>
34 #include <mach/thread_status.h>
35 #include <mach/vm_types.h>
36 
37 #include <kern/kern_types.h>
38 #include <kern/task.h>
39 #include <kern/thread.h>
40 #include <kern/misc_protos.h>
41 #include <kern/mach_param.h>
42 #include <kern/spl.h>
43 #include <kern/machine.h>
44 #include <kern/kpc.h>
45 
46 #if MONOTONIC
47 #include <kern/monotonic.h>
48 #endif /* MONOTONIC */
49 
50 #include <machine/atomic.h>
51 #include <arm64/proc_reg.h>
52 #include <arm64/machine_machdep.h>
53 #include <arm/cpu_data_internal.h>
54 #include <arm/machdep_call.h>
55 #include <arm/misc_protos.h>
56 #include <arm/cpuid.h>
57 
58 #include <vm/vm_map.h>
59 #include <vm/vm_protos.h>
60 
61 #include <sys/kdebug.h>
62 
63 
64 #include <san/kcov_stksz.h>
65 
66 extern int debug_task;
67 
68 /* zone for debug_state area */
69 ZONE_DEFINE_TYPE(ads_zone, "arm debug state", arm_debug_state_t, ZC_NONE);
70 ZONE_DEFINE_TYPE(user_ss_zone, "user save state", arm_context_t, ZC_NONE);
71 
72 /*
73  * Routine: consider_machine_collect
74  *
75  */
void
consider_machine_collect(void)
{
	/*
	 * Called by the VM layer under memory pressure; delegate to the pmap
	 * layer's garbage collection to release reclaimable pmap resources.
	 */
	pmap_gc();
}
81 
82 /*
83  * Routine: consider_machine_adjust
84  *
85  */
void
consider_machine_adjust(void)
{
	/* No machine-specific adjustment is required on this platform. */
}
90 
91 
92 
93 
94 
95 
static inline void
machine_thread_switch_cpu_data(thread_t old, thread_t new)
{
	/*
	 * Transfer ownership of the per-cpu data pointers from the outgoing
	 * thread to the incoming one during a context switch / stack handoff.
	 *
	 * We build with -fno-strict-aliasing, so the load through temporaries
	 * is required so that this generates a single load / store pair.
	 */
	cpu_data_t *datap = old->machine.CpuDatap;
	vm_offset_t base  = old->machine.pcpu_data_base;

	/* TODO: Should this be ordered? */

	/* Mark the outgoing thread off-core (see machine_thread_on_core()). */
	old->machine.CpuDatap = NULL;
	old->machine.pcpu_data_base = 0;

	new->machine.CpuDatap = datap;
	new->machine.pcpu_data_base = base;
}
114 
115 /**
116  * routine: machine_switch_pmap_and_extended_context
117  *
118  * Helper function used by machine_switch_context and machine_stack_handoff to switch the
119  * extended context and switch the pmap if necessary.
120  *
121  */
122 
/*
 * Switch address spaces (if the old and new threads belong to different
 * pmaps) and hand the per-cpu data over to the incoming thread.  Shared
 * between machine_switch_context() and machine_stack_handoff().
 */
static inline void
machine_switch_pmap_and_extended_context(thread_t old, thread_t new)
{
	pmap_t new_pmap;

	new_pmap = new->map->pmap;
	if (old->map->pmap != new_pmap) {
		pmap_switch(new_pmap);
	} else {
		/*
		 * If the thread is preempted while performing cache or TLB maintenance,
		 * it may be migrated to a different CPU between the completion of the relevant
		 * maintenance instruction and the synchronizing DSB.   ARM requires that the
		 * synchronizing DSB must be issued *on the PE that issued the maintenance instruction*
		 * in order to guarantee completion of the instruction and visibility of its effects.
		 * Issue DSB here to enforce that guarantee.  We only do this for the case in which
		 * the pmap isn't changing, as we expect pmap_switch() to issue DSB when it updates
		 * TTBR0.  Note also that cache maintenance may be performed in userspace, so we
		 * cannot further limit this operation e.g. by setting a per-thread flag to indicate
		 * a pending kernel TLB or cache maintenance instruction.
		 */
		__builtin_arm_dsb(DSB_ISH);
	}

	machine_thread_switch_cpu_data(old, new);
}
156 
157 /*
158  * Routine: machine_switch_context
159  *
160  */
/*
 * Switch from 'old' to 'new', optionally resuming 'new' at 'continuation'.
 * Returns the thread we switched back from when 'old' eventually resumes.
 */
thread_t
machine_switch_context(thread_t old,
    thread_continue_t continuation,
    thread_t new)
{
	thread_t retval;

#if __ARM_PAN_AVAILABLE__
	/* PAN must be enabled whenever we are in the kernel's switch path. */
	if (__improbable(__builtin_arm_rsr("pan") == 0)) {
		panic("context switch with PAN disabled");
	}
#endif

#define machine_switch_context_kprintf(x...) \
	/* kprintf("machine_switch_context: " x) */

	/* Switching a thread to itself is a scheduler invariant violation. */
	if (old == new) {
		panic("machine_switch_context");
	}

	/* Stop sampling performance counters for the outgoing thread. */
	kpc_off_cpu(old);

	machine_switch_pmap_and_extended_context(old, new);

	machine_switch_context_kprintf("old= %x contination = %x new = %x\n", old, continuation, new);

	/* Low-level register save/restore; does not return until 'old' runs again. */
	retval = Switch_context(old, continuation, new);
	assert(retval != NULL);

	return retval;
}
192 
193 boolean_t
machine_thread_on_core(thread_t thread)194 machine_thread_on_core(thread_t thread)
195 {
196 	return thread->machine.CpuDatap != NULL;
197 }
198 
199 
200 /*
201  * Routine: machine_thread_create
202  *
203  */
/*
 * Machine-dependent thread initialization: reset per-thread machine state
 * and, for user tasks, allocate and wire up the userspace save-state area.
 */
void
machine_thread_create(thread_t thread, task_t task, bool first_thread)
{
#define machine_thread_create_kprintf(x...) \
	/* kprintf("machine_thread_create: " x) */

	machine_thread_create_kprintf("thread = %x\n", thread);

	if (!first_thread) {
		thread->machine.CpuDatap = (cpu_data_t *)0;
		// setting this offset will cause trying to use it to panic
		thread->machine.pcpu_data_base = (vm_offset_t)VM_MIN_KERNEL_ADDRESS;
	}
	thread->machine.preemption_count = 0;
	thread->machine.cthread_self = 0;
	thread->machine.kpcb = NULL;
	thread->machine.exception_trace_code = 0;
#if defined(HAS_APPLE_PAC)
	/* Inherit the task's pointer-authentication keys/policy. */
	thread->machine.rop_pid = task->rop_pid;
	thread->machine.jop_pid = task->jop_pid;
	thread->machine.disable_user_jop = task->disable_user_jop;
#endif

	if (task != kernel_task) {
		/* If this isn't a kernel thread, we'll have userspace state. */
		thread->machine.contextData = zalloc_flags(user_ss_zone,
		    Z_WAITOK | Z_NOFAIL);

		thread->machine.upcb = &thread->machine.contextData->ss;
		thread->machine.uNeon = &thread->machine.contextData->ns;

		/* Tag the save areas with the flavor matching the task's data width. */
		if (task_has_64Bit_data(task)) {
			thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
			thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
			thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
			thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
		} else {
			thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
			thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
			thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
			thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		}
	} else {
		/* Kernel threads never return to userspace; no user save state. */
		thread->machine.upcb = NULL;
		thread->machine.uNeon = NULL;
		thread->machine.contextData = NULL;
	}

	bzero(&thread->machine.perfctrl_state, sizeof(thread->machine.perfctrl_state));
	machine_thread_state_initialize(thread);
}
259 
260 /*
261  * Routine: machine_thread_destroy
262  *
263  */
/*
 * Machine-dependent thread teardown: free the user save state and drop the
 * thread's reference on its debug state (clearing the active CPU copy first).
 */
void
machine_thread_destroy(thread_t thread)
{
	arm_context_t *thread_user_ss;

	if (thread->machine.contextData) {
		/* Disassociate the user save state from the thread before we free it. */
		thread_user_ss = thread->machine.contextData;
		thread->machine.upcb = NULL;
		thread->machine.uNeon = NULL;
		thread->machine.contextData = NULL;

		zfree(user_ss_zone, thread_user_ss);
	}

	if (thread->machine.DebugData != NULL) {
		/* If this thread's debug state is live on the current CPU, disable it. */
		if (thread->machine.DebugData == getCpuDatap()->cpu_user_debug) {
			arm_debug_set(NULL);
		}

		/* Debug state is refcounted; free only when the last reference drops. */
		if (os_ref_release(&thread->machine.DebugData->ref) == 0) {
			zfree(ads_zone, thread->machine.DebugData);
		}
	}
}
290 
291 
292 /*
293  * Routine: machine_thread_init
294  *
295  */
void
machine_thread_init(void)
{
	/* No boot-time machine-dependent thread-layer setup needed on arm64. */
}
300 
301 /*
302  * Routine:	machine_thread_template_init
303  *
304  */
void
machine_thread_template_init(thread_t __unused thr_template)
{
	/* Nothing to do on this platform. */
}
310 
311 /*
312  * Routine: get_useraddr
313  *
314  */
315 user_addr_t
get_useraddr()316 get_useraddr()
317 {
318 	return get_saved_state_pc(current_thread()->machine.upcb);
319 }
320 
321 /*
322  * Routine: machine_stack_detach
323  *
324  */
/*
 * Detach and return the kernel stack currently attached to 'thread',
 * clearing the thread's stack bookkeeping.
 */
vm_offset_t
machine_stack_detach(thread_t thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	stack = thread->kernel_stack;
#if CONFIG_STKSZ
	/* Tell the coverage/stack-size instrumentation which stack was detached. */
	kcov_stksz_set_thread_stack(thread, stack);
#endif
	thread->kernel_stack = 0;
	thread->machine.kstackptr = 0;

	return stack;
}
342 
343 
344 /*
345  * Routine: machine_stack_attach
346  *
347  */
/*
 * Attach a kernel stack to 'thread' and initialize the kernel saved state
 * at its top so the thread resumes in thread_continue().
 */
void
machine_stack_attach(thread_t thread,
    vm_offset_t stack)
{
	struct arm_kernel_context *context;
	struct arm_kernel_saved_state *savestate;
	struct arm_kernel_neon_saved_state *neon_savestate;
	/*
	 * NOTE(review): current_el is read but not otherwise used in this
	 * configuration — presumably consumed by code elided under other
	 * build options; confirm against the full source.
	 */
	uint32_t current_el;

#define machine_stack_attach_kprintf(x...) \
	/* kprintf("machine_stack_attach: " x) */

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	thread->kernel_stack = stack;
#if CONFIG_STKSZ
	kcov_stksz_set_thread_stack(thread, 0);
#endif
	/* The machine state lives at the top of the stack; SP starts just below it. */
	thread->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
	thread_initialize_kernel_state(thread);

	machine_stack_attach_kprintf("kstackptr: %lx\n", (vm_address_t)thread->machine.kstackptr);

	current_el = (uint32_t) __builtin_arm_rsr64("CurrentEL");
	context = &((thread_kernel_state_t) thread->machine.kstackptr)->machine;
	savestate = &context->ss;
	savestate->fp = 0;
	savestate->sp = thread->machine.kstackptr;
	savestate->pc = 0;
#if defined(HAS_APPLE_PAC)
	/* Sign the initial kernel stack saved state */
	uint64_t intr = ml_pac_safe_interrupts_disable();
	asm volatile (
                "adrp	x17, _thread_continue@page"             "\n"
                "add	x17, x17, _thread_continue@pageoff"     "\n"
                "ldr	x16, [%[ss], %[SS64_SP]]"               "\n"
                "pacia1716"                                     "\n"
                "str	x17, [%[ss], %[SS64_LR]]"               "\n"
                :
                : [ss]                  "r"(&context->ss),
                  [SS64_SP]             "i"(offsetof(struct arm_kernel_saved_state, sp)),
                  [SS64_LR]             "i"(offsetof(struct arm_kernel_saved_state, lr))
                : "x16", "x17"
        );
	ml_pac_safe_interrupts_restore(intr);
#else
	/* Without PAC, the return address can be stored directly. */
	savestate->lr = (uintptr_t)thread_continue;
#endif /* defined(HAS_APPLE_PAC) */
	neon_savestate = &context->ns;
	neon_savestate->fpcr = FPCR_DEFAULT;
	machine_stack_attach_kprintf("thread = %p pc = %llx, sp = %llx\n", thread, savestate->lr, savestate->sp);
}
401 
402 
403 /*
404  * Routine: machine_stack_handoff
405  *
406  */
/*
 * Hand the current kernel stack directly from 'old' to 'new' without a full
 * context switch, then make 'new' the current thread.
 */
void
machine_stack_handoff(thread_t old,
    thread_t new)
{
	vm_offset_t  stack;

#if __ARM_PAN_AVAILABLE__
	/* PAN must remain enabled throughout the handoff path. */
	if (__improbable(__builtin_arm_rsr("pan") == 0)) {
		panic("stack handoff with PAN disabled");
	}
#endif

	/* Stop sampling performance counters for the outgoing thread. */
	kpc_off_cpu(old);

	stack = machine_stack_detach(old);
#if CONFIG_STKSZ
	kcov_stksz_set_thread_stack(new, 0);
#endif
	new->kernel_stack = stack;
	new->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
	/* If 'old' held the reserved stack, swap reserved-stack ownership too. */
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}

	machine_switch_pmap_and_extended_context(old, new);

	machine_set_current_thread(new);
	thread_initialize_kernel_state(new);
}
438 
439 
440 /*
441  * Routine: call_continuation
442  *
443  */
/*
 * Invoke a thread continuation on the thread's own kernel stack,
 * delegating to the assembly trampoline Call_continuation().
 */
void
call_continuation(thread_continue_t continuation,
    void *parameter,
    wait_result_t wresult,
    boolean_t enable_interrupts)
{
#define call_continuation_kprintf(x...) \
	/* kprintf("call_continuation_kprintf:" x) */

	call_continuation_kprintf("thread = %p continuation = %p, stack = %p\n", current_thread(), continuation, current_thread()->machine.kstackptr);
	Call_continuation(continuation, parameter, wresult, enable_interrupts);
}
456 
/*
 * Helpers to program one hardware breakpoint (DBGB*) or watchpoint (DBGW*)
 * value/control register pair.  The register index must be a literal because
 * it is pasted into the msr instruction's system-register name.
 *
 * The control-register setters also OR the written value into 'accum' so the
 * caller can tell afterwards whether any control register was set non-zero.
 */
#define SET_DBGBCRn(n, value, accum) \
	__asm__ volatile( \
	        "msr DBGBCR" #n "_EL1, %[val]\n" \
	        "orr %[result], %[result], %[val]\n" \
	        : [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGBVRn(n, value) \
	__asm__ volatile("msr DBGBVR" #n "_EL1, %0" : : "r"(value))

#define SET_DBGWCRn(n, value, accum) \
	__asm__ volatile( \
	        "msr DBGWCR" #n "_EL1, %[val]\n" \
	        "orr %[result], %[result], %[val]\n" \
	        : [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGWVRn(n, value) \
	__asm__ volatile("msr DBGWVR" #n "_EL1, %0" : : "r"(value))
474 
/*
 * Load a 32-bit-flavor user debug state into the hardware breakpoint and
 * watchpoint registers of the current CPU (NULL disables debug state).
 * Runs with interrupts disabled; takes/drops refcounts on the per-cpu
 * debug-state pointer.
 */
void
arm_debug_set32(arm_debug_state_t *debug_state)
{
	struct cpu_data *  cpu_data_ptr;
	arm_debug_info_t * debug_info    = arm_debug_info();
	boolean_t          intr;
	arm_debug_state_t  off_state;
	arm_debug_state_t  *cpu_debug;
	/* ORs together every control register written; non-zero => something enabled. */
	uint64_t           all_ctrls = 0;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();
	cpu_debug = cpu_data_ptr->cpu_user_debug;

	/*
	 * Retain and set new per-cpu state.
	 * Reference count does not matter when turning off debug state.
	 */
	if (debug_state == NULL) {
		/* Disabling: program an all-zero state from the stack. */
		bzero(&off_state, sizeof(off_state));
		cpu_data_ptr->cpu_user_debug = NULL;
		debug_state = &off_state;
	} else {
		os_ref_retain(&debug_state->ref);
		cpu_data_ptr->cpu_user_debug = debug_state;
	}

	/* Release previous debug state. */
	if (cpu_debug != NULL) {
		if (os_ref_release(&cpu_debug->ref) == 0) {
			zfree(ads_zone, cpu_debug);
		}
	}

	/*
	 * Program only the breakpoint pairs this CPU implements; the cases
	 * fall through from the highest implemented index down to 0.
	 */
	switch (debug_info->num_breakpoint_pairs) {
	case 16:
		SET_DBGBVRn(15, (uint64_t)debug_state->uds.ds32.bvr[15]);
		SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds32.bcr[15], all_ctrls);
		OS_FALLTHROUGH;
	case 15:
		SET_DBGBVRn(14, (uint64_t)debug_state->uds.ds32.bvr[14]);
		SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds32.bcr[14], all_ctrls);
		OS_FALLTHROUGH;
	case 14:
		SET_DBGBVRn(13, (uint64_t)debug_state->uds.ds32.bvr[13]);
		SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds32.bcr[13], all_ctrls);
		OS_FALLTHROUGH;
	case 13:
		SET_DBGBVRn(12, (uint64_t)debug_state->uds.ds32.bvr[12]);
		SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds32.bcr[12], all_ctrls);
		OS_FALLTHROUGH;
	case 12:
		SET_DBGBVRn(11, (uint64_t)debug_state->uds.ds32.bvr[11]);
		SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds32.bcr[11], all_ctrls);
		OS_FALLTHROUGH;
	case 11:
		SET_DBGBVRn(10, (uint64_t)debug_state->uds.ds32.bvr[10]);
		SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds32.bcr[10], all_ctrls);
		OS_FALLTHROUGH;
	case 10:
		SET_DBGBVRn(9, (uint64_t)debug_state->uds.ds32.bvr[9]);
		SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds32.bcr[9], all_ctrls);
		OS_FALLTHROUGH;
	case 9:
		SET_DBGBVRn(8, (uint64_t)debug_state->uds.ds32.bvr[8]);
		SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds32.bcr[8], all_ctrls);
		OS_FALLTHROUGH;
	case 8:
		SET_DBGBVRn(7, (uint64_t)debug_state->uds.ds32.bvr[7]);
		SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds32.bcr[7], all_ctrls);
		OS_FALLTHROUGH;
	case 7:
		SET_DBGBVRn(6, (uint64_t)debug_state->uds.ds32.bvr[6]);
		SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds32.bcr[6], all_ctrls);
		OS_FALLTHROUGH;
	case 6:
		SET_DBGBVRn(5, (uint64_t)debug_state->uds.ds32.bvr[5]);
		SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds32.bcr[5], all_ctrls);
		OS_FALLTHROUGH;
	case 5:
		SET_DBGBVRn(4, (uint64_t)debug_state->uds.ds32.bvr[4]);
		SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds32.bcr[4], all_ctrls);
		OS_FALLTHROUGH;
	case 4:
		SET_DBGBVRn(3, (uint64_t)debug_state->uds.ds32.bvr[3]);
		SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds32.bcr[3], all_ctrls);
		OS_FALLTHROUGH;
	case 3:
		SET_DBGBVRn(2, (uint64_t)debug_state->uds.ds32.bvr[2]);
		SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds32.bcr[2], all_ctrls);
		OS_FALLTHROUGH;
	case 2:
		SET_DBGBVRn(1, (uint64_t)debug_state->uds.ds32.bvr[1]);
		SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds32.bcr[1], all_ctrls);
		OS_FALLTHROUGH;
	case 1:
		SET_DBGBVRn(0, (uint64_t)debug_state->uds.ds32.bvr[0]);
		SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds32.bcr[0], all_ctrls);
		OS_FALLTHROUGH;
	default:
		break;
	}

	/* Same pattern for the implemented watchpoint pairs. */
	switch (debug_info->num_watchpoint_pairs) {
	case 16:
		SET_DBGWVRn(15, (uint64_t)debug_state->uds.ds32.wvr[15]);
		SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds32.wcr[15], all_ctrls);
		OS_FALLTHROUGH;
	case 15:
		SET_DBGWVRn(14, (uint64_t)debug_state->uds.ds32.wvr[14]);
		SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds32.wcr[14], all_ctrls);
		OS_FALLTHROUGH;
	case 14:
		SET_DBGWVRn(13, (uint64_t)debug_state->uds.ds32.wvr[13]);
		SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds32.wcr[13], all_ctrls);
		OS_FALLTHROUGH;
	case 13:
		SET_DBGWVRn(12, (uint64_t)debug_state->uds.ds32.wvr[12]);
		SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds32.wcr[12], all_ctrls);
		OS_FALLTHROUGH;
	case 12:
		SET_DBGWVRn(11, (uint64_t)debug_state->uds.ds32.wvr[11]);
		SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds32.wcr[11], all_ctrls);
		OS_FALLTHROUGH;
	case 11:
		SET_DBGWVRn(10, (uint64_t)debug_state->uds.ds32.wvr[10]);
		SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds32.wcr[10], all_ctrls);
		OS_FALLTHROUGH;
	case 10:
		SET_DBGWVRn(9, (uint64_t)debug_state->uds.ds32.wvr[9]);
		SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds32.wcr[9], all_ctrls);
		OS_FALLTHROUGH;
	case 9:
		SET_DBGWVRn(8, (uint64_t)debug_state->uds.ds32.wvr[8]);
		SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds32.wcr[8], all_ctrls);
		OS_FALLTHROUGH;
	case 8:
		SET_DBGWVRn(7, (uint64_t)debug_state->uds.ds32.wvr[7]);
		SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds32.wcr[7], all_ctrls);
		OS_FALLTHROUGH;
	case 7:
		SET_DBGWVRn(6, (uint64_t)debug_state->uds.ds32.wvr[6]);
		SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds32.wcr[6], all_ctrls);
		OS_FALLTHROUGH;
	case 6:
		SET_DBGWVRn(5, (uint64_t)debug_state->uds.ds32.wvr[5]);
		SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds32.wcr[5], all_ctrls);
		OS_FALLTHROUGH;
	case 5:
		SET_DBGWVRn(4, (uint64_t)debug_state->uds.ds32.wvr[4]);
		SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds32.wcr[4], all_ctrls);
		OS_FALLTHROUGH;
	case 4:
		SET_DBGWVRn(3, (uint64_t)debug_state->uds.ds32.wvr[3]);
		SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds32.wcr[3], all_ctrls);
		OS_FALLTHROUGH;
	case 3:
		SET_DBGWVRn(2, (uint64_t)debug_state->uds.ds32.wvr[2]);
		SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds32.wcr[2], all_ctrls);
		OS_FALLTHROUGH;
	case 2:
		SET_DBGWVRn(1, (uint64_t)debug_state->uds.ds32.wvr[1]);
		SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds32.wcr[1], all_ctrls);
		OS_FALLTHROUGH;
	case 1:
		SET_DBGWVRn(0, (uint64_t)debug_state->uds.ds32.wvr[0]);
		SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds32.wcr[0], all_ctrls);
		OS_FALLTHROUGH;
	default:
		break;
	}

#if defined(CONFIG_KERNEL_INTEGRITY)
	/* Refuse control values that would enable privileged (EL1+) debug. */
	if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
		panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
	}
#endif

	/*
	 * Breakpoint/Watchpoint Enable
	 */
	if (all_ctrls != 0) {
		update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
	} else {
		update_mdscr(0x8000, 0);
	}

	/*
	 * Software debug single step enable
	 */
	if (debug_state->uds.ds32.mdscr_el1 & 0x1) {
		update_mdscr(0x8000, 1); // ~MDE | SS : no brk/watch while single stepping (which we've set)

		/* Clear PSR.SS in the saved state so the step fires on return to user. */
		mask_saved_state_cpsr(current_thread()->machine.upcb, PSR64_SS, 0);
	} else {
		update_mdscr(0x1, 0);
	}

	/* Ensure all register writes above are visible before re-enabling interrupts. */
	__builtin_arm_isb(ISB_SY);
	(void) ml_set_interrupts_enabled(intr);
}
676 
/*
 * Load a 64-bit-flavor user debug state into the hardware breakpoint and
 * watchpoint registers of the current CPU (NULL disables debug state).
 * Mirrors arm_debug_set32() but reads the ds64 union member.
 */
void
arm_debug_set64(arm_debug_state_t *debug_state)
{
	struct cpu_data *  cpu_data_ptr;
	arm_debug_info_t * debug_info    = arm_debug_info();
	boolean_t          intr;
	arm_debug_state_t  off_state;
	arm_debug_state_t  *cpu_debug;
	/* ORs together every control register written; non-zero => something enabled. */
	uint64_t           all_ctrls = 0;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();
	cpu_debug = cpu_data_ptr->cpu_user_debug;

	/*
	 * Retain and set new per-cpu state.
	 * Reference count does not matter when turning off debug state.
	 */
	if (debug_state == NULL) {
		/* Disabling: program an all-zero state from the stack. */
		bzero(&off_state, sizeof(off_state));
		cpu_data_ptr->cpu_user_debug = NULL;
		debug_state = &off_state;
	} else {
		os_ref_retain(&debug_state->ref);
		cpu_data_ptr->cpu_user_debug = debug_state;
	}

	/* Release previous debug state. */
	if (cpu_debug != NULL) {
		if (os_ref_release(&cpu_debug->ref) == 0) {
			zfree(ads_zone, cpu_debug);
		}
	}

	/*
	 * Program only the breakpoint pairs this CPU implements; the cases
	 * fall through from the highest implemented index down to 0.
	 */
	switch (debug_info->num_breakpoint_pairs) {
	case 16:
		SET_DBGBVRn(15, debug_state->uds.ds64.bvr[15]);
		SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds64.bcr[15], all_ctrls);
		OS_FALLTHROUGH;
	case 15:
		SET_DBGBVRn(14, debug_state->uds.ds64.bvr[14]);
		SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds64.bcr[14], all_ctrls);
		OS_FALLTHROUGH;
	case 14:
		SET_DBGBVRn(13, debug_state->uds.ds64.bvr[13]);
		SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds64.bcr[13], all_ctrls);
		OS_FALLTHROUGH;
	case 13:
		SET_DBGBVRn(12, debug_state->uds.ds64.bvr[12]);
		SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds64.bcr[12], all_ctrls);
		OS_FALLTHROUGH;
	case 12:
		SET_DBGBVRn(11, debug_state->uds.ds64.bvr[11]);
		SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds64.bcr[11], all_ctrls);
		OS_FALLTHROUGH;
	case 11:
		SET_DBGBVRn(10, debug_state->uds.ds64.bvr[10]);
		SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds64.bcr[10], all_ctrls);
		OS_FALLTHROUGH;
	case 10:
		SET_DBGBVRn(9, debug_state->uds.ds64.bvr[9]);
		SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds64.bcr[9], all_ctrls);
		OS_FALLTHROUGH;
	case 9:
		SET_DBGBVRn(8, debug_state->uds.ds64.bvr[8]);
		SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds64.bcr[8], all_ctrls);
		OS_FALLTHROUGH;
	case 8:
		SET_DBGBVRn(7, debug_state->uds.ds64.bvr[7]);
		SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds64.bcr[7], all_ctrls);
		OS_FALLTHROUGH;
	case 7:
		SET_DBGBVRn(6, debug_state->uds.ds64.bvr[6]);
		SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds64.bcr[6], all_ctrls);
		OS_FALLTHROUGH;
	case 6:
		SET_DBGBVRn(5, debug_state->uds.ds64.bvr[5]);
		SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds64.bcr[5], all_ctrls);
		OS_FALLTHROUGH;
	case 5:
		SET_DBGBVRn(4, debug_state->uds.ds64.bvr[4]);
		SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds64.bcr[4], all_ctrls);
		OS_FALLTHROUGH;
	case 4:
		SET_DBGBVRn(3, debug_state->uds.ds64.bvr[3]);
		SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds64.bcr[3], all_ctrls);
		OS_FALLTHROUGH;
	case 3:
		SET_DBGBVRn(2, debug_state->uds.ds64.bvr[2]);
		SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds64.bcr[2], all_ctrls);
		OS_FALLTHROUGH;
	case 2:
		SET_DBGBVRn(1, debug_state->uds.ds64.bvr[1]);
		SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds64.bcr[1], all_ctrls);
		OS_FALLTHROUGH;
	case 1:
		SET_DBGBVRn(0, debug_state->uds.ds64.bvr[0]);
		SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds64.bcr[0], all_ctrls);
		OS_FALLTHROUGH;
	default:
		break;
	}

	/* Same pattern for the implemented watchpoint pairs. */
	switch (debug_info->num_watchpoint_pairs) {
	case 16:
		SET_DBGWVRn(15, debug_state->uds.ds64.wvr[15]);
		SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds64.wcr[15], all_ctrls);
		OS_FALLTHROUGH;
	case 15:
		SET_DBGWVRn(14, debug_state->uds.ds64.wvr[14]);
		SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds64.wcr[14], all_ctrls);
		OS_FALLTHROUGH;
	case 14:
		SET_DBGWVRn(13, debug_state->uds.ds64.wvr[13]);
		SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds64.wcr[13], all_ctrls);
		OS_FALLTHROUGH;
	case 13:
		SET_DBGWVRn(12, debug_state->uds.ds64.wvr[12]);
		SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds64.wcr[12], all_ctrls);
		OS_FALLTHROUGH;
	case 12:
		SET_DBGWVRn(11, debug_state->uds.ds64.wvr[11]);
		SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds64.wcr[11], all_ctrls);
		OS_FALLTHROUGH;
	case 11:
		SET_DBGWVRn(10, debug_state->uds.ds64.wvr[10]);
		SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds64.wcr[10], all_ctrls);
		OS_FALLTHROUGH;
	case 10:
		SET_DBGWVRn(9, debug_state->uds.ds64.wvr[9]);
		SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds64.wcr[9], all_ctrls);
		OS_FALLTHROUGH;
	case 9:
		SET_DBGWVRn(8, debug_state->uds.ds64.wvr[8]);
		SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds64.wcr[8], all_ctrls);
		OS_FALLTHROUGH;
	case 8:
		SET_DBGWVRn(7, debug_state->uds.ds64.wvr[7]);
		SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds64.wcr[7], all_ctrls);
		OS_FALLTHROUGH;
	case 7:
		SET_DBGWVRn(6, debug_state->uds.ds64.wvr[6]);
		SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds64.wcr[6], all_ctrls);
		OS_FALLTHROUGH;
	case 6:
		SET_DBGWVRn(5, debug_state->uds.ds64.wvr[5]);
		SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds64.wcr[5], all_ctrls);
		OS_FALLTHROUGH;
	case 5:
		SET_DBGWVRn(4, debug_state->uds.ds64.wvr[4]);
		SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds64.wcr[4], all_ctrls);
		OS_FALLTHROUGH;
	case 4:
		SET_DBGWVRn(3, debug_state->uds.ds64.wvr[3]);
		SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds64.wcr[3], all_ctrls);
		OS_FALLTHROUGH;
	case 3:
		SET_DBGWVRn(2, debug_state->uds.ds64.wvr[2]);
		SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds64.wcr[2], all_ctrls);
		OS_FALLTHROUGH;
	case 2:
		SET_DBGWVRn(1, debug_state->uds.ds64.wvr[1]);
		SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds64.wcr[1], all_ctrls);
		OS_FALLTHROUGH;
	case 1:
		SET_DBGWVRn(0, debug_state->uds.ds64.wvr[0]);
		SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds64.wcr[0], all_ctrls);
		OS_FALLTHROUGH;
	default:
		break;
	}

#if defined(CONFIG_KERNEL_INTEGRITY)
	/* Refuse control values that would enable privileged (EL1+) debug. */
	if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
		panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
	}
#endif

	/*
	 * Breakpoint/Watchpoint Enable
	 */
	if (all_ctrls != 0) {
		update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
	} else {
		update_mdscr(0x8000, 0);
	}

	/*
	 * Software debug single step enable
	 */
	if (debug_state->uds.ds64.mdscr_el1 & 0x1) {
		update_mdscr(0x8000, 1); // ~MDE | SS : no brk/watch while single stepping (which we've set)

		/* Clear PSR.SS in the saved state so the step fires on return to user. */
		mask_saved_state_cpsr(current_thread()->machine.upcb, PSR64_SS, 0);
	} else {
		update_mdscr(0x1, 0);
	}

	/* Ensure all register writes above are visible before re-enabling interrupts. */
	__builtin_arm_isb(ISB_SY);
	(void) ml_set_interrupts_enabled(intr);
}
878 
879 void
arm_debug_set(arm_debug_state_t * debug_state)880 arm_debug_set(arm_debug_state_t *debug_state)
881 {
882 	if (debug_state) {
883 		switch (debug_state->dsh.flavor) {
884 		case ARM_DEBUG_STATE32:
885 			arm_debug_set32(debug_state);
886 			break;
887 		case ARM_DEBUG_STATE64:
888 			arm_debug_set64(debug_state);
889 			break;
890 		default:
891 			panic("arm_debug_set");
892 			break;
893 		}
894 	} else {
895 		if (thread_is_64bit_data(current_thread())) {
896 			arm_debug_set64(debug_state);
897 		} else {
898 			arm_debug_set32(debug_state);
899 		}
900 	}
901 }
902 
903 #define VM_MAX_ADDRESS32          ((vm_address_t) 0x80000000)
904 boolean_t
debug_legacy_state_is_valid(arm_legacy_debug_state_t * debug_state)905 debug_legacy_state_is_valid(arm_legacy_debug_state_t *debug_state)
906 {
907 	arm_debug_info_t *debug_info = arm_debug_info();
908 	uint32_t i;
909 	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
910 		if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i]) {
911 			return FALSE;
912 		}
913 	}
914 
915 	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
916 		if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i]) {
917 			return FALSE;
918 		}
919 	}
920 	return TRUE;
921 }
922 
923 boolean_t
debug_state_is_valid32(arm_debug_state32_t * debug_state)924 debug_state_is_valid32(arm_debug_state32_t *debug_state)
925 {
926 	arm_debug_info_t *debug_info = arm_debug_info();
927 	uint32_t i;
928 	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
929 		if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i]) {
930 			return FALSE;
931 		}
932 	}
933 
934 	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
935 		if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i]) {
936 			return FALSE;
937 		}
938 	}
939 	return TRUE;
940 }
941 
942 boolean_t
debug_state_is_valid64(arm_debug_state64_t * debug_state)943 debug_state_is_valid64(arm_debug_state64_t *debug_state)
944 {
945 	arm_debug_info_t *debug_info = arm_debug_info();
946 	uint32_t i;
947 	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
948 		if (0 != debug_state->bcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->bvr[i]) {
949 			return FALSE;
950 		}
951 	}
952 
953 	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
954 		if (0 != debug_state->wcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->wvr[i]) {
955 			return FALSE;
956 		}
957 	}
958 	return TRUE;
959 }
960 
961 /*
962  * Duplicate one arm_debug_state_t to another.  "all" parameter
963  * is ignored in the case of ARM -- Is this the right assumption?
964  */
/* Copy one legacy debug state to another; 'all' is ignored on ARM. */
void
copy_legacy_debug_state(arm_legacy_debug_state_t * src,
    arm_legacy_debug_state_t * target,
    __unused boolean_t         all)
{
	bcopy(src, target, sizeof(arm_legacy_debug_state_t));
}
972 
/* Copy one 32-bit debug state to another; 'all' is ignored on ARM. */
void
copy_debug_state32(arm_debug_state32_t * src,
    arm_debug_state32_t * target,
    __unused boolean_t    all)
{
	bcopy(src, target, sizeof(arm_debug_state32_t));
}
980 
/* Copy one 64-bit debug state to another; 'all' is ignored on ARM. */
void
copy_debug_state64(arm_debug_state64_t * src,
    arm_debug_state64_t * target,
    __unused boolean_t    all)
{
	bcopy(src, target, sizeof(arm_debug_state64_t));
}
988 
989 kern_return_t
machine_thread_set_tsd_base(thread_t thread,mach_vm_offset_t tsd_base)990 machine_thread_set_tsd_base(thread_t         thread,
991     mach_vm_offset_t tsd_base)
992 {
993 	if (get_threadtask(thread) == kernel_task) {
994 		return KERN_INVALID_ARGUMENT;
995 	}
996 
997 	if (thread_is_64bit_addr(thread)) {
998 		if (tsd_base > vm_map_max(thread->map)) {
999 			tsd_base = 0ULL;
1000 		}
1001 	} else {
1002 		if (tsd_base > UINT32_MAX) {
1003 			tsd_base = 0ULL;
1004 		}
1005 	}
1006 
1007 	thread->machine.cthread_self = tsd_base;
1008 
1009 	/* For current thread, make the TSD base active immediately */
1010 	if (thread == current_thread()) {
1011 		mp_disable_preemption();
1012 		set_tpidrro(tsd_base);
1013 		mp_enable_preemption();
1014 	}
1015 
1016 	return KERN_SUCCESS;
1017 }
1018 
void
machine_tecs(__unused thread_t thr)
{
	/* No thread-exit/entry mitigation work required on this platform. */
}
1023 
int
machine_csv(__unused cpuvn_e cve)
{
	/* No CPU vulnerability applies here; report 0 for every query. */
	return 0;
}
1029 
1030 #if __ARM_ARCH_8_5__
1031 void
arm_context_switch_requires_sync()1032 arm_context_switch_requires_sync()
1033 {
1034 	current_cpu_datap()->sync_on_cswitch = 1;
1035 }
1036 #endif
1037 
1038 #if __has_feature(ptrauth_calls)
boolean_t
arm_user_jop_disabled(void)
{
	/* User JOP is never globally disabled in this configuration. */
	return FALSE;
}
1044 #endif /* __has_feature(ptrauth_calls) */
1045