1 /*
2 * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <debug.h>
30
31 #include <types.h>
32
33 #include <mach/mach_types.h>
34 #include <mach/thread_status.h>
35 #include <mach/vm_types.h>
36
37 #include <kern/kern_types.h>
38 #include <kern/task.h>
39 #include <kern/thread.h>
40 #include <kern/misc_protos.h>
41 #include <kern/mach_param.h>
42 #include <kern/spl.h>
43 #include <kern/machine.h>
44 #include <kern/kpc.h>
45
46 #if MONOTONIC
47 #include <kern/monotonic.h>
48 #endif /* MONOTONIC */
49
50 #include <machine/atomic.h>
51 #include <arm64/proc_reg.h>
52 #include <arm64/machine_machdep.h>
53 #include <arm/cpu_data_internal.h>
54 #include <arm/machdep_call.h>
55 #include <arm/misc_protos.h>
56 #include <arm/cpuid.h>
57
58 #include <vm/vm_map.h>
59 #include <vm/vm_protos.h>
60
61 #include <sys/kdebug.h>
62
63
64 #include <san/kcov_stksz.h>
65
extern int debug_task;

/* zone for debug_state area */
ZONE_DECLARE(ads_zone, "arm debug state", sizeof(arm_debug_state_t), ZC_NONE);
/* zone for user (EL0) register/NEON save areas (arm_context_t) */
ZONE_DECLARE(user_ss_zone, "user save state", sizeof(arm_context_t), ZC_NONE);
71
72 /*
73 * Routine: consider_machine_collect
74 *
75 */
void
consider_machine_collect(void)
{
	/* Ask the pmap layer to garbage-collect unused page-table pages. */
	pmap_gc();
}
81
82 /*
83 * Routine: consider_machine_adjust
84 *
85 */
void
consider_machine_adjust(void)
{
	/* Nothing to adjust on this platform. */
}
90
91
92
93
94
95
/*
 * Move ownership of the per-cpu data pointer and pcpu base from the
 * outgoing thread to the incoming one during a context switch / handoff.
 */
static inline void
machine_thread_switch_cpu_data(thread_t old, thread_t new)
{
	/*
	 * We build with -fno-strict-aliasing, so the load through temporaries
	 * is required so that this generates a single load / store pair.
	 */
	cpu_data_t *datap = old->machine.CpuDatap;
	vm_offset_t base = old->machine.pcpu_data_base;

	/* TODO: Should this be ordered? */

	/* Clear the outgoing thread first: it is now off-core (see
	 * machine_thread_on_core(), which tests CpuDatap != NULL). */
	old->machine.CpuDatap = NULL;
	old->machine.pcpu_data_base = 0;

	new->machine.CpuDatap = datap;
	new->machine.pcpu_data_base = base;
}
114
115 /**
116 * routine: machine_switch_pmap_and_extended_context
117 *
118 * Helper function used by machine_switch_context and machine_stack_handoff to switch the
119 * extended context and switch the pmap if necessary.
120 *
121 */
122
/*
 * Helper used by machine_switch_context() and machine_stack_handoff():
 * switches the pmap if the two threads use different address spaces,
 * then hands off the per-cpu data to the incoming thread.
 */
static inline void
machine_switch_pmap_and_extended_context(thread_t old, thread_t new)
{
	pmap_t new_pmap;

	new_pmap = new->map->pmap;
	if (old->map->pmap != new_pmap) {
		/* pmap_switch() updates TTBR0 and issues its own DSB. */
		pmap_switch(new_pmap);
	} else {
		/*
		 * If the thread is preempted while performing cache or TLB maintenance,
		 * it may be migrated to a different CPU between the completion of the relevant
		 * maintenance instruction and the synchronizing DSB. ARM requires that the
		 * synchronizing DSB must be issued *on the PE that issued the maintenance instruction*
		 * in order to guarantee completion of the instruction and visibility of its effects.
		 * Issue DSB here to enforce that guarantee. We only do this for the case in which
		 * the pmap isn't changing, as we expect pmap_switch() to issue DSB when it updates
		 * TTBR0. Note also that cache maintenance may be performed in userspace, so we
		 * cannot further limit this operation e.g. by setting a per-thread flag to indicate
		 * a pending kernel TLB or cache maintenance instruction.
		 */
		__builtin_arm_dsb(DSB_ISH);
	}

	machine_thread_switch_cpu_data(old, new);
}
156
157 /*
158 * Routine: machine_switch_context
159 *
160 */
161 thread_t
machine_switch_context(thread_t old,thread_continue_t continuation,thread_t new)162 machine_switch_context(thread_t old,
163 thread_continue_t continuation,
164 thread_t new)
165 {
166 thread_t retval;
167
168 #if __ARM_PAN_AVAILABLE__
169 if (__improbable(__builtin_arm_rsr("pan") == 0)) {
170 panic("context switch with PAN disabled");
171 }
172 #endif
173
174 #define machine_switch_context_kprintf(x...) \
175 /* kprintf("machine_switch_context: " x) */
176
177 if (old == new) {
178 panic("machine_switch_context");
179 }
180
181 kpc_off_cpu(old);
182
183 machine_switch_pmap_and_extended_context(old, new);
184
185 machine_switch_context_kprintf("old= %x contination = %x new = %x\n", old, continuation, new);
186
187 retval = Switch_context(old, continuation, new);
188 assert(retval != NULL);
189
190 return retval;
191 }
192
193 boolean_t
machine_thread_on_core(thread_t thread)194 machine_thread_on_core(thread_t thread)
195 {
196 return thread->machine.CpuDatap != NULL;
197 }
198
199
200 /*
201 * Routine: machine_thread_create
202 *
203 */
/*
 * Routine: machine_thread_create
 *
 * Initialize the machine-dependent portion of a new thread: per-cpu
 * bookkeeping, PAC keys (when available), and the userspace save state
 * area for non-kernel threads.
 */
void
machine_thread_create(thread_t thread, task_t task, bool first_thread)
{
#define machine_thread_create_kprintf(x...) \
	/* kprintf("machine_thread_create: " x) */

	machine_thread_create_kprintf("thread = %x\n", thread);

	if (!first_thread) {
		thread->machine.CpuDatap = (cpu_data_t *)0;
		// setting this offset will cause trying to use it to panic
		thread->machine.pcpu_data_base = (vm_offset_t)VM_MIN_KERNEL_ADDRESS;
	}
	thread->machine.preemption_count = 0;
	thread->machine.cthread_self = 0;
	thread->machine.kpcb = NULL;
	thread->machine.exception_trace_code = 0;
#if defined(HAS_APPLE_PAC)
	/* Inherit the task-wide pointer-authentication keys and policy. */
	thread->machine.rop_pid = task->rop_pid;
	thread->machine.jop_pid = task->jop_pid;
	thread->machine.disable_user_jop = task->disable_user_jop;
#endif

	if (task != kernel_task) {
		/* If this isn't a kernel thread, we'll have userspace state. */
		thread->machine.contextData = zalloc_flags(user_ss_zone,
		    Z_WAITOK | Z_NOFAIL);

		/* upcb/uNeon alias into the single contextData allocation. */
		thread->machine.upcb = &thread->machine.contextData->ss;
		thread->machine.uNeon = &thread->machine.contextData->ns;

		/* Tag the save areas with the flavor matching the task's data width. */
		if (task_has_64Bit_data(task)) {
			thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
			thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
			thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
			thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
		} else {
			thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
			thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
			thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
			thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		}
	} else {
		/* Kernel threads never return to userspace: no user save state. */
		thread->machine.upcb = NULL;
		thread->machine.uNeon = NULL;
		thread->machine.contextData = NULL;
	}

	bzero(&thread->machine.perfctrl_state, sizeof(thread->machine.perfctrl_state));
	machine_thread_state_initialize(thread);
}
259
260 /*
261 * Routine: machine_thread_destroy
262 *
263 */
/*
 * Routine: machine_thread_destroy
 *
 * Release the machine-dependent state owned by a dying thread: the
 * user save-state allocation and any per-thread debug state.
 */
void
machine_thread_destroy(thread_t thread)
{
	arm_context_t *thread_user_ss;

	if (thread->machine.contextData) {
		/* Disassociate the user save state from the thread before we free it. */
		thread_user_ss = thread->machine.contextData;
		thread->machine.upcb = NULL;
		thread->machine.uNeon = NULL;
		thread->machine.contextData = NULL;

		zfree(user_ss_zone, thread_user_ss);
	}

	if (thread->machine.DebugData != NULL) {
		/* If this thread's debug state is live on this CPU, turn it off first. */
		if (thread->machine.DebugData == getCpuDatap()->cpu_user_debug) {
			arm_debug_set(NULL);
		}

		zfree(ads_zone, thread->machine.DebugData);
	}
}
288
289
290 /*
291 * Routine: machine_thread_init
292 *
293 */
/*
 * Routine: machine_thread_init
 *
 * One-time machine-dependent thread subsystem init; nothing to do here.
 */
void
machine_thread_init(void)
{
}
298
299 /*
300 * Routine: machine_thread_template_init
301 *
302 */
/*
 * Routine: machine_thread_template_init
 *
 */
void
machine_thread_template_init(thread_t __unused thr_template)
{
	/* Nothing to do on this platform. */
}
308
309 /*
310 * Routine: get_useraddr
311 *
312 */
313 user_addr_t
get_useraddr()314 get_useraddr()
315 {
316 return get_saved_state_pc(current_thread()->machine.upcb);
317 }
318
319 /*
320 * Routine: machine_stack_detach
321 *
322 */
/*
 * Routine: machine_stack_detach
 *
 * Detach and return the kernel stack from a thread, clearing the
 * thread's stack bookkeeping.
 */
vm_offset_t
machine_stack_detach(thread_t thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	stack = thread->kernel_stack;
#if CONFIG_STKSZ
	/* Let the coverage/stack-size tooling know the stack is detached. */
	kcov_stksz_set_thread_stack(thread, stack);
#endif
	thread->kernel_stack = 0;
	thread->machine.kstackptr = 0;

	return stack;
}
340
341
342 /*
343 * Routine: machine_stack_attach
344 *
345 */
/*
 * Routine: machine_stack_attach
 *
 * Attach a kernel stack to a thread and initialize the kernel saved
 * state at its top so that a switch to this thread resumes in
 * thread_continue().
 */
void
machine_stack_attach(thread_t thread,
    vm_offset_t stack)
{
	struct arm_kernel_context *context;
	struct arm_kernel_saved_state *savestate;
	struct arm_kernel_neon_saved_state *neon_savestate;
	uint32_t current_el;

#define machine_stack_attach_kprintf(x...) \
	/* kprintf("machine_stack_attach: " x) */

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	thread->kernel_stack = stack;
#if CONFIG_STKSZ
	kcov_stksz_set_thread_stack(thread, 0);
#endif
	/* thread_kernel_state lives at the top of the stack; sp starts below it. */
	thread->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
	thread_initialize_kernel_state(thread);

	machine_stack_attach_kprintf("kstackptr: %lx\n", (vm_address_t)thread->machine.kstackptr);

	/* NOTE(review): current_el is not consumed in the code visible here —
	 * presumably used by configuration-gated code elided from this view; confirm. */
	current_el = (uint32_t) __builtin_arm_rsr64("CurrentEL");
	context = &((thread_kernel_state_t) thread->machine.kstackptr)->machine;
	savestate = &context->ss;
	savestate->fp = 0;
	savestate->sp = thread->machine.kstackptr;
	savestate->pc = 0;
#if defined(HAS_APPLE_PAC)
	/* Sign the initial kernel stack saved state */
	uint64_t intr = ml_pac_safe_interrupts_disable();
	/* lr := pacia(thread_continue, sp) — PACIA1716 signs x17 with modifier x16. */
	asm volatile (
                "adrp x17, _thread_continue@page"               "\n"
                "add x17, x17, _thread_continue@pageoff"        "\n"
                "ldr x16, [%[ss], %[SS64_SP]]"                  "\n"
                "pacia1716"                                     "\n"
                "str x17, [%[ss], %[SS64_LR]]"                  "\n"
                :
                : [ss] "r"(&context->ss),
                  [SS64_SP] "i"(offsetof(struct arm_kernel_saved_state, sp)),
                  [SS64_LR] "i"(offsetof(struct arm_kernel_saved_state, lr))
                : "x16", "x17"
	);
	ml_pac_safe_interrupts_restore(intr);
#else
	savestate->lr = (uintptr_t)thread_continue;
#endif /* defined(HAS_APPLE_PAC) */
	neon_savestate = &context->ns;
	neon_savestate->fpcr = FPCR_DEFAULT;
	machine_stack_attach_kprintf("thread = %p pc = %llx, sp = %llx\n", thread, savestate->lr, savestate->sp);
}
399
400
401 /*
402 * Routine: machine_stack_handoff
403 *
404 */
/*
 * Routine: machine_stack_handoff
 *
 * Hand the current kernel stack directly from 'old' to 'new' without a
 * full register switch, then switch pmap / per-cpu data and make 'new'
 * the current thread.
 */
void
machine_stack_handoff(thread_t old,
    thread_t new)
{
	vm_offset_t stack;

#if __ARM_PAN_AVAILABLE__
	/* PAN must remain enabled while running in the kernel. */
	if (__improbable(__builtin_arm_rsr("pan") == 0)) {
		panic("stack handoff with PAN disabled");
	}
#endif

	kpc_off_cpu(old);

	stack = machine_stack_detach(old);
#if CONFIG_STKSZ
	kcov_stksz_set_thread_stack(new, 0);
#endif
	new->kernel_stack = stack;
	new->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
	/* If the reserved (emergency) stack changed hands, swap ownership too. */
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}

	machine_switch_pmap_and_extended_context(old, new);

	machine_set_current_thread(new);
	thread_initialize_kernel_state(new);
}
436
437
438 /*
439 * Routine: call_continuation
440 *
441 */
/*
 * Routine: call_continuation
 *
 * Thin wrapper over the assembly Call_continuation() trampoline, which
 * resets the kernel stack and invokes the continuation.
 */
void
call_continuation(thread_continue_t continuation,
    void *parameter,
    wait_result_t wresult,
    boolean_t enable_interrupts)
{
#define call_continuation_kprintf(x...) \
	/* kprintf("call_continuation_kprintf:" x) */

	call_continuation_kprintf("thread = %p continuation = %p, stack = %p\n", current_thread(), continuation, current_thread()->machine.kstackptr);
	Call_continuation(continuation, parameter, wresult, enable_interrupts);
}
454
/*
 * Helpers for programming the self-hosted debug registers.  The *CRn
 * (control) variants also OR the written value into 'accum' so the
 * caller can tell afterwards whether any breakpoint/watchpoint is
 * enabled at all.
 */
#define SET_DBGBCRn(n, value, accum) \
	__asm__ volatile( \
		"msr DBGBCR" #n "_EL1, %[val]\n" \
		"orr %[result], %[result], %[val]\n" \
		: [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGBVRn(n, value) \
	__asm__ volatile("msr DBGBVR" #n "_EL1, %0" : : "r"(value))

#define SET_DBGWCRn(n, value, accum) \
	__asm__ volatile( \
		"msr DBGWCR" #n "_EL1, %[val]\n" \
		"orr %[result], %[result], %[val]\n" \
		: [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGWVRn(n, value) \
	__asm__ volatile("msr DBGWVR" #n "_EL1, %0" : : "r"(value))
472
/*
 * Program the CPU's self-hosted debug registers from a 32-bit flavor
 * debug state (or disable debug entirely when debug_state is NULL).
 * Runs with interrupts disabled; only registers the hardware actually
 * implements (per arm_debug_info()) are written — hence the fallthrough
 * switches counting down from the implemented pair count.
 */
void
arm_debug_set32(arm_debug_state_t *debug_state)
{
	struct cpu_data *  cpu_data_ptr;
	arm_debug_info_t * debug_info = arm_debug_info();
	boolean_t          intr;
	arm_debug_state_t  off_state;
	uint64_t           all_ctrls = 0;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	// Set current user debug
	cpu_data_ptr->cpu_user_debug = debug_state;

	/* NULL means "turn everything off": substitute an all-zero state. */
	if (NULL == debug_state) {
		bzero(&off_state, sizeof(off_state));
		debug_state = &off_state;
	}

	/* Program implemented breakpoint pairs, highest index first. */
	switch (debug_info->num_breakpoint_pairs) {
	case 16:
		SET_DBGBVRn(15, (uint64_t)debug_state->uds.ds32.bvr[15]);
		SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds32.bcr[15], all_ctrls);
		OS_FALLTHROUGH;
	case 15:
		SET_DBGBVRn(14, (uint64_t)debug_state->uds.ds32.bvr[14]);
		SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds32.bcr[14], all_ctrls);
		OS_FALLTHROUGH;
	case 14:
		SET_DBGBVRn(13, (uint64_t)debug_state->uds.ds32.bvr[13]);
		SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds32.bcr[13], all_ctrls);
		OS_FALLTHROUGH;
	case 13:
		SET_DBGBVRn(12, (uint64_t)debug_state->uds.ds32.bvr[12]);
		SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds32.bcr[12], all_ctrls);
		OS_FALLTHROUGH;
	case 12:
		SET_DBGBVRn(11, (uint64_t)debug_state->uds.ds32.bvr[11]);
		SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds32.bcr[11], all_ctrls);
		OS_FALLTHROUGH;
	case 11:
		SET_DBGBVRn(10, (uint64_t)debug_state->uds.ds32.bvr[10]);
		SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds32.bcr[10], all_ctrls);
		OS_FALLTHROUGH;
	case 10:
		SET_DBGBVRn(9, (uint64_t)debug_state->uds.ds32.bvr[9]);
		SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds32.bcr[9], all_ctrls);
		OS_FALLTHROUGH;
	case 9:
		SET_DBGBVRn(8, (uint64_t)debug_state->uds.ds32.bvr[8]);
		SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds32.bcr[8], all_ctrls);
		OS_FALLTHROUGH;
	case 8:
		SET_DBGBVRn(7, (uint64_t)debug_state->uds.ds32.bvr[7]);
		SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds32.bcr[7], all_ctrls);
		OS_FALLTHROUGH;
	case 7:
		SET_DBGBVRn(6, (uint64_t)debug_state->uds.ds32.bvr[6]);
		SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds32.bcr[6], all_ctrls);
		OS_FALLTHROUGH;
	case 6:
		SET_DBGBVRn(5, (uint64_t)debug_state->uds.ds32.bvr[5]);
		SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds32.bcr[5], all_ctrls);
		OS_FALLTHROUGH;
	case 5:
		SET_DBGBVRn(4, (uint64_t)debug_state->uds.ds32.bvr[4]);
		SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds32.bcr[4], all_ctrls);
		OS_FALLTHROUGH;
	case 4:
		SET_DBGBVRn(3, (uint64_t)debug_state->uds.ds32.bvr[3]);
		SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds32.bcr[3], all_ctrls);
		OS_FALLTHROUGH;
	case 3:
		SET_DBGBVRn(2, (uint64_t)debug_state->uds.ds32.bvr[2]);
		SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds32.bcr[2], all_ctrls);
		OS_FALLTHROUGH;
	case 2:
		SET_DBGBVRn(1, (uint64_t)debug_state->uds.ds32.bvr[1]);
		SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds32.bcr[1], all_ctrls);
		OS_FALLTHROUGH;
	case 1:
		SET_DBGBVRn(0, (uint64_t)debug_state->uds.ds32.bvr[0]);
		SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds32.bcr[0], all_ctrls);
		OS_FALLTHROUGH;
	default:
		break;
	}

	/* Program implemented watchpoint pairs, highest index first. */
	switch (debug_info->num_watchpoint_pairs) {
	case 16:
		SET_DBGWVRn(15, (uint64_t)debug_state->uds.ds32.wvr[15]);
		SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds32.wcr[15], all_ctrls);
		OS_FALLTHROUGH;
	case 15:
		SET_DBGWVRn(14, (uint64_t)debug_state->uds.ds32.wvr[14]);
		SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds32.wcr[14], all_ctrls);
		OS_FALLTHROUGH;
	case 14:
		SET_DBGWVRn(13, (uint64_t)debug_state->uds.ds32.wvr[13]);
		SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds32.wcr[13], all_ctrls);
		OS_FALLTHROUGH;
	case 13:
		SET_DBGWVRn(12, (uint64_t)debug_state->uds.ds32.wvr[12]);
		SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds32.wcr[12], all_ctrls);
		OS_FALLTHROUGH;
	case 12:
		SET_DBGWVRn(11, (uint64_t)debug_state->uds.ds32.wvr[11]);
		SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds32.wcr[11], all_ctrls);
		OS_FALLTHROUGH;
	case 11:
		SET_DBGWVRn(10, (uint64_t)debug_state->uds.ds32.wvr[10]);
		SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds32.wcr[10], all_ctrls);
		OS_FALLTHROUGH;
	case 10:
		SET_DBGWVRn(9, (uint64_t)debug_state->uds.ds32.wvr[9]);
		SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds32.wcr[9], all_ctrls);
		OS_FALLTHROUGH;
	case 9:
		SET_DBGWVRn(8, (uint64_t)debug_state->uds.ds32.wvr[8]);
		SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds32.wcr[8], all_ctrls);
		OS_FALLTHROUGH;
	case 8:
		SET_DBGWVRn(7, (uint64_t)debug_state->uds.ds32.wvr[7]);
		SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds32.wcr[7], all_ctrls);
		OS_FALLTHROUGH;
	case 7:
		SET_DBGWVRn(6, (uint64_t)debug_state->uds.ds32.wvr[6]);
		SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds32.wcr[6], all_ctrls);
		OS_FALLTHROUGH;
	case 6:
		SET_DBGWVRn(5, (uint64_t)debug_state->uds.ds32.wvr[5]);
		SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds32.wcr[5], all_ctrls);
		OS_FALLTHROUGH;
	case 5:
		SET_DBGWVRn(4, (uint64_t)debug_state->uds.ds32.wvr[4]);
		SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds32.wcr[4], all_ctrls);
		OS_FALLTHROUGH;
	case 4:
		SET_DBGWVRn(3, (uint64_t)debug_state->uds.ds32.wvr[3]);
		SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds32.wcr[3], all_ctrls);
		OS_FALLTHROUGH;
	case 3:
		SET_DBGWVRn(2, (uint64_t)debug_state->uds.ds32.wvr[2]);
		SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds32.wcr[2], all_ctrls);
		OS_FALLTHROUGH;
	case 2:
		SET_DBGWVRn(1, (uint64_t)debug_state->uds.ds32.wvr[1]);
		SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds32.wcr[1], all_ctrls);
		OS_FALLTHROUGH;
	case 1:
		SET_DBGWVRn(0, (uint64_t)debug_state->uds.ds32.wvr[0]);
		SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds32.wcr[0], all_ctrls);
		OS_FALLTHROUGH;
	default:
		break;
	}

#if defined(CONFIG_KERNEL_INTEGRITY)
	/* Privileged-mode breakpoints/watchpoints are never allowed. */
	if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
		panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
	}
#endif

	/*
	 * Breakpoint/Watchpoint Enable
	 */
	if (all_ctrls != 0) {
		update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
	} else {
		update_mdscr(0x8000, 0);
	}

	/*
	 * Software debug single step enable
	 */
	if (debug_state->uds.ds32.mdscr_el1 & 0x1) {
		update_mdscr(0x8000, 1); // ~MDE | SS : no brk/watch while single stepping (which we've set)

		mask_saved_state_cpsr(current_thread()->machine.upcb, PSR64_SS, 0);
	} else {
		update_mdscr(0x1, 0);

#if SINGLE_STEP_RETIRE_ERRATA
		// Workaround for radar 20619637
		__builtin_arm_isb(ISB_SY);
#endif
	}

	(void) ml_set_interrupts_enabled(intr);
}
664
/*
 * 64-bit flavor counterpart of arm_debug_set32(): program the CPU's
 * self-hosted debug registers from a 64-bit debug state (or disable
 * debug entirely when debug_state is NULL).  Structure mirrors
 * arm_debug_set32(); see comments there.
 */
void
arm_debug_set64(arm_debug_state_t *debug_state)
{
	struct cpu_data *  cpu_data_ptr;
	arm_debug_info_t * debug_info = arm_debug_info();
	boolean_t          intr;
	arm_debug_state_t  off_state;
	uint64_t           all_ctrls = 0;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	// Set current user debug
	cpu_data_ptr->cpu_user_debug = debug_state;

	/* NULL means "turn everything off": substitute an all-zero state. */
	if (NULL == debug_state) {
		bzero(&off_state, sizeof(off_state));
		debug_state = &off_state;
	}

	/* Program implemented breakpoint pairs, highest index first. */
	switch (debug_info->num_breakpoint_pairs) {
	case 16:
		SET_DBGBVRn(15, debug_state->uds.ds64.bvr[15]);
		SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds64.bcr[15], all_ctrls);
		OS_FALLTHROUGH;
	case 15:
		SET_DBGBVRn(14, debug_state->uds.ds64.bvr[14]);
		SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds64.bcr[14], all_ctrls);
		OS_FALLTHROUGH;
	case 14:
		SET_DBGBVRn(13, debug_state->uds.ds64.bvr[13]);
		SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds64.bcr[13], all_ctrls);
		OS_FALLTHROUGH;
	case 13:
		SET_DBGBVRn(12, debug_state->uds.ds64.bvr[12]);
		SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds64.bcr[12], all_ctrls);
		OS_FALLTHROUGH;
	case 12:
		SET_DBGBVRn(11, debug_state->uds.ds64.bvr[11]);
		SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds64.bcr[11], all_ctrls);
		OS_FALLTHROUGH;
	case 11:
		SET_DBGBVRn(10, debug_state->uds.ds64.bvr[10]);
		SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds64.bcr[10], all_ctrls);
		OS_FALLTHROUGH;
	case 10:
		SET_DBGBVRn(9, debug_state->uds.ds64.bvr[9]);
		SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds64.bcr[9], all_ctrls);
		OS_FALLTHROUGH;
	case 9:
		SET_DBGBVRn(8, debug_state->uds.ds64.bvr[8]);
		SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds64.bcr[8], all_ctrls);
		OS_FALLTHROUGH;
	case 8:
		SET_DBGBVRn(7, debug_state->uds.ds64.bvr[7]);
		SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds64.bcr[7], all_ctrls);
		OS_FALLTHROUGH;
	case 7:
		SET_DBGBVRn(6, debug_state->uds.ds64.bvr[6]);
		SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds64.bcr[6], all_ctrls);
		OS_FALLTHROUGH;
	case 6:
		SET_DBGBVRn(5, debug_state->uds.ds64.bvr[5]);
		SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds64.bcr[5], all_ctrls);
		OS_FALLTHROUGH;
	case 5:
		SET_DBGBVRn(4, debug_state->uds.ds64.bvr[4]);
		SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds64.bcr[4], all_ctrls);
		OS_FALLTHROUGH;
	case 4:
		SET_DBGBVRn(3, debug_state->uds.ds64.bvr[3]);
		SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds64.bcr[3], all_ctrls);
		OS_FALLTHROUGH;
	case 3:
		SET_DBGBVRn(2, debug_state->uds.ds64.bvr[2]);
		SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds64.bcr[2], all_ctrls);
		OS_FALLTHROUGH;
	case 2:
		SET_DBGBVRn(1, debug_state->uds.ds64.bvr[1]);
		SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds64.bcr[1], all_ctrls);
		OS_FALLTHROUGH;
	case 1:
		SET_DBGBVRn(0, debug_state->uds.ds64.bvr[0]);
		SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds64.bcr[0], all_ctrls);
		OS_FALLTHROUGH;
	default:
		break;
	}

	/* Program implemented watchpoint pairs, highest index first. */
	switch (debug_info->num_watchpoint_pairs) {
	case 16:
		SET_DBGWVRn(15, debug_state->uds.ds64.wvr[15]);
		SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds64.wcr[15], all_ctrls);
		OS_FALLTHROUGH;
	case 15:
		SET_DBGWVRn(14, debug_state->uds.ds64.wvr[14]);
		SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds64.wcr[14], all_ctrls);
		OS_FALLTHROUGH;
	case 14:
		SET_DBGWVRn(13, debug_state->uds.ds64.wvr[13]);
		SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds64.wcr[13], all_ctrls);
		OS_FALLTHROUGH;
	case 13:
		SET_DBGWVRn(12, debug_state->uds.ds64.wvr[12]);
		SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds64.wcr[12], all_ctrls);
		OS_FALLTHROUGH;
	case 12:
		SET_DBGWVRn(11, debug_state->uds.ds64.wvr[11]);
		SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds64.wcr[11], all_ctrls);
		OS_FALLTHROUGH;
	case 11:
		SET_DBGWVRn(10, debug_state->uds.ds64.wvr[10]);
		SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds64.wcr[10], all_ctrls);
		OS_FALLTHROUGH;
	case 10:
		SET_DBGWVRn(9, debug_state->uds.ds64.wvr[9]);
		SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds64.wcr[9], all_ctrls);
		OS_FALLTHROUGH;
	case 9:
		SET_DBGWVRn(8, debug_state->uds.ds64.wvr[8]);
		SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds64.wcr[8], all_ctrls);
		OS_FALLTHROUGH;
	case 8:
		SET_DBGWVRn(7, debug_state->uds.ds64.wvr[7]);
		SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds64.wcr[7], all_ctrls);
		OS_FALLTHROUGH;
	case 7:
		SET_DBGWVRn(6, debug_state->uds.ds64.wvr[6]);
		SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds64.wcr[6], all_ctrls);
		OS_FALLTHROUGH;
	case 6:
		SET_DBGWVRn(5, debug_state->uds.ds64.wvr[5]);
		SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds64.wcr[5], all_ctrls);
		OS_FALLTHROUGH;
	case 5:
		SET_DBGWVRn(4, debug_state->uds.ds64.wvr[4]);
		SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds64.wcr[4], all_ctrls);
		OS_FALLTHROUGH;
	case 4:
		SET_DBGWVRn(3, debug_state->uds.ds64.wvr[3]);
		SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds64.wcr[3], all_ctrls);
		OS_FALLTHROUGH;
	case 3:
		SET_DBGWVRn(2, debug_state->uds.ds64.wvr[2]);
		SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds64.wcr[2], all_ctrls);
		OS_FALLTHROUGH;
	case 2:
		SET_DBGWVRn(1, debug_state->uds.ds64.wvr[1]);
		SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds64.wcr[1], all_ctrls);
		OS_FALLTHROUGH;
	case 1:
		SET_DBGWVRn(0, debug_state->uds.ds64.wvr[0]);
		SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds64.wcr[0], all_ctrls);
		OS_FALLTHROUGH;
	default:
		break;
	}

#if defined(CONFIG_KERNEL_INTEGRITY)
	/* Privileged-mode breakpoints/watchpoints are never allowed. */
	if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
		panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
	}
#endif

	/*
	 * Breakpoint/Watchpoint Enable
	 */
	if (all_ctrls != 0) {
		update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
	} else {
		update_mdscr(0x8000, 0);
	}

	/*
	 * Software debug single step enable
	 */
	if (debug_state->uds.ds64.mdscr_el1 & 0x1) {
		update_mdscr(0x8000, 1); // ~MDE | SS : no brk/watch while single stepping (which we've set)

		mask_saved_state_cpsr(current_thread()->machine.upcb, PSR64_SS, 0);
	} else {
		update_mdscr(0x1, 0);

#if SINGLE_STEP_RETIRE_ERRATA
		// Workaround for radar 20619637
		__builtin_arm_isb(ISB_SY);
#endif
	}

	(void) ml_set_interrupts_enabled(intr);
}
856
857 void
arm_debug_set(arm_debug_state_t * debug_state)858 arm_debug_set(arm_debug_state_t *debug_state)
859 {
860 if (debug_state) {
861 switch (debug_state->dsh.flavor) {
862 case ARM_DEBUG_STATE32:
863 arm_debug_set32(debug_state);
864 break;
865 case ARM_DEBUG_STATE64:
866 arm_debug_set64(debug_state);
867 break;
868 default:
869 panic("arm_debug_set");
870 break;
871 }
872 } else {
873 if (thread_is_64bit_data(current_thread())) {
874 arm_debug_set64(debug_state);
875 } else {
876 arm_debug_set32(debug_state);
877 }
878 }
879 }
880
881 #define VM_MAX_ADDRESS32 ((vm_address_t) 0x80000000)
882 boolean_t
debug_legacy_state_is_valid(arm_legacy_debug_state_t * debug_state)883 debug_legacy_state_is_valid(arm_legacy_debug_state_t *debug_state)
884 {
885 arm_debug_info_t *debug_info = arm_debug_info();
886 uint32_t i;
887 for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
888 if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i]) {
889 return FALSE;
890 }
891 }
892
893 for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
894 if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i]) {
895 return FALSE;
896 }
897 }
898 return TRUE;
899 }
900
901 boolean_t
debug_state_is_valid32(arm_debug_state32_t * debug_state)902 debug_state_is_valid32(arm_debug_state32_t *debug_state)
903 {
904 arm_debug_info_t *debug_info = arm_debug_info();
905 uint32_t i;
906 for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
907 if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i]) {
908 return FALSE;
909 }
910 }
911
912 for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
913 if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i]) {
914 return FALSE;
915 }
916 }
917 return TRUE;
918 }
919
920 boolean_t
debug_state_is_valid64(arm_debug_state64_t * debug_state)921 debug_state_is_valid64(arm_debug_state64_t *debug_state)
922 {
923 arm_debug_info_t *debug_info = arm_debug_info();
924 uint32_t i;
925 for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
926 if (0 != debug_state->bcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->bvr[i]) {
927 return FALSE;
928 }
929 }
930
931 for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
932 if (0 != debug_state->wcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->wvr[i]) {
933 return FALSE;
934 }
935 }
936 return TRUE;
937 }
938
939 /*
940 * Duplicate one arm_debug_state_t to another. "all" parameter
941 * is ignored in the case of ARM -- Is this the right assumption?
942 */
/*
 * Duplicate one arm_debug_state_t to another. "all" parameter
 * is ignored in the case of ARM -- Is this the right assumption?
 */
void
copy_legacy_debug_state(arm_legacy_debug_state_t * src,
    arm_legacy_debug_state_t *           target,
    __unused boolean_t                   all)
{
	bcopy(src, target, sizeof(arm_legacy_debug_state_t));
}
950
/* Duplicate a 32-bit debug state; "all" is ignored on ARM. */
void
copy_debug_state32(arm_debug_state32_t * src,
    arm_debug_state32_t *      target,
    __unused boolean_t         all)
{
	bcopy(src, target, sizeof(arm_debug_state32_t));
}
958
/* Duplicate a 64-bit debug state; "all" is ignored on ARM. */
void
copy_debug_state64(arm_debug_state64_t * src,
    arm_debug_state64_t *      target,
    __unused boolean_t         all)
{
	bcopy(src, target, sizeof(arm_debug_state64_t));
}
966
967 kern_return_t
machine_thread_set_tsd_base(thread_t thread,mach_vm_offset_t tsd_base)968 machine_thread_set_tsd_base(thread_t thread,
969 mach_vm_offset_t tsd_base)
970 {
971 if (get_threadtask(thread) == kernel_task) {
972 return KERN_INVALID_ARGUMENT;
973 }
974
975 if (thread_is_64bit_addr(thread)) {
976 if (tsd_base > vm_map_max(thread->map)) {
977 tsd_base = 0ULL;
978 }
979 } else {
980 if (tsd_base > UINT32_MAX) {
981 tsd_base = 0ULL;
982 }
983 }
984
985 thread->machine.cthread_self = tsd_base;
986
987 /* For current thread, make the TSD base active immediately */
988 if (thread == current_thread()) {
989 mp_disable_preemption();
990 set_tpidrro(tsd_base);
991 mp_enable_preemption();
992 }
993
994 return KERN_SUCCESS;
995 }
996
/* Thread exception-cleanup hook; no machine-specific work on this platform. */
void
machine_tecs(__unused thread_t thr)
{
}
1001
/* CPU-vulnerability query; this platform reports none (always 0). */
int
machine_csv(__unused cpuvn_e cve)
{
	return 0;
}
1007
#if __ARM_ARCH_8_5__
/*
 * Request that the next context switch on this CPU performs extra
 * synchronization, by setting the per-cpu sync_on_cswitch flag.
 * (fixed: empty K&R-style parameter list replaced with (void) prototype)
 */
void
arm_context_switch_requires_sync(void)
{
	current_cpu_datap()->sync_on_cswitch = 1;
}
#endif
1015
#if __has_feature(ptrauth_calls)
/* Whether user-mode JOP (pointer auth) is globally disabled; always FALSE here. */
boolean_t
arm_user_jop_disabled(void)
{
	return FALSE;
}
#endif /* __has_feature(ptrauth_calls) */
1023