1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58
59 /*
60 * Hardware trap/fault handler.
61 */
62
63 #include <mach_kdp.h>
64 #include <mach_ldebug.h>
65
66 #include <types.h>
67 #include <i386/eflags.h>
68 #include <i386/trap_internal.h>
69 #include <i386/pmap.h>
70 #include <i386/fpu.h>
71 #include <i386/panic_notify.h>
72 #include <i386/lapic.h>
73
74 #include <mach/exception.h>
75 #include <mach/kern_return.h>
76 #include <mach/vm_param.h>
77 #include <mach/i386/thread_status.h>
78
79 #include <vm/vm_kern.h>
80 #include <vm/vm_fault.h>
81 #include <vm/vm_map_xnu.h>
82
83 #include <kern/kern_types.h>
84 #include <kern/processor.h>
85 #include <kern/thread.h>
86 #include <kern/task.h>
87 #include <kern/restartable.h>
88 #include <kern/sched.h>
89 #include <kern/sched_prim.h>
90 #include <kern/exception.h>
91 #include <kern/spl.h>
92 #include <kern/misc_protos.h>
93 #include <kern/debug.h>
94 #if CONFIG_TELEMETRY
95 #include <kern/telemetry.h>
96 #include <kern/trap_telemetry.h>
97 #endif
98 #include <kern/zalloc_internal.h>
99 #include <sys/kdebug.h>
100 #include <kperf/kperf.h>
101 #include <prng/random.h>
102 #include <prng/entropy.h>
103
104 #include <string.h>
105
106 #include <i386/postcode.h>
107 #include <i386/mp_desc.h>
108 #include <i386/proc_reg.h>
109 #include <i386/machine_routines.h>
110 #if CONFIG_MCA
111 #include <i386/machine_check.h>
112 #endif
113 #include <mach/i386/syscall_sw.h>
114
115 #include <libkern/OSDebug.h>
116 #include <i386/cpu_threads.h>
117 #include <machine/pal_routines.h>
118 #include <i386/lbr.h>
119
120 extern void throttle_lowpri_io(int);
121 extern void kprint_state(x86_saved_state64_t *saved_state);
122 #if DEVELOPMENT || DEBUG
123 int insnstream_force_cacheline_mismatch = 0;
124 extern int panic_on_cacheline_mismatch;
125 extern char panic_on_trap_procname[];
126 extern uint32_t panic_on_trap_mask;
127 #endif
128
129 extern int insn_copyin_count;
130
131 /*
132 * Forward declarations
133 */
134 static void panic_trap(x86_saved_state64_t *saved_state, uint16_t comment, const char *trapreason, uint32_t pl, kern_return_t fault_result) __dead2;
135 static void set_recovery_ip(x86_saved_state64_t *saved_state, vm_offset_t ip);
136 #if DEVELOPMENT || DEBUG
137 static __attribute__((noinline)) void copy_instruction_stream(thread_t thread, uint64_t rip, int trap_code, bool inspect_cacheline);
138 #else
139 static __attribute__((noinline)) void copy_instruction_stream(thread_t thread, uint64_t rip, int trap_code);
140 #endif
141
142 #if CONFIG_DTRACE
143 /* See <rdar://problem/4613924> */
144 perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */
145
146 extern boolean_t dtrace_tally_fault(user_addr_t);
147 extern boolean_t dtrace_handle_trap(int, x86_saved_state_t *);
148 #endif
149
150 #ifdef MACH_BSD
151 extern char * proc_name_address(void *p);
152 #endif /* MACH_BSD */
153
154 extern boolean_t pmap_smep_enabled;
155 extern boolean_t pmap_smap_enabled;
156
157 __attribute__((noreturn))
158 void
159 thread_syscall_return(
160 kern_return_t ret)
161 {
162 thread_t thr_act = current_thread();
163 boolean_t is_mach;
164 int code;
165
166 pal_register_cache_state(thr_act, DIRTY);
167
168 if (thread_is_64bit_addr(thr_act)) {
169 x86_saved_state64_t *regs;
170
171 regs = USER_REGS64(thr_act);
172
173 code = (int) (regs->rax & SYSCALL_NUMBER_MASK);
174 is_mach = (regs->rax & SYSCALL_CLASS_MASK)
175 == (SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT);
176 if (kdebug_enable && is_mach) {
177 /* Mach trap */
178 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
179 MACHDBG_CODE(DBG_MACH_EXCP_SC, code) | DBG_FUNC_END,
180 ret, 0, 0, 0, 0);
181 }
182 regs->rax = ret;
183 #if DEBUG
184 if (is_mach) {
185 DEBUG_KPRINT_SYSCALL_MACH(
186 "thread_syscall_return: 64-bit mach ret=%u\n",
187 ret);
188 } else {
189 DEBUG_KPRINT_SYSCALL_UNIX(
190 "thread_syscall_return: 64-bit unix ret=%u\n",
191 ret);
192 }
193 #endif
194 } else {
195 x86_saved_state32_t *regs;
196
197 regs = USER_REGS32(thr_act);
198
199 code = ((int) regs->eax);
200 is_mach = (code < 0);
201 if (kdebug_enable && is_mach) {
202 /* Mach trap */
203 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
204 MACHDBG_CODE(DBG_MACH_EXCP_SC, -code) | DBG_FUNC_END,
205 ret, 0, 0, 0, 0);
206 }
207 regs->eax = ret;
208 #if DEBUG
209 if (is_mach) {
210 DEBUG_KPRINT_SYSCALL_MACH(
211 "thread_syscall_return: 32-bit mach ret=%u\n",
212 ret);
213 } else {
214 DEBUG_KPRINT_SYSCALL_UNIX(
215 "thread_syscall_return: 32-bit unix ret=%u\n",
216 ret);
217 }
218 #endif
219 }
220
221 #if DEBUG || DEVELOPMENT
222 kern_allocation_name_t
223 prior __assert_only = thread_get_kernel_state(thr_act)->allocation_name;
224 assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
225 #endif /* DEBUG || DEVELOPMENT */
226
227 throttle_lowpri_io(1);
228
229 thread_exception_return();
230 /*NOTREACHED*/
231 }
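
/*
 * A minimal sketch (guarded out, never compiled) of the 64-bit syscall
 * classification above, using the SYSCALL_CLASS_* constants from
 * mach/i386/syscall_sw.h; the sample %rax value is hypothetical and shown
 * only to illustrate the decode.
 */
#if 0
uint64_t rax = (SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT) | 31;
int code = (int)(rax & SYSCALL_NUMBER_MASK);            /* 31 */
boolean_t is_mach = (rax & SYSCALL_CLASS_MASK) ==
    (SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT);        /* TRUE */
#endif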
232
233 /*
234 * Fault recovery in copyin/copyout routines.
235 */
236 struct recovery {
237 uintptr_t fault_addr;
238 uintptr_t recover_addr;
239 };
240
241 extern struct recovery recover_table[];
242 extern struct recovery recover_table_end[];
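
/*
 * Each recovery entry pairs the address of a potentially-faulting
 * instruction in a copyio routine (fault_addr) with the address at which
 * execution should resume (recover_addr); kernel_trap() below redirects
 * %rip to recover_addr when a fault lands on fault_addr.
 */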
243
244 const char * trap_type[] = {TRAP_NAMES};
245 unsigned TRAP_TYPES = sizeof(trap_type) / sizeof(trap_type[0]);
246
247 extern void PE_incoming_interrupt(int interrupt);
248
249 #if defined(__x86_64__) && DEBUG
250 void
251 kprint_state(x86_saved_state64_t *saved_state)
252 {
253 kprintf("current_cpu_datap() 0x%lx\n", (uintptr_t)current_cpu_datap());
254 kprintf("Current GS base MSR 0x%llx\n", rdmsr64(MSR_IA32_GS_BASE));
255 kprintf("Kernel GS base MSR 0x%llx\n", rdmsr64(MSR_IA32_KERNEL_GS_BASE));
256 kprintf("state at 0x%lx:\n", (uintptr_t) saved_state);
257
258 kprintf(" rdi 0x%llx\n", saved_state->rdi);
259 kprintf(" rsi 0x%llx\n", saved_state->rsi);
260 kprintf(" rdx 0x%llx\n", saved_state->rdx);
261 kprintf(" r10 0x%llx\n", saved_state->r10);
262 kprintf(" r8 0x%llx\n", saved_state->r8);
263 kprintf(" r9 0x%llx\n", saved_state->r9);
264
265 kprintf(" cr2 0x%llx\n", saved_state->cr2);
266 kprintf("real cr2 0x%lx\n", get_cr2());
267 kprintf(" r15 0x%llx\n", saved_state->r15);
268 kprintf(" r14 0x%llx\n", saved_state->r14);
269 kprintf(" r13 0x%llx\n", saved_state->r13);
270 kprintf(" r12 0x%llx\n", saved_state->r12);
271 kprintf(" r11 0x%llx\n", saved_state->r11);
272 kprintf(" rbp 0x%llx\n", saved_state->rbp);
273 kprintf(" rbx 0x%llx\n", saved_state->rbx);
274 kprintf(" rcx 0x%llx\n", saved_state->rcx);
275 kprintf(" rax 0x%llx\n", saved_state->rax);
276
277 kprintf(" gs 0x%x\n", saved_state->gs);
278 kprintf(" fs 0x%x\n", saved_state->fs);
279
280 kprintf(" isf.trapno 0x%x\n", saved_state->isf.trapno);
281 kprintf(" isf._pad 0x%x\n", saved_state->isf._pad);
282 kprintf(" isf.trapfn 0x%llx\n", saved_state->isf.trapfn);
283 kprintf(" isf.err 0x%llx\n", saved_state->isf.err);
284 kprintf(" isf.rip 0x%llx\n", saved_state->isf.rip);
285 kprintf(" isf.cs 0x%llx\n", saved_state->isf.cs);
286 kprintf(" isf.rflags 0x%llx\n", saved_state->isf.rflags);
287 kprintf(" isf.rsp 0x%llx\n", saved_state->isf.rsp);
288 kprintf(" isf.ss 0x%llx\n", saved_state->isf.ss);
289 }
290 #endif
291
292
293 /*
294  * interrupt_latency_cap bounds the tolerated interrupt latency, in
295  * absolute time units; ilat_assert, when set, arms a panic if that cap is exceeded.
296 */
297
298 uint64_t interrupt_latency_cap = 0;
299 boolean_t ilat_assert = FALSE;
300
301 void
302 interrupt_latency_tracker_setup(void)
303 {
304 uint32_t ilat_cap_us;
305 if (PE_parse_boot_argn("interrupt_latency_cap_us", &ilat_cap_us, sizeof(ilat_cap_us))) {
306 interrupt_latency_cap = ilat_cap_us * NSEC_PER_USEC;
307 nanoseconds_to_absolutetime(interrupt_latency_cap, &interrupt_latency_cap);
308 } else {
309 interrupt_latency_cap = LockTimeOut;
310 }
311 PE_parse_boot_argn("-interrupt_latency_assert_enable", &ilat_assert, sizeof(ilat_assert));
312 }
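
/*
 * Usage sketch (argument values are illustrative, not defaults): booting
 * with `interrupt_latency_cap_us=1000 -interrupt_latency_assert_enable'
 * arms a panic for any interrupt serviced in more than 1ms, unless machine
 * timeouts are suspended.
 */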
313
314 void
315 interrupt_reset_latency_stats(void)
316 {
317 uint32_t i;
318 for (i = 0; i < real_ncpus; i++) {
319 cpu_data_ptr[i]->cpu_max_observed_int_latency =
320 cpu_data_ptr[i]->cpu_max_observed_int_latency_vector = 0;
321 }
322 }
323
324 void
325 interrupt_populate_latency_stats(char *buf, unsigned bufsize)
326 {
327 uint32_t i, tcpu = ~0;
328 uint64_t cur_max = 0;
329
330 for (i = 0; i < real_ncpus; i++) {
331 if (cur_max < cpu_data_ptr[i]->cpu_max_observed_int_latency) {
332 cur_max = cpu_data_ptr[i]->cpu_max_observed_int_latency;
333 tcpu = i;
334 }
335 }
336
337 if (tcpu < real_ncpus) {
338 snprintf(buf, bufsize, "0x%x 0x%x 0x%llx", tcpu, cpu_data_ptr[tcpu]->cpu_max_observed_int_latency_vector, cpu_data_ptr[tcpu]->cpu_max_observed_int_latency);
339 }
340 }
341
342 uint32_t interrupt_timer_coalescing_enabled = 1;
343 uint64_t interrupt_coalesced_timers;
344
345 /*
346 * Handle interrupts:
347 * - local APIC interrupts (IPIs, timers, etc) are handled by the kernel,
348 * - device interrupts go to the platform expert.
349 */
350 void
351 interrupt(x86_saved_state_t *state)
352 {
353 uint64_t rip;
354 uint64_t rsp;
355 int interrupt_num;
356 boolean_t user_mode = FALSE;
357 int ipl;
358 int cnum = cpu_number();
359 cpu_data_t *cdp = cpu_data_ptr[cnum];
360 int itype = DBG_INTR_TYPE_UNKNOWN;
361 int handled;
362
363
364 x86_saved_state64_t *state64 = saved_state64(state);
365 rip = state64->isf.rip;
366 rsp = state64->isf.rsp;
367 interrupt_num = state64->isf.trapno;
368 if (state64->isf.cs & 0x03) {
369 user_mode = TRUE;
370 }
371
372 #if DEVELOPMENT || DEBUG
373 uint64_t frameptr = is_saved_state64(state) ? state64->rbp : saved_state32(state)->ebp;
374 uint32_t traptrace_index = traptrace_start(interrupt_num, rip, mach_absolute_time(), frameptr);
375 #endif
376
377 if (cpu_data_ptr[cnum]->lcpu.package->num_idle == topoParms.nLThreadsPerPackage) {
378 cpu_data_ptr[cnum]->cpu_hwIntpexits[interrupt_num]++;
379 }
380
381 if (interrupt_num == (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_INTERPROCESSOR_INTERRUPT)) {
382 itype = DBG_INTR_TYPE_IPI;
383 } else if (interrupt_num == (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_TIMER_INTERRUPT)) {
384 itype = DBG_INTR_TYPE_TIMER;
385 } else {
386 itype = DBG_INTR_TYPE_OTHER;
387 }
388
389 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
390 MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
391 interrupt_num,
392 (user_mode ? rip : VM_KERNEL_UNSLIDE(rip)),
393 user_mode, itype, 0);
394
395 SCHED_STATS_INC(interrupt_count);
396
397 ipl = get_preemption_level();
398
399 /*
400 * Handle local APIC interrupts
401 * else call platform expert for devices.
402 */
403 handled = lapic_interrupt(interrupt_num, state);
404
405 if (!handled) {
406 if (interrupt_num == (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_CMCI_INTERRUPT)) {
407 /*
408 			 * CMCI can be signalled on any logical processor, and the kexts
409 			 * that handle CMCIs register handlers for the CMCI vector via
410 			 * IOKit, so if we see a CMCI, do not encode a CPU number in
411 			 * bits 8:31 (since the vector is the same regardless of the
412 			 * handling CPU).
413 */
414 PE_incoming_interrupt(interrupt_num);
415 } else if (cnum <= lapic_max_interrupt_cpunum) {
416 PE_incoming_interrupt((cnum << 8) | interrupt_num);
417 }
418 }
419
420 if (__improbable(get_preemption_level() != ipl)) {
421 panic("Preemption level altered by interrupt vector 0x%x: initial 0x%x, final: 0x%x", interrupt_num, ipl, get_preemption_level());
422 }
423
424
425 if (__improbable(cdp->cpu_nested_istack)) {
426 cdp->cpu_nested_istack_events++;
427 } else {
428 uint64_t ctime = mach_absolute_time();
429 uint64_t int_latency = ctime - cdp->cpu_int_event_time;
430 uint64_t esdeadline, ehdeadline;
431 /* Attempt to process deferred timers in the context of
432 * this interrupt, unless interrupt time has already exceeded
433 * TCOAL_ILAT_THRESHOLD.
434 */
435 #define TCOAL_ILAT_THRESHOLD (30000ULL)
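/* Absolute-time units; assuming x86's 1:1 timebase, this is roughly 30us. */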
436
437 if ((int_latency < TCOAL_ILAT_THRESHOLD) &&
438 interrupt_timer_coalescing_enabled) {
439 esdeadline = cdp->rtclock_timer.queue.earliest_soft_deadline;
440 ehdeadline = cdp->rtclock_timer.deadline;
441 if ((ctime >= esdeadline) && (ctime < ehdeadline)) {
442 interrupt_coalesced_timers++;
443 TCOAL_DEBUG(0x88880000 | DBG_FUNC_START, ctime, esdeadline, ehdeadline, interrupt_coalesced_timers, 0);
444 rtclock_intr(state);
445 TCOAL_DEBUG(0x88880000 | DBG_FUNC_END, ctime, esdeadline, interrupt_coalesced_timers, 0, 0);
446 } else {
447 TCOAL_DEBUG(0x77770000, ctime, cdp->rtclock_timer.queue.earliest_soft_deadline, cdp->rtclock_timer.deadline, interrupt_coalesced_timers, 0);
448 }
449 }
450
451 if (__improbable(ilat_assert && (int_latency > interrupt_latency_cap) && !machine_timeout_suspended())) {
452 panic("Interrupt vector 0x%x exceeded interrupt latency threshold, 0x%llx absolute time delta, prior signals: 0x%x, current signals: 0x%x", interrupt_num, int_latency, cdp->cpu_prior_signals, cdp->cpu_signals);
453 }
454
455 if (__improbable(int_latency > cdp->cpu_max_observed_int_latency)) {
456 cdp->cpu_max_observed_int_latency = int_latency;
457 cdp->cpu_max_observed_int_latency_vector = interrupt_num;
458 }
459 }
460
461 /*
462 * Having serviced the interrupt first, look at the interrupted stack depth.
463 */
464 if (!user_mode) {
465 uint64_t depth = cdp->cpu_kernel_stack
466 + sizeof(struct thread_kernel_state)
467 + sizeof(struct i386_exception_link *)
468 - rsp;
469 if (__improbable(depth > kernel_stack_depth_max)) {
470 kernel_stack_depth_max = (vm_offset_t)depth;
471 KERNEL_DEBUG_CONSTANT(
472 MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
473 (long) depth, (long) VM_KERNEL_UNSLIDE(rip), 0, 0, 0);
474 }
475 }
476
477 if (cnum == master_cpu) {
478 entropy_collect();
479 }
480
481 #if KPERF
482 kperf_interrupt();
483 #endif /* KPERF */
484
485 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END,
486 interrupt_num);
487
488 assert(ml_get_interrupts_enabled() == FALSE);
489
490 #if DEVELOPMENT || DEBUG
491 if (traptrace_index != TRAPTRACE_INVALID_INDEX) {
492 traptrace_end(traptrace_index, mach_absolute_time());
493 }
494 #endif
495 }
496
497 static inline void
498 reset_dr7(void)
499 {
500 	long dr7 = 0x400; /* DR7 reset value: only reserved bit 10 set, all breakpoints disabled; 32 bit on i386, 64 bit on x86_64 */
501 __asm__ volatile ("mov %0,%%dr7" : : "r" (dr7));
502 }
503 #if MACH_KDP
504 unsigned kdp_has_active_watchpoints = 0;
505 #define NO_WATCHPOINTS (!kdp_has_active_watchpoints)
506 #else
507 #define NO_WATCHPOINTS 1
508 #endif
509
510 static uint32_t bound_chk_violations_event;
511
512 static const char *
513 xnu_soft_trap_handle_breakpoint(void *tstate, uint16_t comment)
514 {
515 #pragma unused(tstate)
516 if (comment == CLANG_SOFT_TRAP_BOUND_CHK) {
517 os_atomic_inc(&bound_chk_violations_event, relaxed);
518 }
519
520 return NULL;
521 }
522
523 static const char *
524 xnu_hard_trap_handle_breakpoint(void *tstate, uint16_t comment)
525 {
526 kernel_panic_reason_t pr = PERCPU_GET(panic_reason);
527 x86_saved_state64_t *state = tstate;
528
529 switch (comment) {
530 case XNU_HARD_TRAP_SAFE_UNLINK:
531 snprintf(pr->buf, sizeof(pr->buf),
532 "panic: corrupt list around element %p",
533 (void *)state->rax);
534 return pr->buf;
535
536 case XNU_HARD_TRAP_STRING_CHK:
537 return "panic: string operation caused an overflow";
538
539 case XNU_HARD_TRAP_ASSERT_FAILURE:
540 /*
541 * Read the implicit assert arguments, see:
542 * ML_TRAP_REGISTER_1: rax
543 * ML_TRAP_REGISTER_2: r10
544 * ML_TRAP_REGISTER_3: r11
545 */
546 panic_assert_format(pr->buf, sizeof(pr->buf),
547 (struct mach_assert_hdr *)state->rax,
548 state->r10, state->r11);
549 return pr->buf;
550
551 default:
552 return NULL;
553 }
554 }
555
556 KERNEL_BRK_DESCRIPTOR_DEFINE(clang_desc,
557 .type = TRAP_TELEMETRY_TYPE_KERNEL_BRK_CLANG,
558 .base = CLANG_X86_TRAP_START,
559 .max = CLANG_X86_TRAP_END,
560 .options = BRK_TELEMETRY_OPTIONS_FATAL_DEFAULT,
561 .handle_breakpoint = NULL);
562
563 KERNEL_BRK_DESCRIPTOR_DEFINE(xnu_soft_traps_desc,
564 .type = TRAP_TELEMETRY_TYPE_KERNEL_BRK_TELEMETRY,
565 .base = XNU_SOFT_TRAP_START,
566 .max = XNU_SOFT_TRAP_END,
567 .options = BRK_TELEMETRY_OPTIONS_RECOVERABLE_DEFAULT(
568 /* enable_telemetry */ true),
569 .handle_breakpoint = xnu_soft_trap_handle_breakpoint);
570
571 KERNEL_BRK_DESCRIPTOR_DEFINE(libcxx_desc,
572 .type = TRAP_TELEMETRY_TYPE_KERNEL_BRK_LIBCXX,
573 .base = LIBCXX_TRAP_START,
574 .max = LIBCXX_TRAP_END,
575 .options = BRK_TELEMETRY_OPTIONS_FATAL_DEFAULT,
576 .handle_breakpoint = NULL);
577
578 KERNEL_BRK_DESCRIPTOR_DEFINE(xnu_hard_traps_desc,
579 .type = TRAP_TELEMETRY_TYPE_KERNEL_BRK_XNU,
580 .base = XNU_HARD_TRAP_START,
581 .max = XNU_HARD_TRAP_END,
582 .options = BRK_TELEMETRY_OPTIONS_FATAL_DEFAULT,
583 .handle_breakpoint = xnu_hard_trap_handle_breakpoint);
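
/*
 * Claiming a new comment range follows the same shape: a
 * KERNEL_BRK_DESCRIPTOR_DEFINE plus an optional handler. See test_desc and
 * handle_recoverable_kernel_trap at the bottom of this file for a minimal
 * recoverable example.
 */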
584
585 static bool
586 handle_kernel_breakpoint(
587 x86_saved_state64_t *state,
588 const char **reason,
589 uint16_t *out_comment)
590 {
591 uint16_t comment;
592 const struct kernel_brk_descriptor *desc;
593 uint8_t inst_buf[8];
594 uint32_t prefix16 = 0x80B90F67; /* Encoding prefix for ud1 <16-bit code>(%eax), %eax */
595 uint32_t prefix8 = 0x40B90F67; /* Encoding prefix for ud1 <8-bit code>(%eax), %eax */
596 bool found_prefix8 = false;
597
598 vm_size_t sz = ml_nofault_copy(state->isf.rip, (vm_offset_t)inst_buf, sizeof(inst_buf));
599 if (sz != sizeof(inst_buf)) {
600 return false;
601 }
602
603 if (bcmp(inst_buf, &prefix16, sizeof(prefix16)) == 0) {
604 		/* The two bytes following the prefix are our code */
605 comment = inst_buf[5] << 8 | inst_buf[4];
606 } else if (bcmp(inst_buf, &prefix8, sizeof(prefix8)) == 0) {
607 /* The one byte following the prefix is our code */
608 found_prefix8 = true;
609 comment = inst_buf[4];
610 } else {
611 return false;
612 }
613
614 if (out_comment) {
615 *out_comment = comment;
616 }
617 desc = find_kernel_brk_descriptor_by_comment(comment);
618
619 if (!desc) {
620 return false;
621 }
622
623 if (desc->options.enable_trap_telemetry) {
624 trap_telemetry_report_exception(
625 /* trap_type */ desc->type,
626 /* trap_code */ comment,
627 /* options */ desc->options.telemetry_options,
628 /* saved_state */ (void *)state);
629 }
630
631 if (desc->handle_breakpoint) {
632 *reason = desc->handle_breakpoint(state, comment);
633 }
634
635 /* Still alive? Check if we should recover. */
636 if (desc->options.recoverable) {
637 		/* ud1 is five or eight bytes long, depending on which encoding matched */
638 set_recovery_ip(state, state->isf.rip + (found_prefix8 ? 5 : 8));
639 return true;
640 }
641
642 return false;
643 }
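
/*
 * Worked example of the two encodings matched above (byte and comment
 * values are hypothetical):
 *
 *   67 0F B9 80 34 12 00 00   ud1 0x1234(%eax), %eax   8 bytes, comment 0x1234
 *   67 0F B9 40 42            ud1 0x42(%eax), %eax     5 bytes, comment 0x42
 *
 * which is why recovery advances %rip by 8 or 5 depending on which prefix
 * matched.
 */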
644
645 // Find a recovery entry for an instruction address if one is present.
646 static struct recovery const*
647 find_recovery_entry(vm_offset_t kern_ip)
648 {
649 for (struct recovery const* rp = recover_table; rp < recover_table_end; rp++) {
650 if (kern_ip == rp->fault_addr) {
651 return rp;
652 }
653 }
654 return NULL;
655 }
656
657 /*
658 * Trap from kernel mode. Only page-fault errors are recoverable,
659 * and then only in special circumstances. All other errors are
660  * fatal. This routine returns if the trap was handled; otherwise it panics.
661 */
662
663 void
664 kernel_trap(
665 x86_saved_state_t *state,
666 uintptr_t *lo_spp)
667 {
668 const char *reason = NULL;
669 uint16_t trapcomment = 0;
670
671 x86_saved_state64_t *saved_state;
672 int code;
673 user_addr_t vaddr;
674 int type;
675 vm_map_t map = 0; /* protected by T_PAGE_FAULT */
676 kern_return_t result = KERN_FAILURE;
677 kern_return_t fault_result = KERN_SUCCESS;
678 thread_t thread;
679 boolean_t intr;
680 vm_prot_t prot;
681 struct recovery const *rp = NULL;
682 vm_offset_t kern_ip;
683 int is_user;
684 int trap_pl = get_preemption_level();
685
686 thread = current_thread();
687
688 if (__improbable(is_saved_state32(state))) {
689 panic("kernel_trap(%p) with 32-bit state", state);
690 }
691 saved_state = saved_state64(state);
692
693 /* Record cpu where state was captured */
694 saved_state->isf.cpu = cpu_number();
695
696 vaddr = (user_addr_t)saved_state->cr2;
697 type = saved_state->isf.trapno;
698 code = (int)(saved_state->isf.err & 0xffff);
699 intr = (saved_state->isf.rflags & EFL_IF) != 0; /* state of ints at trap */
700 kern_ip = (vm_offset_t)saved_state->isf.rip;
701
702 is_user = (vaddr < VM_MAX_USER_PAGE_ADDRESS);
703
704 #if DEVELOPMENT || DEBUG
705 uint32_t traptrace_index = traptrace_start(type, kern_ip, mach_absolute_time(), saved_state->rbp);
706 #endif
707
708 #if CONFIG_DTRACE
709 /*
710 * Is there a DTrace hook?
711 */
712 if (__improbable(tempDTraceTrapHook != NULL)) {
713 if (tempDTraceTrapHook(type, state, lo_spp, 0) == KERN_SUCCESS) {
714 /*
715 * If it succeeds, we are done...
716 */
717 goto common_return;
718 }
719 }
720
721 /* Handle traps originated from probe context. */
722 if (thread != THREAD_NULL && thread->t_dtrace_inprobe) {
723 if (dtrace_handle_trap(type, state)) {
724 goto common_return;
725 }
726 }
727
728 #endif /* CONFIG_DTRACE */
729
730 /*
731 	 * we come here with interrupts off as we don't want to recurse
732 	 * on preemption below, but we do want to re-enable interrupts
733 	 * as soon as we possibly can to hold latency down
734 */
735 if (__improbable(T_PREEMPT == type)) {
736 ast_taken_kernel();
737
738 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
739 (MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
740 0, 0, 0, VM_KERNEL_UNSLIDE(kern_ip), 0);
741
742 goto common_return;
743 }
744
745 user_addr_t kd_vaddr = is_user ? vaddr : VM_KERNEL_UNSLIDE(vaddr);
746 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
747 (MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
748 (unsigned)(kd_vaddr >> 32), (unsigned)kd_vaddr, is_user,
749 VM_KERNEL_UNSLIDE(kern_ip), 0);
750
751
752 if (T_PAGE_FAULT == type) {
753 /*
754 * assume we're faulting in the kernel map
755 */
756 map = kernel_map;
757
758 if (__probable((thread != THREAD_NULL) && (thread->map != kernel_map) &&
759 (vaddr < VM_MAX_USER_PAGE_ADDRESS))) {
760 /* fault occurred in userspace */
761 map = thread->map;
762
763 /* Intercept a potential Supervisor Mode Execute
764 * Protection fault. These criteria identify
765 * both NX faults and SMEP faults, but both
766 * are fatal. We avoid checking PTEs (racy).
767 * (The VM could just redrive a SMEP fault, hence
768 * the intercept).
769 */
770 if (__improbable((code == (T_PF_PROT | T_PF_EXECUTE)) &&
771 (pmap_smep_enabled) && (saved_state->isf.rip == vaddr))) {
772 goto debugger_entry;
773 }
774
775 /*
776 * Additionally check for SMAP faults...
777 * which are characterized by page-present and
778 * the AC bit unset (i.e. not from copyin/out path).
779 */
780 if (__improbable(code & T_PF_PROT &&
781 pmap_smap_enabled &&
782 (saved_state->isf.rflags & EFL_AC) == 0)) {
783 goto debugger_entry;
784 }
785
786 /*
787 * If we're not sharing cr3 with the user
788 * and we faulted in copyio,
789 * then switch cr3 here and dismiss the fault.
790 */
791 if (no_shared_cr3 &&
792 (thread->machine.specFlags & CopyIOActive) &&
793 map->pmap->pm_cr3 != get_cr3_base()) {
794 pmap_assert(current_cpu_datap()->cpu_pmap_pcid_enabled == FALSE);
795 set_cr3_raw(map->pmap->pm_cr3);
796 return;
797 }
798 if (__improbable(vaddr < PAGE_SIZE) &&
799 ((thread->machine.specFlags & CopyIOActive) == 0)) {
800 goto debugger_entry;
801 }
802 }
803 }
804
805 (void) ml_set_interrupts_enabled(intr);
806
807 switch (type) {
808 case T_NO_FPU:
809 fpnoextflt();
810 goto common_return;
811
812 case T_FPU_FAULT:
813 fpextovrflt();
814 goto common_return;
815
816 case T_FLOATING_POINT_ERROR:
817 fpexterrflt();
818 goto common_return;
819
820 case T_SSE_FLOAT_ERROR:
821 fpSSEexterrflt();
822 goto common_return;
823
824 case T_INVALID_OPCODE:
825 if (handle_kernel_breakpoint(saved_state, &reason, &trapcomment)) {
826 goto common_return;
827 }
828 fpUDflt(kern_ip);
829 goto debugger_entry;
830
831 case T_DEBUG:
832 /*
833 		 * Re-enable LBR tracing for core/panic files if necessary; i386_lbr_enable() itself checks whether LBRs should be re-enabled.
834 */
835 i386_lbr_enable();
836 if ((saved_state->isf.rflags & EFL_TF) == 0 && NO_WATCHPOINTS) {
837 /* We've somehow encountered a debug
838 * register match that does not belong
839 * to the kernel debugger.
840 * This isn't supposed to happen.
841 */
842 reset_dr7();
843 goto common_return;
844 }
845 goto debugger_entry;
846 case T_INT3:
847 goto debugger_entry;
848 case T_PAGE_FAULT:
849
850 #if CONFIG_DTRACE
851 if (thread != THREAD_NULL && thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */
852 if (dtrace_tally_fault(vaddr)) { /* Should a fault under dtrace be ignored? */
853 /*
854 * DTrace has "anticipated" the possibility of this fault, and has
855 * established the suitable recovery state. Drop down now into the
856 * recovery handling code in "case T_GENERAL_PROTECTION:".
857 */
858 goto FALL_THROUGH;
859 }
860 }
861 #endif /* CONFIG_DTRACE */
862
863 prot = VM_PROT_READ;
864
865 if (code & T_PF_WRITE) {
866 prot |= VM_PROT_WRITE;
867 }
868 if (code & T_PF_EXECUTE) {
869 prot |= VM_PROT_EXECUTE;
870 }
871
872 /**
873 * vm_fault() can be called with preemption disabled (and indeed this is expected for
874 * certain copyio() scenarios), but can't safely be called with interrupts disabled
875 * once the system has gone multi-threaded. Other than some early-boot situations
876 * such as startup kext loading, kernel paging operations should never be triggered
877 * by non-interruptible code in the first place, so a fault from such a context will
878 * ultimately produce a kernel page fault panic anyway. In these cases, skip calling
879 * vm_fault() to avoid masking the real kernel panic with a failed VM locking assertion.
880 */
881 if (__improbable(!(intr ||
882 startup_phase < STARTUP_SUB_EARLY_BOOT ||
883 current_cpu_datap()->cpu_hibernate))) {
884 fault_result = result = KERN_FAILURE;
885 goto FALL_THROUGH;
886 }
887
888 		// VM will query this property when deciding whether to throttle the fault; we don't
889 		// want to throttle kernel faults taken in copyio paths. The presence of a recovery
890 		// entry is used as a proxy for being in copyio code.
891 rp = find_recovery_entry(kern_ip);
892 const bool was_recover = thread->recover;
893 thread->recover = was_recover || (rp != NULL);
894
895 fault_result = result = vm_fault(map,
896 vaddr,
897 prot,
898 FALSE, VM_KERN_MEMORY_NONE,
899 THREAD_UNINT, NULL, 0);
900
901 thread->recover = was_recover;
902 if (result == KERN_SUCCESS) {
903 goto common_return;
904 }
905 /*
906 * fall through
907 */
908 FALL_THROUGH:
909
910 case T_GENERAL_PROTECTION:
911 /*
912 * If there is a failure recovery address
913 * for this fault, go there.
914 */
915 if ((rp != NULL) || (rp = find_recovery_entry(kern_ip))) {
916 set_recovery_ip(saved_state, rp->recover_addr);
917 goto common_return;
918 }
919
920 /*
921 * Unanticipated page-fault errors in kernel
922 * should not happen.
923 *
924 * fall through...
925 */
926 OS_FALLTHROUGH;
927 default:
928 /*
929 * Exception 15 is reserved but some chips may generate it
930 * spuriously. Seen at startup on AMD Athlon-64.
931 */
932 if (type == 15) {
933 kprintf("kernel_trap() ignoring spurious trap 15\n");
934 goto common_return;
935 }
936 debugger_entry:
937 /* Ensure that the i386_kernel_state at the base of the
938 * current thread's stack (if any) is synchronized with the
939 * context at the moment of the trap, to facilitate
940 * access through the debugger.
941 */
942 sync_iss_to_iks(state);
943 #if MACH_KDP
944 if (kdp_i386_trap(type, saved_state, result, (vm_offset_t)vaddr)) {
945 goto common_return;
946 }
947 #endif
948 }
949 if (type == T_PAGE_FAULT) {
950 panic_fault_address = vaddr;
951 }
952 pal_cli();
953
954 panic_trap(saved_state, trapcomment, reason, trap_pl, fault_result);
955 /*
956 * NO RETURN
957 */
958
959 common_return:
960 #if DEVELOPMENT || DEBUG
961 if (traptrace_index != TRAPTRACE_INVALID_INDEX) {
962 traptrace_end(traptrace_index, mach_absolute_time());
963 }
964 #endif
965 return;
966 }
967
968 static void
969 set_recovery_ip(x86_saved_state64_t *saved_state, vm_offset_t ip)
970 {
971 saved_state->isf.rip = ip;
972 }
973
974 static void
975 panic_trap(
976 x86_saved_state64_t *regs,
977 uint16_t trapcomment,
978 const char *trapreason,
979 uint32_t pl,
980 kern_return_t fault_result)
981 {
982 char trapbuf[64];
983 pal_cr_t cr0, cr2, cr3, cr4;
984 boolean_t potential_smep_fault = FALSE, potential_kernel_NX_fault = FALSE;
985 boolean_t potential_smap_fault = FALSE;
986
987 pal_get_control_registers( &cr0, &cr2, &cr3, &cr4 );
988 assert(ml_get_interrupts_enabled() == FALSE);
989 current_cpu_datap()->cpu_fatal_trap_state = regs;
990 /*
991 * Issue an I/O port read if one has been requested - this is an
992 * event logic analyzers can use as a trigger point.
993 */
994 panic_notify();
995
996 kprintf("CPU %d panic trap number 0x%x, rip 0x%016llx\n",
997 cpu_number(), regs->isf.trapno, regs->isf.rip);
998 kprintf("cr0 0x%016llx cr2 0x%016llx cr3 0x%016llx cr4 0x%016llx\n",
999 cr0, cr2, cr3, cr4);
1000
1001 if ((regs->isf.trapno == T_PAGE_FAULT) && (regs->isf.err == (T_PF_PROT | T_PF_EXECUTE)) && (regs->isf.rip == regs->cr2)) {
1002 if (pmap_smep_enabled && (regs->isf.rip < VM_MAX_USER_PAGE_ADDRESS)) {
1003 potential_smep_fault = TRUE;
1004 } else if (regs->isf.rip >= VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
1005 potential_kernel_NX_fault = TRUE;
1006 }
1007 } else if (pmap_smap_enabled &&
1008 regs->isf.trapno == T_PAGE_FAULT &&
1009 regs->isf.err & T_PF_PROT &&
1010 regs->cr2 < VM_MAX_USER_PAGE_ADDRESS &&
1011 regs->isf.rip >= VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
1012 potential_smap_fault = TRUE;
1013 }
1014
1015 if (trapreason == NULL) {
1016 const char *traptype = "Unknown";
1017
1018 if (regs->isf.trapno < TRAP_TYPES) {
1019 traptype = trap_type[regs->isf.trapno];
1020 }
1021
1022 trapreason = "Kernel trap";
1023
1024 if (trapcomment == 0) {
1025 snprintf(trapbuf, sizeof(trapbuf),
1026 "type = %d=%s, ",
1027 regs->isf.trapno, traptype);
1028 } else {
1029 snprintf(trapbuf, sizeof(trapbuf),
1030 "type = %d=%s #%#04hx, ",
1031 regs->isf.trapno, traptype, trapcomment);
1032 }
1033 } else {
1034 trapbuf[0] = '\0';
1035 }
1036
1037 #undef panic
1038 panic("%s at 0x%016llx, %sregisters:\n"
1039 "CR0: 0x%016llx, CR2: 0x%016llx, CR3: 0x%016llx, CR4: 0x%016llx\n"
1040 "RAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n"
1041 "RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n"
1042 "R8: 0x%016llx, R9: 0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n"
1043 "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n"
1044 "RFL: 0x%016llx, RIP: 0x%016llx, CS: 0x%016llx, SS: 0x%016llx\n"
1045 "Fault CR2: 0x%016llx, Error code: 0x%016llx, Fault CPU: 0x%x%s%s%s%s, PL: %d, VF: %d\n",
1046 trapreason, regs->isf.rip, trapbuf,
1047 cr0, cr2, cr3, cr4,
1048 regs->rax, regs->rbx, regs->rcx, regs->rdx,
1049 regs->isf.rsp, regs->rbp, regs->rsi, regs->rdi,
1050 regs->r8, regs->r9, regs->r10, regs->r11,
1051 regs->r12, regs->r13, regs->r14, regs->r15,
1052 regs->isf.rflags, regs->isf.rip, regs->isf.cs & 0xFFFF,
1053 regs->isf.ss & 0xFFFF, regs->cr2, regs->isf.err, regs->isf.cpu,
1054 virtualized ? " VMM" : "",
1055 potential_kernel_NX_fault ? " Kernel NX fault" : "",
1056 potential_smep_fault ? " SMEP/User NX fault" : "",
1057 potential_smap_fault ? " SMAP fault" : "",
1058 pl,
1059 fault_result);
1060 }
1061
1062 #if CONFIG_DTRACE
1063 extern kern_return_t dtrace_user_probe(x86_saved_state_t *);
1064 #endif
1065
1066 #if DEBUG
1067 uint32_t fsigs[2];
1068 uint32_t fsigns, fsigcs;
1069 #endif
1070
1071 /*
1072 * Trap from user mode.
1073 */
1074 void
1075 user_trap(
1076 x86_saved_state_t *saved_state)
1077 {
1078 int exc;
1079 int err;
1080 mach_exception_code_t code;
1081 mach_exception_subcode_t subcode;
1082 int type;
1083 user_addr_t vaddr;
1084 vm_prot_t prot;
1085 thread_t thread = current_thread();
1086 kern_return_t kret;
1087 user_addr_t rip;
1088 unsigned long dr6 = 0; /* 32 bit for i386, 64 bit for x86_64 */
1089 int current_cpu = cpu_number();
1090 #if DEVELOPMENT || DEBUG
1091 bool inspect_cacheline = false;
1092 uint32_t traptrace_index;
1093 #endif
1094 assert((is_saved_state32(saved_state) && !thread_is_64bit_addr(thread)) ||
1095 (is_saved_state64(saved_state) && thread_is_64bit_addr(thread)));
1096
1097 if (is_saved_state64(saved_state)) {
1098 x86_saved_state64_t *regs;
1099
1100 regs = saved_state64(saved_state);
1101
1102 /* Record cpu where state was captured */
1103 regs->isf.cpu = current_cpu;
1104
1105 type = regs->isf.trapno;
1106 err = (int)regs->isf.err & 0xffff;
1107 vaddr = (user_addr_t)regs->cr2;
1108 rip = (user_addr_t)regs->isf.rip;
1109 #if DEVELOPMENT || DEBUG
1110 traptrace_index = traptrace_start(type, rip, mach_absolute_time(), regs->rbp);
1111 #endif
1112 } else {
1113 x86_saved_state32_t *regs;
1114
1115 regs = saved_state32(saved_state);
1116
1117 /* Record cpu where state was captured */
1118 regs->cpu = current_cpu;
1119
1120 type = regs->trapno;
1121 err = regs->err & 0xffff;
1122 vaddr = (user_addr_t)regs->cr2;
1123 rip = (user_addr_t)regs->eip;
1124 #if DEVELOPMENT || DEBUG
1125 traptrace_index = traptrace_start(type, rip, mach_absolute_time(), regs->ebp);
1126 #endif
1127 }
1128
1129 #if DEVELOPMENT || DEBUG
1130 /*
1131 * Copy the cacheline of code into the thread's instruction stream save area
1132 * before enabling interrupts (the assumption is that we have not otherwise faulted or
1133 * trapped since the original cache line stores). If the saved code is not valid,
1134 * we'll catch it below when we process the copyin() for unhandled faults.
1135 */
1136 if (thread->machine.insn_copy_optout == false &&
1137 (type == T_PAGE_FAULT || type == T_INVALID_OPCODE || type == T_GENERAL_PROTECTION)) {
1138 #define CACHELINE_SIZE 64
1139 THREAD_TO_PCB(thread)->insn_cacheline[CACHELINE_SIZE] = (uint8_t)(rip & (CACHELINE_SIZE - 1));
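		/*
		 * The early trap vector is assumed to have stashed the faulting
		 * cacheline in the shadow CPU data's cpu_rtimes scratch area;
		 * the extra trailing insn_cacheline byte (set above) records
		 * rip's offset within that line.
		 */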
1140 bcopy(&cpu_shadowp(current_cpu)->cpu_rtimes[0],
1141 &THREAD_TO_PCB(thread)->insn_cacheline[0],
1142 sizeof(THREAD_TO_PCB(thread)->insn_cacheline) - 1);
1143 inspect_cacheline = true;
1144 }
1145 #endif
1146
1147 if (type == T_DEBUG) {
1148 if (thread->machine.ids) {
1149 unsigned long clear = 0;
1150 /* Stash and clear this processor's DR6 value, in the event
1151 * this was a debug register match
1152 */
1153 __asm__ volatile ("mov %%db6, %0" : "=r" (dr6));
1154 __asm__ volatile ("mov %0, %%db6" : : "r" (clear));
1155 }
1156 /* [Re]Enable LBRs *BEFORE* enabling interrupts to ensure we hit the right CPU */
1157 i386_lbr_enable();
1158 }
1159
1160 if (type == T_PAGE_FAULT) {
1161 thread_reset_pcs_will_fault(thread);
1162 }
1163
1164 pal_sti();
1165
1166 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1167 (MACHDBG_CODE(DBG_MACH_EXCP_UTRAP_x86, type)) | DBG_FUNC_NONE,
1168 (unsigned)(vaddr >> 32), (unsigned)vaddr,
1169 (unsigned)(rip >> 32), (unsigned)rip, 0);
1170
1171 code = 0;
1172 subcode = 0;
1173 exc = 0;
1174
1175 #if CONFIG_DTRACE
1176 /*
1177 * DTrace does not consume all user traps, only INT_3's for now.
1178 * Avoid needlessly calling tempDTraceTrapHook here, and let the
1179 * INT_3 case handle them.
1180 */
1181 #endif
1182
1183 DEBUG_KPRINT_SYSCALL_MASK(1,
1184 "user_trap: type=0x%x(%s) err=0x%x cr2=%p rip=%p\n",
1185 type, trap_type[type], err, (void *)(long) vaddr, (void *)(long) rip);
1186
1187 switch (type) {
1188 case T_DIVIDE_ERROR:
1189 exc = EXC_ARITHMETIC;
1190 code = EXC_I386_DIV;
1191 break;
1192
1193 case T_DEBUG:
1194 {
1195 pcb_t pcb;
1196 /*
1197 * Update the PCB with this processor's DR6 value
1198 * in the event this was a debug register match.
1199 */
1200 pcb = THREAD_TO_PCB(thread);
1201 if (pcb->ids) {
1202 /*
1203 * We can get and set the status register
1204 * in 32-bit mode even on a 64-bit thread
1205 * because the high order bits are not
1206 * used on x86_64
1207 */
1208 if (thread_is_64bit_addr(thread)) {
1209 x86_debug_state64_t *ids = pcb->ids;
1210 ids->dr6 = dr6;
1211 } else { /* 32 bit thread */
1212 x86_debug_state32_t *ids = pcb->ids;
1213 ids->dr6 = (uint32_t) dr6;
1214 }
1215 }
1216 exc = EXC_BREAKPOINT;
1217 code = EXC_I386_SGL;
1218 break;
1219 }
1220 case T_INT3:
1221 #if CONFIG_DTRACE
1222 if (dtrace_user_probe(saved_state) == KERN_SUCCESS) {
1223 return; /* If it succeeds, we are done... */
1224 }
1225 #endif
1226 exc = EXC_BREAKPOINT;
1227 code = EXC_I386_BPT;
1228 break;
1229
1230 case T_OVERFLOW:
1231 exc = EXC_ARITHMETIC;
1232 code = EXC_I386_INTO;
1233 break;
1234
1235 case T_OUT_OF_BOUNDS:
1236 exc = EXC_SOFTWARE;
1237 code = EXC_I386_BOUND;
1238 break;
1239
1240 case T_INVALID_OPCODE:
1241 if (fpUDflt(rip) == 1) {
1242 exc = EXC_BAD_INSTRUCTION;
1243 code = EXC_I386_INVOP;
1244 }
1245 break;
1246
1247 case T_NO_FPU:
1248 fpnoextflt();
1249 break;
1250
1251 case T_FPU_FAULT:
1252 fpextovrflt();
1253 /*
1254 * Raise exception.
1255 */
1256 exc = EXC_BAD_ACCESS;
1257 code = VM_PROT_READ | VM_PROT_EXECUTE;
1258 subcode = 0;
1259 break;
1260
1261 case T_INVALID_TSS: /* invalid TSS == iret with NT flag set */
1262 exc = EXC_BAD_INSTRUCTION;
1263 code = EXC_I386_INVTSSFLT;
1264 subcode = err;
1265 break;
1266
1267 case T_SEGMENT_NOT_PRESENT:
1268 exc = EXC_BAD_INSTRUCTION;
1269 code = EXC_I386_SEGNPFLT;
1270 subcode = err;
1271 break;
1272
1273 case T_STACK_FAULT:
1274 exc = EXC_BAD_INSTRUCTION;
1275 code = EXC_I386_STKFLT;
1276 subcode = err;
1277 break;
1278
1279 case T_GENERAL_PROTECTION:
1280 /*
1281 * There's a wide range of circumstances which generate this
1282 * class of exception. From user-space, many involve bad
1283 * addresses (such as a non-canonical 64-bit address).
1284 * So we map this to EXC_BAD_ACCESS (and thereby SIGSEGV).
1285 * The trouble is cr2 doesn't contain the faulting address;
1286 * we'd need to decode the faulting instruction to really
1287 * determine this. We'll leave that to debuggers.
1288 * However, attempted execution of privileged instructions
1289 * (e.g. cli) also generate GP faults and so we map these to
1290 * to EXC_BAD_ACCESS (and thence SIGSEGV) also - rather than
1291 * EXC_BAD_INSTRUCTION which is more accurate. We just can't
1292 * win!
1293 */
1294 exc = EXC_BAD_ACCESS;
1295 code = EXC_I386_GPFLT;
1296 subcode = err;
1297 break;
1298
1299 case T_PAGE_FAULT:
1300 {
1301 prot = VM_PROT_READ;
1302
1303 if (err & T_PF_WRITE) {
1304 prot |= VM_PROT_WRITE;
1305 }
1306 if (__improbable(err & T_PF_EXECUTE)) {
1307 prot |= VM_PROT_EXECUTE;
1308 }
1309 #if DEVELOPMENT || DEBUG
1310 bool do_simd_hash = thread_fpsimd_hash_enabled();
1311 uint32_t fsig = 0;
1312 fsig = do_simd_hash ? thread_fpsimd_hash(thread) : 0;
1313 #if DEBUG
1314 fsigs[0] = fsig;
1315 #endif
1316 #endif
1317 kret = vm_fault(thread->map,
1318 vaddr,
1319 prot, FALSE, VM_KERN_MEMORY_NONE,
1320 THREAD_ABORTSAFE, NULL, 0);
1321 #if DEVELOPMENT || DEBUG
1322 if (do_simd_hash && fsig) {
1323 uint32_t fsig2 = thread_fpsimd_hash(thread);
1324 #if DEBUG
1325 fsigcs++;
1326 fsigs[1] = fsig2;
1327 #endif
1328 if (fsig != fsig2) {
1329 panic("FP/SIMD state hash mismatch across fault thread: %p 0x%x->0x%x", thread, fsig, fsig2);
1330 }
1331 } else {
1332 #if DEBUG
1333 fsigns++;
1334 #endif
1335 }
1336 #endif
1337 if (__probable((kret == KERN_SUCCESS) || (kret == KERN_ABORTED))) {
1338 break;
1339 } else if (__improbable(kret == KERN_FAILURE)) {
1340 /*
1341 * For a user trap, vm_fault() should never return KERN_FAILURE.
1342 * If it does, we're leaking preemption disables somewhere in the kernel.
1343 */
1344 panic("vm_fault() KERN_FAILURE from user fault on thread %p", thread);
1345 }
1346
1347 /* PAL debug hook (empty on x86) */
1348 pal_dbg_page_fault(thread, vaddr, kret);
1349 exc = EXC_BAD_ACCESS;
1350 code = kret;
1351 subcode = vaddr;
1352 }
1353 break;
1354
1355 case T_SSE_FLOAT_ERROR:
1356 fpSSEexterrflt();
1357 exc = EXC_ARITHMETIC;
1358 code = EXC_I386_SSEEXTERR;
1359 subcode = ((struct x86_fx_thread_state *)thread->machine.ifps)->fx_MXCSR;
1360 break;
1361
1362
1363 case T_FLOATING_POINT_ERROR:
1364 fpexterrflt();
1365 exc = EXC_ARITHMETIC;
1366 code = EXC_I386_EXTERR;
1367 subcode = ((struct x86_fx_thread_state *)thread->machine.ifps)->fx_status;
1368 break;
1369
1370 case T_DTRACE_RET:
1371 #if CONFIG_DTRACE
1372 if (dtrace_user_probe(saved_state) == KERN_SUCCESS) {
1373 return; /* If it succeeds, we are done... */
1374 }
1375 #endif
1376 /*
1377 * If we get an INT 0x7f when we do not expect to,
1378 * treat it as an illegal instruction
1379 */
1380 exc = EXC_BAD_INSTRUCTION;
1381 code = EXC_I386_INVOP;
1382 break;
1383
1384 default:
1385 panic("Unexpected user trap, type %d", type);
1386 }
1387
1388 if (type == T_PAGE_FAULT) {
1389 thread_reset_pcs_done_faulting(thread);
1390 }
1391
1392 if (exc != 0) {
1393 uint16_t cs;
1394 boolean_t intrs;
1395
1396 if (is_saved_state64(saved_state)) {
1397 cs = saved_state64(saved_state)->isf.cs;
1398 } else {
1399 cs = saved_state32(saved_state)->cs;
1400 }
1401
1402 if (last_branch_enabled_modes == LBR_ENABLED_USERMODE) {
1403 intrs = ml_set_interrupts_enabled(FALSE);
1404 /*
1405 * This is a bit racy (it's possible for this thread to migrate to another CPU, then
1406 * migrate back, but that seems rather rare in practice), but good enough to ensure
1407 * the LBRs are saved before proceeding with exception/signal dispatch.
1408 */
1409 if (current_cpu == cpu_number()) {
1410 i386_lbr_synch(thread);
1411 }
1412 ml_set_interrupts_enabled(intrs);
1413 }
1414
1415 /*
1416 		 * Do not try to copyin the instruction stream if the page fault was an
1417 		 * unhandled fetch at %rip itself.
1418 		 * Do not deal with cases where %cs != USER[64]_CS.
1419 		 * And there is no need to copy the instruction stream if the `insn_capcnt'
1420 		 * boot-arg set insn_copyin_count to 0.
1421 */
1422 if (thread->machine.insn_copy_optout == false && insn_copyin_count > 0 &&
1423 (cs == USER64_CS || cs == USER_CS) && (type != T_PAGE_FAULT || vaddr != rip)) {
1424 #if DEVELOPMENT || DEBUG
1425 copy_instruction_stream(thread, rip, type, inspect_cacheline);
1426 #else
1427 copy_instruction_stream(thread, rip, type);
1428 #endif
1429 }
1430
1431 #if DEVELOPMENT || DEBUG
1432 if (traptrace_index != TRAPTRACE_INVALID_INDEX) {
1433 traptrace_end(traptrace_index, mach_absolute_time());
1434 }
1435 #endif
1436 /*
1437 * Note: Codepaths that directly return from user_trap() have pending
1438 * ASTs processed in locore
1439 */
1440 i386_exception(exc, code, subcode);
1441 /* NOTREACHED */
1442 } else {
1443 #if DEVELOPMENT || DEBUG
1444 if (traptrace_index != TRAPTRACE_INVALID_INDEX) {
1445 traptrace_end(traptrace_index, mach_absolute_time());
1446 }
1447 #endif
1448 }
1449 }
1450
1451 /*
1452  * Copy in up to x86_INSTRUCTION_STATE_MAX_INSN_BYTES bytes from the page that includes `rip',
1453  * staying on that page and clipping the start or end of the window as needed.
1454  * Any clipped amount is added back at the opposite end, wherever it fits.
1455  * The copy length is insn_copyin_count, populated from the `insn_capcnt' boot-arg.
1456 */
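/*
 * Worked example (assuming insn_copyin_count == 192, i.e. a 96-byte
 * half-window, and a 4K page): with rip at page_base + 0x20, the nominal
 * window [rip - 96, rip + 96) would begin 0x40 bytes before the page, so
 * it is slid forward to [page_base, page_base + 192) and insn_offset
 * becomes 0x20, keeping the faulting byte at insn_bytes[insn_offset].
 */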
1457 static __attribute__((noinline)) void
1458 copy_instruction_stream(thread_t thread, uint64_t rip, int __unused trap_code
1459 #if DEVELOPMENT || DEBUG
1460 , bool inspect_cacheline
1461 #endif
1462 )
1463 {
1464 #if x86_INSTRUCTION_STATE_MAX_INSN_BYTES > 4096
1465 #error x86_INSTRUCTION_STATE_MAX_INSN_BYTES cannot exceed a page in size.
1466 #endif
1467 pcb_t pcb = THREAD_TO_PCB(thread);
1468 vm_map_offset_t pagemask = ~vm_map_page_mask(current_map());
1469 vm_map_offset_t rip_page = rip & pagemask;
1470 vm_map_offset_t start_addr;
1471 vm_map_offset_t insn_offset;
1472 vm_map_offset_t end_addr = rip + (insn_copyin_count / 2);
1473 void *stack_buffer;
1474 int copyin_err = 0;
1475 #if defined(MACH_BSD) && (DEVELOPMENT || DEBUG)
1476 void *procname;
1477 #endif
1478
1479 #if DEVELOPMENT || DEBUG
1480 assert(insn_copyin_count <= x86_INSTRUCTION_STATE_MAX_INSN_BYTES);
1481 #else
1482 if (insn_copyin_count > x86_INSTRUCTION_STATE_MAX_INSN_BYTES ||
1483 insn_copyin_count < 64 /* CACHELINE_SIZE */) {
1484 return;
1485 }
1486 #endif
1487
1488 #pragma clang diagnostic push
1489 #pragma clang diagnostic ignored "-Walloca"
1490 stack_buffer = __builtin_alloca(insn_copyin_count);
1491 #pragma clang diagnostic pop
1492
1493 if (rip >= (insn_copyin_count / 2)) {
1494 start_addr = rip - (insn_copyin_count / 2);
1495 } else {
1496 start_addr = 0;
1497 }
1498
1499 if (start_addr < rip_page) {
1500 insn_offset = (insn_copyin_count / 2) - (rip_page - start_addr);
1501 end_addr += (rip_page - start_addr);
1502 start_addr = rip_page;
1503 } else if (end_addr >= (rip_page + (~pagemask + 1))) {
1504 start_addr -= (end_addr - (rip_page + (~pagemask + 1))); /* Adjust start address backward */
1505 /* Adjust instruction offset due to start address change */
1506 insn_offset = (insn_copyin_count / 2) + (end_addr - (rip_page + (~pagemask + 1)));
1507 		end_addr = rip_page + (~pagemask + 1); /* clip to the start of the next page (non-inclusive) */
1508 } else {
1509 insn_offset = insn_copyin_count / 2;
1510 }
1511
1512 disable_preemption(); /* Prevent copyin from faulting in the instruction stream */
1513 if (
1514 #if DEVELOPMENT || DEBUG
1515 (insnstream_force_cacheline_mismatch < 2) &&
1516 #endif
1517 ((end_addr > start_addr) && (copyin_err = copyin(start_addr, stack_buffer, end_addr - start_addr)) == 0)) {
1518 enable_preemption();
1519
1520 if (pcb->insn_state == 0) {
1521 pcb->insn_state = kalloc_data(sizeof(x86_instruction_state_t), Z_WAITOK);
1522 }
1523
1524 if (pcb->insn_state != 0) {
1525 bcopy(stack_buffer, pcb->insn_state->insn_bytes, end_addr - start_addr);
1526 bzero(&pcb->insn_state->insn_bytes[end_addr - start_addr],
1527 insn_copyin_count - (end_addr - start_addr));
1528
1529 pcb->insn_state->insn_stream_valid_bytes = (int)(end_addr - start_addr);
1530 pcb->insn_state->insn_offset = (int)insn_offset;
1531
1532 #if DEVELOPMENT || DEBUG
1533 			/* Now try to validate that the cacheline we read at early-fault time matches the
1534 			 * code copied in. Before we do that, we have to make sure the buffer contains a
1535 			 * valid cacheline, by looking for the two sentinel values written in the event the
1536 			 * cacheline could not be copied.
1537 */
1538 #define CACHELINE_DATA_NOT_PRESENT 0xdeadc0debeefcafeULL
1539 #define CACHELINE_MASK (CACHELINE_SIZE - 1)
1540
1541 if (inspect_cacheline &&
1542 (*(uint64_t *)(uintptr_t)&pcb->insn_cacheline[0] != CACHELINE_DATA_NOT_PRESENT &&
1543 *(uint64_t *)(uintptr_t)&pcb->insn_cacheline[8] != CACHELINE_DATA_NOT_PRESENT)) {
1544 /*
1545 * The position of the cacheline in the instruction buffer is at offset
1546 * insn_offset - (rip & CACHELINE_MASK)
1547 */
1548 if (__improbable((rip & CACHELINE_MASK) > insn_offset)) {
1549 printf("thread %p code cacheline @ %p clipped wrt copied-in code (offset %d)\n",
1550 thread, (void *)(rip & ~CACHELINE_MASK), (int)(rip & CACHELINE_MASK));
1551 } else if (bcmp(&pcb->insn_state->insn_bytes[insn_offset - (rip & CACHELINE_MASK)],
1552 &pcb->insn_cacheline[0], CACHELINE_SIZE) != 0
1553 || insnstream_force_cacheline_mismatch
1554 ) {
1555 #if x86_INSTRUCTION_STATE_CACHELINE_SIZE != CACHELINE_SIZE
1556 #error cacheline size mismatch
1557 #endif
1558 bcopy(&pcb->insn_cacheline[0], &pcb->insn_state->insn_cacheline[0],
1559 x86_INSTRUCTION_STATE_CACHELINE_SIZE);
1560 /* Mark the instruction stream as being out-of-synch */
1561 pcb->insn_state->out_of_synch = 1;
1562
1563 printf("thread %p code cacheline @ %p mismatches with copied-in code [trap 0x%x]\n",
1564 thread, (void *)(rip & ~CACHELINE_MASK), trap_code);
1565 for (int i = 0; i < 8; i++) {
1566 printf("\t[%d] cl=0x%08llx vs. ci=0x%08llx\n", i, *(uint64_t *)(uintptr_t)&pcb->insn_cacheline[i * 8],
1567 *(uint64_t *)(uintptr_t)&pcb->insn_state->insn_bytes[(i * 8) + insn_offset - (rip & CACHELINE_MASK)]);
1568 }
1569 if (panic_on_cacheline_mismatch) {
1570 panic("Cacheline mismatch while processing unhandled exception.");
1571 }
1572 } else {
1573 pcb->insn_state->out_of_synch = 0;
1574 }
1575 } else if (inspect_cacheline) {
1576 printf("thread %p could not capture code cacheline at fault IP %p [offset %d]\n",
1577 (void *)thread, (void *)rip, (int)(insn_offset - (rip & CACHELINE_MASK)));
1578 pcb->insn_state->out_of_synch = 0;
1579 }
1580 #else
1581 pcb->insn_state->out_of_synch = 0;
1582 #endif /* DEVELOPMENT || DEBUG */
1583
1584 #if defined(MACH_BSD) && (DEVELOPMENT || DEBUG)
1585 if (panic_on_trap_procname[0] != 0) {
1586 task_t task = get_threadtask(thread);
1587 char procnamebuf[65] = {0};
1588
1589 if (get_bsdtask_info(task) != NULL) {
1590 procname = proc_name_address(get_bsdtask_info(task));
1591 strlcpy(procnamebuf, procname, sizeof(procnamebuf));
1592
1593 if (strcasecmp(panic_on_trap_procname, procnamebuf) == 0 &&
1594 ((1U << trap_code) & panic_on_trap_mask) != 0) {
1595 panic("Panic requested on trap type 0x%x for process `%s'", trap_code,
1596 panic_on_trap_procname);
1597 /*NORETURN*/
1598 }
1599 }
1600 }
1601 #endif /* MACH_BSD && (DEVELOPMENT || DEBUG) */
1602 }
1603 } else {
1604 enable_preemption();
1605
1606 pcb->insn_state_copyin_failure_errorcode = copyin_err;
1607 #if DEVELOPMENT || DEBUG
1608 if (inspect_cacheline && pcb->insn_state == 0) {
1609 pcb->insn_state = kalloc_data(sizeof(x86_instruction_state_t), Z_WAITOK);
1610 }
1611 if (pcb->insn_state != 0) {
1612 pcb->insn_state->insn_stream_valid_bytes = 0;
1613 pcb->insn_state->insn_offset = 0;
1614
1615 if (inspect_cacheline &&
1616 (*(uint64_t *)(uintptr_t)&pcb->insn_cacheline[0] != CACHELINE_DATA_NOT_PRESENT &&
1617 *(uint64_t *)(uintptr_t)&pcb->insn_cacheline[8] != CACHELINE_DATA_NOT_PRESENT)) {
1618 /*
1619 * We can still copy the cacheline into the instruction state structure
1620 * if it contains valid data
1621 */
1622 pcb->insn_state->out_of_synch = 1;
1623 bcopy(&pcb->insn_cacheline[0], &pcb->insn_state->insn_cacheline[0],
1624 x86_INSTRUCTION_STATE_CACHELINE_SIZE);
1625 }
1626 }
1627 #endif /* DEVELOPMENT || DEBUG */
1628 }
1629 }
1630
1631 /*
1632 * Handle exceptions for i386.
1633 *
1634 * If we are an AT bus machine, we must turn off the AST for a
1635 * delayed floating-point exception.
1636 *
1637 * If we are providing floating-point emulation, we may have
1638 * to retrieve the real register values from the floating point
1639 * emulator.
1640 */
1641 void
1642 i386_exception(
1643 int exc,
1644 mach_exception_code_t code,
1645 mach_exception_subcode_t subcode)
1646 {
1647 mach_exception_data_type_t codes[EXCEPTION_CODE_MAX];
1648
1649 DEBUG_KPRINT_SYSCALL_MACH("i386_exception: exc=%d code=0x%llx subcode=0x%llx\n",
1650 exc, code, subcode);
1651 codes[0] = code; /* new exception interface */
1652 codes[1] = subcode;
1653 exception_triage(exc, codes, 2);
1654 /*NOTREACHED*/
1655 }
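
/*
 * For example, the unhandled user page-fault path above reaches here as
 * i386_exception(EXC_BAD_ACCESS, kret, vaddr), delivered to user space as
 * a Mach exception (and typically a SIGSEGV/SIGBUS via the BSD layer).
 */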
1656
1657
1658 /* Synchronize a thread's x86_kernel_state (if any) with the given
1659 * x86_saved_state_t obtained from the trap/IPI handler; called in
1660 * kernel_trap() prior to entering the debugger, and when receiving
1661 * an "MP_KDP" IPI. Called with null saved_state if an incoming IPI
1662 * was detected from the kernel while spinning with interrupts masked.
1663 */
1664
1665 void
1666 sync_iss_to_iks(x86_saved_state_t *saved_state)
1667 {
1668 struct x86_kernel_state *iks = NULL;
1669 vm_offset_t kstack;
1670 boolean_t record_active_regs = FALSE;
1671
1672 /* The PAL may have a special way to sync registers */
1673 if (saved_state && saved_state->flavor == THREAD_STATE_NONE) {
1674 pal_get_kern_regs( saved_state );
1675 }
1676
1677 if (current_thread() != NULL &&
1678 (kstack = current_thread()->kernel_stack) != 0) {
1679 x86_saved_state64_t *regs = saved_state64(saved_state);
1680
1681 iks = STACK_IKS(kstack);
1682
1683 /* Did we take the trap/interrupt in kernel mode? */
1684 if (saved_state == NULL || /* NULL => polling in kernel */
1685 regs == USER_REGS64(current_thread())) {
1686 record_active_regs = TRUE;
1687 } else {
1688 iks->k_rbx = regs->rbx;
1689 iks->k_rsp = regs->isf.rsp;
1690 iks->k_rbp = regs->rbp;
1691 iks->k_r12 = regs->r12;
1692 iks->k_r13 = regs->r13;
1693 iks->k_r14 = regs->r14;
1694 iks->k_r15 = regs->r15;
1695 iks->k_rip = regs->isf.rip;
1696 }
1697 }
1698
1699 if (record_active_regs == TRUE) {
1700 /* Show the trap handler path */
1701 __asm__ volatile ("movq %%rbx, %0" : "=m" (iks->k_rbx));
1702 __asm__ volatile ("movq %%rsp, %0" : "=m" (iks->k_rsp));
1703 __asm__ volatile ("movq %%rbp, %0" : "=m" (iks->k_rbp));
1704 __asm__ volatile ("movq %%r12, %0" : "=m" (iks->k_r12));
1705 __asm__ volatile ("movq %%r13, %0" : "=m" (iks->k_r13));
1706 __asm__ volatile ("movq %%r14, %0" : "=m" (iks->k_r14));
1707 __asm__ volatile ("movq %%r15, %0" : "=m" (iks->k_r15));
1708 /* "Current" instruction pointer */
1709 __asm__ volatile ("leaq 1f(%%rip), %%rax; mov %%rax, %0\n1:"
1710 : "=m" (iks->k_rip)
1711 :
1712 : "rax");
1713 }
1714 }
1715
1716 /*
1717 * This is used by the NMI interrupt handler (from mp.c) to
1718  * unconditionally sync the trap handler context to the IKS
1719 * irrespective of whether the NMI was fielded in kernel
1720 * or user space.
1721 */
1722 void
1723 sync_iss_to_iks_unconditionally(__unused x86_saved_state_t *saved_state)
1724 {
1725 struct x86_kernel_state *iks;
1726 vm_offset_t kstack;
1727
1728 if ((kstack = current_thread()->kernel_stack) != 0) {
1729 iks = STACK_IKS(kstack);
1730 /* Display the trap handler path */
1731 __asm__ volatile ("movq %%rbx, %0" : "=m" (iks->k_rbx));
1732 __asm__ volatile ("movq %%rsp, %0" : "=m" (iks->k_rsp));
1733 __asm__ volatile ("movq %%rbp, %0" : "=m" (iks->k_rbp));
1734 __asm__ volatile ("movq %%r12, %0" : "=m" (iks->k_r12));
1735 __asm__ volatile ("movq %%r13, %0" : "=m" (iks->k_r13));
1736 __asm__ volatile ("movq %%r14, %0" : "=m" (iks->k_r14));
1737 __asm__ volatile ("movq %%r15, %0" : "=m" (iks->k_r15));
1738 /* "Current" instruction pointer */
1739 __asm__ volatile ("leaq 1f(%%rip), %%rax; mov %%rax, %0\n1:" : "=m" (iks->k_rip)::"rax");
1740 }
1741 }
1742
1743 #if DEBUG
1744 #define TERI 1
1745 #endif
1746
1747 #if TERI
1748 extern void thread_exception_return_internal(void) __dead2;
1749
1750 void
1751 thread_exception_return(void)
1752 {
1753 thread_t thread = current_thread();
1754 task_t task = current_task();
1755
1756 ml_set_interrupts_enabled(FALSE);
1757 if (thread_is_64bit_addr(thread) != task_has_64Bit_addr(task)) {
1758 panic("Task/thread bitness mismatch %p %p, task: %d, thread: %d",
1759 thread, task, thread_is_64bit_addr(thread), task_has_64Bit_addr(task));
1760 }
1761
1762 if (thread_is_64bit_addr(thread)) {
1763 if ((gdt_desc_p(USER64_CS)->access & ACC_PL_U) == 0) {
1764 panic("64-GDT mismatch %p, descriptor: %p", thread, gdt_desc_p(USER64_CS));
1765 }
1766 } else {
1767 if ((gdt_desc_p(USER_CS)->access & ACC_PL_U) == 0) {
1768 panic("32-GDT mismatch %p, descriptor: %p", thread, gdt_desc_p(USER_CS));
1769 }
1770 }
1771 assert(get_preemption_level() == 0);
1772 thread_exception_return_internal();
1773 }
1774 #endif
1775
1776 #if DEVELOPMENT || DEBUG
1777 static int trap_handled;
1778
1779 static const char *
1780 handle_recoverable_kernel_trap(
1781 __unused void *tstate,
1782 uint16_t comment)
1783 {
1784 assert(comment == TEST_RECOVERABLE_SOFT_TRAP);
1785
1786 printf("Recoverable trap handled.\n");
1787 trap_handled = 1;
1788
1789 return NULL;
1790 }
1791
1792 KERNEL_BRK_DESCRIPTOR_DEFINE(test_desc,
1793 .type = TRAP_TELEMETRY_TYPE_KERNEL_BRK_TEST,
1794 .base = TEST_RECOVERABLE_SOFT_TRAP,
1795 .max = TEST_RECOVERABLE_SOFT_TRAP,
1796 .options = BRK_TELEMETRY_OPTIONS_RECOVERABLE_DEFAULT(
1797 /* enable_telemetry */ false),
1798 .handle_breakpoint = handle_recoverable_kernel_trap);
1799
1800 static int
1801 recoverable_kernel_trap_test(__unused int64_t in, int64_t *out)
1802 {
1803 ml_recoverable_trap(TEST_RECOVERABLE_SOFT_TRAP);
1804
1805 *out = trap_handled;
1806 return 0;
1807 }
1808
1809 SYSCTL_TEST_REGISTER(recoverable_kernel_trap, recoverable_kernel_trap_test);
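
/*
 * The test is driven from user space via the sysctl node that
 * SYSCTL_TEST_REGISTER creates (the exact node path depends on the test
 * harness); an output of 1 confirms the descriptor's handler observed the
 * trap and execution resumed past it.
 */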
1810 #endif
1811