/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 * Hardware trap/fault handler.
 */

#include <mach_kdp.h>
#include <mach_ldebug.h>

#include <types.h>
#include <i386/eflags.h>
#include <i386/trap.h>
#include <i386/pmap.h>
#include <i386/fpu.h>
#include <i386/panic_notify.h>
#include <i386/lapic.h>

#include <mach/exception.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/i386/thread_status.h>

#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/restartable.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/exception.h>
#include <kern/spl.h>
#include <kern/misc_protos.h>
#include <kern/debug.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif
#include <kern/zalloc_internal.h>
#include <sys/kdebug.h>
#include <kperf/kperf.h>
#include <prng/random.h>
#include <prng/entropy.h>

#include <string.h>

#include <i386/postcode.h>
#include <i386/mp_desc.h>
#include <i386/proc_reg.h>
#include <i386/machine_routines.h>
#if CONFIG_MCA
#include <i386/machine_check.h>
#endif
#include <mach/i386/syscall_sw.h>

#include <libkern/OSDebug.h>
#include <i386/cpu_threads.h>
#include <machine/pal_routines.h>
#include <i386/lbr.h>

extern void throttle_lowpri_io(int);
extern void kprint_state(x86_saved_state64_t *saved_state);
#if DEVELOPMENT || DEBUG
int insnstream_force_cacheline_mismatch = 0;
extern int panic_on_cacheline_mismatch;
extern char panic_on_trap_procname[];
extern uint32_t panic_on_trap_mask;
#endif

extern int insn_copyin_count;

/*
 * Forward declarations
 */
static void panic_trap(x86_saved_state64_t *saved_state, uint32_t pl, kern_return_t fault_result) __dead2;
static void set_recovery_ip(x86_saved_state64_t *saved_state, vm_offset_t ip);
#if DEVELOPMENT || DEBUG
static __attribute__((noinline)) void copy_instruction_stream(thread_t thread, uint64_t rip, int trap_code, bool inspect_cacheline);
#else
static __attribute__((noinline)) void copy_instruction_stream(thread_t thread, uint64_t rip, int trap_code);
#endif

#if CONFIG_DTRACE
/* See <rdar://problem/4613924> */
perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */

extern boolean_t dtrace_tally_fault(user_addr_t);
extern boolean_t dtrace_handle_trap(int, x86_saved_state_t *);
#endif

#ifdef MACH_BSD
extern char *proc_name_address(void *p);
#endif /* MACH_BSD */

extern boolean_t pmap_smep_enabled;
extern boolean_t pmap_smap_enabled;

__attribute__((noreturn))
void
thread_syscall_return(
	kern_return_t ret)
{
	thread_t thr_act = current_thread();
	boolean_t is_mach;
	int code;

	pal_register_cache_state(thr_act, DIRTY);

	if (thread_is_64bit_addr(thr_act)) {
		x86_saved_state64_t *regs;

		regs = USER_REGS64(thr_act);

		code = (int) (regs->rax & SYSCALL_NUMBER_MASK);
		is_mach = (regs->rax & SYSCALL_CLASS_MASK)
		    == (SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT);
		if (kdebug_enable && is_mach) {
			/* Mach trap */
			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			    MACHDBG_CODE(DBG_MACH_EXCP_SC, code) | DBG_FUNC_END,
			    ret, 0, 0, 0, 0);
		}
		regs->rax = ret;
#if DEBUG
		if (is_mach) {
			DEBUG_KPRINT_SYSCALL_MACH(
				"thread_syscall_return: 64-bit mach ret=%u\n",
				ret);
		} else {
			DEBUG_KPRINT_SYSCALL_UNIX(
				"thread_syscall_return: 64-bit unix ret=%u\n",
				ret);
		}
#endif
	} else {
		x86_saved_state32_t *regs;

		regs = USER_REGS32(thr_act);

		code = ((int) regs->eax);
		is_mach = (code < 0);
		if (kdebug_enable && is_mach) {
			/* Mach trap */
			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			    MACHDBG_CODE(DBG_MACH_EXCP_SC, -code) | DBG_FUNC_END,
			    ret, 0, 0, 0, 0);
		}
		regs->eax = ret;
#if DEBUG
		if (is_mach) {
			DEBUG_KPRINT_SYSCALL_MACH(
				"thread_syscall_return: 32-bit mach ret=%u\n",
				ret);
		} else {
			DEBUG_KPRINT_SYSCALL_UNIX(
				"thread_syscall_return: 32-bit unix ret=%u\n",
				ret);
		}
#endif
	}

#if DEBUG || DEVELOPMENT
	kern_allocation_name_t
	    prior __assert_only = thread_get_kernel_state(thr_act)->allocation_name;
	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* DEBUG || DEVELOPMENT */

	throttle_lowpri_io(1);

	thread_exception_return();
	/*NOTREACHED*/
}
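
/*
 * Compiled-out sketch of the 64-bit class decode above: the syscall class
 * rides in the bits selected by SYSCALL_CLASS_MASK/SYSCALL_CLASS_SHIFT
 * (from the syscall_sw header already included here), while 32-bit callers
 * mark Mach traps with a negative number. The rax value in the comment is
 * hypothetical.
 */
#if 0 /* illustrative only, not built */
static bool
example_is_mach_trap_64(uint64_t rax)
{
	/* e.g. rax = (SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT) | 0x4b
	 * would decode as Mach trap number 0x4b. */
	return (rax & SYSCALL_CLASS_MASK) ==
	    (SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT);
}
#endif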

/*
 * Fault recovery in copyin/copyout routines.
 */
struct recovery {
	uintptr_t fault_addr;
	uintptr_t recover_addr;
};

extern struct recovery recover_table[];
extern struct recovery recover_table_end[];
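
/*
 * Sketch of the contract (inferred from the table walk in kernel_trap()
 * below): each entry pairs the address of a kernel instruction that is
 * allowed to fault while touching user memory with the address of its
 * recovery stub. The real entries are emitted by the assembly copy
 * routines, not constructed from C like this compiled-out example:
 */
#if 0 /* illustrative only, not built */
static struct recovery example_recovery_entry = {
	.fault_addr = 0,   /* address of the potentially-faulting load/store */
	.recover_addr = 0, /* stub that makes the copy routine return EFAULT */
};
#endif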

const char *trap_type[] = {TRAP_NAMES};
unsigned TRAP_TYPES = sizeof(trap_type) / sizeof(trap_type[0]);

extern void PE_incoming_interrupt(int interrupt);

#if defined(__x86_64__) && DEBUG
void
kprint_state(x86_saved_state64_t *saved_state)
{
	kprintf("current_cpu_datap() 0x%lx\n", (uintptr_t)current_cpu_datap());
	kprintf("Current GS base MSR 0x%llx\n", rdmsr64(MSR_IA32_GS_BASE));
	kprintf("Kernel GS base MSR 0x%llx\n", rdmsr64(MSR_IA32_KERNEL_GS_BASE));
	kprintf("state at 0x%lx:\n", (uintptr_t) saved_state);

	kprintf("      rdi    0x%llx\n", saved_state->rdi);
	kprintf("      rsi    0x%llx\n", saved_state->rsi);
	kprintf("      rdx    0x%llx\n", saved_state->rdx);
	kprintf("      r10    0x%llx\n", saved_state->r10);
	kprintf("      r8     0x%llx\n", saved_state->r8);
	kprintf("      r9     0x%llx\n", saved_state->r9);

	kprintf("      cr2    0x%llx\n", saved_state->cr2);
	kprintf("real  cr2    0x%lx\n", get_cr2());
	kprintf("      r15    0x%llx\n", saved_state->r15);
	kprintf("      r14    0x%llx\n", saved_state->r14);
	kprintf("      r13    0x%llx\n", saved_state->r13);
	kprintf("      r12    0x%llx\n", saved_state->r12);
	kprintf("      r11    0x%llx\n", saved_state->r11);
	kprintf("      rbp    0x%llx\n", saved_state->rbp);
	kprintf("      rbx    0x%llx\n", saved_state->rbx);
	kprintf("      rcx    0x%llx\n", saved_state->rcx);
	kprintf("      rax    0x%llx\n", saved_state->rax);

	kprintf("      gs     0x%x\n", saved_state->gs);
	kprintf("      fs     0x%x\n", saved_state->fs);

	kprintf("  isf.trapno  0x%x\n", saved_state->isf.trapno);
	kprintf("  isf._pad    0x%x\n", saved_state->isf._pad);
	kprintf("  isf.trapfn  0x%llx\n", saved_state->isf.trapfn);
	kprintf("  isf.err     0x%llx\n", saved_state->isf.err);
	kprintf("  isf.rip     0x%llx\n", saved_state->isf.rip);
	kprintf("  isf.cs      0x%llx\n", saved_state->isf.cs);
	kprintf("  isf.rflags  0x%llx\n", saved_state->isf.rflags);
	kprintf("  isf.rsp     0x%llx\n", saved_state->isf.rsp);
	kprintf("  isf.ss      0x%llx\n", saved_state->isf.ss);
}
#endif


/*
 * Non-zero indicates the interrupt latency assert is enabled, with the
 * threshold capped at this value in absolute time units.
 */

uint64_t interrupt_latency_cap = 0;
boolean_t ilat_assert = FALSE;

void
interrupt_latency_tracker_setup(void)
{
	uint32_t ilat_cap_us;
	if (PE_parse_boot_argn("interrupt_latency_cap_us", &ilat_cap_us, sizeof(ilat_cap_us))) {
		interrupt_latency_cap = ilat_cap_us * NSEC_PER_USEC;
		nanoseconds_to_absolutetime(interrupt_latency_cap, &interrupt_latency_cap);
	} else {
		interrupt_latency_cap = LockTimeOut;
	}
	PE_parse_boot_argn("-interrupt_latency_assert_enable", &ilat_assert, sizeof(ilat_assert));
}
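
/*
 * Example boot-args (hypothetical values): booting with
 * "interrupt_latency_cap_us=500 -interrupt_latency_assert_enable" caps the
 * tolerated interrupt latency at 500us (converted to absolute time above)
 * and arms the panic-on-overrun assert checked in interrupt() below.
 */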

void
interrupt_reset_latency_stats(void)
{
	uint32_t i;
	for (i = 0; i < real_ncpus; i++) {
		cpu_data_ptr[i]->cpu_max_observed_int_latency =
		    cpu_data_ptr[i]->cpu_max_observed_int_latency_vector = 0;
	}
}

void
interrupt_populate_latency_stats(char *buf, unsigned bufsize)
{
	uint32_t i, tcpu = ~0;
	uint64_t cur_max = 0;

	for (i = 0; i < real_ncpus; i++) {
		if (cur_max < cpu_data_ptr[i]->cpu_max_observed_int_latency) {
			cur_max = cpu_data_ptr[i]->cpu_max_observed_int_latency;
			tcpu = i;
		}
	}

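	/* Report the worst-case CPU as "<cpu> <vector> <max latency>" in hex;
	 * the latency is an absolute-time delta (see the snprintf below). */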
	if (tcpu < real_ncpus) {
		snprintf(buf, bufsize, "0x%x 0x%x 0x%llx", tcpu, cpu_data_ptr[tcpu]->cpu_max_observed_int_latency_vector, cpu_data_ptr[tcpu]->cpu_max_observed_int_latency);
	}
}

uint32_t interrupt_timer_coalescing_enabled = 1;
uint64_t interrupt_coalesced_timers;

/*
 * Handle interrupts:
 *  - local APIC interrupts (IPIs, timers, etc) are handled by the kernel,
 *  - device interrupts go to the platform expert.
 */
void
interrupt(x86_saved_state_t *state)
{
	uint64_t rip;
	uint64_t rsp;
	int interrupt_num;
	boolean_t user_mode = FALSE;
	int ipl;
	int cnum = cpu_number();
	cpu_data_t *cdp = cpu_data_ptr[cnum];
	int itype = DBG_INTR_TYPE_UNKNOWN;
	int handled;


	x86_saved_state64_t *state64 = saved_state64(state);
	rip = state64->isf.rip;
	rsp = state64->isf.rsp;
	interrupt_num = state64->isf.trapno;
	if (state64->isf.cs & 0x03) {
		user_mode = TRUE;
	}

#if DEVELOPMENT || DEBUG
	uint64_t frameptr = is_saved_state64(state) ? state64->rbp : saved_state32(state)->ebp;
	uint32_t traptrace_index = traptrace_start(interrupt_num, rip, mach_absolute_time(), frameptr);
#endif

	if (cpu_data_ptr[cnum]->lcpu.package->num_idle == topoParms.nLThreadsPerPackage) {
		cpu_data_ptr[cnum]->cpu_hwIntpexits[interrupt_num]++;
	}

	if (interrupt_num == (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_INTERPROCESSOR_INTERRUPT)) {
		itype = DBG_INTR_TYPE_IPI;
	} else if (interrupt_num == (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_TIMER_INTERRUPT)) {
		itype = DBG_INTR_TYPE_TIMER;
	} else {
		itype = DBG_INTR_TYPE_OTHER;
	}

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
	    interrupt_num,
	    (user_mode ? rip : VM_KERNEL_UNSLIDE(rip)),
	    user_mode, itype, 0);

	SCHED_STATS_INC(interrupt_count);

#if CONFIG_TELEMETRY
	if (telemetry_needs_record) {
		telemetry_mark_curthread(user_mode, FALSE);
	}
#endif

	ipl = get_preemption_level();

	/*
	 * Handle local APIC interrupts
	 * else call platform expert for devices.
	 */
	handled = lapic_interrupt(interrupt_num, state);

	if (!handled) {
		if (interrupt_num == (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_CMCI_INTERRUPT)) {
			/*
			 * CMCI can be signalled on any logical processor, and the kexts
			 * that implement handling CMCI use IOKit to register handlers for
			 * the CMCI vector, so if we see a CMCI, do not encode a CPU
			 * number in bits 8:31 (since the vector is the same regardless of
			 * the handling CPU).
			 */
			PE_incoming_interrupt(interrupt_num);
		} else if (cnum <= lapic_max_interrupt_cpunum) {
			PE_incoming_interrupt((cnum << 8) | interrupt_num);
		}
	}

	if (__improbable(get_preemption_level() != ipl)) {
		panic("Preemption level altered by interrupt vector 0x%x: initial 0x%x, final: 0x%x", interrupt_num, ipl, get_preemption_level());
	}


	if (__improbable(cdp->cpu_nested_istack)) {
		cdp->cpu_nested_istack_events++;
	} else {
		uint64_t ctime = mach_absolute_time();
		uint64_t int_latency = ctime - cdp->cpu_int_event_time;
		uint64_t esdeadline, ehdeadline;
		/* Attempt to process deferred timers in the context of
		 * this interrupt, unless interrupt time has already exceeded
		 * TCOAL_ILAT_THRESHOLD.
		 */
#define TCOAL_ILAT_THRESHOLD (30000ULL)

		if ((int_latency < TCOAL_ILAT_THRESHOLD) &&
		    interrupt_timer_coalescing_enabled) {
			esdeadline = cdp->rtclock_timer.queue.earliest_soft_deadline;
			ehdeadline = cdp->rtclock_timer.deadline;
			if ((ctime >= esdeadline) && (ctime < ehdeadline)) {
				interrupt_coalesced_timers++;
				TCOAL_DEBUG(0x88880000 | DBG_FUNC_START, ctime, esdeadline, ehdeadline, interrupt_coalesced_timers, 0);
				rtclock_intr(state);
				TCOAL_DEBUG(0x88880000 | DBG_FUNC_END, ctime, esdeadline, interrupt_coalesced_timers, 0, 0);
			} else {
				TCOAL_DEBUG(0x77770000, ctime, cdp->rtclock_timer.queue.earliest_soft_deadline, cdp->rtclock_timer.deadline, interrupt_coalesced_timers, 0);
			}
		}

		if (__improbable(ilat_assert && (int_latency > interrupt_latency_cap) && !machine_timeout_suspended())) {
			panic("Interrupt vector 0x%x exceeded interrupt latency threshold, 0x%llx absolute time delta, prior signals: 0x%x, current signals: 0x%x", interrupt_num, int_latency, cdp->cpu_prior_signals, cdp->cpu_signals);
		}

		if (__improbable(int_latency > cdp->cpu_max_observed_int_latency)) {
			cdp->cpu_max_observed_int_latency = int_latency;
			cdp->cpu_max_observed_int_latency_vector = interrupt_num;
		}
	}

	/*
	 * Having serviced the interrupt first, look at the interrupted stack depth.
	 */
	if (!user_mode) {
		uint64_t depth = cdp->cpu_kernel_stack
		    + sizeof(struct thread_kernel_state)
		    + sizeof(struct i386_exception_link *)
		    - rsp;
		if (__improbable(depth > kernel_stack_depth_max)) {
			kernel_stack_depth_max = (vm_offset_t)depth;
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
				(long) depth, (long) VM_KERNEL_UNSLIDE(rip), 0, 0, 0);
		}
	}

	if (cnum == master_cpu) {
		entropy_collect();
	}

#if KPERF
	kperf_interrupt();
#endif /* KPERF */

	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END,
	    interrupt_num);

	assert(ml_get_interrupts_enabled() == FALSE);

#if DEVELOPMENT || DEBUG
	if (traptrace_index != TRAPTRACE_INVALID_INDEX) {
		traptrace_end(traptrace_index, mach_absolute_time());
	}
#endif
}

static inline void
reset_dr7(void)
{
	long dr7 = 0x400; /* magic dr7 reset value; 32 bit on i386, 64 bit on x86_64 */
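	/* 0x400 leaves only bit 10 set, which the SDM defines as reserved,
	 * always-1; all local/global breakpoint enables are cleared. */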
	__asm__ volatile ("mov %0,%%dr7" : : "r" (dr7));
}
#if MACH_KDP
unsigned kdp_has_active_watchpoints = 0;
#define NO_WATCHPOINTS (!kdp_has_active_watchpoints)
#else
#define NO_WATCHPOINTS 1
#endif

static uint32_t bound_chk_violations_event;

static void
telemetry_handle_brk_trap(
	__unused void *tstate,
	uint16_t comment)
{
	if (comment == CLANG_BOUND_CHK_SOFT_TRAP) {
		os_atomic_inc(&bound_chk_violations_event, relaxed);
	}
}

KERNEL_BRK_DESCRIPTOR_DEFINE(clang_desc,
    .type = KERNEL_BRK_TYPE_CLANG,
    .base = CLANG_TRAPS_X86_START,
    .max = CLANG_TRAPS_X86_END,
    .options = KERNEL_BRK_UNRECOVERABLE,
    .handle_breakpoint = NULL);

KERNEL_BRK_DESCRIPTOR_DEFINE(telemetry_desc,
    .type = KERNEL_BRK_TYPE_TELEMETRY,
    .base = TELEMETRY_TRAPS_START,
    .max = TELEMETRY_TRAPS_END,
    .options = KERNEL_BRK_RECOVERABLE | KERNEL_BRK_CORE_ANALYTICS,
    .handle_breakpoint = telemetry_handle_brk_trap);

KERNEL_BRK_DESCRIPTOR_DEFINE(libcxx_desc,
    .type = KERNEL_BRK_TYPE_LIBCXX,
    .base = LIBCXX_TRAPS_START,
    .max = LIBCXX_TRAPS_END,
    .options = KERNEL_BRK_UNRECOVERABLE,
    .handle_breakpoint = NULL);

static bool
handle_kernel_breakpoint(x86_saved_state64_t *state)
{
	uint16_t comment;
	const struct kernel_brk_descriptor *desc;
	uint8_t inst_buf[8];
	uint32_t prefix16 = 0x80B90F67; /* Encoding prefix for ud1 <16-bit code>(%eax), %eax */
	uint32_t prefix8 = 0x40B90F67;  /* Encoding prefix for ud1 <8-bit code>(%eax), %eax */
	bool found_prefix8 = false;

	vm_size_t sz = ml_nofault_copy(state->isf.rip, (vm_offset_t)inst_buf, sizeof(inst_buf));
	if (sz != sizeof(inst_buf)) {
		return false;
	}

	if (bcmp(inst_buf, &prefix16, sizeof(prefix16)) == 0) {
		/* The two bytes following the prefix are our code */
		comment = inst_buf[5] << 8 | inst_buf[4];
	} else if (bcmp(inst_buf, &prefix8, sizeof(prefix8)) == 0) {
		/* The one byte following the prefix is our code */
		found_prefix8 = true;
		comment = inst_buf[4];
	} else {
		return false;
	}

	desc = find_brk_descriptor_by_comment(comment);

	if (!desc) {
		return false;
	}

	if (desc->options & KERNEL_BRK_TELEMETRY_OPTIONS) {
		telemetry_kernel_brk(desc->type, desc->options, (void *)state, comment);
	}

	if (desc->handle_breakpoint) {
		desc->handle_breakpoint(state, comment); /* May trigger panic */
	}

	/* Still alive? Check if we should recover. */
	if (desc->options & KERNEL_BRK_RECOVERABLE) {
		/* ud1 can be five or eight bytes long, depending on the prefix */
		set_recovery_ip(state, state->isf.rip + (found_prefix8 ? 5 : 8));
		return true;
	}

	return false;
}
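
/*
 * Byte-level view of the two ud1 forms matched above (a sketch; the comment
 * values 0x1234 and 0x2a are hypothetical). The uint32_t constants compare
 * equal to the leading instruction bytes because x86 is little-endian, so
 * 0x80B90F67 in memory reads as 67 0F B9 80:
 *
 *   67 0F B9 80 34 12 00 00   ud1 0x1234(%eax), %eax   8 bytes: disp32 modrm,
 *                                                      16-bit comment in the
 *                                                      low half of the disp
 *   67 0F B9 40 2A            ud1 0x2a(%eax), %eax     5 bytes: disp8 modrm,
 *                                                      8-bit comment
 *
 * which is why recovery above advances %rip by 8 or 5.
 */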

/*
 * Trap from kernel mode. Only page-fault errors are recoverable,
 * and then only in special circumstances. All other errors are
 * fatal. Return value indicates if trap was handled.
 */

void
kernel_trap(
	x86_saved_state_t *state,
	uintptr_t *lo_spp)
{
	x86_saved_state64_t *saved_state;
	int code;
	user_addr_t vaddr;
	int type;
	vm_map_t map = 0; /* protected by T_PAGE_FAULT */
	kern_return_t result = KERN_FAILURE;
	kern_return_t fault_result = KERN_SUCCESS;
	thread_t thread;
	boolean_t intr;
	vm_prot_t prot;
	struct recovery *rp;
	vm_offset_t kern_ip;
	int is_user;
	int trap_pl = get_preemption_level();

	thread = current_thread();

	if (__improbable(is_saved_state32(state))) {
		panic("kernel_trap(%p) with 32-bit state", state);
	}
	saved_state = saved_state64(state);

	/* Record cpu where state was captured */
	saved_state->isf.cpu = cpu_number();

	vaddr = (user_addr_t)saved_state->cr2;
	type = saved_state->isf.trapno;
	code = (int)(saved_state->isf.err & 0xffff);
	intr = (saved_state->isf.rflags & EFL_IF) != 0; /* state of ints at trap */
	kern_ip = (vm_offset_t)saved_state->isf.rip;

	is_user = (vaddr < VM_MAX_USER_PAGE_ADDRESS);

#if DEVELOPMENT || DEBUG
	uint32_t traptrace_index = traptrace_start(type, kern_ip, mach_absolute_time(), saved_state->rbp);
#endif

#if CONFIG_DTRACE
	/*
	 * Is there a DTrace hook?
	 */
	if (__improbable(tempDTraceTrapHook != NULL)) {
		if (tempDTraceTrapHook(type, state, lo_spp, 0) == KERN_SUCCESS) {
			/*
			 * If it succeeds, we are done...
			 */
			goto common_return;
		}
	}

	/* Handle traps originated from probe context. */
	if (thread != THREAD_NULL && thread->t_dtrace_inprobe) {
		if (dtrace_handle_trap(type, state)) {
			goto common_return;
		}
	}

#endif /* CONFIG_DTRACE */

	/*
	 * We come here with interrupts off, as we don't want to recurse
	 * on preemption below; but we do want to re-enable interrupts
	 * as soon as we possibly can, to hold latency down.
	 */
	if (__improbable(T_PREEMPT == type)) {
		ast_taken_kernel();

		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    (MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
		    0, 0, 0, VM_KERNEL_UNSLIDE(kern_ip), 0);

		goto common_return;
	}

	user_addr_t kd_vaddr = is_user ? vaddr : VM_KERNEL_UNSLIDE(vaddr);
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    (MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
	    (unsigned)(kd_vaddr >> 32), (unsigned)kd_vaddr, is_user,
	    VM_KERNEL_UNSLIDE(kern_ip), 0);


	if (T_PAGE_FAULT == type) {
		/*
		 * Assume we're faulting in the kernel map.
		 */
		map = kernel_map;

		if (__probable((thread != THREAD_NULL) && (thread->map != kernel_map) &&
		    (vaddr < VM_MAX_USER_PAGE_ADDRESS))) {
			/* fault occurred in userspace */
			map = thread->map;

			/* Intercept a potential Supervisor Mode Execute
			 * Protection fault. These criteria identify
			 * both NX faults and SMEP faults, but both
			 * are fatal. We avoid checking PTEs (racy).
			 * (The VM could just redrive a SMEP fault, hence
			 * the intercept).
			 */
			if (__improbable((code == (T_PF_PROT | T_PF_EXECUTE)) &&
			    (pmap_smep_enabled) && (saved_state->isf.rip == vaddr))) {
				goto debugger_entry;
			}

			/*
			 * Additionally check for SMAP faults...
			 * which are characterized by page-present and
			 * the AC bit unset (i.e. not from copyin/out path).
			 */
			if (__improbable(code & T_PF_PROT &&
			    pmap_smap_enabled &&
			    (saved_state->isf.rflags & EFL_AC) == 0)) {
				goto debugger_entry;
			}

			/*
			 * If we're not sharing cr3 with the user
			 * and we faulted in copyio,
			 * then switch cr3 here and dismiss the fault.
			 */
			if (no_shared_cr3 &&
			    (thread->machine.specFlags & CopyIOActive) &&
			    map->pmap->pm_cr3 != get_cr3_base()) {
				pmap_assert(current_cpu_datap()->cpu_pmap_pcid_enabled == FALSE);
				set_cr3_raw(map->pmap->pm_cr3);
				return;
			}
			if (__improbable(vaddr < PAGE_SIZE) &&
			    ((thread->machine.specFlags & CopyIOActive) == 0)) {
				goto debugger_entry;
			}
		}
	}

	(void) ml_set_interrupts_enabled(intr);

	switch (type) {
	case T_NO_FPU:
		fpnoextflt();
		goto common_return;

	case T_FPU_FAULT:
		fpextovrflt();
		goto common_return;

	case T_FLOATING_POINT_ERROR:
		fpexterrflt();
		goto common_return;

	case T_SSE_FLOAT_ERROR:
		fpSSEexterrflt();
		goto common_return;

	case T_INVALID_OPCODE:
		if (handle_kernel_breakpoint(saved_state)) {
			goto common_return;
		}
		fpUDflt(kern_ip);
		goto debugger_entry;

	case T_DEBUG:
		/*
		 * Re-enable LBR tracing for core/panic files if necessary.
		 * i386_lbr_enable confirms LBR should be re-enabled.
		 */
		i386_lbr_enable();
		if ((saved_state->isf.rflags & EFL_TF) == 0 && NO_WATCHPOINTS) {
			/* We've somehow encountered a debug
			 * register match that does not belong
			 * to the kernel debugger.
			 * This isn't supposed to happen.
			 */
			reset_dr7();
			goto common_return;
		}
		goto debugger_entry;
	case T_INT3:
		goto debugger_entry;
	case T_PAGE_FAULT:

#if CONFIG_DTRACE
		if (thread != THREAD_NULL && thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */
			if (dtrace_tally_fault(vaddr)) { /* Should a fault under dtrace be ignored? */
				/*
				 * DTrace has "anticipated" the possibility of this fault, and has
				 * established the suitable recovery state. Drop down now into the
				 * recovery handling code in "case T_GENERAL_PROTECTION:".
				 */
				goto FALL_THROUGH;
			}
		}
#endif /* CONFIG_DTRACE */

		prot = VM_PROT_READ;

		if (code & T_PF_WRITE) {
			prot |= VM_PROT_WRITE;
		}
		if (code & T_PF_EXECUTE) {
			prot |= VM_PROT_EXECUTE;
		}

		fault_result = result = vm_fault(map,
		    vaddr,
		    prot,
		    FALSE, VM_KERN_MEMORY_NONE,
		    THREAD_UNINT, NULL, 0);

		if (result == KERN_SUCCESS) {
			goto common_return;
		}
		/*
		 * fall through
		 */
#if CONFIG_DTRACE
FALL_THROUGH:
#endif /* CONFIG_DTRACE */

	case T_GENERAL_PROTECTION:
		/*
		 * If there is a failure recovery address
		 * for this fault, go there.
		 */
		for (rp = recover_table; rp < recover_table_end; rp++) {
			if (kern_ip == rp->fault_addr) {
				set_recovery_ip(saved_state, rp->recover_addr);
				goto common_return;
			}
		}

		/*
		 * Unanticipated page-fault errors in kernel
		 * should not happen.
		 *
		 * fall through...
		 */
		OS_FALLTHROUGH;
	default:
		/*
		 * Exception 15 is reserved but some chips may generate it
		 * spuriously. Seen at startup on AMD Athlon-64.
		 */
		if (type == 15) {
			kprintf("kernel_trap() ignoring spurious trap 15\n");
			goto common_return;
		}
debugger_entry:
		/* Ensure that the i386_kernel_state at the base of the
		 * current thread's stack (if any) is synchronized with the
		 * context at the moment of the trap, to facilitate
		 * access through the debugger.
		 */
		sync_iss_to_iks(state);
#if MACH_KDP
		if (kdp_i386_trap(type, saved_state, result, (vm_offset_t)vaddr)) {
			goto common_return;
		}
#endif
	}
	if (type == T_PAGE_FAULT) {
		panic_fault_address = vaddr;
	}
	pal_cli();
	panic_trap(saved_state, trap_pl, fault_result);
	/*
	 * NO RETURN
	 */

common_return:
#if DEVELOPMENT || DEBUG
	if (traptrace_index != TRAPTRACE_INVALID_INDEX) {
		traptrace_end(traptrace_index, mach_absolute_time());
	}
#endif
	return;
}

static void
set_recovery_ip(x86_saved_state64_t *saved_state, vm_offset_t ip)
{
	saved_state->isf.rip = ip;
}

static void
panic_trap(x86_saved_state64_t *regs, uint32_t pl, kern_return_t fault_result)
{
	const char *trapname = "Unknown";
	pal_cr_t cr0, cr2, cr3, cr4;
	boolean_t potential_smep_fault = FALSE, potential_kernel_NX_fault = FALSE;
	boolean_t potential_smap_fault = FALSE;

	pal_get_control_registers(&cr0, &cr2, &cr3, &cr4);
	assert(ml_get_interrupts_enabled() == FALSE);
	current_cpu_datap()->cpu_fatal_trap_state = regs;
	/*
	 * Issue an I/O port read if one has been requested - this is an
	 * event logic analyzers can use as a trigger point.
	 */
	panic_notify();

	kprintf("CPU %d panic trap number 0x%x, rip 0x%016llx\n",
	    cpu_number(), regs->isf.trapno, regs->isf.rip);
	kprintf("cr0 0x%016llx cr2 0x%016llx cr3 0x%016llx cr4 0x%016llx\n",
	    cr0, cr2, cr3, cr4);

	if (regs->isf.trapno < TRAP_TYPES) {
		trapname = trap_type[regs->isf.trapno];
	}

	if ((regs->isf.trapno == T_PAGE_FAULT) && (regs->isf.err == (T_PF_PROT | T_PF_EXECUTE)) && (regs->isf.rip == regs->cr2)) {
		if (pmap_smep_enabled && (regs->isf.rip < VM_MAX_USER_PAGE_ADDRESS)) {
			potential_smep_fault = TRUE;
		} else if (regs->isf.rip >= VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
			potential_kernel_NX_fault = TRUE;
		}
	} else if (pmap_smap_enabled &&
	    regs->isf.trapno == T_PAGE_FAULT &&
	    regs->isf.err & T_PF_PROT &&
	    regs->cr2 < VM_MAX_USER_PAGE_ADDRESS &&
	    regs->isf.rip >= VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
		potential_smap_fault = TRUE;
	}

#undef panic
	panic("Kernel trap at 0x%016llx, type %d=%s, registers:\n"
	    "CR0: 0x%016llx, CR2: 0x%016llx, CR3: 0x%016llx, CR4: 0x%016llx\n"
	    "RAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n"
	    "RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n"
	    "R8:  0x%016llx, R9:  0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n"
	    "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n"
	    "RFL: 0x%016llx, RIP: 0x%016llx, CS:  0x%016llx, SS:  0x%016llx\n"
	    "Fault CR2: 0x%016llx, Error code: 0x%016llx, Fault CPU: 0x%x%s%s%s%s, PL: %d, VF: %d\n",
	    regs->isf.rip, regs->isf.trapno, trapname,
	    cr0, cr2, cr3, cr4,
	    regs->rax, regs->rbx, regs->rcx, regs->rdx,
	    regs->isf.rsp, regs->rbp, regs->rsi, regs->rdi,
	    regs->r8, regs->r9, regs->r10, regs->r11,
	    regs->r12, regs->r13, regs->r14, regs->r15,
	    regs->isf.rflags, regs->isf.rip, regs->isf.cs & 0xFFFF,
	    regs->isf.ss & 0xFFFF, regs->cr2, regs->isf.err, regs->isf.cpu,
	    virtualized ? " VMM" : "",
	    potential_kernel_NX_fault ? " Kernel NX fault" : "",
	    potential_smep_fault ? " SMEP/User NX fault" : "",
	    potential_smap_fault ? " SMAP fault" : "",
	    pl,
	    fault_result);
}

#if CONFIG_DTRACE
extern kern_return_t dtrace_user_probe(x86_saved_state_t *);
#endif

#if DEBUG
uint32_t fsigs[2];
uint32_t fsigns, fsigcs;
#endif

/*
 * Trap from user mode.
 */
void
user_trap(
	x86_saved_state_t *saved_state)
{
	int exc;
	int err;
	mach_exception_code_t code;
	mach_exception_subcode_t subcode;
	int type;
	user_addr_t vaddr;
	vm_prot_t prot;
	thread_t thread = current_thread();
	kern_return_t kret;
	user_addr_t rip;
	unsigned long dr6 = 0; /* 32 bit for i386, 64 bit for x86_64 */
	int current_cpu = cpu_number();
#if DEVELOPMENT || DEBUG
	bool inspect_cacheline = false;
	uint32_t traptrace_index;
#endif
	assert((is_saved_state32(saved_state) && !thread_is_64bit_addr(thread)) ||
	    (is_saved_state64(saved_state) && thread_is_64bit_addr(thread)));

	if (is_saved_state64(saved_state)) {
		x86_saved_state64_t *regs;

		regs = saved_state64(saved_state);

		/* Record cpu where state was captured */
		regs->isf.cpu = current_cpu;

		type = regs->isf.trapno;
		err = (int)regs->isf.err & 0xffff;
		vaddr = (user_addr_t)regs->cr2;
		rip = (user_addr_t)regs->isf.rip;
#if DEVELOPMENT || DEBUG
		traptrace_index = traptrace_start(type, rip, mach_absolute_time(), regs->rbp);
#endif
	} else {
		x86_saved_state32_t *regs;

		regs = saved_state32(saved_state);

		/* Record cpu where state was captured */
		regs->cpu = current_cpu;

		type = regs->trapno;
		err = regs->err & 0xffff;
		vaddr = (user_addr_t)regs->cr2;
		rip = (user_addr_t)regs->eip;
#if DEVELOPMENT || DEBUG
		traptrace_index = traptrace_start(type, rip, mach_absolute_time(), regs->ebp);
#endif
	}

#if DEVELOPMENT || DEBUG
	/*
	 * Copy the cacheline of code into the thread's instruction stream save area
	 * before enabling interrupts (the assumption is that we have not otherwise faulted or
	 * trapped since the original cache line stores). If the saved code is not valid,
	 * we'll catch it below when we process the copyin() for unhandled faults.
	 */
	if (thread->machine.insn_copy_optout == false &&
	    (type == T_PAGE_FAULT || type == T_INVALID_OPCODE || type == T_GENERAL_PROTECTION)) {
#define CACHELINE_SIZE 64
		THREAD_TO_PCB(thread)->insn_cacheline[CACHELINE_SIZE] = (uint8_t)(rip & (CACHELINE_SIZE - 1));
		bcopy(&cpu_shadowp(current_cpu)->cpu_rtimes[0],
		    &THREAD_TO_PCB(thread)->insn_cacheline[0],
		    sizeof(THREAD_TO_PCB(thread)->insn_cacheline) - 1);
		inspect_cacheline = true;
	}
#endif

	if (type == T_DEBUG) {
		if (thread->machine.ids) {
			unsigned long clear = 0;
			/* Stash and clear this processor's DR6 value, in the event
			 * this was a debug register match
			 */
			__asm__ volatile ("mov %%db6, %0" : "=r" (dr6));
			__asm__ volatile ("mov %0, %%db6" : : "r" (clear));
		}
		/* [Re]Enable LBRs *BEFORE* enabling interrupts to ensure we hit the right CPU */
		i386_lbr_enable();
	}

	if (type == T_PAGE_FAULT) {
		thread_reset_pcs_will_fault(thread);
	}

	pal_sti();

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    (MACHDBG_CODE(DBG_MACH_EXCP_UTRAP_x86, type)) | DBG_FUNC_NONE,
	    (unsigned)(vaddr >> 32), (unsigned)vaddr,
	    (unsigned)(rip >> 32), (unsigned)rip, 0);

	code = 0;
	subcode = 0;
	exc = 0;

#if CONFIG_DTRACE
	/*
	 * DTrace does not consume all user traps, only INT_3's for now.
	 * Avoid needlessly calling tempDTraceTrapHook here, and let the
	 * INT_3 case handle them.
	 */
#endif

	DEBUG_KPRINT_SYSCALL_MASK(1,
	    "user_trap: type=0x%x(%s) err=0x%x cr2=%p rip=%p\n",
	    type, trap_type[type], err, (void *)(long) vaddr, (void *)(long) rip);

	switch (type) {
	case T_DIVIDE_ERROR:
		exc = EXC_ARITHMETIC;
		code = EXC_I386_DIV;
		break;

	case T_DEBUG:
	{
		pcb_t pcb;
		/*
		 * Update the PCB with this processor's DR6 value
		 * in the event this was a debug register match.
		 */
		pcb = THREAD_TO_PCB(thread);
		if (pcb->ids) {
			/*
			 * We can get and set the status register
			 * in 32-bit mode even on a 64-bit thread
			 * because the high order bits are not
			 * used on x86_64.
			 */
			if (thread_is_64bit_addr(thread)) {
				x86_debug_state64_t *ids = pcb->ids;
				ids->dr6 = dr6;
			} else { /* 32 bit thread */
				x86_debug_state32_t *ids = pcb->ids;
				ids->dr6 = (uint32_t) dr6;
			}
		}
		exc = EXC_BREAKPOINT;
		code = EXC_I386_SGL;
		break;
	}
	case T_INT3:
#if CONFIG_DTRACE
		if (dtrace_user_probe(saved_state) == KERN_SUCCESS) {
			return; /* If it succeeds, we are done... */
		}
#endif
		exc = EXC_BREAKPOINT;
		code = EXC_I386_BPT;
		break;

	case T_OVERFLOW:
		exc = EXC_ARITHMETIC;
		code = EXC_I386_INTO;
		break;

	case T_OUT_OF_BOUNDS:
		exc = EXC_SOFTWARE;
		code = EXC_I386_BOUND;
		break;

	case T_INVALID_OPCODE:
		if (fpUDflt(rip) == 1) {
			exc = EXC_BAD_INSTRUCTION;
			code = EXC_I386_INVOP;
		}
		break;

	case T_NO_FPU:
		fpnoextflt();
		break;

	case T_FPU_FAULT:
		fpextovrflt();
		/*
		 * Raise exception.
		 */
		exc = EXC_BAD_ACCESS;
		code = VM_PROT_READ | VM_PROT_EXECUTE;
		subcode = 0;
		break;

	case T_INVALID_TSS: /* invalid TSS == iret with NT flag set */
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_INVTSSFLT;
		subcode = err;
		break;

	case T_SEGMENT_NOT_PRESENT:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_SEGNPFLT;
		subcode = err;
		break;

	case T_STACK_FAULT:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_STKFLT;
		subcode = err;
		break;

	case T_GENERAL_PROTECTION:
		/*
		 * There's a wide range of circumstances which generate this
		 * class of exception. From user-space, many involve bad
		 * addresses (such as a non-canonical 64-bit address).
		 * So we map this to EXC_BAD_ACCESS (and thereby SIGSEGV).
		 * The trouble is cr2 doesn't contain the faulting address;
		 * we'd need to decode the faulting instruction to really
		 * determine this. We'll leave that to debuggers.
		 * However, attempted execution of privileged instructions
		 * (e.g. cli) also generates GP faults, so we map these to
		 * EXC_BAD_ACCESS (and thence SIGSEGV) as well - rather than
		 * EXC_BAD_INSTRUCTION which is more accurate. We just can't
		 * win!
		 */
		exc = EXC_BAD_ACCESS;
		code = EXC_I386_GPFLT;
		subcode = err;
		break;

	case T_PAGE_FAULT:
	{
		prot = VM_PROT_READ;

		if (err & T_PF_WRITE) {
			prot |= VM_PROT_WRITE;
		}
		if (__improbable(err & T_PF_EXECUTE)) {
			prot |= VM_PROT_EXECUTE;
		}
#if DEVELOPMENT || DEBUG
		bool do_simd_hash = thread_fpsimd_hash_enabled();
		uint32_t fsig = 0;
		fsig = do_simd_hash ? thread_fpsimd_hash(thread) : 0;
#if DEBUG
		fsigs[0] = fsig;
#endif
#endif
		kret = vm_fault(thread->map,
		    vaddr,
		    prot, FALSE, VM_KERN_MEMORY_NONE,
		    THREAD_ABORTSAFE, NULL, 0);
#if DEVELOPMENT || DEBUG
		if (do_simd_hash && fsig) {
			uint32_t fsig2 = thread_fpsimd_hash(thread);
#if DEBUG
			fsigcs++;
			fsigs[1] = fsig2;
#endif
			if (fsig != fsig2) {
				panic("FP/SIMD state hash mismatch across fault thread: %p 0x%x->0x%x", thread, fsig, fsig2);
			}
		} else {
#if DEBUG
			fsigns++;
#endif
		}
#endif
		if (__probable((kret == KERN_SUCCESS) || (kret == KERN_ABORTED))) {
			break;
		} else if (__improbable(kret == KERN_FAILURE)) {
			/*
			 * For a user trap, vm_fault() should never return KERN_FAILURE.
			 * If it does, we're leaking preemption disables somewhere in the kernel.
			 */
			panic("vm_fault() KERN_FAILURE from user fault on thread %p", thread);
		}

		/* PAL debug hook (empty on x86) */
		pal_dbg_page_fault(thread, vaddr, kret);
		exc = EXC_BAD_ACCESS;
		code = kret;
		subcode = vaddr;
	}
	break;

	case T_SSE_FLOAT_ERROR:
		fpSSEexterrflt();
		exc = EXC_ARITHMETIC;
		code = EXC_I386_SSEEXTERR;
		subcode = ((struct x86_fx_thread_state *)thread->machine.ifps)->fx_MXCSR;
		break;


	case T_FLOATING_POINT_ERROR:
		fpexterrflt();
		exc = EXC_ARITHMETIC;
		code = EXC_I386_EXTERR;
		subcode = ((struct x86_fx_thread_state *)thread->machine.ifps)->fx_status;
		break;

	case T_DTRACE_RET:
#if CONFIG_DTRACE
		if (dtrace_user_probe(saved_state) == KERN_SUCCESS) {
			return; /* If it succeeds, we are done... */
		}
#endif
		/*
		 * If we get an INT 0x7f when we do not expect to,
		 * treat it as an illegal instruction.
		 */
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_INVOP;
		break;

	default:
		panic("Unexpected user trap, type %d", type);
	}

	if (type == T_PAGE_FAULT) {
		thread_reset_pcs_done_faulting(thread);
	}

	if (exc != 0) {
		uint16_t cs;
		boolean_t intrs;

		if (is_saved_state64(saved_state)) {
			cs = saved_state64(saved_state)->isf.cs;
		} else {
			cs = saved_state32(saved_state)->cs;
		}

		if (last_branch_enabled_modes == LBR_ENABLED_USERMODE) {
			intrs = ml_set_interrupts_enabled(FALSE);
			/*
			 * This is a bit racy (it's possible for this thread to migrate to another CPU, then
			 * migrate back, but that seems rather rare in practice), but good enough to ensure
			 * the LBRs are saved before proceeding with exception/signal dispatch.
			 */
			if (current_cpu == cpu_number()) {
				i386_lbr_synch(thread);
			}
			ml_set_interrupts_enabled(intrs);
		}

		/*
		 * Do not try to copyin from the instruction stream if the page fault was due
		 * to an access to rip and was unhandled.
		 * Do not deal with cases where %cs != USER[64]_CS.
		 * And of course there's no need to copy the instruction stream if the boot-arg
		 * was set to 0.
		 */
		if (thread->machine.insn_copy_optout == false && insn_copyin_count > 0 &&
		    (cs == USER64_CS || cs == USER_CS) && (type != T_PAGE_FAULT || vaddr != rip)) {
#if DEVELOPMENT || DEBUG
			copy_instruction_stream(thread, rip, type, inspect_cacheline);
#else
			copy_instruction_stream(thread, rip, type);
#endif
		}

#if DEVELOPMENT || DEBUG
		if (traptrace_index != TRAPTRACE_INVALID_INDEX) {
			traptrace_end(traptrace_index, mach_absolute_time());
		}
#endif
		/*
		 * Note: codepaths that directly return from user_trap() have pending
		 * ASTs processed in locore.
		 */
		i386_exception(exc, code, subcode);
		/* NOTREACHED */
	} else {
#if DEVELOPMENT || DEBUG
		if (traptrace_index != TRAPTRACE_INVALID_INDEX) {
			traptrace_end(traptrace_index, mach_absolute_time());
		}
#endif
	}
}

/*
 * Copyin up to x86_INSTRUCTION_STATE_MAX_INSN_BYTES bytes from the page that includes `rip`,
 * ensuring that we stay on the same page, clipping the start or end, as needed.
 * Add the clipped amount back at the start or end, depending on where it fits.
 * Consult the variable populated by the boot-arg `insn_capcnt'.
 */
static __attribute__((noinline)) void
copy_instruction_stream(thread_t thread, uint64_t rip, int __unused trap_code
#if DEVELOPMENT || DEBUG
    , bool inspect_cacheline
#endif
    )
{
#if x86_INSTRUCTION_STATE_MAX_INSN_BYTES > 4096
#error x86_INSTRUCTION_STATE_MAX_INSN_BYTES cannot exceed a page in size.
#endif
	pcb_t pcb = THREAD_TO_PCB(thread);
	vm_map_offset_t pagemask = ~vm_map_page_mask(current_map());
	vm_map_offset_t rip_page = rip & pagemask;
	vm_map_offset_t start_addr;
	vm_map_offset_t insn_offset;
	vm_map_offset_t end_addr = rip + (insn_copyin_count / 2);
	void *stack_buffer;
	int copyin_err = 0;
#if defined(MACH_BSD) && (DEVELOPMENT || DEBUG)
	void *procname;
#endif

#if DEVELOPMENT || DEBUG
	assert(insn_copyin_count <= x86_INSTRUCTION_STATE_MAX_INSN_BYTES);
#else
	if (insn_copyin_count > x86_INSTRUCTION_STATE_MAX_INSN_BYTES ||
	    insn_copyin_count < 64 /* CACHELINE_SIZE */) {
		return;
	}
#endif

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Walloca"
	stack_buffer = __builtin_alloca(insn_copyin_count);
#pragma clang diagnostic pop

	if (rip >= (insn_copyin_count / 2)) {
		start_addr = rip - (insn_copyin_count / 2);
	} else {
		start_addr = 0;
	}

	if (start_addr < rip_page) {
		insn_offset = (insn_copyin_count / 2) - (rip_page - start_addr);
		end_addr += (rip_page - start_addr);
		start_addr = rip_page;
	} else if (end_addr >= (rip_page + (~pagemask + 1))) {
		start_addr -= (end_addr - (rip_page + (~pagemask + 1))); /* Adjust start address backward */
		/* Adjust instruction offset due to start address change */
		insn_offset = (insn_copyin_count / 2) + (end_addr - (rip_page + (~pagemask + 1)));
		end_addr = rip_page + (~pagemask + 1); /* clip to the start of the next page (non-inclusive) */
	} else {
		insn_offset = insn_copyin_count / 2;
	}

	disable_preemption(); /* Prevent copyin from faulting in the instruction stream */
	if (
#if DEVELOPMENT || DEBUG
		(insnstream_force_cacheline_mismatch < 2) &&
#endif
		((end_addr > start_addr) && (copyin_err = copyin(start_addr, stack_buffer, end_addr - start_addr)) == 0)) {
		enable_preemption();

		if (pcb->insn_state == 0) {
			pcb->insn_state = kalloc_data(sizeof(x86_instruction_state_t), Z_WAITOK);
		}

		if (pcb->insn_state != 0) {
			bcopy(stack_buffer, pcb->insn_state->insn_bytes, end_addr - start_addr);
			bzero(&pcb->insn_state->insn_bytes[end_addr - start_addr],
			    insn_copyin_count - (end_addr - start_addr));

			pcb->insn_state->insn_stream_valid_bytes = (int)(end_addr - start_addr);
			pcb->insn_state->insn_offset = (int)insn_offset;

#if DEVELOPMENT || DEBUG
			/* Now try to validate the cacheline we read at early-fault time matches the code
			 * copied in. Before we do that, we have to make sure the buffer contains a valid
			 * cacheline by looking for the 2 sentinel values written in the event the cacheline
			 * could not be copied.
			 */
#define CACHELINE_DATA_NOT_PRESENT 0xdeadc0debeefcafeULL
#define CACHELINE_MASK (CACHELINE_SIZE - 1)

			if (inspect_cacheline &&
			    (*(uint64_t *)(uintptr_t)&pcb->insn_cacheline[0] != CACHELINE_DATA_NOT_PRESENT &&
			    *(uint64_t *)(uintptr_t)&pcb->insn_cacheline[8] != CACHELINE_DATA_NOT_PRESENT)) {
				/*
				 * The position of the cacheline in the instruction buffer is at offset
				 * insn_offset - (rip & CACHELINE_MASK)
				 */
				if (__improbable((rip & CACHELINE_MASK) > insn_offset)) {
					printf("thread %p code cacheline @ %p clipped wrt copied-in code (offset %d)\n",
					    thread, (void *)(rip & ~CACHELINE_MASK), (int)(rip & CACHELINE_MASK));
				} else if (bcmp(&pcb->insn_state->insn_bytes[insn_offset - (rip & CACHELINE_MASK)],
				    &pcb->insn_cacheline[0], CACHELINE_SIZE) != 0
				    || insnstream_force_cacheline_mismatch
				    ) {
#if x86_INSTRUCTION_STATE_CACHELINE_SIZE != CACHELINE_SIZE
#error cacheline size mismatch
#endif
					bcopy(&pcb->insn_cacheline[0], &pcb->insn_state->insn_cacheline[0],
					    x86_INSTRUCTION_STATE_CACHELINE_SIZE);
					/* Mark the instruction stream as being out-of-synch */
					pcb->insn_state->out_of_synch = 1;

					printf("thread %p code cacheline @ %p mismatches with copied-in code [trap 0x%x]\n",
					    thread, (void *)(rip & ~CACHELINE_MASK), trap_code);
					for (int i = 0; i < 8; i++) {
						printf("\t[%d] cl=0x%08llx vs. ci=0x%08llx\n", i, *(uint64_t *)(uintptr_t)&pcb->insn_cacheline[i * 8],
						    *(uint64_t *)(uintptr_t)&pcb->insn_state->insn_bytes[(i * 8) + insn_offset - (rip & CACHELINE_MASK)]);
					}
					if (panic_on_cacheline_mismatch) {
						panic("Cacheline mismatch while processing unhandled exception.");
					}
				} else {
					pcb->insn_state->out_of_synch = 0;
				}
			} else if (inspect_cacheline) {
				printf("thread %p could not capture code cacheline at fault IP %p [offset %d]\n",
				    (void *)thread, (void *)rip, (int)(insn_offset - (rip & CACHELINE_MASK)));
				pcb->insn_state->out_of_synch = 0;
			}
#else
			pcb->insn_state->out_of_synch = 0;
#endif /* DEVELOPMENT || DEBUG */

#if defined(MACH_BSD) && (DEVELOPMENT || DEBUG)
			if (panic_on_trap_procname[0] != 0) {
				task_t task = get_threadtask(thread);
				char procnamebuf[65] = {0};

				if (get_bsdtask_info(task) != NULL) {
					procname = proc_name_address(get_bsdtask_info(task));
					strlcpy(procnamebuf, procname, sizeof(procnamebuf));

					if (strcasecmp(panic_on_trap_procname, procnamebuf) == 0 &&
					    ((1U << trap_code) & panic_on_trap_mask) != 0) {
						panic("Panic requested on trap type 0x%x for process `%s'", trap_code,
						    panic_on_trap_procname);
						/*NORETURN*/
					}
				}
			}
#endif /* MACH_BSD && (DEVELOPMENT || DEBUG) */
		}
	} else {
		enable_preemption();

		pcb->insn_state_copyin_failure_errorcode = copyin_err;
#if DEVELOPMENT || DEBUG
		if (inspect_cacheline && pcb->insn_state == 0) {
			pcb->insn_state = kalloc_data(sizeof(x86_instruction_state_t), Z_WAITOK);
		}
		if (pcb->insn_state != 0) {
			pcb->insn_state->insn_stream_valid_bytes = 0;
			pcb->insn_state->insn_offset = 0;

			if (inspect_cacheline &&
			    (*(uint64_t *)(uintptr_t)&pcb->insn_cacheline[0] != CACHELINE_DATA_NOT_PRESENT &&
			    *(uint64_t *)(uintptr_t)&pcb->insn_cacheline[8] != CACHELINE_DATA_NOT_PRESENT)) {
				/*
				 * We can still copy the cacheline into the instruction state structure
				 * if it contains valid data.
				 */
				pcb->insn_state->out_of_synch = 1;
				bcopy(&pcb->insn_cacheline[0], &pcb->insn_state->insn_cacheline[0],
				    x86_INSTRUCTION_STATE_CACHELINE_SIZE);
			}
		}
#endif /* DEVELOPMENT || DEBUG */
	}
}
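
/*
 * Worked example of the window clipping above (hypothetical numbers): with
 * insn_copyin_count = 128 and rip = page_base + 0x20, the naive window
 * [rip - 64, rip + 64) would start 0x20 bytes before the page, so start_addr
 * is clipped up to page_base, the clipped 0x20 bytes are added back at the
 * end (end_addr = page_base + 0x80), and insn_offset becomes 0x20 - i.e.
 * `rip` still maps to the byte at insn_offset within insn_bytes[].
 */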

/*
 * Handle exceptions for i386.
 *
 * If we are an AT bus machine, we must turn off the AST for a
 * delayed floating-point exception.
 *
 * If we are providing floating-point emulation, we may have
 * to retrieve the real register values from the floating point
 * emulator.
 */
void
i386_exception(
	int exc,
	mach_exception_code_t code,
	mach_exception_subcode_t subcode)
{
	mach_exception_data_type_t codes[EXCEPTION_CODE_MAX];

	DEBUG_KPRINT_SYSCALL_MACH("i386_exception: exc=%d code=0x%llx subcode=0x%llx\n",
	    exc, code, subcode);
	codes[0] = code; /* new exception interface */
	codes[1] = subcode;
	exception_triage(exc, codes, 2);
	/*NOTREACHED*/
}


/* Synchronize a thread's x86_kernel_state (if any) with the given
 * x86_saved_state_t obtained from the trap/IPI handler; called in
 * kernel_trap() prior to entering the debugger, and when receiving
 * an "MP_KDP" IPI. Called with null saved_state if an incoming IPI
 * was detected from the kernel while spinning with interrupts masked.
 */

void
sync_iss_to_iks(x86_saved_state_t *saved_state)
{
	struct x86_kernel_state *iks = NULL;
	vm_offset_t kstack;
	boolean_t record_active_regs = FALSE;

	/* The PAL may have a special way to sync registers */
	if (saved_state && saved_state->flavor == THREAD_STATE_NONE) {
		pal_get_kern_regs(saved_state);
	}

	if (current_thread() != NULL &&
	    (kstack = current_thread()->kernel_stack) != 0) {
		x86_saved_state64_t *regs = saved_state64(saved_state);

		iks = STACK_IKS(kstack);

		/* Did we take the trap/interrupt in kernel mode? */
		if (saved_state == NULL || /* NULL => polling in kernel */
		    regs == USER_REGS64(current_thread())) {
			record_active_regs = TRUE;
		} else {
			iks->k_rbx = regs->rbx;
			iks->k_rsp = regs->isf.rsp;
			iks->k_rbp = regs->rbp;
			iks->k_r12 = regs->r12;
			iks->k_r13 = regs->r13;
			iks->k_r14 = regs->r14;
			iks->k_r15 = regs->r15;
			iks->k_rip = regs->isf.rip;
		}
	}

	if (record_active_regs == TRUE) {
		/* Show the trap handler path */
		__asm__ volatile ("movq %%rbx, %0" : "=m" (iks->k_rbx));
		__asm__ volatile ("movq %%rsp, %0" : "=m" (iks->k_rsp));
		__asm__ volatile ("movq %%rbp, %0" : "=m" (iks->k_rbp));
		__asm__ volatile ("movq %%r12, %0" : "=m" (iks->k_r12));
		__asm__ volatile ("movq %%r13, %0" : "=m" (iks->k_r13));
		__asm__ volatile ("movq %%r14, %0" : "=m" (iks->k_r14));
		__asm__ volatile ("movq %%r15, %0" : "=m" (iks->k_r15));
		/* "Current" instruction pointer */
		__asm__ volatile ("leaq 1f(%%rip), %%rax; mov %%rax, %0\n1:"
		    : "=m" (iks->k_rip)
		    :
		    : "rax");
	}
}

/*
 * This is used by the NMI interrupt handler (from mp.c) to
 * unconditionally sync the trap handler context to the IKS
 * irrespective of whether the NMI was fielded in kernel
 * or user space.
 */
void
sync_iss_to_iks_unconditionally(__unused x86_saved_state_t *saved_state)
{
	struct x86_kernel_state *iks;
	vm_offset_t kstack;

	if ((kstack = current_thread()->kernel_stack) != 0) {
		iks = STACK_IKS(kstack);
		/* Display the trap handler path */
		__asm__ volatile ("movq %%rbx, %0" : "=m" (iks->k_rbx));
		__asm__ volatile ("movq %%rsp, %0" : "=m" (iks->k_rsp));
		__asm__ volatile ("movq %%rbp, %0" : "=m" (iks->k_rbp));
		__asm__ volatile ("movq %%r12, %0" : "=m" (iks->k_r12));
		__asm__ volatile ("movq %%r13, %0" : "=m" (iks->k_r13));
		__asm__ volatile ("movq %%r14, %0" : "=m" (iks->k_r14));
		__asm__ volatile ("movq %%r15, %0" : "=m" (iks->k_r15));
		/* "Current" instruction pointer */
		__asm__ volatile ("leaq 1f(%%rip), %%rax; mov %%rax, %0\n1:" : "=m" (iks->k_rip)::"rax");
	}
}

#if DEBUG
#define TERI 1
#endif

#if TERI
extern void thread_exception_return_internal(void) __dead2;

void
thread_exception_return(void)
{
	thread_t thread = current_thread();
	task_t task = current_task();

	ml_set_interrupts_enabled(FALSE);
	if (thread_is_64bit_addr(thread) != task_has_64Bit_addr(task)) {
		panic("Task/thread bitness mismatch %p %p, task: %d, thread: %d",
		    thread, task, thread_is_64bit_addr(thread), task_has_64Bit_addr(task));
	}

	if (thread_is_64bit_addr(thread)) {
		if ((gdt_desc_p(USER64_CS)->access & ACC_PL_U) == 0) {
			panic("64-GDT mismatch %p, descriptor: %p", thread, gdt_desc_p(USER64_CS));
		}
	} else {
		if ((gdt_desc_p(USER_CS)->access & ACC_PL_U) == 0) {
			panic("32-GDT mismatch %p, descriptor: %p", thread, gdt_desc_p(USER_CS));
		}
	}
	assert(get_preemption_level() == 0);
	thread_exception_return_internal();
}
#endif

#if DEVELOPMENT || DEBUG
static int trap_handled;

static void
handle_recoverable_kernel_trap(
	__unused void *tstate,
	uint16_t comment)
{
	assert(comment == TEST_RECOVERABLE_SOFT_TRAP);

	printf("Recoverable trap handled.\n");
	trap_handled = 1;
}

KERNEL_BRK_DESCRIPTOR_DEFINE(test_desc,
    .type = KERNEL_BRK_TYPE_TEST,
    .base = TEST_RECOVERABLE_SOFT_TRAP,
    .max = TEST_RECOVERABLE_SOFT_TRAP,
    .options = KERNEL_BRK_RECOVERABLE,
    .handle_breakpoint = handle_recoverable_kernel_trap);

static int
recoverable_kernel_trap_test(__unused int64_t in, int64_t *out)
{
	ml_recoverable_trap(TEST_RECOVERABLE_SOFT_TRAP);

	*out = trap_handled;
	return 0;
}

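/*
 * Registers the test above as a sysctl-driven self-test on
 * DEVELOPMENT/DEBUG kernels: invoking it from user space (via whatever
 * node name SYSCTL_TEST_REGISTER derives from "recoverable_kernel_trap")
 * fires the soft trap and reports whether the recoverable-breakpoint path
 * in handle_kernel_breakpoint() handled it.
 */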
SYSCTL_TEST_REGISTER(recoverable_kernel_trap, recoverable_kernel_trap_test);
#endif