1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 
59 /*
60  * Hardware trap/fault handler.
61  */
62 
63 #include <mach_kdp.h>
64 #include <mach_ldebug.h>
65 
66 #include <types.h>
67 #include <i386/eflags.h>
68 #include <i386/trap_internal.h>
69 #include <i386/pmap.h>
70 #include <i386/fpu.h>
71 #include <i386/panic_notify.h>
72 #include <i386/lapic.h>
73 
74 #include <mach/exception.h>
75 #include <mach/kern_return.h>
76 #include <mach/vm_param.h>
77 #include <mach/i386/thread_status.h>
78 
79 #include <vm/vm_kern.h>
80 #include <vm/vm_fault.h>
81 #include <vm/vm_map_xnu.h>
82 
83 #include <kern/kern_types.h>
84 #include <kern/processor.h>
85 #include <kern/thread.h>
86 #include <kern/task.h>
87 #include <kern/restartable.h>
88 #include <kern/sched.h>
89 #include <kern/sched_prim.h>
90 #include <kern/exception.h>
91 #include <kern/spl.h>
92 #include <kern/misc_protos.h>
93 #include <kern/debug.h>
94 #if CONFIG_TELEMETRY
95 #include <kern/telemetry.h>
96 #endif
97 #include <kern/zalloc_internal.h>
98 #include <sys/kdebug.h>
99 #include <kperf/kperf.h>
100 #include <prng/random.h>
101 #include <prng/entropy.h>
102 
103 #include <string.h>
104 
105 #include <i386/postcode.h>
106 #include <i386/mp_desc.h>
107 #include <i386/proc_reg.h>
108 #include <i386/machine_routines.h>
109 #if CONFIG_MCA
110 #include <i386/machine_check.h>
111 #endif
112 #include <mach/i386/syscall_sw.h>
113 
114 #include <libkern/OSDebug.h>
115 #include <i386/cpu_threads.h>
116 #include <machine/pal_routines.h>
117 #include <i386/lbr.h>
118 
119 extern void throttle_lowpri_io(int);
120 extern void kprint_state(x86_saved_state64_t *saved_state);
121 #if DEVELOPMENT || DEBUG
122 int insnstream_force_cacheline_mismatch = 0;
123 extern int panic_on_cacheline_mismatch;
124 extern char panic_on_trap_procname[];
125 extern uint32_t panic_on_trap_mask;
126 #endif
127 
128 extern int insn_copyin_count;
129 
130 /*
131  * Forward declarations
132  */
133 static void panic_trap(x86_saved_state64_t *saved_state, const char *trapreason, uint32_t pl, kern_return_t fault_result) __dead2;
134 static void set_recovery_ip(x86_saved_state64_t *saved_state, vm_offset_t ip);
135 #if DEVELOPMENT || DEBUG
136 static __attribute__((noinline)) void copy_instruction_stream(thread_t thread, uint64_t rip, int trap_code, bool inspect_cacheline);
137 #else
138 static __attribute__((noinline)) void copy_instruction_stream(thread_t thread, uint64_t rip, int trap_code);
139 #endif
140 
141 #if CONFIG_DTRACE
142 /* See <rdar://problem/4613924> */
143 perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */
144 
145 extern boolean_t dtrace_tally_fault(user_addr_t);
146 extern boolean_t dtrace_handle_trap(int, x86_saved_state_t *);
147 #endif
148 
149 #ifdef MACH_BSD
150 extern char *   proc_name_address(void *p);
151 #endif /* MACH_BSD */
152 
153 extern boolean_t pmap_smep_enabled;
154 extern boolean_t pmap_smap_enabled;
155 
156 __attribute__((noreturn))
157 void
158 thread_syscall_return(
159 	kern_return_t ret)
160 {
161 	thread_t        thr_act = current_thread();
162 	boolean_t       is_mach;
163 	int             code;
164 
165 	pal_register_cache_state(thr_act, DIRTY);
166 
167 	if (thread_is_64bit_addr(thr_act)) {
168 		x86_saved_state64_t     *regs;
169 
170 		regs = USER_REGS64(thr_act);
171 
172 		code = (int) (regs->rax & SYSCALL_NUMBER_MASK);
173 		is_mach = (regs->rax & SYSCALL_CLASS_MASK)
174 		    == (SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT);
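		/*
		 * On x86_64 the syscall class is carried in the upper bits of
		 * %rax alongside the syscall number (see <mach/i386/syscall_sw.h>).
		 * 32-bit tasks instead pass Mach trap numbers negated, hence the
		 * (code < 0) test in the 32-bit branch below.
		 */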
175 		if (kdebug_enable && is_mach) {
176 			/* Mach trap */
177 			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
178 			    MACHDBG_CODE(DBG_MACH_EXCP_SC, code) | DBG_FUNC_END,
179 			    ret, 0, 0, 0, 0);
180 		}
181 		regs->rax = ret;
182 #if DEBUG
183 		if (is_mach) {
184 			DEBUG_KPRINT_SYSCALL_MACH(
185 				"thread_syscall_return: 64-bit mach ret=%u\n",
186 				ret);
187 		} else {
188 			DEBUG_KPRINT_SYSCALL_UNIX(
189 				"thread_syscall_return: 64-bit unix ret=%u\n",
190 				ret);
191 		}
192 #endif
193 	} else {
194 		x86_saved_state32_t     *regs;
195 
196 		regs = USER_REGS32(thr_act);
197 
198 		code = ((int) regs->eax);
199 		is_mach = (code < 0);
200 		if (kdebug_enable && is_mach) {
201 			/* Mach trap */
202 			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
203 			    MACHDBG_CODE(DBG_MACH_EXCP_SC, -code) | DBG_FUNC_END,
204 			    ret, 0, 0, 0, 0);
205 		}
206 		regs->eax = ret;
207 #if DEBUG
208 		if (is_mach) {
209 			DEBUG_KPRINT_SYSCALL_MACH(
210 				"thread_syscall_return: 32-bit mach ret=%u\n",
211 				ret);
212 		} else {
213 			DEBUG_KPRINT_SYSCALL_UNIX(
214 				"thread_syscall_return: 32-bit unix ret=%u\n",
215 				ret);
216 		}
217 #endif
218 	}
219 
220 #if DEBUG || DEVELOPMENT
221 	kern_allocation_name_t
222 	prior __assert_only = thread_get_kernel_state(thr_act)->allocation_name;
223 	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
224 #endif /* DEBUG || DEVELOPMENT */
225 
226 	throttle_lowpri_io(1);
227 
228 	thread_exception_return();
229 	/*NOTREACHED*/
230 }
231 
232 /*
233  * Fault recovery in copyin/copyout routines.
234  */
235 struct recovery {
236 	uintptr_t       fault_addr;
237 	uintptr_t       recover_addr;
238 };
239 
240 extern struct recovery  recover_table[];
241 extern struct recovery  recover_table_end[];
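/*
 * recover_table is populated by the copyin/copyout routines (in assembly):
 * each entry pairs an instruction address that may legitimately fault with
 * the address at which to resume.  On a kernel-mode page fault or general
 * protection fault, kernel_trap() scans this table and, if the faulting RIP
 * matches an entry, redirects execution to its recovery address (see the
 * T_GENERAL_PROTECTION case below).
 */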
242 
243 const char *    trap_type[] = {TRAP_NAMES};
244 unsigned        TRAP_TYPES = sizeof(trap_type) / sizeof(trap_type[0]);
245 
246 extern void     PE_incoming_interrupt(int interrupt);
247 
248 #if defined(__x86_64__) && DEBUG
249 void
250 kprint_state(x86_saved_state64_t        *saved_state)
251 {
252 	kprintf("current_cpu_datap() 0x%lx\n", (uintptr_t)current_cpu_datap());
253 	kprintf("Current GS base MSR 0x%llx\n", rdmsr64(MSR_IA32_GS_BASE));
254 	kprintf("Kernel  GS base MSR 0x%llx\n", rdmsr64(MSR_IA32_KERNEL_GS_BASE));
255 	kprintf("state at 0x%lx:\n", (uintptr_t) saved_state);
256 
257 	kprintf("      rdi    0x%llx\n", saved_state->rdi);
258 	kprintf("      rsi    0x%llx\n", saved_state->rsi);
259 	kprintf("      rdx    0x%llx\n", saved_state->rdx);
260 	kprintf("      r10    0x%llx\n", saved_state->r10);
261 	kprintf("      r8     0x%llx\n", saved_state->r8);
262 	kprintf("      r9     0x%llx\n", saved_state->r9);
263 
264 	kprintf("      cr2    0x%llx\n", saved_state->cr2);
265 	kprintf("real  cr2    0x%lx\n", get_cr2());
266 	kprintf("      r15    0x%llx\n", saved_state->r15);
267 	kprintf("      r14    0x%llx\n", saved_state->r14);
268 	kprintf("      r13    0x%llx\n", saved_state->r13);
269 	kprintf("      r12    0x%llx\n", saved_state->r12);
270 	kprintf("      r11    0x%llx\n", saved_state->r11);
271 	kprintf("      rbp    0x%llx\n", saved_state->rbp);
272 	kprintf("      rbx    0x%llx\n", saved_state->rbx);
273 	kprintf("      rcx    0x%llx\n", saved_state->rcx);
274 	kprintf("      rax    0x%llx\n", saved_state->rax);
275 
276 	kprintf("      gs     0x%x\n", saved_state->gs);
277 	kprintf("      fs     0x%x\n", saved_state->fs);
278 
279 	kprintf("  isf.trapno 0x%x\n", saved_state->isf.trapno);
280 	kprintf("  isf._pad   0x%x\n", saved_state->isf._pad);
281 	kprintf("  isf.trapfn 0x%llx\n", saved_state->isf.trapfn);
282 	kprintf("  isf.err    0x%llx\n", saved_state->isf.err);
283 	kprintf("  isf.rip    0x%llx\n", saved_state->isf.rip);
284 	kprintf("  isf.cs     0x%llx\n", saved_state->isf.cs);
285 	kprintf("  isf.rflags 0x%llx\n", saved_state->isf.rflags);
286 	kprintf("  isf.rsp    0x%llx\n", saved_state->isf.rsp);
287 	kprintf("  isf.ss     0x%llx\n", saved_state->isf.ss);
288 }
289 #endif
290 
291 
292 /*
293  * Non-zero indicates the interrupt latency assert is enabled, capped
294  * at the given value in absolute time units.
295  */
296 
297 uint64_t interrupt_latency_cap = 0;
298 boolean_t ilat_assert = FALSE;
299 
300 void
301 interrupt_latency_tracker_setup(void)
302 {
303 	uint32_t ilat_cap_us;
304 	if (PE_parse_boot_argn("interrupt_latency_cap_us", &ilat_cap_us, sizeof(ilat_cap_us))) {
305 		interrupt_latency_cap = ilat_cap_us * NSEC_PER_USEC;
306 		nanoseconds_to_absolutetime(interrupt_latency_cap, &interrupt_latency_cap);
307 	} else {
308 		interrupt_latency_cap = LockTimeOut;
309 	}
310 	PE_parse_boot_argn("-interrupt_latency_assert_enable", &ilat_assert, sizeof(ilat_assert));
311 }
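/*
 * Illustrative boot-args: "interrupt_latency_cap_us=500" caps the latency
 * threshold at 500us (converted to absolute time units above), while
 * "-interrupt_latency_assert_enable" arms the panic assertion checked in
 * interrupt(); if no cap is specified, LockTimeOut is used.
 */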
312 
313 void
314 interrupt_reset_latency_stats(void)
315 {
316 	uint32_t i;
317 	for (i = 0; i < real_ncpus; i++) {
318 		cpu_data_ptr[i]->cpu_max_observed_int_latency =
319 		    cpu_data_ptr[i]->cpu_max_observed_int_latency_vector = 0;
320 	}
321 }
322 
323 void
324 interrupt_populate_latency_stats(char *buf, unsigned bufsize)
325 {
326 	uint32_t i, tcpu = ~0;
327 	uint64_t cur_max = 0;
328 
329 	for (i = 0; i < real_ncpus; i++) {
330 		if (cur_max < cpu_data_ptr[i]->cpu_max_observed_int_latency) {
331 			cur_max = cpu_data_ptr[i]->cpu_max_observed_int_latency;
332 			tcpu = i;
333 		}
334 	}
335 
336 	if (tcpu < real_ncpus) {
337 		snprintf(buf, bufsize, "0x%x 0x%x 0x%llx", tcpu, cpu_data_ptr[tcpu]->cpu_max_observed_int_latency_vector, cpu_data_ptr[tcpu]->cpu_max_observed_int_latency);
338 	}
339 }
340 
341 uint32_t interrupt_timer_coalescing_enabled = 1;
342 uint64_t interrupt_coalesced_timers;
343 
344 /*
345  * Handle interrupts:
346  *  - local APIC interrupts (IPIs, timers, etc) are handled by the kernel,
347  *  - device interrupts go to the platform expert.
348  */
349 void
350 interrupt(x86_saved_state_t *state)
351 {
352 	uint64_t        rip;
353 	uint64_t        rsp;
354 	int             interrupt_num;
355 	boolean_t       user_mode = FALSE;
356 	int             ipl;
357 	int             cnum = cpu_number();
358 	cpu_data_t      *cdp = cpu_data_ptr[cnum];
359 	int             itype = DBG_INTR_TYPE_UNKNOWN;
360 	int             handled;
361 
362 
363 	x86_saved_state64_t     *state64 = saved_state64(state);
364 	rip = state64->isf.rip;
365 	rsp = state64->isf.rsp;
366 	interrupt_num = state64->isf.trapno;
367 	if (state64->isf.cs & 0x03) {
368 		user_mode = TRUE;
369 	}
370 
371 #if DEVELOPMENT || DEBUG
372 	uint64_t frameptr = is_saved_state64(state) ? state64->rbp : saved_state32(state)->ebp;
373 	uint32_t traptrace_index = traptrace_start(interrupt_num, rip, mach_absolute_time(), frameptr);
374 #endif
375 
376 	if (cpu_data_ptr[cnum]->lcpu.package->num_idle == topoParms.nLThreadsPerPackage) {
377 		cpu_data_ptr[cnum]->cpu_hwIntpexits[interrupt_num]++;
378 	}
379 
380 	if (interrupt_num == (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_INTERPROCESSOR_INTERRUPT)) {
381 		itype = DBG_INTR_TYPE_IPI;
382 	} else if (interrupt_num == (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_TIMER_INTERRUPT)) {
383 		itype = DBG_INTR_TYPE_TIMER;
384 	} else {
385 		itype = DBG_INTR_TYPE_OTHER;
386 	}
387 
388 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
389 	    MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
390 	    interrupt_num,
391 	    (user_mode ? rip : VM_KERNEL_UNSLIDE(rip)),
392 	    user_mode, itype, 0);
393 
394 	SCHED_STATS_INC(interrupt_count);
395 
396 #if CONFIG_TELEMETRY
397 	if (telemetry_needs_record) {
398 		telemetry_mark_curthread(user_mode, FALSE);
399 	}
400 #endif
401 
402 	ipl = get_preemption_level();
403 
404 	/*
405 	 * Handle local APIC interrupts;
406 	 * otherwise, call the platform expert for devices.
407 	 */
408 	handled = lapic_interrupt(interrupt_num, state);
409 
410 	if (!handled) {
411 		if (interrupt_num == (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_CMCI_INTERRUPT)) {
412 			/*
413 			 * CMCI can be signalled on any logical processor, and the kexts
414 			 * that implement handling CMCI use IOKit to register handlers for
415 			 * the CMCI vector, so if we see a CMCI, do not encode a CPU
416 			 * number in bits 8:31 (since the vector is the same regardless of
417 			 * the handling CPU).
418 			 */
419 			PE_incoming_interrupt(interrupt_num);
420 		} else if (cnum <= lapic_max_interrupt_cpunum) {
421 			PE_incoming_interrupt((cnum << 8) | interrupt_num);
422 		}
423 	}
424 
425 	if (__improbable(get_preemption_level() != ipl)) {
426 		panic("Preemption level altered by interrupt vector 0x%x: initial 0x%x, final: 0x%x", interrupt_num, ipl, get_preemption_level());
427 	}
428 
429 
430 	if (__improbable(cdp->cpu_nested_istack)) {
431 		cdp->cpu_nested_istack_events++;
432 	} else {
433 		uint64_t ctime = mach_absolute_time();
434 		uint64_t int_latency = ctime - cdp->cpu_int_event_time;
435 		uint64_t esdeadline, ehdeadline;
436 		/* Attempt to process deferred timers in the context of
437 		 * this interrupt, unless interrupt time has already exceeded
438 		 * TCOAL_ILAT_THRESHOLD.
439 		 */
440 #define TCOAL_ILAT_THRESHOLD (30000ULL)
441 
442 		if ((int_latency < TCOAL_ILAT_THRESHOLD) &&
443 		    interrupt_timer_coalescing_enabled) {
444 			esdeadline = cdp->rtclock_timer.queue.earliest_soft_deadline;
445 			ehdeadline = cdp->rtclock_timer.deadline;
446 			if ((ctime >= esdeadline) && (ctime < ehdeadline)) {
447 				interrupt_coalesced_timers++;
448 				TCOAL_DEBUG(0x88880000 | DBG_FUNC_START, ctime, esdeadline, ehdeadline, interrupt_coalesced_timers, 0);
449 				rtclock_intr(state);
450 				TCOAL_DEBUG(0x88880000 | DBG_FUNC_END, ctime, esdeadline, interrupt_coalesced_timers, 0, 0);
451 			} else {
452 				TCOAL_DEBUG(0x77770000, ctime, cdp->rtclock_timer.queue.earliest_soft_deadline, cdp->rtclock_timer.deadline, interrupt_coalesced_timers, 0);
453 			}
454 		}
455 
456 		if (__improbable(ilat_assert && (int_latency > interrupt_latency_cap) && !machine_timeout_suspended())) {
457 			panic("Interrupt vector 0x%x exceeded interrupt latency threshold, 0x%llx absolute time delta, prior signals: 0x%x, current signals: 0x%x", interrupt_num, int_latency, cdp->cpu_prior_signals, cdp->cpu_signals);
458 		}
459 
460 		if (__improbable(int_latency > cdp->cpu_max_observed_int_latency)) {
461 			cdp->cpu_max_observed_int_latency = int_latency;
462 			cdp->cpu_max_observed_int_latency_vector = interrupt_num;
463 		}
464 	}
465 
466 	/*
467 	 * Having serviced the interrupt first, look at the interrupted stack depth.
468 	 */
469 	if (!user_mode) {
470 		uint64_t depth = cdp->cpu_kernel_stack
471 		    + sizeof(struct thread_kernel_state)
472 		    + sizeof(struct i386_exception_link *)
473 		    - rsp;
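		/*
		 * depth, computed above, runs from the top of the kernel stack
		 * (stack base plus the reserved kernel state and exception-link
		 * pointer) down to the interrupted RSP: the number of stack
		 * bytes in use when the interrupt arrived.
		 */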
474 		if (__improbable(depth > kernel_stack_depth_max)) {
475 			kernel_stack_depth_max = (vm_offset_t)depth;
476 			KERNEL_DEBUG_CONSTANT(
477 				MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
478 				(long) depth, (long) VM_KERNEL_UNSLIDE(rip), 0, 0, 0);
479 		}
480 	}
481 
482 	if (cnum == master_cpu) {
483 		entropy_collect();
484 	}
485 
486 #if KPERF
487 	kperf_interrupt();
488 #endif /* KPERF */
489 
490 	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END,
491 	    interrupt_num);
492 
493 	assert(ml_get_interrupts_enabled() == FALSE);
494 
495 #if DEVELOPMENT || DEBUG
496 	if (traptrace_index != TRAPTRACE_INVALID_INDEX) {
497 		traptrace_end(traptrace_index, mach_absolute_time());
498 	}
499 #endif
500 }
501 
502 static inline void
503 reset_dr7(void)
504 {
505 	long dr7 = 0x400; /* magic dr7 reset value; 32 bit on i386, 64 bit on x86_64 */
506 	__asm__ volatile ("mov %0,%%dr7" : : "r" (dr7));
507 }
508 #if MACH_KDP
509 unsigned kdp_has_active_watchpoints = 0;
510 #define NO_WATCHPOINTS (!kdp_has_active_watchpoints)
511 #else
512 #define NO_WATCHPOINTS 1
513 #endif
514 
515 static uint32_t bound_chk_violations_event;
516 
517 static void
518 xnu_soft_trap_handle_breakpoint(
519 	__unused void     *tstate,
520 	uint16_t          comment)
521 {
522 	if (comment == CLANG_SOFT_TRAP_BOUND_CHK) {
523 		os_atomic_inc(&bound_chk_violations_event, relaxed);
524 	}
525 }
526 
527 KERNEL_BRK_DESCRIPTOR_DEFINE(clang_desc,
528     .type                = KERNEL_BRK_TYPE_CLANG,
529     .base                = CLANG_X86_TRAP_START,
530     .max                 = CLANG_X86_TRAP_END,
531     .options             = KERNEL_BRK_UNRECOVERABLE,
532     .handle_breakpoint   = NULL);
533 
534 KERNEL_BRK_DESCRIPTOR_DEFINE(xnu_soft_traps_desc,
535     .type                = KERNEL_BRK_TYPE_TELEMETRY,
536     .base                = XNU_SOFT_TRAP_START,
537     .max                 = XNU_SOFT_TRAP_END,
538     .options             = KERNEL_BRK_RECOVERABLE | KERNEL_BRK_CORE_ANALYTICS,
539     .handle_breakpoint   = xnu_soft_trap_handle_breakpoint);
540 
541 KERNEL_BRK_DESCRIPTOR_DEFINE(libcxx_desc,
542     .type                = KERNEL_BRK_TYPE_LIBCXX,
543     .base                = LIBCXX_TRAP_START,
544     .max                 = LIBCXX_TRAP_END,
545     .options             = KERNEL_BRK_UNRECOVERABLE,
546     .handle_breakpoint   = NULL);
547 
548 KERNEL_BRK_DESCRIPTOR_DEFINE(xnu_hard_traps_desc,
549     .type                = KERNEL_BRK_TYPE_XNU,
550     .base                = XNU_HARD_TRAP_START,
551     .max                 = XNU_HARD_TRAP_END,
552     .options             = KERNEL_BRK_UNRECOVERABLE,
553     .handle_breakpoint   = NULL);
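/*
 * A worked example of a recoverable descriptor lives at the bottom of this
 * file (DEVELOPMENT || DEBUG only): the KERNEL_BRK_TYPE_TEST descriptor and
 * recoverable_kernel_trap_test(), which trigger a soft trap via
 * ml_recoverable_trap() and verify that the handle_breakpoint callback ran.
 */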
554 
555 static bool
556 handle_kernel_breakpoint(x86_saved_state64_t *state, uint16_t *out_comment)
557 {
558 	uint16_t comment;
559 	const struct kernel_brk_descriptor *desc;
560 	uint8_t inst_buf[8];
561 	uint32_t prefix16 = 0x80B90F67; /* Encoding prefix for ud1 <16-bit code>(%eax), %eax */
562 	uint32_t prefix8 = 0x40B90F67; /* Encoding prefix for ud1 <8-bit code>(%eax), %eax */
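	/*
	 * Little-endian view of the instruction bytes 67 0F B9 /r: 0x67 is the
	 * address-size prefix, 0F B9 is the ud1 opcode, and the ModRM byte
	 * (0x80: disp32, 0x40: disp8) places the trap comment in the
	 * displacement field -- hence the 8- and 5-byte instruction lengths
	 * decoded below.
	 */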
563 	bool found_prefix8 = false;
564 
565 	vm_size_t sz = ml_nofault_copy(state->isf.rip, (vm_offset_t)inst_buf, sizeof(inst_buf));
566 	if (sz != sizeof(inst_buf)) {
567 		return false;
568 	}
569 
570 	if (bcmp(inst_buf, &prefix16, sizeof(prefix16)) == 0) {
571 		/* The two bytes following the prefix are our code */
572 		comment = inst_buf[5] << 8 | inst_buf[4];
573 	} else if (bcmp(inst_buf, &prefix8, sizeof(prefix8)) == 0) {
574 		/* The one byte following the prefix is our code */
575 		found_prefix8 = true;
576 		comment = inst_buf[4];
577 	} else {
578 		return false;
579 	}
580 
581 	if (out_comment) {
582 		*out_comment = comment;
583 	}
584 	desc = find_brk_descriptor_by_comment(comment);
585 
586 	if (!desc) {
587 		return false;
588 	}
589 
590 	if (desc->options & KERNEL_BRK_TELEMETRY_OPTIONS) {
591 		telemetry_kernel_brk(desc->type, desc->options, (void *)state, comment);
592 	}
593 
594 	if (desc->handle_breakpoint) {
595 		desc->handle_breakpoint(state, comment); /* May trigger panic */
596 	}
597 
598 	/* Still alive? Check if we should recover. */
599 	if (desc->options & KERNEL_BRK_RECOVERABLE) {
600 		/* ud1 can be five or eight bytes long depending on the prefix */
601 		set_recovery_ip(state, state->isf.rip + (found_prefix8 ? 5 : 8));
602 		return true;
603 	}
604 
605 	return false;
606 }
607 
608 /*
609  * Trap from kernel mode.  Only page-fault errors are recoverable,
610  * and then only in special circumstances.  All other errors are
611  * fatal.
612  */
613 
614 void
615 kernel_trap(
616 	x86_saved_state_t       *state,
617 	uintptr_t *lo_spp)
618 {
619 	char                    trapreason[32];
620 	const char              *trapname = NULL;
621 	uint16_t                trapcomment = 0;
622 
623 	x86_saved_state64_t     *saved_state;
624 	int                     code;
625 	user_addr_t             vaddr;
626 	int                     type;
627 	vm_map_t                map = 0;        /* protected by T_PAGE_FAULT */
628 	kern_return_t           result = KERN_FAILURE;
629 	kern_return_t           fault_result = KERN_SUCCESS;
630 	thread_t                thread;
631 	boolean_t               intr;
632 	vm_prot_t               prot;
633 	struct recovery         *rp;
634 	vm_offset_t             kern_ip;
635 	int                     is_user;
636 	int                     trap_pl = get_preemption_level();
637 
638 	thread = current_thread();
639 
640 	if (__improbable(is_saved_state32(state))) {
641 		panic("kernel_trap(%p) with 32-bit state", state);
642 	}
643 	saved_state = saved_state64(state);
644 
645 	/* Record cpu where state was captured */
646 	saved_state->isf.cpu = cpu_number();
647 
648 	vaddr = (user_addr_t)saved_state->cr2;
649 	type  = saved_state->isf.trapno;
650 	code  = (int)(saved_state->isf.err & 0xffff);
651 	intr  = (saved_state->isf.rflags & EFL_IF) != 0;        /* state of ints at trap */
652 	kern_ip = (vm_offset_t)saved_state->isf.rip;
653 
654 	is_user = (vaddr < VM_MAX_USER_PAGE_ADDRESS);
655 
656 #if DEVELOPMENT || DEBUG
657 	uint32_t traptrace_index = traptrace_start(type, kern_ip, mach_absolute_time(), saved_state->rbp);
658 #endif
659 
660 #if CONFIG_DTRACE
661 	/*
662 	 * Is there a DTrace hook?
663 	 */
664 	if (__improbable(tempDTraceTrapHook != NULL)) {
665 		if (tempDTraceTrapHook(type, state, lo_spp, 0) == KERN_SUCCESS) {
666 			/*
667 			 * If it succeeds, we are done...
668 			 */
669 			goto common_return;
670 		}
671 	}
672 
673 	/* Handle traps originating from probe context. */
674 	if (thread != THREAD_NULL && thread->t_dtrace_inprobe) {
675 		if (dtrace_handle_trap(type, state)) {
676 			goto common_return;
677 		}
678 	}
679 
680 #endif /* CONFIG_DTRACE */
681 
682 	/*
683 	 * We come here with interrupts off, as we don't want to recurse
684 	 * on preemption below.  But we do want to re-enable interrupts
685 	 * as soon as we possibly can, to hold latency down.
686 	 */
687 	if (__improbable(T_PREEMPT == type)) {
688 		ast_taken_kernel();
689 
690 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
691 		    (MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
692 		    0, 0, 0, VM_KERNEL_UNSLIDE(kern_ip), 0);
693 
694 		goto common_return;
695 	}
696 
697 	user_addr_t     kd_vaddr = is_user ? vaddr : VM_KERNEL_UNSLIDE(vaddr);
698 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
699 	    (MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
700 	    (unsigned)(kd_vaddr >> 32), (unsigned)kd_vaddr, is_user,
701 	    VM_KERNEL_UNSLIDE(kern_ip), 0);
702 
703 
704 	if (T_PAGE_FAULT == type) {
705 		/*
706 		 * assume we're faulting in the kernel map
707 		 */
708 		map = kernel_map;
709 
710 		if (__probable((thread != THREAD_NULL) && (thread->map != kernel_map) &&
711 		    (vaddr < VM_MAX_USER_PAGE_ADDRESS))) {
712 			/* fault occurred in userspace */
713 			map = thread->map;
714 
715 			/* Intercept a potential Supervisor Mode Execute
716 			 * Protection fault. These criteria identify
717 			 * both NX faults and SMEP faults, but both
718 			 * are fatal. We avoid checking PTEs (racy).
719 			 * (The VM could just redrive a SMEP fault, hence
720 			 * the intercept).
721 			 */
722 			if (__improbable((code == (T_PF_PROT | T_PF_EXECUTE)) &&
723 			    (pmap_smep_enabled) && (saved_state->isf.rip == vaddr))) {
724 				goto debugger_entry;
725 			}
726 
727 			/*
728 			 * Additionally check for SMAP faults...
729 			 * which are characterized by page-present and
730 			 * the AC bit unset (i.e. not from copyin/out path).
731 			 */
732 			if (__improbable(code & T_PF_PROT &&
733 			    pmap_smap_enabled &&
734 			    (saved_state->isf.rflags & EFL_AC) == 0)) {
735 				goto debugger_entry;
736 			}
737 
738 			/*
739 			 * If we're not sharing cr3 with the user
740 			 * and we faulted in copyio,
741 			 * then switch cr3 here and dismiss the fault.
742 			 */
743 			if (no_shared_cr3 &&
744 			    (thread->machine.specFlags & CopyIOActive) &&
745 			    map->pmap->pm_cr3 != get_cr3_base()) {
746 				pmap_assert(current_cpu_datap()->cpu_pmap_pcid_enabled == FALSE);
747 				set_cr3_raw(map->pmap->pm_cr3);
748 				return;
749 			}
750 			if (__improbable(vaddr < PAGE_SIZE) &&
751 			    ((thread->machine.specFlags & CopyIOActive) == 0)) {
752 				goto debugger_entry;
753 			}
754 		}
755 	}
756 
757 	(void) ml_set_interrupts_enabled(intr);
758 
759 	switch (type) {
760 	case T_NO_FPU:
761 		fpnoextflt();
762 		goto common_return;
763 
764 	case T_FPU_FAULT:
765 		fpextovrflt();
766 		goto common_return;
767 
768 	case T_FLOATING_POINT_ERROR:
769 		fpexterrflt();
770 		goto common_return;
771 
772 	case T_SSE_FLOAT_ERROR:
773 		fpSSEexterrflt();
774 		goto common_return;
775 
776 	case T_INVALID_OPCODE:
777 		if (handle_kernel_breakpoint(saved_state, &trapcomment)) {
778 			goto common_return;
779 		} else if (trapcomment != 0) {
780 			/* augment trap name with trap comment */
781 			trapname = tsnprintf(trapreason, sizeof(trapreason), "%s #%#04hx", trap_type[type], trapcomment);
782 		}
783 		fpUDflt(kern_ip);
784 		goto debugger_entry;
785 
786 	case T_DEBUG:
787 		/*
788 		 * Re-enable LBR tracing for core/panic files if necessary. i386_lbr_enable confirms LBR should be re-enabled.
789 		 */
790 		i386_lbr_enable();
791 		if ((saved_state->isf.rflags & EFL_TF) == 0 && NO_WATCHPOINTS) {
792 			/* We've somehow encountered a debug
793 			 * register match that does not belong
794 			 * to the kernel debugger.
795 			 * This isn't supposed to happen.
796 			 */
797 			reset_dr7();
798 			goto common_return;
799 		}
800 		goto debugger_entry;
801 	case T_INT3:
802 		goto debugger_entry;
803 	case T_PAGE_FAULT:
804 
805 #if CONFIG_DTRACE
806 		if (thread != THREAD_NULL && thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */
807 			if (dtrace_tally_fault(vaddr)) { /* Should a fault under dtrace be ignored? */
808 				/*
809 				 * DTrace has "anticipated" the possibility of this fault, and has
810 				 * established the suitable recovery state. Drop down now into the
811 				 * recovery handling code in "case T_GENERAL_PROTECTION:".
812 				 */
813 				goto FALL_THROUGH;
814 			}
815 		}
816 #endif /* CONFIG_DTRACE */
817 
818 		prot = VM_PROT_READ;
819 
820 		if (code & T_PF_WRITE) {
821 			prot |= VM_PROT_WRITE;
822 		}
823 		if (code & T_PF_EXECUTE) {
824 			prot |= VM_PROT_EXECUTE;
825 		}
826 
827 		/**
828 		 * vm_fault() can be called with preemption disabled (and indeed this is expected for
829 		 * certain copyio() scenarios), but can't safely be called with interrupts disabled
830 		 * once the system has gone multi-threaded.  Other than some early-boot situations
831 		 * such as startup kext loading, kernel paging operations should never be triggered
832 		 * by non-interruptible code in the first place, so a fault from such a context will
833 		 * ultimately produce a kernel page fault panic anyway.  In these cases, skip calling
834 		 * vm_fault() to avoid masking the real kernel panic with a failed VM locking assertion.
835 		 */
836 		if (__improbable(!(intr ||
837 		    startup_phase < STARTUP_SUB_EARLY_BOOT ||
838 		    current_cpu_datap()->cpu_hibernate))) {
839 			fault_result = result = KERN_FAILURE;
840 			goto FALL_THROUGH;
841 		}
842 		fault_result = result = vm_fault(map,
843 		    vaddr,
844 		    prot,
845 		    FALSE, VM_KERN_MEMORY_NONE,
846 		    THREAD_UNINT, NULL, 0);
847 
848 		if (result == KERN_SUCCESS) {
849 			goto common_return;
850 		}
851 		/*
852 		 * fall through
853 		 */
854 FALL_THROUGH:
855 
856 	case T_GENERAL_PROTECTION:
857 		/*
858 		 * If there is a failure recovery address
859 		 * for this fault, go there.
860 		 */
861 		for (rp = recover_table; rp < recover_table_end; rp++) {
862 			if (kern_ip == rp->fault_addr) {
863 				set_recovery_ip(saved_state, rp->recover_addr);
864 				goto common_return;
865 			}
866 		}
867 
868 		/*
869 		 * Unanticipated page-fault errors in kernel
870 		 * should not happen.
871 		 *
872 		 * fall through...
873 		 */
874 		OS_FALLTHROUGH;
875 	default:
876 		/*
877 		 * Exception 15 is reserved but some chips may generate it
878 		 * spuriously. Seen at startup on AMD Athlon-64.
879 		 */
880 		if (type == 15) {
881 			kprintf("kernel_trap() ignoring spurious trap 15\n");
882 			goto common_return;
883 		}
884 debugger_entry:
885 		/* Ensure that the i386_kernel_state at the base of the
886 		 * current thread's stack (if any) is synchronized with the
887 		 * context at the moment of the trap, to facilitate
888 		 * access through the debugger.
889 		 */
890 		sync_iss_to_iks(state);
891 #if  MACH_KDP
892 		if (kdp_i386_trap(type, saved_state, result, (vm_offset_t)vaddr)) {
893 			goto common_return;
894 		}
895 #endif
896 	}
897 	if (type == T_PAGE_FAULT) {
898 		panic_fault_address = vaddr;
899 	}
900 	pal_cli();
901 
902 	if (trapname == NULL) {
903 		trapname = type < TRAP_TYPES ? trap_type[type] : "Unknown";
904 	}
905 
906 	panic_trap(saved_state, trapname, trap_pl, fault_result);
907 	/*
908 	 * NO RETURN
909 	 */
910 
911 common_return:
912 #if DEVELOPMENT || DEBUG
913 	if (traptrace_index != TRAPTRACE_INVALID_INDEX) {
914 		traptrace_end(traptrace_index, mach_absolute_time());
915 	}
916 #endif
917 	return;
918 }
919 
920 static void
921 set_recovery_ip(x86_saved_state64_t  *saved_state, vm_offset_t ip)
922 {
923 	saved_state->isf.rip = ip;
924 }
925 
926 static void
927 panic_trap(x86_saved_state64_t *regs, const char *trapname, uint32_t pl, kern_return_t fault_result)
928 {
929 	pal_cr_t        cr0, cr2, cr3, cr4;
930 	boolean_t       potential_smep_fault = FALSE, potential_kernel_NX_fault = FALSE;
931 	boolean_t       potential_smap_fault = FALSE;
932 
933 	pal_get_control_registers( &cr0, &cr2, &cr3, &cr4 );
934 	assert(ml_get_interrupts_enabled() == FALSE);
935 	current_cpu_datap()->cpu_fatal_trap_state = regs;
936 	/*
937 	 * Issue an I/O port read if one has been requested - this is an
938 	 * event logic analyzers can use as a trigger point.
939 	 */
940 	panic_notify();
941 
942 	kprintf("CPU %d panic trap number 0x%x, rip 0x%016llx\n",
943 	    cpu_number(), regs->isf.trapno, regs->isf.rip);
944 	kprintf("cr0 0x%016llx cr2 0x%016llx cr3 0x%016llx cr4 0x%016llx\n",
945 	    cr0, cr2, cr3, cr4);
946 
947 	if ((regs->isf.trapno == T_PAGE_FAULT) && (regs->isf.err == (T_PF_PROT | T_PF_EXECUTE)) && (regs->isf.rip == regs->cr2)) {
948 		if (pmap_smep_enabled && (regs->isf.rip < VM_MAX_USER_PAGE_ADDRESS)) {
949 			potential_smep_fault = TRUE;
950 		} else if (regs->isf.rip >= VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
951 			potential_kernel_NX_fault = TRUE;
952 		}
953 	} else if (pmap_smap_enabled &&
954 	    regs->isf.trapno == T_PAGE_FAULT &&
955 	    regs->isf.err & T_PF_PROT &&
956 	    regs->cr2 < VM_MAX_USER_PAGE_ADDRESS &&
957 	    regs->isf.rip >= VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
958 		potential_smap_fault = TRUE;
959 	}
960 
961 #undef panic
962 	panic("Kernel trap at 0x%016llx, type %d=%s, registers:\n"
963 	    "CR0: 0x%016llx, CR2: 0x%016llx, CR3: 0x%016llx, CR4: 0x%016llx\n"
964 	    "RAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n"
965 	    "RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n"
966 	    "R8:  0x%016llx, R9:  0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n"
967 	    "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n"
968 	    "RFL: 0x%016llx, RIP: 0x%016llx, CS:  0x%016llx, SS:  0x%016llx\n"
969 	    "Fault CR2: 0x%016llx, Error code: 0x%016llx, Fault CPU: 0x%x%s%s%s%s, PL: %d, VF: %d\n",
970 	    regs->isf.rip, regs->isf.trapno, trapname,
971 	    cr0, cr2, cr3, cr4,
972 	    regs->rax, regs->rbx, regs->rcx, regs->rdx,
973 	    regs->isf.rsp, regs->rbp, regs->rsi, regs->rdi,
974 	    regs->r8, regs->r9, regs->r10, regs->r11,
975 	    regs->r12, regs->r13, regs->r14, regs->r15,
976 	    regs->isf.rflags, regs->isf.rip, regs->isf.cs & 0xFFFF,
977 	    regs->isf.ss & 0xFFFF, regs->cr2, regs->isf.err, regs->isf.cpu,
978 	    virtualized ? " VMM" : "",
979 	    potential_kernel_NX_fault ? " Kernel NX fault" : "",
980 	    potential_smep_fault ? " SMEP/User NX fault" : "",
981 	    potential_smap_fault ? " SMAP fault" : "",
982 	    pl,
983 	    fault_result);
984 }
985 
986 #if CONFIG_DTRACE
987 extern kern_return_t dtrace_user_probe(x86_saved_state_t *);
988 #endif
989 
990 #if DEBUG
991 uint32_t fsigs[2];
992 uint32_t fsigns, fsigcs;
993 #endif
994 
995 /*
996  *	Trap from user mode.
997  */
998 void
999 user_trap(
1000 	x86_saved_state_t *saved_state)
1001 {
1002 	int                     exc;
1003 	int                     err;
1004 	mach_exception_code_t   code;
1005 	mach_exception_subcode_t subcode;
1006 	int                     type;
1007 	user_addr_t             vaddr;
1008 	vm_prot_t               prot;
1009 	thread_t                thread = current_thread();
1010 	kern_return_t           kret;
1011 	user_addr_t             rip;
1012 	unsigned long           dr6 = 0; /* 32 bit for i386, 64 bit for x86_64 */
1013 	int                     current_cpu = cpu_number();
1014 #if DEVELOPMENT || DEBUG
1015 	bool                    inspect_cacheline = false;
1016 	uint32_t                traptrace_index;
1017 #endif
1018 	assert((is_saved_state32(saved_state) && !thread_is_64bit_addr(thread)) ||
1019 	    (is_saved_state64(saved_state) && thread_is_64bit_addr(thread)));
1020 
1021 	if (is_saved_state64(saved_state)) {
1022 		x86_saved_state64_t     *regs;
1023 
1024 		regs = saved_state64(saved_state);
1025 
1026 		/* Record cpu where state was captured */
1027 		regs->isf.cpu = current_cpu;
1028 
1029 		type = regs->isf.trapno;
1030 		err  = (int)regs->isf.err & 0xffff;
1031 		vaddr = (user_addr_t)regs->cr2;
1032 		rip   = (user_addr_t)regs->isf.rip;
1033 #if DEVELOPMENT || DEBUG
1034 		traptrace_index = traptrace_start(type, rip, mach_absolute_time(), regs->rbp);
1035 #endif
1036 	} else {
1037 		x86_saved_state32_t     *regs;
1038 
1039 		regs = saved_state32(saved_state);
1040 
1041 		/* Record cpu where state was captured */
1042 		regs->cpu = current_cpu;
1043 
1044 		type  = regs->trapno;
1045 		err   = regs->err & 0xffff;
1046 		vaddr = (user_addr_t)regs->cr2;
1047 		rip   = (user_addr_t)regs->eip;
1048 #if DEVELOPMENT || DEBUG
1049 		traptrace_index = traptrace_start(type, rip, mach_absolute_time(), regs->ebp);
1050 #endif
1051 	}
1052 
1053 #if DEVELOPMENT || DEBUG
1054 	/*
1055 	 * Copy the cacheline of code into the thread's instruction stream save area
1056 	 * before enabling interrupts (the assumption is that we have not otherwise faulted or
1057 	 * trapped since the original cache line stores).  If the saved code is not valid,
1058 	 * we'll catch it below when we process the copyin() for unhandled faults.
1059 	 */
1060 	if (thread->machine.insn_copy_optout == false &&
1061 	    (type == T_PAGE_FAULT || type == T_INVALID_OPCODE || type == T_GENERAL_PROTECTION)) {
1062 #define CACHELINE_SIZE 64
1063 		THREAD_TO_PCB(thread)->insn_cacheline[CACHELINE_SIZE] = (uint8_t)(rip & (CACHELINE_SIZE - 1));
1064 		bcopy(&cpu_shadowp(current_cpu)->cpu_rtimes[0],
1065 		    &THREAD_TO_PCB(thread)->insn_cacheline[0],
1066 		    sizeof(THREAD_TO_PCB(thread)->insn_cacheline) - 1);
1067 		inspect_cacheline = true;
1068 	}
1069 #endif
1070 
1071 	if (type == T_DEBUG) {
1072 		if (thread->machine.ids) {
1073 			unsigned long clear = 0;
1074 			/* Stash and clear this processor's DR6 value, in the event
1075 			 * this was a debug register match
1076 			 */
1077 			__asm__ volatile ("mov %%db6, %0" : "=r" (dr6));
1078 			__asm__ volatile ("mov %0, %%db6" : : "r" (clear));
1079 		}
1080 		/* [Re]Enable LBRs *BEFORE* enabling interrupts to ensure we hit the right CPU */
1081 		i386_lbr_enable();
1082 	}
1083 
1084 	if (type == T_PAGE_FAULT) {
1085 		thread_reset_pcs_will_fault(thread);
1086 	}
1087 
1088 	pal_sti();
1089 
1090 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1091 	    (MACHDBG_CODE(DBG_MACH_EXCP_UTRAP_x86, type)) | DBG_FUNC_NONE,
1092 	    (unsigned)(vaddr >> 32), (unsigned)vaddr,
1093 	    (unsigned)(rip >> 32), (unsigned)rip, 0);
1094 
1095 	code = 0;
1096 	subcode = 0;
1097 	exc = 0;
1098 
1099 #if CONFIG_DTRACE
1100 	/*
1101 	 * DTrace does not consume all user traps, only INT_3's for now.
1102 	 * Avoid needlessly calling tempDTraceTrapHook here, and let the
1103 	 * INT_3 case handle them.
1104 	 */
1105 #endif
1106 
1107 	DEBUG_KPRINT_SYSCALL_MASK(1,
1108 	    "user_trap: type=0x%x(%s) err=0x%x cr2=%p rip=%p\n",
1109 	    type, trap_type[type], err, (void *)(long) vaddr, (void *)(long) rip);
1110 
1111 	switch (type) {
1112 	case T_DIVIDE_ERROR:
1113 		exc = EXC_ARITHMETIC;
1114 		code = EXC_I386_DIV;
1115 		break;
1116 
1117 	case T_DEBUG:
1118 	{
1119 		pcb_t   pcb;
1120 		/*
1121 		 * Update the PCB with this processor's DR6 value
1122 		 * in the event this was a debug register match.
1123 		 */
1124 		pcb = THREAD_TO_PCB(thread);
1125 		if (pcb->ids) {
1126 			/*
1127 			 * We can get and set the status register
1128 			 * in 32-bit mode even on a 64-bit thread
1129 			 * because the high order bits are not
1130 			 * used on x86_64
1131 			 */
1132 			if (thread_is_64bit_addr(thread)) {
1133 				x86_debug_state64_t *ids = pcb->ids;
1134 				ids->dr6 = dr6;
1135 			} else {         /* 32 bit thread */
1136 				x86_debug_state32_t *ids = pcb->ids;
1137 				ids->dr6 = (uint32_t) dr6;
1138 			}
1139 		}
1140 		exc = EXC_BREAKPOINT;
1141 		code = EXC_I386_SGL;
1142 		break;
1143 	}
1144 	case T_INT3:
1145 #if CONFIG_DTRACE
1146 		if (dtrace_user_probe(saved_state) == KERN_SUCCESS) {
1147 			return; /* If it succeeds, we are done... */
1148 		}
1149 #endif
1150 		exc = EXC_BREAKPOINT;
1151 		code = EXC_I386_BPT;
1152 		break;
1153 
1154 	case T_OVERFLOW:
1155 		exc = EXC_ARITHMETIC;
1156 		code = EXC_I386_INTO;
1157 		break;
1158 
1159 	case T_OUT_OF_BOUNDS:
1160 		exc = EXC_SOFTWARE;
1161 		code = EXC_I386_BOUND;
1162 		break;
1163 
1164 	case T_INVALID_OPCODE:
1165 		if (fpUDflt(rip) == 1) {
1166 			exc = EXC_BAD_INSTRUCTION;
1167 			code = EXC_I386_INVOP;
1168 		}
1169 		break;
1170 
1171 	case T_NO_FPU:
1172 		fpnoextflt();
1173 		break;
1174 
1175 	case T_FPU_FAULT:
1176 		fpextovrflt();
1177 		/*
1178 		 * Raise exception.
1179 		 */
1180 		exc = EXC_BAD_ACCESS;
1181 		code = VM_PROT_READ | VM_PROT_EXECUTE;
1182 		subcode = 0;
1183 		break;
1184 
1185 	case T_INVALID_TSS:     /* invalid TSS == iret with NT flag set */
1186 		exc = EXC_BAD_INSTRUCTION;
1187 		code = EXC_I386_INVTSSFLT;
1188 		subcode = err;
1189 		break;
1190 
1191 	case T_SEGMENT_NOT_PRESENT:
1192 		exc = EXC_BAD_INSTRUCTION;
1193 		code = EXC_I386_SEGNPFLT;
1194 		subcode = err;
1195 		break;
1196 
1197 	case T_STACK_FAULT:
1198 		exc = EXC_BAD_INSTRUCTION;
1199 		code = EXC_I386_STKFLT;
1200 		subcode = err;
1201 		break;
1202 
1203 	case T_GENERAL_PROTECTION:
1204 		/*
1205 		 * There's a wide range of circumstances which generate this
1206 		 * class of exception. From user-space, many involve bad
1207 		 * addresses (such as a non-canonical 64-bit address).
1208 		 * So we map this to EXC_BAD_ACCESS (and thereby SIGSEGV).
1209 		 * The trouble is cr2 doesn't contain the faulting address;
1210 		 * we'd need to decode the faulting instruction to really
1211 		 * determine this. We'll leave that to debuggers.
1212 		 * However, attempted execution of privileged instructions
1213 		 * (e.g. cli) also generates GP faults, so we map these to
1214 		 * EXC_BAD_ACCESS (and thence SIGSEGV) as well - rather than
1215 		 * EXC_BAD_INSTRUCTION which is more accurate. We just can't
1216 		 * win!
1217 		 */
1218 		exc = EXC_BAD_ACCESS;
1219 		code = EXC_I386_GPFLT;
1220 		subcode = err;
1221 		break;
1222 
1223 	case T_PAGE_FAULT:
1224 	{
1225 		prot = VM_PROT_READ;
1226 
1227 		if (err & T_PF_WRITE) {
1228 			prot |= VM_PROT_WRITE;
1229 		}
1230 		if (__improbable(err & T_PF_EXECUTE)) {
1231 			prot |= VM_PROT_EXECUTE;
1232 		}
1233 #if DEVELOPMENT || DEBUG
1234 		bool do_simd_hash = thread_fpsimd_hash_enabled();
1235 		uint32_t fsig = 0;
1236 		fsig = do_simd_hash ? thread_fpsimd_hash(thread) : 0;
1237 #if DEBUG
1238 		fsigs[0] = fsig;
1239 #endif
1240 #endif
1241 		kret = vm_fault(thread->map,
1242 		    vaddr,
1243 		    prot, FALSE, VM_KERN_MEMORY_NONE,
1244 		    THREAD_ABORTSAFE, NULL, 0);
1245 #if DEVELOPMENT || DEBUG
1246 		if (do_simd_hash && fsig) {
1247 			uint32_t fsig2 = thread_fpsimd_hash(thread);
1248 #if DEBUG
1249 			fsigcs++;
1250 			fsigs[1] = fsig2;
1251 #endif
1252 			if (fsig != fsig2) {
1253 				panic("FP/SIMD state hash mismatch across fault thread: %p 0x%x->0x%x", thread, fsig, fsig2);
1254 			}
1255 		} else {
1256 #if DEBUG
1257 			fsigns++;
1258 #endif
1259 		}
1260 #endif
1261 		if (__probable((kret == KERN_SUCCESS) || (kret == KERN_ABORTED))) {
1262 			break;
1263 		} else if (__improbable(kret == KERN_FAILURE)) {
1264 			/*
1265 			 * For a user trap, vm_fault() should never return KERN_FAILURE.
1266 			 * If it does, we're leaking preemption disables somewhere in the kernel.
1267 			 */
1268 			panic("vm_fault() KERN_FAILURE from user fault on thread %p", thread);
1269 		}
1270 
1271 		/* PAL debug hook (empty on x86) */
1272 		pal_dbg_page_fault(thread, vaddr, kret);
1273 		exc = EXC_BAD_ACCESS;
1274 		code = kret;
1275 		subcode = vaddr;
1276 	}
1277 	break;
1278 
1279 	case T_SSE_FLOAT_ERROR:
1280 		fpSSEexterrflt();
1281 		exc = EXC_ARITHMETIC;
1282 		code = EXC_I386_SSEEXTERR;
1283 		subcode = ((struct x86_fx_thread_state *)thread->machine.ifps)->fx_MXCSR;
1284 		break;
1285 
1286 
1287 	case T_FLOATING_POINT_ERROR:
1288 		fpexterrflt();
1289 		exc = EXC_ARITHMETIC;
1290 		code = EXC_I386_EXTERR;
1291 		subcode = ((struct x86_fx_thread_state *)thread->machine.ifps)->fx_status;
1292 		break;
1293 
1294 	case T_DTRACE_RET:
1295 #if CONFIG_DTRACE
1296 		if (dtrace_user_probe(saved_state) == KERN_SUCCESS) {
1297 			return; /* If it succeeds, we are done... */
1298 		}
1299 #endif
1300 		/*
1301 		 * If we get an INT 0x7f when we do not expect to,
1302 		 * treat it as an illegal instruction
1303 		 */
1304 		exc = EXC_BAD_INSTRUCTION;
1305 		code = EXC_I386_INVOP;
1306 		break;
1307 
1308 	default:
1309 		panic("Unexpected user trap, type %d", type);
1310 	}
1311 
1312 	if (type == T_PAGE_FAULT) {
1313 		thread_reset_pcs_done_faulting(thread);
1314 	}
1315 
1316 	if (exc != 0) {
1317 		uint16_t cs;
1318 		boolean_t intrs;
1319 
1320 		if (is_saved_state64(saved_state)) {
1321 			cs = saved_state64(saved_state)->isf.cs;
1322 		} else {
1323 			cs = saved_state32(saved_state)->cs;
1324 		}
1325 
1326 		if (last_branch_enabled_modes == LBR_ENABLED_USERMODE) {
1327 			intrs = ml_set_interrupts_enabled(FALSE);
1328 			/*
1329 			 * This is a bit racy (it's possible for this thread to migrate to another CPU, then
1330 			 * migrate back, but that seems rather rare in practice), but good enough to ensure
1331 			 * the LBRs are saved before proceeding with exception/signal dispatch.
1332 			 */
1333 			if (current_cpu == cpu_number()) {
1334 				i386_lbr_synch(thread);
1335 			}
1336 			ml_set_interrupts_enabled(intrs);
1337 		}
1338 
1339 		/*
1340 		 * Do not try to copyin from the instruction stream if the page fault was due
1341 		 * to an access to rip and was unhandled.
1342 		 * Do not deal with cases when %cs != USER[64]_CS
1343 		 * And of course there's no need to copy the instruction stream if the boot-arg
1344 		 * was set to 0.
1345 		 */
1346 		if (thread->machine.insn_copy_optout == false && insn_copyin_count > 0 &&
1347 		    (cs == USER64_CS || cs == USER_CS) && (type != T_PAGE_FAULT || vaddr != rip)) {
1348 #if DEVELOPMENT || DEBUG
1349 			copy_instruction_stream(thread, rip, type, inspect_cacheline);
1350 #else
1351 			copy_instruction_stream(thread, rip, type);
1352 #endif
1353 		}
1354 
1355 #if DEVELOPMENT || DEBUG
1356 		if (traptrace_index != TRAPTRACE_INVALID_INDEX) {
1357 			traptrace_end(traptrace_index, mach_absolute_time());
1358 		}
1359 #endif
1360 		/*
1361 		 * Note: Codepaths that directly return from user_trap() have pending
1362 		 * ASTs processed in locore
1363 		 */
1364 		i386_exception(exc, code, subcode);
1365 		/* NOTREACHED */
1366 	} else {
1367 #if DEVELOPMENT || DEBUG
1368 		if (traptrace_index != TRAPTRACE_INVALID_INDEX) {
1369 			traptrace_end(traptrace_index, mach_absolute_time());
1370 		}
1371 #endif
1372 	}
1373 }
1374 
1375 /*
1376  * Copyin up to x86_INSTRUCTION_STATE_MAX_INSN_BYTES bytes from the page that includes `rip`,
1377  * ensuring that we stay on the same page, clipping the start or end, as needed.
1378  * Add the clipped amount back at the start or end, depending on where it fits.
1379  * Consult the variable populated by the boot-arg `insn_capcnt'
1380  */
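/*
 * Worked example with hypothetical numbers (4K pages, insn_copyin_count of
 * 0x100): for rip 0x1030 the nominal window [0xfb0, 0x10b0) begins on the
 * previous page, so start_addr is clipped up to 0x1000, the 0x50 clipped
 * bytes are added back at the end (end_addr becomes 0x1100), and
 * insn_offset -- rip's position within the copied bytes -- becomes 0x30.
 */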
1381 static __attribute__((noinline)) void
copy_instruction_stream(thread_t thread,uint64_t rip,int __unused trap_code,bool inspect_cacheline)1382 copy_instruction_stream(thread_t thread, uint64_t rip, int __unused trap_code
1383 #if DEVELOPMENT || DEBUG
1384     , bool inspect_cacheline
1385 #endif
1386     )
1387 {
1388 #if x86_INSTRUCTION_STATE_MAX_INSN_BYTES > 4096
1389 #error x86_INSTRUCTION_STATE_MAX_INSN_BYTES cannot exceed a page in size.
1390 #endif
1391 	pcb_t pcb = THREAD_TO_PCB(thread);
1392 	vm_map_offset_t pagemask = ~vm_map_page_mask(current_map());
1393 	vm_map_offset_t rip_page = rip & pagemask;
1394 	vm_map_offset_t start_addr;
1395 	vm_map_offset_t insn_offset;
1396 	vm_map_offset_t end_addr = rip + (insn_copyin_count / 2);
1397 	void *stack_buffer;
1398 	int copyin_err = 0;
1399 #if defined(MACH_BSD) && (DEVELOPMENT || DEBUG)
1400 	void *procname;
1401 #endif
1402 
1403 #if DEVELOPMENT || DEBUG
1404 	assert(insn_copyin_count <= x86_INSTRUCTION_STATE_MAX_INSN_BYTES);
1405 #else
1406 	if (insn_copyin_count > x86_INSTRUCTION_STATE_MAX_INSN_BYTES ||
1407 	    insn_copyin_count < 64 /* CACHELINE_SIZE */) {
1408 		return;
1409 	}
1410 #endif
1411 
1412 #pragma clang diagnostic push
1413 #pragma clang diagnostic ignored "-Walloca"
1414 	stack_buffer = __builtin_alloca(insn_copyin_count);
1415 #pragma clang diagnostic pop
1416 
1417 	if (rip >= (insn_copyin_count / 2)) {
1418 		start_addr = rip - (insn_copyin_count / 2);
1419 	} else {
1420 		start_addr = 0;
1421 	}
1422 
1423 	if (start_addr < rip_page) {
1424 		insn_offset = (insn_copyin_count / 2) - (rip_page - start_addr);
1425 		end_addr += (rip_page - start_addr);
1426 		start_addr = rip_page;
1427 	} else if (end_addr >= (rip_page + (~pagemask + 1))) {
1428 		start_addr -= (end_addr - (rip_page + (~pagemask + 1))); /* Adjust start address backward */
1429 		/* Adjust instruction offset due to start address change */
1430 		insn_offset = (insn_copyin_count / 2) + (end_addr - (rip_page + (~pagemask + 1)));
1431 		end_addr = rip_page + (~pagemask + 1);  /* clip to the start of the next page (non-inclusive) */
1432 	} else {
1433 		insn_offset = insn_copyin_count / 2;
1434 	}
1435 
1436 	disable_preemption();   /* Prevent copyin from faulting in the instruction stream */
1437 	if (
1438 #if DEVELOPMENT || DEBUG
1439 		(insnstream_force_cacheline_mismatch < 2) &&
1440 #endif
1441 		((end_addr > start_addr) && (copyin_err = copyin(start_addr, stack_buffer, end_addr - start_addr)) == 0)) {
1442 		enable_preemption();
1443 
1444 		if (pcb->insn_state == 0) {
1445 			pcb->insn_state = kalloc_data(sizeof(x86_instruction_state_t), Z_WAITOK);
1446 		}
1447 
1448 		if (pcb->insn_state != 0) {
1449 			bcopy(stack_buffer, pcb->insn_state->insn_bytes, end_addr - start_addr);
1450 			bzero(&pcb->insn_state->insn_bytes[end_addr - start_addr],
1451 			    insn_copyin_count - (end_addr - start_addr));
1452 
1453 			pcb->insn_state->insn_stream_valid_bytes = (int)(end_addr - start_addr);
1454 			pcb->insn_state->insn_offset = (int)insn_offset;
1455 
1456 #if DEVELOPMENT || DEBUG
1457 			/* Now try to validate the cacheline we read at early-fault time matches the code
1458 			 * copied in. Before we do that, we have to make sure the buffer contains a valid
1459 			 * cacheline by looking for the 2 sentinel values written in the event the cacheline
1460 			 * could not be copied.
1461 			 */
1462 #define CACHELINE_DATA_NOT_PRESENT 0xdeadc0debeefcafeULL
1463 #define CACHELINE_MASK (CACHELINE_SIZE - 1)
1464 
1465 			if (inspect_cacheline &&
1466 			    (*(uint64_t *)(uintptr_t)&pcb->insn_cacheline[0] != CACHELINE_DATA_NOT_PRESENT &&
1467 			    *(uint64_t *)(uintptr_t)&pcb->insn_cacheline[8] != CACHELINE_DATA_NOT_PRESENT)) {
1468 				/*
1469 				 * The position of the cacheline in the instruction buffer is at offset
1470 				 * insn_offset - (rip & CACHELINE_MASK)
1471 				 */
1472 				if (__improbable((rip & CACHELINE_MASK) > insn_offset)) {
1473 					printf("thread %p code cacheline @ %p clipped wrt copied-in code (offset %d)\n",
1474 					    thread, (void *)(rip & ~CACHELINE_MASK), (int)(rip & CACHELINE_MASK));
1475 				} else if (bcmp(&pcb->insn_state->insn_bytes[insn_offset - (rip & CACHELINE_MASK)],
1476 				    &pcb->insn_cacheline[0], CACHELINE_SIZE) != 0
1477 				    || insnstream_force_cacheline_mismatch
1478 				    ) {
1479 #if x86_INSTRUCTION_STATE_CACHELINE_SIZE != CACHELINE_SIZE
1480 #error cacheline size mismatch
1481 #endif
1482 					bcopy(&pcb->insn_cacheline[0], &pcb->insn_state->insn_cacheline[0],
1483 					    x86_INSTRUCTION_STATE_CACHELINE_SIZE);
1484 					/* Mark the instruction stream as being out-of-synch */
1485 					pcb->insn_state->out_of_synch = 1;
1486 
1487 					printf("thread %p code cacheline @ %p mismatches with copied-in code [trap 0x%x]\n",
1488 					    thread, (void *)(rip & ~CACHELINE_MASK), trap_code);
1489 					for (int i = 0; i < 8; i++) {
1490 						printf("\t[%d] cl=0x%08llx vs. ci=0x%08llx\n", i, *(uint64_t *)(uintptr_t)&pcb->insn_cacheline[i * 8],
1491 						    *(uint64_t *)(uintptr_t)&pcb->insn_state->insn_bytes[(i * 8) + insn_offset - (rip & CACHELINE_MASK)]);
1492 					}
1493 					if (panic_on_cacheline_mismatch) {
1494 						panic("Cacheline mismatch while processing unhandled exception.");
1495 					}
1496 				} else {
1497 					pcb->insn_state->out_of_synch = 0;
1498 				}
1499 			} else if (inspect_cacheline) {
1500 				printf("thread %p could not capture code cacheline at fault IP %p [offset %d]\n",
1501 				    (void *)thread, (void *)rip, (int)(insn_offset - (rip & CACHELINE_MASK)));
1502 				pcb->insn_state->out_of_synch = 0;
1503 			}
1504 #else
1505 			pcb->insn_state->out_of_synch = 0;
1506 #endif /* DEVELOPMENT || DEBUG */
1507 
1508 #if defined(MACH_BSD) && (DEVELOPMENT || DEBUG)
1509 			if (panic_on_trap_procname[0] != 0) {
1510 				task_t task = get_threadtask(thread);
1511 				char procnamebuf[65] = {0};
1512 
1513 				if (get_bsdtask_info(task) != NULL) {
1514 					procname = proc_name_address(get_bsdtask_info(task));
1515 					strlcpy(procnamebuf, procname, sizeof(procnamebuf));
1516 
1517 					if (strcasecmp(panic_on_trap_procname, procnamebuf) == 0 &&
1518 					    ((1U << trap_code) & panic_on_trap_mask) != 0) {
1519 						panic("Panic requested on trap type 0x%x for process `%s'", trap_code,
1520 						    panic_on_trap_procname);
1521 						/*NORETURN*/
1522 					}
1523 				}
1524 			}
1525 #endif /* MACH_BSD && (DEVELOPMENT || DEBUG) */
1526 		}
1527 	} else {
1528 		enable_preemption();
1529 
1530 		pcb->insn_state_copyin_failure_errorcode = copyin_err;
1531 #if DEVELOPMENT || DEBUG
1532 		if (inspect_cacheline && pcb->insn_state == 0) {
1533 			pcb->insn_state = kalloc_data(sizeof(x86_instruction_state_t), Z_WAITOK);
1534 		}
1535 		if (pcb->insn_state != 0) {
1536 			pcb->insn_state->insn_stream_valid_bytes = 0;
1537 			pcb->insn_state->insn_offset = 0;
1538 
1539 			if (inspect_cacheline &&
1540 			    (*(uint64_t *)(uintptr_t)&pcb->insn_cacheline[0] != CACHELINE_DATA_NOT_PRESENT &&
1541 			    *(uint64_t *)(uintptr_t)&pcb->insn_cacheline[8] != CACHELINE_DATA_NOT_PRESENT)) {
1542 				/*
1543 				 * We can still copy the cacheline into the instruction state structure
1544 				 * if it contains valid data
1545 				 */
1546 				pcb->insn_state->out_of_synch = 1;
1547 				bcopy(&pcb->insn_cacheline[0], &pcb->insn_state->insn_cacheline[0],
1548 				    x86_INSTRUCTION_STATE_CACHELINE_SIZE);
1549 			}
1550 		}
1551 #endif /* DEVELOPMENT || DEBUG */
1552 	}
1553 }
1554 
1555 /*
1556  * Handle exceptions for i386.
1557  *
1558  * If we are an AT bus machine, we must turn off the AST for a
1559  * delayed floating-point exception.
1560  *
1561  * If we are providing floating-point emulation, we may have
1562  * to retrieve the real register values from the floating point
1563  * emulator.
1564  */
1565 void
1566 i386_exception(
1567 	int     exc,
1568 	mach_exception_code_t code,
1569 	mach_exception_subcode_t subcode)
1570 {
1571 	mach_exception_data_type_t   codes[EXCEPTION_CODE_MAX];
1572 
1573 	DEBUG_KPRINT_SYSCALL_MACH("i386_exception: exc=%d code=0x%llx subcode=0x%llx\n",
1574 	    exc, code, subcode);
1575 	codes[0] = code;                /* new exception interface */
1576 	codes[1] = subcode;
1577 	exception_triage(exc, codes, 2);
1578 	/*NOTREACHED*/
1579 }
1580 
1581 
1582 /* Synchronize a thread's x86_kernel_state (if any) with the given
1583  * x86_saved_state_t obtained from the trap/IPI handler; called in
1584  * kernel_trap() prior to entering the debugger, and when receiving
1585  * an "MP_KDP" IPI. Called with null saved_state if an incoming IPI
1586  * was detected from the kernel while spinning with interrupts masked.
1587  */
1588 
1589 void
1590 sync_iss_to_iks(x86_saved_state_t *saved_state)
1591 {
1592 	struct x86_kernel_state *iks = NULL;
1593 	vm_offset_t kstack;
1594 	boolean_t record_active_regs = FALSE;
1595 
1596 	/* The PAL may have a special way to sync registers */
1597 	if (saved_state && saved_state->flavor == THREAD_STATE_NONE) {
1598 		pal_get_kern_regs( saved_state );
1599 	}
1600 
1601 	if (current_thread() != NULL &&
1602 	    (kstack = current_thread()->kernel_stack) != 0) {
1603 		x86_saved_state64_t     *regs = saved_state64(saved_state);
1604 
1605 		iks = STACK_IKS(kstack);
1606 
1607 		/* Did we take the trap/interrupt in kernel mode? */
1608 		if (saved_state == NULL || /* NULL => polling in kernel */
1609 		    regs == USER_REGS64(current_thread())) {
1610 			record_active_regs = TRUE;
1611 		} else {
1612 			iks->k_rbx = regs->rbx;
1613 			iks->k_rsp = regs->isf.rsp;
1614 			iks->k_rbp = regs->rbp;
1615 			iks->k_r12 = regs->r12;
1616 			iks->k_r13 = regs->r13;
1617 			iks->k_r14 = regs->r14;
1618 			iks->k_r15 = regs->r15;
1619 			iks->k_rip = regs->isf.rip;
1620 		}
1621 	}
1622 
1623 	if (record_active_regs == TRUE) {
1624 		/* Show the trap handler path */
1625 		__asm__ volatile ("movq %%rbx, %0" : "=m" (iks->k_rbx));
1626 		__asm__ volatile ("movq %%rsp, %0" : "=m" (iks->k_rsp));
1627 		__asm__ volatile ("movq %%rbp, %0" : "=m" (iks->k_rbp));
1628 		__asm__ volatile ("movq %%r12, %0" : "=m" (iks->k_r12));
1629 		__asm__ volatile ("movq %%r13, %0" : "=m" (iks->k_r13));
1630 		__asm__ volatile ("movq %%r14, %0" : "=m" (iks->k_r14));
1631 		__asm__ volatile ("movq %%r15, %0" : "=m" (iks->k_r15));
1632 		/* "Current" instruction pointer */
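		/* (leaq 1f(%rip) materializes, via %rax, the address of the
		 * local label "1:" that immediately follows the instruction.) */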
1633 		__asm__ volatile ("leaq 1f(%%rip), %%rax; mov %%rax, %0\n1:"
1634                                   : "=m" (iks->k_rip)
1635                                   :
1636                                   : "rax");
1637 	}
1638 }
1639 
1640 /*
1641  * This is used by the NMI interrupt handler (from mp.c) to
1642  * unconditionally sync the trap handler context to the IKS
1643  * irrespective of whether the NMI was fielded in kernel
1644  * or user space.
1645  */
1646 void
1647 sync_iss_to_iks_unconditionally(__unused x86_saved_state_t *saved_state)
1648 {
1649 	struct x86_kernel_state *iks;
1650 	vm_offset_t kstack;
1651 
1652 	if ((kstack = current_thread()->kernel_stack) != 0) {
1653 		iks = STACK_IKS(kstack);
1654 		/* Display the trap handler path */
1655 		__asm__ volatile ("movq %%rbx, %0" : "=m" (iks->k_rbx));
1656 		__asm__ volatile ("movq %%rsp, %0" : "=m" (iks->k_rsp));
1657 		__asm__ volatile ("movq %%rbp, %0" : "=m" (iks->k_rbp));
1658 		__asm__ volatile ("movq %%r12, %0" : "=m" (iks->k_r12));
1659 		__asm__ volatile ("movq %%r13, %0" : "=m" (iks->k_r13));
1660 		__asm__ volatile ("movq %%r14, %0" : "=m" (iks->k_r14));
1661 		__asm__ volatile ("movq %%r15, %0" : "=m" (iks->k_r15));
1662 		/* "Current" instruction pointer */
1663 		__asm__ volatile ("leaq 1f(%%rip), %%rax; mov %%rax, %0\n1:" : "=m" (iks->k_rip)::"rax");
1664 	}
1665 }
1666 
1667 #if DEBUG
1668 #define TERI 1
1669 #endif
1670 
1671 #if TERI
1672 extern void     thread_exception_return_internal(void) __dead2;
1673 
1674 void
1675 thread_exception_return(void)
1676 {
1677 	thread_t thread = current_thread();
1678 	task_t   task   = current_task();
1679 
1680 	ml_set_interrupts_enabled(FALSE);
1681 	if (thread_is_64bit_addr(thread) != task_has_64Bit_addr(task)) {
1682 		panic("Task/thread bitness mismatch %p %p, task: %d, thread: %d",
1683 		    thread, task, thread_is_64bit_addr(thread), task_has_64Bit_addr(task));
1684 	}
1685 
1686 	if (thread_is_64bit_addr(thread)) {
1687 		if ((gdt_desc_p(USER64_CS)->access & ACC_PL_U) == 0) {
1688 			panic("64-GDT mismatch %p, descriptor: %p", thread, gdt_desc_p(USER64_CS));
1689 		}
1690 	} else {
1691 		if ((gdt_desc_p(USER_CS)->access & ACC_PL_U) == 0) {
1692 			panic("32-GDT mismatch %p, descriptor: %p", thread, gdt_desc_p(USER_CS));
1693 		}
1694 	}
1695 	assert(get_preemption_level() == 0);
1696 	thread_exception_return_internal();
1697 }
1698 #endif
1699 
1700 #if DEVELOPMENT || DEBUG
1701 static int trap_handled;
1702 
1703 static void
1704 handle_recoverable_kernel_trap(
1705 	__unused void     *tstate,
1706 	uint16_t          comment)
1707 {
1708 	assert(comment == TEST_RECOVERABLE_SOFT_TRAP);
1709 
1710 	printf("Recoverable trap handled.\n");
1711 	trap_handled = 1;
1712 }
1713 
1714 KERNEL_BRK_DESCRIPTOR_DEFINE(test_desc,
1715     .type                = KERNEL_BRK_TYPE_TEST,
1716     .base                = TEST_RECOVERABLE_SOFT_TRAP,
1717     .max                 = TEST_RECOVERABLE_SOFT_TRAP,
1718     .options             = KERNEL_BRK_RECOVERABLE,
1719     .handle_breakpoint   = handle_recoverable_kernel_trap);
1720 
1721 static int
1722 recoverable_kernel_trap_test(__unused int64_t in, int64_t *out)
1723 {
1724 	ml_recoverable_trap(TEST_RECOVERABLE_SOFT_TRAP);
1725 
1726 	*out = trap_handled;
1727 	return 0;
1728 }
1729 
1730 SYSCTL_TEST_REGISTER(recoverable_kernel_trap, recoverable_kernel_trap_test);
1731 #endif
1732