/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 * Hardware trap/fault handler.
 */

#include <mach_kdp.h>
#include <mach_ldebug.h>

#include <types.h>
#include <i386/eflags.h>
#include <i386/trap.h>
#include <i386/pmap.h>
#include <i386/fpu.h>
#include <i386/panic_notify.h>
#include <i386/lapic.h>

#include <mach/exception.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/i386/thread_status.h>

#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/exception.h>
#include <kern/spl.h>
#include <kern/misc_protos.h>
#include <kern/debug.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif
#include <sys/kdebug.h>
#include <kperf/kperf.h>
#include <prng/random.h>
#include <prng/entropy.h>

#include <string.h>

#include <i386/postcode.h>
#include <i386/mp_desc.h>
#include <i386/proc_reg.h>
#include <i386/machine_routines.h>
#if CONFIG_MCA
#include <i386/machine_check.h>
#endif
#include <mach/i386/syscall_sw.h>

#include <libkern/OSDebug.h>
#include <i386/cpu_threads.h>
#include <machine/pal_routines.h>
#include <i386/lbr.h>

extern void throttle_lowpri_io(int);
extern void kprint_state(x86_saved_state64_t *saved_state);
#if DEVELOPMENT || DEBUG
int insnstream_force_cacheline_mismatch = 0;
extern int panic_on_cacheline_mismatch;
extern char panic_on_trap_procname[];
extern uint32_t panic_on_trap_mask;
#endif

extern int insn_copyin_count;

/*
 * Forward declarations
 */
static void panic_trap(x86_saved_state64_t *saved_state, uint32_t pl, kern_return_t fault_result) __dead2;
static void set_recovery_ip(x86_saved_state64_t *saved_state, vm_offset_t ip);
#if DEVELOPMENT || DEBUG
static __attribute__((noinline)) void copy_instruction_stream(thread_t thread, uint64_t rip, int trap_code, bool inspect_cacheline);
#else
static __attribute__((noinline)) void copy_instruction_stream(thread_t thread, uint64_t rip, int trap_code);
#endif

#if CONFIG_DTRACE
/* See <rdar://problem/4613924> */
perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */

extern boolean_t dtrace_tally_fault(user_addr_t);
extern boolean_t dtrace_handle_trap(int, x86_saved_state_t *);
#endif

#ifdef MACH_BSD
extern char *   proc_name_address(void *p);
#endif /* MACH_BSD */

extern boolean_t pmap_smep_enabled;
extern boolean_t pmap_smap_enabled;

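/*
 * Return path for kernel-handled syscalls: write the return value into the
 * saved user register state and exit to user space.  Note how Mach traps are
 * distinguished from Unix syscalls below: 64-bit callers encode the syscall
 * class in the high bits of %rax (SYSCALL_CLASS_MACH), while 32-bit Mach
 * traps are negative syscall numbers.
 */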
__attribute__((noreturn))
void
thread_syscall_return(
	kern_return_t ret)
{
	thread_t        thr_act = current_thread();
	boolean_t       is_mach;
	int             code;

	pal_register_cache_state(thr_act, DIRTY);

	if (thread_is_64bit_addr(thr_act)) {
		x86_saved_state64_t     *regs;

		regs = USER_REGS64(thr_act);

		code = (int) (regs->rax & SYSCALL_NUMBER_MASK);
		is_mach = (regs->rax & SYSCALL_CLASS_MASK)
		    == (SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT);
		if (kdebug_enable && is_mach) {
			/* Mach trap */
			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			    MACHDBG_CODE(DBG_MACH_EXCP_SC, code) | DBG_FUNC_END,
			    ret, 0, 0, 0, 0);
		}
		regs->rax = ret;
#if DEBUG
		if (is_mach) {
			DEBUG_KPRINT_SYSCALL_MACH(
				"thread_syscall_return: 64-bit mach ret=%u\n",
				ret);
		} else {
			DEBUG_KPRINT_SYSCALL_UNIX(
				"thread_syscall_return: 64-bit unix ret=%u\n",
				ret);
		}
#endif
	} else {
		x86_saved_state32_t     *regs;

		regs = USER_REGS32(thr_act);

		code = ((int) regs->eax);
		is_mach = (code < 0);
		if (kdebug_enable && is_mach) {
			/* Mach trap */
			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			    MACHDBG_CODE(DBG_MACH_EXCP_SC, -code) | DBG_FUNC_END,
			    ret, 0, 0, 0, 0);
		}
		regs->eax = ret;
#if DEBUG
		if (is_mach) {
			DEBUG_KPRINT_SYSCALL_MACH(
				"thread_syscall_return: 32-bit mach ret=%u\n",
				ret);
		} else {
			DEBUG_KPRINT_SYSCALL_UNIX(
				"thread_syscall_return: 32-bit unix ret=%u\n",
				ret);
		}
#endif
	}

#if DEBUG || DEVELOPMENT
	kern_allocation_name_t
	prior __assert_only = thread_get_kernel_state(thr_act)->allocation_name;
	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* DEBUG || DEVELOPMENT */

	throttle_lowpri_io(1);

	thread_exception_return();
	/*NOTREACHED*/
}

/*
 * Fault recovery in copyin/copyout routines.
 */
struct recovery {
	uintptr_t       fault_addr;
	uintptr_t       recover_addr;
};

extern struct recovery  recover_table[];
extern struct recovery  recover_table_end[];

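/*
 * Each recovery entry pairs the IP of a potentially-faulting user access with
 * the IP to resume at if that access faults; kernel_trap() scans this table
 * for T_PAGE_FAULT/T_GENERAL_PROTECTION before treating a kernel fault as
 * fatal.  (The table itself is assumed to be emitted by the fault-recovery
 * sections of the copyio paths; see the recover_table externs above.)
 */
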
const char *    trap_type[] = {TRAP_NAMES};
unsigned        TRAP_TYPES = sizeof(trap_type) / sizeof(trap_type[0]);

extern void     PE_incoming_interrupt(int interrupt);

#if defined(__x86_64__) && DEBUG
void
kprint_state(x86_saved_state64_t        *saved_state)
{
	kprintf("current_cpu_datap() 0x%lx\n", (uintptr_t)current_cpu_datap());
	kprintf("Current GS base MSR 0x%llx\n", rdmsr64(MSR_IA32_GS_BASE));
	kprintf("Kernel  GS base MSR 0x%llx\n", rdmsr64(MSR_IA32_KERNEL_GS_BASE));
	kprintf("state at 0x%lx:\n", (uintptr_t) saved_state);

	kprintf("      rdi    0x%llx\n", saved_state->rdi);
	kprintf("      rsi    0x%llx\n", saved_state->rsi);
	kprintf("      rdx    0x%llx\n", saved_state->rdx);
	kprintf("      r10    0x%llx\n", saved_state->r10);
	kprintf("      r8     0x%llx\n", saved_state->r8);
	kprintf("      r9     0x%llx\n", saved_state->r9);

	kprintf("      cr2    0x%llx\n", saved_state->cr2);
	kprintf("real  cr2    0x%lx\n", get_cr2());
	kprintf("      r15    0x%llx\n", saved_state->r15);
	kprintf("      r14    0x%llx\n", saved_state->r14);
	kprintf("      r13    0x%llx\n", saved_state->r13);
	kprintf("      r12    0x%llx\n", saved_state->r12);
	kprintf("      r11    0x%llx\n", saved_state->r11);
	kprintf("      rbp    0x%llx\n", saved_state->rbp);
	kprintf("      rbx    0x%llx\n", saved_state->rbx);
	kprintf("      rcx    0x%llx\n", saved_state->rcx);
	kprintf("      rax    0x%llx\n", saved_state->rax);

	kprintf("      gs     0x%x\n", saved_state->gs);
	kprintf("      fs     0x%x\n", saved_state->fs);

	kprintf("  isf.trapno 0x%x\n", saved_state->isf.trapno);
	kprintf("  isf._pad   0x%x\n", saved_state->isf._pad);
	kprintf("  isf.trapfn 0x%llx\n", saved_state->isf.trapfn);
	kprintf("  isf.err    0x%llx\n", saved_state->isf.err);
	kprintf("  isf.rip    0x%llx\n", saved_state->isf.rip);
	kprintf("  isf.cs     0x%llx\n", saved_state->isf.cs);
	kprintf("  isf.rflags 0x%llx\n", saved_state->isf.rflags);
	kprintf("  isf.rsp    0x%llx\n", saved_state->isf.rsp);
	kprintf("  isf.ss     0x%llx\n", saved_state->isf.ss);
}
#endif


/*
 * Non-zero indicates the interrupt-latency assert is enabled, capped at
 * this value in absolute time units.
 */

uint64_t interrupt_latency_cap = 0;
boolean_t ilat_assert = FALSE;

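/*
 * Example (illustrative values): booting with `interrupt_latency_cap_us=1000'
 * caps tolerated interrupt latency at 1 ms, converted to absolute time below;
 * absent the boot-arg, the cap defaults to LockTimeOut.  The panic check is
 * armed only when `-interrupt_latency_assert_enable' is also present.
 */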
void
interrupt_latency_tracker_setup(void)
{
	uint32_t ilat_cap_us;
	if (PE_parse_boot_argn("interrupt_latency_cap_us", &ilat_cap_us, sizeof(ilat_cap_us))) {
		interrupt_latency_cap = ilat_cap_us * NSEC_PER_USEC;
		nanoseconds_to_absolutetime(interrupt_latency_cap, &interrupt_latency_cap);
	} else {
		interrupt_latency_cap = LockTimeOut;
	}
	PE_parse_boot_argn("-interrupt_latency_assert_enable", &ilat_assert, sizeof(ilat_assert));
}

void
interrupt_reset_latency_stats(void)
{
	uint32_t i;
	for (i = 0; i < real_ncpus; i++) {
		cpu_data_ptr[i]->cpu_max_observed_int_latency =
		    cpu_data_ptr[i]->cpu_max_observed_int_latency_vector = 0;
	}
}

void
interrupt_populate_latency_stats(char *buf, unsigned bufsize)
{
	uint32_t i, tcpu = ~0;
	uint64_t cur_max = 0;

	for (i = 0; i < real_ncpus; i++) {
		if (cur_max < cpu_data_ptr[i]->cpu_max_observed_int_latency) {
			cur_max = cpu_data_ptr[i]->cpu_max_observed_int_latency;
			tcpu = i;
		}
	}

	if (tcpu < real_ncpus) {
		snprintf(buf, bufsize, "0x%x 0x%x 0x%llx", tcpu, cpu_data_ptr[tcpu]->cpu_max_observed_int_latency_vector, cpu_data_ptr[tcpu]->cpu_max_observed_int_latency);
	}
}

uint32_t interrupt_timer_coalescing_enabled = 1;
uint64_t interrupt_coalesced_timers;

/*
 * Handle interrupts:
 *  - local APIC interrupts (IPIs, timers, etc) are handled by the kernel,
 *  - device interrupts go to the platform expert.
 */
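/*
 * Note: the low two bits of the saved CS selector hold the privilege level of
 * the interrupted context, so a nonzero (cs & 0x03) below means the interrupt
 * arrived from user mode.
 */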
void
interrupt(x86_saved_state_t *state)
{
	uint64_t        rip;
	uint64_t        rsp;
	int             interrupt_num;
	boolean_t       user_mode = FALSE;
	int             ipl;
	int             cnum = cpu_number();
	cpu_data_t      *cdp = cpu_data_ptr[cnum];
	int             itype = DBG_INTR_TYPE_UNKNOWN;
	int             handled;


	x86_saved_state64_t     *state64 = saved_state64(state);
	rip = state64->isf.rip;
	rsp = state64->isf.rsp;
	interrupt_num = state64->isf.trapno;
	if (state64->isf.cs & 0x03) {
		user_mode = TRUE;
	}

#if DEVELOPMENT || DEBUG
	uint64_t frameptr = is_saved_state64(state) ? state64->rbp : saved_state32(state)->ebp;
	uint32_t traptrace_index = traptrace_start(interrupt_num, rip, mach_absolute_time(), frameptr);
#endif

	if (cpu_data_ptr[cnum]->lcpu.package->num_idle == topoParms.nLThreadsPerPackage) {
		cpu_data_ptr[cnum]->cpu_hwIntpexits[interrupt_num]++;
	}

	if (interrupt_num == (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_INTERPROCESSOR_INTERRUPT)) {
		itype = DBG_INTR_TYPE_IPI;
	} else if (interrupt_num == (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_TIMER_INTERRUPT)) {
		itype = DBG_INTR_TYPE_TIMER;
	} else {
		itype = DBG_INTR_TYPE_OTHER;
	}

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
	    interrupt_num,
	    (user_mode ? rip : VM_KERNEL_UNSLIDE(rip)),
	    user_mode, itype, 0);

	SCHED_STATS_INC(interrupt_count);

#if CONFIG_TELEMETRY
	if (telemetry_needs_record) {
		telemetry_mark_curthread(user_mode, FALSE);
	}
#endif

	ipl = get_preemption_level();

	/*
	 * Handle local APIC interrupts
	 * else call platform expert for devices.
	 */
	handled = lapic_interrupt(interrupt_num, state);

	if (!handled) {
		if (interrupt_num == (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_CMCI_INTERRUPT)) {
			/*
			 * CMCI can be signalled on any logical processor, and the kexts
			 * that implement handling CMCI use IOKit to register handlers for
			 * the CMCI vector, so if we see a CMCI, do not encode a CPU
			 * number in bits 8:31 (since the vector is the same regardless of
			 * the handling CPU).
			 */
			PE_incoming_interrupt(interrupt_num);
		} else if (cnum <= lapic_max_interrupt_cpunum) {
			PE_incoming_interrupt((cnum << 8) | interrupt_num);
		}
	}

	if (__improbable(get_preemption_level() != ipl)) {
		panic("Preemption level altered by interrupt vector 0x%x: initial 0x%x, final: 0x%x", interrupt_num, ipl, get_preemption_level());
	}


	if (__improbable(cdp->cpu_nested_istack)) {
		cdp->cpu_nested_istack_events++;
	} else {
		uint64_t ctime = mach_absolute_time();
		uint64_t int_latency = ctime - cdp->cpu_int_event_time;
		uint64_t esdeadline, ehdeadline;
		/* Attempt to process deferred timers in the context of
		 * this interrupt, unless interrupt time has already exceeded
		 * TCOAL_ILAT_THRESHOLD.
		 */
#define TCOAL_ILAT_THRESHOLD (30000ULL)

		if ((int_latency < TCOAL_ILAT_THRESHOLD) &&
		    interrupt_timer_coalescing_enabled) {
			esdeadline = cdp->rtclock_timer.queue.earliest_soft_deadline;
			ehdeadline = cdp->rtclock_timer.deadline;
			if ((ctime >= esdeadline) && (ctime < ehdeadline)) {
				interrupt_coalesced_timers++;
				TCOAL_DEBUG(0x88880000 | DBG_FUNC_START, ctime, esdeadline, ehdeadline, interrupt_coalesced_timers, 0);
				rtclock_intr(state);
				TCOAL_DEBUG(0x88880000 | DBG_FUNC_END, ctime, esdeadline, interrupt_coalesced_timers, 0, 0);
			} else {
				TCOAL_DEBUG(0x77770000, ctime, cdp->rtclock_timer.queue.earliest_soft_deadline, cdp->rtclock_timer.deadline, interrupt_coalesced_timers, 0);
			}
		}

		if (__improbable(ilat_assert && (int_latency > interrupt_latency_cap) && !machine_timeout_suspended())) {
			panic("Interrupt vector 0x%x exceeded interrupt latency threshold, 0x%llx absolute time delta, prior signals: 0x%x, current signals: 0x%x", interrupt_num, int_latency, cdp->cpu_prior_signals, cdp->cpu_signals);
		}

		if (__improbable(int_latency > cdp->cpu_max_observed_int_latency)) {
			cdp->cpu_max_observed_int_latency = int_latency;
			cdp->cpu_max_observed_int_latency_vector = interrupt_num;
		}
	}

	/*
	 * Having serviced the interrupt first, look at the interrupted stack depth.
	 */
	if (!user_mode) {
		uint64_t depth = cdp->cpu_kernel_stack
		    + sizeof(struct thread_kernel_state)
		    + sizeof(struct i386_exception_link *)
		    - rsp;
		if (__improbable(depth > kernel_stack_depth_max)) {
			kernel_stack_depth_max = (vm_offset_t)depth;
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
				(long) depth, (long) VM_KERNEL_UNSLIDE(rip), 0, 0, 0);
		}
	}

	if (cnum == master_cpu) {
		entropy_collect();
	}

#if KPERF
	kperf_interrupt();
#endif /* KPERF */

	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END,
	    interrupt_num);

	assert(ml_get_interrupts_enabled() == FALSE);

#if DEVELOPMENT || DEBUG
	if (traptrace_index != TRAPTRACE_INVALID_INDEX) {
		traptrace_end(traptrace_index, mach_absolute_time());
	}
#endif
}

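/*
 * DR7 bit 10 is an architecturally reserved bit that reads as 1, so 0x400 is
 * the value of DR7 with every breakpoint-enable and control bit clear.
 */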
static inline void
reset_dr7(void)
{
	long dr7 = 0x400; /* magic dr7 reset value; 32 bit on i386, 64 bit on x86_64 */
	__asm__ volatile ("mov %0,%%dr7" : : "r" (dr7));
}
#if MACH_KDP
unsigned kdp_has_active_watchpoints = 0;
#define NO_WATCHPOINTS (!kdp_has_active_watchpoints)
#else
#define NO_WATCHPOINTS 1
#endif
/*
 * Trap from kernel mode.  Only page-fault errors are recoverable,
 * and then only in special circumstances.  All other errors are
 * fatal.  The handler returns if the trap was handled and panics
 * otherwise.
 */

void
kernel_trap(
	x86_saved_state_t       *state,
	uintptr_t *lo_spp)
{
	x86_saved_state64_t     *saved_state;
	int                     code;
	user_addr_t             vaddr;
	int                     type;
	vm_map_t                map = 0;        /* protected by T_PAGE_FAULT */
	kern_return_t           result = KERN_FAILURE;
	kern_return_t           fault_result = KERN_SUCCESS;
	thread_t                thread;
	boolean_t               intr;
	vm_prot_t               prot;
	struct recovery         *rp;
	vm_offset_t             kern_ip;
	int                     is_user;
	int                     trap_pl = get_preemption_level();

	thread = current_thread();

	if (__improbable(is_saved_state32(state))) {
		panic("kernel_trap(%p) with 32-bit state", state);
	}
	saved_state = saved_state64(state);

	/* Record cpu where state was captured */
	saved_state->isf.cpu = cpu_number();

	vaddr = (user_addr_t)saved_state->cr2;
	type  = saved_state->isf.trapno;
	code  = (int)(saved_state->isf.err & 0xffff);
	intr  = (saved_state->isf.rflags & EFL_IF) != 0;        /* state of ints at trap */
	kern_ip = (vm_offset_t)saved_state->isf.rip;

	is_user = (vaddr < VM_MAX_USER_PAGE_ADDRESS);

#if DEVELOPMENT || DEBUG
	uint32_t traptrace_index = traptrace_start(type, kern_ip, mach_absolute_time(), saved_state->rbp);
#endif

#if CONFIG_DTRACE
	/*
	 * Is there a DTrace hook?
	 */
	if (__improbable(tempDTraceTrapHook != NULL)) {
		if (tempDTraceTrapHook(type, state, lo_spp, 0) == KERN_SUCCESS) {
			/*
			 * If it succeeds, we are done...
			 */
			goto common_return;
		}
	}

	/* Handle traps originated from probe context. */
	if (thread != THREAD_NULL && thread->t_dtrace_inprobe) {
		if (dtrace_handle_trap(type, state)) {
			goto common_return;
		}
	}

#endif /* CONFIG_DTRACE */

	/*
	 * We come here with interrupts off, as we don't want to recurse
	 * on preemption below.  But we do want to re-enable interrupts
	 * as soon as we possibly can, to hold latency down.
	 */
	if (__improbable(T_PREEMPT == type)) {
		ast_taken_kernel();

		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    (MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
		    0, 0, 0, VM_KERNEL_UNSLIDE(kern_ip), 0);

		goto common_return;
	}

	user_addr_t     kd_vaddr = is_user ? vaddr : VM_KERNEL_UNSLIDE(vaddr);
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    (MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
	    (unsigned)(kd_vaddr >> 32), (unsigned)kd_vaddr, is_user,
	    VM_KERNEL_UNSLIDE(kern_ip), 0);


	if (T_PAGE_FAULT == type) {
		/*
		 * assume we're faulting in the kernel map
		 */
		map = kernel_map;

		if (__probable((thread != THREAD_NULL) && (thread->map != kernel_map) &&
		    (vaddr < VM_MAX_USER_PAGE_ADDRESS))) {
			/* fault occurred in userspace */
			map = thread->map;

			/* Intercept a potential Supervisor Mode Execute
			 * Protection fault. These criteria identify
			 * both NX faults and SMEP faults, but both
			 * are fatal. We avoid checking PTEs (racy).
			 * (The VM could just redrive a SMEP fault, hence
			 * the intercept).
			 */
			if (__improbable((code == (T_PF_PROT | T_PF_EXECUTE)) &&
			    (pmap_smep_enabled) && (saved_state->isf.rip == vaddr))) {
				goto debugger_entry;
			}

			/*
			 * Additionally check for SMAP faults...
			 * which are characterized by page-present and
			 * the AC bit unset (i.e. not from copyin/out path).
			 */
			if (__improbable(code & T_PF_PROT &&
			    pmap_smap_enabled &&
			    (saved_state->isf.rflags & EFL_AC) == 0)) {
				goto debugger_entry;
			}

			/*
			 * If we're not sharing cr3 with the user
			 * and we faulted in copyio,
			 * then switch cr3 here and dismiss the fault.
			 */
			if (no_shared_cr3 &&
			    (thread->machine.specFlags & CopyIOActive) &&
			    map->pmap->pm_cr3 != get_cr3_base()) {
				pmap_assert(current_cpu_datap()->cpu_pmap_pcid_enabled == FALSE);
				set_cr3_raw(map->pmap->pm_cr3);
				return;
			}
			if (__improbable(vaddr < PAGE_SIZE) &&
			    ((thread->machine.specFlags & CopyIOActive) == 0)) {
				goto debugger_entry;
			}
		}
	}

	(void) ml_set_interrupts_enabled(intr);

	switch (type) {
	case T_NO_FPU:
		fpnoextflt();
		goto common_return;

	case T_FPU_FAULT:
		fpextovrflt();
		goto common_return;

	case T_FLOATING_POINT_ERROR:
		fpexterrflt();
		goto common_return;

	case T_SSE_FLOAT_ERROR:
		fpSSEexterrflt();
		goto common_return;

	case T_INVALID_OPCODE:
		fpUDflt(kern_ip);
		goto debugger_entry;

	case T_DEBUG:
		/*
		 * Re-enable LBR tracing for core/panic files if necessary. i386_lbr_enable confirms LBR should be re-enabled.
		 */
		i386_lbr_enable();
		if ((saved_state->isf.rflags & EFL_TF) == 0 && NO_WATCHPOINTS) {
			/* We've somehow encountered a debug
			 * register match that does not belong
			 * to the kernel debugger.
			 * This isn't supposed to happen.
			 */
			reset_dr7();
			goto common_return;
		}
		goto debugger_entry;
	case T_INT3:
		goto debugger_entry;
	case T_PAGE_FAULT:

#if CONFIG_DTRACE
		if (thread != THREAD_NULL && thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */
			if (dtrace_tally_fault(vaddr)) { /* Should a fault under dtrace be ignored? */
				/*
				 * DTrace has "anticipated" the possibility of this fault, and has
				 * established the suitable recovery state. Drop down now into the
				 * recovery handling code in "case T_GENERAL_PROTECTION:".
				 */
				goto FALL_THROUGH;
			}
		}
#endif /* CONFIG_DTRACE */

		prot = VM_PROT_READ;

		if (code & T_PF_WRITE) {
			prot |= VM_PROT_WRITE;
		}
		if (code & T_PF_EXECUTE) {
			prot |= VM_PROT_EXECUTE;
		}

		fault_result = result = vm_fault(map,
		    vaddr,
		    prot,
		    FALSE, VM_KERN_MEMORY_NONE,
		    THREAD_UNINT, NULL, 0);

		if (result == KERN_SUCCESS) {
			goto common_return;
		}
		/*
		 * fall through
		 */
#if CONFIG_DTRACE
FALL_THROUGH:
#endif /* CONFIG_DTRACE */

	case T_GENERAL_PROTECTION:
		/*
		 * If there is a failure recovery address
		 * for this fault, go there.
		 */
		for (rp = recover_table; rp < recover_table_end; rp++) {
			if (kern_ip == rp->fault_addr) {
				set_recovery_ip(saved_state, rp->recover_addr);
				goto common_return;
			}
		}

		/*
		 * Unanticipated page-fault errors in kernel
		 * should not happen.
		 *
		 * fall through...
		 */
		OS_FALLTHROUGH;
	default:
		/*
		 * Exception 15 is reserved but some chips may generate it
		 * spuriously. Seen at startup on AMD Athlon-64.
		 */
		if (type == 15) {
			kprintf("kernel_trap() ignoring spurious trap 15\n");
			goto common_return;
		}
debugger_entry:
		/* Ensure that the i386_kernel_state at the base of the
		 * current thread's stack (if any) is synchronized with the
		 * context at the moment of the trap, to facilitate
		 * access through the debugger.
		 */
		sync_iss_to_iks(state);
#if  MACH_KDP
		if (kdp_i386_trap(type, saved_state, result, (vm_offset_t)vaddr)) {
			goto common_return;
		}
#endif
	}
	pal_cli();
	panic_trap(saved_state, trap_pl, fault_result);
	/*
	 * NO RETURN
	 */

common_return:
#if DEVELOPMENT || DEBUG
	if (traptrace_index != TRAPTRACE_INVALID_INDEX) {
		traptrace_end(traptrace_index, mach_absolute_time());
	}
#endif
	return;
}

static void
set_recovery_ip(x86_saved_state64_t  *saved_state, vm_offset_t ip)
{
	saved_state->isf.rip = ip;
}

static void
panic_trap(x86_saved_state64_t *regs, uint32_t pl, kern_return_t fault_result)
{
	const char      *trapname = "Unknown";
	pal_cr_t        cr0, cr2, cr3, cr4;
	boolean_t       potential_smep_fault = FALSE, potential_kernel_NX_fault = FALSE;
	boolean_t       potential_smap_fault = FALSE;

	pal_get_control_registers( &cr0, &cr2, &cr3, &cr4 );
	assert(ml_get_interrupts_enabled() == FALSE);
	current_cpu_datap()->cpu_fatal_trap_state = regs;
	/*
	 * Issue an I/O port read if one has been requested - this is an
	 * event logic analyzers can use as a trigger point.
	 */
	panic_notify();

	kprintf("CPU %d panic trap number 0x%x, rip 0x%016llx\n",
	    cpu_number(), regs->isf.trapno, regs->isf.rip);
	kprintf("cr0 0x%016llx cr2 0x%016llx cr3 0x%016llx cr4 0x%016llx\n",
	    cr0, cr2, cr3, cr4);

	if (regs->isf.trapno < TRAP_TYPES) {
		trapname = trap_type[regs->isf.trapno];
	}

	if ((regs->isf.trapno == T_PAGE_FAULT) && (regs->isf.err == (T_PF_PROT | T_PF_EXECUTE)) && (regs->isf.rip == regs->cr2)) {
		if (pmap_smep_enabled && (regs->isf.rip < VM_MAX_USER_PAGE_ADDRESS)) {
			potential_smep_fault = TRUE;
		} else if (regs->isf.rip >= VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
			potential_kernel_NX_fault = TRUE;
		}
	} else if (pmap_smap_enabled &&
	    regs->isf.trapno == T_PAGE_FAULT &&
	    regs->isf.err & T_PF_PROT &&
	    regs->cr2 < VM_MAX_USER_PAGE_ADDRESS &&
	    regs->isf.rip >= VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
		potential_smap_fault = TRUE;
	}

#undef panic
	panic("Kernel trap at 0x%016llx, type %d=%s, registers:\n"
	    "CR0: 0x%016llx, CR2: 0x%016llx, CR3: 0x%016llx, CR4: 0x%016llx\n"
	    "RAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n"
	    "RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n"
	    "R8:  0x%016llx, R9:  0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n"
	    "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n"
	    "RFL: 0x%016llx, RIP: 0x%016llx, CS:  0x%016llx, SS:  0x%016llx\n"
	    "Fault CR2: 0x%016llx, Error code: 0x%016llx, Fault CPU: 0x%x%s%s%s%s, PL: %d, VF: %d\n",
	    regs->isf.rip, regs->isf.trapno, trapname,
	    cr0, cr2, cr3, cr4,
	    regs->rax, regs->rbx, regs->rcx, regs->rdx,
	    regs->isf.rsp, regs->rbp, regs->rsi, regs->rdi,
	    regs->r8, regs->r9, regs->r10, regs->r11,
	    regs->r12, regs->r13, regs->r14, regs->r15,
	    regs->isf.rflags, regs->isf.rip, regs->isf.cs & 0xFFFF,
	    regs->isf.ss & 0xFFFF, regs->cr2, regs->isf.err, regs->isf.cpu,
	    virtualized ? " VMM" : "",
	    potential_kernel_NX_fault ? " Kernel NX fault" : "",
	    potential_smep_fault ? " SMEP/User NX fault" : "",
	    potential_smap_fault ? " SMAP fault" : "",
	    pl,
	    fault_result);
}

#if CONFIG_DTRACE
extern kern_return_t dtrace_user_probe(x86_saved_state_t *);
#endif

#if DEBUG
uint32_t fsigs[2];
uint32_t fsigns, fsigcs;
#endif

/*
 *	Trap from user mode.
 */
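/*
 * Translates a hardware trap taken in user mode into a Mach exception
 * (exc/code/subcode) and delivers it via i386_exception(); page faults are
 * first offered to vm_fault(), and only unresolved faults are raised as
 * EXC_BAD_ACCESS.
 */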
void
user_trap(
	x86_saved_state_t *saved_state)
{
	int                     exc;
	int                     err;
	mach_exception_code_t   code;
	mach_exception_subcode_t subcode;
	int                     type;
	user_addr_t             vaddr;
	vm_prot_t               prot;
	thread_t                thread = current_thread();
	kern_return_t           kret;
	user_addr_t             rip;
	unsigned long           dr6 = 0; /* 32 bit for i386, 64 bit for x86_64 */
	int                     current_cpu = cpu_number();
#if DEVELOPMENT || DEBUG
	bool                    inspect_cacheline = false;
	uint32_t                traptrace_index;
#endif
	assert((is_saved_state32(saved_state) && !thread_is_64bit_addr(thread)) ||
	    (is_saved_state64(saved_state) && thread_is_64bit_addr(thread)));

	if (is_saved_state64(saved_state)) {
		x86_saved_state64_t     *regs;

		regs = saved_state64(saved_state);

		/* Record cpu where state was captured */
		regs->isf.cpu = current_cpu;

		type = regs->isf.trapno;
		err  = (int)regs->isf.err & 0xffff;
		vaddr = (user_addr_t)regs->cr2;
		rip   = (user_addr_t)regs->isf.rip;
#if DEVELOPMENT || DEBUG
		traptrace_index = traptrace_start(type, rip, mach_absolute_time(), regs->rbp);
#endif
	} else {
		x86_saved_state32_t     *regs;

		regs = saved_state32(saved_state);

		/* Record cpu where state was captured */
		regs->cpu = current_cpu;

		type  = regs->trapno;
		err   = regs->err & 0xffff;
		vaddr = (user_addr_t)regs->cr2;
		rip   = (user_addr_t)regs->eip;
#if DEVELOPMENT || DEBUG
		traptrace_index = traptrace_start(type, rip, mach_absolute_time(), regs->ebp);
#endif
	}

#if DEVELOPMENT || DEBUG
	/*
	 * Copy the cacheline of code into the thread's instruction stream save area
	 * before enabling interrupts (the assumption is that we have not otherwise faulted or
	 * trapped since the original cache line stores).  If the saved code is not valid,
	 * we'll catch it below when we process the copyin() for unhandled faults.
	 */
	if (thread->machine.insn_copy_optout == false &&
	    (type == T_PAGE_FAULT || type == T_INVALID_OPCODE || type == T_GENERAL_PROTECTION)) {
#define CACHELINE_SIZE 64
		THREAD_TO_PCB(thread)->insn_cacheline[CACHELINE_SIZE] = (uint8_t)(rip & (CACHELINE_SIZE - 1));
		bcopy(&cpu_shadowp(current_cpu)->cpu_rtimes[0],
		    &THREAD_TO_PCB(thread)->insn_cacheline[0],
		    sizeof(THREAD_TO_PCB(thread)->insn_cacheline) - 1);
		inspect_cacheline = true;
	}
#endif

	if (type == T_DEBUG) {
		if (thread->machine.ids) {
			unsigned long clear = 0;
			/* Stash and clear this processor's DR6 value, in the event
			 * this was a debug register match
			 */
			__asm__ volatile ("mov %%db6, %0" : "=r" (dr6));
			__asm__ volatile ("mov %0, %%db6" : : "r" (clear));
		}
		/* [Re]Enable LBRs *BEFORE* enabling interrupts to ensure we hit the right CPU */
		i386_lbr_enable();
	}

	pal_sti();

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    (MACHDBG_CODE(DBG_MACH_EXCP_UTRAP_x86, type)) | DBG_FUNC_NONE,
	    (unsigned)(vaddr >> 32), (unsigned)vaddr,
	    (unsigned)(rip >> 32), (unsigned)rip, 0);

	code = 0;
	subcode = 0;
	exc = 0;

#if CONFIG_DTRACE
	/*
	 * DTrace does not consume all user traps, only INT_3's for now.
	 * Avoid needlessly calling tempDTraceTrapHook here, and let the
	 * INT_3 case handle them.
	 */
#endif

	DEBUG_KPRINT_SYSCALL_MASK(1,
	    "user_trap: type=0x%x(%s) err=0x%x cr2=%p rip=%p\n",
	    type, trap_type[type], err, (void *)(long) vaddr, (void *)(long) rip);

	switch (type) {
	case T_DIVIDE_ERROR:
		exc = EXC_ARITHMETIC;
		code = EXC_I386_DIV;
		break;

	case T_DEBUG:
	{
		pcb_t   pcb;
		/*
		 * Update the PCB with this processor's DR6 value
		 * in the event this was a debug register match.
		 */
		pcb = THREAD_TO_PCB(thread);
		if (pcb->ids) {
			/*
			 * We can get and set the status register
			 * in 32-bit mode even on a 64-bit thread
			 * because the high order bits are not
			 * used on x86_64
			 */
			if (thread_is_64bit_addr(thread)) {
				x86_debug_state64_t *ids = pcb->ids;
				ids->dr6 = dr6;
			} else {         /* 32 bit thread */
				x86_debug_state32_t *ids = pcb->ids;
				ids->dr6 = (uint32_t) dr6;
			}
		}
		exc = EXC_BREAKPOINT;
		code = EXC_I386_SGL;
		break;
	}
	case T_INT3:
#if CONFIG_DTRACE
		if (dtrace_user_probe(saved_state) == KERN_SUCCESS) {
			return; /* If it succeeds, we are done... */
		}
#endif
		exc = EXC_BREAKPOINT;
		code = EXC_I386_BPT;
		break;

	case T_OVERFLOW:
		exc = EXC_ARITHMETIC;
		code = EXC_I386_INTO;
		break;

	case T_OUT_OF_BOUNDS:
		exc = EXC_SOFTWARE;
		code = EXC_I386_BOUND;
		break;

	case T_INVALID_OPCODE:
		if (fpUDflt(rip) == 1) {
			exc = EXC_BAD_INSTRUCTION;
			code = EXC_I386_INVOP;
		}
		break;

	case T_NO_FPU:
		fpnoextflt();
		break;

	case T_FPU_FAULT:
		fpextovrflt();
		/*
		 * Raise exception.
		 */
		exc = EXC_BAD_ACCESS;
		code = VM_PROT_READ | VM_PROT_EXECUTE;
		subcode = 0;
		break;

	case T_INVALID_TSS:     /* invalid TSS == iret with NT flag set */
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_INVTSSFLT;
		subcode = err;
		break;

	case T_SEGMENT_NOT_PRESENT:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_SEGNPFLT;
		subcode = err;
		break;

	case T_STACK_FAULT:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_STKFLT;
		subcode = err;
		break;

	case T_GENERAL_PROTECTION:
		/*
		 * There's a wide range of circumstances which generate this
		 * class of exception. From user-space, many involve bad
		 * addresses (such as a non-canonical 64-bit address).
		 * So we map this to EXC_BAD_ACCESS (and thereby SIGSEGV).
		 * The trouble is cr2 doesn't contain the faulting address;
		 * we'd need to decode the faulting instruction to really
		 * determine this. We'll leave that to debuggers.
		 * However, attempted execution of privileged instructions
		 * (e.g. cli) also generate GP faults and so we map these
		 * to EXC_BAD_ACCESS (and thence SIGSEGV) also - rather than
		 * EXC_BAD_INSTRUCTION which is more accurate. We just can't
		 * win!
		 */
		exc = EXC_BAD_ACCESS;
		code = EXC_I386_GPFLT;
		subcode = err;
		break;

	case T_PAGE_FAULT:
	{
		prot = VM_PROT_READ;

		if (err & T_PF_WRITE) {
			prot |= VM_PROT_WRITE;
		}
		if (__improbable(err & T_PF_EXECUTE)) {
			prot |= VM_PROT_EXECUTE;
		}
#if DEVELOPMENT || DEBUG
		uint32_t fsig = 0;
		fsig = thread_fpsimd_hash(thread);
#if DEBUG
		fsigs[0] = fsig;
#endif
#endif
		kret = vm_fault(thread->map,
		    vaddr,
		    prot, FALSE, VM_KERN_MEMORY_NONE,
		    THREAD_ABORTSAFE, NULL, 0);
#if DEVELOPMENT || DEBUG
		if (fsig) {
			uint32_t fsig2 = thread_fpsimd_hash(thread);
#if DEBUG
			fsigcs++;
			fsigs[1] = fsig2;
#endif
			if (fsig != fsig2) {
				panic("FP/SIMD state hash mismatch across fault thread: %p 0x%x->0x%x", thread, fsig, fsig2);
			}
		} else {
#if DEBUG
			fsigns++;
#endif
		}
#endif
		if (__probable((kret == KERN_SUCCESS) || (kret == KERN_ABORTED))) {
			break;
		} else if (__improbable(kret == KERN_FAILURE)) {
			/*
			 * For a user trap, vm_fault() should never return KERN_FAILURE.
			 * If it does, we're leaking preemption disables somewhere in the kernel.
			 */
			panic("vm_fault() KERN_FAILURE from user fault on thread %p", thread);
		}

		/* PAL debug hook (empty on x86) */
		pal_dbg_page_fault(thread, vaddr, kret);
		exc = EXC_BAD_ACCESS;
		code = kret;
		subcode = vaddr;
	}
	break;

	case T_SSE_FLOAT_ERROR:
		fpSSEexterrflt();
		exc = EXC_ARITHMETIC;
		code = EXC_I386_SSEEXTERR;
		subcode = ((struct x86_fx_thread_state *)thread->machine.ifps)->fx_MXCSR;
		break;


	case T_FLOATING_POINT_ERROR:
		fpexterrflt();
		exc = EXC_ARITHMETIC;
		code = EXC_I386_EXTERR;
		subcode = ((struct x86_fx_thread_state *)thread->machine.ifps)->fx_status;
		break;

	case T_DTRACE_RET:
#if CONFIG_DTRACE
		if (dtrace_user_probe(saved_state) == KERN_SUCCESS) {
			return; /* If it succeeds, we are done... */
		}
#endif
		/*
		 * If we get an INT 0x7f when we do not expect to,
		 * treat it as an illegal instruction
		 */
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_INVOP;
		break;

	default:
		panic("Unexpected user trap, type %d", type);
	}

	if (exc != 0) {
		uint16_t cs;
		boolean_t intrs;

		if (is_saved_state64(saved_state)) {
			cs = saved_state64(saved_state)->isf.cs;
		} else {
			cs = saved_state32(saved_state)->cs;
		}

		if (last_branch_enabled_modes == LBR_ENABLED_USERMODE) {
			intrs = ml_set_interrupts_enabled(FALSE);
			/*
			 * This is a bit racy (it's possible for this thread to migrate to another CPU, then
			 * migrate back, but that seems rather rare in practice), but good enough to ensure
			 * the LBRs are saved before proceeding with exception/signal dispatch.
			 */
			if (current_cpu == cpu_number()) {
				i386_lbr_synch(thread);
			}
			ml_set_interrupts_enabled(intrs);
		}

		/*
		 * Do not try to copyin from the instruction stream if the page fault was due
		 * to an access to rip and was unhandled.
		 * Do not deal with cases when %cs != USER[64]_CS
		 * And of course there's no need to copy the instruction stream if the boot-arg
		 * was set to 0.
		 */
		if (thread->machine.insn_copy_optout == false && insn_copyin_count > 0 &&
		    (cs == USER64_CS || cs == USER_CS) && (type != T_PAGE_FAULT || vaddr != rip)) {
#if DEVELOPMENT || DEBUG
			copy_instruction_stream(thread, rip, type, inspect_cacheline);
#else
			copy_instruction_stream(thread, rip, type);
#endif
		}

#if DEVELOPMENT || DEBUG
		if (traptrace_index != TRAPTRACE_INVALID_INDEX) {
			traptrace_end(traptrace_index, mach_absolute_time());
		}
#endif
		/*
		 * Note: Codepaths that directly return from user_trap() have pending
		 * ASTs processed in locore
		 */
		i386_exception(exc, code, subcode);
		/* NOTREACHED */
	} else {
#if DEVELOPMENT || DEBUG
		if (traptrace_index != TRAPTRACE_INVALID_INDEX) {
			traptrace_end(traptrace_index, mach_absolute_time());
		}
#endif
	}
}

/*
 * Copyin up to x86_INSTRUCTION_STATE_MAX_INSN_BYTES bytes from the page that includes `rip`,
 * ensuring that we stay on the same page, clipping the start or end, as needed.
 * Add the clipped amount back at the start or end, depending on where it fits.
 * Consult the variable populated by the boot-arg `insn_capcnt'
 */
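/*
 * Worked example (illustrative numbers only): with a 4 KiB page and
 * insn_copyin_count == 128, a fault at rip == page_base + 16 would nominally
 * copy [rip - 64, rip + 64).  Since rip - 64 lies on the previous page, the
 * 48 clipped bytes are added back at the end: we copy [page_base,
 * page_base + 128) and record insn_offset == 16, rip's offset in the buffer.
 */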
static __attribute__((noinline)) void
copy_instruction_stream(thread_t thread, uint64_t rip, int __unused trap_code
#if DEVELOPMENT || DEBUG
    , bool inspect_cacheline
#endif
    )
{
#if x86_INSTRUCTION_STATE_MAX_INSN_BYTES > 4096
#error x86_INSTRUCTION_STATE_MAX_INSN_BYTES cannot exceed a page in size.
#endif
	pcb_t pcb = THREAD_TO_PCB(thread);
	vm_map_offset_t pagemask = ~vm_map_page_mask(current_map());
	vm_map_offset_t rip_page = rip & pagemask;
	vm_map_offset_t start_addr;
	vm_map_offset_t insn_offset;
	vm_map_offset_t end_addr = rip + (insn_copyin_count / 2);
	void *stack_buffer;
	int copyin_err = 0;
#if defined(MACH_BSD) && (DEVELOPMENT || DEBUG)
	void *procname;
#endif

#if DEVELOPMENT || DEBUG
	assert(insn_copyin_count <= x86_INSTRUCTION_STATE_MAX_INSN_BYTES);
#else
	if (insn_copyin_count > x86_INSTRUCTION_STATE_MAX_INSN_BYTES ||
	    insn_copyin_count < 64 /* CACHELINE_SIZE */) {
		return;
	}
#endif

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Walloca"
	stack_buffer = __builtin_alloca(insn_copyin_count);
#pragma clang diagnostic pop

	if (rip >= (insn_copyin_count / 2)) {
		start_addr = rip - (insn_copyin_count / 2);
	} else {
		start_addr = 0;
	}

	if (start_addr < rip_page) {
		insn_offset = (insn_copyin_count / 2) - (rip_page - start_addr);
		end_addr += (rip_page - start_addr);
		start_addr = rip_page;
	} else if (end_addr >= (rip_page + (~pagemask + 1))) {
		start_addr -= (end_addr - (rip_page + (~pagemask + 1))); /* Adjust start address backward */
		/* Adjust instruction offset due to start address change */
		insn_offset = (insn_copyin_count / 2) + (end_addr - (rip_page + (~pagemask + 1)));
		end_addr = rip_page + (~pagemask + 1);  /* clip to the start of the next page (non-inclusive) */
	} else {
		insn_offset = insn_copyin_count / 2;
	}

	disable_preemption();   /* Prevent copyin from faulting in the instruction stream */
	if (
#if DEVELOPMENT || DEBUG
		(insnstream_force_cacheline_mismatch < 2) &&
#endif
		((end_addr > start_addr) && (copyin_err = copyin(start_addr, stack_buffer, end_addr - start_addr)) == 0)) {
		enable_preemption();

		if (pcb->insn_state == 0) {
			pcb->insn_state = kalloc_data(sizeof(x86_instruction_state_t), Z_WAITOK);
		}

		if (pcb->insn_state != 0) {
			bcopy(stack_buffer, pcb->insn_state->insn_bytes, end_addr - start_addr);
			bzero(&pcb->insn_state->insn_bytes[end_addr - start_addr],
			    insn_copyin_count - (end_addr - start_addr));

			pcb->insn_state->insn_stream_valid_bytes = (int)(end_addr - start_addr);
			pcb->insn_state->insn_offset = (int)insn_offset;

#if DEVELOPMENT || DEBUG
			/* Now try to validate the cacheline we read at early-fault time matches the code
			 * copied in. Before we do that, we have to make sure the buffer contains a valid
			 * cacheline by looking for the 2 sentinel values written in the event the cacheline
			 * could not be copied.
			 */
#define CACHELINE_DATA_NOT_PRESENT 0xdeadc0debeefcafeULL
#define CACHELINE_MASK (CACHELINE_SIZE - 1)

			if (inspect_cacheline &&
			    (*(uint64_t *)(uintptr_t)&pcb->insn_cacheline[0] != CACHELINE_DATA_NOT_PRESENT &&
			    *(uint64_t *)(uintptr_t)&pcb->insn_cacheline[8] != CACHELINE_DATA_NOT_PRESENT)) {
				/*
				 * The position of the cacheline in the instruction buffer is at offset
				 * insn_offset - (rip & CACHELINE_MASK)
				 */
				if (__improbable((rip & CACHELINE_MASK) > insn_offset)) {
					printf("thread %p code cacheline @ %p clipped wrt copied-in code (offset %d)\n",
					    thread, (void *)(rip & ~CACHELINE_MASK), (int)(rip & CACHELINE_MASK));
				} else if (bcmp(&pcb->insn_state->insn_bytes[insn_offset - (rip & CACHELINE_MASK)],
				    &pcb->insn_cacheline[0], CACHELINE_SIZE) != 0
				    || insnstream_force_cacheline_mismatch
				    ) {
#if x86_INSTRUCTION_STATE_CACHELINE_SIZE != CACHELINE_SIZE
#error cacheline size mismatch
#endif
					bcopy(&pcb->insn_cacheline[0], &pcb->insn_state->insn_cacheline[0],
					    x86_INSTRUCTION_STATE_CACHELINE_SIZE);
					/* Mark the instruction stream as being out-of-synch */
					pcb->insn_state->out_of_synch = 1;

					printf("thread %p code cacheline @ %p mismatches with copied-in code [trap 0x%x]\n",
					    thread, (void *)(rip & ~CACHELINE_MASK), trap_code);
					for (int i = 0; i < 8; i++) {
						printf("\t[%d] cl=0x%08llx vs. ci=0x%08llx\n", i, *(uint64_t *)(uintptr_t)&pcb->insn_cacheline[i * 8],
						    *(uint64_t *)(uintptr_t)&pcb->insn_state->insn_bytes[(i * 8) + insn_offset - (rip & CACHELINE_MASK)]);
					}
					if (panic_on_cacheline_mismatch) {
						panic("Cacheline mismatch while processing unhandled exception.");
					}
				} else {
					pcb->insn_state->out_of_synch = 0;
				}
			} else if (inspect_cacheline) {
				printf("thread %p could not capture code cacheline at fault IP %p [offset %d]\n",
				    (void *)thread, (void *)rip, (int)(insn_offset - (rip & CACHELINE_MASK)));
				pcb->insn_state->out_of_synch = 0;
			}
#else
			pcb->insn_state->out_of_synch = 0;
#endif /* DEVELOPMENT || DEBUG */

#if defined(MACH_BSD) && (DEVELOPMENT || DEBUG)
			if (panic_on_trap_procname[0] != 0) {
				task_t task = get_threadtask(thread);
				char procnamebuf[65] = {0};

				if (task->bsd_info != NULL) {
					procname = proc_name_address(task->bsd_info);
					strlcpy(procnamebuf, procname, sizeof(procnamebuf));

					if (strcasecmp(panic_on_trap_procname, procnamebuf) == 0 &&
					    ((1U << trap_code) & panic_on_trap_mask) != 0) {
						panic("Panic requested on trap type 0x%x for process `%s'", trap_code,
						    panic_on_trap_procname);
						/*NORETURN*/
					}
				}
			}
#endif /* MACH_BSD && (DEVELOPMENT || DEBUG) */
		}
	} else {
		enable_preemption();

		pcb->insn_state_copyin_failure_errorcode = copyin_err;
#if DEVELOPMENT || DEBUG
		if (inspect_cacheline && pcb->insn_state == 0) {
			pcb->insn_state = kalloc_data(sizeof(x86_instruction_state_t), Z_WAITOK);
		}
		if (pcb->insn_state != 0) {
			pcb->insn_state->insn_stream_valid_bytes = 0;
			pcb->insn_state->insn_offset = 0;

			if (inspect_cacheline &&
			    (*(uint64_t *)(uintptr_t)&pcb->insn_cacheline[0] != CACHELINE_DATA_NOT_PRESENT &&
			    *(uint64_t *)(uintptr_t)&pcb->insn_cacheline[8] != CACHELINE_DATA_NOT_PRESENT)) {
				/*
				 * We can still copy the cacheline into the instruction state structure
				 * if it contains valid data
				 */
				pcb->insn_state->out_of_synch = 1;
				bcopy(&pcb->insn_cacheline[0], &pcb->insn_state->insn_cacheline[0],
				    x86_INSTRUCTION_STATE_CACHELINE_SIZE);
			}
		}
#endif /* DEVELOPMENT || DEBUG */
	}
}

/*
 * Handle exceptions for i386.
 *
 * If we are an AT bus machine, we must turn off the AST for a
 * delayed floating-point exception.
 *
 * If we are providing floating-point emulation, we may have
 * to retrieve the real register values from the floating point
 * emulator.
 */
void
i386_exception(
	int     exc,
	mach_exception_code_t code,
	mach_exception_subcode_t subcode)
{
	mach_exception_data_type_t   codes[EXCEPTION_CODE_MAX];

	DEBUG_KPRINT_SYSCALL_MACH("i386_exception: exc=%d code=0x%llx subcode=0x%llx\n",
	    exc, code, subcode);
	codes[0] = code;                /* new exception interface */
	codes[1] = subcode;
	exception_triage(exc, codes, 2);
	/*NOTREACHED*/
}


/* Synchronize a thread's x86_kernel_state (if any) with the given
 * x86_saved_state_t obtained from the trap/IPI handler; called in
 * kernel_trap() prior to entering the debugger, and when receiving
 * an "MP_KDP" IPI. Called with null saved_state if an incoming IPI
 * was detected from the kernel while spinning with interrupts masked.
 */

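/*
 * Note on the inline assembly below: `leaq 1f(%rip)' loads the address of the
 * local label that immediately follows, so the value stored in k_rip is the
 * "current" instruction pointer of the active register context.
 */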
void
sync_iss_to_iks(x86_saved_state_t *saved_state)
{
	struct x86_kernel_state *iks = NULL;
	vm_offset_t kstack;
	boolean_t record_active_regs = FALSE;

	/* The PAL may have a special way to sync registers */
	if (saved_state && saved_state->flavor == THREAD_STATE_NONE) {
		pal_get_kern_regs( saved_state );
	}

	if (current_thread() != NULL &&
	    (kstack = current_thread()->kernel_stack) != 0) {
		x86_saved_state64_t     *regs = saved_state64(saved_state);

		iks = STACK_IKS(kstack);

		/* Did we take the trap/interrupt in kernel mode? */
		if (saved_state == NULL || /* NULL => polling in kernel */
		    regs == USER_REGS64(current_thread())) {
			record_active_regs = TRUE;
		} else {
			iks->k_rbx = regs->rbx;
			iks->k_rsp = regs->isf.rsp;
			iks->k_rbp = regs->rbp;
			iks->k_r12 = regs->r12;
			iks->k_r13 = regs->r13;
			iks->k_r14 = regs->r14;
			iks->k_r15 = regs->r15;
			iks->k_rip = regs->isf.rip;
		}
	}

	if (record_active_regs == TRUE) {
		/* Show the trap handler path */
		__asm__ volatile ("movq %%rbx, %0" : "=m" (iks->k_rbx));
		__asm__ volatile ("movq %%rsp, %0" : "=m" (iks->k_rsp));
		__asm__ volatile ("movq %%rbp, %0" : "=m" (iks->k_rbp));
		__asm__ volatile ("movq %%r12, %0" : "=m" (iks->k_r12));
		__asm__ volatile ("movq %%r13, %0" : "=m" (iks->k_r13));
		__asm__ volatile ("movq %%r14, %0" : "=m" (iks->k_r14));
		__asm__ volatile ("movq %%r15, %0" : "=m" (iks->k_r15));
		/* "Current" instruction pointer */
		__asm__ volatile ("leaq 1f(%%rip), %%rax; mov %%rax, %0\n1:"
                                  : "=m" (iks->k_rip)
                                  :
                                  : "rax");
	}
}

/*
 * This is used by the NMI interrupt handler (from mp.c) to
 * unconditionally sync the trap handler context to the IKS
 * irrespective of whether the NMI was fielded in kernel
 * or user space.
 */
void
sync_iss_to_iks_unconditionally(__unused x86_saved_state_t *saved_state)
{
	struct x86_kernel_state *iks;
	vm_offset_t kstack;

	if ((kstack = current_thread()->kernel_stack) != 0) {
		iks = STACK_IKS(kstack);
		/* Display the trap handler path */
		__asm__ volatile ("movq %%rbx, %0" : "=m" (iks->k_rbx));
		__asm__ volatile ("movq %%rsp, %0" : "=m" (iks->k_rsp));
		__asm__ volatile ("movq %%rbp, %0" : "=m" (iks->k_rbp));
		__asm__ volatile ("movq %%r12, %0" : "=m" (iks->k_r12));
		__asm__ volatile ("movq %%r13, %0" : "=m" (iks->k_r13));
		__asm__ volatile ("movq %%r14, %0" : "=m" (iks->k_r14));
		__asm__ volatile ("movq %%r15, %0" : "=m" (iks->k_r15));
		/* "Current" instruction pointer */
		__asm__ volatile ("leaq 1f(%%rip), %%rax; mov %%rax, %0\n1:" : "=m" (iks->k_rip)::"rax");
	}
}

#if DEBUG
#define TERI 1
#endif

#if TERI
extern void     thread_exception_return_internal(void) __dead2;

void
thread_exception_return(void)
{
	thread_t thread = current_thread();
	task_t   task   = current_task();

	ml_set_interrupts_enabled(FALSE);
	if (thread_is_64bit_addr(thread) != task_has_64Bit_addr(task)) {
		panic("Task/thread bitness mismatch %p %p, task: %d, thread: %d",
		    thread, task, thread_is_64bit_addr(thread), task_has_64Bit_addr(task));
	}

	if (thread_is_64bit_addr(thread)) {
		if ((gdt_desc_p(USER64_CS)->access & ACC_PL_U) == 0) {
			panic("64-GDT mismatch %p, descriptor: %p", thread, gdt_desc_p(USER64_CS));
		}
	} else {
		if ((gdt_desc_p(USER_CS)->access & ACC_PL_U) == 0) {
			panic("32-GDT mismatch %p, descriptor: %p", thread, gdt_desc_p(USER_CS));
		}
	}
	assert(get_preemption_level() == 0);
	thread_exception_return_internal();
}
#endif
1567