/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/debug.h>
#include <mach_kdp.h>
#include <machine/endian.h>
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>
#include <mach/vm_types.h>
#include <mach/mach_traps.h>

#include <mach/exception.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/message.h>
#include <mach/machine/thread_status.h>

#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_fault.h>
#include <vm/vm_kern.h>

#include <kern/ast.h>
#include <kern/restartable.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/task.h>

#include <sys/kdebug.h>
#include <kperf/kperf.h>

#include <arm/trap.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/machdep_call.h>
#include <arm/machine_routines.h>
#include <arm/misc_protos.h>
#include <arm/setjmp.h>
#include <arm/proc_reg.h>

/*
 * External function prototypes.
 */
#include <kern/syscall_sw.h>
#include <kern/host.h>
#include <kern/processor.h>


#if CONFIG_DTRACE
extern kern_return_t dtrace_user_probe(arm_saved_state_t* regs, unsigned int instr);
extern boolean_t dtrace_tally_fault(user_addr_t);
/* Traps for userland processing. We can't include bsd/sys/fasttrap_isa.h, so the
 * trap instruction encodings are copied from that file. Keep them in sync! */
#define FASTTRAP_ARM_INSTR 0xe7ffdefc
#define FASTTRAP_THUMB_INSTR 0xdefc

#define FASTTRAP_ARM_RET_INSTR 0xe7ffdefb
#define FASTTRAP_THUMB_RET_INSTR 0xdefb

/* See <rdar://problem/4613924> */
perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */
#endif

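/*
 * COPYIN/COPYOUT dispatch on the mode bits of the aborted context's saved
 * CPSR ('regs' at the use site): a fault taken in a privileged mode goes
 * through the kernel copy routines, a user-mode fault through the user ones.
 */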
#define COPYIN(dst, src, size)                                  \
	((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE) ?       \
	        copyin_kern(dst, src, size)                     \
	:                                                       \
	        copyin(dst, src, size)

#define COPYOUT(src, dst, size)                                 \
	((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE) ?       \
	        copyout_kern(src, dst, size)                    \
	:                                                       \
	        copyout(src, dst, size)

/* Forward declarations for the second-level exception handlers */
void            sleh_undef(struct arm_saved_state *, struct arm_vfpsaved_state *);
void            sleh_abort(struct arm_saved_state *, int);
static kern_return_t sleh_alignment(struct arm_saved_state *);
static void panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *regs);

int             sleh_alignment_count = 0;
int             trap_on_alignment_fault = 0;

/*
 *	Routine:        sleh_undef
 *	Function:       Second level exception handler for undefined exception
 */

void
sleh_undef(struct arm_saved_state * regs, struct arm_vfpsaved_state * vfp_ss __unused)
{
	exception_type_t exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t code[2] = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t codeCnt = 2;
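	/* code[0] carries the exception flavor (EXC_ARM_*); code[1] is filled
	 * in below with the undefined instruction itself. */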
	thread_t        thread = current_thread();
	vm_offset_t     recover;

	recover = thread->machine.recover;
	thread->machine.recover = 0;

	getCpuDatap()->cpu_stat.undef_ex_cnt++;


#if CONFIG_DTRACE
	if (tempDTraceTrapHook) {
		if (tempDTraceTrapHook(exception, regs, 0, 0) == KERN_SUCCESS) {
			/*
			 * If it succeeds, we are done...
			 */
			goto exit;
		}
	}
#endif /* CONFIG_DTRACE */

	/* Inherit the interrupt masks from the previous context */
	if (!(regs->cpsr & PSR_INTMASK)) {
		ml_set_interrupts_enabled(TRUE);
	}

#if CONFIG_DTRACE
	/* Check to see if we've hit a userland probe */
	if ((regs->cpsr & PSR_MODE_MASK) == PSR_USER_MODE) {
		if (regs->cpsr & PSR_TF) {
			uint16_t instr = 0;

			if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(uint16_t))) != KERN_SUCCESS) {
				goto exit;
			}

			if (instr == FASTTRAP_THUMB_INSTR || instr == FASTTRAP_THUMB_RET_INSTR) {
				if (dtrace_user_probe(regs, instr) == KERN_SUCCESS) {
					/* If it succeeds, we are done... */
					goto exit;
				}
			}
		} else {
			uint32_t instr = 0;

			if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(uint32_t))) != KERN_SUCCESS) {
				goto exit;
			}

			if (instr == FASTTRAP_ARM_INSTR || instr == FASTTRAP_ARM_RET_INSTR) {
				if (dtrace_user_probe(regs, instr) == KERN_SUCCESS) {
					/* If it succeeds, we are done... */
					goto exit;
				}
			}
		}
	}
#endif /* CONFIG_DTRACE */

	if (regs->cpsr & PSR_TF) {
		unsigned short instr = 0;

		if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(unsigned short))) != KERN_SUCCESS) {
			goto exit;
		}

		if (IS_THUMB32(instr)) {
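			/*
			 * The first halfword indicates a 32-bit Thumb-2
			 * encoding: fetch the second halfword and splice the
			 * two together before classifying the instruction.
			 */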
			unsigned int instr32;

			instr32 = (instr << 16);

			if (COPYIN((user_addr_t)(((unsigned short *) (regs->pc)) + 1), (char *)&instr, (vm_size_t)(sizeof(unsigned short))) != KERN_SUCCESS) {
				goto exit;
			}

			instr32 |= instr;
			code[1] = instr32;

#if     __ARM_VFP__
			if (IS_THUMB_VFP(instr32)) {
				/* We no longer manage FPEXC beyond bootstrap, so verify that VFP is still enabled. */
				if (!get_vfp_enabled()) {
					panic("VFP was disabled (thumb); VFP should always be enabled");
				}
			}
#endif
		} else {
			/* I don't believe we have any 16-bit VFP instructions, so just set code[1]. */
			code[1] = instr;

			if (IS_THUMB_GDB_TRAP(instr)) {
				exception = EXC_BREAKPOINT;
				code[0] = EXC_ARM_BREAKPOINT;
			}
		}
	} else {
		uint32_t instr = 0;

		if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(uint32_t))) != KERN_SUCCESS) {
			goto exit;
		}

		code[1] = instr;
#if     __ARM_VFP__
		if (IS_ARM_VFP(instr)) {
			/* We no longer manage FPEXC beyond bootstrap, so verify that VFP is still enabled. */
			if (!get_vfp_enabled()) {
				panic("VFP was disabled (arm); VFP should always be enabled");
			}
		}
#endif

		if (IS_ARM_GDB_TRAP(instr)) {
			exception = EXC_BREAKPOINT;
			code[0] = EXC_ARM_BREAKPOINT;
		}
	}

	if ((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE) {
		boolean_t       intr;

		intr = ml_set_interrupts_enabled(FALSE);

		if (exception == EXC_BREAKPOINT) {
			/* Save off the context here (so that the debug logic
			 * can see the original state of this thread).
			 */
			vm_offset_t kstackptr = current_thread()->machine.kstackptr;
			copy_signed_thread_state((arm_saved_state_t *)kstackptr, regs);

			DebuggerCall(exception, regs);
			(void) ml_set_interrupts_enabled(intr);
			goto exit;
		}
		panic_with_thread_kernel_state("undefined kernel instruction", regs);

		(void) ml_set_interrupts_enabled(intr);
	} else {
		exception_triage(exception, code, codeCnt);
		/* NOTREACHED */
	}

exit:
	if (recover) {
		thread->machine.recover = recover;
	}
}

/*
 *	Routine:	sleh_abort
 *	Function:	Second level exception handler for aborts (prefetch/data)
 */

void
sleh_abort(struct arm_saved_state * regs, int type)
{
	int             status;
	int             debug_status = 0;
	int             spsr;
	int             exc = EXC_BAD_ACCESS;
	mach_exception_data_type_t codes[2];
	vm_map_t        map;
	vm_map_address_t vaddr;
	vm_map_address_t fault_addr;
	vm_prot_t       fault_type;
	kern_return_t   result;
	vm_offset_t     recover;
	thread_t        thread = current_thread();
	boolean_t       intr;
	bool            need_done_faulting = false;

	recover = thread->machine.recover;
	thread->machine.recover = 0;

	status = regs->fsr & FSR_MASK;
	spsr = regs->cpsr;

	/* The DFSR/IFSR.ExT bit indicates an "IMPLEMENTATION DEFINED" classification.
	 * Allow a platform-level error handler to decode it.
	 */
	if ((regs->fsr) & FSR_EXT) {
		cpu_data_t      *cdp = getCpuDatap();

		if (cdp->platform_error_handler != NULL) {
			cdp->platform_error_handler(cdp->cpu_id, 0);
			/* If a platform error handler is registered, expect it to panic, not fall through */
			panic("Unexpected return from platform_error_handler");
		}
	}

	/* Done with asynchronous handling; re-enable asynchronous aborts here so that subsequent aborts are taken as early as possible. */
	reenable_async_aborts();

	if (ml_at_interrupt_context()) {
#if CONFIG_DTRACE
		if (!(thread->t_dtrace_inprobe))
#endif /* CONFIG_DTRACE */
		{
			panic_with_thread_kernel_state("sleh_abort at interrupt context", regs);
		}
	}

	fault_addr = vaddr = regs->far;

	if (type == T_DATA_ABT) {
		getCpuDatap()->cpu_stat.data_ex_cnt++;
	} else { /* T_PREFETCH_ABT */
		getCpuDatap()->cpu_stat.instr_ex_cnt++;
		fault_type = VM_PROT_READ | VM_PROT_EXECUTE;
	}
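	/*
	 * For data aborts, fault_type is derived from the DFSR below, once
	 * interrupts have been re-enabled and the faulting instruction can be
	 * examined safely.
	 */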

	if (status == FSR_DEBUG) {
		debug_status = arm_debug_read_dscr() & ARM_DBGDSCR_MOE_MASK;
	}

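	/*
	 * Note the thread as faulting for the restartable-ranges machinery
	 * (see kern/restartable.h); every exit path below marks it done.
	 */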
	if ((spsr & PSR_MODE_MASK) == PSR_USER_MODE && TEST_FSR_VMFAULT(status)) {
		need_done_faulting = true;
		thread_reset_pcs_will_fault(thread);
	}

	/* Inherit the interrupt masks from the previous context */
	if (!(spsr & PSR_INTMASK)) {
		ml_set_interrupts_enabled(TRUE);
	}

	if (type == T_DATA_ABT) {
		/*
		 * Now that interrupts are reenabled, we can perform any needed
		 * copyin operations.
		 *
		 * Because we have reenabled interrupts, any instruction copy
		 * must be a copyin, even on UP systems.
		 */

		if (regs->fsr & DFSR_WRITE) {
			fault_type = (VM_PROT_READ | VM_PROT_WRITE);
			/* Cache maintenance operations report faults as write accesses; change these to read accesses. */
			/* Cache operations are invoked from ARM mode for now. */
			if (!(regs->cpsr & PSR_TF)) {
				unsigned int ins = 0;

				if (COPYIN((user_addr_t)(regs->pc), (char *)&ins, (vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS) {
					goto exit;
				}

				if (arm_mcr_cp15(ins) || arm_mcrr_cp15(ins)) {
					fault_type = VM_PROT_READ;
				}
			}
		} else {
			fault_type = VM_PROT_READ;
			/*
			 * The DFSR does not have the "write" bit set
			 * when a swp instruction is encountered (even when
			 * it is a write fault).
			 */
			if (!(regs->cpsr & PSR_TF)) {
				unsigned int ins = 0;

				if (COPYIN((user_addr_t)(regs->pc), (char *)&ins, (vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS) {
					goto exit;
				}

				if ((ins & ARM_SWP_MASK) == ARM_SWP) {
					fault_type = VM_PROT_WRITE;
				}
			}
		}
	}

	if ((spsr & PSR_MODE_MASK) != PSR_USER_MODE) {
		/* Fault in kernel mode */

		if ((status == FSR_DEBUG)
		    && ((debug_status == ARM_DBGDSCR_MOE_ASYNC_WATCHPOINT) || (debug_status == ARM_DBGDSCR_MOE_SYNC_WATCHPOINT))
		    && (recover != 0) && (getCpuDatap()->cpu_user_debug != 0)) {
			/* If we hit a watchpoint in kernel mode, it was probably in a copyin/copyout
			 * that we don't want to abort. Turn off watchpoints and keep going; we'll
			 * turn them back on in load_and_go_user.
			 */
			arm_debug_set(NULL);
			goto exit;
		}

		if ((type == T_PREFETCH_ABT) || (status == FSR_DEBUG)) {
			intr = ml_set_interrupts_enabled(FALSE);
			if (status == FSR_DEBUG) {
				DebuggerCall(EXC_BREAKPOINT, regs);
				(void) ml_set_interrupts_enabled(intr);
				goto exit;
			}
			panic_with_thread_kernel_state("prefetch abort in kernel mode", regs);

			(void) ml_set_interrupts_enabled(intr);
		} else if (TEST_FSR_VMFAULT(status)) {
#if CONFIG_DTRACE
			if (thread->t_dtrace_inprobe) {  /* Executing under dtrace_probe? */
				if (dtrace_tally_fault(fault_addr)) { /* Should a fault under dtrace be ignored? */
					/* Point to next instruction */
					regs->pc += ((regs->cpsr & PSR_TF) && !IS_THUMB32(*((uint16_t*) (regs->pc)))) ? 2 : 4;
					goto exit;
				} else {
					intr = ml_set_interrupts_enabled(FALSE);
					panic_with_thread_kernel_state("Unexpected page fault under dtrace_probe", regs);

					(void) ml_set_interrupts_enabled(intr);

					goto exit;
				}
			}
#endif

			if (VM_KERNEL_ADDRESS(vaddr) || thread == THREAD_NULL) {
				map = kernel_map;
			} else {
				map = thread->map;
			}

			if (!TEST_FSR_TRANSLATION_FAULT(status)) {
				/* check to see if it is just a pmap ref/modify fault */
				result = arm_fast_fault(map->pmap, trunc_page(fault_addr), fault_type, (status == FSR_PACCESS), FALSE);
				if (result == KERN_SUCCESS) {
					goto exit;
				}
			}

			/*
			 *  We have to "fault" the page in.
			 */
			result = vm_fault(map, fault_addr,
			    fault_type,
			    FALSE /* change_wiring */, VM_KERN_MEMORY_NONE,
			    (map == kernel_map) ? THREAD_UNINT : THREAD_ABORTSAFE, NULL, 0);

			if (result == KERN_SUCCESS) {
				goto exit;
			} else {
				/*
				 *  If we have a recover handler, invoke it now.
				 */
				if (recover != 0) {
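					/*
					 * Bit 0 of the recovery address encodes
					 * the handler's Thumb state: restore it
					 * into CPSR.T and resume at the aligned
					 * address.
					 */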
					regs->pc = (register_t) (recover & ~0x1);
					regs->cpsr = (regs->cpsr & ~PSR_TF) | ((recover & 0x1) << PSR_TFb);
					goto exit;
				}
			}
		} else if ((status & FSR_ALIGN_MASK) == FSR_ALIGN) {
			result = sleh_alignment(regs);
			if (result == KERN_SUCCESS) {
				goto exit;
			} else {
				intr = ml_set_interrupts_enabled(FALSE);

				panic_with_thread_kernel_state("unaligned kernel data access", regs);

				(void) ml_set_interrupts_enabled(intr);

				goto exit;
			}
		}
		intr = ml_set_interrupts_enabled(FALSE);

		panic_plain("kernel abort type %d at pc 0x%08x, lr 0x%08x: fault_type=0x%x, fault_addr=0x%x\n"
		    "r0:   0x%08x  r1: 0x%08x  r2: 0x%08x  r3: 0x%08x\n"
		    "r4:   0x%08x  r5: 0x%08x  r6: 0x%08x  r7: 0x%08x\n"
		    "r8:   0x%08x  r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
		    "r12:  0x%08x  sp: 0x%08x  lr: 0x%08x  pc: 0x%08x\n"
		    "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
		    type, regs->pc, regs->lr, fault_type, fault_addr,
		    regs->r[0], regs->r[1], regs->r[2], regs->r[3],
		    regs->r[4], regs->r[5], regs->r[6], regs->r[7],
		    regs->r[8], regs->r[9], regs->r[10], regs->r[11],
		    regs->r[12], regs->sp, regs->lr, regs->pc,
		    regs->cpsr, regs->fsr, regs->far);
	}
	/* Fault in user mode */

	if (TEST_FSR_VMFAULT(status)) {
		map = thread->map;

#if CONFIG_DTRACE
		if (thread->t_dtrace_inprobe) {  /* Executing under dtrace_probe? */
			if (dtrace_tally_fault(fault_addr)) { /* Should a user mode fault under dtrace be ignored? */
				if (recover) {
					regs->pc = recover;
				} else {
					intr = ml_set_interrupts_enabled(FALSE);

					panic_with_thread_kernel_state("copyin/out has no recovery point", regs);

					(void) ml_set_interrupts_enabled(intr);
				}
				goto exit;
			} else {
				intr = ml_set_interrupts_enabled(FALSE);

				panic_with_thread_kernel_state("Unexpected UMW page fault under dtrace_probe", regs);

				(void) ml_set_interrupts_enabled(intr);

				goto exit;
			}
		}
#endif

		if (!TEST_FSR_TRANSLATION_FAULT(status)) {
			/* check to see if it is just a pmap ref/modify fault */
			result = arm_fast_fault(map->pmap, trunc_page(fault_addr), fault_type, (status == FSR_PACCESS), TRUE);
			if (result == KERN_SUCCESS) {
				goto exception_return;
			}
		}

		/*
		 * We have to "fault" the page in.
		 */
		result = vm_fault(map, fault_addr, fault_type,
		    FALSE /* change_wiring */, VM_KERN_MEMORY_NONE,
		    THREAD_ABORTSAFE, NULL, 0);
		if (result == KERN_SUCCESS || result == KERN_ABORTED) {
			goto exception_return;
		}

		/*
		 * KERN_FAILURE here means preemption was disabled when we called vm_fault.
		 * That should never happen for a page fault from user space.
		 */
		if (__improbable(result == KERN_FAILURE)) {
			panic("vm_fault() KERN_FAILURE from user fault on thread %p", thread);
		}

		codes[0] = result;
	} else if ((status & FSR_ALIGN_MASK) == FSR_ALIGN) {
		if (sleh_alignment(regs) == KERN_SUCCESS) {
			goto exception_return;
		}
		codes[0] = EXC_ARM_DA_ALIGN;
	} else if (status == FSR_DEBUG) {
		exc = EXC_BREAKPOINT;
		codes[0] = EXC_ARM_DA_DEBUG;
	} else if ((status == FSR_SDOM) || (status == FSR_PDOM)) {
		panic_with_thread_kernel_state("Unexpected domain fault", regs);
	} else {
		codes[0] = KERN_FAILURE;
	}

	if (need_done_faulting) {
		thread_reset_pcs_done_faulting(thread);
	}

	codes[1] = vaddr;
	exception_triage(exc, codes, 2);
	/* NOTREACHED */

exception_return:
	if (need_done_faulting) {
		thread_reset_pcs_done_faulting(thread);
	}

	if (recover) {
		thread->machine.recover = recover;
	}
	thread_exception_return();
	/* NOTREACHED */

exit:
	if (need_done_faulting) {
		thread_reset_pcs_done_faulting(thread);
	}

	if (recover) {
		thread->machine.recover = recover;
	}
	return;
}


/*
 *	Routine:        sleh_alignment
 *	Function:       Second level exception handler for alignment data fault
 */

static kern_return_t
sleh_alignment(struct arm_saved_state * regs)
{
	unsigned int    status;
	unsigned int    ins = 0;
	unsigned int    rd_index;
	unsigned int    base_index;
	unsigned int    paddr;
	void           *src;
	unsigned int    reg_list;
	unsigned int    pre;
	unsigned int    up;
	unsigned int    write_back;
	kern_return_t   rc = KERN_SUCCESS;

	getCpuDatap()->cpu_stat.unaligned_cnt++;

	/* Do not try to emulate in modified execution states */
	if (regs->cpsr & (PSR_EF | PSR_JF)) {
		return KERN_NOT_SUPPORTED;
	}

	/* Disallow emulation of kernel instructions */
	if ((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE) {
		return KERN_NOT_SUPPORTED;
	}


#define ALIGN_THRESHOLD 1024
	if ((sleh_alignment_count++ & (ALIGN_THRESHOLD - 1)) ==
	    (ALIGN_THRESHOLD - 1)) {
		kprintf("sleh_alignment: %d more alignment faults: %d total\n",
		    ALIGN_THRESHOLD, sleh_alignment_count);
	}

	if ((trap_on_alignment_fault != 0)
	    && (sleh_alignment_count % trap_on_alignment_fault == 0)) {
		return KERN_NOT_SUPPORTED;
	}

	status = regs->fsr;
	paddr = regs->far;

	if (regs->cpsr & PSR_TF) {
		unsigned short ins16 = 0;

		/* Get aborted instruction */
		if (COPYIN((user_addr_t)(regs->pc), (char *)&ins16, (vm_size_t)(sizeof(uint16_t))) != KERN_SUCCESS) {
			/* Failed to fetch the instruction; return success so the exception is re-driven. */
			return KERN_SUCCESS;
		}

		/*
		 * Map multi-word Thumb loads and stores to their ARM
		 * equivalents.
		 * Don't worry about single-word instructions, since those are
		 * handled in hardware.
		 */

		reg_list = ins16 & 0xff;
		if (reg_list == 0) {
			return KERN_NOT_SUPPORTED;
		}

		if (((ins16 & THUMB_STR_1_MASK) == THUMB_LDMIA) ||
		    ((ins16 & THUMB_STR_1_MASK) == THUMB_STMIA)) {
			base_index = (ins16 >> 8) & 0x7;
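			/*
			 * Rebuild the equivalent ARM STMIA/LDMIA encoding
			 * (cond = AL): bit 20 is the L (load) bit, bit 21
			 * the W (write-back) bit.
			 */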
			ins = 0xE8800000 | (base_index << 16) | reg_list;
			if ((ins16 & THUMB_STR_1_MASK) == THUMB_LDMIA) {
				ins |= (1 << 20);
			}
			if (((ins16 & THUMB_STR_1_MASK) == THUMB_STMIA) ||
			    !(reg_list & (1 << base_index))) {
				ins |= (1 << 21);
			}
		} else if ((ins16 & THUMB_PUSH_MASK) == THUMB_POP) {
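			/* POP becomes LDMIA sp!; bit 8 of the Thumb encoding adds the PC (r15). */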
			unsigned int    r = (ins16 >> 8) & 1;
			ins = 0xE8BD0000 | (r << 15) | reg_list;
		} else if ((ins16 & THUMB_PUSH_MASK) == THUMB_PUSH) {
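			/* PUSH becomes STMDB sp!; bit 8 of the Thumb encoding adds the LR (r14). */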
			unsigned int    r = (ins16 >> 8) & 1;
			ins = 0xE92D0000 | (r << 14) | reg_list;
		} else {
			return KERN_NOT_SUPPORTED;
		}
	} else {
		/* Get aborted instruction */
		if (COPYIN((user_addr_t)(regs->pc), (char *)&ins, (vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS) {
			/* Failed to fetch the instruction; return success so the exception is re-driven. */
			return KERN_SUCCESS;
		}
	}

	/* Don't try to emulate unconditional instructions */
	if ((ins & 0xF0000000) == 0xF0000000) {
		return KERN_NOT_SUPPORTED;
	}

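	/*
	 * Decode the ARM block-transfer fields: P (pre-index), U (up/down),
	 * W (write-back), the base register, and the 16-bit register list.
	 */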
	pre = (ins >> 24) & 1;
	up = (ins >> 23) & 1;
	reg_list = ins & 0xffff;
	write_back = (ins >> 21) & 1;
	base_index = (ins >> 16) & 0xf;

	if ((ins & ARM_BLK_MASK) == ARM_STM) {  /* STM or LDM */
		int             reg_count = 0;
		int             waddr;

		for (rd_index = 0; rd_index < 16; rd_index++) {
			if (reg_list & (1 << rd_index)) {
				reg_count++;
			}
		}

		paddr = regs->r[base_index];

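		/*
		 * paddr walks the individual word transfers; waddr is the
		 * final value written back to the base register.
		 */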
		switch (ins & (ARM_POST_INDEXING | ARM_INCREMENT)) {
		/* Increment after */
		case ARM_INCREMENT:
			waddr = paddr + reg_count * 4;
			break;

		/* Increment before */
		case ARM_POST_INDEXING | ARM_INCREMENT:
			waddr = paddr + reg_count * 4;
			paddr += 4;
			break;

		/* Decrement after */
		case 0:
			waddr = paddr - reg_count * 4;
			paddr = waddr + 4;
			break;

		/* Decrement before */
		case ARM_POST_INDEXING:
			waddr = paddr - reg_count * 4;
			paddr = waddr;
			break;

		default:
			waddr = 0;
		}

		for (rd_index = 0; rd_index < 16; rd_index++) {
			if (reg_list & (1 << rd_index)) {
				src = &regs->r[rd_index];

				if ((ins & (1 << 20)) == 0) {   /* STM */
					rc = COPYOUT(src, paddr, 4);
				} else { /* LDM */
					rc = COPYIN(paddr, src, 4);
				}

				if (rc != KERN_SUCCESS) {
					break;
				}

				paddr += 4;
			}
		}

		paddr = waddr;
	} else {
		rc = KERN_NOT_SUPPORTED;        /* not a block transfer we can emulate */
	}

	if (rc == KERN_SUCCESS) {
		if (regs->cpsr & PSR_TF) {
			regs->pc += 2;
		} else {
			regs->pc += 4;
		}

		if (write_back) {
			regs->r[base_index] = paddr;
		}
	}
	return rc;
}


#ifndef NO_KDEBUG
/* XXX quell warnings */
void            syscall_trace(struct arm_saved_state * regs);
void            syscall_trace_exit(unsigned int, unsigned int);
void            mach_syscall_trace(struct arm_saved_state * regs, unsigned int call_number);
void            mach_syscall_trace_exit(unsigned int retval, unsigned int call_number);
void            interrupt_trace(struct arm_saved_state * regs);
void            interrupt_trace_exit(void);

/* called from the fleh_swi handler, if TRACE_SYSCALL is enabled */
void
syscall_trace(
	struct arm_saved_state * regs)
{
	kprintf("syscall: %d\n", regs->r[12]);
}

void
syscall_trace_exit(
	unsigned int r0,
	unsigned int r1)
{
	kprintf("syscall exit: 0x%x 0x%x\n", r0, r1);
}

void
mach_syscall_trace(
	struct arm_saved_state * regs,
	unsigned int call_number)
{
	int             i, argc;
	int             kdarg[3] = {0, 0, 0};

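	/* Record up to the first three trap arguments in the kdebug event. */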
	argc = mach_trap_table[call_number].mach_trap_arg_count;

	if (argc > 3) {
		argc = 3;
	}

	for (i = 0; i < argc; i++) {
		kdarg[i] = (int) regs->r[i];
	}

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
	    kdarg[0], kdarg[1], kdarg[2], 0, 0);
}

void
mach_syscall_trace_exit(
	unsigned int retval,
	unsigned int call_number)
{
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
	    retval, 0, 0, 0, 0);
}

void
interrupt_trace(
	struct arm_saved_state * regs)
{
#define UMODE(rp)       (((rp)->cpsr & PSR_MODE_MASK) == PSR_USER_MODE)

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
	    0, UMODE(regs) ? regs->pc : VM_KERNEL_UNSLIDE(regs->pc),
	    UMODE(regs), 0, 0);
}

void
interrupt_trace_exit(
	void)
{
#if KPERF
	kperf_interrupt();
#endif /* KPERF */
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END);
}
#endif

/* XXX quell warnings */
void interrupt_stats(void);

/* This is called from locore.s directly. We only update per-processor interrupt counters in this function */
void
interrupt_stats(void)
{
	SCHED_STATS_INC(interrupt_count);
}

__dead2
static void
panic_with_thread_kernel_state(const char *msg, struct arm_saved_state *regs)
{
	panic_plain("%s at pc 0x%08x, lr 0x%08x (saved state:%p)\n"
	    "r0:   0x%08x  r1: 0x%08x  r2: 0x%08x  r3: 0x%08x\n"
	    "r4:   0x%08x  r5: 0x%08x  r6: 0x%08x  r7: 0x%08x\n"
	    "r8:   0x%08x  r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
	    "r12:  0x%08x  sp: 0x%08x  lr: 0x%08x  pc: 0x%08x\n"
	    "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
	    msg, regs->pc, regs->lr, regs,
	    regs->r[0], regs->r[1], regs->r[2], regs->r[3],
	    regs->r[4], regs->r[5], regs->r[6], regs->r[7],
	    regs->r[8], regs->r[9], regs->r[10], regs->r[11],
	    regs->r[12], regs->sp, regs->lr, regs->pc,
	    regs->cpsr, regs->fsr, regs->far);
}
891