/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/debug.h>
#include <mach_kdp.h>
#include <machine/endian.h>
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>
#include <mach/vm_types.h>
#include <mach/mach_traps.h>

#include <mach/exception.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/message.h>
#include <mach/machine/thread_status.h>

#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_fault.h>
#include <vm/vm_kern.h>

#include <kern/ast.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>

#include <sys/kdebug.h>
#include <kperf/kperf.h>

#include <arm/trap.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/machdep_call.h>
#include <arm/machine_routines.h>
#include <arm/misc_protos.h>
#include <arm/setjmp.h>
#include <arm/proc_reg.h>

/*
 * External function prototypes.
 */
#include <kern/syscall_sw.h>
#include <kern/host.h>
#include <kern/processor.h>

#if CONFIG_DTRACE
extern kern_return_t dtrace_user_probe(arm_saved_state_t* regs, unsigned int instr);
extern boolean_t dtrace_tally_fault(user_addr_t);

/* Traps for userland processing. We can't include bsd/sys/fasttrap_isa.h
 * here, so these trap instruction encodings are copied from that file and
 * must be kept in sync with it! */
#define FASTTRAP_ARM_INSTR 0xe7ffdefc
#define FASTTRAP_THUMB_INSTR 0xdefc

#define FASTTRAP_ARM_RET_INSTR 0xe7ffdefb
#define FASTTRAP_THUMB_RET_INSTR 0xdefb
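
/*
 * These encodings sit in the architecturally undefined instruction space
 * (0xDExx is the permanently undefined Thumb encoding), so executing a
 * patched probe point raises the undefined-instruction exception that
 * sleh_undef() inspects below.
 */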

/* See <rdar://problem/4613924> */
perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */
#endif

#define COPYIN(dst, src, size) \
    ((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE) ? \
    copyin_kern(dst, src, size) \
    : \
    copyin(dst, src, size)

#define COPYOUT(src, dst, size) \
    ((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE) ? \
    copyout_kern(src, dst, size) \
    : \
    copyout(src, dst, size)
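
/*
 * Illustrative sketch (not compiled): both macros dispatch on the saved
 * CPSR, so a single call site handles faults taken from either mode:
 *
 *     uint32_t instr = 0;
 *     // saved state from kernel (e.g. SVC) mode -> copyin_kern(...)
 *     // saved state from user mode              -> copyin(...)
 *     COPYIN((user_addr_t)(regs->pc), (char *)&instr, sizeof(instr));
 */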

/* Forward declarations for the second-level exception handlers */
void sleh_undef(struct arm_saved_state *, struct arm_vfpsaved_state *);
void sleh_abort(struct arm_saved_state *, int);
static kern_return_t sleh_alignment(struct arm_saved_state *);
static void panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *regs);

int sleh_alignment_count = 0;
int trap_on_alignment_fault = 0;

/*
 * Routine: sleh_undef
 * Function: Second level exception handler for undefined-instruction exceptions
 */

void
sleh_undef(struct arm_saved_state * regs, struct arm_vfpsaved_state * vfp_ss __unused)
{
    exception_type_t exception = EXC_BAD_INSTRUCTION;
    mach_exception_data_type_t code[2] = {EXC_ARM_UNDEFINED};
    mach_msg_type_number_t codeCnt = 2;
    thread_t thread = current_thread();
    vm_offset_t recover;

    recover = thread->recover;
    thread->recover = 0;

    getCpuDatap()->cpu_stat.undef_ex_cnt++;


#if CONFIG_DTRACE
    if (tempDTraceTrapHook) {
        if (tempDTraceTrapHook(exception, regs, 0, 0) == KERN_SUCCESS) {
            /*
             * If it succeeds, we are done...
             */
            goto exit;
        }
    }
#endif /* CONFIG_DTRACE */

    /* Inherit the interrupt masks from the previous context */
    if (!(regs->cpsr & PSR_INTMASK)) {
        ml_set_interrupts_enabled(TRUE);
    }

#if CONFIG_DTRACE
    /* Check to see if we've hit a userland probe */
    if ((regs->cpsr & PSR_MODE_MASK) == PSR_USER_MODE) {
        if (regs->cpsr & PSR_TF) {
            uint16_t instr = 0;

            if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(uint16_t))) != KERN_SUCCESS) {
                goto exit;
            }

            if (instr == FASTTRAP_THUMB_INSTR || instr == FASTTRAP_THUMB_RET_INSTR) {
                if (dtrace_user_probe(regs, instr) == KERN_SUCCESS) {
                    /* If it succeeds, we are done... */
                    goto exit;
                }
            }
        } else {
            uint32_t instr = 0;

            if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(uint32_t))) != KERN_SUCCESS) {
                goto exit;
            }

            if (instr == FASTTRAP_ARM_INSTR || instr == FASTTRAP_ARM_RET_INSTR) {
                if (dtrace_user_probe(regs, instr) == KERN_SUCCESS) {
                    /* If it succeeds, we are done... */
                    goto exit;
                }
            }
        }
    }
#endif /* CONFIG_DTRACE */

    if (regs->cpsr & PSR_TF) {
        unsigned short instr = 0;

        if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(unsigned short))) != KERN_SUCCESS) {
            goto exit;
        }

        if (IS_THUMB32(instr)) {
            unsigned int instr32;

            instr32 = (instr << 16);

            if (COPYIN((user_addr_t)(((unsigned short *) (regs->pc)) + 1), (char *)&instr, (vm_size_t)(sizeof(unsigned short))) != KERN_SUCCESS) {
                goto exit;
            }

            instr32 |= instr;
            code[1] = instr32;
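
            /*
             * Worked example (illustrative): for the 32-bit Thumb-2
             * encoding 0xE92D 0x4FF0 (PUSH.W {r4-r11, lr}), the first
             * halfword is shifted into the high half and the second
             * OR'd in, so code[1] carries instr32 = 0xE92D4FF0.
             */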

#if __ARM_VFP__
            if (IS_THUMB_VFP(instr32)) {
                /* We no longer manage FPEXC beyond bootstrap, so verify that VFP is still enabled. */
                if (!get_vfp_enabled()) {
                    panic("VFP was disabled (thumb); VFP should always be enabled");
                }
            }
#endif
        } else {
            /* I don't believe we have any 16 bit VFP instructions, so just set code[1]. */
            code[1] = instr;

            if (IS_THUMB_GDB_TRAP(instr)) {
                exception = EXC_BREAKPOINT;
                code[0] = EXC_ARM_BREAKPOINT;
            }
        }
    } else {
        uint32_t instr = 0;

        if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(uint32_t))) != KERN_SUCCESS) {
            goto exit;
        }

        code[1] = instr;
#if __ARM_VFP__
        if (IS_ARM_VFP(instr)) {
            /* We no longer manage FPEXC beyond bootstrap, so verify that VFP is still enabled. */
            if (!get_vfp_enabled()) {
                panic("VFP was disabled (arm); VFP should always be enabled");
            }
        }
#endif

        if (IS_ARM_GDB_TRAP(instr)) {
            exception = EXC_BREAKPOINT;
            code[0] = EXC_ARM_BREAKPOINT;
        }
    }

    if ((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE) {
        boolean_t intr;

        intr = ml_set_interrupts_enabled(FALSE);

        if (exception == EXC_BREAKPOINT) {
            /* Save off the context here (so that the debug logic
             * can see the original state of this thread).
             */
            vm_offset_t kstackptr = current_thread()->machine.kstackptr;
            copy_signed_thread_state((arm_saved_state_t *)kstackptr, regs);

            DebuggerCall(exception, regs);
            (void) ml_set_interrupts_enabled(intr);
            goto exit;
        }
        panic_with_thread_kernel_state("undefined kernel instruction", regs);

        (void) ml_set_interrupts_enabled(intr);
    } else {
        exception_triage(exception, code, codeCnt);
        /* NOTREACHED */
    }

exit:
    if (recover) {
        thread->recover = recover;
    }
}

/*
 * Routine: sleh_abort
 * Function: Second level exception handler for aborts (prefetch/data)
 */

void
sleh_abort(struct arm_saved_state * regs, int type)
{
    int status;
    int debug_status = 0;
    int spsr;
    int exc = EXC_BAD_ACCESS;
    mach_exception_data_type_t codes[2];
    vm_map_t map;
    vm_map_address_t vaddr;
    vm_map_address_t fault_addr;
    vm_prot_t fault_type;
    kern_return_t result;
    vm_offset_t recover;
    thread_t thread = current_thread();
    boolean_t intr;

    recover = thread->recover;
    thread->recover = 0;

    status = regs->fsr & FSR_MASK;
    spsr = regs->cpsr;

    /* The DFSR/IFSR.ExT bit indicates an IMPLEMENTATION DEFINED classification.
     * Allow a platform-level error handler to decode it.
     */
    if ((regs->fsr) & FSR_EXT) {
        cpu_data_t *cdp = getCpuDatap();

        if (cdp->platform_error_handler != NULL) {
            cdp->platform_error_handler(cdp->cpu_id, 0);
            /* If a platform error handler is registered, expect it to panic, not fall through */
            panic("Unexpected return from platform_error_handler");
        }
    }

    /* Done with asynchronous handling; re-enable here so that subsequent aborts are taken as early as possible. */
    reenable_async_aborts();

    if (ml_at_interrupt_context()) {
#if CONFIG_DTRACE
        if (!(thread->t_dtrace_inprobe))
#endif /* CONFIG_DTRACE */
        {
            panic_with_thread_kernel_state("sleh_abort at interrupt context", regs);
        }
    }

    fault_addr = vaddr = regs->far;

    if (type == T_DATA_ABT) {
        getCpuDatap()->cpu_stat.data_ex_cnt++;
    } else { /* T_PREFETCH_ABT */
        getCpuDatap()->cpu_stat.instr_ex_cnt++;
        fault_type = VM_PROT_READ | VM_PROT_EXECUTE;
    }

    if (status == FSR_DEBUG) {
        debug_status = arm_debug_read_dscr() & ARM_DBGDSCR_MOE_MASK;
    }

    /* Inherit the interrupt masks from the previous context */
    if (!(spsr & PSR_INTMASK)) {
        ml_set_interrupts_enabled(TRUE);
    }

    if (type == T_DATA_ABT) {
        /*
         * Now that interrupts are reenabled, we can perform any needed
         * copyin operations.
         *
         * Because we have reenabled interrupts, any instruction copy
         * must be a copyin, even on UP systems.
         */

        if (regs->fsr & DFSR_WRITE) {
            fault_type = (VM_PROT_READ | VM_PROT_WRITE);
            /* Cache operations report faults as write accesses; change these to read accesses. */
            /* Cache operations are invoked from ARM mode for now. */
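            /* (For instance, a CP15 c7 data-cache maintenance operation by
             * MVA can fault on a line the issuing code only reads.) */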
            if (!(regs->cpsr & PSR_TF)) {
                unsigned int ins = 0;

                if (COPYIN((user_addr_t)(regs->pc), (char *)&ins, (vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS) {
                    goto exit;
                }

                if (arm_mcr_cp15(ins) || arm_mcrr_cp15(ins)) {
                    fault_type = VM_PROT_READ;
                }
            }
        } else {
            fault_type = VM_PROT_READ;
            /*
             * The DFSR "write" bit is not set when a swp instruction
             * is encountered (even when the fault is on the write half
             * of the swap).
             */
            if (!(regs->cpsr & PSR_TF)) {
                unsigned int ins = 0;

                if (COPYIN((user_addr_t)(regs->pc), (char *)&ins, (vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS) {
                    goto exit;
                }

                if ((ins & ARM_SWP_MASK) == ARM_SWP) {
                    fault_type = VM_PROT_WRITE;
                }
            }
        }
    }

    if ((spsr & PSR_MODE_MASK) != PSR_USER_MODE) {
        /* Fault in kernel mode */

        if ((status == FSR_DEBUG)
            && ((debug_status == ARM_DBGDSCR_MOE_ASYNC_WATCHPOINT) || (debug_status == ARM_DBGDSCR_MOE_SYNC_WATCHPOINT))
            && (recover != 0) && (getCpuDatap()->cpu_user_debug != 0)) {
            /* If we hit a watchpoint in kernel mode, it was probably in a
             * copyin/copyout that we don't want to abort. Turn off watchpoints
             * and keep going; we'll turn them back on in load_and_go_user.
             */
            arm_debug_set(NULL);
            goto exit;
        }

        if ((type == T_PREFETCH_ABT) || (status == FSR_DEBUG)) {
            intr = ml_set_interrupts_enabled(FALSE);
            if (status == FSR_DEBUG) {
                DebuggerCall(EXC_BREAKPOINT, regs);
                (void) ml_set_interrupts_enabled(intr);
                goto exit;
            }
            panic_with_thread_kernel_state("prefetch abort in kernel mode", regs);

            (void) ml_set_interrupts_enabled(intr);
        } else if (TEST_FSR_VMFAULT(status)) {
#if CONFIG_DTRACE
            if (thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */
                if (dtrace_tally_fault(fault_addr)) { /* Should a fault under dtrace be ignored? */
                    /* Point to next instruction */
                    regs->pc += ((regs->cpsr & PSR_TF) && !IS_THUMB32(*((uint16_t*) (regs->pc)))) ? 2 : 4;
                    goto exit;
                } else {
                    intr = ml_set_interrupts_enabled(FALSE);
                    panic_with_thread_kernel_state("Unexpected page fault under dtrace_probe", regs);

                    (void) ml_set_interrupts_enabled(intr);

                    goto exit;
                }
            }
#endif

            if (VM_KERNEL_ADDRESS(vaddr) || thread == THREAD_NULL) {
                map = kernel_map;
            } else {
                map = thread->map;
            }

            if (!TEST_FSR_TRANSLATION_FAULT(status)) {
                /* check to see if it is just a pmap ref/modify fault */
                result = arm_fast_fault(map->pmap, trunc_page(fault_addr), fault_type, (status == FSR_PACCESS), FALSE);
                if (result == KERN_SUCCESS) {
                    goto exit;
                }
            }

            /*
             * We have to "fault" the page in.
             */
            result = vm_fault(map, fault_addr,
                fault_type,
                FALSE /* change_wiring */, VM_KERN_MEMORY_NONE,
                (map == kernel_map) ? THREAD_UNINT : THREAD_ABORTSAFE, NULL, 0);

            if (result == KERN_SUCCESS) {
                goto exit;
            } else {
                /*
                 * If we have a recover handler, invoke it now.
                 */
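                /*
                 * The low bit of the recovery address encodes the recovery
                 * handler's instruction set: bit 0 set means resume in
                 * Thumb state, so it is folded into CPSR.T rather than
                 * left in the PC.
                 */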
                if (recover != 0) {
                    regs->pc = (register_t) (recover & ~0x1);
                    regs->cpsr = (regs->cpsr & ~PSR_TF) | ((recover & 0x1) << PSR_TFb);
                    goto exit;
                }
            }
        } else if ((status & FSR_ALIGN_MASK) == FSR_ALIGN) {
            result = sleh_alignment(regs);
            if (result == KERN_SUCCESS) {
                goto exit;
            } else {
                intr = ml_set_interrupts_enabled(FALSE);

                panic_with_thread_kernel_state("unaligned kernel data access", regs);

                (void) ml_set_interrupts_enabled(intr);

                goto exit;
            }
        }
        intr = ml_set_interrupts_enabled(FALSE);

        panic_plain("kernel abort type %d at pc 0x%08x, lr 0x%08x: fault_type=0x%x, fault_addr=0x%x\n"
            "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
            "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
            "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
            "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
            "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
            type, regs->pc, regs->lr, fault_type, fault_addr,
            regs->r[0], regs->r[1], regs->r[2], regs->r[3],
            regs->r[4], regs->r[5], regs->r[6], regs->r[7],
            regs->r[8], regs->r[9], regs->r[10], regs->r[11],
            regs->r[12], regs->sp, regs->lr, regs->pc,
            regs->cpsr, regs->fsr, regs->far);
    }
    /* Fault in user mode */

    if (TEST_FSR_VMFAULT(status)) {
        map = thread->map;

#if CONFIG_DTRACE
        if (thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */
            if (dtrace_tally_fault(fault_addr)) { /* Should a user mode fault under dtrace be ignored? */
                if (recover) {
                    regs->pc = recover;
                } else {
                    intr = ml_set_interrupts_enabled(FALSE);

                    panic_with_thread_kernel_state("copyin/out has no recovery point", regs);

                    (void) ml_set_interrupts_enabled(intr);
                }
                goto exit;
            } else {
                intr = ml_set_interrupts_enabled(FALSE);

                panic_with_thread_kernel_state("Unexpected UMW page fault under dtrace_probe", regs);

                (void) ml_set_interrupts_enabled(intr);

                goto exit;
            }
        }
#endif

        if (!TEST_FSR_TRANSLATION_FAULT(status)) {
            /* check to see if it is just a pmap ref/modify fault */
            result = arm_fast_fault(map->pmap, trunc_page(fault_addr), fault_type, (status == FSR_PACCESS), TRUE);
            if (result == KERN_SUCCESS) {
                goto exception_return;
            }
        }

        /*
         * We have to "fault" the page in.
         */
        result = vm_fault(map, fault_addr, fault_type,
            FALSE /* change_wiring */, VM_KERN_MEMORY_NONE,
            THREAD_ABORTSAFE, NULL, 0);
        if (result == KERN_SUCCESS || result == KERN_ABORTED) {
            goto exception_return;
        }

        /*
         * KERN_FAILURE here means preemption was disabled when we called vm_fault.
         * That should never happen for a page fault from user space.
         */
        if (__improbable(result == KERN_FAILURE)) {
            panic("vm_fault() KERN_FAILURE from user fault on thread %p", thread);
        }

        codes[0] = result;
    } else if ((status & FSR_ALIGN_MASK) == FSR_ALIGN) {
        if (sleh_alignment(regs) == KERN_SUCCESS) {
            goto exception_return;
        }
        codes[0] = EXC_ARM_DA_ALIGN;
    } else if (status == FSR_DEBUG) {
        exc = EXC_BREAKPOINT;
        codes[0] = EXC_ARM_DA_DEBUG;
    } else if ((status == FSR_SDOM) || (status == FSR_PDOM)) {
        panic_with_thread_kernel_state("Unexpected domain fault", regs);
    } else {
        codes[0] = KERN_FAILURE;
    }

    codes[1] = vaddr;
    exception_triage(exc, codes, 2);
    /* NOTREACHED */

exception_return:
    if (recover) {
        thread->recover = recover;
    }
    thread_exception_return();
    /* NOTREACHED */

exit:
    if (recover) {
        thread->recover = recover;
    }
    return;
}


/*
 * Routine: sleh_alignment
 * Function: Second level exception handler for alignment data faults
 */

static kern_return_t
sleh_alignment(struct arm_saved_state * regs)
{
    unsigned int status;
    unsigned int ins = 0;
    unsigned int rd_index;
    unsigned int base_index;
    unsigned int paddr;
    void *src;
    unsigned int reg_list;
    unsigned int pre;
    unsigned int up;
    unsigned int write_back;
    kern_return_t rc = KERN_SUCCESS;

    getCpuDatap()->cpu_stat.unaligned_cnt++;

    /* Do not try to emulate in modified execution states */
    if (regs->cpsr & (PSR_EF | PSR_JF)) {
        return KERN_NOT_SUPPORTED;
    }

    /* Disallow emulation of kernel instructions */
    if ((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE) {
        return KERN_NOT_SUPPORTED;
    }


#define ALIGN_THRESHOLD 1024
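    /*
     * ALIGN_THRESHOLD is a power of two, so the masked comparison below
     * logs a summary exactly once per ALIGN_THRESHOLD alignment faults.
     */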
    if ((sleh_alignment_count++ & (ALIGN_THRESHOLD - 1)) ==
        (ALIGN_THRESHOLD - 1)) {
        kprintf("sleh_alignment: %d more alignment faults: %d total\n",
            ALIGN_THRESHOLD, sleh_alignment_count);
    }

    if ((trap_on_alignment_fault != 0)
        && (sleh_alignment_count % trap_on_alignment_fault == 0)) {
        return KERN_NOT_SUPPORTED;
    }

    status = regs->fsr;
    paddr = regs->far;

    if (regs->cpsr & PSR_TF) {
        unsigned short ins16 = 0;

        /* Get aborted instruction */
        if (COPYIN((user_addr_t)(regs->pc), (char *)&ins16, (vm_size_t)(sizeof(uint16_t))) != KERN_SUCCESS) {
            /* Failed to fetch instruction, return success to re-drive the exception */
            return KERN_SUCCESS;
        }

        /*
         * Map multi-word Thumb loads and stores to their ARM
         * equivalents.
         * Don't worry about single-word instructions, since those are
         * handled in hardware.
         */
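        /*
         * Worked example (illustrative): POP {r4, pc} is 0xBD10, i.e.
         * R = 1 and reg_list = 0x10, and is rewritten below as
         * ins = 0xE8BD0000 | (1 << 15) | 0x10 = 0xE8BD8010, the ARM
         * equivalent LDMIA sp!, {r4, pc}.
         */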

        reg_list = ins16 & 0xff;
        if (reg_list == 0) {
            return KERN_NOT_SUPPORTED;
        }

        if (((ins16 & THUMB_STR_1_MASK) == THUMB_LDMIA) ||
            ((ins16 & THUMB_STR_1_MASK) == THUMB_STMIA)) {
            base_index = (ins16 >> 8) & 0x7;
            ins = 0xE8800000 | (base_index << 16) | reg_list;
            if ((ins16 & THUMB_STR_1_MASK) == THUMB_LDMIA) {
                ins |= (1 << 20);
            }
            if (((ins16 & THUMB_STR_1_MASK) == THUMB_STMIA) ||
                !(reg_list & (1 << base_index))) {
                ins |= (1 << 21);
            }
        } else if ((ins16 & THUMB_PUSH_MASK) == THUMB_POP) {
            unsigned int r = (ins16 >> 8) & 1;
            ins = 0xE8BD0000 | (r << 15) | reg_list;
        } else if ((ins16 & THUMB_PUSH_MASK) == THUMB_PUSH) {
            unsigned int r = (ins16 >> 8) & 1;
            ins = 0xE92D0000 | (r << 14) | reg_list;
        } else {
            return KERN_NOT_SUPPORTED;
        }
    } else {
        /* Get aborted instruction */
        if (COPYIN((user_addr_t)(regs->pc), (char *)&ins, (vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS) {
            /* Failed to fetch instruction, return success to re-drive the exception */
            return KERN_SUCCESS;
        }
    }

    /* Don't try to emulate unconditional instructions */
    if ((ins & 0xF0000000) == 0xF0000000) {
        return KERN_NOT_SUPPORTED;
    }

    pre = (ins >> 24) & 1;
    up = (ins >> 23) & 1;
    reg_list = ins & 0xffff;
    write_back = (ins >> 21) & 1;
    base_index = (ins >> 16) & 0xf;

    if ((ins & ARM_BLK_MASK) == ARM_STM) { /* STM or LDM */
        int reg_count = 0;
        int waddr;

        for (rd_index = 0; rd_index < 16; rd_index++) {
            if (reg_list & (1 << rd_index)) {
                reg_count++;
            }
        }

        paddr = regs->r[base_index];

        switch (ins & (ARM_POST_INDEXING | ARM_INCREMENT)) {
        /* Increment after */
        case ARM_INCREMENT:
            waddr = paddr + reg_count * 4;
            break;

        /* Increment before */
        case ARM_POST_INDEXING | ARM_INCREMENT:
            waddr = paddr + reg_count * 4;
            paddr += 4;
            break;

        /* Decrement after */
        case 0:
            waddr = paddr - reg_count * 4;
            paddr = waddr + 4;
            break;

        /* Decrement before */
        case ARM_POST_INDEXING:
            waddr = paddr - reg_count * 4;
            paddr = waddr;
            break;

        default:
            waddr = 0;
        }
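        /*
         * Worked example (illustrative): an LDMDB of three registers with
         * a base of 0x1000 copies words from 0xFF4, 0xFF8 and 0xFFC and,
         * with write-back, leaves waddr = 0xFF4 in the base register; the
         * LDMDA form with the same operands starts the copy at 0xFF8.
         */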

        for (rd_index = 0; rd_index < 16; rd_index++) {
            if (reg_list & (1 << rd_index)) {
                src = &regs->r[rd_index];

                if ((ins & (1 << 20)) == 0) { /* STM */
                    rc = COPYOUT(src, paddr, 4);
                } else { /* LDM */
                    rc = COPYIN(paddr, src, 4);
                }

                if (rc != KERN_SUCCESS) {
                    break;
                }

                paddr += 4;
            }
        }

        paddr = waddr;
    } else {
        rc = 1;
    }

    if (rc == KERN_SUCCESS) {
        if (regs->cpsr & PSR_TF) {
            regs->pc += 2;
        } else {
            regs->pc += 4;
        }

        if (write_back) {
            regs->r[base_index] = paddr;
        }
    }
    return rc;
}


#ifndef NO_KDEBUG
/* XXX quell warnings */
void syscall_trace(struct arm_saved_state * regs);
void syscall_trace_exit(unsigned int, unsigned int);
void mach_syscall_trace(struct arm_saved_state * regs, unsigned int call_number);
void mach_syscall_trace_exit(unsigned int retval, unsigned int call_number);
void interrupt_trace(struct arm_saved_state * regs);
void interrupt_trace_exit(void);

/* Called from the fleh_swi handler if TRACE_SYSCALL is enabled. */
void
syscall_trace(
    struct arm_saved_state * regs)
{
    kprintf("syscall: %d\n", regs->r[12]);
}

void
syscall_trace_exit(
    unsigned int r0,
    unsigned int r1)
{
    kprintf("syscall exit: 0x%x 0x%x\n", r0, r1);
}

void
mach_syscall_trace(
    struct arm_saved_state * regs,
    unsigned int call_number)
{
    int i, argc;
    int kdarg[3] = {0, 0, 0};

    argc = mach_trap_table[call_number].mach_trap_arg_count;

    if (argc > 3) {
        argc = 3;
    }

    for (i = 0; i < argc; i++) {
        kdarg[i] = (int) regs->r[i];
    }
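
    /*
     * A kdebug tracepoint carries a fixed number of payload words, so
     * only the first three trap arguments are recorded here.
     */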

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
        kdarg[0], kdarg[1], kdarg[2], 0, 0);
}

void
mach_syscall_trace_exit(
    unsigned int retval,
    unsigned int call_number)
{
    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
        retval, 0, 0, 0, 0);
}

void
interrupt_trace(
    struct arm_saved_state * regs)
{
#define UMODE(rp) (((rp)->cpsr & PSR_MODE_MASK) == PSR_USER_MODE)

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
        0, UMODE(regs) ? regs->pc : VM_KERNEL_UNSLIDE(regs->pc),
        UMODE(regs), 0, 0);
}

void
interrupt_trace_exit(
    void)
{
#if KPERF
    kperf_interrupt();
#endif /* KPERF */
    KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END);
}
#endif

/* XXX quell warnings */
void interrupt_stats(void);

/* This is called from locore.s directly. We only update per-processor interrupt counters in this function. */
void
interrupt_stats(void)
{
    SCHED_STATS_INC(interrupt_count);
}

__dead2
static void
panic_with_thread_kernel_state(const char *msg, struct arm_saved_state *regs)
{
    panic_plain("%s at pc 0x%08x, lr 0x%08x (saved state:%p)\n"
        "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
        "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
        "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
        "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
        "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
        msg, regs->pc, regs->lr, regs,
        regs->r[0], regs->r[1], regs->r[2], regs->r[3],
        regs->r[4], regs->r[5], regs->r[6], regs->r[7],
        regs->r[8], regs->r[9], regs->r[10], regs->r[11],
        regs->r[12], regs->sp, regs->lr, regs->pc,
        regs->cpsr, regs->fsr, regs->far);
}
