/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifdef MACH_BSD
#include <mach_ldebug.h>

#include <mach/kern_return.h>
#include <mach/mach_traps.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/cpu_data.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/debug.h>
#include <kern/spl.h>
#include <kern/syscall_sw.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>

#include <i386/cpu_number.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/tss.h>
#include <i386/user_ldt.h>
#include <i386/fpu.h>
#include <i386/machdep_call.h>
#include <i386/vmparam.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>
#include <i386/thread.h>
#include <i386/trap.h>
#include <i386/seg.h>
#include <mach/i386/syscall_sw.h>
#include <sys/syscall.h>
#include <sys/kdebug.h>
#include <sys/errno.h>
#include <../bsd/sys/sysent.h>

#ifdef MACH_BSD
extern void mach_kauth_cred_thread_update(void);
extern void throttle_lowpri_io(int);
#endif

#if CONFIG_MACF
#include <security/mac_mach_internal.h>
#endif

void * find_user_regs(thread_t);

unsigned int get_msr_exportmask(void);

unsigned int get_msr_nbits(void);

unsigned int get_msr_rbits(void);

/*
 * thread_userstack:
 *
 * Return the user stack pointer from the machine
 * dependent thread state info.
 */
kern_return_t
thread_userstack(
	__unused thread_t thread,
	int flavor,
	thread_state_t tstate,
	unsigned int count,
	mach_vm_offset_t *user_stack,
	int *customstack,
	__unused boolean_t is64bit
	)
{
	if (customstack) {
		*customstack = 0;
	}

	switch (flavor) {
	case x86_THREAD_STATE32:
	{
		x86_thread_state32_t *state25;

		if (__improbable(count != x86_THREAD_STATE32_COUNT)) {
			return KERN_INVALID_ARGUMENT;
		}

		state25 = (x86_thread_state32_t *) tstate;

		if (state25->esp) {
			*user_stack = state25->esp;
			if (customstack) {
				*customstack = 1;
			}
		} else {
			*user_stack = VM_USRSTACK32;
			if (customstack) {
				*customstack = 0;
			}
		}
		break;
	}

	case x86_THREAD_FULL_STATE64:
	{
		x86_thread_full_state64_t *state25;

		if (__improbable(count != x86_THREAD_FULL_STATE64_COUNT)) {
			return KERN_INVALID_ARGUMENT;
		}

		state25 = (x86_thread_full_state64_t *) tstate;

		if (state25->ss64.rsp) {
			*user_stack = state25->ss64.rsp;
			if (customstack) {
				*customstack = 1;
			}
		} else {
			*user_stack = VM_USRSTACK64;
			if (customstack) {
				*customstack = 0;
			}
		}
		break;
	}

	case x86_THREAD_STATE64:
	{
		x86_thread_state64_t *state25;

		if (__improbable(count != x86_THREAD_STATE64_COUNT)) {
			return KERN_INVALID_ARGUMENT;
		}

		state25 = (x86_thread_state64_t *) tstate;

		if (state25->rsp) {
			*user_stack = state25->rsp;
			if (customstack) {
				*customstack = 1;
			}
		} else {
			*user_stack = VM_USRSTACK64;
			if (customstack) {
				*customstack = 0;
			}
		}
		break;
	}

	default:
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}

/*
 * thread_userstackdefault:
 *
 * Return the default stack location for the
 * thread, if otherwise unknown.
 */
kern_return_t
thread_userstackdefault(
	mach_vm_offset_t *default_user_stack,
	boolean_t is64bit)
{
	if (is64bit) {
		*default_user_stack = VM_USRSTACK64;
	} else {
		*default_user_stack = VM_USRSTACK32;
	}
	return KERN_SUCCESS;
}

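/*
 * thread_entrypoint:
 *
 * Return the user entry point from the machine-dependent
 * thread state, falling back to the VM minimum address when
 * the state supplies none.
 */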
kern_return_t
thread_entrypoint(
	__unused thread_t thread,
	int flavor,
	thread_state_t tstate,
	unsigned int count,
	mach_vm_offset_t *entry_point
	)
{
	/*
	 * Set a default.
	 */
	if (*entry_point == 0) {
		*entry_point = VM_MIN_ADDRESS;
	}

	switch (flavor) {
	case x86_THREAD_STATE32:
	{
		x86_thread_state32_t *state25;

		if (count != x86_THREAD_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state25 = (x86_thread_state32_t *) tstate;
		*entry_point = state25->eip ? state25->eip : VM_MIN_ADDRESS;
		break;
	}

	case x86_THREAD_STATE64:
	{
		x86_thread_state64_t *state25;

		if (count != x86_THREAD_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state25 = (x86_thread_state64_t *) tstate;
		*entry_point = state25->rip ? state25->rip : VM_MIN_ADDRESS64;
		break;
	}
	}
	return KERN_SUCCESS;
}

/*
 * FIXME - thread_set_child
 */

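/*
 * Set up the child's registers for return from fork(): the pid in
 * eax/rax, edx/rdx set to 1 to mark the child side of the fork,
 * and the carry flag cleared to indicate success.
 */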
void thread_set_child(thread_t child, int pid);
void
thread_set_child(thread_t child, int pid)
{
	pal_register_cache_state(child, DIRTY);

	if (thread_is_64bit_addr(child)) {
		x86_saved_state64_t *iss64;

		iss64 = USER_REGS64(child);

		iss64->rax = pid;
		iss64->rdx = 1;
		iss64->isf.rflags &= ~EFL_CF;
	} else {
		x86_saved_state32_t *iss32;

		iss32 = USER_REGS32(child);

		iss32->eax = pid;
		iss32->edx = 1;
		iss32->efl &= ~EFL_CF;
	}
}


/*
 * System Call handling code
 */

extern long fuword(vm_offset_t);

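/*
 * machdep_syscall:
 *
 * Dispatch a 32-bit machine-dependent system call.  The call number
 * arrives in eax and up to MACHDEP_MAX_ARGS int-sized arguments are
 * copied in from the user stack, just above the return address.
 * Returns to user space via thread_exception_return().
 */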
__attribute__((noreturn))
void
machdep_syscall(x86_saved_state_t *state)
{
	int args[MACHDEP_MAX_ARGS] = { 0 };
	int trapno;
	int nargs;
	const machdep_call_t *entry;
	x86_saved_state32_t *regs;

	assert(is_saved_state32(state));
	regs = saved_state32(state);

	trapno = regs->eax;
#if DEBUG_TRACE
	kprintf("machdep_syscall(%p) code=%d\n", (void *)regs, trapno);
#endif

	DEBUG_KPRINT_SYSCALL_MDEP(
		"machdep_syscall: trapno=%d\n", trapno);

	if (trapno < 0 || trapno >= machdep_call_count) {
		regs->eax = (unsigned int)kern_invalid(NULL);

		thread_exception_return();
		/* NOTREACHED */
	}
	entry = &machdep_call_table[trapno];
	nargs = entry->nargs;

	if (nargs != 0) {
		if (copyin((user_addr_t) regs->uesp + sizeof(int),
		    (char *) args, (nargs * sizeof(int)))) {
			regs->eax = KERN_INVALID_ADDRESS;

			thread_exception_return();
			/* NOTREACHED */
		}
	}

	static_assert(MACHDEP_MAX_ARGS >= 4);
	KDBG(MACHDBG_CODE(DBG_MACH_MACHDEP_EXCP_SC_x86, trapno) | DBG_FUNC_START,
	    args[0], args[1], args[2], args[3]);

	switch (nargs) {
	case 0:
		regs->eax = (*entry->routine.args_0)();
		break;
	case 1:
		regs->eax = (*entry->routine.args_1)(args[0]);
		break;
	case 2:
		regs->eax = (*entry->routine.args_2)(args[0], args[1]);
		break;
	case 3:
		if (!entry->bsd_style) {
			regs->eax = (*entry->routine.args_3)(args[0], args[1], args[2]);
		} else {
			int error;
			uint32_t rval;

			error = (*entry->routine.args_bsd_3)(&rval, args[0], args[1], args[2]);
			if (error) {
				regs->eax = error;
				regs->efl |= EFL_CF;    /* carry bit */
			} else {
				regs->eax = rval;
				regs->efl &= ~EFL_CF;
			}
		}
		break;
	case 4:
		regs->eax = (*entry->routine.args_4)(args[0], args[1], args[2], args[3]);
		break;

	default:
		panic("machdep_syscall: too many args");
	}

	DEBUG_KPRINT_SYSCALL_MDEP("machdep_syscall: retval=%u\n", regs->eax);

#if DEBUG || DEVELOPMENT
	kern_allocation_name_t
	    prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* DEBUG || DEVELOPMENT */

	KDBG(MACHDBG_CODE(DBG_MACH_MACHDEP_EXCP_SC_x86, trapno) | DBG_FUNC_END, regs->eax);

	throttle_lowpri_io(1);

	thread_exception_return();
	/* NOTREACHED */
}

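/*
 * machdep_syscall64:
 *
 * Dispatch a 64-bit machine-dependent system call.  The call number
 * arrives in rax and up to three arguments are taken directly from
 * rdi, rsi and rdx, so no copyin from the user stack is needed.
 */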
__attribute__((noreturn))
void
machdep_syscall64(x86_saved_state_t *state)
{
	int trapno;
	const machdep_call_t *entry;
	x86_saved_state64_t *regs;

	assert(is_saved_state64(state));
	regs = saved_state64(state);

	trapno = (int)(regs->rax & SYSCALL_NUMBER_MASK);

	DEBUG_KPRINT_SYSCALL_MDEP(
		"machdep_syscall64: trapno=%d\n", trapno);

	if (trapno < 0 || trapno >= machdep_call_count) {
		regs->rax = (unsigned int)kern_invalid(NULL);

		thread_exception_return();
		/* NOTREACHED */
	}
	entry = &machdep_call_table64[trapno];

	KDBG(MACHDBG_CODE(DBG_MACH_MACHDEP_EXCP_SC_x86, trapno) | DBG_FUNC_START,
	    regs->rdi, regs->rsi, regs->rdx);

	switch (entry->nargs) {
	case 0:
		regs->rax = (*entry->routine.args_0)();
		break;
	case 1:
		regs->rax = (*entry->routine.args64_1)(regs->rdi);
		break;
	case 2:
		regs->rax = (*entry->routine.args64_2)(regs->rdi, regs->rsi);
		break;
	case 3:
		if (!entry->bsd_style) {
			regs->rax = (*entry->routine.args64_3)(regs->rdi, regs->rsi, regs->rdx);
		} else {
			int error;
			uint32_t rval;

			error = (*entry->routine.args64_bsd_3)(&rval, regs->rdi, regs->rsi, regs->rdx);
			if (error) {
				regs->rax = (uint64_t)error;
				regs->isf.rflags |= EFL_CF;     /* carry bit */
			} else {
				regs->rax = rval;
				regs->isf.rflags &= ~(uint64_t)EFL_CF;
			}
		}
		break;
	default:
		panic("machdep_syscall64: too many args");
	}

	DEBUG_KPRINT_SYSCALL_MDEP("machdep_syscall64: retval=%llu\n", regs->rax);

#if DEBUG || DEVELOPMENT
	kern_allocation_name_t
	    prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* DEBUG || DEVELOPMENT */

	KDBG(MACHDBG_CODE(DBG_MACH_MACHDEP_EXCP_SC_x86, trapno) | DBG_FUNC_END, regs->rax);

	throttle_lowpri_io(1);

	thread_exception_return();
	/* NOTREACHED */
}

#endif /* MACH_BSD */

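/*
 * Generic Mach trap signature and a fixed nine-slot argument block:
 * every Mach trap is dispatched through this shape regardless of
 * its declared argument count.
 */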
typedef kern_return_t (*mach_call_t)(void *);

struct mach_call_args {
	syscall_arg_t arg1;
	syscall_arg_t arg2;
	syscall_arg_t arg3;
	syscall_arg_t arg4;
	syscall_arg_t arg5;
	syscall_arg_t arg6;
	syscall_arg_t arg7;
	syscall_arg_t arg8;
	syscall_arg_t arg9;
};

static kern_return_t
mach_call_arg_munger32(uint32_t sp, struct mach_call_args *args, const mach_trap_t *trapp);

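/*
 * Copy a 32-bit Mach trap's arguments in from the user stack
 * (starting just above the return address at 'sp') and munge them
 * into the 64-bit argument slots the trap handler expects.
 */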
static kern_return_t
mach_call_arg_munger32(uint32_t sp, struct mach_call_args *args, const mach_trap_t *trapp)
{
	if (copyin((user_addr_t)(sp + sizeof(int)), (char *)args, trapp->mach_trap_u32_words * sizeof(int))) {
		return KERN_INVALID_ARGUMENT;
	}
#if CONFIG_REQUIRES_U32_MUNGING
	trapp->mach_trap_arg_munge32(args);
#else
#error U32 mach traps on x86_64 kernel requires munging
#endif
	return KERN_SUCCESS;
}


__private_extern__ void mach_call_munger(x86_saved_state_t *state);

extern const char *const mach_syscall_name_table[];

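/*
 * mach_call_munger:
 *
 * Common entry point for 32-bit Mach traps.  The negated trap number
 * is taken from eax, arguments are copied in from the user stack and
 * munged, the trap function is called, and its result is returned to
 * user space in eax via thread_exception_return().
 */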
__attribute__((noreturn))
void
mach_call_munger(x86_saved_state_t *state)
{
	int argc;
	int call_number;
	mach_call_t mach_call;
	kern_return_t retval;
	struct mach_call_args args = {
		.arg1 = 0,
		.arg2 = 0,
		.arg3 = 0,
		.arg4 = 0,
		.arg5 = 0,
		.arg6 = 0,
		.arg7 = 0,
		.arg8 = 0,
		.arg9 = 0
	};
	x86_saved_state32_t *regs;

	struct uthread *ut = get_bsdthread_info(current_thread());
	uthread_reset_proc_refcount(ut);

	assert(is_saved_state32(state));
	regs = saved_state32(state);

	call_number = -(regs->eax);

	DEBUG_KPRINT_SYSCALL_MACH(
		"mach_call_munger: code=%d(%s)\n",
		call_number, mach_syscall_name_table[call_number]);
#if DEBUG_TRACE
	kprintf("mach_call_munger(%p) code=%d\n", (void *)regs, call_number);
#endif

	if (call_number < 0 || call_number >= mach_trap_count) {
		i386_exception(EXC_SYSCALL, call_number, 1);
		/* NOTREACHED */
	}
	mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;

	if (mach_call == (mach_call_t)kern_invalid) {
		DEBUG_KPRINT_SYSCALL_MACH(
			"mach_call_munger: kern_invalid 0x%x\n", regs->eax);
		i386_exception(EXC_SYSCALL, call_number, 1);
		/* NOTREACHED */
	}

	argc = mach_trap_table[call_number].mach_trap_arg_count;
	if (argc) {
		retval = mach_call_arg_munger32(regs->uesp, &args, &mach_trap_table[call_number]);
		if (retval != KERN_SUCCESS) {
			regs->eax = retval;

			DEBUG_KPRINT_SYSCALL_MACH(
				"mach_call_munger: retval=0x%x\n", retval);

			thread_exception_return();
			/* NOTREACHED */
		}
	}

#ifdef MACH_BSD
	mach_kauth_cred_thread_update();
#endif

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
	    args.arg1, args.arg2, args.arg3, args.arg4, 0);

#if CONFIG_MACF
	/* Check mach trap filter mask, if exists. */
	thread_ro_t tro = current_thread_ro();
	task_t task = tro->tro_task;
	struct proc *proc = tro->tro_proc;
	uint8_t *filter_mask = task_get_mach_trap_filter_mask(task);

	if (__improbable(filter_mask != NULL &&
	    !bitstr_test(filter_mask, call_number) &&
	    mac_task_mach_trap_evaluate != NULL)) {
		/* Not in filter mask, evaluate policy. */
		retval = mac_task_mach_trap_evaluate(proc, call_number);
		if (retval != KERN_SUCCESS) {
			if (mach_trap_table[call_number].mach_trap_returns_port) {
				retval = MACH_PORT_NULL;
			}
			goto skip_machcall;
		}
	}
#endif /* CONFIG_MACF */

	retval = mach_call(&args);

skip_machcall:
	DEBUG_KPRINT_SYSCALL_MACH("mach_call_munger: retval=0x%x\n", retval);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
	    retval, 0, 0, 0, 0);

	regs->eax = retval;

#if DEBUG || DEVELOPMENT
	kern_allocation_name_t
	    prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* DEBUG || DEVELOPMENT */

	throttle_lowpri_io(1);

	uthread_assert_zero_proc_refcount(ut);
	thread_exception_return();
	/* NOTREACHED */
}


__private_extern__ void mach_call_munger64(x86_saved_state_t *regs);

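/*
 * mach_call_munger64:
 *
 * Common entry point for 64-bit Mach traps.  The trap number is
 * taken from rax; up to six arguments arrive in registers (rdi, rsi,
 * rdx, r10, r8, r9) and any remainder is copied in from the user
 * stack.  The result is returned to user space in rax.
 */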
__attribute__((noreturn))
void
mach_call_munger64(x86_saved_state_t *state)
{
	int call_number;
	int argc;
	mach_call_t mach_call;
	kern_return_t retval;
	struct mach_call_args args = {
		.arg1 = 0,
		.arg2 = 0,
		.arg3 = 0,
		.arg4 = 0,
		.arg5 = 0,
		.arg6 = 0,
		.arg7 = 0,
		.arg8 = 0,
		.arg9 = 0
	};
	x86_saved_state64_t *regs;

	struct uthread *ut = get_bsdthread_info(current_thread());
	uthread_reset_proc_refcount(ut);

	assert(is_saved_state64(state));
	regs = saved_state64(state);

	call_number = (int)(regs->rax & SYSCALL_NUMBER_MASK);

	DEBUG_KPRINT_SYSCALL_MACH(
		"mach_call_munger64: code=%d(%s)\n",
		call_number, mach_syscall_name_table[call_number]);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
	    regs->rdi, regs->rsi, regs->rdx, regs->r10, 0);

	if (call_number < 0 || call_number >= mach_trap_count) {
		i386_exception(EXC_SYSCALL, regs->rax, 1);
		/* NOTREACHED */
	}
	mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;

	if (mach_call == (mach_call_t)kern_invalid) {
		i386_exception(EXC_SYSCALL, regs->rax, 1);
		/* NOTREACHED */
	}
	argc = mach_trap_table[call_number].mach_trap_arg_count;
	if (argc) {
		int args_in_regs = MIN(6, argc);
		__nochk_memcpy(&args.arg1, &regs->rdi, args_in_regs * sizeof(syscall_arg_t));

		if (argc > 6) {
			int copyin_count;

			assert(argc <= 9);
			copyin_count = (argc - 6) * (int)sizeof(syscall_arg_t);

			if (copyin((user_addr_t)(regs->isf.rsp + sizeof(user_addr_t)), (char *)&args.arg7, copyin_count)) {
				regs->rax = KERN_INVALID_ARGUMENT;

				thread_exception_return();
				/* NOTREACHED */
			}
		}
	}

#ifdef MACH_BSD
	mach_kauth_cred_thread_update();
#endif

#if CONFIG_MACF
	/* Check mach trap filter mask, if exists. */
	thread_ro_t tro = current_thread_ro();
	task_t task = tro->tro_task;
	struct proc *proc = tro->tro_proc;
	uint8_t *filter_mask = task_get_mach_trap_filter_mask(task);

	if (__improbable(filter_mask != NULL &&
	    !bitstr_test(filter_mask, call_number) &&
	    mac_task_mach_trap_evaluate != NULL)) {
		retval = mac_task_mach_trap_evaluate(proc, call_number);
		if (retval != KERN_SUCCESS) {
			if (mach_trap_table[call_number].mach_trap_returns_port) {
				retval = MACH_PORT_NULL;
			}
			goto skip_machcall;
		}
	}
#endif /* CONFIG_MACF */

	retval = mach_call((void *)&args);

skip_machcall:
	DEBUG_KPRINT_SYSCALL_MACH("mach_call_munger64: retval=0x%x\n", retval);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
	    retval, 0, 0, 0, 0);

	regs->rax = (uint64_t)retval;
#if DEBUG || DEVELOPMENT
	kern_allocation_name_t
	    prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* DEBUG || DEVELOPMENT */

	throttle_lowpri_io(1);

	uthread_assert_zero_proc_refcount(ut);
	thread_exception_return();
	/* NOTREACHED */
}


/*
 * thread_setuserstack:
 *
 * Sets the user stack pointer into the machine
 * dependent thread state info.
 */
void
thread_setuserstack(
	thread_t thread,
	mach_vm_address_t user_stack)
{
	pal_register_cache_state(thread, DIRTY);
	if (thread_is_64bit_addr(thread)) {
		x86_saved_state64_t *iss64;

		iss64 = USER_REGS64(thread);

		iss64->isf.rsp = (uint64_t)user_stack;
	} else {
		x86_saved_state32_t *iss32;

		iss32 = USER_REGS32(thread);

		iss32->uesp = CAST_DOWN_EXPLICIT(unsigned int, user_stack);
	}
}

/*
 * thread_adjuserstack:
 *
 * Returns the adjusted user stack pointer from the machine
 * dependent thread state info.  Used for small (<2G) deltas.
 */
user_addr_t
thread_adjuserstack(
	thread_t thread,
	int adjust)
{
	pal_register_cache_state(thread, DIRTY);
	if (thread_is_64bit_addr(thread)) {
		x86_saved_state64_t *iss64;

		iss64 = USER_REGS64(thread);

		iss64->isf.rsp += adjust;

		return iss64->isf.rsp;
	} else {
		x86_saved_state32_t *iss32;

		iss32 = USER_REGS32(thread);

		iss32->uesp += adjust;

		return CAST_USER_ADDR_T(iss32->uesp);
	}
}

/*
 * thread_setentrypoint:
 *
 * Sets the user PC into the machine
 * dependent thread state info.
 */
void
thread_setentrypoint(thread_t thread, mach_vm_address_t entry)
{
	pal_register_cache_state(thread, DIRTY);
	if (thread_is_64bit_addr(thread)) {
		x86_saved_state64_t *iss64;

		iss64 = USER_REGS64(thread);

		iss64->isf.rip = (uint64_t)entry;
	} else {
		x86_saved_state32_t *iss32;

		iss32 = USER_REGS32(thread);

		iss32->eip = CAST_DOWN_EXPLICIT(unsigned int, entry);
	}
}

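/*
 * thread_setsinglestep:
 *
 * Enable or disable user-mode single-stepping by setting or
 * clearing the trap flag (TF) in the thread's saved eflags/rflags.
 * For 32-bit sysenter threads the code segment is switched so the
 * return to user space goes via IRET, which restores the saved
 * flags.
 */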
kern_return_t
thread_setsinglestep(thread_t thread, int on)
{
	pal_register_cache_state(thread, DIRTY);
	if (thread_is_64bit_addr(thread)) {
		x86_saved_state64_t *iss64;

		iss64 = USER_REGS64(thread);

		if (on) {
			iss64->isf.rflags |= EFL_TF;
		} else {
			iss64->isf.rflags &= ~EFL_TF;
		}
	} else {
		x86_saved_state32_t *iss32;

		iss32 = USER_REGS32(thread);

		if (on) {
			iss32->efl |= EFL_TF;
			/* Ensure IRET */
			if (iss32->cs == SYSENTER_CS) {
				iss32->cs = SYSENTER_TF_CS;
			}
		} else {
			iss32->efl &= ~EFL_TF;
		}
	}

	return KERN_SUCCESS;
}

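/*
 * Return a pointer to the thread's saved user register state,
 * after marking the PAL register cache state dirty.
 */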
void *
get_user_regs(thread_t th)
{
	pal_register_cache_state(th, DIRTY);
	return USER_STATE(th);
}

void *
find_user_regs(thread_t thread)
{
	return get_user_regs(thread);
}

#if CONFIG_DTRACE
/*
 * DTrace would like to have a peek at the kernel interrupt state, if available.
 */
x86_saved_state_t *find_kern_regs(thread_t);

x86_saved_state_t *
find_kern_regs(thread_t thread)
{
	if (thread == current_thread() &&
	    NULL != current_cpu_datap()->cpu_int_state &&
	    !(USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
	    current_cpu_datap()->cpu_interrupt_level == 1)) {
		return current_cpu_datap()->cpu_int_state;
	} else {
		return NULL;
	}
}

vm_offset_t dtrace_get_cpu_int_stack_top(void);

vm_offset_t
dtrace_get_cpu_int_stack_top(void)
{
	return current_cpu_datap()->cpu_int_stack_top;
}
#endif