1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #ifdef MACH_BSD
29 #include <mach_debug.h>
30 #include <mach_ldebug.h>
31
32 #include <mach/kern_return.h>
33 #include <mach/mach_traps.h>
34 #include <mach/thread_status.h>
35 #include <mach/vm_param.h>
36
37 #include <kern/cpu_data.h>
38 #include <kern/mach_param.h>
39 #include <kern/task.h>
40 #include <kern/thread.h>
41 #include <kern/sched_prim.h>
42 #include <kern/misc_protos.h>
43 #include <kern/assert.h>
44 #include <kern/debug.h>
45 #include <kern/spl.h>
46 #include <kern/syscall_sw.h>
47 #include <ipc/ipc_port.h>
48 #include <vm/vm_kern.h>
49 #include <vm/pmap.h>
50
51 #include <i386/cpu_number.h>
52 #include <i386/eflags.h>
53 #include <i386/proc_reg.h>
54 #include <i386/tss.h>
55 #include <i386/user_ldt.h>
56 #include <i386/fpu.h>
57 #include <i386/machdep_call.h>
58 #include <i386/vmparam.h>
59 #include <i386/mp_desc.h>
60 #include <i386/misc_protos.h>
61 #include <i386/thread.h>
62 #include <i386/trap.h>
63 #include <i386/seg.h>
64 #include <mach/i386/syscall_sw.h>
65 #include <sys/syscall.h>
66 #include <sys/kdebug.h>
67 #include <sys/errno.h>
68 #include <../bsd/sys/sysent.h>
69
70 #ifdef MACH_BSD
71 extern void mach_kauth_cred_thread_update(void);
72 extern void throttle_lowpri_io(int);
73 #endif
74
75 #if CONFIG_MACF
76 #include <security/mac_mach_internal.h>
77 #endif
78
79 void * find_user_regs(thread_t);
80
81 unsigned int get_msr_exportmask(void);
82
83 unsigned int get_msr_nbits(void);
84
85 unsigned int get_msr_rbits(void);
86
87 /*
88 * thread_userstack:
89 *
90 * Return the user stack pointer from the machine
91 * dependent thread state info.
92 */
93 kern_return_t
thread_userstack(__unused thread_t thread,int flavor,thread_state_t tstate,unsigned int count,mach_vm_offset_t * user_stack,int * customstack,__unused boolean_t is64bit)94 thread_userstack(
95 __unused thread_t thread,
96 int flavor,
97 thread_state_t tstate,
98 unsigned int count,
99 mach_vm_offset_t *user_stack,
100 int *customstack,
101 __unused boolean_t is64bit
102 )
103 {
104 if (customstack) {
105 *customstack = 0;
106 }
107
108 switch (flavor) {
109 case x86_THREAD_STATE32:
110 {
111 x86_thread_state32_t *state25;
112
113 if (__improbable(count != x86_THREAD_STATE32_COUNT)) {
114 return KERN_INVALID_ARGUMENT;
115 }
116
117 state25 = (x86_thread_state32_t *) tstate;
118
119 if (state25->esp) {
120 *user_stack = state25->esp;
121 if (customstack) {
122 *customstack = 1;
123 }
124 } else {
125 *user_stack = VM_USRSTACK32;
126 if (customstack) {
127 *customstack = 0;
128 }
129 }
130 break;
131 }
132
133 case x86_THREAD_FULL_STATE64:
134 {
135 x86_thread_full_state64_t *state25;
136
137 if (__improbable(count != x86_THREAD_FULL_STATE64_COUNT)) {
138 return KERN_INVALID_ARGUMENT;
139 }
140
141 state25 = (x86_thread_full_state64_t *) tstate;
142
143 if (state25->ss64.rsp) {
144 *user_stack = state25->ss64.rsp;
145 if (customstack) {
146 *customstack = 1;
147 }
148 } else {
149 *user_stack = VM_USRSTACK64;
150 if (customstack) {
151 *customstack = 0;
152 }
153 }
154 break;
155 }
156
157 case x86_THREAD_STATE64:
158 {
159 x86_thread_state64_t *state25;
160
161 if (__improbable(count != x86_THREAD_STATE64_COUNT)) {
162 return KERN_INVALID_ARGUMENT;
163 }
164
165 state25 = (x86_thread_state64_t *) tstate;
166
167 if (state25->rsp) {
168 *user_stack = state25->rsp;
169 if (customstack) {
170 *customstack = 1;
171 }
172 } else {
173 *user_stack = VM_USRSTACK64;
174 if (customstack) {
175 *customstack = 0;
176 }
177 }
178 break;
179 }
180
181 default:
182 return KERN_INVALID_ARGUMENT;
183 }
184
185 return KERN_SUCCESS;
186 }
187
188 /*
189 * thread_userstackdefault:
190 *
191 * Return the default stack location for the
192 * thread, if otherwise unknown.
193 */
194 kern_return_t
thread_userstackdefault(mach_vm_offset_t * default_user_stack,boolean_t is64bit)195 thread_userstackdefault(
196 mach_vm_offset_t *default_user_stack,
197 boolean_t is64bit)
198 {
199 if (is64bit) {
200 *default_user_stack = VM_USRSTACK64;
201 } else {
202 *default_user_stack = VM_USRSTACK32;
203 }
204 return KERN_SUCCESS;
205 }
206
207 kern_return_t
thread_entrypoint(__unused thread_t thread,int flavor,thread_state_t tstate,unsigned int count,mach_vm_offset_t * entry_point)208 thread_entrypoint(
209 __unused thread_t thread,
210 int flavor,
211 thread_state_t tstate,
212 unsigned int count,
213 mach_vm_offset_t *entry_point
214 )
215 {
216 /*
217 * Set a default.
218 */
219 if (*entry_point == 0) {
220 *entry_point = VM_MIN_ADDRESS;
221 }
222
223 switch (flavor) {
224 case x86_THREAD_STATE32:
225 {
226 x86_thread_state32_t *state25;
227
228 if (count != x86_THREAD_STATE32_COUNT) {
229 return KERN_INVALID_ARGUMENT;
230 }
231
232 state25 = (i386_thread_state_t *) tstate;
233 *entry_point = state25->eip ? state25->eip : VM_MIN_ADDRESS;
234 break;
235 }
236
237 case x86_THREAD_STATE64:
238 {
239 x86_thread_state64_t *state25;
240
241 if (count != x86_THREAD_STATE64_COUNT) {
242 return KERN_INVALID_ARGUMENT;
243 }
244
245 state25 = (x86_thread_state64_t *) tstate;
246 *entry_point = state25->rip ? state25->rip : VM_MIN_ADDRESS64;
247 break;
248 }
249 }
250 return KERN_SUCCESS;
251 }
252
253 /*
254 * FIXME - thread_set_child
255 */
256
257 void thread_set_child(thread_t child, int pid);
258 void
thread_set_child(thread_t child,int pid)259 thread_set_child(thread_t child, int pid)
260 {
261 pal_register_cache_state(child, DIRTY);
262
263 if (thread_is_64bit_addr(child)) {
264 x86_saved_state64_t *iss64;
265
266 iss64 = USER_REGS64(child);
267
268 iss64->rax = pid;
269 iss64->rdx = 1;
270 iss64->isf.rflags &= ~EFL_CF;
271 } else {
272 x86_saved_state32_t *iss32;
273
274 iss32 = USER_REGS32(child);
275
276 iss32->eax = pid;
277 iss32->edx = 1;
278 iss32->efl &= ~EFL_CF;
279 }
280 }
281
282
283
284 /*
285 * System Call handling code
286 */
287
288 extern long fuword(vm_offset_t);
289
/*
 * machdep_syscall:
 *
 * Dispatch a 32-bit machine-dependent system call.  The trap number
 * arrives in %eax; up to MACHDEP_MAX_ARGS int-sized arguments are
 * copied in from the user stack.  Never returns to the caller —
 * control resumes in user mode via thread_exception_return().
 */
__attribute__((noreturn))
void
machdep_syscall(x86_saved_state_t *state)
{
	int args[MACHDEP_MAX_ARGS] = { 0 };
	int trapno;
	int nargs;
	const machdep_call_t *entry;
	x86_saved_state32_t *regs;

	assert(is_saved_state32(state));
	regs = saved_state32(state);

	/* Trap number is passed in %eax. */
	trapno = regs->eax;
#if DEBUG_TRACE
	kprintf("machdep_syscall(0x%08x) code=%d\n", regs, trapno);
#endif

	DEBUG_KPRINT_SYSCALL_MDEP(
		"machdep_syscall: trapno=%d\n", trapno);

	/* Out-of-range trap: fail the call and return to user mode. */
	if (trapno < 0 || trapno >= machdep_call_count) {
		regs->eax = (unsigned int)kern_invalid(NULL);

		thread_exception_return();
		/* NOTREACHED */
	}
	entry = &machdep_call_table[trapno];
	nargs = entry->nargs;

	if (nargs != 0) {
		/* Arguments sit just above the return address on the
		 * user stack. */
		if (copyin((user_addr_t) regs->uesp + sizeof(int),
		    (char *) args, (nargs * sizeof(int)))) {
			regs->eax = KERN_INVALID_ADDRESS;

			thread_exception_return();
			/* NOTREACHED */
		}
	}

	static_assert(MACHDEP_MAX_ARGS >= 4);
	KDBG(MACHDBG_CODE(DBG_MACH_MACHDEP_EXCP_SC_x86, trapno) | DBG_FUNC_START,
	    args[0], args[1], args[2], args[3]);

	/* Fan out to the handler through the arity-specific member. */
	switch (nargs) {
	case 0:
		regs->eax = (*entry->routine.args_0)();
		break;
	case 1:
		regs->eax = (*entry->routine.args_1)(args[0]);
		break;
	case 2:
		regs->eax = (*entry->routine.args_2)(args[0], args[1]);
		break;
	case 3:
		if (!entry->bsd_style) {
			regs->eax = (*entry->routine.args_3)(args[0], args[1], args[2]);
		} else {
			/* BSD-style: errno-like return, result via rval,
			 * carry flag signals failure to libc. */
			int error;
			uint32_t rval;

			error = (*entry->routine.args_bsd_3)(&rval, args[0], args[1], args[2]);
			if (error) {
				regs->eax = error;
				regs->efl |= EFL_CF; /* carry bit */
			} else {
				regs->eax = rval;
				regs->efl &= ~EFL_CF;
			}
		}
		break;
	case 4:
		regs->eax = (*entry->routine.args_4)(args[0], args[1], args[2], args[3]);
		break;

	default:
		panic("machdep_syscall: too many args");
	}

	DEBUG_KPRINT_SYSCALL_MDEP("machdep_syscall: retval=%u\n", regs->eax);

#if DEBUG || DEVELOPMENT
	kern_allocation_name_t
	prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* DEBUG || DEVELOPMENT */

	KDBG(MACHDBG_CODE(DBG_MACH_MACHDEP_EXCP_SC_x86, trapno) | DBG_FUNC_END, regs->eax);

	throttle_lowpri_io(1);

	thread_exception_return();
	/* NOTREACHED */
}
384
385 __attribute__((noreturn))
386 void
machdep_syscall64(x86_saved_state_t * state)387 machdep_syscall64(x86_saved_state_t *state)
388 {
389 int trapno;
390 const machdep_call_t *entry;
391 x86_saved_state64_t *regs;
392
393 assert(is_saved_state64(state));
394 regs = saved_state64(state);
395
396 trapno = (int)(regs->rax & SYSCALL_NUMBER_MASK);
397
398 DEBUG_KPRINT_SYSCALL_MDEP(
399 "machdep_syscall64: trapno=%d\n", trapno);
400
401 if (trapno < 0 || trapno >= machdep_call_count) {
402 regs->rax = (unsigned int)kern_invalid(NULL);
403
404 thread_exception_return();
405 /* NOTREACHED */
406 }
407 entry = &machdep_call_table64[trapno];
408
409 KDBG(MACHDBG_CODE(DBG_MACH_MACHDEP_EXCP_SC_x86, trapno) | DBG_FUNC_START,
410 regs->rdi, regs->rsi, regs->rdx);
411
412 switch (entry->nargs) {
413 case 0:
414 regs->rax = (*entry->routine.args_0)();
415 break;
416 case 1:
417 regs->rax = (*entry->routine.args64_1)(regs->rdi);
418 break;
419 case 2:
420 regs->rax = (*entry->routine.args64_2)(regs->rdi, regs->rsi);
421 break;
422 case 3:
423 if (!entry->bsd_style) {
424 regs->rax = (*entry->routine.args64_3)(regs->rdi, regs->rsi, regs->rdx);
425 } else {
426 int error;
427 uint32_t rval;
428
429 error = (*entry->routine.args64_bsd_3)(&rval, regs->rdi, regs->rsi, regs->rdx);
430 if (error) {
431 regs->rax = (uint64_t)error;
432 regs->isf.rflags |= EFL_CF; /* carry bit */
433 } else {
434 regs->rax = rval;
435 regs->isf.rflags &= ~(uint64_t)EFL_CF;
436 }
437 }
438 break;
439 default:
440 panic("machdep_syscall64: too many args");
441 }
442
443 DEBUG_KPRINT_SYSCALL_MDEP("machdep_syscall: retval=%llu\n", regs->rax);
444
445 #if DEBUG || DEVELOPMENT
446 kern_allocation_name_t
447 prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
448 assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
449 #endif /* DEBUG || DEVELOPMENT */
450
451 KDBG(MACHDBG_CODE(DBG_MACH_MACHDEP_EXCP_SC_x86, trapno) | DBG_FUNC_END, regs->rax);
452
453 throttle_lowpri_io(1);
454
455 thread_exception_return();
456 /* NOTREACHED */
457 }
458
459 #endif /* MACH_BSD */
460
461
/* Generic Mach trap handler signature: every entry in mach_trap_table
 * is invoked through this type with a pointer to a mach_call_args. */
typedef kern_return_t (*mach_call_t)(void *);

/* Argument block handed to a Mach trap handler: nine 64-bit slots,
 * filled from registers and/or the user stack by the munger routines
 * below (a trap takes at most nine arguments — see the argc <= 9
 * assert in mach_call_munger64). */
struct mach_call_args {
	syscall_arg_t arg1;
	syscall_arg_t arg2;
	syscall_arg_t arg3;
	syscall_arg_t arg4;
	syscall_arg_t arg5;
	syscall_arg_t arg6;
	syscall_arg_t arg7;
	syscall_arg_t arg8;
	syscall_arg_t arg9;
};
475
476 static kern_return_t
477 mach_call_arg_munger32(uint32_t sp, struct mach_call_args *args, const mach_trap_t *trapp);
478
479
480 static kern_return_t
mach_call_arg_munger32(uint32_t sp,struct mach_call_args * args,const mach_trap_t * trapp)481 mach_call_arg_munger32(uint32_t sp, struct mach_call_args *args, const mach_trap_t *trapp)
482 {
483 if (copyin((user_addr_t)(sp + sizeof(int)), (char *)args, trapp->mach_trap_u32_words * sizeof(int))) {
484 return KERN_INVALID_ARGUMENT;
485 }
486 #if CONFIG_REQUIRES_U32_MUNGING
487 trapp->mach_trap_arg_munge32(args);
488 #else
489 #error U32 mach traps on x86_64 kernel requires munging
490 #endif
491 return KERN_SUCCESS;
492 }
493
494
495 __private_extern__ void mach_call_munger(x86_saved_state_t *state);
496
497 extern const char *const mach_syscall_name_table[];
498
499 __attribute__((noreturn))
500 void
mach_call_munger(x86_saved_state_t * state)501 mach_call_munger(x86_saved_state_t *state)
502 {
503 int argc;
504 int call_number;
505 mach_call_t mach_call;
506 kern_return_t retval;
507 struct mach_call_args args = {
508 .arg1 = 0,
509 .arg2 = 0,
510 .arg3 = 0,
511 .arg4 = 0,
512 .arg5 = 0,
513 .arg6 = 0,
514 .arg7 = 0,
515 .arg8 = 0,
516 .arg9 = 0
517 };
518 x86_saved_state32_t *regs;
519
520 struct uthread *ut = get_bsdthread_info(current_thread());
521 uthread_reset_proc_refcount(ut);
522
523 assert(is_saved_state32(state));
524 regs = saved_state32(state);
525
526 call_number = -(regs->eax);
527
528 DEBUG_KPRINT_SYSCALL_MACH(
529 "mach_call_munger: code=%d(%s)\n",
530 call_number, mach_syscall_name_table[call_number]);
531 #if DEBUG_TRACE
532 kprintf("mach_call_munger(0x%08x) code=%d\n", regs, call_number);
533 #endif
534
535 if (call_number < 0 || call_number >= mach_trap_count) {
536 i386_exception(EXC_SYSCALL, call_number, 1);
537 /* NOTREACHED */
538 }
539 mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;
540
541 if (mach_call == (mach_call_t)kern_invalid) {
542 DEBUG_KPRINT_SYSCALL_MACH(
543 "mach_call_munger: kern_invalid 0x%x\n", regs->eax);
544 i386_exception(EXC_SYSCALL, call_number, 1);
545 /* NOTREACHED */
546 }
547
548 argc = mach_trap_table[call_number].mach_trap_arg_count;
549 if (argc) {
550 retval = mach_call_arg_munger32(regs->uesp, &args, &mach_trap_table[call_number]);
551 if (retval != KERN_SUCCESS) {
552 regs->eax = retval;
553
554 DEBUG_KPRINT_SYSCALL_MACH(
555 "mach_call_munger: retval=0x%x\n", retval);
556
557 thread_exception_return();
558 /* NOTREACHED */
559 }
560 }
561
562 #ifdef MACH_BSD
563 mach_kauth_cred_thread_update();
564 #endif
565
566 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
567 MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
568 args.arg1, args.arg2, args.arg3, args.arg4, 0);
569
570 #if CONFIG_MACF
571 /* Check mach trap filter mask, if exists. */
572 thread_ro_t tro = current_thread_ro();
573 task_t task = tro->tro_task;
574 struct proc *proc = tro->tro_proc;
575 uint8_t *filter_mask = task_get_mach_trap_filter_mask(task);
576
577 if (__improbable(filter_mask != NULL &&
578 !bitstr_test(filter_mask, call_number) &&
579 mac_task_mach_trap_evaluate != NULL)) {
580 /* Not in filter mask, evaluate policy. */
581 retval = mac_task_mach_trap_evaluate(proc, call_number);
582 if (retval != KERN_SUCCESS) {
583 if (mach_trap_table[call_number].mach_trap_returns_port) {
584 retval = MACH_PORT_NULL;
585 }
586 goto skip_machcall;
587 }
588 }
589 #endif /* CONFIG_MACF */
590
591 retval = mach_call(&args);
592
593 skip_machcall:
594 DEBUG_KPRINT_SYSCALL_MACH("mach_call_munger: retval=0x%x\n", retval);
595
596 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
597 MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
598 retval, 0, 0, 0, 0);
599
600 regs->eax = retval;
601
602 #if DEBUG || DEVELOPMENT
603 kern_allocation_name_t
604 prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
605 assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
606 #endif /* DEBUG || DEVELOPMENT */
607
608 throttle_lowpri_io(1);
609
610 uthread_assert_zero_proc_refcount(ut);
611 thread_exception_return();
612 /* NOTREACHED */
613 }
614
615
616 __private_extern__ void mach_call_munger64(x86_saved_state_t *regs);
617
618 __attribute__((noreturn))
619 void
mach_call_munger64(x86_saved_state_t * state)620 mach_call_munger64(x86_saved_state_t *state)
621 {
622 int call_number;
623 int argc;
624 mach_call_t mach_call;
625 kern_return_t retval;
626 struct mach_call_args args = {
627 .arg1 = 0,
628 .arg2 = 0,
629 .arg3 = 0,
630 .arg4 = 0,
631 .arg5 = 0,
632 .arg6 = 0,
633 .arg7 = 0,
634 .arg8 = 0,
635 .arg9 = 0
636 };
637 x86_saved_state64_t *regs;
638
639 struct uthread *ut = get_bsdthread_info(current_thread());
640 uthread_reset_proc_refcount(ut);
641
642 assert(is_saved_state64(state));
643 regs = saved_state64(state);
644
645 call_number = (int)(regs->rax & SYSCALL_NUMBER_MASK);
646
647 DEBUG_KPRINT_SYSCALL_MACH(
648 "mach_call_munger64: code=%d(%s)\n",
649 call_number, mach_syscall_name_table[call_number]);
650
651 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
652 MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
653 regs->rdi, regs->rsi, regs->rdx, regs->r10, 0);
654
655 if (call_number < 0 || call_number >= mach_trap_count) {
656 i386_exception(EXC_SYSCALL, regs->rax, 1);
657 /* NOTREACHED */
658 }
659 mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;
660
661 if (mach_call == (mach_call_t)kern_invalid) {
662 i386_exception(EXC_SYSCALL, regs->rax, 1);
663 /* NOTREACHED */
664 }
665 argc = mach_trap_table[call_number].mach_trap_arg_count;
666 if (argc) {
667 int args_in_regs = MIN(6, argc);
668 __nochk_memcpy(&args.arg1, ®s->rdi, args_in_regs * sizeof(syscall_arg_t));
669
670 if (argc > 6) {
671 int copyin_count;
672
673 assert(argc <= 9);
674 copyin_count = (argc - 6) * (int)sizeof(syscall_arg_t);
675
676 if (copyin((user_addr_t)(regs->isf.rsp + sizeof(user_addr_t)), (char *)&args.arg7, copyin_count)) {
677 regs->rax = KERN_INVALID_ARGUMENT;
678
679 thread_exception_return();
680 /* NOTREACHED */
681 }
682 }
683 }
684
685 #ifdef MACH_BSD
686 mach_kauth_cred_thread_update();
687 #endif
688
689 #if CONFIG_MACF
690 /* Check syscall filter mask, if exists. */
691 thread_ro_t tro = current_thread_ro();
692 task_t task = tro->tro_task;
693 struct proc *proc = tro->tro_proc;
694 uint8_t *filter_mask = task_get_mach_trap_filter_mask(task);
695
696 if (__improbable(filter_mask != NULL &&
697 !bitstr_test(filter_mask, call_number)) &&
698 mac_task_mach_trap_evaluate != NULL) {
699 retval = mac_task_mach_trap_evaluate(proc, call_number);
700 if (retval != KERN_SUCCESS) {
701 if (mach_trap_table[call_number].mach_trap_returns_port) {
702 retval = MACH_PORT_NULL;
703 }
704 goto skip_machcall;
705 }
706 }
707 #endif /* CONFIG_MACF */
708
709 retval = mach_call((void *)&args);
710
711 skip_machcall:
712 DEBUG_KPRINT_SYSCALL_MACH("mach_call_munger64: retval=0x%llx\n", retval);
713
714 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
715 MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
716 retval, 0, 0, 0, 0);
717
718 regs->rax = (uint64_t)retval;
719 #if DEBUG || DEVELOPMENT
720 kern_allocation_name_t
721 prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
722 assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
723 #endif /* DEBUG || DEVELOPMENT */
724
725 throttle_lowpri_io(1);
726
727 uthread_assert_zero_proc_refcount(ut);
728 thread_exception_return();
729 /* NOTREACHED */
730 }
731
732
733 /*
734 * thread_setuserstack:
735 *
736 * Sets the user stack pointer into the machine
737 * dependent thread state info.
738 */
739 void
thread_setuserstack(thread_t thread,mach_vm_address_t user_stack)740 thread_setuserstack(
741 thread_t thread,
742 mach_vm_address_t user_stack)
743 {
744 pal_register_cache_state(thread, DIRTY);
745 if (thread_is_64bit_addr(thread)) {
746 x86_saved_state64_t *iss64;
747
748 iss64 = USER_REGS64(thread);
749
750 iss64->isf.rsp = (uint64_t)user_stack;
751 } else {
752 x86_saved_state32_t *iss32;
753
754 iss32 = USER_REGS32(thread);
755
756 iss32->uesp = CAST_DOWN_EXPLICIT(unsigned int, user_stack);
757 }
758 }
759
760 /*
761 * thread_adjuserstack:
762 *
763 * Returns the adjusted user stack pointer from the machine
764 * dependent thread state info. Used for small (<2G) deltas.
765 */
766 user_addr_t
thread_adjuserstack(thread_t thread,int adjust)767 thread_adjuserstack(
768 thread_t thread,
769 int adjust)
770 {
771 pal_register_cache_state(thread, DIRTY);
772 if (thread_is_64bit_addr(thread)) {
773 x86_saved_state64_t *iss64;
774
775 iss64 = USER_REGS64(thread);
776
777 iss64->isf.rsp += adjust;
778
779 return iss64->isf.rsp;
780 } else {
781 x86_saved_state32_t *iss32;
782
783 iss32 = USER_REGS32(thread);
784
785 iss32->uesp += adjust;
786
787 return CAST_USER_ADDR_T(iss32->uesp);
788 }
789 }
790
791 /*
792 * thread_setentrypoint:
793 *
794 * Sets the user PC into the machine
795 * dependent thread state info.
796 */
797 void
thread_setentrypoint(thread_t thread,mach_vm_address_t entry)798 thread_setentrypoint(thread_t thread, mach_vm_address_t entry)
799 {
800 pal_register_cache_state(thread, DIRTY);
801 if (thread_is_64bit_addr(thread)) {
802 x86_saved_state64_t *iss64;
803
804 iss64 = USER_REGS64(thread);
805
806 iss64->isf.rip = (uint64_t)entry;
807 } else {
808 x86_saved_state32_t *iss32;
809
810 iss32 = USER_REGS32(thread);
811
812 iss32->eip = CAST_DOWN_EXPLICIT(unsigned int, entry);
813 }
814 }
815
816
817 kern_return_t
thread_setsinglestep(thread_t thread,int on)818 thread_setsinglestep(thread_t thread, int on)
819 {
820 pal_register_cache_state(thread, DIRTY);
821 if (thread_is_64bit_addr(thread)) {
822 x86_saved_state64_t *iss64;
823
824 iss64 = USER_REGS64(thread);
825
826 if (on) {
827 iss64->isf.rflags |= EFL_TF;
828 } else {
829 iss64->isf.rflags &= ~EFL_TF;
830 }
831 } else {
832 x86_saved_state32_t *iss32;
833
834 iss32 = USER_REGS32(thread);
835
836 if (on) {
837 iss32->efl |= EFL_TF;
838 /* Ensure IRET */
839 if (iss32->cs == SYSENTER_CS) {
840 iss32->cs = SYSENTER_TF_CS;
841 }
842 } else {
843 iss32->efl &= ~EFL_TF;
844 }
845 }
846
847 return KERN_SUCCESS;
848 }
849
/* Return the thread's saved user register state, marking the
 * register cache dirty first since the caller may modify it. */
void *
get_user_regs(thread_t th)
{
	pal_register_cache_state(th, DIRTY);
	return USER_STATE(th);
}

/* BSD-facing alias for get_user_regs(). */
void *
find_user_regs(thread_t thread)
{
	return get_user_regs(thread);
}
862
863 #if CONFIG_DTRACE
864 /*
865 * DTrace would like to have a peek at the kernel interrupt state, if available.
866 */
867 x86_saved_state_t *find_kern_regs(thread_t);
868
869 x86_saved_state_t *
find_kern_regs(thread_t thread)870 find_kern_regs(thread_t thread)
871 {
872 if (thread == current_thread() &&
873 NULL != current_cpu_datap()->cpu_int_state &&
874 !(USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
875 current_cpu_datap()->cpu_interrupt_level == 1)) {
876 return current_cpu_datap()->cpu_int_state;
877 } else {
878 return NULL;
879 }
880 }
881
vm_offset_t dtrace_get_cpu_int_stack_top(void);

/* Return the top of the current CPU's interrupt stack, for DTrace. */
vm_offset_t
dtrace_get_cpu_int_stack_top(void)
{
	return current_cpu_datap()->cpu_int_stack_top;
}
889 #endif
890