1 /*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1992 NeXT, Inc.
30 *
31 * HISTORY
32 * 13 May 1992 ? at NeXT
33 * Created.
34 */
35
36 #include <mach/mach_types.h>
37 #include <mach/exception.h>
38
39 #include <kern/thread.h>
40 #include <kern/ast.h>
41
42 #include <sys/systm.h>
43 #include <sys/param.h>
44 #include <sys/proc_internal.h>
45 #include <sys/user.h>
46 #include <sys/sysproto.h>
47 #include <sys/sysent.h>
48 #include <sys/ucontext.h>
49 #include <sys/wait.h>
50
51 #include <sys/ux_exception.h>
52
53 #include <mach/thread_act.h> /* for thread_abort_safely */
54 #include <mach/thread_status.h>
55
56 #include <i386/eflags.h>
57 #include <i386/psl.h>
58 #include <i386/machine_routines.h>
59 #include <i386/seg.h>
60 #include <i386/fpu.h>
61
62 #include <machine/pal_routines.h>
63
64 #include <sys/kdebug.h>
65 #include <sys/sdt.h>
66 #include <sys/random.h>
67
68
69 /* Forward: */
70 extern kern_return_t thread_getstatus(thread_t act, int flavor,
71 thread_state_t tstate, mach_msg_type_number_t *count);
72 extern kern_return_t thread_setstatus(thread_t thread, int flavor,
73 thread_state_t tstate, mach_msg_type_number_t count);
74
75 /* Signal handler flavors supported */
76 /* These defns should match the Libc implmn */
#define UC_TRAD 1
#define UC_FLAVOR 30
#define UC_SET_ALT_STACK 0x40000000
#define UC_RESET_ALT_STACK 0x80000000

#define C_32_STK_ALIGN 16
#define C_64_STK_ALIGN 16
#define C_64_REDZONE_LEN 128
/*
 * Align 'a' down to a 'c'-byte boundary, always returning a value strictly
 * below 'a' (the result is the largest multiple of 'c' less than 'a', or
 * 'a' - 'c' when 'a' is already aligned).  'c' must be a power of two.
 * Used when carving frames out of the downward-growing user stack.
 */
#define TRUNC_DOWN32(a, c) ((((uint32_t)a)-(c)) & ((uint32_t)(-(c))))
#define TRUNC_DOWN64(a, c) ((((uint64_t)a)-(c)) & ((uint64_t)(-(c))))
87
88 /*
89 * Send an interrupt to process.
90 *
91 * Stack is set up to allow sigcode stored
92 * in u. to call routine, followed by chmk
93 * to sigreturn routine below. After sigreturn
94 * resets the signal mask, the stack, the frame
95 * pointer, and the argument pointer, it returns
96 * to the user specified pc, psl.
97 */
/*
 * Argument frame pushed onto the 32-bit user stack for signal delivery;
 * the layout must match what the Libc signal trampoline (_sigtramp) expects.
 */
struct sigframe32 {
	int retaddr;            /* dummy return address (-1); handler exits via sigreturn */
	user32_addr_t catcher;  /* sig_t */
	int sigstyle;           /* UC_TRAD or UC_FLAVOR */
	int sig;                /* signal number being delivered */
	user32_addr_t sinfo;    /* siginfo32_t* */
	user32_addr_t uctx;     /* struct ucontext32 */
	user32_addr_t token;    /* sigreturn validation token (uctx addr ^ per-thread secret) */
};
107
108 /*
109 * Declare table of structure flavors and sizes for 64-bit and 32-bit processes
110 * for the cases of extended states (plain FP, or AVX):
111 */
typedef struct {
	int flavor; natural_t state_count; size_t mcontext_size;
} xstate_info_t;
/*
 * 64-bit processes, indexed by xstate_t.  The *_FULL entries are selected
 * (via the STATE64_FULL bit OR'ed into sig_xstate in sendsig/sigreturn)
 * when the task has an LDT; they use the larger "full" mcontext layouts
 * whose thread-state member carries segment registers.
 */
static const xstate_info_t thread_state64[] = {
	[FP] = { x86_FLOAT_STATE64, x86_FLOAT_STATE64_COUNT, sizeof(struct mcontext64) },
	[FP_FULL] = { x86_FLOAT_STATE64, x86_FLOAT_STATE64_COUNT, sizeof(struct mcontext64_full) },
	[AVX] = { x86_AVX_STATE64, x86_AVX_STATE64_COUNT, sizeof(struct mcontext_avx64) },
	[AVX_FULL] = { x86_AVX_STATE64, x86_AVX_STATE64_COUNT, sizeof(struct mcontext_avx64_full) },
	[AVX512] = { x86_AVX512_STATE64, x86_AVX512_STATE64_COUNT, sizeof(struct mcontext_avx512_64) },
	[AVX512_FULL] = { x86_AVX512_STATE64, x86_AVX512_STATE64_COUNT, sizeof(struct mcontext_avx512_64_full) }
};
/* 32-bit processes have no "full" variants */
static const xstate_info_t thread_state32[] = {
	[FP] = { x86_FLOAT_STATE32, x86_FLOAT_STATE32_COUNT, sizeof(struct mcontext32) },
	[AVX] = { x86_AVX_STATE32, x86_AVX_STATE32_COUNT, sizeof(struct mcontext_avx32) },
	[AVX512] = { x86_AVX512_STATE32, x86_AVX512_STATE32_COUNT, sizeof(struct mcontext_avx512_32) }
};
128
129 /*
130 * NOTE: Source and target may *NOT* overlap!
131 * XXX: Unify with bsd/kern/kern_exit.c
132 */
133 static void
siginfo_user_to_user32_x86(user_siginfo_t * in,user32_siginfo_t * out)134 siginfo_user_to_user32_x86(user_siginfo_t *in, user32_siginfo_t *out)
135 {
136 out->si_signo = in->si_signo;
137 out->si_errno = in->si_errno;
138 out->si_code = in->si_code;
139 out->si_pid = in->si_pid;
140 out->si_uid = in->si_uid;
141 out->si_status = in->si_status;
142 out->si_addr = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_addr);
143 /* following cast works for sival_int because of padding */
144 out->si_value.sival_ptr = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_value.sival_ptr);
145 out->si_band = in->si_band; /* range reduction */
146 out->__pad[0] = in->pad[0]; /* mcontext.ss.r1 */
147 }
148
149 static void
siginfo_user_to_user64_x86(user_siginfo_t * in,user64_siginfo_t * out)150 siginfo_user_to_user64_x86(user_siginfo_t *in, user64_siginfo_t *out)
151 {
152 out->si_signo = in->si_signo;
153 out->si_errno = in->si_errno;
154 out->si_code = in->si_code;
155 out->si_pid = in->si_pid;
156 out->si_uid = in->si_uid;
157 out->si_status = in->si_status;
158 out->si_addr = in->si_addr;
159 out->si_value.sival_ptr = in->si_value.sival_ptr;
160 out->si_band = in->si_band; /* range reduction */
161 out->__pad[0] = in->pad[0]; /* mcontext.ss.r1 */
162 }
163
164 void
sendsig(struct proc * p,user_addr_t ua_catcher,int sig,int mask,__unused uint32_t code,sigset_t siginfo)165 sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint32_t code, sigset_t siginfo)
166 {
167 union {
168 struct mcontext_avx32 mctx_avx32;
169 struct mcontext_avx64 mctx_avx64;
170 struct mcontext_avx64_full mctx_avx64_full;
171 struct mcontext_avx512_32 mctx_avx512_32;
172 struct mcontext_avx512_64 mctx_avx512_64;
173 struct mcontext_avx512_64_full mctx_avx512_64_full;
174 } mctx_store, *mctxp = &mctx_store;
175
176 user_addr_t ua_sp;
177 user_addr_t ua_fp;
178 user_addr_t ua_cr2;
179 user_addr_t ua_sip;
180 user_addr_t ua_uctxp;
181 user_addr_t ua_mctxp;
182 user_siginfo_t sinfo64;
183
184 struct sigacts *ps = &p->p_sigacts;
185 int oonstack, flavor;
186 user_addr_t trampact;
187 int sigonstack;
188 void * state, *fpstate;
189 mach_msg_type_number_t state_count;
190
191 thread_t thread;
192 struct uthread * ut;
193 int stack_size = 0;
194 int infostyle = UC_TRAD;
195 xstate_t sig_xstate;
196 user_addr_t token_uctx;
197 kern_return_t kr;
198 boolean_t reset_ss = TRUE;
199
200 thread = current_thread();
201 ut = get_bsdthread_info(thread);
202
203 if (siginfo & sigmask(sig)) {
204 infostyle = UC_FLAVOR;
205 }
206
207 oonstack = ut->uu_sigstk.ss_flags & SA_ONSTACK;
208 trampact = SIGTRAMP(p, sig);
209 sigonstack = (ps->ps_sigonstack & sigmask(sig));
210
211 /*
212 * init siginfo
213 */
214 proc_unlock(p);
215
216 bzero((caddr_t)&sinfo64, sizeof(sinfo64));
217 sinfo64.si_signo = sig;
218
219 bzero(mctxp, sizeof(*mctxp));
220
221 sig_xstate = current_xstate();
222
223 if (ut->uu_pending_sigreturn == 0) {
224 /* Generate random token value used to validate sigreturn arguments */
225 read_random(&ut->uu_sigreturn_token, sizeof(ut->uu_sigreturn_token));
226 }
227 ut->uu_pending_sigreturn++;
228
229 if (proc_is64bit(p)) {
230 x86_thread_state64_t *tstate64;
231 struct user_ucontext64 uctx64;
232 user64_addr_t token;
233 int task_has_ldt = thread_task_has_ldt(thread);
234
235 if (task_has_ldt) {
236 flavor = x86_THREAD_FULL_STATE64;
237 state_count = x86_THREAD_FULL_STATE64_COUNT;
238 fpstate = (void *)&mctxp->mctx_avx64_full.fs;
239 sig_xstate |= STATE64_FULL;
240 } else {
241 flavor = x86_THREAD_STATE64;
242 state_count = x86_THREAD_STATE64_COUNT;
243 fpstate = (void *)&mctxp->mctx_avx64.fs;
244 }
245 state = (void *)&mctxp->mctx_avx64.ss;
246
247 /*
248 * The state copying is performed with pointers to fields in the state
249 * struct. This works specifically because the mcontext is layed-out with the
250 * variable-sized FP-state as the last member. However, with the requirement
251 * to support passing "full" 64-bit state to the signal handler, that layout has now
252 * changed (since the "full" state has a larger "ss" member than the non-"full"
253 * structure. Because of this, and to retain the array-lookup method of determining
254 * structure sizes, we OR-in STATE64_FULL to sig_xstate to ensure the proper mcontext
255 * size is passed.
256 */
257
258 if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS) {
259 goto bad;
260 }
261
262 if ((sig_xstate & STATE64_FULL) && mctxp->mctx_avx64.ss.cs != USER64_CS) {
263 if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
264 (sigonstack)) {
265 reset_ss = TRUE;
266 } else {
267 reset_ss = FALSE;
268 }
269 } else {
270 reset_ss = FALSE;
271 }
272
273 flavor = thread_state64[sig_xstate].flavor;
274 state_count = thread_state64[sig_xstate].state_count;
275 if (thread_getstatus(thread, flavor, (thread_state_t)fpstate, &state_count) != KERN_SUCCESS) {
276 goto bad;
277 }
278
279 flavor = x86_EXCEPTION_STATE64;
280 state_count = x86_EXCEPTION_STATE64_COUNT;
281 state = (void *)&mctxp->mctx_avx64.es;
282 if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS) {
283 goto bad;
284 }
285
286 tstate64 = &mctxp->mctx_avx64.ss;
287
288 /* figure out where our new stack lives */
289 if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
290 (sigonstack)) {
291 ua_sp = ut->uu_sigstk.ss_sp;
292 stack_size = ut->uu_sigstk.ss_size;
293 ua_sp += stack_size;
294 ut->uu_sigstk.ss_flags |= SA_ONSTACK;
295 } else {
296 if ((sig_xstate & STATE64_FULL) && tstate64->cs != USER64_CS) {
297 reset_ss = FALSE;
298 }
299 ua_sp = tstate64->rsp;
300 }
301 ua_cr2 = mctxp->mctx_avx64.es.faultvaddr;
302
303 /* The x86_64 ABI defines a 128-byte red zone. */
304 ua_sp -= C_64_REDZONE_LEN;
305
306 ua_sp -= sizeof(struct user_ucontext64);
307 ua_uctxp = ua_sp; // someone tramples the first word!
308
309 ua_sp -= sizeof(user64_siginfo_t);
310 ua_sip = ua_sp;
311
312 ua_sp -= thread_state64[sig_xstate].mcontext_size;
313 ua_mctxp = ua_sp;
314
315 /*
316 * Align the frame and stack pointers to 16 bytes for SSE.
317 * (Note that we use 'ua_fp' as the base of the stack going forward)
318 */
319 ua_fp = TRUNC_DOWN64(ua_sp, C_64_STK_ALIGN);
320
321 /*
322 * But we need to account for the return address so the alignment is
323 * truly "correct" at _sigtramp
324 */
325 ua_fp -= sizeof(user_addr_t);
326
327 /*
328 * Generate the validation token for sigreturn
329 */
330 token_uctx = ua_uctxp;
331 kr = machine_thread_siguctx_pointer_convert_to_user(thread, &token_uctx);
332 assert(kr == KERN_SUCCESS);
333 token = (user64_addr_t)token_uctx ^ (user64_addr_t)ut->uu_sigreturn_token;
334
335 /*
336 * Build the signal context to be used by sigreturn.
337 */
338 bzero(&uctx64, sizeof(uctx64));
339
340 uctx64.uc_onstack = oonstack;
341 uctx64.uc_sigmask = mask;
342 uctx64.uc_stack.ss_sp = ua_fp;
343 uctx64.uc_stack.ss_size = stack_size;
344
345 if (oonstack) {
346 uctx64.uc_stack.ss_flags |= SS_ONSTACK;
347 }
348 uctx64.uc_link = 0;
349
350 uctx64.uc_mcsize = thread_state64[sig_xstate].mcontext_size;
351 uctx64.uc_mcontext64 = ua_mctxp;
352
353 if (copyout((caddr_t)&uctx64, ua_uctxp, sizeof(uctx64))) {
354 goto bad;
355 }
356
357 if (copyout((caddr_t)&mctx_store, ua_mctxp, thread_state64[sig_xstate].mcontext_size)) {
358 goto bad;
359 }
360
361 sinfo64.pad[0] = tstate64->rsp;
362 sinfo64.si_addr = tstate64->rip;
363
364 tstate64->rip = trampact;
365 tstate64->rsp = ua_fp;
366 tstate64->rflags = get_eflags_exportmask();
367
368 /*
369 * SETH - need to set these for processes with LDTs
370 */
371 tstate64->cs = USER64_CS;
372 tstate64->fs = NULL_SEG;
373 /*
374 * Set gs to 0 here to prevent restoration of %gs on return-to-user. If we
375 * did NOT do that here and %gs was non-zero, we'd blow away gsbase when
376 * we restore %gs in the kernel exit trampoline.
377 */
378 tstate64->gs = 0;
379
380 if (sig_xstate & STATE64_FULL) {
381 /* Reset DS, ES, and possibly SS */
382 if (reset_ss) {
383 /*
384 * Restore %ss if (a) an altstack was used for signal delivery
385 * or (b) %cs at the time of the signal was the default
386 * (USER64_CS)
387 */
388 mctxp->mctx_avx64_full.ss.ss = USER64_DS;
389 }
390 mctxp->mctx_avx64_full.ss.ds = USER64_DS;
391 mctxp->mctx_avx64_full.ss.es = 0;
392 }
393
394 /*
395 * Build the argument list for the signal handler.
396 * Handler should call sigreturn to get out of it
397 */
398 tstate64->rdi = ua_catcher;
399 tstate64->rsi = infostyle;
400 tstate64->rdx = sig;
401 tstate64->rcx = ua_sip;
402 tstate64->r8 = ua_uctxp;
403 tstate64->r9 = token;
404 } else {
405 x86_thread_state32_t *tstate32;
406 struct user_ucontext32 uctx32;
407 struct sigframe32 frame32;
408 user32_addr_t token;
409
410 flavor = x86_THREAD_STATE32;
411 state_count = x86_THREAD_STATE32_COUNT;
412 state = (void *)&mctxp->mctx_avx32.ss;
413 if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS) {
414 goto bad;
415 }
416
417 flavor = thread_state32[sig_xstate].flavor;
418 state_count = thread_state32[sig_xstate].state_count;
419 state = (void *)&mctxp->mctx_avx32.fs;
420 if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS) {
421 goto bad;
422 }
423
424 flavor = x86_EXCEPTION_STATE32;
425 state_count = x86_EXCEPTION_STATE32_COUNT;
426 state = (void *)&mctxp->mctx_avx32.es;
427 if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS) {
428 goto bad;
429 }
430
431 tstate32 = &mctxp->mctx_avx32.ss;
432
433 /* figure out where our new stack lives */
434 if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
435 (sigonstack)) {
436 ua_sp = ut->uu_sigstk.ss_sp;
437 stack_size = ut->uu_sigstk.ss_size;
438 ua_sp += stack_size;
439 ut->uu_sigstk.ss_flags |= SA_ONSTACK;
440 } else {
441 ua_sp = tstate32->esp;
442 }
443 ua_cr2 = mctxp->mctx_avx32.es.faultvaddr;
444
445 ua_sp -= sizeof(struct user_ucontext32);
446 ua_uctxp = ua_sp; // someone tramples the first word!
447
448 ua_sp -= sizeof(user32_siginfo_t);
449 ua_sip = ua_sp;
450
451 ua_sp -= thread_state32[sig_xstate].mcontext_size;
452 ua_mctxp = ua_sp;
453
454 ua_sp -= sizeof(struct sigframe32);
455 ua_fp = ua_sp;
456
457 /*
458 * Align the frame and stack pointers to 16 bytes for SSE.
459 * (Note that we use 'fp' as the base of the stack going forward)
460 */
461 ua_fp = TRUNC_DOWN32(ua_fp, C_32_STK_ALIGN);
462
463 /*
464 * But we need to account for the return address so the alignment is
465 * truly "correct" at _sigtramp
466 */
467 ua_fp -= sizeof(frame32.retaddr);
468
469 /*
470 * Generate the validation token for sigreturn
471 */
472 token_uctx = ua_uctxp;
473 kr = machine_thread_siguctx_pointer_convert_to_user(thread, &token_uctx);
474 assert(kr == KERN_SUCCESS);
475 token = CAST_DOWN_EXPLICIT(user32_addr_t, token_uctx) ^
476 CAST_DOWN_EXPLICIT(user32_addr_t, ut->uu_sigreturn_token);
477
478 /*
479 * Build the argument list for the signal handler.
480 * Handler should call sigreturn to get out of it
481 */
482 frame32.retaddr = -1;
483 frame32.sigstyle = infostyle;
484 frame32.sig = sig;
485 frame32.catcher = CAST_DOWN_EXPLICIT(user32_addr_t, ua_catcher);
486 frame32.sinfo = CAST_DOWN_EXPLICIT(user32_addr_t, ua_sip);
487 frame32.uctx = CAST_DOWN_EXPLICIT(user32_addr_t, ua_uctxp);
488 frame32.token = token;
489
490 if (copyout((caddr_t)&frame32, ua_fp, sizeof(frame32))) {
491 goto bad;
492 }
493
494 /*
495 * Build the signal context to be used by sigreturn.
496 */
497 bzero(&uctx32, sizeof(uctx32));
498
499 uctx32.uc_onstack = oonstack;
500 uctx32.uc_sigmask = mask;
501 uctx32.uc_stack.ss_sp = CAST_DOWN_EXPLICIT(user32_addr_t, ua_fp);
502 uctx32.uc_stack.ss_size = stack_size;
503
504 if (oonstack) {
505 uctx32.uc_stack.ss_flags |= SS_ONSTACK;
506 }
507 uctx32.uc_link = 0;
508
509 uctx32.uc_mcsize = thread_state64[sig_xstate].mcontext_size;
510
511 uctx32.uc_mcontext = CAST_DOWN_EXPLICIT(user32_addr_t, ua_mctxp);
512
513 if (copyout((caddr_t)&uctx32, ua_uctxp, sizeof(uctx32))) {
514 goto bad;
515 }
516
517 if (copyout((caddr_t)&mctx_store, ua_mctxp, thread_state32[sig_xstate].mcontext_size)) {
518 goto bad;
519 }
520
521 sinfo64.pad[0] = tstate32->esp;
522 sinfo64.si_addr = tstate32->eip;
523 }
524
525 switch (sig) {
526 case SIGILL:
527 switch (ut->uu_code) {
528 case EXC_I386_INVOP:
529 sinfo64.si_code = ILL_ILLOPC;
530 break;
531 default:
532 sinfo64.si_code = ILL_NOOP;
533 }
534 break;
535 case SIGFPE:
536 #define FP_IE 0 /* Invalid operation */
537 #define FP_DE 1 /* Denormalized operand */
538 #define FP_ZE 2 /* Zero divide */
539 #define FP_OE 3 /* overflow */
540 #define FP_UE 4 /* underflow */
541 #define FP_PE 5 /* precision */
542 if (ut->uu_code == EXC_I386_DIV) {
543 sinfo64.si_code = FPE_INTDIV;
544 } else if (ut->uu_code == EXC_I386_INTO) {
545 sinfo64.si_code = FPE_INTOVF;
546 } else if (ut->uu_subcode & (1 << FP_ZE)) {
547 sinfo64.si_code = FPE_FLTDIV;
548 } else if (ut->uu_subcode & (1 << FP_OE)) {
549 sinfo64.si_code = FPE_FLTOVF;
550 } else if (ut->uu_subcode & (1 << FP_UE)) {
551 sinfo64.si_code = FPE_FLTUND;
552 } else if (ut->uu_subcode & (1 << FP_PE)) {
553 sinfo64.si_code = FPE_FLTRES;
554 } else if (ut->uu_subcode & (1 << FP_IE)) {
555 sinfo64.si_code = FPE_FLTINV;
556 } else {
557 sinfo64.si_code = FPE_NOOP;
558 }
559 break;
560 case SIGBUS:
561 sinfo64.si_code = BUS_ADRERR;
562 sinfo64.si_addr = ua_cr2;
563 break;
564 case SIGTRAP:
565 sinfo64.si_code = TRAP_BRKPT;
566 break;
567 case SIGSEGV:
568 sinfo64.si_addr = ua_cr2;
569
570 switch (ut->uu_code) {
571 case EXC_I386_GPFLT:
572 /* CR2 is meaningless after GP fault */
573 /* XXX namespace clash! */
574 sinfo64.si_addr = 0ULL;
575 sinfo64.si_code = 0;
576 break;
577 case KERN_PROTECTION_FAILURE:
578 sinfo64.si_code = SEGV_ACCERR;
579 break;
580 case KERN_INVALID_ADDRESS:
581 sinfo64.si_code = SEGV_MAPERR;
582 break;
583 default:
584 sinfo64.si_code = FPE_NOOP;
585 }
586 break;
587 default:
588 {
589 int status_and_exitcode;
590
591 /*
592 * All other signals need to fill out a minimum set of
593 * information for the siginfo structure passed into
594 * the signal handler, if SA_SIGINFO was specified.
595 *
596 * p->si_status actually contains both the status and
597 * the exit code; we save it off in its own variable
598 * for later breakdown.
599 */
600 proc_lock(p);
601 sinfo64.si_pid = p->si_pid;
602 p->si_pid = 0;
603 status_and_exitcode = p->si_status;
604 p->si_status = 0;
605 sinfo64.si_uid = p->si_uid;
606 p->si_uid = 0;
607 sinfo64.si_code = p->si_code;
608 p->si_code = 0;
609 proc_unlock(p);
610 if (sinfo64.si_code == CLD_EXITED) {
611 if (WIFEXITED(status_and_exitcode)) {
612 sinfo64.si_code = CLD_EXITED;
613 } else if (WIFSIGNALED(status_and_exitcode)) {
614 if (WCOREDUMP(status_and_exitcode)) {
615 sinfo64.si_code = CLD_DUMPED;
616 status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
617 } else {
618 sinfo64.si_code = CLD_KILLED;
619 status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
620 }
621 }
622 }
623 /*
624 * The recorded status contains the exit code and the
625 * signal information, but the information to be passed
626 * in the siginfo to the handler is supposed to only
627 * contain the status, so we have to shift it out.
628 */
629 sinfo64.si_status = (WEXITSTATUS(status_and_exitcode) & 0x00FFFFFF) | (((uint32_t)(p->p_xhighbits) << 24) & 0xFF000000);
630 p->p_xhighbits = 0;
631 break;
632 }
633 }
634 if (proc_is64bit(p)) {
635 user64_siginfo_t sinfo64_user64;
636
637 bzero((caddr_t)&sinfo64_user64, sizeof(sinfo64_user64));
638
639 siginfo_user_to_user64_x86(&sinfo64, &sinfo64_user64);
640
641 #if CONFIG_DTRACE
642 bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));
643
644 ut->t_dtrace_siginfo.si_signo = sinfo64.si_signo;
645 ut->t_dtrace_siginfo.si_code = sinfo64.si_code;
646 ut->t_dtrace_siginfo.si_pid = sinfo64.si_pid;
647 ut->t_dtrace_siginfo.si_uid = sinfo64.si_uid;
648 ut->t_dtrace_siginfo.si_status = sinfo64.si_status;
649 /* XXX truncates faulting address to void * on K32 */
650 ut->t_dtrace_siginfo.si_addr = CAST_DOWN(void *, sinfo64.si_addr);
651
652 /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
653 switch (sig) {
654 case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
655 DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
656 break;
657 default:
658 break;
659 }
660
661 /* XXX truncates catcher address to uintptr_t */
662 DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo),
663 void (*)(void), CAST_DOWN(sig_t, ua_catcher));
664 #endif /* CONFIG_DTRACE */
665
666 if (copyout((caddr_t)&sinfo64_user64, ua_sip, sizeof(sinfo64_user64))) {
667 goto bad;
668 }
669
670 if (sig_xstate & STATE64_FULL) {
671 flavor = x86_THREAD_FULL_STATE64;
672 state_count = x86_THREAD_FULL_STATE64_COUNT;
673 } else {
674 flavor = x86_THREAD_STATE64;
675 state_count = x86_THREAD_STATE64_COUNT;
676 }
677 state = (void *)&mctxp->mctx_avx64.ss;
678 } else {
679 x86_thread_state32_t *tstate32;
680 user32_siginfo_t sinfo32;
681
682 bzero((caddr_t)&sinfo32, sizeof(sinfo32));
683
684 siginfo_user_to_user32_x86(&sinfo64, &sinfo32);
685
686 #if CONFIG_DTRACE
687 bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));
688
689 ut->t_dtrace_siginfo.si_signo = sinfo32.si_signo;
690 ut->t_dtrace_siginfo.si_code = sinfo32.si_code;
691 ut->t_dtrace_siginfo.si_pid = sinfo32.si_pid;
692 ut->t_dtrace_siginfo.si_uid = sinfo32.si_uid;
693 ut->t_dtrace_siginfo.si_status = sinfo32.si_status;
694 ut->t_dtrace_siginfo.si_addr = CAST_DOWN(void *, sinfo32.si_addr);
695
696 /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
697 switch (sig) {
698 case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
699 DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
700 break;
701 default:
702 break;
703 }
704
705 DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo),
706 void (*)(void), CAST_DOWN(sig_t, ua_catcher));
707 #endif /* CONFIG_DTRACE */
708
709 if (copyout((caddr_t)&sinfo32, ua_sip, sizeof(sinfo32))) {
710 goto bad;
711 }
712
713 tstate32 = &mctxp->mctx_avx32.ss;
714
715 tstate32->eip = CAST_DOWN_EXPLICIT(user32_addr_t, trampact);
716 tstate32->esp = CAST_DOWN_EXPLICIT(user32_addr_t, ua_fp);
717
718 tstate32->eflags = get_eflags_exportmask();
719
720 tstate32->cs = USER_CS;
721 tstate32->ss = USER_DS;
722 tstate32->ds = USER_DS;
723 tstate32->es = USER_DS;
724 tstate32->fs = NULL_SEG;
725 tstate32->gs = USER_CTHREAD;
726
727 flavor = x86_THREAD_STATE32;
728 state_count = x86_THREAD_STATE32_COUNT;
729 state = (void *)tstate32;
730 }
731 if (thread_setstatus(thread, flavor, (thread_state_t)state, state_count) != KERN_SUCCESS) {
732 goto bad;
733 }
734 ml_fp_setvalid(FALSE);
735
736 /* Tell the PAL layer about the signal */
737 pal_set_signal_delivery( thread );
738
739 proc_lock(p);
740
741 return;
742
743 bad:
744
745 assert(ut->uu_pending_sigreturn > 0);
746 ut->uu_pending_sigreturn--;
747 proc_lock(p);
748 proc_set_sigact(p, SIGILL, SIG_DFL);
749 sig = sigmask(SIGILL);
750 p->p_sigignore &= ~sig;
751 p->p_sigcatch &= ~sig;
752 ut->uu_sigmask &= ~sig;
753 /* sendsig is called with signal lock held */
754 proc_unlock(p);
755 psignal_locked(p, SIGILL);
756 proc_lock(p);
757 return;
758 }
759
760 /*
761 * System call to cleanup state after a signal
762 * has been taken. Reset signal mask and
763 * stack state from context left by sendsig (above).
764 * Return to previous pc and psl as specified by
765 * context left by sendsig. Check carefully to
766 * make sure that the user has not modified the
767 * psl to gain improper priviledges or to cause
768 * a machine fault.
769 */
770
int
sigreturn(struct proc *p, struct sigreturn_args *uap, __unused int *retval)
{
	/* Scratch space large enough for any supported mcontext flavor */
	union {
		struct mcontext_avx32 mctx_avx32;
		struct mcontext_avx64 mctx_avx64;
		struct mcontext_avx64_full mctx_avx64_full;
		struct mcontext_avx512_32 mctx_avx512_32;
		struct mcontext_avx512_64 mctx_avx512_64;
		struct mcontext_avx512_64_full mctx_avx512_64_full;
	} mctx_store, *mctxp = &mctx_store;

	thread_t thread = current_thread();
	struct uthread * ut;
	struct sigacts *ps = &p->p_sigacts;
	int error;
	int onstack = 0;

	/* ts_*: integer thread state to restore; fs_*: FP/extended state */
	mach_msg_type_number_t ts_count;
	unsigned int ts_flavor;
	void * ts;
	mach_msg_type_number_t fs_count;
	unsigned int fs_flavor;
	void * fs;
	int rval = EJUSTRETURN;
	xstate_t sig_xstate;
	uint32_t sigreturn_validation;
	user_addr_t token_uctx;
	kern_return_t kr;

	ut = (struct uthread *)get_bsdthread_info(thread);

	/* see osfmk/kern/restartable.c */
	act_set_ast_reset_pcs(TASK_NULL, thread);

	/*
	 * If we are being asked to change the altstack flag on the thread, we
	 * just set/reset it and return (the uap->uctx is not used).
	 */
	if ((unsigned int)uap->infostyle == UC_SET_ALT_STACK) {
		ut->uu_sigstk.ss_flags |= SA_ONSTACK;
		return 0;
	} else if ((unsigned int)uap->infostyle == UC_RESET_ALT_STACK) {
		ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;
		return 0;
	}

	bzero(mctxp, sizeof(*mctxp));

	/* Selects which entry of the thread_state tables applies below */
	sig_xstate = current_xstate();

	sigreturn_validation = atomic_load_explicit(
		&ps->ps_sigreturn_validation, memory_order_relaxed);
	/*
	 * Convert the user-supplied ucontext pointer the same way sendsig did
	 * when it generated the token, so the XOR comparison below matches.
	 */
	token_uctx = uap->uctx;
	kr = machine_thread_siguctx_pointer_convert_to_user(thread, &token_uctx);
	assert(kr == KERN_SUCCESS);

	if (proc_is64bit(p)) {
		struct user_ucontext64 uctx64;
		user64_addr_t token;
		int task_has_ldt = thread_task_has_ldt(thread);

		if ((error = copyin(uap->uctx, (void *)&uctx64, sizeof(uctx64)))) {
			return error;
		}

		onstack = uctx64.uc_onstack & 01;
		ut->uu_sigmask = uctx64.uc_sigmask & ~sigcantmask;

		if (task_has_ldt) {
			/* LDT tasks restore the "full" state (see thread_state64[] table) */
			ts_flavor = x86_THREAD_FULL_STATE64;
			ts_count = x86_THREAD_FULL_STATE64_COUNT;
			fs = (void *)&mctxp->mctx_avx64_full.fs;
			sig_xstate |= STATE64_FULL;
		} else {
			ts_flavor = x86_THREAD_STATE64;
			ts_count = x86_THREAD_STATE64_COUNT;
			fs = (void *)&mctxp->mctx_avx64.fs;
		}

		if ((error = copyin(uctx64.uc_mcontext64, (void *)mctxp, thread_state64[sig_xstate].mcontext_size))) {
			return error;
		}

		ts = (void *)&mctxp->mctx_avx64.ss;

		fs_flavor = thread_state64[sig_xstate].flavor;
		fs_count = thread_state64[sig_xstate].state_count;

		/* Expected token: (converted) ucontext address XOR per-thread secret */
		token = (user64_addr_t)token_uctx ^ (user64_addr_t)ut->uu_sigreturn_token;
		if ((user64_addr_t)uap->token != token) {
#if DEVELOPMENT || DEBUG
			printf("process %s[%d] sigreturn token mismatch: received 0x%llx expected 0x%llx\n",
			    p->p_comm, proc_getpid(p), (user64_addr_t)uap->token, token);
#endif /* DEVELOPMENT || DEBUG */
			if (sigreturn_validation != PS_SIGRETURN_VALIDATION_DISABLED) {
				rval = EINVAL;
			}
		}
	} else {
		struct user_ucontext32 uctx32;
		user32_addr_t token;

		if ((error = copyin(uap->uctx, (void *)&uctx32, sizeof(uctx32)))) {
			return error;
		}

		if ((error = copyin(CAST_USER_ADDR_T(uctx32.uc_mcontext), (void *)mctxp, thread_state32[sig_xstate].mcontext_size))) {
			return error;
		}

		onstack = uctx32.uc_onstack & 01;
		ut->uu_sigmask = uctx32.uc_sigmask & ~sigcantmask;

		ts_flavor = x86_THREAD_STATE32;
		ts_count = x86_THREAD_STATE32_COUNT;
		ts = (void *)&mctxp->mctx_avx32.ss;

		fs_flavor = thread_state32[sig_xstate].flavor;
		fs_count = thread_state32[sig_xstate].state_count;
		fs = (void *)&mctxp->mctx_avx32.fs;

		/* Expected token: 32-bit-truncated ucontext address XOR per-thread secret */
		token = CAST_DOWN_EXPLICIT(user32_addr_t, uap->uctx) ^
		    CAST_DOWN_EXPLICIT(user32_addr_t, ut->uu_sigreturn_token);
		if ((user32_addr_t)uap->token != token) {
#if DEVELOPMENT || DEBUG
			printf("process %s[%d] sigreturn token mismatch: received 0x%x expected 0x%x\n",
			    p->p_comm, proc_getpid(p), (user32_addr_t)uap->token, token);
#endif /* DEVELOPMENT || DEBUG */
			if (sigreturn_validation != PS_SIGRETURN_VALIDATION_DISABLED) {
				rval = EINVAL;
			}
		}
	}

	/* Restore the altstack flag recorded by sendsig in uc_onstack */
	if (onstack) {
		ut->uu_sigstk.ss_flags |= SA_ONSTACK;
	} else {
		ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;
	}

	/* A pending signal may have become deliverable under the restored mask */
	if (ut->uu_siglist & ~ut->uu_sigmask) {
		signal_setast(thread);
	}

	if (rval == EINVAL) {
		goto error_ret;
	}

	/*
	 * thread_set_state() does all the needed checks for the passed in
	 * content
	 */
	if (thread_setstatus(thread, ts_flavor, ts, ts_count) != KERN_SUCCESS) {
		rval = EINVAL;
#if DEVELOPMENT || DEBUG
		printf("process %s[%d] sigreturn thread_setstatus error %d\n",
		    p->p_comm, proc_getpid(p), rval);
#endif /* DEVELOPMENT || DEBUG */
		goto error_ret;
	}

	/* Decrement the pending sigreturn count */
	if (ut->uu_pending_sigreturn > 0) {
		ut->uu_pending_sigreturn--;
	}

	ml_fp_setvalid(TRUE);

	if (thread_setstatus(thread, fs_flavor, fs, fs_count) != KERN_SUCCESS) {
		rval = EINVAL;
#if DEVELOPMENT || DEBUG
		printf("process %s[%d] sigreturn thread_setstatus error %d\n",
		    p->p_comm, proc_getpid(p), rval);
#endif /* DEVELOPMENT || DEBUG */
		goto error_ret;
	}
error_ret:
	return rval;
}
951
952
953 /*
954 * machine_exception() performs machine-dependent translation
955 * of a mach exception to a unix signal.
956 */
957 int
machine_exception(int exception,mach_exception_code_t code,__unused mach_exception_subcode_t subcode)958 machine_exception(int exception,
959 mach_exception_code_t code,
960 __unused mach_exception_subcode_t subcode)
961 {
962 switch (exception) {
963 case EXC_BAD_ACCESS:
964 /* Map GP fault to SIGSEGV, otherwise defer to caller */
965 if (code == EXC_I386_GPFLT) {
966 return SIGSEGV;
967 }
968 break;
969
970 case EXC_BAD_INSTRUCTION:
971 return SIGILL;
972
973 case EXC_ARITHMETIC:
974 return SIGFPE;
975
976 case EXC_SOFTWARE:
977 if (code == EXC_I386_BOUND) {
978 /*
979 * Map #BR, the Bound Range Exceeded exception, to
980 * SIGTRAP.
981 */
982 return SIGTRAP;
983 }
984 break;
985 }
986
987 return 0;
988 }
989