1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 */
4
5 #include <mach/mach_types.h>
6 #include <mach/exception_types.h>
7
8 #include <sys/param.h>
9 #include <sys/proc_internal.h>
10 #include <sys/user.h>
11 #include <sys/signal.h>
12 #include <sys/ucontext.h>
13 #include <sys/sysproto.h>
14 #include <sys/systm.h>
15 #include <sys/ux_exception.h>
16
17 #include <arm/signal.h>
18 #include <sys/signalvar.h>
19 #include <sys/kdebug.h>
20 #include <sys/sdt.h>
21 #include <sys/wait.h>
22 #include <kern/thread.h>
23 #include <mach/arm/thread_status.h>
24 #include <arm/proc_reg.h>
25
26 #include <kern/assert.h>
27 #include <kern/ast.h>
28 #include <pexpert/pexpert.h>
29 #include <sys/random.h>
30
31 extern struct arm_saved_state *get_user_regs(thread_t);
32 extern user_addr_t thread_get_cthread_self(void);
33 extern kern_return_t thread_getstatus(thread_t act, int flavor,
34 thread_state_t tstate, mach_msg_type_number_t *count);
35 extern kern_return_t thread_getstatus_to_user(thread_t act, int flavor,
36 thread_state_t tstate, mach_msg_type_number_t *count, thread_set_status_flags_t);
37 extern kern_return_t machine_thread_state_convert_to_user(thread_t act, int flavor,
38 thread_state_t tstate, mach_msg_type_number_t *count, thread_set_status_flags_t);
39 extern kern_return_t thread_setstatus(thread_t thread, int flavor,
40 thread_state_t tstate, mach_msg_type_number_t count);
41 extern kern_return_t thread_setstatus_from_user(thread_t thread, int flavor,
42 thread_state_t tstate, mach_msg_type_number_t count,
43 thread_state_t old_tstate, mach_msg_type_number_t old_count,
44 thread_set_status_flags_t flags);
45 extern task_t current_task(void);
46 extern bool task_needs_user_signed_thread_state(task_t);
47 /* XXX Put these someplace smarter... */
48 typedef struct mcontext32 mcontext32_t;
49 typedef struct mcontext64 mcontext64_t;
50
/* Signal handler flavors supported */
/* These definitions should match the libplatform implementation */
53 #define UC_TRAD 1
54 #define UC_FLAVOR 30
55 #define UC_SET_ALT_STACK 0x40000000
56 #define UC_RESET_ALT_STACK 0x80000000
57
58 /* The following are valid mcontext sizes */
59 #define UC_FLAVOR_SIZE32 ((ARM_THREAD_STATE_COUNT + ARM_EXCEPTION_STATE_COUNT + ARM_VFP_STATE_COUNT) * sizeof(int))
60 #define UC_FLAVOR_SIZE64 ((ARM_THREAD_STATE64_COUNT + ARM_EXCEPTION_STATE64_COUNT + ARM_NEON_STATE64_COUNT) * sizeof(int))
61
62 #if __arm64__
63 #define C_64_REDZONE_LEN 128
64 #endif
65
66 #define TRUNC_TO_16_BYTES(addr) (addr & ~0xf)
67
/*
 * Capture the 32-bit machine state of th_act for signal delivery.
 *
 * Fills *ts with the raw thread state and *mcp with the user-visible
 * machine context (thread, exception, and VFP state).
 *
 * Returns 0 on success, EINVAL if any state flavor cannot be fetched
 * or converted.
 */
static int
sendsig_get_state32(thread_t th_act, arm_thread_state_t *ts, mcontext32_t *mcp)
{
	void *tstate;
	mach_msg_type_number_t state_count;

	assert(!proc_is64bit_data(current_proc()));

	/* Fetch the raw integer register state. */
	tstate = (void *) ts;
	state_count = ARM_THREAD_STATE_COUNT;
	if (thread_getstatus(th_act, ARM_THREAD_STATE, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
		return EINVAL;
	}

	/* Copy into the mcontext, then convert that copy to its user form in place. */
	mcp->ss = *ts;
	tstate = (void *) &mcp->ss;
	state_count = ARM_THREAD_STATE_COUNT;
	if (machine_thread_state_convert_to_user(th_act, ARM_THREAD_STATE, (thread_state_t) tstate,
	    &state_count, TSSF_FLAGS_NONE) != KERN_SUCCESS) {
		return EINVAL;
	}

	/* Exception state (sendsig() later reads es.far for SIGSEGV/SIGBUS si_addr). */
	tstate = (void *) &mcp->es;
	state_count = ARM_EXCEPTION_STATE_COUNT;
	if (thread_getstatus(th_act, ARM_EXCEPTION_STATE, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
		return EINVAL;
	}

	/* Floating-point (VFP) state, fetched directly in user form. */
	tstate = (void *) &mcp->fs;
	state_count = ARM_VFP_STATE_COUNT;
	if (thread_getstatus_to_user(th_act, ARM_VFP_STATE, (thread_state_t) tstate, &state_count, TSSF_FLAGS_NONE) != KERN_SUCCESS) {
		return EINVAL;
	}

	return 0;
}
104
/*
 * Boot-arg "-pac_sigreturn_token": when set, the user-signed thread state
 * handling below is applied even when task_needs_user_signed_thread_state()
 * is false (see the OR'd checks in sendsig_get_state64() and sigreturn()).
 */
static TUNABLE(bool, pac_sigreturn_token, "-pac_sigreturn_token", false);
106
107 #if defined(__arm64__)
/* 64-bit signal frame that sendsig() copies out to the user stack. */
struct user_sigframe64 {
	/* We can pass the last two args in registers for ARM64 */
	user64_siginfo_t sinfo;     /* signal information for the handler */
	struct user_ucontext64 uctx; /* ucontext; points at mctx below */
	mcontext64_t mctx;          /* saved machine context */
};
114
/*
 * Capture the 64-bit machine state of th_act for signal delivery.
 *
 * Fills *ts with the raw thread state and *mcp with the user-visible
 * machine context (thread, exception, and NEON state).  The converted
 * thread state carries a stashed sigreturn token (TSSF_STASH_SIGRETURN_TOKEN)
 * which sigreturn() later validates.
 *
 * Returns 0 on success, EINVAL if any state flavor cannot be fetched
 * or converted.
 */
static int
sendsig_get_state64(thread_t th_act, arm_thread_state64_t *ts, mcontext64_t *mcp)
{
	void *tstate;
	mach_msg_type_number_t state_count;

	assert(proc_is64bit_data(current_proc()));

	/* Fetch the raw integer register state. */
	tstate = (void *) ts;
	state_count = ARM_THREAD_STATE64_COUNT;
	if (thread_getstatus(th_act, ARM_THREAD_STATE64, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
		return EINVAL;
	}

	/* Copy into the mcontext, then convert that copy to its user form in place. */
	mcp->ss = *ts;
	tstate = (void *) &mcp->ss;
	state_count = ARM_THREAD_STATE64_COUNT;
	thread_set_status_flags_t flags = TSSF_STASH_SIGRETURN_TOKEN;
	if (pac_sigreturn_token || task_needs_user_signed_thread_state(current_task())) {
		/* NOTE(review): flag name suggests per-thread user diversification of
		 * the signed state; sigreturn() sets the matching flag on the way back. */
		flags |= TSSF_THREAD_USER_DIV;
	}
	if (machine_thread_state_convert_to_user(th_act, ARM_THREAD_STATE64, (thread_state_t) tstate,
	    &state_count, flags) != KERN_SUCCESS) {
		return EINVAL;
	}

	/* Exception state (sendsig() later reads es.far for SIGSEGV/SIGBUS si_addr). */
	tstate = (void *) &mcp->es;
	state_count = ARM_EXCEPTION_STATE64_COUNT;
	if (thread_getstatus(th_act, ARM_EXCEPTION_STATE64, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
		return EINVAL;
	}

	/* SIMD/FP (NEON) state, fetched directly in user form. */
	tstate = (void *) &mcp->ns;
	state_count = ARM_NEON_STATE64_COUNT;
	if (thread_getstatus_to_user(th_act, ARM_NEON_STATE64, (thread_state_t) tstate, &state_count, TSSF_FLAGS_NONE) != KERN_SUCCESS) {
		return EINVAL;
	}

	return 0;
}
155
156 static void
sendsig_fill_uctx64(user_ucontext64_t * uctx,int oonstack,int mask,user64_addr_t sp,user64_size_t stack_size,user64_addr_t p_mctx)157 sendsig_fill_uctx64(user_ucontext64_t *uctx, int oonstack, int mask, user64_addr_t sp, user64_size_t stack_size, user64_addr_t p_mctx)
158 {
159 bzero(uctx, sizeof(*uctx));
160 uctx->uc_onstack = oonstack;
161 uctx->uc_sigmask = mask;
162 uctx->uc_stack.ss_sp = sp;
163 uctx->uc_stack.ss_size = stack_size;
164 if (oonstack) {
165 uctx->uc_stack.ss_flags |= SS_ONSTACK;
166 }
167 uctx->uc_link = (user64_addr_t)0;
168 uctx->uc_mcsize = (user64_size_t) UC_FLAVOR_SIZE64;
169 uctx->uc_mcontext64 = (user64_addr_t) p_mctx;
170 }
171
172 static kern_return_t
sendsig_set_thread_state64(arm_thread_state64_t * regs,user64_addr_t catcher,int infostyle,int sig,user64_addr_t p_sinfo,user64_addr_t p_uctx,user64_addr_t token,user64_addr_t trampact,user64_addr_t sp,thread_t th_act)173 sendsig_set_thread_state64(arm_thread_state64_t *regs,
174 user64_addr_t catcher, int infostyle, int sig, user64_addr_t p_sinfo,
175 user64_addr_t p_uctx, user64_addr_t token, user64_addr_t trampact, user64_addr_t sp, thread_t th_act)
176 {
177 assert(proc_is64bit_data(current_proc()));
178
179 regs->x[0] = catcher;
180 regs->x[1] = infostyle;
181 regs->x[2] = sig;
182 regs->x[3] = p_sinfo;
183 regs->x[4] = p_uctx;
184 regs->x[5] = token;
185 regs->pc = trampact;
186 regs->cpsr = PSR64_USER64_DEFAULT;
187 regs->sp = sp;
188
189 return thread_setstatus(th_act, ARM_THREAD_STATE64, (void *)regs, ARM_THREAD_STATE64_COUNT);
190 }
191 #endif /* defined(__arm64__) */
192
193 static void
sendsig_fill_uctx32(user_ucontext32_t * uctx,int oonstack,int mask,user_addr_t sp,user_size_t stack_size,user_addr_t p_mctx)194 sendsig_fill_uctx32(user_ucontext32_t *uctx, int oonstack, int mask, user_addr_t sp, user_size_t stack_size, user_addr_t p_mctx)
195 {
196 bzero(uctx, sizeof(*uctx));
197 uctx->uc_onstack = oonstack;
198 uctx->uc_sigmask = mask;
199 uctx->uc_stack.ss_sp = (user32_addr_t) sp;
200 uctx->uc_stack.ss_size = (user32_size_t) stack_size;
201 if (oonstack) {
202 uctx->uc_stack.ss_flags |= SS_ONSTACK;
203 }
204 uctx->uc_link = (user32_addr_t)0;
205 uctx->uc_mcsize = (user32_size_t) UC_FLAVOR_SIZE32;
206 uctx->uc_mcontext = (user32_addr_t) p_mctx;
207 }
208
209 static kern_return_t
sendsig_set_thread_state32(arm_thread_state_t * regs,user32_addr_t catcher,int infostyle,int sig,user32_addr_t p_sinfo,user32_addr_t trampact,user32_addr_t sp,thread_t th_act)210 sendsig_set_thread_state32(arm_thread_state_t *regs,
211 user32_addr_t catcher, int infostyle, int sig, user32_addr_t p_sinfo,
212 user32_addr_t trampact, user32_addr_t sp, thread_t th_act)
213 {
214 assert(!proc_is64bit_data(current_proc()));
215
216 regs->r[0] = catcher;
217 regs->r[1] = infostyle;
218 regs->r[2] = sig;
219 regs->r[3] = p_sinfo;
220 if (trampact & 1) {
221 regs->pc = trampact & ~1;
222 #if defined(__arm64__)
223 regs->cpsr = PSR64_USER32_DEFAULT | PSR64_MODE_USER32_THUMB;
224 #elif defined(__arm__)
225 regs->cpsr = PSR_USERDFLT | PSR_TF;
226 #else
227 #error Unknown architeture.
228 #endif
229 } else {
230 regs->pc = trampact;
231 regs->cpsr = PSR_USERDFLT;
232 }
233 regs->sp = sp;
234
235 return thread_setstatus(th_act, ARM_THREAD_STATE, (void *)regs, ARM_THREAD_STATE_COUNT);
236 }
237
238 #if CONFIG_DTRACE
/*
 * Populate the uthread's DTrace siginfo snapshot from the pending signal
 * and fire the relevant DTrace probes: proc:::fault for hardware-generated
 * signals and proc:::signal-handle for every delivered signal.
 */
static void
sendsig_do_dtrace(uthread_t ut, user_siginfo_t *sinfo, int sig, user_addr_t catcher)
{
	bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));

	ut->t_dtrace_siginfo.si_signo = sinfo->si_signo;
	ut->t_dtrace_siginfo.si_code = sinfo->si_code;
	ut->t_dtrace_siginfo.si_pid = sinfo->si_pid;
	ut->t_dtrace_siginfo.si_uid = sinfo->si_uid;
	ut->t_dtrace_siginfo.si_status = sinfo->si_status;
	/* XXX truncates faulting address to void * */
	ut->t_dtrace_siginfo.si_addr = CAST_DOWN_EXPLICIT(void *, sinfo->si_addr);

	/* Fire DTrace proc:::fault probe when signal is generated by hardware. */
	switch (sig) {
	case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
		DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
		break;
	default:
		break;
	}

	/* XXX truncates faulting address to uintptr_t */
	DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo),
	    void (*)(void), CAST_DOWN(uintptr_t, catcher));
}
265 #endif
266
/*
 * 32-bit signal frame that sendsig() copies out to the user stack.
 * Unlike ARM64, the ucontext pointer and the sigreturn token are passed
 * on the stack rather than in registers.
 */
struct user_sigframe32 {
	user32_addr_t puctx;         /* user pointer to uctx below */
	user32_addr_t token;         /* sigreturn validation token */
	user32_siginfo_t sinfo;      /* signal information for the handler */
	struct user_ucontext32 uctx; /* ucontext; points at mctx below */
	mcontext32_t mctx;           /* saved machine context */
};
274
275 /*
276 * Send an interrupt to process.
277 *
278 */
279 void
sendsig(struct proc * p,user_addr_t catcher,int sig,int mask,__unused uint32_t code,sigset_t siginfo)280 sendsig(
281 struct proc * p,
282 user_addr_t catcher,
283 int sig,
284 int mask,
285 __unused uint32_t code,
286 sigset_t siginfo
287 )
288 {
289 union {
290 struct ts32 {
291 arm_thread_state_t ss;
292 } ts32;
293 #if defined(__arm64__)
294 struct ts64 {
295 arm_thread_state64_t ss;
296 } ts64;
297 #endif
298 } ts;
299 union {
300 struct user_sigframe32 uf32;
301 #if defined(__arm64__)
302 struct user_sigframe64 uf64;
303 #endif
304 } user_frame;
305
306 user_siginfo_t sinfo;
307 user_addr_t sp = 0, trampact;
308 struct sigacts *ps = &p->p_sigacts;
309 int oonstack, infostyle;
310 thread_t th_act;
311 struct uthread *ut;
312 user_size_t stack_size = 0;
313 user_addr_t p_uctx, token_uctx;
314 kern_return_t kr;
315
316 th_act = current_thread();
317 ut = get_bsdthread_info(th_act);
318
319 bzero(&ts, sizeof(ts));
320 bzero(&user_frame, sizeof(user_frame));
321
322 if (siginfo & sigmask(sig)) {
323 infostyle = UC_FLAVOR;
324 } else {
325 infostyle = UC_TRAD;
326 }
327
328 trampact = SIGTRAMP(p, sig);
329 oonstack = ut->uu_sigstk.ss_flags & SA_ONSTACK;
330
331 if (ut->uu_pending_sigreturn == 0) {
332 /* Generate random token value used to validate sigreturn arguments */
333 read_random(&ut->uu_sigreturn_token, sizeof(ut->uu_sigreturn_token));
334
335 do {
336 read_random(&ut->uu_sigreturn_diversifier, sizeof(ut->uu_sigreturn_diversifier));
337 ut->uu_sigreturn_diversifier &=
338 __DARWIN_ARM_THREAD_STATE64_USER_DIVERSIFIER_MASK;
339 } while (ut->uu_sigreturn_diversifier == 0);
340 }
341 ut->uu_pending_sigreturn++;
342
343 /*
344 * Get sundry thread state.
345 */
346 if (proc_is64bit_data(p)) {
347 #ifdef __arm64__
348 int ret = 0;
349 if ((ret = sendsig_get_state64(th_act, &ts.ts64.ss, &user_frame.uf64.mctx)) != 0) {
350 #if DEVELOPMENT || DEBUG
351 printf("process [%s][%d] sendsig_get_state64 failed with ret %d, expected 0", p->p_comm, proc_getpid(p), ret);
352 #endif
353 goto bad2;
354 }
355 #else
356 panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
357 #endif
358 } else {
359 int ret = 0;
360 if ((ret = sendsig_get_state32(th_act, &ts.ts32.ss, &user_frame.uf32.mctx)) != 0) {
361 #if DEVELOPMENT || DEBUG
362 printf("process [%s][%d] sendsig_get_state32 failed with ret %d, expected 0", p->p_comm, proc_getpid(p), ret);
363 #endif
364 goto bad2;
365 }
366 }
367
368 /*
369 * Figure out where our new stack lives.
370 */
371 if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
372 (ps->ps_sigonstack & sigmask(sig))) {
373 sp = ut->uu_sigstk.ss_sp;
374 stack_size = ut->uu_sigstk.ss_size;
375
376 sp += stack_size;
377 ut->uu_sigstk.ss_flags |= SA_ONSTACK;
378 } else {
379 /*
380 * Get stack pointer, and allocate enough space
381 * for signal handler data.
382 */
383 if (proc_is64bit_data(p)) {
384 #if defined(__arm64__)
385 sp = CAST_USER_ADDR_T(ts.ts64.ss.sp);
386 #else
387 panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
388 #endif
389 } else {
390 sp = CAST_USER_ADDR_T(ts.ts32.ss.sp);
391 }
392 }
393
394 /* Make sure to move stack pointer down for room for metadata */
395 if (proc_is64bit_data(p)) {
396 #if defined(__arm64__)
397 sp = (sp - sizeof(user_frame.uf64) - C_64_REDZONE_LEN);
398 sp = TRUNC_TO_16_BYTES(sp);
399 #else
400 panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
401 #endif
402 } else {
403 sp -= sizeof(user_frame.uf32);
404 #if defined(__arm__) && (__BIGGEST_ALIGNMENT__ > 4)
405 sp = TRUNC_TO_16_BYTES(sp); /* Only for armv7k */
406 #endif
407 }
408
409 proc_unlock(p);
410
411 /*
412 * Fill in ucontext (points to mcontext, i.e. thread states).
413 */
414 if (proc_is64bit_data(p)) {
415 #if defined(__arm64__)
416 sendsig_fill_uctx64(&user_frame.uf64.uctx, oonstack, mask, sp, (user64_size_t)stack_size,
417 (user64_addr_t)&((struct user_sigframe64*)sp)->mctx);
418 #else
419 panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
420 #endif
421 } else {
422 sendsig_fill_uctx32(&user_frame.uf32.uctx, oonstack, mask, sp, (user32_size_t)stack_size,
423 (user32_addr_t)&((struct user_sigframe32*)sp)->mctx);
424 }
425
426 /*
427 * Setup siginfo.
428 */
429 bzero((caddr_t) &sinfo, sizeof(sinfo));
430 sinfo.si_signo = sig;
431
432 if (proc_is64bit_data(p)) {
433 #if defined(__arm64__)
434 sinfo.si_addr = ts.ts64.ss.pc;
435 sinfo.pad[0] = ts.ts64.ss.sp;
436 #else
437 panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
438 #endif
439 } else {
440 sinfo.si_addr = ts.ts32.ss.pc;
441 sinfo.pad[0] = ts.ts32.ss.sp;
442 }
443
444 switch (sig) {
445 case SIGILL:
446 #ifdef BER_XXX
447 if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT))) {
448 sinfo.si_code = ILL_ILLOPC;
449 } else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT))) {
450 sinfo.si_code = ILL_PRVOPC;
451 } else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT))) {
452 sinfo.si_code = ILL_ILLTRP;
453 } else {
454 sinfo.si_code = ILL_NOOP;
455 }
456 #else
457 sinfo.si_code = ILL_ILLTRP;
458 #endif
459 break;
460
461 case SIGFPE:
462 switch (ut->uu_code) {
463 case EXC_ARM_FP_UF:
464 sinfo.si_code = FPE_FLTUND;
465 break;
466 case EXC_ARM_FP_OF:
467 sinfo.si_code = FPE_FLTOVF;
468 break;
469 case EXC_ARM_FP_IO:
470 sinfo.si_code = FPE_FLTINV;
471 break;
472 case EXC_ARM_FP_DZ:
473 sinfo.si_code = FPE_FLTDIV;
474 break;
475 case EXC_ARM_FP_ID:
476 sinfo.si_code = FPE_FLTINV;
477 break;
478 case EXC_ARM_FP_IX:
479 sinfo.si_code = FPE_FLTRES;
480 break;
481 default:
482 sinfo.si_code = FPE_NOOP;
483 break;
484 }
485
486 break;
487
488 case SIGBUS:
489 if (proc_is64bit_data(p)) {
490 #if defined(__arm64__)
491 sinfo.si_addr = user_frame.uf64.mctx.es.far;
492 #else
493 panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
494 #endif
495 } else {
496 sinfo.si_addr = user_frame.uf32.mctx.es.far;
497 }
498
499 sinfo.si_code = BUS_ADRALN;
500 break;
501
502 case SIGSEGV:
503 if (proc_is64bit_data(p)) {
504 #if defined(__arm64__)
505 sinfo.si_addr = user_frame.uf64.mctx.es.far;
506 #else
507 panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
508 #endif
509 } else {
510 sinfo.si_addr = user_frame.uf32.mctx.es.far;
511 }
512
513 #ifdef BER_XXX
514 /* First check in srr1 and then in dsisr */
515 if (mctx.ss.srr1 & (1 << (31 - DSISR_PROT_BIT))) {
516 sinfo.si_code = SEGV_ACCERR;
517 } else if (mctx.es.dsisr & (1 << (31 - DSISR_PROT_BIT))) {
518 sinfo.si_code = SEGV_ACCERR;
519 } else {
520 sinfo.si_code = SEGV_MAPERR;
521 }
522 #else
523 sinfo.si_code = SEGV_ACCERR;
524 #endif
525 break;
526
527 default:
528 {
529 int status_and_exitcode;
530
531 /*
532 * All other signals need to fill out a minimum set of
533 * information for the siginfo structure passed into
534 * the signal handler, if SA_SIGINFO was specified.
535 *
536 * p->si_status actually contains both the status and
537 * the exit code; we save it off in its own variable
538 * for later breakdown.
539 */
540 proc_lock(p);
541 sinfo.si_pid = p->si_pid;
542 p->si_pid = 0;
543 status_and_exitcode = p->si_status;
544 p->si_status = 0;
545 sinfo.si_uid = p->si_uid;
546 p->si_uid = 0;
547 sinfo.si_code = p->si_code;
548 p->si_code = 0;
549 proc_unlock(p);
550 if (sinfo.si_code == CLD_EXITED) {
551 if (WIFEXITED(status_and_exitcode)) {
552 sinfo.si_code = CLD_EXITED;
553 } else if (WIFSIGNALED(status_and_exitcode)) {
554 if (WCOREDUMP(status_and_exitcode)) {
555 sinfo.si_code = CLD_DUMPED;
556 status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
557 } else {
558 sinfo.si_code = CLD_KILLED;
559 status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
560 }
561 }
562 }
563 /*
564 * The recorded status contains the exit code and the
565 * signal information, but the information to be passed
566 * in the siginfo to the handler is supposed to only
567 * contain the status, so we have to shift it out.
568 */
569 sinfo.si_status = (WEXITSTATUS(status_and_exitcode) & 0x00FFFFFF) | (((uint32_t)(p->p_xhighbits) << 24) & 0xFF000000);
570 p->p_xhighbits = 0;
571 break;
572 }
573 }
574
575 #if CONFIG_DTRACE
576 sendsig_do_dtrace(ut, &sinfo, sig, catcher);
577 #endif /* CONFIG_DTRACE */
578
579 /*
580 * Copy signal-handling frame out to user space, set thread state.
581 */
582 if (proc_is64bit_data(p)) {
583 #if defined(__arm64__)
584 user64_addr_t token;
585
586 /*
587 * mctx filled in when we get state. uctx filled in by
588 * sendsig_fill_uctx64(). We fill in the sinfo now.
589 */
590 siginfo_user_to_user64(&sinfo, &user_frame.uf64.sinfo);
591
592 p_uctx = (user_addr_t)&((struct user_sigframe64*)sp)->uctx;
593 /*
594 * Generate the validation token for sigreturn
595 */
596 token_uctx = p_uctx;
597 kr = machine_thread_siguctx_pointer_convert_to_user(th_act, &token_uctx);
598 assert(kr == KERN_SUCCESS);
599 token = (user64_addr_t)token_uctx ^ (user64_addr_t)ut->uu_sigreturn_token;
600
601 int ret = 0;
602 if ((ret = copyout(&user_frame.uf64, sp, sizeof(user_frame.uf64))) != 0) {
603 #if DEVELOPMENT || DEBUG
604 printf("process [%s][%d] copyout of user_frame to (sp, size) = (0x%llx, %zu) failed with ret %d, expected 0\n", p->p_comm, proc_getpid(p), sp, sizeof(user_frame.uf64), ret);
605 #endif
606 goto bad;
607 }
608
609 if ((kr = sendsig_set_thread_state64(&ts.ts64.ss,
610 catcher, infostyle, sig, (user64_addr_t)&((struct user_sigframe64*)sp)->sinfo,
611 (user64_addr_t)p_uctx, token, trampact, sp, th_act)) != KERN_SUCCESS) {
612 #if DEVELOPMENT || DEBUG
613 printf("process [%s][%d] sendsig_set_thread_state64 failed with kr %d, expected 0", p->p_comm, proc_getpid(p), kr);
614 #endif
615 goto bad;
616 }
617
618 #else
619 panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
620 #endif
621 } else {
622 user32_addr_t token;
623
624 /*
625 * mctx filled in when we get state. uctx filled in by
626 * sendsig_fill_uctx32(). We fill in the sinfo, *pointer*
627 * to uctx and token now.
628 */
629 siginfo_user_to_user32(&sinfo, &user_frame.uf32.sinfo);
630
631 p_uctx = (user_addr_t)&((struct user_sigframe32*)sp)->uctx;
632 /*
633 * Generate the validation token for sigreturn
634 */
635 token_uctx = (user_addr_t)p_uctx;
636 kr = machine_thread_siguctx_pointer_convert_to_user(th_act, &token_uctx);
637 assert(kr == KERN_SUCCESS);
638 token = (user32_addr_t)token_uctx ^ (user32_addr_t)ut->uu_sigreturn_token;
639
640 user_frame.uf32.puctx = (user32_addr_t)p_uctx;
641 user_frame.uf32.token = token;
642
643 if (copyout(&user_frame.uf32, sp, sizeof(user_frame.uf32)) != 0) {
644 goto bad;
645 }
646
647 if (sendsig_set_thread_state32(&ts.ts32.ss,
648 CAST_DOWN_EXPLICIT(user32_addr_t, catcher), infostyle, sig, (user32_addr_t)&((struct user_sigframe32*)sp)->sinfo,
649 CAST_DOWN_EXPLICIT(user32_addr_t, trampact), CAST_DOWN_EXPLICIT(user32_addr_t, sp), th_act) != KERN_SUCCESS) {
650 goto bad;
651 }
652 }
653
654 proc_lock(p);
655 return;
656
657 bad:
658 proc_lock(p);
659 bad2:
660 assert(ut->uu_pending_sigreturn > 0);
661 ut->uu_pending_sigreturn--;
662 proc_set_sigact(p, SIGILL, SIG_DFL);
663 sig = sigmask(SIGILL);
664 p->p_sigignore &= ~sig;
665 p->p_sigcatch &= ~sig;
666 ut->uu_sigmask &= ~sig;
667 /* sendsig is called with signal lock held */
668 proc_unlock(p);
669 psignal_locked(p, SIGILL);
670 proc_lock(p);
671 }
672
673 /*
674 * System call to cleanup state after a signal
675 * has been taken. Reset signal mask and
676 * stack state from context left by sendsig (above).
677 * Return to previous * context left by sendsig.
678 * Check carefully to * make sure that the user has not
679 * modified the * spr to gain improper priviledges.
680 */
681
682 static int
sigreturn_copyin_ctx32(struct user_ucontext32 * uctx,mcontext32_t * mctx,user_addr_t uctx_addr)683 sigreturn_copyin_ctx32(struct user_ucontext32 *uctx, mcontext32_t *mctx, user_addr_t uctx_addr)
684 {
685 int error;
686
687 assert(!proc_is64bit_data(current_proc()));
688
689 error = copyin(uctx_addr, uctx, sizeof(*uctx));
690 if (error) {
691 return error;
692 }
693
694 /* validate the machine context size */
695 switch (uctx->uc_mcsize) {
696 case UC_FLAVOR_SIZE32:
697 break;
698 default:
699 return EINVAL;
700 }
701
702 assert(uctx->uc_mcsize == sizeof(*mctx));
703 error = copyin((user_addr_t)uctx->uc_mcontext, mctx, uctx->uc_mcsize);
704 if (error) {
705 return error;
706 }
707
708 return 0;
709 }
710
/*
 * Install a user-supplied 32-bit machine context on th_act, sanitizing
 * the cpsr so a forged context cannot gain privileged mode bits.
 *
 * Returns 0 on success, EINVAL if either state flavor is rejected.
 */
static int
sigreturn_set_state32(thread_t th_act, mcontext32_t *mctx)
{
	assert(!proc_is64bit_data(current_proc()));

	/* validate the thread state, set/reset appropriate mode bits in cpsr */
#if defined(__arm__)
	mctx->ss.cpsr = (mctx->ss.cpsr & ~PSR_MODE_MASK) | PSR_USERDFLT;
#elif defined(__arm64__)
	mctx->ss.cpsr = (mctx->ss.cpsr & ~PSR64_MODE_MASK) | PSR64_USER32_DEFAULT;
#else
#error Unknown architecture.
#endif

	/* Restore integer register state. */
	if (thread_setstatus_from_user(th_act, ARM_THREAD_STATE, (void *)&mctx->ss,
	    ARM_THREAD_STATE_COUNT, NULL, 0, TSSF_FLAGS_NONE) != KERN_SUCCESS) {
		return EINVAL;
	}
	/* Restore floating-point (VFP) state. */
	if (thread_setstatus_from_user(th_act, ARM_VFP_STATE, (void *)&mctx->fs,
	    ARM_VFP_STATE_COUNT, NULL, 0, TSSF_FLAGS_NONE) != KERN_SUCCESS) {
		return EINVAL;
	}

	return 0;
}
736
737 #if defined(__arm64__)
738 static int
sigreturn_copyin_ctx64(struct user_ucontext64 * uctx,mcontext64_t * mctx,user_addr_t uctx_addr)739 sigreturn_copyin_ctx64(struct user_ucontext64 *uctx, mcontext64_t *mctx, user_addr_t uctx_addr)
740 {
741 int error;
742
743 assert(proc_is64bit_data(current_proc()));
744
745 error = copyin(uctx_addr, uctx, sizeof(*uctx));
746 if (error) {
747 return error;
748 }
749
750 /* validate the machine context size */
751 switch (uctx->uc_mcsize) {
752 case UC_FLAVOR_SIZE64:
753 break;
754 default:
755 return EINVAL;
756 }
757
758 assert(uctx->uc_mcsize == sizeof(*mctx));
759 error = copyin((user_addr_t)uctx->uc_mcontext64, mctx, uctx->uc_mcsize);
760 if (error) {
761 return error;
762 }
763
764 return 0;
765 }
766
767 static int
sigreturn_set_state64(thread_t th_act,mcontext64_t * mctx,thread_set_status_flags_t tssf_flags)768 sigreturn_set_state64(thread_t th_act, mcontext64_t *mctx, thread_set_status_flags_t tssf_flags)
769 {
770 assert(proc_is64bit_data(current_proc()));
771
772 /* validate the thread state, set/reset appropriate mode bits in cpsr */
773 mctx->ss.cpsr = (mctx->ss.cpsr & ~PSR64_MODE_MASK) | PSR64_USER64_DEFAULT;
774
775 if (thread_setstatus_from_user(th_act, ARM_THREAD_STATE64, (void *)&mctx->ss,
776 ARM_THREAD_STATE64_COUNT, NULL, 0, tssf_flags) != KERN_SUCCESS) {
777 return EINVAL;
778 }
779 if (thread_setstatus_from_user(th_act, ARM_NEON_STATE64, (void *)&mctx->ns,
780 ARM_NEON_STATE64_COUNT, NULL, 0, TSSF_FLAGS_NONE) != KERN_SUCCESS) {
781 return EINVAL;
782 }
783
784 return 0;
785 }
786 #endif /* defined(__arm64__) */
787
788 /* ARGSUSED */
/*
 * sigreturn system call: restore the context saved by sendsig(), after
 * validating the user-supplied sigreturn token (unless validation is
 * disabled for the process).  Also services the UC_SET_ALT_STACK /
 * UC_RESET_ALT_STACK infostyle requests, which only toggle the altstack
 * flag and ignore uap->uctx.
 *
 * Returns EJUSTRETURN on success (the restored context supplies the
 * return path), or an errno on failure.
 */
int
sigreturn(
	struct proc * p,
	struct sigreturn_args * uap,
	__unused int *retval)
{
	union {
		user_ucontext32_t uc32;
#if defined(__arm64__)
		user_ucontext64_t uc64;
#endif
	} uctx;

	union {
		mcontext32_t mc32;
#if defined(__arm64__)
		mcontext64_t mc64;
#endif
	} mctx;

	struct sigacts *ps = &p->p_sigacts;
	int error, sigmask = 0, onstack = 0;
	thread_t th_act;
	struct uthread *ut;
	uint32_t sigreturn_validation;
	user_addr_t token_uctx;
	kern_return_t kr;

	th_act = current_thread();
	ut = (struct uthread *) get_bsdthread_info(th_act);

	/* see osfmk/kern/restartable.c */
	act_set_ast_reset_pcs(TASK_NULL, th_act);

	/*
	 * If we are being asked to change the altstack flag on the thread, we
	 * just set/reset it and return (the uap->uctx is not used).
	 */
	if ((unsigned int)uap->infostyle == UC_SET_ALT_STACK) {
		ut->uu_sigstk.ss_flags |= SA_ONSTACK;
		return 0;
	} else if ((unsigned int)uap->infostyle == UC_RESET_ALT_STACK) {
		ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;
		return 0;
	}

	/* Copy in the ucontext and machine context for the right word size. */
	if (proc_is64bit_data(p)) {
#if defined(__arm64__)
		error = sigreturn_copyin_ctx64(&uctx.uc64, &mctx.mc64, uap->uctx);
		if (error != 0) {
			return error;
		}

		onstack = uctx.uc64.uc_onstack;
		sigmask = uctx.uc64.uc_sigmask;
#else
		panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		error = sigreturn_copyin_ctx32(&uctx.uc32, &mctx.mc32, uap->uctx);
		if (error != 0) {
			return error;
		}

		onstack = uctx.uc32.uc_onstack;
		sigmask = uctx.uc32.uc_sigmask;
	}

	/* Restore the altstack flag recorded at delivery time. */
	if ((onstack & 01)) {
		ut->uu_sigstk.ss_flags |= SA_ONSTACK;
	} else {
		ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;
	}

	/* Restore the signal mask, never unblocking the unmaskable signals. */
	ut->uu_sigmask = sigmask & ~sigcantmask;
	if (ut->uu_siglist & ~ut->uu_sigmask) {
		signal_setast(current_thread());
	}

	/*
	 * Recompute the expected token: the user-visible uctx pointer XOR the
	 * per-thread random token generated in sendsig().
	 */
	sigreturn_validation = atomic_load_explicit(
		&ps->ps_sigreturn_validation, memory_order_relaxed);
	token_uctx = uap->uctx;
	kr = machine_thread_siguctx_pointer_convert_to_user(th_act, &token_uctx);
	assert(kr == KERN_SUCCESS);

	if (proc_is64bit_data(p)) {
#if defined(__arm64__)
		user64_addr_t token;
		token = (user64_addr_t)token_uctx ^ (user64_addr_t)ut->uu_sigreturn_token;
		thread_set_status_flags_t tssf_flags = TSSF_FLAGS_NONE;

		/* A mismatched token is fatal unless validation is disabled. */
		if ((user64_addr_t)uap->token != token) {
#if DEVELOPMENT || DEBUG
			printf("process %s[%d] sigreturn token mismatch: received 0x%llx expected 0x%llx\n",
			    p->p_comm, proc_getpid(p), (user64_addr_t)uap->token, token);
#endif /* DEVELOPMENT || DEBUG */
			if (sigreturn_validation != PS_SIGRETURN_VALIDATION_DISABLED) {
				return EINVAL;
			}
		}

		if (sigreturn_validation != PS_SIGRETURN_VALIDATION_DISABLED) {
			tssf_flags |= TSSF_CHECK_SIGRETURN_TOKEN;

			if (pac_sigreturn_token || task_needs_user_signed_thread_state(current_task())) {
				tssf_flags |= TSSF_ALLOW_ONLY_MATCHING_TOKEN | TSSF_THREAD_USER_DIV;
			}
		}
		error = sigreturn_set_state64(th_act, &mctx.mc64, tssf_flags);
		if (error != 0) {
#if DEVELOPMENT || DEBUG
			printf("process %s[%d] sigreturn set_state64 error %d\n",
			    p->p_comm, proc_getpid(p), error);
#endif /* DEVELOPMENT || DEBUG */
			return error;
		}
#else
		panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		user32_addr_t token;
		token = (user32_addr_t)token_uctx ^ (user32_addr_t)ut->uu_sigreturn_token;
		/* A mismatched token is fatal unless validation is disabled. */
		if ((user32_addr_t)uap->token != token) {
#if DEVELOPMENT || DEBUG
			printf("process %s[%d] sigreturn token mismatch: received 0x%x expected 0x%x\n",
			    p->p_comm, proc_getpid(p), (user32_addr_t)uap->token, token);
#endif /* DEVELOPMENT || DEBUG */
			if (sigreturn_validation != PS_SIGRETURN_VALIDATION_DISABLED) {
				return EINVAL;
			}
		}
		error = sigreturn_set_state32(th_act, &mctx.mc32);
		if (error != 0) {
#if DEVELOPMENT || DEBUG
			printf("process %s[%d] sigreturn sigreturn_set_state32 error %d\n",
			    p->p_comm, proc_getpid(p), error);
#endif /* DEVELOPMENT || DEBUG */
			return error;
		}
	}

	/* Decrement the pending sigreturn count */
	if (ut->uu_pending_sigreturn > 0) {
		ut->uu_pending_sigreturn--;
	}

	return EJUSTRETURN;
}
937
938 /*
939 * machine_exception() performs machine-dependent translation
940 * of a mach exception to a unix signal.
941 */
942 int
machine_exception(int exception,__unused mach_exception_code_t code,__unused mach_exception_subcode_t subcode)943 machine_exception(int exception,
944 __unused mach_exception_code_t code,
945 __unused mach_exception_subcode_t subcode)
946 {
947 switch (exception) {
948 case EXC_BAD_INSTRUCTION:
949 return SIGILL;
950
951 case EXC_ARITHMETIC:
952 return SIGFPE;
953 }
954
955 return 0;
956 }
957