1 /*
2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3 */
4
5 #include <kern/bits.h>
6 #include <kern/task.h>
7 #include <kern/thread.h>
8 #include <kern/assert.h>
9 #include <kern/clock.h>
10 #include <kern/locks.h>
11 #include <kern/sched_prim.h>
12 #include <mach/machine/thread_status.h>
13 #include <mach/thread_act.h>
14 #include <machine/machine_routines.h>
15 #include <arm/thread.h>
16 #include <arm/proc_reg.h>
17 #include <pexpert/pexpert.h>
18
19 #include <sys/kernel.h>
20 #include <sys/kern_debug.h>
21 #include <sys/vm.h>
22 #include <sys/proc_internal.h>
23 #include <sys/syscall.h>
24 #include <sys/systm.h>
25 #include <sys/user.h>
26 #include <sys/errno.h>
27 #include <sys/kdebug.h>
28 #include <sys/sysent.h>
29 #include <sys/sysproto.h>
30 #include <sys/kauth.h>
31 #include <sys/bitstring.h>
32
33 #include <security/audit/audit.h>
34
35 #if CONFIG_MACF
36 #include <security/mac_framework.h>
37 #endif
38
39 #if CONFIG_DTRACE
40 extern int32_t dtrace_systrace_syscall(struct proc *, void *, int *);
41 extern void dtrace_systrace_syscall_return(unsigned short, int, int *);
42 #endif /* CONFIG_DTRACE */
43
44 extern void
45 unix_syscall(struct arm_saved_state * regs, thread_t thread_act, struct proc * proc);
46
47 static int arm_get_syscall_args(uthread_t, struct arm_saved_state *, const struct sysent *);
48 static int arm_get_u32_syscall_args(uthread_t, arm_saved_state32_t *, const struct sysent *);
49 static void arm_prepare_u32_syscall_return(const struct sysent *, arm_saved_state_t *, uthread_t, int);
50 static void arm_prepare_syscall_return(const struct sysent *, struct arm_saved_state *, uthread_t, int);
51 static unsigned short arm_get_syscall_number(struct arm_saved_state *);
52 static void arm_trace_unix_syscall(int, struct arm_saved_state *);
53 static void arm_clear_syscall_error(struct arm_saved_state *);
54 #define save_r0 r[0]
55 #define save_r1 r[1]
56 #define save_r2 r[2]
57 #define save_r3 r[3]
58 #define save_r4 r[4]
59 #define save_r5 r[5]
60 #define save_r6 r[6]
61 #define save_r7 r[7]
62 #define save_r8 r[8]
63 #define save_r9 r[9]
64 #define save_r10 r[10]
65 #define save_r11 r[11]
66 #define save_r12 r[12]
67 #define save_r13 r[13]
68
69 #if COUNT_SYSCALLS
70 __XNU_PRIVATE_EXTERN int do_count_syscalls = 1;
71 __XNU_PRIVATE_EXTERN int syscalls_log[SYS_MAXSYSCALL];
72 #endif
73
74 #define code_is_kdebug_trace(code) (((code) == SYS_kdebug_trace) || \
75 ((code) == SYS_kdebug_trace64) || \
76 ((code) == SYS_kdebug_trace_string))
77
78 #if CONFIG_DEBUG_SYSCALL_REJECTION
79 extern int mach_trap_count;
80 #endif
81
82 /*
83 * Function: unix_syscall
84 *
85 * Inputs: regs - pointer to Process Control Block
86 *
87 * Outputs: none
88 */
89 #ifdef __arm__
90 __attribute__((noreturn))
91 #endif
92 void
unix_syscall(struct arm_saved_state * state,thread_t thread_act,struct proc * proc)93 unix_syscall(
94 struct arm_saved_state * state,
95 thread_t thread_act,
96 struct proc * proc)
97 {
98 const struct sysent *callp;
99 int error;
100 unsigned short code, syscode;
101 pid_t pid;
102 struct uthread *uthread = get_bsdthread_info(thread_act);
103
104 #if defined(__arm__)
105 assert(is_saved_state32(state));
106 #endif
107
108 uthread_reset_proc_refcount(uthread);
109
110 code = arm_get_syscall_number(state);
111
112 #define unix_syscall_kprintf(x...) /* kprintf("unix_syscall: " x) */
113
114 if (kdebug_enable && !code_is_kdebug_trace(code)) {
115 arm_trace_unix_syscall(code, state);
116 }
117
118
119 syscode = (code < nsysent) ? code : SYS_invalid;
120 callp = &sysent[syscode];
121
122 /*
123 * sy_narg is inaccurate on ARM if a 64 bit parameter is specified. Since user_addr_t
124 * is currently a 32 bit type, this is really a long word count. See rdar://problem/6104668.
125 */
126 if (callp->sy_narg != 0) {
127 if (arm_get_syscall_args(uthread, state, callp) != 0) {
128 /* Too many arguments, or something failed */
129 unix_syscall_kprintf("arm_get_syscall_args failed.\n");
130 callp = &sysent[SYS_invalid];
131 }
132 }
133
134 uthread->uu_flag |= UT_NOTCANCELPT;
135 uthread->syscall_code = code;
136
137 uthread->uu_rval[0] = 0;
138
139 /*
140 * r4 is volatile, if we set it to regs->save_r4 here the child
141 * will have parents r4 after execve
142 */
143 uthread->uu_rval[1] = 0;
144
145 error = 0;
146
147 /*
148 * ARM runtime will call cerror if the carry bit is set after a
149 * system call, so clear it here for the common case of success.
150 */
151 arm_clear_syscall_error(state);
152
153 #if COUNT_SYSCALLS
154 if (do_count_syscalls > 0) {
155 syscalls_log[code]++;
156 }
157 #endif
158 pid = proc_pid(proc);
159
160 #ifdef CONFIG_IOCOUNT_TRACE
161 uthread->uu_iocount = 0;
162 uthread->uu_vpindex = 0;
163 #endif
164 unix_syscall_kprintf("code %d (pid %d - %s, tid %lld)\n", code,
165 pid, proc->p_comm, thread_tid(current_thread()));
166
167 #if CONFIG_MACF
168 if (__improbable(proc_syscall_filter_mask(proc) != NULL && !bitstr_test(proc_syscall_filter_mask(proc), syscode))) {
169 error = mac_proc_check_syscall_unix(proc, syscode);
170 if (error) {
171 goto skip_syscall;
172 }
173 }
174 #endif /* CONFIG_MACF */
175
176 #if CONFIG_DEBUG_SYSCALL_REJECTION
177 if (__improbable(uthread->syscall_rejection_mask != NULL &&
178 debug_syscall_rejection_mode != 0) &&
179 !bitmap_test(uthread->syscall_rejection_mask, mach_trap_count + syscode)) {
180 if (debug_syscall_rejection_handle(syscode)) {
181 goto skip_syscall;
182 }
183 }
184 #endif /* CONFIG_DEBUG_SYSCALL_REJECTION */
185
186 AUDIT_SYSCALL_ENTER(code, proc, uthread);
187 error = (*(callp->sy_call))(proc, &uthread->uu_arg[0], &(uthread->uu_rval[0]));
188 AUDIT_SYSCALL_EXIT(code, proc, uthread, error);
189
190 #if CONFIG_MACF
191 skip_syscall:
192 #endif /* CONFIG_MACF */
193
194 unix_syscall_kprintf("code %d, error %d, results %x, %x (pid %d - %s, tid %lld)\n", code, error,
195 uthread->uu_rval[0], uthread->uu_rval[1],
196 pid, get_bsdtask_info(current_task()) ? proc->p_comm : "unknown", thread_tid(current_thread()));
197
198 #ifdef CONFIG_IOCOUNT_TRACE
199 if (uthread->uu_iocount) {
200 printf("system call returned with uu_iocount(%d) != 0",
201 uthread->uu_iocount);
202 }
203 #endif
204 #if CONFIG_DTRACE
205 uthread->t_dtrace_errno = error;
206 #endif /* CONFIG_DTRACE */
207 #if DEBUG || DEVELOPMENT
208 kern_allocation_name_t
209 prior __assert_only = thread_set_allocation_name(NULL);
210 assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
211 #endif /* DEBUG || DEVELOPMENT */
212
213 arm_prepare_syscall_return(callp, state, uthread, error);
214
215 uthread->uu_flag &= ~UT_NOTCANCELPT;
216 uthread->syscall_code = 0;
217
218 if (uthread->uu_lowpri_window) {
219 /*
220 * task is marked as a low priority I/O type
221 * and the I/O we issued while in this system call
222 * collided with normal I/O operations... we'll
223 * delay in order to mitigate the impact of this
224 * task on the normal operation of the system
225 */
226 throttle_lowpri_io(1);
227 }
228 if (kdebug_enable && !code_is_kdebug_trace(code)) {
229 KDBG_RELEASE(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
230 error, uthread->uu_rval[0], uthread->uu_rval[1], pid);
231 }
232
233 uthread_assert_zero_proc_refcount(uthread);
234 #ifdef __arm__
235 thread_exception_return();
236 #endif
237 }
238
/*
 * unix_syscall_return
 *
 * Completion path for a system call whose handler returned via a
 * continuation (e.g. after blocking) rather than falling back out of
 * unix_syscall().  Re-derives the thread/proc/register context, performs
 * the same epilogue as unix_syscall(), and returns to user mode.
 *
 * error: the errno-style result of the syscall handler (0, errno value,
 *        ERESTART, or EJUSTRETURN).  Never returns.
 */
void
unix_syscall_return(int error)
{
	thread_t thread_act;
	struct uthread *uthread;
	struct proc *proc;
	struct arm_saved_state *regs;
	unsigned short code;
	const struct sysent *callp;

#define unix_syscall_return_kprintf(x...)       /* kprintf("unix_syscall_return: " x) */

	thread_act = current_thread();
	proc = current_proc();
	uthread = get_bsdthread_info(thread_act);

	/*
	 * Recover the trap-time register state and the syscall number that
	 * unix_syscall() stashed in uthread->syscall_code before blocking.
	 */
	regs = find_user_regs(thread_act);
	code = uthread->syscall_code;
	callp = (code >= nsysent) ? &sysent[SYS_invalid] : &sysent[code];

#if CONFIG_DTRACE
	/* Fire the systrace return probe that the blocked path skipped. */
	if (callp->sy_call == dtrace_systrace_syscall) {
		dtrace_systrace_syscall_return( code, error, uthread->uu_rval );
	}
#endif /* CONFIG_DTRACE */
#if DEBUG || DEVELOPMENT
	kern_allocation_name_t
	prior __assert_only = thread_set_allocation_name(NULL);
	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* DEBUG || DEVELOPMENT */

	AUDIT_SYSCALL_EXIT(code, proc, uthread, error);

	/*
	 * Materialize error/return values into the saved user registers.
	 */
	arm_prepare_syscall_return(callp, regs, uthread, error);

	uthread->uu_flag &= ~UT_NOTCANCELPT;
	uthread->syscall_code = 0;

	if (uthread->uu_lowpri_window) {
		/*
		 * task is marked as a low priority I/O type
		 * and the I/O we issued while in this system call
		 * collided with normal I/O operations... we'll
		 * delay in order to mitigate the impact of this
		 * task on the normal operation of the system
		 */
		throttle_lowpri_io(1);
	}
	if (kdebug_enable && !code_is_kdebug_trace(code)) {
		KDBG_RELEASE(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
		    error, uthread->uu_rval[0], uthread->uu_rval[1], proc_getpid(proc));
	}

	thread_exception_return();
	/* NOTREACHED */
}
299
300 static void
arm_prepare_u32_syscall_return(const struct sysent * callp,arm_saved_state_t * regs,uthread_t uthread,int error)301 arm_prepare_u32_syscall_return(const struct sysent *callp, arm_saved_state_t *regs, uthread_t uthread, int error)
302 {
303 assert(is_saved_state32(regs));
304
305 arm_saved_state32_t *ss32 = saved_state32(regs);
306
307 if (error == ERESTART) {
308 ss32->pc -= 4;
309 } else if (error != EJUSTRETURN) {
310 if (error) {
311 ss32->save_r0 = error;
312 ss32->save_r1 = 0;
313 /* set the carry bit to execute cerror routine */
314 ss32->cpsr |= PSR_CF;
315 unix_syscall_return_kprintf("error: setting carry to trigger cerror call\n");
316 } else { /* (not error) */
317 switch (callp->sy_return_type) {
318 case _SYSCALL_RET_INT_T:
319 case _SYSCALL_RET_UINT_T:
320 case _SYSCALL_RET_OFF_T:
321 case _SYSCALL_RET_ADDR_T:
322 case _SYSCALL_RET_SIZE_T:
323 case _SYSCALL_RET_SSIZE_T:
324 case _SYSCALL_RET_UINT64_T:
325 ss32->save_r0 = uthread->uu_rval[0];
326 ss32->save_r1 = uthread->uu_rval[1];
327 break;
328 case _SYSCALL_RET_NONE:
329 ss32->save_r0 = 0;
330 ss32->save_r1 = 0;
331 break;
332 default:
333 panic("unix_syscall: unknown return type");
334 break;
335 }
336 }
337 }
338 /* else (error == EJUSTRETURN) { nothing } */
339 }
340
341 static void
arm_trace_u32_unix_syscall(int code,arm_saved_state32_t * regs)342 arm_trace_u32_unix_syscall(int code, arm_saved_state32_t *regs)
343 {
344 bool indirect = (regs->save_r12 == 0);
345 if (indirect) {
346 KDBG_RELEASE(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
347 regs->save_r1, regs->save_r2, regs->save_r3, regs->save_r4);
348 } else {
349 KDBG_RELEASE(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
350 regs->save_r0, regs->save_r1, regs->save_r2, regs->save_r3);
351 }
352 }
353
354 static void
arm_clear_u32_syscall_error(arm_saved_state32_t * regs)355 arm_clear_u32_syscall_error(arm_saved_state32_t *regs)
356 {
357 regs->cpsr &= ~PSR_CF;
358 }
359
360 #if defined(__arm__)
361
/*
 * Marshal syscall arguments from the saved thread state into uu_arg.
 * An AArch32 kernel only ever sees 32-bit saved state, so this always
 * takes the u32 path.  Returns 0 on success, -1 on failure.
 */
static int
arm_get_syscall_args(uthread_t uthread, struct arm_saved_state *state, const struct sysent *callp)
{
	assert(is_saved_state32(state));
	return arm_get_u32_syscall_args(uthread, saved_state32(state), callp);
}
368
369 #if __arm__ && (__BIGGEST_ALIGNMENT__ > 4)
370 /*
371 * For armv7k, the alignment constraints of the ABI mean we don't know how the userspace
 * arguments are arranged without knowing the prototype of the syscall. So we use mungers
373 * to marshal the userspace data into the uu_arg. This also means we need the same convention
374 * as mach syscalls. That means we use r8 to pass arguments in the BSD case as well.
375 */
376 static int
arm_get_u32_syscall_args(uthread_t uthread,arm_saved_state32_t * regs,const struct sysent * callp)377 arm_get_u32_syscall_args(uthread_t uthread, arm_saved_state32_t *regs, const struct sysent *callp)
378 {
379 sy_munge_t *munger;
380
381 /* This check is probably not very useful since these both come from build-time */
382 if (callp->sy_arg_bytes > sizeof(uthread->uu_arg)) {
383 return -1;
384 }
385
386 /* get the munger and use it to marshal in the data from userspace */
387 munger = callp->sy_arg_munge32;
388 if (munger == NULL || (callp->sy_arg_bytes == 0)) {
389 return 0;
390 }
391
392 return munger(regs, uthread->uu_arg);
393 }
394 #else
/*
 * For an AArch32 kernel, where we know that we have only AArch32 userland,
 * we do not do any munging (which is a little confusing, as it is a contrast
 * to the i386 kernel, where, like the x86_64 kernel, we always munge
 * arguments from a 32-bit userland out to 64-bit).
 */
401 static int
arm_get_u32_syscall_args(uthread_t uthread,arm_saved_state32_t * regs,const struct sysent * callp)402 arm_get_u32_syscall_args(uthread_t uthread, arm_saved_state32_t *regs, const struct sysent *callp)
403 {
404 int regparams;
405 int flavor = (regs->save_r12 == 0 ? 1 : 0);
406
407 regparams = (7 - flavor); /* Indirect value consumes a register */
408
409 assert((unsigned) callp->sy_arg_bytes <= sizeof(uthread->uu_arg));
410
411 if (callp->sy_arg_bytes <= (sizeof(uint32_t) * regparams)) {
412 /*
413 * Seven arguments or less are passed in registers.
414 */
415 memcpy(&uthread->uu_arg[0], ®s->r[flavor], callp->sy_arg_bytes);
416 } else if (callp->sy_arg_bytes <= sizeof(uthread->uu_arg)) {
417 /*
418 * In this case, we composite - take the first args from registers,
419 * the remainder from the stack (offset by the 7 regs therein).
420 */
421 unix_syscall_kprintf("%s: spillover...\n", __FUNCTION__);
422 memcpy(&uthread->uu_arg[0], ®s->r[flavor], regparams * sizeof(int));
423 if (copyin((user_addr_t)regs->sp + 7 * sizeof(int), (int *)&uthread->uu_arg[0] + regparams,
424 (callp->sy_arg_bytes - (sizeof(uint32_t) * regparams))) != 0) {
425 return -1;
426 }
427 } else {
428 return -1;
429 }
430
431 return 0;
432 }
433 #endif
434
435 static unsigned short
arm_get_syscall_number(struct arm_saved_state * regs)436 arm_get_syscall_number(struct arm_saved_state *regs)
437 {
438 if (regs->save_r12 != 0) {
439 return (unsigned short)regs->save_r12;
440 } else {
441 return (unsigned short)regs->save_r0;
442 }
443 }
444
/*
 * Write the syscall's error/return value into the saved user registers.
 * An AArch32 kernel only ever has 32-bit saved state, so this always
 * takes the u32 path.
 */
static void
arm_prepare_syscall_return(const struct sysent *callp, struct arm_saved_state *state, uthread_t uthread, int error)
{
	assert(is_saved_state32(state));
	arm_prepare_u32_syscall_return(callp, state, uthread, error);
}
451
/*
 * Emit the kdebug DBG_FUNC_START tracepoint for syscall entry; AArch32
 * kernels only have 32-bit saved state.
 */
static void
arm_trace_unix_syscall(int code, struct arm_saved_state *state)
{
	assert(is_saved_state32(state));
	arm_trace_u32_unix_syscall(code, saved_state32(state));
}
458
/*
 * Clear the saved carry flag before dispatch so the common success path
 * needs no further register fix-up; AArch32 kernels only have 32-bit
 * saved state.
 */
static void
arm_clear_syscall_error(struct arm_saved_state * state)
{
	assert(is_saved_state32(state));
	arm_clear_u32_syscall_error(saved_state32(state));
}
465
466 #elif defined(__arm64__)
467 static void arm_prepare_u64_syscall_return(const struct sysent *, arm_saved_state_t *, uthread_t, int);
468 static int arm_get_u64_syscall_args(uthread_t, arm_saved_state64_t *, const struct sysent *);
469
470 static int
arm_get_syscall_args(uthread_t uthread,struct arm_saved_state * state,const struct sysent * callp)471 arm_get_syscall_args(uthread_t uthread, struct arm_saved_state *state, const struct sysent *callp)
472 {
473 if (is_saved_state32(state)) {
474 return arm_get_u32_syscall_args(uthread, saved_state32(state), callp);
475 } else {
476 return arm_get_u64_syscall_args(uthread, saved_state64(state), callp);
477 }
478 }
479
480 /*
481 * 64-bit: all arguments in registers. We're willing to use x9, a temporary
482 * register per the ABI, to pass an argument to the kernel for one case,
483 * an indirect syscall with 8 arguments. No munging required, as all arguments
484 * are in 64-bit wide registers already.
485 */
486 static int
arm_get_u64_syscall_args(uthread_t uthread,arm_saved_state64_t * regs,const struct sysent * callp)487 arm_get_u64_syscall_args(uthread_t uthread, arm_saved_state64_t *regs, const struct sysent *callp)
488 {
489 int indirect_offset;
490
491 #if CONFIG_REQUIRES_U32_MUNGING
492 sy_munge_t *mungerp;
493 #endif
494
495 indirect_offset = (regs->x[ARM64_SYSCALL_CODE_REG_NUM] == 0) ? 1 : 0;
496
497 /*
498 * Everything should fit in registers for now.
499 */
500 if (callp->sy_narg > (int)(sizeof(uthread->uu_arg) / sizeof(uthread->uu_arg[0]))) {
501 return -1;
502 }
503
504 memcpy(&uthread->uu_arg[0], ®s->x[indirect_offset], callp->sy_narg * sizeof(uint64_t));
505
506 #if CONFIG_REQUIRES_U32_MUNGING
507 /*
508 * The indirect system call interface is vararg based. For armv7k, arm64_32,
509 * and arm64, this means we simply lay the values down on the stack, padded to
510 * a width multiple (4 bytes for armv7k and arm64_32, 8 bytes for arm64).
511 * The arm64(_32) stub for syscall will load this data into the registers and
512 * then trap. This gives us register state that corresponds to what we would
513 * expect from a armv7 task, so in this particular case we need to munge the
514 * arguments.
515 *
516 * TODO: Is there a cleaner way to do this check? What we're actually
517 * interested in is whether the task is arm64_32. We don't appear to guarantee
518 * that uu_proc is populated here, which is why this currently uses the
519 * thread_t.
520 */
521 mungerp = callp->sy_arg_munge32;
522
523 if (indirect_offset && !ml_thread_is64bit(get_machthread(uthread))) {
524 (*mungerp)(&uthread->uu_arg[0]);
525 }
526 #endif
527
528 return 0;
529 }
530 /*
531 * When the kernel is running AArch64, munge arguments from 32-bit
532 * userland out to 64-bit.
533 *
534 * flavor == 1 indicates an indirect syscall.
535 */
536 static int
arm_get_u32_syscall_args(uthread_t uthread,arm_saved_state32_t * regs,const struct sysent * callp)537 arm_get_u32_syscall_args(uthread_t uthread, arm_saved_state32_t *regs, const struct sysent *callp)
538 {
539 int regparams;
540 #if CONFIG_REQUIRES_U32_MUNGING
541 sy_munge_t *mungerp;
542 #else
543 #error U32 syscalls on ARM64 kernel requires munging
544 #endif
545 int flavor = (regs->save_r12 == 0 ? 1 : 0);
546
547 regparams = (7 - flavor); /* Indirect value consumes a register */
548
549 assert((unsigned) callp->sy_arg_bytes <= sizeof(uthread->uu_arg));
550
551 if (callp->sy_arg_bytes <= (sizeof(uint32_t) * regparams)) {
552 /*
553 * Seven arguments or less are passed in registers.
554 */
555 memcpy(&uthread->uu_arg[0], ®s->r[flavor], callp->sy_arg_bytes);
556 } else if (callp->sy_arg_bytes <= sizeof(uthread->uu_arg)) {
557 /*
558 * In this case, we composite - take the first args from registers,
559 * the remainder from the stack (offset by the 7 regs therein).
560 */
561 unix_syscall_kprintf("%s: spillover...\n", __FUNCTION__);
562 memcpy(&uthread->uu_arg[0], ®s->r[flavor], regparams * sizeof(int));
563 if (copyin((user_addr_t)regs->sp + 7 * sizeof(int), (int *)&uthread->uu_arg[0] + regparams,
564 (callp->sy_arg_bytes - (sizeof(uint32_t) * regparams))) != 0) {
565 return -1;
566 }
567 } else {
568 return -1;
569 }
570
571 #if CONFIG_REQUIRES_U32_MUNGING
572 /* Munge here */
573 mungerp = callp->sy_arg_munge32;
574 if (mungerp != NULL) {
575 (*mungerp)(&uthread->uu_arg[0]);
576 }
577 #endif
578
579 return 0;
580 }
581
582 static unsigned short
arm_get_syscall_number(struct arm_saved_state * state)583 arm_get_syscall_number(struct arm_saved_state *state)
584 {
585 if (is_saved_state32(state)) {
586 if (saved_state32(state)->save_r12 != 0) {
587 return (unsigned short)saved_state32(state)->save_r12;
588 } else {
589 return (unsigned short)saved_state32(state)->save_r0;
590 }
591 } else {
592 if (saved_state64(state)->x[ARM64_SYSCALL_CODE_REG_NUM] != 0) {
593 return (unsigned short)saved_state64(state)->x[ARM64_SYSCALL_CODE_REG_NUM];
594 } else {
595 return (unsigned short)saved_state64(state)->x[0];
596 }
597 }
598 }
599
600 static void
arm_prepare_syscall_return(const struct sysent * callp,struct arm_saved_state * state,uthread_t uthread,int error)601 arm_prepare_syscall_return(const struct sysent *callp, struct arm_saved_state *state, uthread_t uthread, int error)
602 {
603 if (is_saved_state32(state)) {
604 arm_prepare_u32_syscall_return(callp, state, uthread, error);
605 } else {
606 arm_prepare_u64_syscall_return(callp, state, uthread, error);
607 }
608 }
609
610 static void
arm_prepare_u64_syscall_return(const struct sysent * callp,arm_saved_state_t * regs,uthread_t uthread,int error)611 arm_prepare_u64_syscall_return(const struct sysent *callp, arm_saved_state_t *regs, uthread_t uthread, int error)
612 {
613 assert(is_saved_state64(regs));
614
615 arm_saved_state64_t *ss64 = saved_state64(regs);
616
617 if (error == ERESTART) {
618 add_saved_state_pc(regs, -4);
619 } else if (error != EJUSTRETURN) {
620 if (error) {
621 ss64->x[0] = error;
622 ss64->x[1] = 0;
623 /*
624 * Set the carry bit to execute cerror routine.
625 * ARM64_TODO: should we have a separate definition?
626 * The bits are the same.
627 */
628 ss64->cpsr |= PSR_CF;
629 unix_syscall_return_kprintf("error: setting carry to trigger cerror call\n");
630 } else { /* (not error) */
631 switch (callp->sy_return_type) {
632 case _SYSCALL_RET_INT_T:
633 ss64->x[0] = uthread->uu_rval[0];
634 ss64->x[1] = uthread->uu_rval[1];
635 break;
636 case _SYSCALL_RET_UINT_T:
637 ss64->x[0] = (u_int)uthread->uu_rval[0];
638 ss64->x[1] = (u_int)uthread->uu_rval[1];
639 break;
640 case _SYSCALL_RET_OFF_T:
641 case _SYSCALL_RET_ADDR_T:
642 case _SYSCALL_RET_SIZE_T:
643 case _SYSCALL_RET_SSIZE_T:
644 case _SYSCALL_RET_UINT64_T:
645 ss64->x[0] = *((uint64_t *)(&uthread->uu_rval[0]));
646 ss64->x[1] = 0;
647 break;
648 case _SYSCALL_RET_NONE:
649 break;
650 default:
651 panic("unix_syscall: unknown return type");
652 break;
653 }
654 }
655 }
656 /* else (error == EJUSTRETURN) { nothing } */
657 }
658 static void
arm_trace_u64_unix_syscall(int code,arm_saved_state64_t * regs)659 arm_trace_u64_unix_syscall(int code, arm_saved_state64_t *regs)
660 {
661 bool indirect = (regs->x[ARM64_SYSCALL_CODE_REG_NUM] == 0);
662 if (indirect) {
663 KDBG_RELEASE(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
664 regs->x[1], regs->x[2], regs->x[3], regs->x[4]);
665 } else {
666 KDBG_RELEASE(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
667 regs->x[0], regs->x[1], regs->x[2], regs->x[3]);
668 }
669 }
670
/*
 * Emit the kdebug DBG_FUNC_START tracepoint for syscall entry, routed to
 * the tracer matching the saved-state flavor.
 */
static void
arm_trace_unix_syscall(int code, struct arm_saved_state *state)
{
	if (!is_saved_state32(state)) {
		arm_trace_u64_unix_syscall(code, saved_state64(state));
	} else {
		arm_trace_u32_unix_syscall(code, saved_state32(state));
	}
}
680
681 static void
arm_clear_u64_syscall_error(arm_saved_state64_t * regs)682 arm_clear_u64_syscall_error(arm_saved_state64_t *regs)
683 {
684 /*
685 * ARM64_TODO: should we have a separate definition?
686 * The bits are the same.
687 */
688 regs->cpsr &= ~PSR_CF;
689 }
690
/*
 * Clear the saved carry flag before dispatch so the common success path
 * needs no further register fix-up, routed by saved-state flavor.
 */
static void
arm_clear_syscall_error(struct arm_saved_state * state)
{
	if (!is_saved_state32(state)) {
		arm_clear_u64_syscall_error(saved_state64(state));
	} else {
		arm_clear_u32_syscall_error(saved_state32(state));
	}
}
700
701 #else
702 #error Unknown architecture.
703 #endif
704