// Copyright (c) 2016-2021 Apple Inc. All rights reserved.
//
// @APPLE_OSREFERENCE_LICENSE_HEADER_START@
//
// This file contains Original Code and/or Modifications of Original Code
// as defined in and that are subject to the Apple Public Source License
// Version 2.0 (the 'License'). You may not use this file except in
// compliance with the License. The rights granted to you under the License
// may not be used to create, or enable the creation or redistribution of,
// unlawful or unlicensed copies of an Apple operating system, or to
// circumvent, violate, or enable the circumvention or violation of, any
// terms of an Apple operating system software license agreement.
//
// Please obtain a copy of the License at
// http://www.opensource.apple.com/apsl/ and read it before using this file.
//
// The Original Code and all software distributed under the License are
// distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
// EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
// INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
// Please see the License for the specific language governing rights and
// limitations under the License.
//
// @APPLE_OSREFERENCE_LICENSE_HEADER_END@
26
27 #include <stddef.h>
28 #include <stdint.h>
29
30 #include <kern/assert.h>
31 #include <kern/backtrace.h>
32 #include <kern/cambria_layout.h>
33 #include <kern/thread.h>
34 #include <sys/errno.h>
35 #include <vm/vm_map.h>
36
37 #if defined(__arm__) || defined(__arm64__)
38 #include <arm/cpu_data.h>
39 #include <arm/cpu_data_internal.h>
40 #endif // defined(__arm__) || defined(__arm64__)
41
42 #if defined(HAS_APPLE_PAC)
43 #include <ptrauth.h>
44 #endif // defined(HAS_APPLE_PAC)
45
46 #if XNU_MONITOR
47 #define IN_PPLSTK_BOUNDS(__addr) \
48 (((uintptr_t)(__addr) >= (uintptr_t)pmap_stacks_start) && \
49 ((uintptr_t)(__addr) < (uintptr_t)pmap_stacks_end))
50 #endif
51
// This function is fast because it does no checking to make sure there isn't
// bad data.
//
// Since it's only called from threads that we're going to keep executing,
// if there's bad data the system is going to die eventually. If this function
// is inlined, it doesn't record the frame of the function it's inside (because
// there's no stack frame), so prevent that.
59 static unsigned int __attribute__((noinline, not_tail_called))
backtrace_internal(uintptr_t * bt,unsigned int max_frames,void * start_frame,int64_t addr_offset,backtrace_info_t * info_out)60 backtrace_internal(uintptr_t *bt, unsigned int max_frames, void *start_frame,
61 int64_t addr_offset, backtrace_info_t *info_out)
62 {
63 thread_t thread = current_thread();
64 uintptr_t *fp;
65 unsigned int frame_index = 0;
66 uintptr_t top, bottom;
67 bool in_valid_stack;
68
69 assert(bt != NULL);
70 assert(max_frames > 0);
71
72 fp = start_frame;
73 bottom = thread->kernel_stack;
74 top = bottom + kernel_stack_size;
75
76 #define IN_STK_BOUNDS(__addr) \
77 (((uintptr_t)(__addr) >= (uintptr_t)bottom) && \
78 ((uintptr_t)(__addr) < (uintptr_t)top))
79
80 in_valid_stack = IN_STK_BOUNDS(fp);
81 #if XNU_MONITOR
82 in_valid_stack |= IN_PPLSTK_BOUNDS(fp);
83 #endif /* XNU_MONITOR */
84
85 if (!in_valid_stack) {
86 fp = NULL;
87 }
88
89 while (fp != NULL && frame_index < max_frames) {
90 uintptr_t *next_fp = (uintptr_t *)*fp;
91 // Return address is one word higher than frame pointer.
92 uintptr_t ret_addr = *(fp + 1);
93
94 // If the frame pointer is 0, backtracing has reached the top of
95 // the stack and there is no return address. Some stacks might not
96 // have set this up, so bounds check, as well.
97 in_valid_stack = IN_STK_BOUNDS(next_fp);
98 #if XNU_MONITOR
99 in_valid_stack |= IN_PPLSTK_BOUNDS(next_fp);
100 #endif /* XNU_MONITOR */
101
102 if (next_fp == NULL || !in_valid_stack) {
103 break;
104 }
105
106 #if defined(HAS_APPLE_PAC)
107 // Return addresses are signed by arm64e ABI, so strip it.
108 bt[frame_index++] = (uintptr_t)ptrauth_strip((void *)ret_addr,
109 ptrauth_key_return_address) + addr_offset;
110 #else // defined(HAS_APPLE_PAC)
111 bt[frame_index++] = ret_addr + addr_offset;
112 #endif // !defined(HAS_APPLE_PAC)
113
114 // Stacks grow down; backtracing should be moving to higher addresses.
115 if (next_fp <= fp) {
116 #if XNU_MONITOR
117 bool fp_in_pplstack = IN_PPLSTK_BOUNDS(fp);
118 bool fp_in_kstack = IN_STK_BOUNDS(fp);
119 bool next_fp_in_pplstack = IN_PPLSTK_BOUNDS(fp);
120 bool next_fp_in_kstack = IN_STK_BOUNDS(fp);
121
122 // This check is verbose; it is basically checking whether this
123 // thread is switching between the kernel stack and the CPU stack.
124 // If so, ignore the fact that frame pointer has switched directions
125 // (as it is a symptom of switching stacks).
126 if (((fp_in_pplstack) && (next_fp_in_kstack)) ||
127 ((fp_in_kstack) && (next_fp_in_pplstack))) {
128 break;
129 }
130 #else /* XNU_MONITOR */
131 break;
132 #endif /* !XNU_MONITOR */
133 }
134 fp = next_fp;
135 }
136
137 // NULL-terminate the list, if space is available.
138 if (frame_index != max_frames) {
139 bt[frame_index] = 0;
140 }
141
142 if (info_out) {
143 backtrace_info_t info = BTI_NONE;
144 #if __LP64__
145 info |= BTI_64_BIT;
146 #endif
147 if (fp != NULL && frame_index == max_frames) {
148 info |= BTI_TRUNCATED;
149 }
150 *info_out = info;
151 }
152
153 return frame_index;
154 #undef IN_STK_BOUNDS
155 }
156
157 static kern_return_t
interrupted_kernel_pc_fp(uintptr_t * pc,uintptr_t * fp)158 interrupted_kernel_pc_fp(uintptr_t *pc, uintptr_t *fp)
159 {
160 #if defined(__x86_64__)
161 x86_saved_state_t *state;
162 bool state_64;
163 uint64_t cs;
164
165 state = current_cpu_datap()->cpu_int_state;
166 if (!state) {
167 return KERN_FAILURE;
168 }
169
170 state_64 = is_saved_state64(state);
171
172 if (state_64) {
173 cs = saved_state64(state)->isf.cs;
174 } else {
175 cs = saved_state32(state)->cs;
176 }
177 // Return early if interrupted a thread in user space.
178 if ((cs & SEL_PL) == SEL_PL_U) {
179 return KERN_FAILURE;
180 }
181
182 if (state_64) {
183 *pc = saved_state64(state)->isf.rip;
184 *fp = saved_state64(state)->rbp;
185 } else {
186 *pc = saved_state32(state)->eip;
187 *fp = saved_state32(state)->ebp;
188 }
189
190 #elif defined(__arm64__)
191
192 struct arm_saved_state *state;
193 bool state_64;
194
195 state = getCpuDatap()->cpu_int_state;
196 if (!state) {
197 return KERN_FAILURE;
198 }
199 state_64 = is_saved_state64(state);
200
201 // Return early if interrupted a thread in user space.
202 if (PSR64_IS_USER(get_saved_state_cpsr(state))) {
203 return KERN_FAILURE;
204 }
205
206 *pc = get_saved_state_pc(state);
207 *fp = get_saved_state_fp(state);
208
209 #elif defined(__arm__)
210
211 struct arm_saved_state *state;
212
213 state = getCpuDatap()->cpu_int_state;
214 if (!state) {
215 return KERN_FAILURE;
216 }
217
218 /* return early if interrupted a thread in user space */
219 if (PSR_IS_USER(get_saved_state_cpsr(state))) {
220 return KERN_FAILURE;
221 }
222
223 *pc = get_saved_state_pc(state);
224 *fp = get_saved_state_fp(state);
225
226 #else // !defined(__arm__) && !defined(__arm64__) && !defined(__x86_64__)
227 #error "unsupported architecture"
228 #endif // !defined(__arm__) && !defined(__arm64__) && !defined(__x86_64__)
229
230 return KERN_SUCCESS;
231 }
232
233 unsigned int __attribute__((noinline))
backtrace(uintptr_t * bt,unsigned int max_frames,struct backtrace_control * ctl,backtrace_info_t * info_out)234 backtrace(uintptr_t *bt, unsigned int max_frames,
235 struct backtrace_control *ctl, backtrace_info_t *info_out)
236 {
237 backtrace_flags_t flags = ctl ? ctl->btc_flags : 0;
238 uintptr_t start_frame = ctl ? ctl->btc_frame_addr : 0;
239 unsigned int len_adj = 0;
240 if (flags & BTF_KERN_INTERRUPTED) {
241 assert(bt != NULL);
242 assert(max_frames > 0);
243 assert(ml_at_interrupt_context() == TRUE);
244
245 uintptr_t pc, fp;
246 kern_return_t kr = interrupted_kernel_pc_fp(&pc, &fp);
247 if (kr != KERN_SUCCESS) {
248 return 0;
249 }
250
251 bt[0] = pc;
252 if (max_frames == 1) {
253 return 1;
254 }
255 bt += 1;
256 max_frames -= 1;
257 len_adj = 1;
258 start_frame = start_frame ?: fp;
259 } else if (start_frame == 0) {
260 start_frame = (uintptr_t)__builtin_frame_address(0);
261 }
262
263 unsigned int len = backtrace_internal(bt, max_frames, (void *)start_frame,
264 ctl ? ctl->btc_addr_offset : 0, info_out);
265 return len + len_adj;
266 }
267
268 static errno_t
_backtrace_copyin(void * __unused ctx,void * dst,user_addr_t src,size_t size)269 _backtrace_copyin(void * __unused ctx, void *dst, user_addr_t src, size_t size)
270 {
271 return copyin((user_addr_t)src, dst, size);
272 }
273
274 errno_t
backtrace_user_copy_error(void * ctx,void * dst,user_addr_t src,size_t size)275 backtrace_user_copy_error(void *ctx, void *dst, user_addr_t src, size_t size)
276 {
277 #pragma unused(ctx, dst, src, size)
278 return EFAULT;
279 }
280
281 unsigned int
backtrace_user(uintptr_t * bt,unsigned int max_frames,const struct backtrace_control * ctl_in,struct backtrace_user_info * info_out)282 backtrace_user(uintptr_t *bt, unsigned int max_frames,
283 const struct backtrace_control *ctl_in,
284 struct backtrace_user_info *info_out)
285 {
286 static const struct backtrace_control ctl_default = {
287 .btc_user_copy = _backtrace_copyin,
288 };
289 const struct backtrace_control *ctl = ctl_in ?: &ctl_default;
290 uintptr_t pc = 0, next_fp = 0;
291 uintptr_t fp = ctl->btc_frame_addr;
292 bool custom_fp = fp != 0;
293 int64_t addr_offset = ctl ? ctl->btc_addr_offset : 0;
294 vm_map_t map = NULL, old_map = NULL;
295 unsigned int frame_index = 0;
296 int error = 0;
297 size_t frame_size = 0;
298 bool truncated = false;
299 bool user_64 = false;
300 bool allow_async = true;
301 bool has_async = false;
302 uintptr_t async_frame_addr = 0;
303 unsigned int async_index = 0;
304
305 backtrace_user_copy_fn copy = ctl->btc_user_copy ?: _backtrace_copyin;
306 bool custom_copy = copy != _backtrace_copyin;
307 void *ctx = ctl->btc_user_copy_context;
308
309 void *thread = ctl->btc_user_thread;
310 void *cur_thread = NULL;
311 if (thread == NULL) {
312 cur_thread = current_thread();
313 thread = cur_thread;
314 }
315 task_t task = get_threadtask(thread);
316
317 assert(task != NULL);
318 assert(bt != NULL);
319 assert(max_frames > 0);
320
321 if (!custom_copy) {
322 assert(ml_get_interrupts_enabled() == TRUE);
323 if (!ml_get_interrupts_enabled()) {
324 error = EDEADLK;
325 }
326
327 if (cur_thread == NULL) {
328 cur_thread = current_thread();
329 }
330 if (thread != cur_thread) {
331 map = get_task_map_reference(task);
332 if (map == NULL) {
333 return ENOMEM;
334 }
335 old_map = vm_map_switch(map);
336 }
337 }
338
339 #define SWIFT_ASYNC_FP_BIT (0x1ULL << 60)
340 #define SWIFT_ASYNC_FP(FP) (((FP) & SWIFT_ASYNC_FP_BIT) != 0)
341 #define SWIFT_ASYNC_FP_CLEAR(FP) ((FP) & ~SWIFT_ASYNC_FP_BIT)
342
343 #if defined(__x86_64__)
344
345 // Don't allow a malformed user stack to copy arbitrary kernel data.
346 #define INVALID_USER_FP(FP) ((FP) == 0 || !IS_USERADDR64_CANONICAL((FP)))
347
348 x86_saved_state_t *state = get_user_regs(thread);
349 if (!state) {
350 error = EINVAL;
351 goto out;
352 }
353
354 user_64 = is_saved_state64(state);
355 if (user_64) {
356 pc = saved_state64(state)->isf.rip;
357 fp = fp != 0 ? fp : saved_state64(state)->rbp;
358 } else {
359 pc = saved_state32(state)->eip;
360 fp = fp != 0 ? fp : saved_state32(state)->ebp;
361 }
362
363 #elif defined(__arm64__) || defined(__arm__)
364
365 struct arm_saved_state *state = get_user_regs(thread);
366 if (!state) {
367 error = EINVAL;
368 goto out;
369 }
370
371 #if defined(__arm64__)
372 user_64 = is_saved_state64(state);
373 pc = get_saved_state_pc(state);
374 fp = fp != 0 ? fp : get_saved_state_fp(state);
375
376 // ARM expects stack frames to be aligned to 16 bytes.
377 #define INVALID_USER_FP(FP) (((FP) & 0x3UL) != 0UL)
378
379 #elif defined(__arm__)
380 // ARM expects stack frames to be aligned to 16 bytes.
381 #define INVALID_USER_FP(FP) (((FP) & 0x3UL) != 0UL)
382 #endif // !defined(__arm64__)
383
384 pc = get_saved_state_pc(state);
385 fp = fp != 0 ? fp : get_saved_state_fp(state);
386
387 #else // defined(__arm__) || defined(__arm64__) || defined(__x86_64__)
388 #error "unsupported architecture"
389 #endif // !defined(__arm__) && !defined(__arm64__) && !defined(__x86_64__)
390
391 // Only capture the save state PC without a custom frame pointer to walk.
392 if (!ctl || ctl->btc_frame_addr == 0) {
393 bt[frame_index++] = pc + addr_offset;
394 }
395
396 if (frame_index >= max_frames) {
397 goto out;
398 }
399
400 if (fp == 0) {
401 // If the FP is zeroed, then there's no stack to walk, by design. This
402 // happens for workq threads that are being sent back to user space or
403 // during boot-strapping operations on other kinds of threads.
404 goto out;
405 } else if (INVALID_USER_FP(fp)) {
406 // Still capture the PC in this case, but mark the stack as truncated
407 // and "faulting." (Using the frame pointer on a call stack would cause
408 // an exception.)
409 error = EFAULT;
410 truncated = true;
411 goto out;
412 }
413
414 union {
415 struct {
416 uint64_t fp;
417 uint64_t ret;
418 } u64;
419 struct {
420 uint32_t fp;
421 uint32_t ret;
422 } u32;
423 } frame;
424
425 frame_size = 2 * (user_64 ? 8 : 4);
426
427 while (fp != 0 && frame_index < max_frames) {
428 error = copy(ctx, (char *)&frame, fp, frame_size);
429 if (error) {
430 truncated = true;
431 goto out;
432 }
433
434 // Capture this return address before tripping over any errors finding
435 // the next frame to follow.
436 uintptr_t ret_addr = user_64 ? frame.u64.ret : frame.u32.ret;
437 #if defined(HAS_APPLE_PAC)
438 // Return addresses are signed by arm64e ABI, so strip off the auth
439 // bits.
440 bt[frame_index++] = (uintptr_t)ptrauth_strip((void *)ret_addr,
441 ptrauth_key_return_address) + addr_offset;
442 #else // defined(HAS_APPLE_PAC)
443 bt[frame_index++] = ret_addr + addr_offset;
444 #endif // !defined(HAS_APPLE_PAC)
445
446 // Find the next frame to follow.
447 next_fp = user_64 ? frame.u64.fp : frame.u32.fp;
448 bool async_frame = allow_async && SWIFT_ASYNC_FP(next_fp);
449 // There is no 32-bit ABI for Swift async call stacks.
450 if (user_64 && async_frame) {
451 async_index = frame_index - 1;
452 // The async context pointer is just below the stack frame.
453 user_addr_t async_ctx_ptr = fp - 8;
454 user_addr_t async_ctx = 0;
455 error = copy(ctx, (char *)&async_ctx, async_ctx_ptr,
456 sizeof(async_ctx));
457 if (error) {
458 goto out;
459 }
460 #if defined(HAS_APPLE_PAC)
461 async_frame_addr = (uintptr_t)ptrauth_strip((void *)async_ctx,
462 ptrauth_key_process_dependent_data);
463 #else // defined(HAS_APPLE_PAC)
464 async_frame_addr = (uintptr_t)async_ctx;
465 #endif // !defined(HAS_APPLE_PAC)
466 has_async = true;
467 allow_async = false;
468 }
469 next_fp = SWIFT_ASYNC_FP_CLEAR(next_fp);
470 #if defined(HAS_APPLE_PAC)
471 next_fp = (uintptr_t)ptrauth_strip((void *)next_fp,
472 ptrauth_key_process_dependent_data);
473 #endif // defined(HAS_APPLE_PAC)
474 if (INVALID_USER_FP(next_fp)) {
475 break;
476 }
477
478 // Stacks grow down; backtracing should be moving to higher addresses,
479 // unless a custom frame pointer is provided, in which case, an async
480 // stack might be walked, which is allocated on the heap in any order.
481 if ((next_fp == fp) || (!custom_fp && next_fp < fp)) {
482 break;
483 }
484 fp = next_fp;
485 }
486
487 out:
488 if (old_map != NULL) {
489 (void)vm_map_switch(old_map);
490 vm_map_deallocate(map);
491 }
492
493 // NULL-terminate the list, if space is available.
494 if (frame_index < max_frames) {
495 bt[frame_index] = 0;
496 }
497
498 if (info_out) {
499 info_out->btui_error = error;
500 backtrace_info_t info = user_64 ? BTI_64_BIT : BTI_NONE;
501 bool out_of_space = !INVALID_USER_FP(fp) && frame_index == max_frames;
502 if (truncated || out_of_space) {
503 info |= BTI_TRUNCATED;
504 }
505 if (out_of_space && error == 0) {
506 info_out->btui_next_frame_addr = fp;
507 }
508 info_out->btui_info = info;
509 info_out->btui_async_start_index = async_index;
510 info_out->btui_async_frame_addr = async_frame_addr;
511 }
512
513 return frame_index;
514 }
515