xref: /xnu-8020.140.41/osfmk/kern/backtrace.c (revision 27b03b360a988dfd3dfdf34262bb0042026747cc)
// Copyright (c) 2016-2021 Apple Inc. All rights reserved.
//
// @APPLE_OSREFERENCE_LICENSE_HEADER_START@
//
// This file contains Original Code and/or Modifications of Original Code
// as defined in and that are subject to the Apple Public Source License
// Version 2.0 (the 'License'). You may not use this file except in
// compliance with the License. The rights granted to you under the License
// may not be used to create, or enable the creation or redistribution of,
// unlawful or unlicensed copies of an Apple operating system, or to
// circumvent, violate, or enable the circumvention or violation of, any
// terms of an Apple operating system software license agreement.
//
// Please obtain a copy of the License at
// http://www.opensource.apple.com/apsl/ and read it before using this file.
//
// The Original Code and all software distributed under the License are
// distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
// EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
// INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
// Please see the License for the specific language governing rights and
// limitations under the License.
//
// @APPLE_OSREFERENCE_LICENSE_HEADER_END@

#include <stddef.h>
#include <stdint.h>

#include <kern/assert.h>
#include <kern/backtrace.h>
#include <kern/cambria_layout.h>
#include <kern/thread.h>
#include <sys/errno.h>
#include <vm/vm_map.h>

#if defined(__arm__) || defined(__arm64__)
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#endif // defined(__arm__) || defined(__arm64__)

#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
#endif // defined(HAS_APPLE_PAC)

#if XNU_MONITOR
#define IN_PPLSTK_BOUNDS(__addr) \
	(((uintptr_t)(__addr) >= (uintptr_t)pmap_stacks_start) && \
	((uintptr_t)(__addr) < (uintptr_t)pmap_stacks_end))
#endif

#if __x86_64__
static void
_backtrace_packed_out_of_reach(void)
{
	/*
	 * This symbol is used to replace frames that have been "JIT-ed"
	 * or dynamically inserted in the kernel by some kext in a regular
	 * VM mapping that might be outside of the filesets.
	 *
	 * This is an Intel only issue.
	 */
}
#endif

// Pack an address according to a particular packing format.
static size_t
_backtrace_pack_addr(backtrace_pack_t packing, uint8_t *dst, size_t dst_size,
    uintptr_t addr)
{
	switch (packing) {
	case BTP_NONE:
		if (dst_size >= sizeof(addr)) {
			memcpy(dst, &addr, sizeof(addr));
		}
		return sizeof(addr);
	case BTP_KERN_OFFSET_32:;
		uintptr_t addr_delta = addr - vm_kernel_stext;
		int32_t addr_packed = (int32_t)addr_delta;
#if __x86_64__
		if ((uintptr_t)(int32_t)addr_delta != addr_delta) {
			addr = (vm_offset_t)&_backtrace_packed_out_of_reach;
			addr_delta = addr - vm_kernel_stext;
			addr_packed = (int32_t)addr_delta;
		}
#else
		assert((uintptr_t)(int32_t)addr_delta == addr_delta);
#endif
		if (dst_size >= sizeof(addr_packed)) {
			memcpy(dst, &addr_packed, sizeof(addr_packed));
		}
		return sizeof(addr_packed);
	default:
		panic("backtrace: unknown packing format %d", packing);
	}
}
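
// Illustrative sketch (assumption, not part of the original source): packing a
// single kernel text address with BTP_KERN_OFFSET_32 stores a signed 32-bit
// offset from vm_kernel_stext, using 4 bytes per frame instead of 8.
//
//	uint8_t buf[sizeof(int32_t)];
//	size_t used = _backtrace_pack_addr(BTP_KERN_OFFSET_32, buf, sizeof(buf),
//	    (uintptr_t)&backtrace);
//	// used == sizeof(int32_t); buf holds the offset of backtrace() from
//	// vm_kernel_stext, which backtrace_unpack() can later rebase.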

// Since this is only called from threads that we're going to keep executing,
// any bad data will take the system down eventually anyway.  If this function
// were inlined, it would have no stack frame of its own and would not record
// the frame of the function it was inlined into, so prevent inlining.
static size_t __attribute__((noinline, not_tail_called))
backtrace_internal(backtrace_pack_t packing, uint8_t *bt,
    size_t btsize, void *start_frame, int64_t addr_offset,
    backtrace_info_t *info_out)
{
	thread_t thread = current_thread();
	uintptr_t *fp;
	size_t size_used = 0;
	uintptr_t top, bottom;
	bool in_valid_stack;

	assert(bt != NULL);
	assert(btsize > 0);

	fp = start_frame;
	bottom = thread->kernel_stack;
	top = bottom + kernel_stack_size;

#define IN_STK_BOUNDS(__addr) \
	(((uintptr_t)(__addr) >= (uintptr_t)bottom) && \
	((uintptr_t)(__addr) < (uintptr_t)top))

	in_valid_stack = IN_STK_BOUNDS(fp);
#if XNU_MONITOR
	in_valid_stack |= IN_PPLSTK_BOUNDS(fp);
#endif /* XNU_MONITOR */

	if (!in_valid_stack) {
		fp = NULL;
	}

	while (fp != NULL && size_used < btsize) {
		uintptr_t *next_fp = (uintptr_t *)*fp;
		// Return address is one word higher than frame pointer.
		uintptr_t ret_addr = *(fp + 1);

		// If the frame pointer is 0, backtracing has reached the top of
		// the stack and there is no return address.  Some stacks might not
		// have set this up, so bounds check, as well.
		in_valid_stack = IN_STK_BOUNDS(next_fp);
#if XNU_MONITOR
		in_valid_stack |= IN_PPLSTK_BOUNDS(next_fp);
#endif /* XNU_MONITOR */

		if (next_fp == NULL || !in_valid_stack) {
			break;
		}

#if defined(HAS_APPLE_PAC)
		// Return addresses are signed under the arm64e ABI, so strip the signature.
		uintptr_t pc = (uintptr_t)ptrauth_strip((void *)ret_addr,
		    ptrauth_key_return_address);
#else // defined(HAS_APPLE_PAC)
		uintptr_t pc = ret_addr;
#endif // !defined(HAS_APPLE_PAC)
		pc += addr_offset;
		size_used += _backtrace_pack_addr(packing, bt + size_used,
		    btsize - size_used, pc);

		// Stacks grow down; backtracing should be moving to higher addresses.
		if (next_fp <= fp) {
#if XNU_MONITOR
			bool fp_in_pplstack = IN_PPLSTK_BOUNDS(fp);
			bool fp_in_kstack = IN_STK_BOUNDS(fp);
			bool next_fp_in_pplstack = IN_PPLSTK_BOUNDS(next_fp);
			bool next_fp_in_kstack = IN_STK_BOUNDS(next_fp);

			// This check is verbose; it is basically checking whether this
			// thread is switching between the kernel stack and the CPU stack.
			// If so, ignore the fact that the frame pointer has switched
			// directions (it is a symptom of switching stacks) and keep
			// walking; otherwise, stop.
			bool switching_stacks = (fp_in_pplstack && next_fp_in_kstack) ||
			    (fp_in_kstack && next_fp_in_pplstack);
			if (!switching_stacks) {
				break;
			}
#else /* XNU_MONITOR */
			break;
#endif /* !XNU_MONITOR */
		}
		fp = next_fp;
	}

	if (info_out) {
		backtrace_info_t info = BTI_NONE;
#if __LP64__
		info |= BTI_64_BIT;
#endif
		if (fp != NULL && size_used >= btsize) {
			info |= BTI_TRUNCATED;
		}
		*info_out = info;
	}

	return size_used;
#undef IN_STK_BOUNDS
}
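
// Illustrative sketch (assumption, not part of the original source): the frame
// record layout that backtrace_internal() walks on the architectures supported
// here.  Each record holds the caller's saved frame pointer with the return
// address one word above it, which is why the loop follows *fp and reads
// *(fp + 1); stacks grow down, so successive records sit at higher addresses.
//
//	struct frame_record {
//		uintptr_t saved_fp;  // next_fp: the caller's frame_record
//		uintptr_t ret_addr;  // return address into the caller
//	};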

static kern_return_t
interrupted_kernel_pc_fp(uintptr_t *pc, uintptr_t *fp)
{
#if defined(__x86_64__)
	x86_saved_state_t *state;
	bool state_64;
	uint64_t cs;

	state = current_cpu_datap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}

	state_64 = is_saved_state64(state);

	if (state_64) {
		cs = saved_state64(state)->isf.cs;
	} else {
		cs = saved_state32(state)->cs;
	}
	// Return early if interrupted a thread in user space.
	if ((cs & SEL_PL) == SEL_PL_U) {
		return KERN_FAILURE;
	}

	if (state_64) {
		*pc = saved_state64(state)->isf.rip;
		*fp = saved_state64(state)->rbp;
	} else {
		*pc = saved_state32(state)->eip;
		*fp = saved_state32(state)->ebp;
	}

#elif defined(__arm64__)

	struct arm_saved_state *state;
	bool state_64;

	state = getCpuDatap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}
	state_64 = is_saved_state64(state);

	// Return early if interrupted a thread in user space.
	if (PSR64_IS_USER(get_saved_state_cpsr(state))) {
		return KERN_FAILURE;
	}

	*pc = get_saved_state_pc(state);
	*fp = get_saved_state_fp(state);

#elif defined(__arm__)

	struct arm_saved_state *state;

	state = getCpuDatap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}

	/* return early if interrupted a thread in user space */
	if (PSR_IS_USER(get_saved_state_cpsr(state))) {
		return KERN_FAILURE;
	}

	*pc = get_saved_state_pc(state);
	*fp = get_saved_state_fp(state);

#else // !defined(__arm__) && !defined(__arm64__) && !defined(__x86_64__)
#error "unsupported architecture"
#endif // !defined(__arm__) && !defined(__arm64__) && !defined(__x86_64__)

	return KERN_SUCCESS;
}

__attribute__((always_inline))
static uintptr_t
_backtrace_preamble(struct backtrace_control *ctl, uintptr_t *start_frame_out)
{
	backtrace_flags_t flags = ctl ? ctl->btc_flags : 0;
	uintptr_t start_frame = ctl ? ctl->btc_frame_addr : 0;
	uintptr_t pc = 0;
	if (flags & BTF_KERN_INTERRUPTED) {
		assert(ml_at_interrupt_context() == TRUE);

		uintptr_t fp;
		kern_return_t kr = interrupted_kernel_pc_fp(&pc, &fp);
		if (kr != KERN_SUCCESS) {
			return 0;
		}
		*start_frame_out = start_frame ?: fp;
	} else if (start_frame == 0) {
		*start_frame_out = (uintptr_t)__builtin_frame_address(0);
	} else {
		*start_frame_out = start_frame;
	}
	return pc;
}
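
// Illustrative sketch (assumption, not part of the original source): a caller
// running in an interrupt handler can ask for the backtrace of the interrupted
// kernel context rather than its own by setting BTF_KERN_INTERRUPTED, which
// routes the preamble above through interrupted_kernel_pc_fp().
//
//	uintptr_t frames[32];
//	struct backtrace_control ctl = {
//		.btc_flags = BTF_KERN_INTERRUPTED,
//	};
//	unsigned int n = backtrace(frames, 32, &ctl, NULL);
//	// frames[0] is the interrupted PC; frames[1..n-1] walk the interrupted
//	// context's kernel stack.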

unsigned int __attribute__((noinline))
backtrace(uintptr_t *bt, unsigned int max_frames,
    struct backtrace_control *ctl, backtrace_info_t *info_out)
{
	unsigned int len_adj = 0;
	uintptr_t start_frame = ctl ? ctl->btc_frame_addr : 0;
	uintptr_t pc = _backtrace_preamble(ctl, &start_frame);
	if (pc) {
		bt[0] = pc;
		if (max_frames == 1) {
			return 1;
		}
		bt += 1;
		max_frames -= 1;
		len_adj += 1;
	}

	size_t size = backtrace_internal(BTP_NONE, (uint8_t *)bt,
	    max_frames * sizeof(uintptr_t), (void *)start_frame,
	    ctl ? ctl->btc_addr_offset : 0, info_out);
	// NULL-terminate the list, if space is available.
	unsigned int len = size / sizeof(uintptr_t);
	if (len != max_frames) {
		bt[len] = 0;
	}

	return len + len_adj;
}
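
// Illustrative sketch (assumption, not part of the original source): capturing
// the current thread's kernel backtrace and checking for truncation.
//
//	uintptr_t frames[16] = { 0 };
//	backtrace_info_t info = BTI_NONE;
//	unsigned int n = backtrace(frames, 16, NULL, &info);
//	if (info & BTI_TRUNCATED) {
//		// The stack had more than 16 frames; only the innermost 16
//		// (closest to the current PC) were recorded.
//	}
//	// frames[n] is 0 whenever n < 16, so the list is also NULL-terminated.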

// Backtrace the current thread's kernel stack as a packed representation.
size_t
backtrace_packed(backtrace_pack_t packing, uint8_t *bt, size_t btsize,
    struct backtrace_control *ctl,
    backtrace_info_t *info_out)
{
	unsigned int size_adj = 0;
	uintptr_t start_frame = ctl ? ctl->btc_frame_addr : 0;
	uintptr_t pc = _backtrace_preamble(ctl, &start_frame);
	if (pc) {
		size_adj = _backtrace_pack_addr(packing, bt, btsize, pc);
		if (size_adj >= btsize) {
			return size_adj;
		}
		btsize -= size_adj;
	}

	size_t written_size = backtrace_internal(packing, bt + size_adj, btsize,
	    (void *)start_frame, ctl ? ctl->btc_addr_offset : 0, info_out);
	return written_size + size_adj;
}

// Convert an array of addresses to a packed representation.
size_t
backtrace_pack(backtrace_pack_t packing, uint8_t *dst, size_t dst_size,
    const uintptr_t *src, unsigned int src_len)
{
	size_t dst_offset = 0;
	for (unsigned int i = 0; i < src_len; i++) {
		size_t pack_size = _backtrace_pack_addr(packing, dst + dst_offset,
		    dst_size - dst_offset, src[i]);
		if (dst_offset + pack_size >= dst_size) {
			return dst_offset;
		}
		dst_offset += pack_size;
	}
	return dst_offset;
}

// Convert a packed backtrace to an array of addresses.
unsigned int
backtrace_unpack(backtrace_pack_t packing, uintptr_t *dst, unsigned int dst_len,
    const uint8_t *src, size_t src_size)
{
	switch (packing) {
	case BTP_NONE:;
		size_t unpack_size = MIN(dst_len * sizeof(uintptr_t), src_size);
		memmove(dst, src, unpack_size);
		return (unsigned int)(unpack_size / sizeof(uintptr_t));
	case BTP_KERN_OFFSET_32:;
		unsigned int src_len = src_size / sizeof(int32_t);
		unsigned int unpack_len = MIN(src_len, dst_len);
		for (unsigned int i = 0; i < unpack_len; i++) {
			int32_t addr = 0;
			memcpy(&addr, src + i * sizeof(int32_t), sizeof(int32_t));
			dst[i] = vm_kernel_stext + (uintptr_t)addr;
		}
		return unpack_len;
	default:
		panic("backtrace: unknown packing format %d", packing);
	}
}
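
// Illustrative sketch (assumption, not part of the original source): a packed
// round trip.  backtrace_pack() shrinks a kernel backtrace to 4 bytes per
// frame and backtrace_unpack() rebases the offsets against vm_kernel_stext.
//
//	uintptr_t frames[8];
//	unsigned int n = backtrace(frames, 8, NULL, NULL);
//
//	uint8_t packed[16 * sizeof(int32_t)];
//	size_t packed_size = backtrace_pack(BTP_KERN_OFFSET_32, packed,
//	    sizeof(packed), frames, n);
//
//	uintptr_t unpacked[8];
//	unsigned int m = backtrace_unpack(BTP_KERN_OFFSET_32, unpacked, 8,
//	    packed, packed_size);
//	// m == n and unpacked[i] == frames[i] for each recorded frame.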

static errno_t
_backtrace_copyin(void * __unused ctx, void *dst, user_addr_t src, size_t size)
{
	return copyin((user_addr_t)src, dst, size);
}

errno_t
backtrace_user_copy_error(void *ctx, void *dst, user_addr_t src, size_t size)
{
#pragma unused(ctx, dst, src, size)
	return EFAULT;
}

unsigned int
backtrace_user(uintptr_t *bt, unsigned int max_frames,
    const struct backtrace_control *ctl_in,
    struct backtrace_user_info *info_out)
{
	static const struct backtrace_control ctl_default = {
		.btc_user_copy = _backtrace_copyin,
	};
	const struct backtrace_control *ctl = ctl_in ?: &ctl_default;
	uintptr_t pc = 0, next_fp = 0;
	uintptr_t fp = ctl->btc_frame_addr;
	bool custom_fp = fp != 0;
	int64_t addr_offset = ctl ? ctl->btc_addr_offset : 0;
	vm_map_t map = NULL, old_map = NULL;
	unsigned int frame_index = 0;
	int error = 0;
	size_t frame_size = 0;
	bool truncated = false;
	bool user_64 = false;
	bool allow_async = true;
	bool has_async = false;
	uintptr_t async_frame_addr = 0;
	unsigned int async_index = 0;

	backtrace_user_copy_fn copy = ctl->btc_user_copy ?: _backtrace_copyin;
	bool custom_copy = copy != _backtrace_copyin;
	void *ctx = ctl->btc_user_copy_context;

	void *thread = ctl->btc_user_thread;
	void *cur_thread = NULL;
	if (thread == NULL) {
		cur_thread = current_thread();
		thread = cur_thread;
	}
	task_t task = get_threadtask(thread);

	assert(task != NULL);
	assert(bt != NULL);
	assert(max_frames > 0);

	if (!custom_copy) {
		assert(ml_get_interrupts_enabled() == TRUE);
		if (!ml_get_interrupts_enabled()) {
			error = EDEADLK;
		}

		if (cur_thread == NULL) {
			cur_thread = current_thread();
		}
		if (thread != cur_thread) {
			map = get_task_map_reference(task);
			if (map == NULL) {
				return ENOMEM;
			}
			old_map = vm_map_switch(map);
		}
	}

#define SWIFT_ASYNC_FP_BIT (0x1ULL << 60)
#define SWIFT_ASYNC_FP(FP) (((FP) & SWIFT_ASYNC_FP_BIT) != 0)
#define SWIFT_ASYNC_FP_CLEAR(FP) ((FP) & ~SWIFT_ASYNC_FP_BIT)

#if defined(__x86_64__)

	// Don't allow a malformed user stack to copy arbitrary kernel data.
#define INVALID_USER_FP(FP) ((FP) == 0 || !IS_USERADDR64_CANONICAL((FP)))

	x86_saved_state_t *state = get_user_regs(thread);
	if (!state) {
		error = EINVAL;
		goto out;
	}

	user_64 = is_saved_state64(state);
	if (user_64) {
		pc = saved_state64(state)->isf.rip;
		fp = fp != 0 ? fp : saved_state64(state)->rbp;
	} else {
		pc = saved_state32(state)->eip;
		fp = fp != 0 ? fp : saved_state32(state)->ebp;
	}

#elif defined(__arm64__) || defined(__arm__)

	struct arm_saved_state *state = get_user_regs(thread);
	if (!state) {
		error = EINVAL;
		goto out;
	}

#if defined(__arm64__)
	user_64 = is_saved_state64(state);
#endif // defined(__arm64__)

	pc = get_saved_state_pc(state);
	fp = fp != 0 ? fp : get_saved_state_fp(state);

	// ARM expects stack frames to be 16-byte aligned, but only reject frame
	// pointers that are not at least 4-byte aligned here.
#define INVALID_USER_FP(FP) (((FP) & 0x3UL) != 0UL)

#else // defined(__arm__) || defined(__arm64__) || defined(__x86_64__)
#error "unsupported architecture"
#endif // !defined(__arm__) && !defined(__arm64__) && !defined(__x86_64__)

	// Only capture the save state PC without a custom frame pointer to walk.
	if (!ctl || ctl->btc_frame_addr == 0) {
		bt[frame_index++] = pc + addr_offset;
	}

	if (frame_index >= max_frames) {
		goto out;
	}

	if (fp == 0) {
		// If the FP is zeroed, then there's no stack to walk, by design.  This
		// happens for workq threads that are being sent back to user space or
		// during boot-strapping operations on other kinds of threads.
		goto out;
	} else if (INVALID_USER_FP(fp)) {
		// Still capture the PC in this case, but mark the stack as truncated
		// and "faulting."  (Using the frame pointer on a call stack would cause
		// an exception.)
		error = EFAULT;
		truncated = true;
		goto out;
	}

	union {
		struct {
			uint64_t fp;
			uint64_t ret;
		} u64;
		struct {
			uint32_t fp;
			uint32_t ret;
		} u32;
	} frame;

	frame_size = 2 * (user_64 ? 8 : 4);

	while (fp != 0 && frame_index < max_frames) {
		error = copy(ctx, (char *)&frame, fp, frame_size);
		if (error) {
			truncated = true;
			goto out;
		}

		// Capture this return address before tripping over any errors finding
		// the next frame to follow.
		uintptr_t ret_addr = user_64 ? frame.u64.ret : frame.u32.ret;
#if defined(HAS_APPLE_PAC)
		// Return addresses are signed by arm64e ABI, so strip off the auth
		// bits.
		bt[frame_index++] = (uintptr_t)ptrauth_strip((void *)ret_addr,
		    ptrauth_key_return_address) + addr_offset;
#else // defined(HAS_APPLE_PAC)
		bt[frame_index++] = ret_addr + addr_offset;
#endif // !defined(HAS_APPLE_PAC)

		// Find the next frame to follow.
		next_fp = user_64 ? frame.u64.fp : frame.u32.fp;
		bool async_frame = allow_async && SWIFT_ASYNC_FP(next_fp);
		// There is no 32-bit ABI for Swift async call stacks.
		if (user_64 && async_frame) {
			async_index = frame_index - 1;
			// The async context pointer is just below the stack frame.
			user_addr_t async_ctx_ptr = fp - 8;
			user_addr_t async_ctx = 0;
			error = copy(ctx, (char *)&async_ctx, async_ctx_ptr,
			    sizeof(async_ctx));
			if (error) {
				goto out;
			}
#if defined(HAS_APPLE_PAC)
			async_frame_addr = (uintptr_t)ptrauth_strip((void *)async_ctx,
			    ptrauth_key_process_dependent_data);
#else // defined(HAS_APPLE_PAC)
			async_frame_addr = (uintptr_t)async_ctx;
#endif // !defined(HAS_APPLE_PAC)
			has_async = true;
			allow_async = false;
		}
		next_fp = SWIFT_ASYNC_FP_CLEAR(next_fp);
#if defined(HAS_APPLE_PAC)
		next_fp = (uintptr_t)ptrauth_strip((void *)next_fp,
		    ptrauth_key_process_dependent_data);
#endif // defined(HAS_APPLE_PAC)
		if (INVALID_USER_FP(next_fp)) {
			break;
		}

		// Stacks grow down; backtracing should be moving to higher addresses,
		// unless a custom frame pointer was provided, in which case this may
		// be walking an async stack, whose frames are heap-allocated and can
		// appear in any address order.
		if ((next_fp == fp) || (!custom_fp && next_fp < fp)) {
			break;
		}
		fp = next_fp;
	}

out:
	if (old_map != NULL) {
		(void)vm_map_switch(old_map);
		vm_map_deallocate(map);
	}

	// NULL-terminate the list, if space is available.
	if (frame_index < max_frames) {
		bt[frame_index] = 0;
	}

	if (info_out) {
		info_out->btui_error = error;
		backtrace_info_t info = user_64 ? BTI_64_BIT : BTI_NONE;
		bool out_of_space = !INVALID_USER_FP(fp) && frame_index == max_frames;
		if (truncated || out_of_space) {
			info |= BTI_TRUNCATED;
		}
		if (out_of_space && error == 0) {
			info_out->btui_next_frame_addr = fp;
		}
		info_out->btui_info = info;
		info_out->btui_async_start_index = async_index;
		info_out->btui_async_frame_addr = async_frame_addr;
	}

	return frame_index;
}
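
// Illustrative sketch (assumption, not part of the original source): walking
// the current thread's user stack, then resuming from btui_next_frame_addr if
// the first pass ran out of space.
//
//	uintptr_t frames[32];
//	struct backtrace_user_info info = { 0 };
//	unsigned int n = backtrace_user(frames, 32, NULL, &info);
//	if (info.btui_error == 0 && (info.btui_info & BTI_TRUNCATED) &&
//	    info.btui_next_frame_addr != 0) {
//		// Resume the walk at the frame that did not fit, e.g. into a
//		// second buffer; a custom btc_frame_addr skips the saved-state PC.
//		struct backtrace_control ctl = {
//			.btc_frame_addr = info.btui_next_frame_addr,
//		};
//		info = (struct backtrace_user_info){ 0 };
//		n = backtrace_user(frames, 32, &ctl, &info);
//	}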