// Copyright (c) 2016-2021 Apple Inc. All rights reserved.
//
// @APPLE_OSREFERENCE_LICENSE_HEADER_START@
//
// This file contains Original Code and/or Modifications of Original Code
// as defined in and that are subject to the Apple Public Source License
// Version 2.0 (the 'License'). You may not use this file except in
// compliance with the License. The rights granted to you under the License
// may not be used to create, or enable the creation or redistribution of,
// unlawful or unlicensed copies of an Apple operating system, or to
// circumvent, violate, or enable the circumvention or violation of, any
// terms of an Apple operating system software license agreement.
//
// Please obtain a copy of the License at
// http://www.opensource.apple.com/apsl/ and read it before using this file.
//
// The Original Code and all software distributed under the License are
// distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
// EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
// INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
// Please see the License for the specific language governing rights and
// limitations under the License.
//
// @APPLE_OSREFERENCE_LICENSE_HEADER_END@

#include <stddef.h>
#include <stdint.h>

#include <kern/assert.h>
#include <kern/backtrace.h>
#include <kern/cambria_layout.h>
#include <kern/thread.h>
#include <machine/machine_routines.h>
#include <sys/errno.h>
#include <vm/vm_map_xnu.h>

#if defined(__arm64__)
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#endif // defined(__arm64__)

#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
#endif // defined(HAS_APPLE_PAC)


#if __x86_64__
static void
_backtrace_packed_out_of_reach(void)
{
	// This symbol is used to replace frames that have been "JIT-ed"
	// or dynamically inserted in the kernel by some kext in a regular
	// VM mapping that might be outside of the filesets.
	//
	// This is an Intel only issue.
}
#endif // __x86_64__

// Pack an address according to a particular packing format.
static size_t
_backtrace_pack_addr(backtrace_pack_t packing, uint8_t *dst, size_t dst_size,
    uintptr_t addr)
{
	switch (packing) {
	case BTP_NONE:
		if (dst_size >= sizeof(addr)) {
			memcpy(dst, &addr, sizeof(addr));
		}
		return sizeof(addr);
	case BTP_KERN_OFFSET_32:;
		uintptr_t addr_delta = addr - vm_kernel_stext;
		int32_t addr_packed = (int32_t)addr_delta;
#if __x86_64__
		if ((uintptr_t)(int32_t)addr_delta != addr_delta) {
			addr = (vm_offset_t)&_backtrace_packed_out_of_reach;
			addr_delta = addr - vm_kernel_stext;
			addr_packed = (int32_t)addr_delta;
		}
#else // __x86_64__
		assert((uintptr_t)(int32_t)addr_delta == addr_delta);
#endif // !__x86_64__
		if (dst_size >= sizeof(addr_packed)) {
			memcpy(dst, &addr_packed, sizeof(addr_packed));
		}
		return sizeof(addr_packed);
	default:
		panic("backtrace: unknown packing format %d", packing);
	}
}

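// Illustrative sketch, not part of the original file: with BTP_KERN_OFFSET_32,
// each address is stored as a signed 32-bit delta from vm_kernel_stext, so a
// packed trace costs 4 bytes per frame instead of 8.  Unpacking reverses the
// arithmetic (addr == vm_kernel_stext + delta).  The example address is
// hypothetical and the block is not compiled.
#if 0
static void
_backtrace_pack_addr_example(void)
{
	uint8_t buf[sizeof(int32_t)];
	uintptr_t addr = vm_kernel_stext + 0x1000;

	// Pack a single kernel text address as a 32-bit offset.
	size_t used = _backtrace_pack_addr(BTP_KERN_OFFSET_32, buf, sizeof(buf), addr);
	assert(used == sizeof(int32_t));

	// Recover the original address by adding the delta back to the text base.
	int32_t delta;
	memcpy(&delta, buf, sizeof(delta));
	assert(vm_kernel_stext + (uintptr_t)delta == addr);
}
#endif
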
// Since it's only called from threads that we're going to keep executing,
// if there's bad data the system is going to die eventually.  If this function
// is inlined, it doesn't record the frame of the function it's inside (because
// there's no stack frame), so prevent that.
static size_t __attribute__((noinline, not_tail_called))
backtrace_internal(backtrace_pack_t packing, uint8_t *bt,
    size_t btsize, void *start_frame, int64_t addr_offset,
    backtrace_info_t *info_out)
{
	thread_t thread = current_thread();
	uintptr_t *fp;
	size_t size_used = 0;
	uintptr_t top, bottom;
	bool in_valid_stack;
	assert(bt != NULL);
	assert(btsize > 0);

	fp = start_frame;
#if defined(HAS_APPLE_PAC)
	fp = ptrauth_strip(fp, ptrauth_key_frame_pointer);
#endif
	bottom = thread->kernel_stack;
	top = bottom + kernel_stack_size;

#define IN_STK_BOUNDS(__addr) \
	(((uintptr_t)(__addr) >= (uintptr_t)bottom) && \
	((uintptr_t)(__addr) < (uintptr_t)top))

	in_valid_stack = IN_STK_BOUNDS(fp) || ml_addr_in_non_xnu_stack((uintptr_t)fp);

	if (!in_valid_stack) {
		fp = NULL;
	}

	while (fp != NULL && size_used < btsize) {
		uintptr_t *next_fp = (uintptr_t *)*fp;
#if defined(HAS_APPLE_PAC)
		next_fp = ptrauth_strip(next_fp, ptrauth_key_frame_pointer);
#endif
		// Return address is one word higher than frame pointer.
		uintptr_t ret_addr = *(fp + 1);

		// If the frame pointer is 0, backtracing has reached the top of
		// the stack and there is no return address.  Some stacks might not
		// have set this up, so bounds check, as well.
		in_valid_stack = IN_STK_BOUNDS(next_fp) || ml_addr_in_non_xnu_stack((uintptr_t)next_fp);

		if (next_fp == NULL || !in_valid_stack) {
			break;
		}

#if defined(HAS_APPLE_PAC)
		// Return addresses are signed by arm64e ABI, so strip it.
		uintptr_t pc = (uintptr_t)ptrauth_strip((void *)ret_addr,
		    ptrauth_key_return_address);
#else // defined(HAS_APPLE_PAC)
		uintptr_t pc = ret_addr;
#endif // !defined(HAS_APPLE_PAC)
		if (pc == 0) {
			// Once a NULL PC is encountered, ignore the rest of the call stack.
			break;
		}
		pc += addr_offset;
		size_used += _backtrace_pack_addr(packing, bt + size_used,
		    btsize - size_used, pc);

		// Stacks grow down; backtracing should always be moving to higher
		// addresses except when a frame is stitching between two different
		// stacks.
		if (next_fp <= fp) {
			// This check is verbose; it is basically checking whether this
			// thread is switching between the kernel stack and a non-XNU stack
			// (or between one non-XNU stack and another, as there can be more
			// than one). If not, then stop the backtrace as stack switching
			// should be the only reason as to why the next FP would be lower
			// than the current FP.
			if (!ml_addr_in_non_xnu_stack((uintptr_t)fp) &&
			    !ml_addr_in_non_xnu_stack((uintptr_t)next_fp)) {
				break;
			}
		}
		fp = next_fp;
	}

	if (info_out) {
		backtrace_info_t info = BTI_NONE;
#if __LP64__
		info |= BTI_64_BIT;
#endif
		if (fp != NULL && size_used >= btsize) {
			info |= BTI_TRUNCATED;
		}
		*info_out = info;
	}

	return size_used;
#undef IN_STK_BOUNDS
}

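// Illustrative sketch, not part of the original file: the frame record layout
// that the walk above assumes.  A frame pointer points at a pair of words,
// the caller's saved FP first, with the return address one word higher.  The
// struct name is hypothetical and the block is not compiled.
#if 0
struct _backtrace_frame_record_example {
	uintptr_t fp;       // saved caller frame pointer, read via *fp
	uintptr_t ret_addr; // return address, read via *(fp + 1)
};
#endif
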
static kern_return_t
interrupted_kernel_pc_fp(uintptr_t *pc, uintptr_t *fp)
{
#if defined(__x86_64__)
	x86_saved_state_t *state;
	bool state_64;
	uint64_t cs;

	state = current_cpu_datap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}

	state_64 = is_saved_state64(state);

	if (state_64) {
		cs = saved_state64(state)->isf.cs;
	} else {
		cs = saved_state32(state)->cs;
	}
	// Return early if interrupted a thread in user space.
	if ((cs & SEL_PL) == SEL_PL_U) {
		return KERN_FAILURE;
	}

	if (state_64) {
		*pc = saved_state64(state)->isf.rip;
		*fp = saved_state64(state)->rbp;
	} else {
		*pc = saved_state32(state)->eip;
		*fp = saved_state32(state)->ebp;
	}

#elif defined(__arm64__)

	struct arm_saved_state *state;

	state = getCpuDatap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}

	// Return early if interrupted a thread in user space.
	if (PSR64_IS_USER(get_saved_state_cpsr(state))) {
		return KERN_FAILURE;
	}

	*pc = ml_get_backtrace_pc(state);
	*fp = get_saved_state_fp(state);

#else // !defined(__arm64__) && !defined(__x86_64__)
#error "unsupported architecture"
#endif // !defined(__arm64__) && !defined(__x86_64__)

	return KERN_SUCCESS;
}

__attribute__((always_inline))
static uintptr_t
_backtrace_preamble(struct backtrace_control *ctl, uintptr_t *start_frame_out)
{
	backtrace_flags_t flags = ctl ? ctl->btc_flags : 0;
	uintptr_t start_frame = ctl ? ctl->btc_frame_addr : 0;
	uintptr_t pc = 0;
	if (flags & BTF_KERN_INTERRUPTED) {
		assert(ml_at_interrupt_context() == TRUE);

		uintptr_t fp;
		kern_return_t kr = interrupted_kernel_pc_fp(&pc, &fp);
		if (kr != KERN_SUCCESS) {
			return 0;
		}
		*start_frame_out = start_frame ?: fp;
	} else if (start_frame == 0) {
		*start_frame_out = (uintptr_t)__builtin_frame_address(0);
	} else {
		*start_frame_out = start_frame;
	}
	return pc;
}

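// Illustrative sketch, not part of the original file: an interrupt handler
// that wants the interrupted kernel context (rather than its own stack) sets
// BTF_KERN_INTERRUPTED in the controls, which routes through
// _backtrace_preamble() and interrupted_kernel_pc_fp() above.  The frame
// count and printf are hypothetical; the block is not compiled.
#if 0
static void
_backtrace_interrupted_example(void)
{
	struct backtrace_control ctl = {
		.btc_flags = BTF_KERN_INTERRUPTED,
	};
	uintptr_t frames[16];

	// frames[0] holds the interrupted PC; the rest are walked from the
	// interrupted frame pointer.
	unsigned int len = backtrace(frames, 16, &ctl, NULL);
	printf("interrupted context: %u frames\n", len);
}
#endif
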
unsigned int __attribute__((noinline))
backtrace(uintptr_t *bt, unsigned int max_frames,
    struct backtrace_control *ctl, backtrace_info_t *info_out)
{
	unsigned int len_adj = 0;
	uintptr_t start_frame = ctl ? ctl->btc_frame_addr : 0;
	uintptr_t pc = _backtrace_preamble(ctl, &start_frame);
	if (pc) {
		bt[0] = pc;
		if (max_frames == 1) {
			return 1;
		}
		bt += 1;
		max_frames -= 1;
		len_adj += 1;
	}

	size_t size = backtrace_internal(BTP_NONE, (uint8_t *)bt,
	    max_frames * sizeof(uintptr_t), (void *)start_frame,
	    ctl ? ctl->btc_addr_offset : 0, info_out);
	// NULL-terminate the list, if space is available.
	unsigned int len = size / sizeof(uintptr_t);
	if (len != max_frames) {
		bt[len] = 0;
	}

	return len + len_adj;
}

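// Illustrative sketch, not part of the original file: a typical caller
// captures the current thread's kernel stack into a fixed-size array and
// checks the info flags for truncation.  The array length and printf are
// hypothetical; the block is not compiled.
#if 0
static void
_backtrace_example(void)
{
	uintptr_t frames[32];
	backtrace_info_t info = BTI_NONE;

	// With NULL controls, the trace starts at the caller of backtrace().
	unsigned int len = backtrace(frames, 32, NULL, &info);
	for (unsigned int i = 0; i < len; i++) {
		printf("frame[%u]: 0x%lx\n", i, (unsigned long)frames[i]);
	}
	if (info & BTI_TRUNCATED) {
		printf("call stack was deeper than 32 frames\n");
	}
}
#endif
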
// Backtrace the current thread's kernel stack as a packed representation.
size_t
backtrace_packed(backtrace_pack_t packing, uint8_t *bt, size_t btsize,
    struct backtrace_control *ctl,
    backtrace_info_t *info_out)
{
	unsigned int size_adj = 0;
	uintptr_t start_frame = ctl ? ctl->btc_frame_addr : 0;
	uintptr_t pc = _backtrace_preamble(ctl, &start_frame);
	if (pc) {
		size_adj = _backtrace_pack_addr(packing, bt, btsize, pc);
		if (size_adj >= btsize) {
			return size_adj;
		}
		btsize -= size_adj;
	}

	size_t written_size = backtrace_internal(packing, bt + size_adj, btsize,
	    (void *)start_frame, ctl ? ctl->btc_addr_offset : 0, info_out);
	return written_size + size_adj;
}

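// Illustrative sketch, not part of the original file: backtrace_packed() with
// BTP_KERN_OFFSET_32 fills the buffer with 4-byte offsets, so the returned
// byte count divided by sizeof(int32_t) gives the number of frames captured.
// The buffer size and printf are hypothetical; the block is not compiled.
#if 0
static void
_backtrace_packed_example(void)
{
	uint8_t packed[64 * sizeof(int32_t)];
	backtrace_info_t info = BTI_NONE;

	size_t size = backtrace_packed(BTP_KERN_OFFSET_32, packed, sizeof(packed),
	    NULL, &info);
	unsigned int nframes = (unsigned int)(size / sizeof(int32_t));
	printf("captured %u packed frames\n", nframes);
}
#endif
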
// Convert an array of addresses to a packed representation.
size_t
backtrace_pack(backtrace_pack_t packing, uint8_t *dst, size_t dst_size,
    const uintptr_t *src, unsigned int src_len)
{
	size_t dst_offset = 0;
	for (unsigned int i = 0; i < src_len; i++) {
		size_t pack_size = _backtrace_pack_addr(packing, dst + dst_offset,
		    dst_size - dst_offset, src[i]);
		if (dst_offset + pack_size >= dst_size) {
			return dst_offset;
		}
		dst_offset += pack_size;
	}
	return dst_offset;
}

// Convert a packed backtrace to an array of addresses.
unsigned int
backtrace_unpack(backtrace_pack_t packing, uintptr_t *dst, unsigned int dst_len,
    const uint8_t *src, size_t src_size)
{
	switch (packing) {
	case BTP_NONE:;
		size_t unpack_size = MIN(dst_len * sizeof(uintptr_t), src_size);
		memmove(dst, src, unpack_size);
		return (unsigned int)(unpack_size / sizeof(uintptr_t));
	case BTP_KERN_OFFSET_32:;
		unsigned int src_len = src_size / sizeof(int32_t);
		unsigned int unpack_len = MIN(src_len, dst_len);
		for (unsigned int i = 0; i < unpack_len; i++) {
			int32_t addr = 0;
			memcpy(&addr, src + i * sizeof(int32_t), sizeof(int32_t));
			dst[i] = vm_kernel_stext + (uintptr_t)addr;
		}
		return unpack_len;
	default:
		panic("backtrace: unknown packing format %d", packing);
	}
}

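// Illustrative sketch, not part of the original file: backtrace_pack() and
// backtrace_unpack() are inverses for addresses within a signed 32-bit offset
// of vm_kernel_stext, so a trace captured with backtrace() can be stored
// compactly and expanded later.  Buffer sizes are hypothetical; the block is
// not compiled.
#if 0
static void
_backtrace_pack_roundtrip_example(const uintptr_t *frames, unsigned int len)
{
	uint8_t packed[32 * sizeof(int32_t)];
	uintptr_t unpacked[32];

	size_t packed_size = backtrace_pack(BTP_KERN_OFFSET_32, packed,
	    sizeof(packed), frames, len);
	unsigned int unpacked_len = backtrace_unpack(BTP_KERN_OFFSET_32, unpacked,
	    32, packed, packed_size);
	assert(unpacked_len <= len);
}
#endif
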
static errno_t
_backtrace_copyin(void * __unused ctx, void *dst, user_addr_t src, size_t size)
{
	int error = copyin((user_addr_t)src, dst, size);
	return error;
}

errno_t
backtrace_user_copy_error(void *ctx, void *dst, user_addr_t src, size_t size)
{
#pragma unused(ctx, dst, src, size)
	return EFAULT;
}

unsigned int
backtrace_user(uintptr_t *bt, unsigned int max_frames,
    const struct backtrace_control *ctl_in,
    struct backtrace_user_info *info_out)
{
	static const struct backtrace_control ctl_default = {
		.btc_user_copy = _backtrace_copyin,
	};
	const struct backtrace_control *ctl = ctl_in ?: &ctl_default;
	uintptr_t pc = 0, next_fp = 0;
	uintptr_t fp = ctl->btc_frame_addr;
	bool custom_fp = fp != 0;
	int64_t addr_offset = ctl ? ctl->btc_addr_offset : 0;
	vm_map_t map = NULL;
	vm_map_switch_context_t switch_ctx;
	bool switched_map = false;
	unsigned int frame_index = 0;
	int error = 0;
	size_t frame_size = 0;
	bool truncated = false;
	bool user_64 = false;
	bool allow_async = true;
	bool has_async = false;
	uintptr_t async_frame_addr = 0;
	unsigned int async_index = 0;

	backtrace_user_copy_fn copy = ctl->btc_user_copy ?: _backtrace_copyin;
	bool custom_copy = copy != _backtrace_copyin;
	void *ctx = ctl->btc_user_copy_context;

	void *thread = ctl->btc_user_thread;
	void *cur_thread = NULL;
	if (thread == NULL) {
		cur_thread = current_thread();
		thread = cur_thread;
	}
	task_t task = get_threadtask(thread);

	assert(task != NULL);
	assert(bt != NULL);
	assert(max_frames > 0);

	if (!custom_copy) {
		bool interrupts_enabled = ml_get_interrupts_enabled();
		assert(interrupts_enabled);
		if (!interrupts_enabled) {
			error = EDEADLK;
			goto out;
		}

		if (cur_thread == NULL) {
			cur_thread = current_thread();
		}
		bool const must_switch_maps = thread != cur_thread;
		if (must_switch_maps) {
			map = get_task_map_reference(task);
			if (map == NULL) {
				error = ENOMEM;
				goto out;
			}
			switched_map = true;
			switch_ctx = vm_map_switch_to(map);
		}
	}

#define SWIFT_ASYNC_FP_BIT (0x1ULL << 60)
#define SWIFT_ASYNC_FP(FP) (((FP) & SWIFT_ASYNC_FP_BIT) != 0)
#define SWIFT_ASYNC_FP_CLEAR(FP) ((FP) & ~SWIFT_ASYNC_FP_BIT)

#if defined(__x86_64__)

	// Don't allow a malformed user stack to copy arbitrary kernel data.
#define INVALID_USER_FP(FP) ((FP) == 0 || !IS_USERADDR64_CANONICAL((FP)))

	x86_saved_state_t *state = get_user_regs(thread);
	if (!state) {
		error = EINVAL;
		goto out;
	}

	user_64 = is_saved_state64(state);
	if (user_64) {
		pc = saved_state64(state)->isf.rip;
		fp = fp != 0 ? fp : saved_state64(state)->rbp;
	} else {
		pc = saved_state32(state)->eip;
		fp = fp != 0 ? fp : saved_state32(state)->ebp;
	}

#elif defined(__arm64__)

	struct arm_saved_state *state = get_user_regs(thread);
	if (!state) {
		error = EINVAL;
		goto out;
	}

	user_64 = is_saved_state64(state);
	pc = get_saved_state_pc(state);
	fp = fp != 0 ? fp : get_saved_state_fp(state);

	// ARM expects stack frames to be aligned to 16 bytes.
#define INVALID_USER_FP(FP) (((FP) & 0x3UL) != 0UL)

#else // defined(__arm64__) || defined(__x86_64__)
#error "unsupported architecture"
#endif // !defined(__arm64__) && !defined(__x86_64__)

	// Only capture the save state PC without a custom frame pointer to walk.
	if (!ctl || ctl->btc_frame_addr == 0) {
		bt[frame_index++] = pc + addr_offset;
	}

	if (frame_index >= max_frames) {
		goto out;
	}

	if (fp == 0) {
		// If the FP is zeroed, then there's no stack to walk, by design.  This
		// happens for workq threads that are being sent back to user space or
		// during boot-strapping operations on other kinds of threads.
		goto out;
	} else if (INVALID_USER_FP(fp)) {
		// Still capture the PC in this case, but mark the stack as truncated
		// and "faulting."  (Using the frame pointer on a call stack would cause
		// an exception.)
		error = EFAULT;
		truncated = true;
		goto out;
	}

	union {
		struct {
			uint64_t fp;
			uint64_t ret;
		} u64;
		struct {
			uint32_t fp;
			uint32_t ret;
		} u32;
	} frame;

	frame_size = 2 * (user_64 ? 8 : 4);

	while (fp != 0 && frame_index < max_frames) {
		error = copy(ctx, (char *)&frame, fp, frame_size);
		if (error) {
			truncated = true;
			goto out;
		}

		// Capture this return address before tripping over any errors finding
		// the next frame to follow.
		uintptr_t ret_addr = user_64 ? frame.u64.ret : frame.u32.ret;
#if defined(HAS_APPLE_PAC)
		// Return addresses are signed by arm64e ABI, so strip off the auth
		// bits.
		bt[frame_index++] = (uintptr_t)ptrauth_strip((void *)ret_addr,
		    ptrauth_key_return_address) + addr_offset;
#else // defined(HAS_APPLE_PAC)
		bt[frame_index++] = ret_addr + addr_offset;
#endif // !defined(HAS_APPLE_PAC)

		// Find the next frame to follow.
		next_fp = user_64 ? frame.u64.fp : frame.u32.fp;
		bool async_frame = allow_async && SWIFT_ASYNC_FP(next_fp);
		// There is no 32-bit ABI for Swift async call stacks.
		if (user_64 && async_frame) {
			async_index = frame_index - 1;
			// The async context pointer is just below the stack frame.
			user_addr_t async_ctx_ptr = fp - 8;
			user_addr_t async_ctx = 0;
			error = copy(ctx, (char *)&async_ctx, async_ctx_ptr,
			    sizeof(async_ctx));
			if (error) {
				goto out;
			}
#if defined(HAS_APPLE_PAC)
			async_frame_addr = (uintptr_t)ptrauth_strip((void *)async_ctx,
			    ptrauth_key_process_dependent_data);
#else // defined(HAS_APPLE_PAC)
			async_frame_addr = (uintptr_t)async_ctx;
#endif // !defined(HAS_APPLE_PAC)
			has_async = true;
			allow_async = false;
		}
		next_fp = SWIFT_ASYNC_FP_CLEAR(next_fp);
#if defined(HAS_APPLE_PAC)
		next_fp = (uintptr_t)ptrauth_strip((void *)next_fp,
		    ptrauth_key_frame_pointer);
#endif // defined(HAS_APPLE_PAC)
		if (INVALID_USER_FP(next_fp)) {
			break;
		}

		// Stacks grow down; backtracing should be moving to higher addresses,
		// unless a custom frame pointer is provided, in which case, an async
		// stack might be walked, which is allocated on the heap in any order.
		if ((next_fp == fp) || (!custom_fp && next_fp < fp)) {
			break;
		}
		fp = next_fp;
	}

out:
	if (switched_map) {
		vm_map_switch_back(switch_ctx);
		vm_map_deallocate(map);
	}

	// NULL-terminate the list, if space is available.
	if (frame_index < max_frames) {
		bt[frame_index] = 0;
	}

	if (info_out) {
		info_out->btui_error = error;
		backtrace_info_t info = user_64 ? BTI_64_BIT : BTI_NONE;
		bool out_of_space = !INVALID_USER_FP(fp) && frame_index == max_frames;
		if (truncated || out_of_space) {
			info |= BTI_TRUNCATED;
		}
		if (out_of_space && error == 0) {
			info_out->btui_next_frame_addr = fp;
		}
		info_out->btui_info = info;
		info_out->btui_async_start_index = async_index;
		info_out->btui_async_frame_addr = async_frame_addr;
	}

	return frame_index;
}
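
// Illustrative sketch, not part of the original file: sampling the current
// thread's user stack with the default copyin routine.  The frame count and
// printf are hypothetical; the block is not compiled.
#if 0
static void
_backtrace_user_example(void)
{
	uintptr_t frames[64];
	struct backtrace_user_info info = { 0 };

	unsigned int len = backtrace_user(frames, 64, NULL, &info);
	if (info.btui_error != 0) {
		printf("user backtrace faulted: %d\n", info.btui_error);
	} else if (info.btui_info & BTI_TRUNCATED) {
		printf("user stack deeper than 64 frames (%u captured)\n", len);
	}
}
#endif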
610