// xref: /xnu-11417.101.15/osfmk/kern/backtrace.c (revision e3723e1f17661b24996789d8afc084c0c3303b26)
// Copyright (c) 2016-2021 Apple Inc. All rights reserved.
//
// @APPLE_OSREFERENCE_LICENSE_HEADER_START@
//
// This file contains Original Code and/or Modifications of Original Code
// as defined in and that are subject to the Apple Public Source License
// Version 2.0 (the 'License'). You may not use this file except in
// compliance with the License. The rights granted to you under the License
// may not be used to create, or enable the creation or redistribution of,
// unlawful or unlicensed copies of an Apple operating system, or to
// circumvent, violate, or enable the circumvention or violation of, any
// terms of an Apple operating system software license agreement.
//
// Please obtain a copy of the License at
// http://www.opensource.apple.com/apsl/ and read it before using this file.
//
// The Original Code and all software distributed under the License are
// distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
// EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
// INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
// Please see the License for the specific language governing rights and
// limitations under the License.
//
// @APPLE_OSREFERENCE_LICENSE_HEADER_END@

#include <stddef.h>
#include <stdint.h>

#include <kern/assert.h>
#include <kern/backtrace.h>
#include <kern/cambria_layout.h>
#include <kern/thread.h>
#include <machine/machine_routines.h>
#include <sys/errno.h>
#include <vm/vm_map_xnu.h>

#if defined(__arm64__)
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#endif // defined(__arm64__)

#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
#endif // defined(HAS_APPLE_PAC)

#if __x86_64__
static void
_backtrace_packed_out_of_reach(void)
{
	// This symbol is used to replace frames that have been "JIT-ed"
	// or dynamically inserted in the kernel by some kext in a regular
	// VM mapping that might be outside of the filesets.
	//
	// This is an Intel-only issue.
}
#endif // __x86_64__

// Pack an address according to a particular packing format.
static size_t
_backtrace_pack_addr(backtrace_pack_t packing, uint8_t *dst, size_t dst_size,
    uintptr_t addr)
{
	switch (packing) {
	case BTP_NONE:
		if (dst_size >= sizeof(addr)) {
			memcpy(dst, &addr, sizeof(addr));
		}
		return sizeof(addr);
	case BTP_KERN_OFFSET_32:;
		uintptr_t addr_delta = addr - vm_kernel_stext;
		int32_t addr_packed = (int32_t)addr_delta;
#if __x86_64__
		if ((uintptr_t)(int32_t)addr_delta != addr_delta) {
			addr = (vm_offset_t)&_backtrace_packed_out_of_reach;
			addr_delta = addr - vm_kernel_stext;
			addr_packed = (int32_t)addr_delta;
		}
#else // __x86_64__
		assert((uintptr_t)(int32_t)addr_delta == addr_delta);
#endif // !__x86_64__
		if (dst_size >= sizeof(addr_packed)) {
			memcpy(dst, &addr_packed, sizeof(addr_packed));
		}
		return sizeof(addr_packed);
	default:
		panic("backtrace: unknown packing format %d", packing);
	}
}
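
// Illustrative sketch (not part of the original source): with a hypothetical
// vm_kernel_stext of 0xfffffe0007004000, packing the address
// 0xfffffe0007010000 under BTP_NONE stores the full 8-byte address, while
// BTP_KERN_OFFSET_32 stores only the 4-byte signed delta 0xc000.  In both
// cases the packed size is returned even when dst_size is too small for the
// copy, so callers can detect truncation by comparing the return value
// against the buffer space they passed in.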

// Since this is only called from threads that we're going to keep executing,
// the system will eventually die anyway if the stack holds bad data.  If this
// function is inlined, it doesn't record the frame of the function it's
// inside (because there's no stack frame), so prevent that.
static size_t __attribute__((noinline, not_tail_called))
backtrace_internal(backtrace_pack_t packing, uint8_t *bt,
    size_t btsize, void *start_frame, int64_t addr_offset,
    backtrace_info_t *info_out)
{
	thread_t thread = current_thread();
	uintptr_t *fp;
	size_t size_used = 0;
	uintptr_t top, bottom;
	bool in_valid_stack;
	assert(bt != NULL);
	assert(btsize > 0);

	fp = start_frame;
#if defined(HAS_APPLE_PAC)
	fp = ptrauth_strip(fp, ptrauth_key_frame_pointer);
#endif
	bottom = thread->kernel_stack;
	top = bottom + kernel_stack_size;

#define IN_STK_BOUNDS(__addr) \
	(((uintptr_t)(__addr) >= (uintptr_t)bottom) && \
	((uintptr_t)(__addr) < (uintptr_t)top))

	in_valid_stack = IN_STK_BOUNDS(fp) || ml_addr_in_non_xnu_stack((uintptr_t)fp);

	if (!in_valid_stack) {
		fp = NULL;
	}

	while (fp != NULL && size_used < btsize) {
		uintptr_t *next_fp = (uintptr_t *)*fp;
#if defined(HAS_APPLE_PAC)
		next_fp = ptrauth_strip(next_fp, ptrauth_key_frame_pointer);
#endif
		// The return address is one word higher than the frame pointer.
		uintptr_t ret_addr = *(fp + 1);

		// If the frame pointer is 0, backtracing has reached the top of
		// the stack and there is no return address.  Some stacks might not
		// have set this up, so bounds check, as well.
		in_valid_stack = IN_STK_BOUNDS(next_fp) || ml_addr_in_non_xnu_stack((uintptr_t)next_fp);

		if (next_fp == NULL || !in_valid_stack) {
			break;
		}

#if defined(HAS_APPLE_PAC)
		// Return addresses are signed under the arm64e ABI, so strip the
		// signature.
		uintptr_t pc = (uintptr_t)ptrauth_strip((void *)ret_addr,
		    ptrauth_key_return_address);
#else // defined(HAS_APPLE_PAC)
		uintptr_t pc = ret_addr;
#endif // !defined(HAS_APPLE_PAC)
		if (pc == 0) {
			// Once a NULL PC is encountered, ignore the rest of the call stack.
			break;
		}
		pc += addr_offset;
		size_used += _backtrace_pack_addr(packing, bt + size_used,
		    btsize - size_used, pc);

		// Stacks grow down; backtracing should always be moving to higher
		// addresses except when a frame is stitching between two different
		// stacks.
		if (next_fp <= fp) {
			// This check is verbose; it is basically checking whether this
			// thread is switching between the kernel stack and a non-XNU stack
			// (or between one non-XNU stack and another, as there can be more
			// than one).  If not, then stop the backtrace, as stack switching
			// should be the only reason why the next FP would be lower than
			// the current FP.
			if (!ml_addr_in_non_xnu_stack((uintptr_t)fp) &&
			    !ml_addr_in_non_xnu_stack((uintptr_t)next_fp)) {
				break;
			}
		}
		fp = next_fp;
	}

	if (info_out) {
		backtrace_info_t info = BTI_NONE;
#if __LP64__
		info |= BTI_64_BIT;
#endif
		if (fp != NULL && size_used >= btsize) {
			info |= BTI_TRUNCATED;
		}
		*info_out = info;
	}

	return size_used;
#undef IN_STK_BOUNDS
}

static kern_return_t
interrupted_kernel_pc_fp(uintptr_t *pc, uintptr_t *fp)
{
#if defined(__x86_64__)
	x86_saved_state_t *state;
	bool state_64;
	uint64_t cs;

	state = current_cpu_datap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}

	state_64 = is_saved_state64(state);

	if (state_64) {
		cs = saved_state64(state)->isf.cs;
	} else {
		cs = saved_state32(state)->cs;
	}
	// Return early if a user space thread was interrupted.
	if ((cs & SEL_PL) == SEL_PL_U) {
		return KERN_FAILURE;
	}

	if (state_64) {
		*pc = saved_state64(state)->isf.rip;
		*fp = saved_state64(state)->rbp;
	} else {
		*pc = saved_state32(state)->eip;
		*fp = saved_state32(state)->ebp;
	}

#elif defined(__arm64__)

	struct arm_saved_state *state;

	state = getCpuDatap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}

	// Return early if a user space thread was interrupted.
	if (PSR64_IS_USER(get_saved_state_cpsr(state))) {
		return KERN_FAILURE;
	}

	*pc = ml_get_backtrace_pc(state);
	*fp = get_saved_state_fp(state);

#else // !defined(__arm64__) && !defined(__x86_64__)
#error "unsupported architecture"
#endif // !defined(__arm64__) && !defined(__x86_64__)

	return KERN_SUCCESS;
}

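// Determine the frame to start backtracing from and, when tracing an
// interrupted kernel thread, the PC that was interrupted.  Returns that PC,
// or 0 if there is no interrupted state to report.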
__attribute__((always_inline))
static uintptr_t
_backtrace_preamble(struct backtrace_control *ctl, uintptr_t *start_frame_out)
{
	backtrace_flags_t flags = ctl ? ctl->btc_flags : 0;
	uintptr_t start_frame = ctl ? ctl->btc_frame_addr : 0;
	uintptr_t pc = 0;
	if (flags & BTF_KERN_INTERRUPTED) {
		assert(ml_at_interrupt_context() == TRUE);

		uintptr_t fp;
		kern_return_t kr = interrupted_kernel_pc_fp(&pc, &fp);
		if (kr != KERN_SUCCESS) {
			return 0;
		}
		*start_frame_out = start_frame ?: fp;
	} else if (start_frame == 0) {
		*start_frame_out = (uintptr_t)__builtin_frame_address(0);
	} else {
		*start_frame_out = start_frame;
	}
	return pc;
}

unsigned int __attribute__((noinline))
backtrace(uintptr_t *bt, unsigned int max_frames,
    struct backtrace_control *ctl, backtrace_info_t *info_out)
{
	unsigned int len_adj = 0;
	uintptr_t start_frame = ctl ? ctl->btc_frame_addr : 0;
	uintptr_t pc = _backtrace_preamble(ctl, &start_frame);
	if (pc) {
		bt[0] = pc;
		if (max_frames == 1) {
			return 1;
		}
		bt += 1;
		max_frames -= 1;
		len_adj += 1;
	}

	size_t size = backtrace_internal(BTP_NONE, (uint8_t *)bt,
	    max_frames * sizeof(uintptr_t), (void *)start_frame,
	    ctl ? ctl->btc_addr_offset : 0, info_out);
	// NULL-terminate the list, if space is available.
	unsigned int len = size / sizeof(uintptr_t);
	if (len != max_frames) {
		bt[len] = 0;
	}

	return len + len_adj;
}
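
// Illustrative usage sketch (not part of the original source): capture the
// current kernel stack into a fixed-size buffer.
//
//     uintptr_t frames[48];
//     backtrace_info_t info = BTI_NONE;
//     unsigned int len = backtrace(frames, 48, NULL, &info);
//     if (info & BTI_TRUNCATED) {
//             // frames[] was too small to hold the entire call stack.
//     }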

// Backtrace the current thread's kernel stack as a packed representation.
size_t
backtrace_packed(backtrace_pack_t packing, uint8_t *bt, size_t btsize,
    struct backtrace_control *ctl,
    backtrace_info_t *info_out)
{
	unsigned int size_adj = 0;
	uintptr_t start_frame = ctl ? ctl->btc_frame_addr : 0;
	uintptr_t pc = _backtrace_preamble(ctl, &start_frame);
	if (pc) {
		size_adj = _backtrace_pack_addr(packing, bt, btsize, pc);
		if (size_adj >= btsize) {
			return size_adj;
		}
		btsize -= size_adj;
	}

	// Append the walked frames after any packed preamble PC.
	size_t written_size = backtrace_internal(packing, (uint8_t *)bt + size_adj,
	    btsize, (void *)start_frame, ctl ? ctl->btc_addr_offset : 0, info_out);
	return written_size + size_adj;
}

// Convert an array of addresses to a packed representation.
size_t
backtrace_pack(backtrace_pack_t packing, uint8_t *dst, size_t dst_size,
    const uintptr_t *src, unsigned int src_len)
{
	size_t dst_offset = 0;
	for (unsigned int i = 0; i < src_len; i++) {
		size_t pack_size = _backtrace_pack_addr(packing, dst + dst_offset,
		    dst_size - dst_offset, src[i]);
		if (dst_offset + pack_size >= dst_size) {
			return dst_offset;
		}
		dst_offset += pack_size;
	}
	return dst_offset;
}

// Convert a packed backtrace to an array of addresses.
unsigned int
backtrace_unpack(backtrace_pack_t packing, uintptr_t *dst, unsigned int dst_len,
    const uint8_t *src, size_t src_size)
{
	switch (packing) {
	case BTP_NONE:;
		size_t unpack_size = MIN(dst_len * sizeof(uintptr_t), src_size);
		memmove(dst, src, unpack_size);
		return (unsigned int)(unpack_size / sizeof(uintptr_t));
	case BTP_KERN_OFFSET_32:;
		unsigned int src_len = src_size / sizeof(int32_t);
		unsigned int unpack_len = MIN(src_len, dst_len);
		for (unsigned int i = 0; i < unpack_len; i++) {
			int32_t addr = 0;
			memcpy(&addr, src + i * sizeof(int32_t), sizeof(int32_t));
			dst[i] = vm_kernel_stext + (uintptr_t)addr;
		}
		return unpack_len;
	default:
		panic("backtrace: unknown packing format %d", packing);
	}
}
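
// Illustrative round trip (not part of the original source): pack the current
// stack as 32-bit offsets, then expand it back to full addresses.
//
//     uint8_t packed[48 * sizeof(int32_t)];
//     size_t packed_size = backtrace_packed(BTP_KERN_OFFSET_32, packed,
//         sizeof(packed), NULL, NULL);
//     uintptr_t addrs[48];
//     unsigned int len = backtrace_unpack(BTP_KERN_OFFSET_32, addrs, 48,
//         packed, packed_size);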

static errno_t
_backtrace_copyin(void * __unused ctx, void *dst, user_addr_t src, size_t size)
{
	return copyin((user_addr_t)src, dst, size);
}

errno_t
backtrace_user_copy_error(void *ctx, void *dst, user_addr_t src, size_t size)
{
#pragma unused(ctx, dst, src, size)
	return EFAULT;
}

unsigned int
backtrace_user(uintptr_t *bt, unsigned int max_frames,
    const struct backtrace_control *ctl_in,
    struct backtrace_user_info *info_out)
{
	static const struct backtrace_control ctl_default = {
		.btc_user_copy = _backtrace_copyin,
	};
	const struct backtrace_control *ctl = ctl_in ?: &ctl_default;
	uintptr_t pc = 0, next_fp = 0;
	uintptr_t fp = ctl->btc_frame_addr;
	bool custom_fp = fp != 0;
	int64_t addr_offset = ctl ? ctl->btc_addr_offset : 0;
	vm_map_t map = NULL;
	vm_map_switch_context_t switch_ctx;
	bool switched_map = false;
	unsigned int frame_index = 0;
	int error = 0;
	size_t frame_size = 0;
	bool truncated = false;
	bool user_64 = false;
	bool allow_async = true;
	bool has_async = false;
	uintptr_t async_frame_addr = 0;
	unsigned int async_index = 0;

	backtrace_user_copy_fn copy = ctl->btc_user_copy ?: _backtrace_copyin;
	bool custom_copy = copy != _backtrace_copyin;
	void *ctx = ctl->btc_user_copy_context;

	void *thread = ctl->btc_user_thread;
	void *cur_thread = NULL;
	if (thread == NULL) {
		cur_thread = current_thread();
		thread = cur_thread;
	}
	task_t task = get_threadtask(thread);

	assert(task != NULL);
	assert(bt != NULL);
	assert(max_frames > 0);

	if (!custom_copy) {
		assert(ml_get_interrupts_enabled() == TRUE);
		if (!ml_get_interrupts_enabled()) {
			error = EDEADLK;
		}

		if (cur_thread == NULL) {
			cur_thread = current_thread();
		}
		if (thread != cur_thread) {
			map = get_task_map_reference(task);
			if (map == NULL) {
				error = ENOMEM;
				goto out;
			}
			switched_map = true;
			switch_ctx = vm_map_switch_to(map);
		}
	}

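// Swift's concurrency runtime marks a 64-bit saved frame pointer by setting
// bit 60 when the frame belongs to an async function; the pointer to the
// heap-allocated async context is stored one word below the frame record.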
#define SWIFT_ASYNC_FP_BIT (0x1ULL << 60)
#define SWIFT_ASYNC_FP(FP) (((FP) & SWIFT_ASYNC_FP_BIT) != 0)
#define SWIFT_ASYNC_FP_CLEAR(FP) ((FP) & ~SWIFT_ASYNC_FP_BIT)

#if defined(__x86_64__)

	// Don't allow a malformed user stack to copy arbitrary kernel data.
#define INVALID_USER_FP(FP) ((FP) == 0 || !IS_USERADDR64_CANONICAL((FP)))

	x86_saved_state_t *state = get_user_regs(thread);
	if (!state) {
		error = EINVAL;
		goto out;
	}

	user_64 = is_saved_state64(state);
	if (user_64) {
		pc = saved_state64(state)->isf.rip;
		fp = fp != 0 ? fp : saved_state64(state)->rbp;
	} else {
		pc = saved_state32(state)->eip;
		fp = fp != 0 ? fp : saved_state32(state)->ebp;
	}

#elif defined(__arm64__)

	struct arm_saved_state *state = get_user_regs(thread);
	if (!state) {
		error = EINVAL;
		goto out;
	}

	user_64 = is_saved_state64(state);
	pc = get_saved_state_pc(state);
	fp = fp != 0 ? fp : get_saved_state_fp(state);

	// ARM expects stack frames to be aligned to 16 bytes.
#define INVALID_USER_FP(FP) (((FP) & 0x3UL) != 0UL)

#else // defined(__arm64__) || defined(__x86_64__)
#error "unsupported architecture"
#endif // !defined(__arm64__) && !defined(__x86_64__)

	// Only capture the saved state PC when there's no custom frame pointer
	// to walk.
	if (!ctl || ctl->btc_frame_addr == 0) {
		bt[frame_index++] = pc + addr_offset;
	}

	if (frame_index >= max_frames) {
		goto out;
	}

	if (fp == 0) {
		// If the FP is zeroed, then there's no stack to walk, by design.  This
		// happens for workq threads that are being sent back to user space or
		// during boot-strapping operations on other kinds of threads.
		goto out;
	} else if (INVALID_USER_FP(fp)) {
		// Still capture the PC in this case, but mark the stack as truncated
		// and "faulting."  (Using the frame pointer on a call stack would cause
		// an exception.)
		error = EFAULT;
		truncated = true;
		goto out;
	}

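	// Each frame record is the saved frame pointer followed by the return
	// address, at the pointer width of the traced process.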
	union {
		struct {
			uint64_t fp;
			uint64_t ret;
		} u64;
		struct {
			uint32_t fp;
			uint32_t ret;
		} u32;
	} frame;

	frame_size = 2 * (user_64 ? 8 : 4);

	while (fp != 0 && frame_index < max_frames) {
		error = copy(ctx, (char *)&frame, fp, frame_size);
		if (error) {
			truncated = true;
			goto out;
		}

		// Capture this return address before tripping over any errors finding
		// the next frame to follow.
		uintptr_t ret_addr = user_64 ? frame.u64.ret : frame.u32.ret;
#if defined(HAS_APPLE_PAC)
		// Return addresses are signed under the arm64e ABI, so strip off the
		// auth bits.
		bt[frame_index++] = (uintptr_t)ptrauth_strip((void *)ret_addr,
		    ptrauth_key_return_address) + addr_offset;
#else // defined(HAS_APPLE_PAC)
		bt[frame_index++] = ret_addr + addr_offset;
#endif // !defined(HAS_APPLE_PAC)

		// Find the next frame to follow.
		next_fp = user_64 ? frame.u64.fp : frame.u32.fp;
		bool async_frame = allow_async && SWIFT_ASYNC_FP(next_fp);
		// There is no 32-bit ABI for Swift async call stacks.
		if (user_64 && async_frame) {
			async_index = frame_index - 1;
			// The async context pointer is just below the stack frame.
			user_addr_t async_ctx_ptr = fp - 8;
			user_addr_t async_ctx = 0;
			error = copy(ctx, (char *)&async_ctx, async_ctx_ptr,
			    sizeof(async_ctx));
			if (error) {
				goto out;
			}
#if defined(HAS_APPLE_PAC)
			async_frame_addr = (uintptr_t)ptrauth_strip((void *)async_ctx,
			    ptrauth_key_process_dependent_data);
#else // defined(HAS_APPLE_PAC)
			async_frame_addr = (uintptr_t)async_ctx;
#endif // !defined(HAS_APPLE_PAC)
			has_async = true;
			allow_async = false;
		}
		next_fp = SWIFT_ASYNC_FP_CLEAR(next_fp);
#if defined(HAS_APPLE_PAC)
		next_fp = (uintptr_t)ptrauth_strip((void *)next_fp,
		    ptrauth_key_frame_pointer);
#endif // defined(HAS_APPLE_PAC)
		if (INVALID_USER_FP(next_fp)) {
			break;
		}

		// Stacks grow down; backtracing should be moving to higher addresses,
		// unless a custom frame pointer is provided, in which case an async
		// stack might be walked, which is allocated on the heap in any order.
		if ((next_fp == fp) || (!custom_fp && next_fp < fp)) {
			break;
		}
		fp = next_fp;
	}

out:
	if (switched_map) {
		vm_map_switch_back(switch_ctx);
		vm_map_deallocate(map);
	}

	// NULL-terminate the list, if space is available.
	if (frame_index < max_frames) {
		bt[frame_index] = 0;
	}

	if (info_out) {
		info_out->btui_error = error;
		backtrace_info_t info = user_64 ? BTI_64_BIT : BTI_NONE;
		bool out_of_space = !INVALID_USER_FP(fp) && frame_index == max_frames;
		if (truncated || out_of_space) {
			info |= BTI_TRUNCATED;
		}
		if (out_of_space && error == 0) {
			info_out->btui_next_frame_addr = fp;
		}
		info_out->btui_info = info;
		info_out->btui_async_start_index = async_index;
		info_out->btui_async_frame_addr = async_frame_addr;
	}

	return frame_index;
}
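
// Illustrative usage sketch (not part of the original source): walk the
// current thread's user stack, checking for truncation or copy faults.
//
//     uintptr_t frames[64];
//     struct backtrace_user_info info = { 0 };
//     unsigned int len = backtrace_user(frames, 64, NULL, &info);
//     if (info.btui_error != 0 || (info.btui_info & BTI_TRUNCATED)) {
//             // The walk faulted or ran out of space in frames[].
//     }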
605