xref: /xnu-12377.41.6/osfmk/kperf/callstack.c (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828)
1*bbb1b6f9SApple OSS Distributions /*
2*bbb1b6f9SApple OSS Distributions  * Copyright (c) 2011-2022 Apple Computer, Inc. All rights reserved.
3*bbb1b6f9SApple OSS Distributions  *
4*bbb1b6f9SApple OSS Distributions  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5*bbb1b6f9SApple OSS Distributions  *
6*bbb1b6f9SApple OSS Distributions  * This file contains Original Code and/or Modifications of Original Code
7*bbb1b6f9SApple OSS Distributions  * as defined in and that are subject to the Apple Public Source License
8*bbb1b6f9SApple OSS Distributions  * Version 2.0 (the 'License'). You may not use this file except in
9*bbb1b6f9SApple OSS Distributions  * compliance with the License. The rights granted to you under the License
10*bbb1b6f9SApple OSS Distributions  * may not be used to create, or enable the creation or redistribution of,
11*bbb1b6f9SApple OSS Distributions  * unlawful or unlicensed copies of an Apple operating system, or to
12*bbb1b6f9SApple OSS Distributions  * circumvent, violate, or enable the circumvention or violation of, any
13*bbb1b6f9SApple OSS Distributions  * terms of an Apple operating system software license agreement.
14*bbb1b6f9SApple OSS Distributions  *
15*bbb1b6f9SApple OSS Distributions  * Please obtain a copy of the License at
16*bbb1b6f9SApple OSS Distributions  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17*bbb1b6f9SApple OSS Distributions  *
18*bbb1b6f9SApple OSS Distributions  * The Original Code and all software distributed under the License are
19*bbb1b6f9SApple OSS Distributions  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20*bbb1b6f9SApple OSS Distributions  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21*bbb1b6f9SApple OSS Distributions  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22*bbb1b6f9SApple OSS Distributions  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23*bbb1b6f9SApple OSS Distributions  * Please see the License for the specific language governing rights and
24*bbb1b6f9SApple OSS Distributions  * limitations under the License.
25*bbb1b6f9SApple OSS Distributions  *
26*bbb1b6f9SApple OSS Distributions  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27*bbb1b6f9SApple OSS Distributions  */
28*bbb1b6f9SApple OSS Distributions 
29*bbb1b6f9SApple OSS Distributions /* Collect kernel callstacks */
30*bbb1b6f9SApple OSS Distributions 
31*bbb1b6f9SApple OSS Distributions #include <mach/mach_types.h>
32*bbb1b6f9SApple OSS Distributions #include <kern/thread.h>
33*bbb1b6f9SApple OSS Distributions #include <kern/backtrace.h>
34*bbb1b6f9SApple OSS Distributions #include <kern/cambria_layout.h>
35*bbb1b6f9SApple OSS Distributions #include <vm/vm_map_xnu.h>
36*bbb1b6f9SApple OSS Distributions #include <kperf/buffer.h>
37*bbb1b6f9SApple OSS Distributions #include <kperf/context.h>
38*bbb1b6f9SApple OSS Distributions #include <kperf/callstack.h>
39*bbb1b6f9SApple OSS Distributions #include <kperf/ast.h>
40*bbb1b6f9SApple OSS Distributions #include <sys/errno.h>
41*bbb1b6f9SApple OSS Distributions #include <mach/exclaves.h>
42*bbb1b6f9SApple OSS Distributions 
43*bbb1b6f9SApple OSS Distributions #if defined(__arm64__)
44*bbb1b6f9SApple OSS Distributions #include <arm/cpu_data.h>
45*bbb1b6f9SApple OSS Distributions #include <arm/cpu_data_internal.h>
46*bbb1b6f9SApple OSS Distributions #endif
47*bbb1b6f9SApple OSS Distributions 
static void
callstack_fixup_user(struct kp_ucallstack *cs, thread_t thread)
{
	/*
	 * Append one synthetic frame to a user callstack for the benefit of
	 * symbolicators: the word at the user stack pointer (x86_64) or the
	 * saved link register (arm64).  On any failure the appended frame
	 * is 0.
	 */
	uint64_t fixup_val = 0;
	/* One slot must remain free for the fixup frame appended at "out". */
	assert(cs->kpuc_nframes < MAX_UCALLSTACK_FRAMES);

#if defined(__x86_64__)
	user_addr_t sp_user;
	bool user_64;
	x86_saved_state_t *state;

	state = get_user_regs(thread);
	if (!state) {
		goto out;
	}

	user_64 = is_saved_state64(state);
	if (user_64) {
		sp_user = saved_state64(state)->isf.rsp;
	} else {
		sp_user = saved_state32(state)->uesp;
	}

	/*
	 * Read the word at the user stack pointer.  copyin only operates on
	 * the current thread's address space, so other threads go through
	 * vm_map_read_user against their task's map.  Errors are ignored,
	 * leaving fixup_val at 0.
	 */
	if (thread == current_thread()) {
		(void)copyin(sp_user, (char *)&fixup_val,
		    user_64 ? sizeof(uint64_t) : sizeof(uint32_t));
	} else {
		(void)vm_map_read_user(get_task_map(get_threadtask(thread)), sp_user,
		    &fixup_val, user_64 ? sizeof(uint64_t) : sizeof(uint32_t));
	}

#elif defined(__arm64__)

	struct arm_saved_state *state = get_user_regs(thread);
	if (!state) {
		goto out;
	}

	/* encode thumb mode into low bit of PC */
	if (is_saved_state32(state) && (get_saved_state_cpsr(state) & PSR_TF)) {
		cs->kpuc_frames[0] |= 1ULL;
	}


	fixup_val = get_saved_state_lr(state);

#else
#error "callstack_fixup_user: unsupported architecture"
#endif

out:
	cs->kpuc_frames[cs->kpuc_nframes++] = fixup_val;
}
101*bbb1b6f9SApple OSS Distributions 
102*bbb1b6f9SApple OSS Distributions #if defined(__x86_64__)
103*bbb1b6f9SApple OSS Distributions 
104*bbb1b6f9SApple OSS Distributions __attribute__((used))
105*bbb1b6f9SApple OSS Distributions static kern_return_t
interrupted_kernel_sp_value(uintptr_t * sp_val)106*bbb1b6f9SApple OSS Distributions interrupted_kernel_sp_value(uintptr_t *sp_val)
107*bbb1b6f9SApple OSS Distributions {
108*bbb1b6f9SApple OSS Distributions 	x86_saved_state_t *state;
109*bbb1b6f9SApple OSS Distributions 	uintptr_t sp;
110*bbb1b6f9SApple OSS Distributions 	bool state_64;
111*bbb1b6f9SApple OSS Distributions 	uint64_t cs;
112*bbb1b6f9SApple OSS Distributions 	uintptr_t top, bottom;
113*bbb1b6f9SApple OSS Distributions 
114*bbb1b6f9SApple OSS Distributions 	state = current_cpu_datap()->cpu_int_state;
115*bbb1b6f9SApple OSS Distributions 	if (!state) {
116*bbb1b6f9SApple OSS Distributions 		return KERN_FAILURE;
117*bbb1b6f9SApple OSS Distributions 	}
118*bbb1b6f9SApple OSS Distributions 
119*bbb1b6f9SApple OSS Distributions 	state_64 = is_saved_state64(state);
120*bbb1b6f9SApple OSS Distributions 
121*bbb1b6f9SApple OSS Distributions 	if (state_64) {
122*bbb1b6f9SApple OSS Distributions 		cs = saved_state64(state)->isf.cs;
123*bbb1b6f9SApple OSS Distributions 	} else {
124*bbb1b6f9SApple OSS Distributions 		cs = saved_state32(state)->cs;
125*bbb1b6f9SApple OSS Distributions 	}
126*bbb1b6f9SApple OSS Distributions 	/* return early if interrupted a thread in user space */
127*bbb1b6f9SApple OSS Distributions 	if ((cs & SEL_PL) == SEL_PL_U) {
128*bbb1b6f9SApple OSS Distributions 		return KERN_FAILURE;
129*bbb1b6f9SApple OSS Distributions 	}
130*bbb1b6f9SApple OSS Distributions 
131*bbb1b6f9SApple OSS Distributions 	if (state_64) {
132*bbb1b6f9SApple OSS Distributions 		sp = saved_state64(state)->isf.rsp;
133*bbb1b6f9SApple OSS Distributions 	} else {
134*bbb1b6f9SApple OSS Distributions 		sp = saved_state32(state)->uesp;
135*bbb1b6f9SApple OSS Distributions 	}
136*bbb1b6f9SApple OSS Distributions 
137*bbb1b6f9SApple OSS Distributions 	/* make sure the stack pointer is pointing somewhere in this stack */
138*bbb1b6f9SApple OSS Distributions 	bottom = current_thread()->kernel_stack;
139*bbb1b6f9SApple OSS Distributions 	top = bottom + kernel_stack_size;
140*bbb1b6f9SApple OSS Distributions 	if (sp >= bottom && sp < top) {
141*bbb1b6f9SApple OSS Distributions 		return KERN_FAILURE;
142*bbb1b6f9SApple OSS Distributions 	}
143*bbb1b6f9SApple OSS Distributions 
144*bbb1b6f9SApple OSS Distributions 	*sp_val = *(uintptr_t *)sp;
145*bbb1b6f9SApple OSS Distributions 	return KERN_SUCCESS;
146*bbb1b6f9SApple OSS Distributions }
147*bbb1b6f9SApple OSS Distributions 
148*bbb1b6f9SApple OSS Distributions #elif defined(__arm64__)
149*bbb1b6f9SApple OSS Distributions 
150*bbb1b6f9SApple OSS Distributions __attribute__((used))
151*bbb1b6f9SApple OSS Distributions static kern_return_t
interrupted_kernel_lr(uintptr_t * lr)152*bbb1b6f9SApple OSS Distributions interrupted_kernel_lr(uintptr_t *lr)
153*bbb1b6f9SApple OSS Distributions {
154*bbb1b6f9SApple OSS Distributions 	struct arm_saved_state *state;
155*bbb1b6f9SApple OSS Distributions 
156*bbb1b6f9SApple OSS Distributions 	state = getCpuDatap()->cpu_int_state;
157*bbb1b6f9SApple OSS Distributions 
158*bbb1b6f9SApple OSS Distributions 	/* return early if interrupted a thread in user space */
159*bbb1b6f9SApple OSS Distributions 	if (PSR64_IS_USER(get_saved_state_cpsr(state))) {
160*bbb1b6f9SApple OSS Distributions 		return KERN_FAILURE;
161*bbb1b6f9SApple OSS Distributions 	}
162*bbb1b6f9SApple OSS Distributions 
163*bbb1b6f9SApple OSS Distributions 	*lr = get_saved_state_lr(state);
164*bbb1b6f9SApple OSS Distributions 	return KERN_SUCCESS;
165*bbb1b6f9SApple OSS Distributions }
166*bbb1b6f9SApple OSS Distributions #else /* defined(__arm64__) */
167*bbb1b6f9SApple OSS Distributions #error "interrupted_kernel_{sp,lr}: unsupported architecture"
168*bbb1b6f9SApple OSS Distributions #endif /* !defined(__arm64__) */
169*bbb1b6f9SApple OSS Distributions 
170*bbb1b6f9SApple OSS Distributions 
static void
callstack_fixup_interrupted(struct kp_kcallstack *cs)
{
	/*
	 * Append one synthetic frame to an interrupt-time kernel callstack:
	 * the interrupted context's stack-pointer target (x86_64) or link
	 * register (arm64), for the benefit of symbolicators.
	 */
	uintptr_t fixup_val = 0;
	/* One slot must remain free for the fixup frame. */
	assert(cs->kpkc_nframes < MAX_KCALLSTACK_FRAMES);

	/*
	 * Only provide arbitrary data on development or debug kernels.
	 */
#if DEVELOPMENT || DEBUG
#if defined(__x86_64__)
	(void)interrupted_kernel_sp_value(&fixup_val);
#elif defined(__arm64__)
	(void)interrupted_kernel_lr(&fixup_val);
#endif /* defined(__x86_64__) */
#endif /* DEVELOPMENT || DEBUG */

	assert(cs->kpkc_flags & CALLSTACK_KERNEL);
	/* On release kernels, or on any failure above, the frame is 0. */
	cs->kpkc_frames[cs->kpkc_nframes++] = fixup_val;
}
191*bbb1b6f9SApple OSS Distributions 
192*bbb1b6f9SApple OSS Distributions void
kperf_continuation_sample(struct kp_kcallstack * cs,struct kperf_context * context)193*bbb1b6f9SApple OSS Distributions kperf_continuation_sample(struct kp_kcallstack *cs, struct kperf_context *context)
194*bbb1b6f9SApple OSS Distributions {
195*bbb1b6f9SApple OSS Distributions 	thread_t thread;
196*bbb1b6f9SApple OSS Distributions 
197*bbb1b6f9SApple OSS Distributions 	assert(cs != NULL);
198*bbb1b6f9SApple OSS Distributions 	assert(context != NULL);
199*bbb1b6f9SApple OSS Distributions 
200*bbb1b6f9SApple OSS Distributions 	thread = context->cur_thread;
201*bbb1b6f9SApple OSS Distributions 	assert(thread != NULL);
202*bbb1b6f9SApple OSS Distributions 	assert(thread->continuation != NULL);
203*bbb1b6f9SApple OSS Distributions 
204*bbb1b6f9SApple OSS Distributions 	cs->kpkc_flags = CALLSTACK_CONTINUATION | CALLSTACK_VALID | CALLSTACK_KERNEL;
205*bbb1b6f9SApple OSS Distributions #ifdef __LP64__
206*bbb1b6f9SApple OSS Distributions 	cs->kpkc_flags |= CALLSTACK_64BIT;
207*bbb1b6f9SApple OSS Distributions #endif
208*bbb1b6f9SApple OSS Distributions 
209*bbb1b6f9SApple OSS Distributions 	cs->kpkc_nframes = 1;
210*bbb1b6f9SApple OSS Distributions 	cs->kpkc_frames[0] = VM_KERNEL_UNSLIDE(thread->continuation);
211*bbb1b6f9SApple OSS Distributions }
212*bbb1b6f9SApple OSS Distributions 
213*bbb1b6f9SApple OSS Distributions void
kperf_backtrace_sample(struct kp_kcallstack * cs,struct kperf_context * context)214*bbb1b6f9SApple OSS Distributions kperf_backtrace_sample(struct kp_kcallstack *cs, struct kperf_context *context)
215*bbb1b6f9SApple OSS Distributions {
216*bbb1b6f9SApple OSS Distributions 	assert(cs != NULL);
217*bbb1b6f9SApple OSS Distributions 	assert(context != NULL);
218*bbb1b6f9SApple OSS Distributions 	assert(context->cur_thread == current_thread());
219*bbb1b6f9SApple OSS Distributions 
220*bbb1b6f9SApple OSS Distributions 	cs->kpkc_flags = CALLSTACK_KERNEL | CALLSTACK_KERNEL_WORDS;
221*bbb1b6f9SApple OSS Distributions #ifdef __LP64__
222*bbb1b6f9SApple OSS Distributions 	cs->kpkc_flags |= CALLSTACK_64BIT;
223*bbb1b6f9SApple OSS Distributions #endif
224*bbb1b6f9SApple OSS Distributions 
225*bbb1b6f9SApple OSS Distributions 	BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_START, 1);
226*bbb1b6f9SApple OSS Distributions 
227*bbb1b6f9SApple OSS Distributions 	backtrace_info_t btinfo = BTI_NONE;
228*bbb1b6f9SApple OSS Distributions 	struct backtrace_control ctl = {
229*bbb1b6f9SApple OSS Distributions 		.btc_frame_addr = (uintptr_t)context->starting_fp,
230*bbb1b6f9SApple OSS Distributions 	};
231*bbb1b6f9SApple OSS Distributions 	cs->kpkc_nframes = backtrace(cs->kpkc_word_frames, cs->kpkc_nframes - 1,
232*bbb1b6f9SApple OSS Distributions 	    &ctl, &btinfo);
233*bbb1b6f9SApple OSS Distributions 	if (cs->kpkc_nframes > 0) {
234*bbb1b6f9SApple OSS Distributions 		cs->kpkc_flags |= CALLSTACK_VALID;
235*bbb1b6f9SApple OSS Distributions 
236*bbb1b6f9SApple OSS Distributions 		cs->kpkc_exclaves_offset = 0;
237*bbb1b6f9SApple OSS Distributions #if CONFIG_EXCLAVES
238*bbb1b6f9SApple OSS Distributions 		if ((context->cur_thread->th_exclaves_state & TH_EXCLAVES_RPC) != 0) {
239*bbb1b6f9SApple OSS Distributions 			cs->kpkc_exclaves_offset = exclaves_stack_offset(cs->kpkc_word_frames, cs->kpkc_nframes, true);
240*bbb1b6f9SApple OSS Distributions 		}
241*bbb1b6f9SApple OSS Distributions #endif /* CONFIG_EXCLAVES */
242*bbb1b6f9SApple OSS Distributions 
243*bbb1b6f9SApple OSS Distributions 		/*
244*bbb1b6f9SApple OSS Distributions 		 * Fake the value pointed to by the stack pointer or the link
245*bbb1b6f9SApple OSS Distributions 		 * register for symbolicators.
246*bbb1b6f9SApple OSS Distributions 		 */
247*bbb1b6f9SApple OSS Distributions 		cs->kpkc_word_frames[cs->kpkc_nframes + 1] = 0;
248*bbb1b6f9SApple OSS Distributions 		cs->kpkc_nframes += 1;
249*bbb1b6f9SApple OSS Distributions 	}
250*bbb1b6f9SApple OSS Distributions 	if ((btinfo & BTI_TRUNCATED)) {
251*bbb1b6f9SApple OSS Distributions 		cs->kpkc_flags |= CALLSTACK_TRUNCATED;
252*bbb1b6f9SApple OSS Distributions 	}
253*bbb1b6f9SApple OSS Distributions 
254*bbb1b6f9SApple OSS Distributions 	BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_END, cs->kpkc_nframes);
255*bbb1b6f9SApple OSS Distributions }
256*bbb1b6f9SApple OSS Distributions 
257*bbb1b6f9SApple OSS Distributions kern_return_t chudxnu_thread_get_callstack64_kperf(thread_t thread,
258*bbb1b6f9SApple OSS Distributions     uint64_t *callStack, mach_msg_type_number_t *count,
259*bbb1b6f9SApple OSS Distributions     boolean_t user_only);
260*bbb1b6f9SApple OSS Distributions 
void
kperf_kcallstack_sample(struct kp_kcallstack *cs, struct kperf_context *context)
{
	/*
	 * Capture the kernel callstack of context->cur_thread into cs.  On
	 * entry, cs->kpkc_nframes holds the capacity of the frame buffer; on
	 * return it holds the number of frames captured (0 on failure).
	 */
	thread_t thread;

	assert(cs != NULL);
	assert(context != NULL);
	assert(cs->kpkc_nframes <= MAX_KCALLSTACK_FRAMES);

	thread = context->cur_thread;
	assert(thread != NULL);

	BUF_INFO(PERF_CS_KSAMPLE | DBG_FUNC_START, (uintptr_t)thread_tid(thread),
	    cs->kpkc_nframes);

	cs->kpkc_flags = CALLSTACK_KERNEL;
#ifdef __LP64__
	cs->kpkc_flags |= CALLSTACK_64BIT;
#endif

	if (ml_at_interrupt_context()) {
		/*
		 * At interrupt context only the interrupted (current) thread
		 * can be sampled: walk its stack directly into word-sized
		 * frames, reserving one slot for the fixup frame.
		 */
		assert(thread == current_thread());
		cs->kpkc_flags |= CALLSTACK_KERNEL_WORDS;
		backtrace_info_t btinfo = BTI_NONE;
		struct backtrace_control ctl = { .btc_flags = BTF_KERN_INTERRUPTED, };
		cs->kpkc_nframes = backtrace(cs->kpkc_word_frames, cs->kpkc_nframes - 1,
		    &ctl, &btinfo);
		if (cs->kpkc_nframes != 0) {
			callstack_fixup_interrupted(cs);
		}
		if ((btinfo & BTI_TRUNCATED)) {
			cs->kpkc_flags |= CALLSTACK_TRUNCATED;
		}

		cs->kpkc_exclaves_offset = 0;
#if CONFIG_EXCLAVES
		/* Note where the stack crosses into an exclaves RPC, if at all. */
		if ((thread->th_exclaves_state & TH_EXCLAVES_RPC) != 0) {
			cs->kpkc_exclaves_offset = exclaves_stack_offset(cs->kpkc_word_frames, cs->kpkc_nframes, true);
		}
#endif /* CONFIG_EXCLAVES */
	} else {
		/*
		 * Rely on legacy CHUD backtracer to backtrace kernel stacks on
		 * other threads.
		 */
		kern_return_t kr;
		kr = chudxnu_thread_get_callstack64_kperf(thread,
		    cs->kpkc_frames, &cs->kpkc_nframes, FALSE);
		if (kr == KERN_SUCCESS) {
			cs->kpkc_flags |= CALLSTACK_VALID;
		} else if (kr == KERN_RESOURCE_SHORTAGE) {
			/* Frame buffer was exhausted: keep what fit, mark truncated. */
			cs->kpkc_flags |= CALLSTACK_VALID;
			cs->kpkc_flags |= CALLSTACK_TRUNCATED;
		} else {
			cs->kpkc_nframes = 0;
		}
	}

	/*
	 * NOTE(review): the interrupt-context path above never sets
	 * CALLSTACK_VALID, so it logs ERR_GETSTACK even when frames were
	 * captured — presumably CALLSTACK_KERNEL_WORDS stands in for
	 * validity downstream; confirm with consumers.
	 */
	if (!(cs->kpkc_flags & CALLSTACK_VALID)) {
		BUF_INFO(PERF_CS_ERROR, ERR_GETSTACK);
	}

	BUF_INFO(PERF_CS_KSAMPLE | DBG_FUNC_END, (uintptr_t)thread_tid(thread),
	    cs->kpkc_flags, cs->kpkc_nframes);
}
326*bbb1b6f9SApple OSS Distributions 
void
kperf_ucallstack_sample(struct kp_ucallstack *cs, struct kperf_context *context)
{
	/*
	 * Capture the user callstack of context->cur_thread into cs.  On
	 * entry, cs->kpuc_nframes holds the capacity of the frame buffer; on
	 * return it holds the number of synchronous frames captured
	 * (0 on failure).  Must run with interrupts enabled since the user
	 * backtracer may fault in user pages.
	 */
	assert(ml_get_interrupts_enabled() == TRUE);

	thread_t thread = context->cur_thread;
	assert(thread != NULL);

	BUF_INFO(PERF_CS_USAMPLE | DBG_FUNC_START,
	    (uintptr_t)thread_tid(thread), cs->kpuc_nframes);

	struct backtrace_user_info btinfo = BTUINFO_INIT;
	/*
	 * Leave space for the fixup information.
	 */
	unsigned int maxnframes = cs->kpuc_nframes - 1;
	struct backtrace_control ctl = { .btc_user_thread = thread, };
	unsigned int nframes = backtrace_user(cs->kpuc_frames, maxnframes, &ctl,
	    &btinfo);
	cs->kpuc_nframes = MIN(maxnframes, nframes);

	/* NOTE(review): flags are OR-ed in, assuming the caller zeroed kpuc_flags. */
	cs->kpuc_flags |= CALLSTACK_KERNEL_WORDS |
	    ((btinfo.btui_info & BTI_TRUNCATED) ? CALLSTACK_TRUNCATED : 0) |
	    ((btinfo.btui_info & BTI_64_BIT) ? CALLSTACK_64BIT : 0);

	/*
	 * Ignore EFAULT to get as much of the stack as possible.
	 */
	if (btinfo.btui_error == 0 || btinfo.btui_error == EFAULT) {
		callstack_fixup_user(cs, thread);
		cs->kpuc_flags |= CALLSTACK_VALID;

		/*
		 * If the backtracer reported an async continuation frame,
		 * walk that stack too, appending its frames into whatever
		 * buffer space remains.
		 */
		if (cs->kpuc_nframes < maxnframes &&
		    btinfo.btui_async_frame_addr != 0) {
			cs->kpuc_async_index = btinfo.btui_async_start_index;
			ctl.btc_frame_addr = btinfo.btui_async_frame_addr;
			ctl.btc_addr_offset = BTCTL_ASYNC_ADDR_OFFSET;
			maxnframes -= cs->kpuc_nframes;
			btinfo = BTUINFO_INIT;
			unsigned int nasync_frames = backtrace_user(
			    &cs->kpuc_frames[cs->kpuc_nframes], maxnframes, &ctl, &btinfo);
			if (btinfo.btui_info & BTI_TRUNCATED) {
				cs->kpuc_flags |= CALLSTACK_TRUNCATED;
			}
			/* Same EFAULT tolerance as the synchronous walk above. */
			if (btinfo.btui_error == 0 || btinfo.btui_error == EFAULT) {
				cs->kpuc_flags |= CALLSTACK_HAS_ASYNC;
				cs->kpuc_async_nframes = nasync_frames;
			}
		}
	} else {
		cs->kpuc_nframes = 0;
		BUF_INFO(PERF_CS_ERROR, ERR_GETSTACK, btinfo.btui_error);
	}

	BUF_INFO(PERF_CS_USAMPLE | DBG_FUNC_END, (uintptr_t)thread_tid(thread),
	    cs->kpuc_flags, cs->kpuc_nframes);
}
384*bbb1b6f9SApple OSS Distributions 
385*bbb1b6f9SApple OSS Distributions static inline uintptr_t
scrub_word(uintptr_t * bt,int n_frames,int frame,bool kern)386*bbb1b6f9SApple OSS Distributions scrub_word(uintptr_t *bt, int n_frames, int frame, bool kern)
387*bbb1b6f9SApple OSS Distributions {
388*bbb1b6f9SApple OSS Distributions 	if (frame < n_frames) {
389*bbb1b6f9SApple OSS Distributions 		if (kern) {
390*bbb1b6f9SApple OSS Distributions 			return VM_KERNEL_UNSLIDE(bt[frame]);
391*bbb1b6f9SApple OSS Distributions 		} else {
392*bbb1b6f9SApple OSS Distributions 			return bt[frame];
393*bbb1b6f9SApple OSS Distributions 		}
394*bbb1b6f9SApple OSS Distributions 	} else {
395*bbb1b6f9SApple OSS Distributions 		return 0;
396*bbb1b6f9SApple OSS Distributions 	}
397*bbb1b6f9SApple OSS Distributions }
398*bbb1b6f9SApple OSS Distributions 
399*bbb1b6f9SApple OSS Distributions static inline uintptr_t
scrub_frame(uint64_t * bt,int n_frames,int frame)400*bbb1b6f9SApple OSS Distributions scrub_frame(uint64_t *bt, int n_frames, int frame)
401*bbb1b6f9SApple OSS Distributions {
402*bbb1b6f9SApple OSS Distributions 	if (frame < n_frames) {
403*bbb1b6f9SApple OSS Distributions 		return (uintptr_t)(bt[frame]);
404*bbb1b6f9SApple OSS Distributions 	} else {
405*bbb1b6f9SApple OSS Distributions 		return 0;
406*bbb1b6f9SApple OSS Distributions 	}
407*bbb1b6f9SApple OSS Distributions }
408*bbb1b6f9SApple OSS Distributions 
409*bbb1b6f9SApple OSS Distributions static void
callstack_log(uint32_t hdrid,uint32_t dataid,void * vframes,unsigned int nframes,unsigned int flags,unsigned int async_index,unsigned int async_nframes)410*bbb1b6f9SApple OSS Distributions callstack_log(uint32_t hdrid, uint32_t dataid, void *vframes,
411*bbb1b6f9SApple OSS Distributions     unsigned int nframes, unsigned int flags, unsigned int async_index,
412*bbb1b6f9SApple OSS Distributions     unsigned int async_nframes)
413*bbb1b6f9SApple OSS Distributions {
414*bbb1b6f9SApple OSS Distributions 	BUF_VERB(PERF_CS_LOG | DBG_FUNC_START, flags, nframes);
415*bbb1b6f9SApple OSS Distributions 	BUF_DATA(hdrid, flags, nframes - async_nframes, async_index, async_nframes);
416*bbb1b6f9SApple OSS Distributions 
417*bbb1b6f9SApple OSS Distributions 	unsigned int nevts = nframes / 4;
418*bbb1b6f9SApple OSS Distributions 	unsigned int ovf = nframes % 4;
419*bbb1b6f9SApple OSS Distributions 	if (ovf != 0) {
420*bbb1b6f9SApple OSS Distributions 		nevts++;
421*bbb1b6f9SApple OSS Distributions 	}
422*bbb1b6f9SApple OSS Distributions 
423*bbb1b6f9SApple OSS Distributions 	bool kern = flags & CALLSTACK_KERNEL;
424*bbb1b6f9SApple OSS Distributions 
425*bbb1b6f9SApple OSS Distributions 	if (flags & CALLSTACK_KERNEL_WORDS) {
426*bbb1b6f9SApple OSS Distributions 		uintptr_t *frames = vframes;
427*bbb1b6f9SApple OSS Distributions 		for (unsigned int i = 0; i < nevts; i++) {
428*bbb1b6f9SApple OSS Distributions 			unsigned int j = i * 4;
429*bbb1b6f9SApple OSS Distributions 			BUF_DATA(dataid,
430*bbb1b6f9SApple OSS Distributions 			    scrub_word(frames, nframes, j + 0, kern),
431*bbb1b6f9SApple OSS Distributions 			    scrub_word(frames, nframes, j + 1, kern),
432*bbb1b6f9SApple OSS Distributions 			    scrub_word(frames, nframes, j + 2, kern),
433*bbb1b6f9SApple OSS Distributions 			    scrub_word(frames, nframes, j + 3, kern));
434*bbb1b6f9SApple OSS Distributions 		}
435*bbb1b6f9SApple OSS Distributions 	} else {
436*bbb1b6f9SApple OSS Distributions 		for (unsigned int i = 0; i < nevts; i++) {
437*bbb1b6f9SApple OSS Distributions 			uint64_t *frames = vframes;
438*bbb1b6f9SApple OSS Distributions 			unsigned int j = i * 4;
439*bbb1b6f9SApple OSS Distributions 			BUF_DATA(dataid,
440*bbb1b6f9SApple OSS Distributions 			    scrub_frame(frames, nframes, j + 0),
441*bbb1b6f9SApple OSS Distributions 			    scrub_frame(frames, nframes, j + 1),
442*bbb1b6f9SApple OSS Distributions 			    scrub_frame(frames, nframes, j + 2),
443*bbb1b6f9SApple OSS Distributions 			    scrub_frame(frames, nframes, j + 3));
444*bbb1b6f9SApple OSS Distributions 		}
445*bbb1b6f9SApple OSS Distributions 	}
446*bbb1b6f9SApple OSS Distributions 
447*bbb1b6f9SApple OSS Distributions 	BUF_VERB(PERF_CS_LOG | DBG_FUNC_END, flags, nframes);
448*bbb1b6f9SApple OSS Distributions }
449*bbb1b6f9SApple OSS Distributions 
void
kperf_kcallstack_log(struct kp_kcallstack *cs)
{
	/*
	 * Emit a captured kernel callstack to the kperf trace buffer.
	 * NOTE(review): kpkc_frames is passed even for word-sized stacks —
	 * presumably it aliases kpkc_word_frames (a union); callstack_log
	 * dispatches on CALLSTACK_KERNEL_WORDS.  Confirm against the struct.
	 */
	callstack_log(PERF_CS_KHDR, PERF_CS_KDATA, cs->kpkc_frames,
	    cs->kpkc_nframes, cs->kpkc_flags, 0, 0);

	/* A non-zero offset marks where the stack crossed into exclaves. */
	if (cs->kpkc_exclaves_offset != 0) {
		BUF_DATA(PERF_CS_KEXOFFSET, cs->kpkc_exclaves_offset);
	}
}
460*bbb1b6f9SApple OSS Distributions 
void
kperf_ucallstack_log(struct kp_ucallstack *cs)
{
	/*
	 * Emit a captured user callstack, including any async continuation
	 * frames stored after the synchronous frames by
	 * kperf_ucallstack_sample.
	 */
	callstack_log(PERF_CS_UHDR, PERF_CS_UDATA, cs->kpuc_frames,
	    cs->kpuc_nframes + cs->kpuc_async_nframes, cs->kpuc_flags,
	    cs->kpuc_async_index, cs->kpuc_async_nframes);
}
468*bbb1b6f9SApple OSS Distributions 
469*bbb1b6f9SApple OSS Distributions #if CONFIG_EXCLAVES
void
kperf_excallstack_log(const stackshottypes_ipcstackentry_s *ipcstack)
{
	/*
	 * Emit the exclaves portion of a thread's callstack from a stackshot
	 * IPC stack entry.  Frames beyond MAX_EXCALLSTACK_FRAMES are dropped
	 * and the stack is marked truncated.
	 */
	__block unsigned int nframes = 0;
	__block unsigned int flags = CALLSTACK_VALID;
	uint64_t frames[MAX_EXCALLSTACK_FRAMES] = {};
	/* Blocks cannot capture arrays by reference; capture a pointer. */
	uint64_t *frames_block = frames;

	BUF_DATA(PERF_CS_EXSTACK, ipcstack->asid);

	if (ipcstack->stacktrace.has_value) {
		address__v_visit(&ipcstack->stacktrace.value, ^(size_t i, const stackshottypes_address_s item) {
			if (i >= MAX_EXCALLSTACK_FRAMES) {
				/* Keep visiting, but record that frames were lost. */
				flags |= CALLSTACK_TRUNCATED;
				return;
			}
			frames_block[i] = item;
			nframes += 1;
		});
		callstack_log(PERF_CS_EXHDR, PERF_CS_EXDATA, frames, nframes, flags, 0, 0);
	}
}
492*bbb1b6f9SApple OSS Distributions 
493*bbb1b6f9SApple OSS Distributions bool
kperf_exclave_callstack_pend(struct kperf_context * context,unsigned int actionid)494*bbb1b6f9SApple OSS Distributions kperf_exclave_callstack_pend(struct kperf_context *context, unsigned int actionid)
495*bbb1b6f9SApple OSS Distributions {
496*bbb1b6f9SApple OSS Distributions 	if ((context->cur_thread->th_exclaves_state & TH_EXCLAVES_RPC)
497*bbb1b6f9SApple OSS Distributions 	    && (os_atomic_load(&context->cur_thread->th_exclaves_inspection_state, relaxed) & TH_EXCLAVES_INSPECTION_NOINSPECT) == 0) {
498*bbb1b6f9SApple OSS Distributions 		os_atomic_or(&context->cur_thread->th_exclaves_inspection_state, TH_EXCLAVES_INSPECTION_KPERF, relaxed);
499*bbb1b6f9SApple OSS Distributions 		context->cur_thread->kperf_exclaves_ast |= T_KPERF_SET_ACTIONID(actionid);
500*bbb1b6f9SApple OSS Distributions 		return true;
501*bbb1b6f9SApple OSS Distributions 	}
502*bbb1b6f9SApple OSS Distributions 	return false;
503*bbb1b6f9SApple OSS Distributions }
504*bbb1b6f9SApple OSS Distributions #endif /* CONFIG_EXCLAVES */
505*bbb1b6f9SApple OSS Distributions 
506*bbb1b6f9SApple OSS Distributions int
kperf_ucallstack_pend(struct kperf_context * context,uint32_t depth,unsigned int actionid)507*bbb1b6f9SApple OSS Distributions kperf_ucallstack_pend(struct kperf_context * context, uint32_t depth,
508*bbb1b6f9SApple OSS Distributions     unsigned int actionid)
509*bbb1b6f9SApple OSS Distributions {
510*bbb1b6f9SApple OSS Distributions 	if (depth < 2) {
511*bbb1b6f9SApple OSS Distributions 		panic("HUH");
512*bbb1b6f9SApple OSS Distributions 	}
513*bbb1b6f9SApple OSS Distributions 	kperf_ast_set_callstack_depth(context->cur_thread, depth);
514*bbb1b6f9SApple OSS Distributions 	return kperf_ast_pend(context->cur_thread, T_KPERF_AST_CALLSTACK,
515*bbb1b6f9SApple OSS Distributions 	           actionid);
516*bbb1b6f9SApple OSS Distributions }
517*bbb1b6f9SApple OSS Distributions 
518*bbb1b6f9SApple OSS Distributions static kern_return_t
chudxnu_kern_read(void * dstaddr,vm_offset_t srcaddr,vm_size_t size)519*bbb1b6f9SApple OSS Distributions chudxnu_kern_read(void *dstaddr, vm_offset_t srcaddr, vm_size_t size)
520*bbb1b6f9SApple OSS Distributions {
521*bbb1b6f9SApple OSS Distributions 	return (ml_nofault_copy(srcaddr, (vm_offset_t)dstaddr, size) == size) ?
522*bbb1b6f9SApple OSS Distributions 	       KERN_SUCCESS : KERN_FAILURE;
523*bbb1b6f9SApple OSS Distributions }
524*bbb1b6f9SApple OSS Distributions 
525*bbb1b6f9SApple OSS Distributions static kern_return_t
chudxnu_task_read(task_t task,void * kernaddr,uint64_t usraddr,vm_size_t size)526*bbb1b6f9SApple OSS Distributions chudxnu_task_read(
527*bbb1b6f9SApple OSS Distributions 	task_t      task,
528*bbb1b6f9SApple OSS Distributions 	void        *kernaddr,
529*bbb1b6f9SApple OSS Distributions 	uint64_t    usraddr,
530*bbb1b6f9SApple OSS Distributions 	vm_size_t   size)
531*bbb1b6f9SApple OSS Distributions {
532*bbb1b6f9SApple OSS Distributions 	//ppc version ported to arm
533*bbb1b6f9SApple OSS Distributions 	kern_return_t ret = KERN_SUCCESS;
534*bbb1b6f9SApple OSS Distributions 
535*bbb1b6f9SApple OSS Distributions 	if (ml_at_interrupt_context()) {
536*bbb1b6f9SApple OSS Distributions 		return KERN_FAILURE;    // can't look at tasks on interrupt stack
537*bbb1b6f9SApple OSS Distributions 	}
538*bbb1b6f9SApple OSS Distributions 
539*bbb1b6f9SApple OSS Distributions 	if (current_task() == task) {
540*bbb1b6f9SApple OSS Distributions 		if (copyin(usraddr, kernaddr, size)) {
541*bbb1b6f9SApple OSS Distributions 			ret = KERN_FAILURE;
542*bbb1b6f9SApple OSS Distributions 		}
543*bbb1b6f9SApple OSS Distributions 	} else {
544*bbb1b6f9SApple OSS Distributions 		vm_map_t map = get_task_map(task);
545*bbb1b6f9SApple OSS Distributions 		ret = vm_map_read_user(map, usraddr, kernaddr, size);
546*bbb1b6f9SApple OSS Distributions 	}
547*bbb1b6f9SApple OSS Distributions 
548*bbb1b6f9SApple OSS Distributions 	return ret;
549*bbb1b6f9SApple OSS Distributions }
550*bbb1b6f9SApple OSS Distributions 
551*bbb1b6f9SApple OSS Distributions static inline uint64_t
chudxnu_vm_unslide(uint64_t ptr,int kaddr)552*bbb1b6f9SApple OSS Distributions chudxnu_vm_unslide( uint64_t ptr, int kaddr )
553*bbb1b6f9SApple OSS Distributions {
554*bbb1b6f9SApple OSS Distributions 	if (!kaddr) {
555*bbb1b6f9SApple OSS Distributions 		return ptr;
556*bbb1b6f9SApple OSS Distributions 	}
557*bbb1b6f9SApple OSS Distributions 
558*bbb1b6f9SApple OSS Distributions 	return VM_KERNEL_UNSLIDE(ptr);
559*bbb1b6f9SApple OSS Distributions }
560*bbb1b6f9SApple OSS Distributions 
561*bbb1b6f9SApple OSS Distributions #if __arm64__
562*bbb1b6f9SApple OSS Distributions 
563*bbb1b6f9SApple OSS Distributions #if defined(HAS_APPLE_PAC)
564*bbb1b6f9SApple OSS Distributions #include <ptrauth.h>
565*bbb1b6f9SApple OSS Distributions #endif
566*bbb1b6f9SApple OSS Distributions 
567*bbb1b6f9SApple OSS Distributions // chudxnu_thread_get_callstack gathers a raw callstack along with any information needed to
568*bbb1b6f9SApple OSS Distributions // fix it up later (in case we stopped program as it was saving values into prev stack frame, etc.)
569*bbb1b6f9SApple OSS Distributions // after sampling has finished.
570*bbb1b6f9SApple OSS Distributions //
571*bbb1b6f9SApple OSS Distributions // For an N-entry callstack:
572*bbb1b6f9SApple OSS Distributions //
573*bbb1b6f9SApple OSS Distributions // [0]      current pc
574*bbb1b6f9SApple OSS Distributions // [1..N-3] stack frames (including current one)
575*bbb1b6f9SApple OSS Distributions // [N-2]    current LR (return value if we're in a leaf function)
576*bbb1b6f9SApple OSS Distributions // [N-1]    current r0 (in case we've saved LR in r0) (optional)
577*bbb1b6f9SApple OSS Distributions //
578*bbb1b6f9SApple OSS Distributions //
579*bbb1b6f9SApple OSS Distributions #define CS_FLAG_EXTRASP  1  // capture extra sp register
580*bbb1b6f9SApple OSS Distributions 
581*bbb1b6f9SApple OSS Distributions static kern_return_t
chudxnu_thread_get_callstack64_internal(thread_t thread,uint64_t * callStack,mach_msg_type_number_t * count,boolean_t user_only,int flags)582*bbb1b6f9SApple OSS Distributions chudxnu_thread_get_callstack64_internal(
583*bbb1b6f9SApple OSS Distributions 	thread_t                thread,
584*bbb1b6f9SApple OSS Distributions 	uint64_t                *callStack,
585*bbb1b6f9SApple OSS Distributions 	mach_msg_type_number_t  *count,
586*bbb1b6f9SApple OSS Distributions 	boolean_t               user_only,
587*bbb1b6f9SApple OSS Distributions 	int flags)
588*bbb1b6f9SApple OSS Distributions {
589*bbb1b6f9SApple OSS Distributions 	kern_return_t   kr = KERN_SUCCESS;
590*bbb1b6f9SApple OSS Distributions 	task_t                  task;
591*bbb1b6f9SApple OSS Distributions 	uint64_t                currPC = 0ULL, currLR = 0ULL, currSP = 0ULL;
592*bbb1b6f9SApple OSS Distributions 	uint64_t                prevPC = 0ULL;
593*bbb1b6f9SApple OSS Distributions 	uint64_t                kernStackMin = thread->kernel_stack;
594*bbb1b6f9SApple OSS Distributions 	uint64_t                kernStackMax = kernStackMin + kernel_stack_size;
595*bbb1b6f9SApple OSS Distributions 	uint64_t       *buffer = callStack;
596*bbb1b6f9SApple OSS Distributions 	int             bufferIndex = 0;
597*bbb1b6f9SApple OSS Distributions 	int             bufferMaxIndex = 0;
598*bbb1b6f9SApple OSS Distributions 	boolean_t       kernel = FALSE;
599*bbb1b6f9SApple OSS Distributions 	struct arm_saved_state *sstate = NULL;
600*bbb1b6f9SApple OSS Distributions 	uint64_t                pc = 0ULL;
601*bbb1b6f9SApple OSS Distributions 
602*bbb1b6f9SApple OSS Distributions 	task = get_threadtask(thread);
603*bbb1b6f9SApple OSS Distributions 	bufferMaxIndex = *count;
604*bbb1b6f9SApple OSS Distributions 	//get thread state
605*bbb1b6f9SApple OSS Distributions 	if (user_only) {
606*bbb1b6f9SApple OSS Distributions 		sstate = find_user_regs(thread);
607*bbb1b6f9SApple OSS Distributions 	} else {
608*bbb1b6f9SApple OSS Distributions 		sstate = find_kern_regs(thread);
609*bbb1b6f9SApple OSS Distributions 	}
610*bbb1b6f9SApple OSS Distributions 
611*bbb1b6f9SApple OSS Distributions 	if (!sstate) {
612*bbb1b6f9SApple OSS Distributions 		*count = 0;
613*bbb1b6f9SApple OSS Distributions 		return KERN_FAILURE;
614*bbb1b6f9SApple OSS Distributions 	}
615*bbb1b6f9SApple OSS Distributions 
616*bbb1b6f9SApple OSS Distributions 	if (is_saved_state64(sstate)) {
617*bbb1b6f9SApple OSS Distributions 		struct arm_saved_state64 *state = NULL;
618*bbb1b6f9SApple OSS Distributions 		uint64_t *fp = NULL, *nextFramePointer = NULL, *topfp = NULL;
619*bbb1b6f9SApple OSS Distributions 		uint64_t frame[2];
620*bbb1b6f9SApple OSS Distributions 
621*bbb1b6f9SApple OSS Distributions 		state = saved_state64(sstate);
622*bbb1b6f9SApple OSS Distributions 
623*bbb1b6f9SApple OSS Distributions 		/* make sure it is safe to dereference before you do it */
624*bbb1b6f9SApple OSS Distributions 		kernel = PSR64_IS_KERNEL(state->cpsr);
625*bbb1b6f9SApple OSS Distributions 
626*bbb1b6f9SApple OSS Distributions 		/* can't take a kernel callstack if we've got a user frame */
627*bbb1b6f9SApple OSS Distributions 		if (!user_only && !kernel) {
628*bbb1b6f9SApple OSS Distributions 			return KERN_FAILURE;
629*bbb1b6f9SApple OSS Distributions 		}
630*bbb1b6f9SApple OSS Distributions 
631*bbb1b6f9SApple OSS Distributions 		/*
632*bbb1b6f9SApple OSS Distributions 		 * Reserve space for saving LR (and sometimes SP) at the end of the
633*bbb1b6f9SApple OSS Distributions 		 * backtrace.
634*bbb1b6f9SApple OSS Distributions 		 */
635*bbb1b6f9SApple OSS Distributions 		if (flags & CS_FLAG_EXTRASP) {
636*bbb1b6f9SApple OSS Distributions 			bufferMaxIndex -= 2;
637*bbb1b6f9SApple OSS Distributions 		} else {
638*bbb1b6f9SApple OSS Distributions 			bufferMaxIndex -= 1;
639*bbb1b6f9SApple OSS Distributions 		}
640*bbb1b6f9SApple OSS Distributions 
641*bbb1b6f9SApple OSS Distributions 		if (bufferMaxIndex < 2) {
642*bbb1b6f9SApple OSS Distributions 			*count = 0;
643*bbb1b6f9SApple OSS Distributions 			return KERN_RESOURCE_SHORTAGE;
644*bbb1b6f9SApple OSS Distributions 		}
645*bbb1b6f9SApple OSS Distributions 
646*bbb1b6f9SApple OSS Distributions 		currPC = state->pc;
647*bbb1b6f9SApple OSS Distributions 		currLR = state->lr;
648*bbb1b6f9SApple OSS Distributions 		currSP = state->sp;
649*bbb1b6f9SApple OSS Distributions 
650*bbb1b6f9SApple OSS Distributions 		fp = (uint64_t *)state->fp; /* frame pointer */
651*bbb1b6f9SApple OSS Distributions #if defined(HAS_APPLE_PAC)
652*bbb1b6f9SApple OSS Distributions 		/* frame pointers on stack will be signed by arm64e ABI */
653*bbb1b6f9SApple OSS Distributions 		fp = ptrauth_strip(fp, ptrauth_key_frame_pointer);
654*bbb1b6f9SApple OSS Distributions #endif
655*bbb1b6f9SApple OSS Distributions 		topfp = fp;
656*bbb1b6f9SApple OSS Distributions 
657*bbb1b6f9SApple OSS Distributions 		bufferIndex = 0;  // start with a stack of size zero
658*bbb1b6f9SApple OSS Distributions 		buffer[bufferIndex++] = chudxnu_vm_unslide(currPC, kernel); // save PC in position 0.
659*bbb1b6f9SApple OSS Distributions 
660*bbb1b6f9SApple OSS Distributions 		BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_START, kernel, 0);
661*bbb1b6f9SApple OSS Distributions 
662*bbb1b6f9SApple OSS Distributions 		// Now, fill buffer with stack backtraces.
663*bbb1b6f9SApple OSS Distributions 		while (bufferIndex < bufferMaxIndex) {
664*bbb1b6f9SApple OSS Distributions 			pc = 0ULL;
665*bbb1b6f9SApple OSS Distributions 			/*
666*bbb1b6f9SApple OSS Distributions 			 * Below the frame pointer, the following values are saved:
667*bbb1b6f9SApple OSS Distributions 			 * -> FP
668*bbb1b6f9SApple OSS Distributions 			 */
669*bbb1b6f9SApple OSS Distributions 
670*bbb1b6f9SApple OSS Distributions 			/*
671*bbb1b6f9SApple OSS Distributions 			 * Note that we read the pc even for the first stack frame
672*bbb1b6f9SApple OSS Distributions 			 * (which, in theory, is always empty because the callee fills
673*bbb1b6f9SApple OSS Distributions 			 * it in just before it lowers the stack.  However, if we
674*bbb1b6f9SApple OSS Distributions 			 * catch the program in between filling in the return address
675*bbb1b6f9SApple OSS Distributions 			 * and lowering the stack, we want to still have a valid
676*bbb1b6f9SApple OSS Distributions 			 * backtrace. FixupStack correctly disregards this value if
677*bbb1b6f9SApple OSS Distributions 			 * necessary.
678*bbb1b6f9SApple OSS Distributions 			 */
679*bbb1b6f9SApple OSS Distributions 
680*bbb1b6f9SApple OSS Distributions 			if ((uint64_t)fp == 0 || ((uint64_t)fp & 0x3) != 0) {
681*bbb1b6f9SApple OSS Distributions 				/* frame pointer is invalid - stop backtracing */
682*bbb1b6f9SApple OSS Distributions 				pc = 0ULL;
683*bbb1b6f9SApple OSS Distributions 				break;
684*bbb1b6f9SApple OSS Distributions 			}
685*bbb1b6f9SApple OSS Distributions 
686*bbb1b6f9SApple OSS Distributions 			if (kernel) {
687*bbb1b6f9SApple OSS Distributions 				if (((uint64_t)fp > kernStackMax) ||
688*bbb1b6f9SApple OSS Distributions 				    ((uint64_t)fp < kernStackMin)) {
689*bbb1b6f9SApple OSS Distributions 					kr = KERN_FAILURE;
690*bbb1b6f9SApple OSS Distributions 				} else {
691*bbb1b6f9SApple OSS Distributions 					kr = chudxnu_kern_read(&frame,
692*bbb1b6f9SApple OSS Distributions 					    (vm_offset_t)fp,
693*bbb1b6f9SApple OSS Distributions 					    (vm_size_t)sizeof(frame));
694*bbb1b6f9SApple OSS Distributions 					if (kr == KERN_SUCCESS) {
695*bbb1b6f9SApple OSS Distributions #if defined(HAS_APPLE_PAC)
696*bbb1b6f9SApple OSS Distributions 						/* return addresses on stack will be signed by arm64e ABI */
697*bbb1b6f9SApple OSS Distributions 						pc = (uint64_t)ptrauth_strip((void *)frame[1], ptrauth_key_return_address);
698*bbb1b6f9SApple OSS Distributions #else
699*bbb1b6f9SApple OSS Distributions 						pc = frame[1];
700*bbb1b6f9SApple OSS Distributions #endif
701*bbb1b6f9SApple OSS Distributions 						nextFramePointer = (uint64_t *)frame[0];
702*bbb1b6f9SApple OSS Distributions #if defined(HAS_APPLE_PAC)
703*bbb1b6f9SApple OSS Distributions 						/* frame pointers on stack will be signed by arm64e ABI */
704*bbb1b6f9SApple OSS Distributions 						nextFramePointer = ptrauth_strip(nextFramePointer, ptrauth_key_frame_pointer);
705*bbb1b6f9SApple OSS Distributions #endif
706*bbb1b6f9SApple OSS Distributions 					} else {
707*bbb1b6f9SApple OSS Distributions 						pc = 0ULL;
708*bbb1b6f9SApple OSS Distributions 						nextFramePointer = 0ULL;
709*bbb1b6f9SApple OSS Distributions 						kr = KERN_FAILURE;
710*bbb1b6f9SApple OSS Distributions 					}
711*bbb1b6f9SApple OSS Distributions 				}
712*bbb1b6f9SApple OSS Distributions 			} else {
713*bbb1b6f9SApple OSS Distributions 				kr = chudxnu_task_read(task,
714*bbb1b6f9SApple OSS Distributions 				    &frame,
715*bbb1b6f9SApple OSS Distributions 				    (vm_offset_t)fp,
716*bbb1b6f9SApple OSS Distributions 				    (vm_size_t)sizeof(frame));
717*bbb1b6f9SApple OSS Distributions 				if (kr == KERN_SUCCESS) {
718*bbb1b6f9SApple OSS Distributions #if defined(HAS_APPLE_PAC)
719*bbb1b6f9SApple OSS Distributions 					/* return addresses on stack will be signed by arm64e ABI */
720*bbb1b6f9SApple OSS Distributions 					pc = (uint64_t)ptrauth_strip((void *)frame[1], ptrauth_key_return_address);
721*bbb1b6f9SApple OSS Distributions #else
722*bbb1b6f9SApple OSS Distributions 					pc = frame[1];
723*bbb1b6f9SApple OSS Distributions #endif
724*bbb1b6f9SApple OSS Distributions 					nextFramePointer = (uint64_t *)(frame[0]);
725*bbb1b6f9SApple OSS Distributions #if defined(HAS_APPLE_PAC)
726*bbb1b6f9SApple OSS Distributions 					/* frame pointers on stack will be signed by arm64e ABI */
727*bbb1b6f9SApple OSS Distributions 					nextFramePointer = ptrauth_strip(nextFramePointer, ptrauth_key_frame_pointer);
728*bbb1b6f9SApple OSS Distributions #endif
729*bbb1b6f9SApple OSS Distributions 				} else {
730*bbb1b6f9SApple OSS Distributions 					pc = 0ULL;
731*bbb1b6f9SApple OSS Distributions 					nextFramePointer = 0ULL;
732*bbb1b6f9SApple OSS Distributions 					kr = KERN_FAILURE;
733*bbb1b6f9SApple OSS Distributions 				}
734*bbb1b6f9SApple OSS Distributions 			}
735*bbb1b6f9SApple OSS Distributions 
736*bbb1b6f9SApple OSS Distributions 			if (kr != KERN_SUCCESS) {
737*bbb1b6f9SApple OSS Distributions 				pc = 0ULL;
738*bbb1b6f9SApple OSS Distributions 				break;
739*bbb1b6f9SApple OSS Distributions 			}
740*bbb1b6f9SApple OSS Distributions 
741*bbb1b6f9SApple OSS Distributions 			if (nextFramePointer) {
742*bbb1b6f9SApple OSS Distributions 				buffer[bufferIndex++] = chudxnu_vm_unslide(pc, kernel);
743*bbb1b6f9SApple OSS Distributions 				prevPC = pc;
744*bbb1b6f9SApple OSS Distributions 			}
745*bbb1b6f9SApple OSS Distributions 
746*bbb1b6f9SApple OSS Distributions 			if (nextFramePointer < fp) {
747*bbb1b6f9SApple OSS Distributions 				break;
748*bbb1b6f9SApple OSS Distributions 			} else {
749*bbb1b6f9SApple OSS Distributions 				fp = nextFramePointer;
750*bbb1b6f9SApple OSS Distributions 			}
751*bbb1b6f9SApple OSS Distributions 		}
752*bbb1b6f9SApple OSS Distributions 
753*bbb1b6f9SApple OSS Distributions 		BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_END, bufferIndex);
754*bbb1b6f9SApple OSS Distributions 
755*bbb1b6f9SApple OSS Distributions 		if (bufferIndex >= bufferMaxIndex) {
756*bbb1b6f9SApple OSS Distributions 			bufferIndex = bufferMaxIndex;
757*bbb1b6f9SApple OSS Distributions 			kr = KERN_RESOURCE_SHORTAGE;
758*bbb1b6f9SApple OSS Distributions 		} else {
759*bbb1b6f9SApple OSS Distributions 			kr = KERN_SUCCESS;
760*bbb1b6f9SApple OSS Distributions 		}
761*bbb1b6f9SApple OSS Distributions 
762*bbb1b6f9SApple OSS Distributions 		// Save link register and SP at bottom of stack (used for later fixup).
763*bbb1b6f9SApple OSS Distributions 		buffer[bufferIndex++] = chudxnu_vm_unslide(currLR, kernel);
764*bbb1b6f9SApple OSS Distributions 		if (flags & CS_FLAG_EXTRASP) {
765*bbb1b6f9SApple OSS Distributions 			buffer[bufferIndex++] = chudxnu_vm_unslide(currSP, kernel);
766*bbb1b6f9SApple OSS Distributions 		}
767*bbb1b6f9SApple OSS Distributions 	} else {
768*bbb1b6f9SApple OSS Distributions 		struct arm_saved_state32 *state = NULL;
769*bbb1b6f9SApple OSS Distributions 		uint32_t *fp = NULL, *nextFramePointer = NULL, *topfp = NULL;
770*bbb1b6f9SApple OSS Distributions 
771*bbb1b6f9SApple OSS Distributions 		/* 64-bit kernel stacks, 32-bit user stacks */
772*bbb1b6f9SApple OSS Distributions 		uint64_t frame[2];
773*bbb1b6f9SApple OSS Distributions 		uint32_t frame32[2];
774*bbb1b6f9SApple OSS Distributions 
775*bbb1b6f9SApple OSS Distributions 		state = saved_state32(sstate);
776*bbb1b6f9SApple OSS Distributions 
777*bbb1b6f9SApple OSS Distributions 		/* make sure it is safe to dereference before you do it */
778*bbb1b6f9SApple OSS Distributions 		kernel = PSR_IS_KERNEL(state->cpsr);
779*bbb1b6f9SApple OSS Distributions 
780*bbb1b6f9SApple OSS Distributions 		/* can't take a kernel callstack if we've got a user frame */
781*bbb1b6f9SApple OSS Distributions 		if (!user_only && !kernel) {
782*bbb1b6f9SApple OSS Distributions 			return KERN_FAILURE;
783*bbb1b6f9SApple OSS Distributions 		}
784*bbb1b6f9SApple OSS Distributions 
785*bbb1b6f9SApple OSS Distributions 		/*
786*bbb1b6f9SApple OSS Distributions 		 * Reserve space for saving LR (and sometimes SP) at the end of the
787*bbb1b6f9SApple OSS Distributions 		 * backtrace.
788*bbb1b6f9SApple OSS Distributions 		 */
789*bbb1b6f9SApple OSS Distributions 		if (flags & CS_FLAG_EXTRASP) {
790*bbb1b6f9SApple OSS Distributions 			bufferMaxIndex -= 2;
791*bbb1b6f9SApple OSS Distributions 		} else {
792*bbb1b6f9SApple OSS Distributions 			bufferMaxIndex -= 1;
793*bbb1b6f9SApple OSS Distributions 		}
794*bbb1b6f9SApple OSS Distributions 
795*bbb1b6f9SApple OSS Distributions 		if (bufferMaxIndex < 2) {
796*bbb1b6f9SApple OSS Distributions 			*count = 0;
797*bbb1b6f9SApple OSS Distributions 			return KERN_RESOURCE_SHORTAGE;
798*bbb1b6f9SApple OSS Distributions 		}
799*bbb1b6f9SApple OSS Distributions 
800*bbb1b6f9SApple OSS Distributions 		currPC = (uint64_t)state->pc; /* r15 */
801*bbb1b6f9SApple OSS Distributions 		if (state->cpsr & PSR_TF) {
802*bbb1b6f9SApple OSS Distributions 			currPC |= 1ULL; /* encode thumb mode into low bit of PC */
803*bbb1b6f9SApple OSS Distributions 		}
804*bbb1b6f9SApple OSS Distributions 		currLR = (uint64_t)state->lr; /* r14 */
805*bbb1b6f9SApple OSS Distributions 		currSP = (uint64_t)state->sp; /* r13 */
806*bbb1b6f9SApple OSS Distributions 
807*bbb1b6f9SApple OSS Distributions 		fp = (uint32_t *)(uintptr_t)state->r[7]; /* frame pointer */
808*bbb1b6f9SApple OSS Distributions 		topfp = fp;
809*bbb1b6f9SApple OSS Distributions 
810*bbb1b6f9SApple OSS Distributions 		bufferIndex = 0;  // start with a stack of size zero
811*bbb1b6f9SApple OSS Distributions 		buffer[bufferIndex++] = chudxnu_vm_unslide(currPC, kernel); // save PC in position 0.
812*bbb1b6f9SApple OSS Distributions 
813*bbb1b6f9SApple OSS Distributions 		BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_START, kernel, 1);
814*bbb1b6f9SApple OSS Distributions 
815*bbb1b6f9SApple OSS Distributions 		// Now, fill buffer with stack backtraces.
816*bbb1b6f9SApple OSS Distributions 		while (bufferIndex < bufferMaxIndex) {
817*bbb1b6f9SApple OSS Distributions 			pc = 0ULL;
818*bbb1b6f9SApple OSS Distributions 			/*
819*bbb1b6f9SApple OSS Distributions 			 * Below the frame pointer, the following values are saved:
820*bbb1b6f9SApple OSS Distributions 			 * -> FP
821*bbb1b6f9SApple OSS Distributions 			 */
822*bbb1b6f9SApple OSS Distributions 
823*bbb1b6f9SApple OSS Distributions 			/*
824*bbb1b6f9SApple OSS Distributions 			 * Note that we read the pc even for the first stack frame
825*bbb1b6f9SApple OSS Distributions 			 * (which, in theory, is always empty because the callee fills
826*bbb1b6f9SApple OSS Distributions 			 * it in just before it lowers the stack.  However, if we
827*bbb1b6f9SApple OSS Distributions 			 * catch the program in between filling in the return address
828*bbb1b6f9SApple OSS Distributions 			 * and lowering the stack, we want to still have a valid
829*bbb1b6f9SApple OSS Distributions 			 * backtrace. FixupStack correctly disregards this value if
830*bbb1b6f9SApple OSS Distributions 			 * necessary.
831*bbb1b6f9SApple OSS Distributions 			 */
832*bbb1b6f9SApple OSS Distributions 
833*bbb1b6f9SApple OSS Distributions 			if ((uint32_t)fp == 0 || ((uint32_t)fp & 0x3) != 0) {
834*bbb1b6f9SApple OSS Distributions 				/* frame pointer is invalid - stop backtracing */
835*bbb1b6f9SApple OSS Distributions 				pc = 0ULL;
836*bbb1b6f9SApple OSS Distributions 				break;
837*bbb1b6f9SApple OSS Distributions 			}
838*bbb1b6f9SApple OSS Distributions 
839*bbb1b6f9SApple OSS Distributions 			if (kernel) {
840*bbb1b6f9SApple OSS Distributions 				if (((uint32_t)fp > kernStackMax) ||
841*bbb1b6f9SApple OSS Distributions 				    ((uint32_t)fp < kernStackMin)) {
842*bbb1b6f9SApple OSS Distributions 					kr = KERN_FAILURE;
843*bbb1b6f9SApple OSS Distributions 				} else {
844*bbb1b6f9SApple OSS Distributions 					kr = chudxnu_kern_read(&frame,
845*bbb1b6f9SApple OSS Distributions 					    (vm_offset_t)fp,
846*bbb1b6f9SApple OSS Distributions 					    (vm_size_t)sizeof(frame));
847*bbb1b6f9SApple OSS Distributions 					if (kr == KERN_SUCCESS) {
848*bbb1b6f9SApple OSS Distributions 						pc = (uint64_t)frame[1];
849*bbb1b6f9SApple OSS Distributions 						nextFramePointer = (uint32_t *) (frame[0]);
850*bbb1b6f9SApple OSS Distributions 					} else {
851*bbb1b6f9SApple OSS Distributions 						pc = 0ULL;
852*bbb1b6f9SApple OSS Distributions 						nextFramePointer = 0ULL;
853*bbb1b6f9SApple OSS Distributions 						kr = KERN_FAILURE;
854*bbb1b6f9SApple OSS Distributions 					}
855*bbb1b6f9SApple OSS Distributions 				}
856*bbb1b6f9SApple OSS Distributions 			} else {
857*bbb1b6f9SApple OSS Distributions 				kr = chudxnu_task_read(task,
858*bbb1b6f9SApple OSS Distributions 				    &frame32,
859*bbb1b6f9SApple OSS Distributions 				    (((uint64_t)(uint32_t)fp) & 0x00000000FFFFFFFFULL),
860*bbb1b6f9SApple OSS Distributions 				    sizeof(frame32));
861*bbb1b6f9SApple OSS Distributions 				if (kr == KERN_SUCCESS) {
862*bbb1b6f9SApple OSS Distributions 					pc = (uint64_t)frame32[1];
863*bbb1b6f9SApple OSS Distributions 					nextFramePointer = (uint32_t *)(uintptr_t)(frame32[0]);
864*bbb1b6f9SApple OSS Distributions 				} else {
865*bbb1b6f9SApple OSS Distributions 					pc = 0ULL;
866*bbb1b6f9SApple OSS Distributions 					nextFramePointer = 0ULL;
867*bbb1b6f9SApple OSS Distributions 					kr = KERN_FAILURE;
868*bbb1b6f9SApple OSS Distributions 				}
869*bbb1b6f9SApple OSS Distributions 			}
870*bbb1b6f9SApple OSS Distributions 
871*bbb1b6f9SApple OSS Distributions 			if (kr != KERN_SUCCESS) {
872*bbb1b6f9SApple OSS Distributions 				pc = 0ULL;
873*bbb1b6f9SApple OSS Distributions 				break;
874*bbb1b6f9SApple OSS Distributions 			}
875*bbb1b6f9SApple OSS Distributions 
876*bbb1b6f9SApple OSS Distributions 			if (nextFramePointer) {
877*bbb1b6f9SApple OSS Distributions 				buffer[bufferIndex++] = chudxnu_vm_unslide(pc, kernel);
878*bbb1b6f9SApple OSS Distributions 				prevPC = pc;
879*bbb1b6f9SApple OSS Distributions 			}
880*bbb1b6f9SApple OSS Distributions 
881*bbb1b6f9SApple OSS Distributions 			if (nextFramePointer < fp) {
882*bbb1b6f9SApple OSS Distributions 				break;
883*bbb1b6f9SApple OSS Distributions 			} else {
884*bbb1b6f9SApple OSS Distributions 				fp = nextFramePointer;
885*bbb1b6f9SApple OSS Distributions 			}
886*bbb1b6f9SApple OSS Distributions 		}
887*bbb1b6f9SApple OSS Distributions 
888*bbb1b6f9SApple OSS Distributions 		BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_END, bufferIndex);
889*bbb1b6f9SApple OSS Distributions 
890*bbb1b6f9SApple OSS Distributions 		/* clamp callstack size to max */
891*bbb1b6f9SApple OSS Distributions 		if (bufferIndex >= bufferMaxIndex) {
892*bbb1b6f9SApple OSS Distributions 			bufferIndex = bufferMaxIndex;
893*bbb1b6f9SApple OSS Distributions 			kr = KERN_RESOURCE_SHORTAGE;
894*bbb1b6f9SApple OSS Distributions 		} else {
895*bbb1b6f9SApple OSS Distributions 			/* ignore all other failures */
896*bbb1b6f9SApple OSS Distributions 			kr = KERN_SUCCESS;
897*bbb1b6f9SApple OSS Distributions 		}
898*bbb1b6f9SApple OSS Distributions 
899*bbb1b6f9SApple OSS Distributions 		// Save link register and R13 (sp) at bottom of stack (used for later fixup).
900*bbb1b6f9SApple OSS Distributions 		buffer[bufferIndex++] = chudxnu_vm_unslide(currLR, kernel);
901*bbb1b6f9SApple OSS Distributions 		if (flags & CS_FLAG_EXTRASP) {
902*bbb1b6f9SApple OSS Distributions 			buffer[bufferIndex++] = chudxnu_vm_unslide(currSP, kernel);
903*bbb1b6f9SApple OSS Distributions 		}
904*bbb1b6f9SApple OSS Distributions 	}
905*bbb1b6f9SApple OSS Distributions 
906*bbb1b6f9SApple OSS Distributions 	*count = bufferIndex;
907*bbb1b6f9SApple OSS Distributions 	return kr;
908*bbb1b6f9SApple OSS Distributions }
909*bbb1b6f9SApple OSS Distributions 
910*bbb1b6f9SApple OSS Distributions kern_return_t
chudxnu_thread_get_callstack64_kperf(thread_t thread,uint64_t * callStack,mach_msg_type_number_t * count,boolean_t user_only)911*bbb1b6f9SApple OSS Distributions chudxnu_thread_get_callstack64_kperf(
912*bbb1b6f9SApple OSS Distributions 	thread_t                thread,
913*bbb1b6f9SApple OSS Distributions 	uint64_t                *callStack,
914*bbb1b6f9SApple OSS Distributions 	mach_msg_type_number_t  *count,
915*bbb1b6f9SApple OSS Distributions 	boolean_t               user_only)
916*bbb1b6f9SApple OSS Distributions {
917*bbb1b6f9SApple OSS Distributions 	return chudxnu_thread_get_callstack64_internal( thread, callStack, count, user_only, 0 );
918*bbb1b6f9SApple OSS Distributions }
919*bbb1b6f9SApple OSS Distributions #elif __x86_64__
920*bbb1b6f9SApple OSS Distributions 
921*bbb1b6f9SApple OSS Distributions #define VALID_STACK_ADDRESS(supervisor, addr, minKernAddr, maxKernAddr)   (supervisor ? (addr>=minKernAddr && addr<=maxKernAddr) : TRUE)
922*bbb1b6f9SApple OSS Distributions // don't try to read in the hole
923*bbb1b6f9SApple OSS Distributions #define VALID_STACK_ADDRESS64(supervisor, addr, minKernAddr, maxKernAddr) \
924*bbb1b6f9SApple OSS Distributions (supervisor ? ((uint64_t)addr >= minKernAddr && (uint64_t)addr <= maxKernAddr) : \
925*bbb1b6f9SApple OSS Distributions ((uint64_t)addr != 0ULL && ((uint64_t)addr <= 0x00007FFFFFFFFFFFULL || (uint64_t)addr >= 0xFFFF800000000000ULL)))
926*bbb1b6f9SApple OSS Distributions 
927*bbb1b6f9SApple OSS Distributions typedef struct _cframe64_t {
928*bbb1b6f9SApple OSS Distributions 	uint64_t        prevFP;         // can't use a real pointer here until we're a 64 bit kernel
929*bbb1b6f9SApple OSS Distributions 	uint64_t        caller;
930*bbb1b6f9SApple OSS Distributions 	uint64_t        args[0];
931*bbb1b6f9SApple OSS Distributions }cframe64_t;
932*bbb1b6f9SApple OSS Distributions 
933*bbb1b6f9SApple OSS Distributions 
934*bbb1b6f9SApple OSS Distributions typedef struct _cframe_t {
935*bbb1b6f9SApple OSS Distributions 	uint32_t                prev;   // this is really a user32-space pointer to the previous frame
936*bbb1b6f9SApple OSS Distributions 	uint32_t                caller;
937*bbb1b6f9SApple OSS Distributions 	uint32_t                args[0];
938*bbb1b6f9SApple OSS Distributions } cframe_t;
939*bbb1b6f9SApple OSS Distributions 
940*bbb1b6f9SApple OSS Distributions extern void * find_user_regs(thread_t);
941*bbb1b6f9SApple OSS Distributions extern x86_saved_state32_t *find_kern_regs(thread_t);
942*bbb1b6f9SApple OSS Distributions 
943*bbb1b6f9SApple OSS Distributions static kern_return_t
do_kernel_backtrace(
	thread_t thread,
	struct x86_kernel_state *regs,
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx)
{
	/*
	 * Frame-pointer walk of a blocked kernel thread's saved register
	 * state (struct x86_kernel_state).  Unslid PCs are appended to
	 * frames[] starting at *start_idx; on return *start_idx is the next
	 * free slot.  Returns KERN_RESOURCE_SHORTAGE when frames[] fills
	 * before the walk ends, KERN_FAILURE when the initial register
	 * reads fail or the starting PC is 0, KERN_SUCCESS otherwise.
	 * Every dereference is a nofault read (chudxnu_kern_read) because
	 * the target stack can disappear while we walk it.
	 */
	uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
	uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

#if __LP64__
	uint64_t currPC = 0ULL;
	uint64_t currFP = 0ULL;
	uint64_t prevPC = 0ULL;
	uint64_t prevFP = 0ULL;
	/* nofault read of the saved RIP/RBP out of the thread's kernel state */
	if (KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_rip), sizeof(uint64_t))) {
		return KERN_FAILURE;
	}
	if (KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_rbp), sizeof(uint64_t))) {
		return KERN_FAILURE;
	}
#else
	uint32_t currPC = 0U;
	uint32_t currFP = 0U;
	uint32_t prevPC = 0U;
	uint32_t prevFP = 0U;
	/* 32-bit kernel: saved EIP/EBP instead */
	if (KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_eip), sizeof(uint32_t))) {
		return KERN_FAILURE;
	}
	if (KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_ebp), sizeof(uint32_t))) {
		return KERN_FAILURE;
	}
#endif

	if (*start_idx >= max_idx) {
		return KERN_RESOURCE_SHORTAGE;  // no frames traced
	}
	if (!currPC) {
		return KERN_FAILURE;
	}

	/* record the interrupted/blocked PC itself (always supervisor == 1) */
	frames[ct++] = chudxnu_vm_unslide((uint64_t)currPC, 1);

	// build a backtrace of this kernel state
	/*
	 * NOTE: the while-header is split across the #if so the same loop
	 * body serves both pointer widths; the body below is shared.
	 */
#if __LP64__
	while (VALID_STACK_ADDRESS64(TRUE, currFP, kernStackMin, kernStackMax)) {
		// this is the address where caller lives in the user thread
		uint64_t caller = currFP + sizeof(uint64_t);
#else
	while (VALID_STACK_ADDRESS(TRUE, currFP, kernStackMin, kernStackMax)) {
		uint32_t caller = (uint32_t)currFP + sizeof(uint32_t);
#endif

		if (!currFP || !currPC) {
			currPC = 0;
			break;
		}

		if (ct >= max_idx) {
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* read our caller */
		kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(currPC));

		if (kr != KERN_SUCCESS || !currPC) {
			currPC = 0UL;
			break;
		}

		/*
		 * retrive contents of the frame pointer and advance to the next stack
		 * frame if it's valid
		 */
		prevFP = 0;
		kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(currPC));

#if __LP64__
		if (VALID_STACK_ADDRESS64(TRUE, prevFP, kernStackMin, kernStackMax)) {
#else
		if (VALID_STACK_ADDRESS(TRUE, prevFP, kernStackMin, kernStackMax)) {
#endif
			frames[ct++] = chudxnu_vm_unslide((uint64_t)currPC, 1);
			prevPC = currPC;        /* NOTE(review): prevPC is written but never read */
		}
		/* <= also stops on a self-referential frame, avoiding a cycle */
		if (prevFP <= currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}
1042*bbb1b6f9SApple OSS Distributions 
1043*bbb1b6f9SApple OSS Distributions 
1044*bbb1b6f9SApple OSS Distributions 
1045*bbb1b6f9SApple OSS Distributions static kern_return_t
1046*bbb1b6f9SApple OSS Distributions do_backtrace32(
1047*bbb1b6f9SApple OSS Distributions 	task_t task,
1048*bbb1b6f9SApple OSS Distributions 	thread_t thread,
1049*bbb1b6f9SApple OSS Distributions 	x86_saved_state32_t *regs,
1050*bbb1b6f9SApple OSS Distributions 	uint64_t *frames,
1051*bbb1b6f9SApple OSS Distributions 	mach_msg_type_number_t *start_idx,
1052*bbb1b6f9SApple OSS Distributions 	mach_msg_type_number_t max_idx,
1053*bbb1b6f9SApple OSS Distributions 	boolean_t supervisor)
1054*bbb1b6f9SApple OSS Distributions {
1055*bbb1b6f9SApple OSS Distributions 	uint32_t tmpWord = 0UL;
1056*bbb1b6f9SApple OSS Distributions 	uint64_t currPC = (uint64_t) regs->eip;
1057*bbb1b6f9SApple OSS Distributions 	uint64_t currFP = (uint64_t) regs->ebp;
1058*bbb1b6f9SApple OSS Distributions 	uint64_t prevPC = 0ULL;
1059*bbb1b6f9SApple OSS Distributions 	uint64_t prevFP = 0ULL;
1060*bbb1b6f9SApple OSS Distributions 	uint64_t kernStackMin = thread->kernel_stack;
1061*bbb1b6f9SApple OSS Distributions 	uint64_t kernStackMax = kernStackMin + kernel_stack_size;
1062*bbb1b6f9SApple OSS Distributions 	mach_msg_type_number_t ct = *start_idx;
1063*bbb1b6f9SApple OSS Distributions 	kern_return_t kr = KERN_FAILURE;
1064*bbb1b6f9SApple OSS Distributions 
1065*bbb1b6f9SApple OSS Distributions 	if (ct >= max_idx) {
1066*bbb1b6f9SApple OSS Distributions 		return KERN_RESOURCE_SHORTAGE;  // no frames traced
1067*bbb1b6f9SApple OSS Distributions 	}
1068*bbb1b6f9SApple OSS Distributions 	frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);
1069*bbb1b6f9SApple OSS Distributions 
1070*bbb1b6f9SApple OSS Distributions 	// build a backtrace of this 32 bit state.
1071*bbb1b6f9SApple OSS Distributions 	while (VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax)) {
1072*bbb1b6f9SApple OSS Distributions 		cframe_t *fp = (cframe_t *) (uintptr_t) currFP;
1073*bbb1b6f9SApple OSS Distributions 
1074*bbb1b6f9SApple OSS Distributions 		if (!currFP) {
1075*bbb1b6f9SApple OSS Distributions 			currPC = 0;
1076*bbb1b6f9SApple OSS Distributions 			break;
1077*bbb1b6f9SApple OSS Distributions 		}
1078*bbb1b6f9SApple OSS Distributions 
1079*bbb1b6f9SApple OSS Distributions 		if (ct >= max_idx) {
1080*bbb1b6f9SApple OSS Distributions 			*start_idx = ct;
1081*bbb1b6f9SApple OSS Distributions 			return KERN_RESOURCE_SHORTAGE;
1082*bbb1b6f9SApple OSS Distributions 		}
1083*bbb1b6f9SApple OSS Distributions 
1084*bbb1b6f9SApple OSS Distributions 		/* read our caller */
1085*bbb1b6f9SApple OSS Distributions 		if (supervisor) {
1086*bbb1b6f9SApple OSS Distributions 			kr = chudxnu_kern_read(&tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
1087*bbb1b6f9SApple OSS Distributions 		} else {
1088*bbb1b6f9SApple OSS Distributions 			kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
1089*bbb1b6f9SApple OSS Distributions 		}
1090*bbb1b6f9SApple OSS Distributions 
1091*bbb1b6f9SApple OSS Distributions 		if (kr != KERN_SUCCESS) {
1092*bbb1b6f9SApple OSS Distributions 			currPC = 0ULL;
1093*bbb1b6f9SApple OSS Distributions 			break;
1094*bbb1b6f9SApple OSS Distributions 		}
1095*bbb1b6f9SApple OSS Distributions 
1096*bbb1b6f9SApple OSS Distributions 		currPC = (uint64_t) tmpWord;    // promote 32 bit address
1097*bbb1b6f9SApple OSS Distributions 
1098*bbb1b6f9SApple OSS Distributions 		/*
1099*bbb1b6f9SApple OSS Distributions 		 * retrive contents of the frame pointer and advance to the next stack
1100*bbb1b6f9SApple OSS Distributions 		 * frame if it's valid
1101*bbb1b6f9SApple OSS Distributions 		 */
1102*bbb1b6f9SApple OSS Distributions 		prevFP = 0;
1103*bbb1b6f9SApple OSS Distributions 		if (supervisor) {
1104*bbb1b6f9SApple OSS Distributions 			kr = chudxnu_kern_read(&tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
1105*bbb1b6f9SApple OSS Distributions 		} else {
1106*bbb1b6f9SApple OSS Distributions 			kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
1107*bbb1b6f9SApple OSS Distributions 		}
1108*bbb1b6f9SApple OSS Distributions 		prevFP = (uint64_t) tmpWord;    // promote 32 bit address
1109*bbb1b6f9SApple OSS Distributions 
1110*bbb1b6f9SApple OSS Distributions 		if (prevFP) {
1111*bbb1b6f9SApple OSS Distributions 			frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);
1112*bbb1b6f9SApple OSS Distributions 			prevPC = currPC;
1113*bbb1b6f9SApple OSS Distributions 		}
1114*bbb1b6f9SApple OSS Distributions 		if (prevFP < currFP) {
1115*bbb1b6f9SApple OSS Distributions 			break;
1116*bbb1b6f9SApple OSS Distributions 		} else {
1117*bbb1b6f9SApple OSS Distributions 			currFP = prevFP;
1118*bbb1b6f9SApple OSS Distributions 		}
1119*bbb1b6f9SApple OSS Distributions 	}
1120*bbb1b6f9SApple OSS Distributions 
1121*bbb1b6f9SApple OSS Distributions 	*start_idx = ct;
1122*bbb1b6f9SApple OSS Distributions 	return KERN_SUCCESS;
1123*bbb1b6f9SApple OSS Distributions }
1124*bbb1b6f9SApple OSS Distributions 
1125*bbb1b6f9SApple OSS Distributions static kern_return_t
1126*bbb1b6f9SApple OSS Distributions do_backtrace64(
1127*bbb1b6f9SApple OSS Distributions 	task_t task,
1128*bbb1b6f9SApple OSS Distributions 	thread_t thread,
1129*bbb1b6f9SApple OSS Distributions 	x86_saved_state64_t *regs,
1130*bbb1b6f9SApple OSS Distributions 	uint64_t *frames,
1131*bbb1b6f9SApple OSS Distributions 	mach_msg_type_number_t *start_idx,
1132*bbb1b6f9SApple OSS Distributions 	mach_msg_type_number_t max_idx,
1133*bbb1b6f9SApple OSS Distributions 	boolean_t supervisor)
1134*bbb1b6f9SApple OSS Distributions {
1135*bbb1b6f9SApple OSS Distributions 	uint64_t currPC = regs->isf.rip;
1136*bbb1b6f9SApple OSS Distributions 	uint64_t currFP = regs->rbp;
1137*bbb1b6f9SApple OSS Distributions 	uint64_t prevPC = 0ULL;
1138*bbb1b6f9SApple OSS Distributions 	uint64_t prevFP = 0ULL;
1139*bbb1b6f9SApple OSS Distributions 	uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
1140*bbb1b6f9SApple OSS Distributions 	uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
1141*bbb1b6f9SApple OSS Distributions 	mach_msg_type_number_t ct = *start_idx;
1142*bbb1b6f9SApple OSS Distributions 	kern_return_t kr = KERN_FAILURE;
1143*bbb1b6f9SApple OSS Distributions 
1144*bbb1b6f9SApple OSS Distributions 	if (*start_idx >= max_idx) {
1145*bbb1b6f9SApple OSS Distributions 		return KERN_RESOURCE_SHORTAGE;  // no frames traced
1146*bbb1b6f9SApple OSS Distributions 	}
1147*bbb1b6f9SApple OSS Distributions 	frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);
1148*bbb1b6f9SApple OSS Distributions 
1149*bbb1b6f9SApple OSS Distributions 	// build a backtrace of this 32 bit state.
1150*bbb1b6f9SApple OSS Distributions 	while (VALID_STACK_ADDRESS64(supervisor, currFP, kernStackMin, kernStackMax)) {
1151*bbb1b6f9SApple OSS Distributions 		// this is the address where caller lives in the user thread
1152*bbb1b6f9SApple OSS Distributions 		uint64_t caller = currFP + sizeof(uint64_t);
1153*bbb1b6f9SApple OSS Distributions 
1154*bbb1b6f9SApple OSS Distributions 		if (!currFP) {
1155*bbb1b6f9SApple OSS Distributions 			currPC = 0;
1156*bbb1b6f9SApple OSS Distributions 			break;
1157*bbb1b6f9SApple OSS Distributions 		}
1158*bbb1b6f9SApple OSS Distributions 
1159*bbb1b6f9SApple OSS Distributions 		if (ct >= max_idx) {
1160*bbb1b6f9SApple OSS Distributions 			*start_idx = ct;
1161*bbb1b6f9SApple OSS Distributions 			return KERN_RESOURCE_SHORTAGE;
1162*bbb1b6f9SApple OSS Distributions 		}
1163*bbb1b6f9SApple OSS Distributions 
1164*bbb1b6f9SApple OSS Distributions 		/* read our caller */
1165*bbb1b6f9SApple OSS Distributions 		if (supervisor) {
1166*bbb1b6f9SApple OSS Distributions 			kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(uint64_t));
1167*bbb1b6f9SApple OSS Distributions 		} else {
1168*bbb1b6f9SApple OSS Distributions 			kr = chudxnu_task_read(task, &currPC, caller, sizeof(uint64_t));
1169*bbb1b6f9SApple OSS Distributions 		}
1170*bbb1b6f9SApple OSS Distributions 
1171*bbb1b6f9SApple OSS Distributions 		if (kr != KERN_SUCCESS) {
1172*bbb1b6f9SApple OSS Distributions 			currPC = 0ULL;
1173*bbb1b6f9SApple OSS Distributions 			break;
1174*bbb1b6f9SApple OSS Distributions 		}
1175*bbb1b6f9SApple OSS Distributions 
1176*bbb1b6f9SApple OSS Distributions 		/*
1177*bbb1b6f9SApple OSS Distributions 		 * retrive contents of the frame pointer and advance to the next stack
1178*bbb1b6f9SApple OSS Distributions 		 * frame if it's valid
1179*bbb1b6f9SApple OSS Distributions 		 */
1180*bbb1b6f9SApple OSS Distributions 		prevFP = 0;
1181*bbb1b6f9SApple OSS Distributions 		if (supervisor) {
1182*bbb1b6f9SApple OSS Distributions 			kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(uint64_t));
1183*bbb1b6f9SApple OSS Distributions 		} else {
1184*bbb1b6f9SApple OSS Distributions 			kr = chudxnu_task_read(task, &prevFP, currFP, sizeof(uint64_t));
1185*bbb1b6f9SApple OSS Distributions 		}
1186*bbb1b6f9SApple OSS Distributions 
1187*bbb1b6f9SApple OSS Distributions 		if (VALID_STACK_ADDRESS64(supervisor, prevFP, kernStackMin, kernStackMax)) {
1188*bbb1b6f9SApple OSS Distributions 			frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);
1189*bbb1b6f9SApple OSS Distributions 			prevPC = currPC;
1190*bbb1b6f9SApple OSS Distributions 		}
1191*bbb1b6f9SApple OSS Distributions 		if (prevFP < currFP) {
1192*bbb1b6f9SApple OSS Distributions 			break;
1193*bbb1b6f9SApple OSS Distributions 		} else {
1194*bbb1b6f9SApple OSS Distributions 			currFP = prevFP;
1195*bbb1b6f9SApple OSS Distributions 		}
1196*bbb1b6f9SApple OSS Distributions 	}
1197*bbb1b6f9SApple OSS Distributions 
1198*bbb1b6f9SApple OSS Distributions 	*start_idx = ct;
1199*bbb1b6f9SApple OSS Distributions 	return KERN_SUCCESS;
1200*bbb1b6f9SApple OSS Distributions }
1201*bbb1b6f9SApple OSS Distributions 
1202*bbb1b6f9SApple OSS Distributions static kern_return_t
1203*bbb1b6f9SApple OSS Distributions chudxnu_thread_get_callstack64_internal(
1204*bbb1b6f9SApple OSS Distributions 	thread_t                thread,
1205*bbb1b6f9SApple OSS Distributions 	uint64_t                *callstack,
1206*bbb1b6f9SApple OSS Distributions 	mach_msg_type_number_t  *count,
1207*bbb1b6f9SApple OSS Distributions 	boolean_t               user_only,
1208*bbb1b6f9SApple OSS Distributions 	boolean_t               kern_only)
1209*bbb1b6f9SApple OSS Distributions {
1210*bbb1b6f9SApple OSS Distributions 	kern_return_t kr = KERN_FAILURE;
1211*bbb1b6f9SApple OSS Distributions 	task_t task = get_threadtask(thread);
1212*bbb1b6f9SApple OSS Distributions 	uint64_t currPC = 0ULL;
1213*bbb1b6f9SApple OSS Distributions 	boolean_t supervisor = FALSE;
1214*bbb1b6f9SApple OSS Distributions 	mach_msg_type_number_t bufferIndex = 0;
1215*bbb1b6f9SApple OSS Distributions 	mach_msg_type_number_t bufferMaxIndex = *count;
1216*bbb1b6f9SApple OSS Distributions 	x86_saved_state_t *tagged_regs = NULL;          // kernel register state
1217*bbb1b6f9SApple OSS Distributions 	x86_saved_state64_t *regs64 = NULL;
1218*bbb1b6f9SApple OSS Distributions 	x86_saved_state32_t *regs32 = NULL;
1219*bbb1b6f9SApple OSS Distributions 	x86_saved_state32_t *u_regs32 = NULL;
1220*bbb1b6f9SApple OSS Distributions 	x86_saved_state64_t *u_regs64 = NULL;
1221*bbb1b6f9SApple OSS Distributions 	struct x86_kernel_state *kregs = NULL;
1222*bbb1b6f9SApple OSS Distributions 
1223*bbb1b6f9SApple OSS Distributions 	if (ml_at_interrupt_context()) {
1224*bbb1b6f9SApple OSS Distributions 		if (user_only) {
1225*bbb1b6f9SApple OSS Distributions 			/* can't backtrace user state on interrupt stack. */
1226*bbb1b6f9SApple OSS Distributions 			return KERN_FAILURE;
1227*bbb1b6f9SApple OSS Distributions 		}
1228*bbb1b6f9SApple OSS Distributions 
1229*bbb1b6f9SApple OSS Distributions 		/* backtracing at interrupt context? */
1230*bbb1b6f9SApple OSS Distributions 		if (thread == current_thread() && current_cpu_datap()->cpu_int_state) {
1231*bbb1b6f9SApple OSS Distributions 			/*
1232*bbb1b6f9SApple OSS Distributions 			 * Locate the registers for the interrupted thread, assuming it is
1233*bbb1b6f9SApple OSS Distributions 			 * current_thread().
1234*bbb1b6f9SApple OSS Distributions 			 */
1235*bbb1b6f9SApple OSS Distributions 			tagged_regs = current_cpu_datap()->cpu_int_state;
1236*bbb1b6f9SApple OSS Distributions 
1237*bbb1b6f9SApple OSS Distributions 			if (is_saved_state64(tagged_regs)) {
1238*bbb1b6f9SApple OSS Distributions 				/* 64 bit registers */
1239*bbb1b6f9SApple OSS Distributions 				regs64 = saved_state64(tagged_regs);
1240*bbb1b6f9SApple OSS Distributions 				supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
1241*bbb1b6f9SApple OSS Distributions 			} else {
1242*bbb1b6f9SApple OSS Distributions 				/* 32 bit registers */
1243*bbb1b6f9SApple OSS Distributions 				regs32 = saved_state32(tagged_regs);
1244*bbb1b6f9SApple OSS Distributions 				supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
1245*bbb1b6f9SApple OSS Distributions 			}
1246*bbb1b6f9SApple OSS Distributions 		}
1247*bbb1b6f9SApple OSS Distributions 	}
1248*bbb1b6f9SApple OSS Distributions 
1249*bbb1b6f9SApple OSS Distributions 	if (!ml_at_interrupt_context() && kernel_task == task) {
1250*bbb1b6f9SApple OSS Distributions 		if (!thread->kernel_stack) {
1251*bbb1b6f9SApple OSS Distributions 			return KERN_FAILURE;
1252*bbb1b6f9SApple OSS Distributions 		}
1253*bbb1b6f9SApple OSS Distributions 
1254*bbb1b6f9SApple OSS Distributions 		// Kernel thread not at interrupt context
1255*bbb1b6f9SApple OSS Distributions 		kregs = (struct x86_kernel_state *)NULL;
1256*bbb1b6f9SApple OSS Distributions 
1257*bbb1b6f9SApple OSS Distributions 		// nofault read of the thread->kernel_stack pointer
1258*bbb1b6f9SApple OSS Distributions 		if (KERN_SUCCESS != chudxnu_kern_read(&kregs, (vm_offset_t)&(thread->kernel_stack), sizeof(void *))) {
1259*bbb1b6f9SApple OSS Distributions 			return KERN_FAILURE;
1260*bbb1b6f9SApple OSS Distributions 		}
1261*bbb1b6f9SApple OSS Distributions 
1262*bbb1b6f9SApple OSS Distributions 		// Adjust to find the saved kernel state
1263*bbb1b6f9SApple OSS Distributions 		kregs = STACK_IKS((vm_offset_t)(uintptr_t)kregs);
1264*bbb1b6f9SApple OSS Distributions 
1265*bbb1b6f9SApple OSS Distributions 		supervisor = TRUE;
1266*bbb1b6f9SApple OSS Distributions 	} else if (!tagged_regs) {
1267*bbb1b6f9SApple OSS Distributions 		/*
1268*bbb1b6f9SApple OSS Distributions 		 * not at interrupt context, or tracing a different thread than
1269*bbb1b6f9SApple OSS Distributions 		 * current_thread() at interrupt context
1270*bbb1b6f9SApple OSS Distributions 		 */
1271*bbb1b6f9SApple OSS Distributions 		tagged_regs = USER_STATE(thread);
1272*bbb1b6f9SApple OSS Distributions 		if (is_saved_state64(tagged_regs)) {
1273*bbb1b6f9SApple OSS Distributions 			/* 64 bit registers */
1274*bbb1b6f9SApple OSS Distributions 			regs64 = saved_state64(tagged_regs);
1275*bbb1b6f9SApple OSS Distributions 			supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
1276*bbb1b6f9SApple OSS Distributions 		} else {
1277*bbb1b6f9SApple OSS Distributions 			/* 32 bit registers */
1278*bbb1b6f9SApple OSS Distributions 			regs32 = saved_state32(tagged_regs);
1279*bbb1b6f9SApple OSS Distributions 			supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
1280*bbb1b6f9SApple OSS Distributions 		}
1281*bbb1b6f9SApple OSS Distributions 	}
1282*bbb1b6f9SApple OSS Distributions 
1283*bbb1b6f9SApple OSS Distributions 	*count = 0;
1284*bbb1b6f9SApple OSS Distributions 
1285*bbb1b6f9SApple OSS Distributions 	if (supervisor) {
1286*bbb1b6f9SApple OSS Distributions 		// the caller only wants a user callstack.
1287*bbb1b6f9SApple OSS Distributions 		if (user_only) {
1288*bbb1b6f9SApple OSS Distributions 			// bail - we've only got kernel state
1289*bbb1b6f9SApple OSS Distributions 			return KERN_FAILURE;
1290*bbb1b6f9SApple OSS Distributions 		}
1291*bbb1b6f9SApple OSS Distributions 	} else {
1292*bbb1b6f9SApple OSS Distributions 		// regs32(64) is not in supervisor mode.
1293*bbb1b6f9SApple OSS Distributions 		u_regs32 = regs32;
1294*bbb1b6f9SApple OSS Distributions 		u_regs64 = regs64;
1295*bbb1b6f9SApple OSS Distributions 		regs32 = NULL;
1296*bbb1b6f9SApple OSS Distributions 		regs64 = NULL;
1297*bbb1b6f9SApple OSS Distributions 	}
1298*bbb1b6f9SApple OSS Distributions 
1299*bbb1b6f9SApple OSS Distributions 	if (user_only) {
1300*bbb1b6f9SApple OSS Distributions 		/* we only want to backtrace the user mode */
1301*bbb1b6f9SApple OSS Distributions 		if (!(u_regs32 || u_regs64)) {
1302*bbb1b6f9SApple OSS Distributions 			/* no user state to look at */
1303*bbb1b6f9SApple OSS Distributions 			return KERN_FAILURE;
1304*bbb1b6f9SApple OSS Distributions 		}
1305*bbb1b6f9SApple OSS Distributions 	}
1306*bbb1b6f9SApple OSS Distributions 
1307*bbb1b6f9SApple OSS Distributions 	/*
1308*bbb1b6f9SApple OSS Distributions 	 * Order of preference for top of stack:
1309*bbb1b6f9SApple OSS Distributions 	 * 64 bit kernel state (not likely)
1310*bbb1b6f9SApple OSS Distributions 	 * 32 bit kernel state
1311*bbb1b6f9SApple OSS Distributions 	 * 64 bit user land state
1312*bbb1b6f9SApple OSS Distributions 	 * 32 bit user land state
1313*bbb1b6f9SApple OSS Distributions 	 */
1314*bbb1b6f9SApple OSS Distributions 
1315*bbb1b6f9SApple OSS Distributions 	if (kregs) {
1316*bbb1b6f9SApple OSS Distributions 		/*
1317*bbb1b6f9SApple OSS Distributions 		 * nofault read of the registers from the kernel stack (as they can
1318*bbb1b6f9SApple OSS Distributions 		 * disappear on the fly).
1319*bbb1b6f9SApple OSS Distributions 		 */
1320*bbb1b6f9SApple OSS Distributions 
1321*bbb1b6f9SApple OSS Distributions 		if (KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(kregs->k_rip), sizeof(uint64_t))) {
1322*bbb1b6f9SApple OSS Distributions 			return KERN_FAILURE;
1323*bbb1b6f9SApple OSS Distributions 		}
1324*bbb1b6f9SApple OSS Distributions 	} else if (regs64) {
1325*bbb1b6f9SApple OSS Distributions 		currPC = regs64->isf.rip;
1326*bbb1b6f9SApple OSS Distributions 	} else if (regs32) {
1327*bbb1b6f9SApple OSS Distributions 		currPC = (uint64_t) regs32->eip;
1328*bbb1b6f9SApple OSS Distributions 	} else if (u_regs64) {
1329*bbb1b6f9SApple OSS Distributions 		currPC = u_regs64->isf.rip;
1330*bbb1b6f9SApple OSS Distributions 	} else if (u_regs32) {
1331*bbb1b6f9SApple OSS Distributions 		currPC = (uint64_t) u_regs32->eip;
1332*bbb1b6f9SApple OSS Distributions 	}
1333*bbb1b6f9SApple OSS Distributions 
1334*bbb1b6f9SApple OSS Distributions 	if (!currPC) {
1335*bbb1b6f9SApple OSS Distributions 		/* no top of the stack, bail out */
1336*bbb1b6f9SApple OSS Distributions 		return KERN_FAILURE;
1337*bbb1b6f9SApple OSS Distributions 	}
1338*bbb1b6f9SApple OSS Distributions 
1339*bbb1b6f9SApple OSS Distributions 	bufferIndex = 0;
1340*bbb1b6f9SApple OSS Distributions 
1341*bbb1b6f9SApple OSS Distributions 	if (bufferMaxIndex < 1) {
1342*bbb1b6f9SApple OSS Distributions 		*count = 0;
1343*bbb1b6f9SApple OSS Distributions 		return KERN_RESOURCE_SHORTAGE;
1344*bbb1b6f9SApple OSS Distributions 	}
1345*bbb1b6f9SApple OSS Distributions 
1346*bbb1b6f9SApple OSS Distributions 	/* backtrace kernel */
1347*bbb1b6f9SApple OSS Distributions 	if (kregs) {
1348*bbb1b6f9SApple OSS Distributions 		addr64_t address = 0ULL;
1349*bbb1b6f9SApple OSS Distributions 		size_t size = 0UL;
1350*bbb1b6f9SApple OSS Distributions 
1351*bbb1b6f9SApple OSS Distributions 		// do the backtrace
1352*bbb1b6f9SApple OSS Distributions 		kr = do_kernel_backtrace(thread, kregs, callstack, &bufferIndex, bufferMaxIndex);
1353*bbb1b6f9SApple OSS Distributions 
1354*bbb1b6f9SApple OSS Distributions 		// and do a nofault read of (r|e)sp
1355*bbb1b6f9SApple OSS Distributions 		uint64_t rsp = 0ULL;
1356*bbb1b6f9SApple OSS Distributions 		size = sizeof(uint64_t);
1357*bbb1b6f9SApple OSS Distributions 
1358*bbb1b6f9SApple OSS Distributions 		if (KERN_SUCCESS != chudxnu_kern_read(&address, (vm_offset_t)&(kregs->k_rsp), size)) {
1359*bbb1b6f9SApple OSS Distributions 			address = 0ULL;
1360*bbb1b6f9SApple OSS Distributions 		}
1361*bbb1b6f9SApple OSS Distributions 
1362*bbb1b6f9SApple OSS Distributions 		if (address && KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t)address, size) && bufferIndex < bufferMaxIndex) {
1363*bbb1b6f9SApple OSS Distributions 			callstack[bufferIndex++] = (uint64_t)rsp;
1364*bbb1b6f9SApple OSS Distributions 		}
1365*bbb1b6f9SApple OSS Distributions 	} else if (regs64) {
1366*bbb1b6f9SApple OSS Distributions 		uint64_t rsp = 0ULL;
1367*bbb1b6f9SApple OSS Distributions 
1368*bbb1b6f9SApple OSS Distributions 		// backtrace the 64bit side.
1369*bbb1b6f9SApple OSS Distributions 		kr = do_backtrace64(task, thread, regs64, callstack, &bufferIndex,
1370*bbb1b6f9SApple OSS Distributions 		    bufferMaxIndex - 1, TRUE);
1371*bbb1b6f9SApple OSS Distributions 
1372*bbb1b6f9SApple OSS Distributions 		if (KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t) regs64->isf.rsp, sizeof(uint64_t)) &&
1373*bbb1b6f9SApple OSS Distributions 		    bufferIndex < bufferMaxIndex) {
1374*bbb1b6f9SApple OSS Distributions 			callstack[bufferIndex++] = rsp;
1375*bbb1b6f9SApple OSS Distributions 		}
1376*bbb1b6f9SApple OSS Distributions 	} else if (regs32) {
1377*bbb1b6f9SApple OSS Distributions 		uint32_t esp = 0UL;
1378*bbb1b6f9SApple OSS Distributions 
1379*bbb1b6f9SApple OSS Distributions 		// backtrace the 32bit side.
1380*bbb1b6f9SApple OSS Distributions 		kr = do_backtrace32(task, thread, regs32, callstack, &bufferIndex,
1381*bbb1b6f9SApple OSS Distributions 		    bufferMaxIndex - 1, TRUE);
1382*bbb1b6f9SApple OSS Distributions 
1383*bbb1b6f9SApple OSS Distributions 		if (KERN_SUCCESS == chudxnu_kern_read(&esp, (vm_offset_t) regs32->uesp, sizeof(uint32_t)) &&
1384*bbb1b6f9SApple OSS Distributions 		    bufferIndex < bufferMaxIndex) {
1385*bbb1b6f9SApple OSS Distributions 			callstack[bufferIndex++] = (uint64_t) esp;
1386*bbb1b6f9SApple OSS Distributions 		}
1387*bbb1b6f9SApple OSS Distributions 	} else if (u_regs64 && !kern_only) {
1388*bbb1b6f9SApple OSS Distributions 		/* backtrace user land */
1389*bbb1b6f9SApple OSS Distributions 		uint64_t rsp = 0ULL;
1390*bbb1b6f9SApple OSS Distributions 
1391*bbb1b6f9SApple OSS Distributions 		kr = do_backtrace64(task, thread, u_regs64, callstack, &bufferIndex,
1392*bbb1b6f9SApple OSS Distributions 		    bufferMaxIndex - 1, FALSE);
1393*bbb1b6f9SApple OSS Distributions 
1394*bbb1b6f9SApple OSS Distributions 		if (KERN_SUCCESS == chudxnu_task_read(task, &rsp, (addr64_t) u_regs64->isf.rsp, sizeof(uint64_t)) &&
1395*bbb1b6f9SApple OSS Distributions 		    bufferIndex < bufferMaxIndex) {
1396*bbb1b6f9SApple OSS Distributions 			callstack[bufferIndex++] = rsp;
1397*bbb1b6f9SApple OSS Distributions 		}
1398*bbb1b6f9SApple OSS Distributions 	} else if (u_regs32 && !kern_only) {
1399*bbb1b6f9SApple OSS Distributions 		uint32_t esp = 0UL;
1400*bbb1b6f9SApple OSS Distributions 
1401*bbb1b6f9SApple OSS Distributions 		kr = do_backtrace32(task, thread, u_regs32, callstack, &bufferIndex,
1402*bbb1b6f9SApple OSS Distributions 		    bufferMaxIndex - 1, FALSE);
1403*bbb1b6f9SApple OSS Distributions 
1404*bbb1b6f9SApple OSS Distributions 		if (KERN_SUCCESS == chudxnu_task_read(task, &esp, (addr64_t) u_regs32->uesp, sizeof(uint32_t)) &&
1405*bbb1b6f9SApple OSS Distributions 		    bufferIndex < bufferMaxIndex) {
1406*bbb1b6f9SApple OSS Distributions 			callstack[bufferIndex++] = (uint64_t) esp;
1407*bbb1b6f9SApple OSS Distributions 		}
1408*bbb1b6f9SApple OSS Distributions 	}
1409*bbb1b6f9SApple OSS Distributions 
1410*bbb1b6f9SApple OSS Distributions 	*count = bufferIndex;
1411*bbb1b6f9SApple OSS Distributions 	return kr;
1412*bbb1b6f9SApple OSS Distributions }
1413*bbb1b6f9SApple OSS Distributions 
1414*bbb1b6f9SApple OSS Distributions __private_extern__
1415*bbb1b6f9SApple OSS Distributions kern_return_t
1416*bbb1b6f9SApple OSS Distributions chudxnu_thread_get_callstack64_kperf(
1417*bbb1b6f9SApple OSS Distributions 	thread_t                thread,
1418*bbb1b6f9SApple OSS Distributions 	uint64_t                *callstack,
1419*bbb1b6f9SApple OSS Distributions 	mach_msg_type_number_t  *count,
1420*bbb1b6f9SApple OSS Distributions 	boolean_t               is_user)
1421*bbb1b6f9SApple OSS Distributions {
1422*bbb1b6f9SApple OSS Distributions 	return chudxnu_thread_get_callstack64_internal(thread, callstack, count, is_user, !is_user);
1423*bbb1b6f9SApple OSS Distributions }
1424*bbb1b6f9SApple OSS Distributions #else /* !__arm64__ && !__x86_64__ */
1425*bbb1b6f9SApple OSS Distributions #error kperf: unsupported architecture
1426*bbb1b6f9SApple OSS Distributions #endif /* !__arm64__ && !__x86_64__ */
1427