1*a1e26a70SApple OSS Distributions /*
2*a1e26a70SApple OSS Distributions * Copyright (c) 2011-2022 Apple Computer, Inc. All rights reserved.
3*a1e26a70SApple OSS Distributions *
4*a1e26a70SApple OSS Distributions * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5*a1e26a70SApple OSS Distributions *
6*a1e26a70SApple OSS Distributions * This file contains Original Code and/or Modifications of Original Code
7*a1e26a70SApple OSS Distributions * as defined in and that are subject to the Apple Public Source License
8*a1e26a70SApple OSS Distributions * Version 2.0 (the 'License'). You may not use this file except in
9*a1e26a70SApple OSS Distributions * compliance with the License. The rights granted to you under the License
10*a1e26a70SApple OSS Distributions * may not be used to create, or enable the creation or redistribution of,
11*a1e26a70SApple OSS Distributions * unlawful or unlicensed copies of an Apple operating system, or to
12*a1e26a70SApple OSS Distributions * circumvent, violate, or enable the circumvention or violation of, any
13*a1e26a70SApple OSS Distributions * terms of an Apple operating system software license agreement.
14*a1e26a70SApple OSS Distributions *
15*a1e26a70SApple OSS Distributions * Please obtain a copy of the License at
16*a1e26a70SApple OSS Distributions * http://www.opensource.apple.com/apsl/ and read it before using this file.
17*a1e26a70SApple OSS Distributions *
18*a1e26a70SApple OSS Distributions * The Original Code and all software distributed under the License are
19*a1e26a70SApple OSS Distributions * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20*a1e26a70SApple OSS Distributions * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21*a1e26a70SApple OSS Distributions * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22*a1e26a70SApple OSS Distributions * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23*a1e26a70SApple OSS Distributions * Please see the License for the specific language governing rights and
24*a1e26a70SApple OSS Distributions * limitations under the License.
25*a1e26a70SApple OSS Distributions *
26*a1e26a70SApple OSS Distributions * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27*a1e26a70SApple OSS Distributions */
28*a1e26a70SApple OSS Distributions
29*a1e26a70SApple OSS Distributions /* Collect kernel callstacks */
30*a1e26a70SApple OSS Distributions
31*a1e26a70SApple OSS Distributions #include <mach/mach_types.h>
32*a1e26a70SApple OSS Distributions #include <kern/thread.h>
33*a1e26a70SApple OSS Distributions #include <kern/backtrace.h>
34*a1e26a70SApple OSS Distributions #include <kern/cambria_layout.h>
35*a1e26a70SApple OSS Distributions #include <vm/vm_map_xnu.h>
36*a1e26a70SApple OSS Distributions #include <kperf/buffer.h>
37*a1e26a70SApple OSS Distributions #include <kperf/context.h>
38*a1e26a70SApple OSS Distributions #include <kperf/callstack.h>
39*a1e26a70SApple OSS Distributions #include <kperf/ast.h>
40*a1e26a70SApple OSS Distributions #include <sys/errno.h>
41*a1e26a70SApple OSS Distributions #include <mach/exclaves.h>
42*a1e26a70SApple OSS Distributions
43*a1e26a70SApple OSS Distributions #if defined(__arm64__)
44*a1e26a70SApple OSS Distributions #include <arm/cpu_data.h>
45*a1e26a70SApple OSS Distributions #include <arm/cpu_data_internal.h>
46*a1e26a70SApple OSS Distributions #endif
47*a1e26a70SApple OSS Distributions
/*
 * Append one synthetic "fixup" frame to a sampled user callstack so
 * symbolicators have a hint for the innermost return address:
 *   - x86_64: the word currently at the user stack pointer
 *   - arm64: the saved user link register
 * If the saved user state is unavailable, a 0 frame is appended instead.
 * The caller must have left at least one free slot in cs->kpuc_frames.
 */
static void
callstack_fixup_user(struct kp_ucallstack *cs, thread_t thread)
{
	uint64_t fixup_val = 0;
	assert(cs->kpuc_nframes < MAX_UCALLSTACK_FRAMES);

#if defined(__x86_64__)
	user_addr_t sp_user;
	bool user_64;
	x86_saved_state_t *state;

	state = get_user_regs(thread);
	if (!state) {
		/* no saved user state -- fall through and append 0 */
		goto out;
	}

	user_64 = is_saved_state64(state);
	if (user_64) {
		sp_user = saved_state64(state)->isf.rsp;
	} else {
		sp_user = saved_state32(state)->uesp;
	}

	/*
	 * copyin only works against the current thread's address space, so
	 * other threads' stacks are read through their task's vm_map.
	 * Failures are ignored; fixup_val just stays 0.
	 */
	if (thread == current_thread()) {
		(void)copyin(sp_user, (char *)&fixup_val,
		    user_64 ? sizeof(uint64_t) : sizeof(uint32_t));
	} else {
		(void)vm_map_read_user(get_task_map(get_threadtask(thread)), sp_user,
		    &fixup_val, user_64 ? sizeof(uint64_t) : sizeof(uint32_t));
	}

#elif defined(__arm64__)

	struct arm_saved_state *state = get_user_regs(thread);
	if (!state) {
		goto out;
	}

	/* encode thumb mode into low bit of PC */
	if (is_saved_state32(state) && (get_saved_state_cpsr(state) & PSR_TF)) {
		cs->kpuc_frames[0] |= 1ULL;
	}


	fixup_val = get_saved_state_lr(state);

#else
#error "callstack_fixup_user: unsupported architecture"
#endif

out:
	cs->kpuc_frames[cs->kpuc_nframes++] = fixup_val;
}
101*a1e26a70SApple OSS Distributions
102*a1e26a70SApple OSS Distributions #if defined(__x86_64__)
103*a1e26a70SApple OSS Distributions
104*a1e26a70SApple OSS Distributions __attribute__((used))
105*a1e26a70SApple OSS Distributions static kern_return_t
interrupted_kernel_sp_value(uintptr_t * sp_val)106*a1e26a70SApple OSS Distributions interrupted_kernel_sp_value(uintptr_t *sp_val)
107*a1e26a70SApple OSS Distributions {
108*a1e26a70SApple OSS Distributions x86_saved_state_t *state;
109*a1e26a70SApple OSS Distributions uintptr_t sp;
110*a1e26a70SApple OSS Distributions bool state_64;
111*a1e26a70SApple OSS Distributions uint64_t cs;
112*a1e26a70SApple OSS Distributions uintptr_t top, bottom;
113*a1e26a70SApple OSS Distributions
114*a1e26a70SApple OSS Distributions state = current_cpu_datap()->cpu_int_state;
115*a1e26a70SApple OSS Distributions if (!state) {
116*a1e26a70SApple OSS Distributions return KERN_FAILURE;
117*a1e26a70SApple OSS Distributions }
118*a1e26a70SApple OSS Distributions
119*a1e26a70SApple OSS Distributions state_64 = is_saved_state64(state);
120*a1e26a70SApple OSS Distributions
121*a1e26a70SApple OSS Distributions if (state_64) {
122*a1e26a70SApple OSS Distributions cs = saved_state64(state)->isf.cs;
123*a1e26a70SApple OSS Distributions } else {
124*a1e26a70SApple OSS Distributions cs = saved_state32(state)->cs;
125*a1e26a70SApple OSS Distributions }
126*a1e26a70SApple OSS Distributions /* return early if interrupted a thread in user space */
127*a1e26a70SApple OSS Distributions if ((cs & SEL_PL) == SEL_PL_U) {
128*a1e26a70SApple OSS Distributions return KERN_FAILURE;
129*a1e26a70SApple OSS Distributions }
130*a1e26a70SApple OSS Distributions
131*a1e26a70SApple OSS Distributions if (state_64) {
132*a1e26a70SApple OSS Distributions sp = saved_state64(state)->isf.rsp;
133*a1e26a70SApple OSS Distributions } else {
134*a1e26a70SApple OSS Distributions sp = saved_state32(state)->uesp;
135*a1e26a70SApple OSS Distributions }
136*a1e26a70SApple OSS Distributions
137*a1e26a70SApple OSS Distributions /* make sure the stack pointer is pointing somewhere in this stack */
138*a1e26a70SApple OSS Distributions bottom = current_thread()->kernel_stack;
139*a1e26a70SApple OSS Distributions top = bottom + kernel_stack_size;
140*a1e26a70SApple OSS Distributions if (sp >= bottom && sp < top) {
141*a1e26a70SApple OSS Distributions return KERN_FAILURE;
142*a1e26a70SApple OSS Distributions }
143*a1e26a70SApple OSS Distributions
144*a1e26a70SApple OSS Distributions *sp_val = *(uintptr_t *)sp;
145*a1e26a70SApple OSS Distributions return KERN_SUCCESS;
146*a1e26a70SApple OSS Distributions }
147*a1e26a70SApple OSS Distributions
148*a1e26a70SApple OSS Distributions #elif defined(__arm64__)
149*a1e26a70SApple OSS Distributions
150*a1e26a70SApple OSS Distributions __attribute__((used))
151*a1e26a70SApple OSS Distributions static kern_return_t
interrupted_kernel_lr(uintptr_t * lr)152*a1e26a70SApple OSS Distributions interrupted_kernel_lr(uintptr_t *lr)
153*a1e26a70SApple OSS Distributions {
154*a1e26a70SApple OSS Distributions struct arm_saved_state *state;
155*a1e26a70SApple OSS Distributions
156*a1e26a70SApple OSS Distributions state = getCpuDatap()->cpu_int_state;
157*a1e26a70SApple OSS Distributions
158*a1e26a70SApple OSS Distributions /* return early if interrupted a thread in user space */
159*a1e26a70SApple OSS Distributions if (PSR64_IS_USER(get_saved_state_cpsr(state))) {
160*a1e26a70SApple OSS Distributions return KERN_FAILURE;
161*a1e26a70SApple OSS Distributions }
162*a1e26a70SApple OSS Distributions
163*a1e26a70SApple OSS Distributions *lr = get_saved_state_lr(state);
164*a1e26a70SApple OSS Distributions return KERN_SUCCESS;
165*a1e26a70SApple OSS Distributions }
166*a1e26a70SApple OSS Distributions #else /* defined(__arm64__) */
167*a1e26a70SApple OSS Distributions #error "interrupted_kernel_{sp,lr}: unsupported architecture"
168*a1e26a70SApple OSS Distributions #endif /* !defined(__arm64__) */
169*a1e26a70SApple OSS Distributions
170*a1e26a70SApple OSS Distributions
171*a1e26a70SApple OSS Distributions static void
callstack_fixup_interrupted(struct kp_kcallstack * cs)172*a1e26a70SApple OSS Distributions callstack_fixup_interrupted(struct kp_kcallstack *cs)
173*a1e26a70SApple OSS Distributions {
174*a1e26a70SApple OSS Distributions uintptr_t fixup_val = 0;
175*a1e26a70SApple OSS Distributions assert(cs->kpkc_nframes < MAX_KCALLSTACK_FRAMES);
176*a1e26a70SApple OSS Distributions
177*a1e26a70SApple OSS Distributions /*
178*a1e26a70SApple OSS Distributions * Only provide arbitrary data on development or debug kernels.
179*a1e26a70SApple OSS Distributions */
180*a1e26a70SApple OSS Distributions #if DEVELOPMENT || DEBUG
181*a1e26a70SApple OSS Distributions #if defined(__x86_64__)
182*a1e26a70SApple OSS Distributions (void)interrupted_kernel_sp_value(&fixup_val);
183*a1e26a70SApple OSS Distributions #elif defined(__arm64__)
184*a1e26a70SApple OSS Distributions (void)interrupted_kernel_lr(&fixup_val);
185*a1e26a70SApple OSS Distributions #endif /* defined(__x86_64__) */
186*a1e26a70SApple OSS Distributions #endif /* DEVELOPMENT || DEBUG */
187*a1e26a70SApple OSS Distributions
188*a1e26a70SApple OSS Distributions assert(cs->kpkc_flags & CALLSTACK_KERNEL);
189*a1e26a70SApple OSS Distributions cs->kpkc_frames[cs->kpkc_nframes++] = fixup_val;
190*a1e26a70SApple OSS Distributions }
191*a1e26a70SApple OSS Distributions
192*a1e26a70SApple OSS Distributions void
kperf_continuation_sample(struct kp_kcallstack * cs,struct kperf_context * context)193*a1e26a70SApple OSS Distributions kperf_continuation_sample(struct kp_kcallstack *cs, struct kperf_context *context)
194*a1e26a70SApple OSS Distributions {
195*a1e26a70SApple OSS Distributions thread_t thread;
196*a1e26a70SApple OSS Distributions
197*a1e26a70SApple OSS Distributions assert(cs != NULL);
198*a1e26a70SApple OSS Distributions assert(context != NULL);
199*a1e26a70SApple OSS Distributions
200*a1e26a70SApple OSS Distributions thread = context->cur_thread;
201*a1e26a70SApple OSS Distributions assert(thread != NULL);
202*a1e26a70SApple OSS Distributions assert(thread->continuation != NULL);
203*a1e26a70SApple OSS Distributions
204*a1e26a70SApple OSS Distributions cs->kpkc_flags = CALLSTACK_CONTINUATION | CALLSTACK_VALID | CALLSTACK_KERNEL;
205*a1e26a70SApple OSS Distributions #ifdef __LP64__
206*a1e26a70SApple OSS Distributions cs->kpkc_flags |= CALLSTACK_64BIT;
207*a1e26a70SApple OSS Distributions #endif
208*a1e26a70SApple OSS Distributions
209*a1e26a70SApple OSS Distributions cs->kpkc_nframes = 1;
210*a1e26a70SApple OSS Distributions cs->kpkc_frames[0] = VM_KERNEL_UNSLIDE(thread->continuation);
211*a1e26a70SApple OSS Distributions }
212*a1e26a70SApple OSS Distributions
213*a1e26a70SApple OSS Distributions void
kperf_backtrace_sample(struct kp_kcallstack * cs,struct kperf_context * context)214*a1e26a70SApple OSS Distributions kperf_backtrace_sample(struct kp_kcallstack *cs, struct kperf_context *context)
215*a1e26a70SApple OSS Distributions {
216*a1e26a70SApple OSS Distributions assert(cs != NULL);
217*a1e26a70SApple OSS Distributions assert(context != NULL);
218*a1e26a70SApple OSS Distributions assert(context->cur_thread == current_thread());
219*a1e26a70SApple OSS Distributions
220*a1e26a70SApple OSS Distributions cs->kpkc_flags = CALLSTACK_KERNEL | CALLSTACK_KERNEL_WORDS;
221*a1e26a70SApple OSS Distributions #ifdef __LP64__
222*a1e26a70SApple OSS Distributions cs->kpkc_flags |= CALLSTACK_64BIT;
223*a1e26a70SApple OSS Distributions #endif
224*a1e26a70SApple OSS Distributions
225*a1e26a70SApple OSS Distributions BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_START, 1);
226*a1e26a70SApple OSS Distributions
227*a1e26a70SApple OSS Distributions backtrace_info_t btinfo = BTI_NONE;
228*a1e26a70SApple OSS Distributions struct backtrace_control ctl = {
229*a1e26a70SApple OSS Distributions .btc_frame_addr = (uintptr_t)context->starting_fp,
230*a1e26a70SApple OSS Distributions };
231*a1e26a70SApple OSS Distributions cs->kpkc_nframes = backtrace(cs->kpkc_word_frames, cs->kpkc_nframes - 1,
232*a1e26a70SApple OSS Distributions &ctl, &btinfo);
233*a1e26a70SApple OSS Distributions if (cs->kpkc_nframes > 0) {
234*a1e26a70SApple OSS Distributions cs->kpkc_flags |= CALLSTACK_VALID;
235*a1e26a70SApple OSS Distributions
236*a1e26a70SApple OSS Distributions cs->kpkc_exclaves_offset = 0;
237*a1e26a70SApple OSS Distributions #if CONFIG_EXCLAVES
238*a1e26a70SApple OSS Distributions if ((context->cur_thread->th_exclaves_state & TH_EXCLAVES_RPC) != 0) {
239*a1e26a70SApple OSS Distributions cs->kpkc_exclaves_offset = exclaves_stack_offset(cs->kpkc_word_frames, cs->kpkc_nframes, true);
240*a1e26a70SApple OSS Distributions }
241*a1e26a70SApple OSS Distributions #endif /* CONFIG_EXCLAVES */
242*a1e26a70SApple OSS Distributions
243*a1e26a70SApple OSS Distributions /*
244*a1e26a70SApple OSS Distributions * Fake the value pointed to by the stack pointer or the link
245*a1e26a70SApple OSS Distributions * register for symbolicators.
246*a1e26a70SApple OSS Distributions */
247*a1e26a70SApple OSS Distributions cs->kpkc_word_frames[cs->kpkc_nframes + 1] = 0;
248*a1e26a70SApple OSS Distributions cs->kpkc_nframes += 1;
249*a1e26a70SApple OSS Distributions }
250*a1e26a70SApple OSS Distributions if ((btinfo & BTI_TRUNCATED)) {
251*a1e26a70SApple OSS Distributions cs->kpkc_flags |= CALLSTACK_TRUNCATED;
252*a1e26a70SApple OSS Distributions }
253*a1e26a70SApple OSS Distributions
254*a1e26a70SApple OSS Distributions BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_END, cs->kpkc_nframes);
255*a1e26a70SApple OSS Distributions }
256*a1e26a70SApple OSS Distributions
/*
 * Legacy CHUD backtracer, defined elsewhere; used below to walk the kernel
 * stacks of threads other than the current one.
 */
kern_return_t chudxnu_thread_get_callstack64_kperf(thread_t thread,
    uint64_t *callStack, mach_msg_type_number_t *count,
    boolean_t user_only);
260*a1e26a70SApple OSS Distributions
/*
 * Sample the kernel callstack of the thread in `context` into `cs`.
 *
 * The caller presets cs->kpkc_nframes to the capacity of the frame buffer.
 * At interrupt context the current thread is backtraced directly (word-sized
 * frames, with a synthetic fixup frame appended); otherwise the legacy CHUD
 * backtracer walks the target thread's stack into 64-bit frames.
 * CALLSTACK_VALID is set in kpkc_flags on success.
 */
void
kperf_kcallstack_sample(struct kp_kcallstack *cs, struct kperf_context *context)
{
	thread_t thread;

	assert(cs != NULL);
	assert(context != NULL);
	assert(cs->kpkc_nframes <= MAX_KCALLSTACK_FRAMES);

	thread = context->cur_thread;
	assert(thread != NULL);

	BUF_INFO(PERF_CS_KSAMPLE | DBG_FUNC_START, (uintptr_t)thread_tid(thread),
	    cs->kpkc_nframes);

	cs->kpkc_flags = CALLSTACK_KERNEL;
#ifdef __LP64__
	cs->kpkc_flags |= CALLSTACK_64BIT;
#endif

	if (ml_at_interrupt_context()) {
		/* interrupts only ever sample the thread they interrupted */
		assert(thread == current_thread());
		cs->kpkc_flags |= CALLSTACK_KERNEL_WORDS;
		backtrace_info_t btinfo = BTI_NONE;
		struct backtrace_control ctl = { .btc_flags = BTF_KERN_INTERRUPTED, };
		/* reserve one slot for the fixup frame appended below */
		cs->kpkc_nframes = backtrace(cs->kpkc_word_frames, cs->kpkc_nframes - 1,
		    &ctl, &btinfo);
		if (cs->kpkc_nframes != 0) {
			callstack_fixup_interrupted(cs);
		}
		if ((btinfo & BTI_TRUNCATED)) {
			cs->kpkc_flags |= CALLSTACK_TRUNCATED;
		}

		cs->kpkc_exclaves_offset = 0;
#if CONFIG_EXCLAVES
		/* note where the stack crossed into an exclave RPC, if it did */
		if ((thread->th_exclaves_state & TH_EXCLAVES_RPC) != 0) {
			cs->kpkc_exclaves_offset = exclaves_stack_offset(cs->kpkc_word_frames, cs->kpkc_nframes, true);
		}
#endif /* CONFIG_EXCLAVES */
	} else {
		/*
		 * Rely on legacy CHUD backtracer to backtrace kernel stacks on
		 * other threads.
		 */
		kern_return_t kr;
		kr = chudxnu_thread_get_callstack64_kperf(thread,
		    cs->kpkc_frames, &cs->kpkc_nframes, FALSE);
		if (kr == KERN_SUCCESS) {
			cs->kpkc_flags |= CALLSTACK_VALID;
		} else if (kr == KERN_RESOURCE_SHORTAGE) {
			/* ran out of frame slots: valid but incomplete */
			cs->kpkc_flags |= CALLSTACK_VALID;
			cs->kpkc_flags |= CALLSTACK_TRUNCATED;
		} else {
			cs->kpkc_nframes = 0;
		}
	}

	if (!(cs->kpkc_flags & CALLSTACK_VALID)) {
		BUF_INFO(PERF_CS_ERROR, ERR_GETSTACK);
	}

	BUF_INFO(PERF_CS_KSAMPLE | DBG_FUNC_END, (uintptr_t)thread_tid(thread),
	    cs->kpkc_flags, cs->kpkc_nframes);
}
326*a1e26a70SApple OSS Distributions
/*
 * Sample the user callstack of the thread in `context` into `cs`.
 *
 * The caller presets cs->kpuc_nframes to the frame-buffer capacity.  One
 * slot is reserved for the fixup frame appended by callstack_fixup_user.
 * If the backtracer reports an async (e.g. Swift continuation) frame chain
 * and there is room left, it is walked as well and appended after the
 * synchronous frames, with CALLSTACK_HAS_ASYNC set.
 */
void
kperf_ucallstack_sample(struct kp_ucallstack *cs, struct kperf_context *context)
{
	/* copying in user frames can fault, so interrupts must be enabled */
	assert(ml_get_interrupts_enabled() == TRUE);

	thread_t thread = context->cur_thread;
	assert(thread != NULL);

	BUF_INFO(PERF_CS_USAMPLE | DBG_FUNC_START,
	    (uintptr_t)thread_tid(thread), cs->kpuc_nframes);

	struct backtrace_user_info btinfo = BTUINFO_INIT;
	/*
	 * Leave space for the fixup information.
	 */
	unsigned int maxnframes = cs->kpuc_nframes - 1;
	struct backtrace_control ctl = { .btc_user_thread = thread, };
	unsigned int nframes = backtrace_user(cs->kpuc_frames, maxnframes, &ctl,
	    &btinfo);
	cs->kpuc_nframes = MIN(maxnframes, nframes);

	cs->kpuc_flags |= CALLSTACK_KERNEL_WORDS |
	    ((btinfo.btui_info & BTI_TRUNCATED) ? CALLSTACK_TRUNCATED : 0) |
	    ((btinfo.btui_info & BTI_64_BIT) ? CALLSTACK_64BIT : 0);

	/*
	 * Ignore EFAULT to get as much of the stack as possible.
	 */
	if (btinfo.btui_error == 0 || btinfo.btui_error == EFAULT) {
		callstack_fixup_user(cs, thread);
		cs->kpuc_flags |= CALLSTACK_VALID;

		/* walk the async frame chain, if reported and room remains */
		if (cs->kpuc_nframes < maxnframes &&
		    btinfo.btui_async_frame_addr != 0) {
			cs->kpuc_async_index = btinfo.btui_async_start_index;
			ctl.btc_frame_addr = btinfo.btui_async_frame_addr;
			ctl.btc_addr_offset = BTCTL_ASYNC_ADDR_OFFSET;
			maxnframes -= cs->kpuc_nframes;
			btinfo = BTUINFO_INIT;
			unsigned int nasync_frames = backtrace_user(
				&cs->kpuc_frames[cs->kpuc_nframes], maxnframes, &ctl, &btinfo);
			if (btinfo.btui_info & BTI_TRUNCATED) {
				cs->kpuc_flags |= CALLSTACK_TRUNCATED;
			}
			/* only count the async frames if they were read cleanly */
			if (btinfo.btui_error == 0 || btinfo.btui_error == EFAULT) {
				cs->kpuc_flags |= CALLSTACK_HAS_ASYNC;
				cs->kpuc_async_nframes = nasync_frames;
			}
		}
	} else {
		cs->kpuc_nframes = 0;
		BUF_INFO(PERF_CS_ERROR, ERR_GETSTACK, btinfo.btui_error);
	}

	BUF_INFO(PERF_CS_USAMPLE | DBG_FUNC_END, (uintptr_t)thread_tid(thread),
	    cs->kpuc_flags, cs->kpuc_nframes);
}
384*a1e26a70SApple OSS Distributions
385*a1e26a70SApple OSS Distributions static inline uintptr_t
scrub_word(uintptr_t * bt,int n_frames,int frame,bool kern)386*a1e26a70SApple OSS Distributions scrub_word(uintptr_t *bt, int n_frames, int frame, bool kern)
387*a1e26a70SApple OSS Distributions {
388*a1e26a70SApple OSS Distributions if (frame < n_frames) {
389*a1e26a70SApple OSS Distributions if (kern) {
390*a1e26a70SApple OSS Distributions return VM_KERNEL_UNSLIDE(bt[frame]);
391*a1e26a70SApple OSS Distributions } else {
392*a1e26a70SApple OSS Distributions return bt[frame];
393*a1e26a70SApple OSS Distributions }
394*a1e26a70SApple OSS Distributions } else {
395*a1e26a70SApple OSS Distributions return 0;
396*a1e26a70SApple OSS Distributions }
397*a1e26a70SApple OSS Distributions }
398*a1e26a70SApple OSS Distributions
/*
 * Return frame `frame` of the 64-bit frame array `bt`, narrowed to a
 * uintptr_t, or 0 when the index is out of range (zero-padding for trace
 * events).
 */
static inline uintptr_t
scrub_frame(uint64_t *bt, int n_frames, int frame)
{
	return (frame < n_frames) ? (uintptr_t)bt[frame] : 0;
}
408*a1e26a70SApple OSS Distributions
409*a1e26a70SApple OSS Distributions static void
callstack_log(uint32_t hdrid,uint32_t dataid,void * vframes,unsigned int nframes,unsigned int flags,unsigned int async_index,unsigned int async_nframes)410*a1e26a70SApple OSS Distributions callstack_log(uint32_t hdrid, uint32_t dataid, void *vframes,
411*a1e26a70SApple OSS Distributions unsigned int nframes, unsigned int flags, unsigned int async_index,
412*a1e26a70SApple OSS Distributions unsigned int async_nframes)
413*a1e26a70SApple OSS Distributions {
414*a1e26a70SApple OSS Distributions BUF_VERB(PERF_CS_LOG | DBG_FUNC_START, flags, nframes);
415*a1e26a70SApple OSS Distributions BUF_DATA(hdrid, flags, nframes - async_nframes, async_index, async_nframes);
416*a1e26a70SApple OSS Distributions
417*a1e26a70SApple OSS Distributions unsigned int nevts = nframes / 4;
418*a1e26a70SApple OSS Distributions unsigned int ovf = nframes % 4;
419*a1e26a70SApple OSS Distributions if (ovf != 0) {
420*a1e26a70SApple OSS Distributions nevts++;
421*a1e26a70SApple OSS Distributions }
422*a1e26a70SApple OSS Distributions
423*a1e26a70SApple OSS Distributions bool kern = flags & CALLSTACK_KERNEL;
424*a1e26a70SApple OSS Distributions
425*a1e26a70SApple OSS Distributions if (flags & CALLSTACK_KERNEL_WORDS) {
426*a1e26a70SApple OSS Distributions uintptr_t *frames = vframes;
427*a1e26a70SApple OSS Distributions for (unsigned int i = 0; i < nevts; i++) {
428*a1e26a70SApple OSS Distributions unsigned int j = i * 4;
429*a1e26a70SApple OSS Distributions BUF_DATA(dataid,
430*a1e26a70SApple OSS Distributions scrub_word(frames, nframes, j + 0, kern),
431*a1e26a70SApple OSS Distributions scrub_word(frames, nframes, j + 1, kern),
432*a1e26a70SApple OSS Distributions scrub_word(frames, nframes, j + 2, kern),
433*a1e26a70SApple OSS Distributions scrub_word(frames, nframes, j + 3, kern));
434*a1e26a70SApple OSS Distributions }
435*a1e26a70SApple OSS Distributions } else {
436*a1e26a70SApple OSS Distributions for (unsigned int i = 0; i < nevts; i++) {
437*a1e26a70SApple OSS Distributions uint64_t *frames = vframes;
438*a1e26a70SApple OSS Distributions unsigned int j = i * 4;
439*a1e26a70SApple OSS Distributions BUF_DATA(dataid,
440*a1e26a70SApple OSS Distributions scrub_frame(frames, nframes, j + 0),
441*a1e26a70SApple OSS Distributions scrub_frame(frames, nframes, j + 1),
442*a1e26a70SApple OSS Distributions scrub_frame(frames, nframes, j + 2),
443*a1e26a70SApple OSS Distributions scrub_frame(frames, nframes, j + 3));
444*a1e26a70SApple OSS Distributions }
445*a1e26a70SApple OSS Distributions }
446*a1e26a70SApple OSS Distributions
447*a1e26a70SApple OSS Distributions BUF_VERB(PERF_CS_LOG | DBG_FUNC_END, flags, nframes);
448*a1e26a70SApple OSS Distributions }
449*a1e26a70SApple OSS Distributions
450*a1e26a70SApple OSS Distributions void
kperf_kcallstack_log(struct kp_kcallstack * cs)451*a1e26a70SApple OSS Distributions kperf_kcallstack_log(struct kp_kcallstack *cs)
452*a1e26a70SApple OSS Distributions {
453*a1e26a70SApple OSS Distributions callstack_log(PERF_CS_KHDR, PERF_CS_KDATA, cs->kpkc_frames,
454*a1e26a70SApple OSS Distributions cs->kpkc_nframes, cs->kpkc_flags, 0, 0);
455*a1e26a70SApple OSS Distributions
456*a1e26a70SApple OSS Distributions if (cs->kpkc_exclaves_offset != 0) {
457*a1e26a70SApple OSS Distributions BUF_DATA(PERF_CS_KEXOFFSET, cs->kpkc_exclaves_offset);
458*a1e26a70SApple OSS Distributions }
459*a1e26a70SApple OSS Distributions }
460*a1e26a70SApple OSS Distributions
461*a1e26a70SApple OSS Distributions void
kperf_ucallstack_log(struct kp_ucallstack * cs)462*a1e26a70SApple OSS Distributions kperf_ucallstack_log(struct kp_ucallstack *cs)
463*a1e26a70SApple OSS Distributions {
464*a1e26a70SApple OSS Distributions callstack_log(PERF_CS_UHDR, PERF_CS_UDATA, cs->kpuc_frames,
465*a1e26a70SApple OSS Distributions cs->kpuc_nframes + cs->kpuc_async_nframes, cs->kpuc_flags,
466*a1e26a70SApple OSS Distributions cs->kpuc_async_index, cs->kpuc_async_nframes);
467*a1e26a70SApple OSS Distributions }
468*a1e26a70SApple OSS Distributions
#if CONFIG_EXCLAVES
/*
 * Log an exclave IPC callstack entry: emit the address-space id, then copy
 * up to MAX_EXCALLSTACK_FRAMES addresses out of the stackshot entry and log
 * them like any other callstack.  Frames beyond the limit set
 * CALLSTACK_TRUNCATED.  Nothing beyond the asid event is logged if the
 * entry carries no stacktrace.
 */
void
kperf_excallstack_log(const stackshottypes_ipcstackentry_s *ipcstack)
{
	/* __block: these are mutated from inside the visitor block below */
	__block unsigned int nframes = 0;
	__block unsigned int flags = CALLSTACK_VALID;
	uint64_t frames[MAX_EXCALLSTACK_FRAMES] = {};
	uint64_t *frames_block = frames;

	BUF_DATA(PERF_CS_EXSTACK, ipcstack->asid);

	if (ipcstack->stacktrace.has_value) {
		address__v_visit(&ipcstack->stacktrace.value, ^(size_t i, const stackshottypes_address_s item) {
			if (i >= MAX_EXCALLSTACK_FRAMES) {
				/* out of local buffer; mark and skip the rest */
				flags |= CALLSTACK_TRUNCATED;
				return;
			}
			frames_block[i] = item;
			nframes += 1;
		});
		callstack_log(PERF_CS_EXHDR, PERF_CS_EXDATA, frames, nframes, flags, 0, 0);
	}
}
492*a1e26a70SApple OSS Distributions
/*
 * Request an exclave callstack collection for the current sample's thread.
 * Only applies when the thread is in an exclave RPC and has not opted out
 * of inspection; marks the thread for kperf exclave inspection and stashes
 * the action id for the collector.  Returns true if the collection was
 * pended.
 */
bool
kperf_exclave_callstack_pend(struct kperf_context *context, unsigned int actionid)
{
	if ((context->cur_thread->th_exclaves_state & TH_EXCLAVES_RPC)
	    && (os_atomic_load(&context->cur_thread->th_exclaves_inspection_state, relaxed) & TH_EXCLAVES_INSPECTION_NOINSPECT) == 0) {
		/* flag the thread so the exclave inspection path samples it */
		os_atomic_or(&context->cur_thread->th_exclaves_inspection_state, TH_EXCLAVES_INSPECTION_KPERF, relaxed);
		context->cur_thread->kperf_exclaves_ast |= T_KPERF_SET_ACTIONID(actionid);
		return true;
	}
	return false;
}
#endif /* CONFIG_EXCLAVES */
505*a1e26a70SApple OSS Distributions
506*a1e26a70SApple OSS Distributions int
kperf_ucallstack_pend(struct kperf_context * context,uint32_t depth,unsigned int actionid)507*a1e26a70SApple OSS Distributions kperf_ucallstack_pend(struct kperf_context * context, uint32_t depth,
508*a1e26a70SApple OSS Distributions unsigned int actionid)
509*a1e26a70SApple OSS Distributions {
510*a1e26a70SApple OSS Distributions if (depth < 2) {
511*a1e26a70SApple OSS Distributions panic("HUH");
512*a1e26a70SApple OSS Distributions }
513*a1e26a70SApple OSS Distributions kperf_ast_set_callstack_depth(context->cur_thread, depth);
514*a1e26a70SApple OSS Distributions return kperf_ast_pend(context->cur_thread, T_KPERF_AST_CALLSTACK,
515*a1e26a70SApple OSS Distributions actionid);
516*a1e26a70SApple OSS Distributions }
517*a1e26a70SApple OSS Distributions
518*a1e26a70SApple OSS Distributions static kern_return_t
chudxnu_kern_read(void * dstaddr,vm_offset_t srcaddr,vm_size_t size)519*a1e26a70SApple OSS Distributions chudxnu_kern_read(void *dstaddr, vm_offset_t srcaddr, vm_size_t size)
520*a1e26a70SApple OSS Distributions {
521*a1e26a70SApple OSS Distributions return (ml_nofault_copy(srcaddr, (vm_offset_t)dstaddr, size) == size) ?
522*a1e26a70SApple OSS Distributions KERN_SUCCESS : KERN_FAILURE;
523*a1e26a70SApple OSS Distributions }
524*a1e26a70SApple OSS Distributions
525*a1e26a70SApple OSS Distributions static kern_return_t
chudxnu_task_read(task_t task,void * kernaddr,uint64_t usraddr,vm_size_t size)526*a1e26a70SApple OSS Distributions chudxnu_task_read(
527*a1e26a70SApple OSS Distributions task_t task,
528*a1e26a70SApple OSS Distributions void *kernaddr,
529*a1e26a70SApple OSS Distributions uint64_t usraddr,
530*a1e26a70SApple OSS Distributions vm_size_t size)
531*a1e26a70SApple OSS Distributions {
532*a1e26a70SApple OSS Distributions //ppc version ported to arm
533*a1e26a70SApple OSS Distributions kern_return_t ret = KERN_SUCCESS;
534*a1e26a70SApple OSS Distributions
535*a1e26a70SApple OSS Distributions if (ml_at_interrupt_context()) {
536*a1e26a70SApple OSS Distributions return KERN_FAILURE; // can't look at tasks on interrupt stack
537*a1e26a70SApple OSS Distributions }
538*a1e26a70SApple OSS Distributions
539*a1e26a70SApple OSS Distributions if (current_task() == task) {
540*a1e26a70SApple OSS Distributions if (copyin(usraddr, kernaddr, size)) {
541*a1e26a70SApple OSS Distributions ret = KERN_FAILURE;
542*a1e26a70SApple OSS Distributions }
543*a1e26a70SApple OSS Distributions } else {
544*a1e26a70SApple OSS Distributions vm_map_t map = get_task_map(task);
545*a1e26a70SApple OSS Distributions ret = vm_map_read_user(map, usraddr, kernaddr, size);
546*a1e26a70SApple OSS Distributions }
547*a1e26a70SApple OSS Distributions
548*a1e26a70SApple OSS Distributions return ret;
549*a1e26a70SApple OSS Distributions }
550*a1e26a70SApple OSS Distributions
551*a1e26a70SApple OSS Distributions static inline uint64_t
chudxnu_vm_unslide(uint64_t ptr,int kaddr)552*a1e26a70SApple OSS Distributions chudxnu_vm_unslide( uint64_t ptr, int kaddr )
553*a1e26a70SApple OSS Distributions {
554*a1e26a70SApple OSS Distributions if (!kaddr) {
555*a1e26a70SApple OSS Distributions return ptr;
556*a1e26a70SApple OSS Distributions }
557*a1e26a70SApple OSS Distributions
558*a1e26a70SApple OSS Distributions return VM_KERNEL_UNSLIDE(ptr);
559*a1e26a70SApple OSS Distributions }
560*a1e26a70SApple OSS Distributions
561*a1e26a70SApple OSS Distributions #if __arm64__
562*a1e26a70SApple OSS Distributions
563*a1e26a70SApple OSS Distributions #if defined(HAS_APPLE_PAC)
564*a1e26a70SApple OSS Distributions #include <ptrauth.h>
565*a1e26a70SApple OSS Distributions #endif
566*a1e26a70SApple OSS Distributions
567*a1e26a70SApple OSS Distributions // chudxnu_thread_get_callstack gathers a raw callstack along with any information needed to
568*a1e26a70SApple OSS Distributions // fix it up later (in case we stopped program as it was saving values into prev stack frame, etc.)
569*a1e26a70SApple OSS Distributions // after sampling has finished.
570*a1e26a70SApple OSS Distributions //
571*a1e26a70SApple OSS Distributions // For an N-entry callstack:
572*a1e26a70SApple OSS Distributions //
573*a1e26a70SApple OSS Distributions // [0] current pc
574*a1e26a70SApple OSS Distributions // [1..N-3] stack frames (including current one)
575*a1e26a70SApple OSS Distributions // [N-2] current LR (return value if we're in a leaf function)
576*a1e26a70SApple OSS Distributions // [N-1] current r0 (in case we've saved LR in r0) (optional)
577*a1e26a70SApple OSS Distributions //
578*a1e26a70SApple OSS Distributions //
579*a1e26a70SApple OSS Distributions #define CS_FLAG_EXTRASP 1 // capture extra sp register
580*a1e26a70SApple OSS Distributions
581*a1e26a70SApple OSS Distributions static kern_return_t
chudxnu_thread_get_callstack64_internal(thread_t thread,uint64_t * callStack,mach_msg_type_number_t * count,boolean_t user_only,int flags)582*a1e26a70SApple OSS Distributions chudxnu_thread_get_callstack64_internal(
583*a1e26a70SApple OSS Distributions thread_t thread,
584*a1e26a70SApple OSS Distributions uint64_t *callStack,
585*a1e26a70SApple OSS Distributions mach_msg_type_number_t *count,
586*a1e26a70SApple OSS Distributions boolean_t user_only,
587*a1e26a70SApple OSS Distributions int flags)
588*a1e26a70SApple OSS Distributions {
589*a1e26a70SApple OSS Distributions kern_return_t kr = KERN_SUCCESS;
590*a1e26a70SApple OSS Distributions task_t task;
591*a1e26a70SApple OSS Distributions uint64_t currPC = 0ULL, currLR = 0ULL, currSP = 0ULL;
592*a1e26a70SApple OSS Distributions uint64_t prevPC = 0ULL;
593*a1e26a70SApple OSS Distributions uint64_t kernStackMin = thread->kernel_stack;
594*a1e26a70SApple OSS Distributions uint64_t kernStackMax = kernStackMin + kernel_stack_size;
595*a1e26a70SApple OSS Distributions uint64_t *buffer = callStack;
596*a1e26a70SApple OSS Distributions int bufferIndex = 0;
597*a1e26a70SApple OSS Distributions int bufferMaxIndex = 0;
598*a1e26a70SApple OSS Distributions boolean_t kernel = FALSE;
599*a1e26a70SApple OSS Distributions struct arm_saved_state *sstate = NULL;
600*a1e26a70SApple OSS Distributions uint64_t pc = 0ULL;
601*a1e26a70SApple OSS Distributions
602*a1e26a70SApple OSS Distributions task = get_threadtask(thread);
603*a1e26a70SApple OSS Distributions bufferMaxIndex = *count;
604*a1e26a70SApple OSS Distributions //get thread state
605*a1e26a70SApple OSS Distributions if (user_only) {
606*a1e26a70SApple OSS Distributions sstate = find_user_regs(thread);
607*a1e26a70SApple OSS Distributions } else {
608*a1e26a70SApple OSS Distributions sstate = find_kern_regs(thread);
609*a1e26a70SApple OSS Distributions }
610*a1e26a70SApple OSS Distributions
611*a1e26a70SApple OSS Distributions if (!sstate) {
612*a1e26a70SApple OSS Distributions *count = 0;
613*a1e26a70SApple OSS Distributions return KERN_FAILURE;
614*a1e26a70SApple OSS Distributions }
615*a1e26a70SApple OSS Distributions
616*a1e26a70SApple OSS Distributions if (is_saved_state64(sstate)) {
617*a1e26a70SApple OSS Distributions struct arm_saved_state64 *state = NULL;
618*a1e26a70SApple OSS Distributions uint64_t *fp = NULL, *nextFramePointer = NULL, *topfp = NULL;
619*a1e26a70SApple OSS Distributions uint64_t frame[2];
620*a1e26a70SApple OSS Distributions
621*a1e26a70SApple OSS Distributions state = saved_state64(sstate);
622*a1e26a70SApple OSS Distributions
623*a1e26a70SApple OSS Distributions /* make sure it is safe to dereference before you do it */
624*a1e26a70SApple OSS Distributions kernel = PSR64_IS_KERNEL(state->cpsr);
625*a1e26a70SApple OSS Distributions
626*a1e26a70SApple OSS Distributions /* can't take a kernel callstack if we've got a user frame */
627*a1e26a70SApple OSS Distributions if (!user_only && !kernel) {
628*a1e26a70SApple OSS Distributions return KERN_FAILURE;
629*a1e26a70SApple OSS Distributions }
630*a1e26a70SApple OSS Distributions
631*a1e26a70SApple OSS Distributions /*
632*a1e26a70SApple OSS Distributions * Reserve space for saving LR (and sometimes SP) at the end of the
633*a1e26a70SApple OSS Distributions * backtrace.
634*a1e26a70SApple OSS Distributions */
635*a1e26a70SApple OSS Distributions if (flags & CS_FLAG_EXTRASP) {
636*a1e26a70SApple OSS Distributions bufferMaxIndex -= 2;
637*a1e26a70SApple OSS Distributions } else {
638*a1e26a70SApple OSS Distributions bufferMaxIndex -= 1;
639*a1e26a70SApple OSS Distributions }
640*a1e26a70SApple OSS Distributions
641*a1e26a70SApple OSS Distributions if (bufferMaxIndex < 2) {
642*a1e26a70SApple OSS Distributions *count = 0;
643*a1e26a70SApple OSS Distributions return KERN_RESOURCE_SHORTAGE;
644*a1e26a70SApple OSS Distributions }
645*a1e26a70SApple OSS Distributions
646*a1e26a70SApple OSS Distributions currPC = state->pc;
647*a1e26a70SApple OSS Distributions currLR = state->lr;
648*a1e26a70SApple OSS Distributions currSP = state->sp;
649*a1e26a70SApple OSS Distributions
650*a1e26a70SApple OSS Distributions fp = (uint64_t *)state->fp; /* frame pointer */
651*a1e26a70SApple OSS Distributions #if defined(HAS_APPLE_PAC)
652*a1e26a70SApple OSS Distributions /* frame pointers on stack will be signed by arm64e ABI */
653*a1e26a70SApple OSS Distributions fp = ptrauth_strip(fp, ptrauth_key_frame_pointer);
654*a1e26a70SApple OSS Distributions #endif
655*a1e26a70SApple OSS Distributions topfp = fp;
656*a1e26a70SApple OSS Distributions
657*a1e26a70SApple OSS Distributions bufferIndex = 0; // start with a stack of size zero
658*a1e26a70SApple OSS Distributions buffer[bufferIndex++] = chudxnu_vm_unslide(currPC, kernel); // save PC in position 0.
659*a1e26a70SApple OSS Distributions
660*a1e26a70SApple OSS Distributions BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_START, kernel, 0);
661*a1e26a70SApple OSS Distributions
662*a1e26a70SApple OSS Distributions // Now, fill buffer with stack backtraces.
663*a1e26a70SApple OSS Distributions while (bufferIndex < bufferMaxIndex) {
664*a1e26a70SApple OSS Distributions pc = 0ULL;
665*a1e26a70SApple OSS Distributions /*
666*a1e26a70SApple OSS Distributions * Below the frame pointer, the following values are saved:
667*a1e26a70SApple OSS Distributions * -> FP
668*a1e26a70SApple OSS Distributions */
669*a1e26a70SApple OSS Distributions
670*a1e26a70SApple OSS Distributions /*
671*a1e26a70SApple OSS Distributions * Note that we read the pc even for the first stack frame
672*a1e26a70SApple OSS Distributions * (which, in theory, is always empty because the callee fills
673*a1e26a70SApple OSS Distributions * it in just before it lowers the stack. However, if we
674*a1e26a70SApple OSS Distributions * catch the program in between filling in the return address
675*a1e26a70SApple OSS Distributions * and lowering the stack, we want to still have a valid
676*a1e26a70SApple OSS Distributions * backtrace. FixupStack correctly disregards this value if
677*a1e26a70SApple OSS Distributions * necessary.
678*a1e26a70SApple OSS Distributions */
679*a1e26a70SApple OSS Distributions
680*a1e26a70SApple OSS Distributions if ((uint64_t)fp == 0 || ((uint64_t)fp & 0x3) != 0) {
681*a1e26a70SApple OSS Distributions /* frame pointer is invalid - stop backtracing */
682*a1e26a70SApple OSS Distributions pc = 0ULL;
683*a1e26a70SApple OSS Distributions break;
684*a1e26a70SApple OSS Distributions }
685*a1e26a70SApple OSS Distributions
686*a1e26a70SApple OSS Distributions if (kernel) {
687*a1e26a70SApple OSS Distributions if (((uint64_t)fp > kernStackMax) ||
688*a1e26a70SApple OSS Distributions ((uint64_t)fp < kernStackMin)) {
689*a1e26a70SApple OSS Distributions kr = KERN_FAILURE;
690*a1e26a70SApple OSS Distributions } else {
691*a1e26a70SApple OSS Distributions kr = chudxnu_kern_read(&frame,
692*a1e26a70SApple OSS Distributions (vm_offset_t)fp,
693*a1e26a70SApple OSS Distributions (vm_size_t)sizeof(frame));
694*a1e26a70SApple OSS Distributions if (kr == KERN_SUCCESS) {
695*a1e26a70SApple OSS Distributions #if defined(HAS_APPLE_PAC)
696*a1e26a70SApple OSS Distributions /* return addresses on stack will be signed by arm64e ABI */
697*a1e26a70SApple OSS Distributions pc = (uint64_t)ptrauth_strip((void *)frame[1], ptrauth_key_return_address);
698*a1e26a70SApple OSS Distributions #else
699*a1e26a70SApple OSS Distributions pc = frame[1];
700*a1e26a70SApple OSS Distributions #endif
701*a1e26a70SApple OSS Distributions nextFramePointer = (uint64_t *)frame[0];
702*a1e26a70SApple OSS Distributions #if defined(HAS_APPLE_PAC)
703*a1e26a70SApple OSS Distributions /* frame pointers on stack will be signed by arm64e ABI */
704*a1e26a70SApple OSS Distributions nextFramePointer = ptrauth_strip(nextFramePointer, ptrauth_key_frame_pointer);
705*a1e26a70SApple OSS Distributions #endif
706*a1e26a70SApple OSS Distributions } else {
707*a1e26a70SApple OSS Distributions pc = 0ULL;
708*a1e26a70SApple OSS Distributions nextFramePointer = 0ULL;
709*a1e26a70SApple OSS Distributions kr = KERN_FAILURE;
710*a1e26a70SApple OSS Distributions }
711*a1e26a70SApple OSS Distributions }
712*a1e26a70SApple OSS Distributions } else {
713*a1e26a70SApple OSS Distributions kr = chudxnu_task_read(task,
714*a1e26a70SApple OSS Distributions &frame,
715*a1e26a70SApple OSS Distributions (vm_offset_t)fp,
716*a1e26a70SApple OSS Distributions (vm_size_t)sizeof(frame));
717*a1e26a70SApple OSS Distributions if (kr == KERN_SUCCESS) {
718*a1e26a70SApple OSS Distributions #if defined(HAS_APPLE_PAC)
719*a1e26a70SApple OSS Distributions /* return addresses on stack will be signed by arm64e ABI */
720*a1e26a70SApple OSS Distributions pc = (uint64_t)ptrauth_strip((void *)frame[1], ptrauth_key_return_address);
721*a1e26a70SApple OSS Distributions #else
722*a1e26a70SApple OSS Distributions pc = frame[1];
723*a1e26a70SApple OSS Distributions #endif
724*a1e26a70SApple OSS Distributions nextFramePointer = (uint64_t *)(frame[0]);
725*a1e26a70SApple OSS Distributions #if defined(HAS_APPLE_PAC)
726*a1e26a70SApple OSS Distributions /* frame pointers on stack will be signed by arm64e ABI */
727*a1e26a70SApple OSS Distributions nextFramePointer = ptrauth_strip(nextFramePointer, ptrauth_key_frame_pointer);
728*a1e26a70SApple OSS Distributions #endif
729*a1e26a70SApple OSS Distributions } else {
730*a1e26a70SApple OSS Distributions pc = 0ULL;
731*a1e26a70SApple OSS Distributions nextFramePointer = 0ULL;
732*a1e26a70SApple OSS Distributions kr = KERN_FAILURE;
733*a1e26a70SApple OSS Distributions }
734*a1e26a70SApple OSS Distributions }
735*a1e26a70SApple OSS Distributions
736*a1e26a70SApple OSS Distributions if (kr != KERN_SUCCESS) {
737*a1e26a70SApple OSS Distributions pc = 0ULL;
738*a1e26a70SApple OSS Distributions break;
739*a1e26a70SApple OSS Distributions }
740*a1e26a70SApple OSS Distributions
741*a1e26a70SApple OSS Distributions if (nextFramePointer) {
742*a1e26a70SApple OSS Distributions buffer[bufferIndex++] = chudxnu_vm_unslide(pc, kernel);
743*a1e26a70SApple OSS Distributions prevPC = pc;
744*a1e26a70SApple OSS Distributions }
745*a1e26a70SApple OSS Distributions
746*a1e26a70SApple OSS Distributions if (nextFramePointer < fp) {
747*a1e26a70SApple OSS Distributions break;
748*a1e26a70SApple OSS Distributions } else {
749*a1e26a70SApple OSS Distributions fp = nextFramePointer;
750*a1e26a70SApple OSS Distributions }
751*a1e26a70SApple OSS Distributions }
752*a1e26a70SApple OSS Distributions
753*a1e26a70SApple OSS Distributions BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_END, bufferIndex);
754*a1e26a70SApple OSS Distributions
755*a1e26a70SApple OSS Distributions if (bufferIndex >= bufferMaxIndex) {
756*a1e26a70SApple OSS Distributions bufferIndex = bufferMaxIndex;
757*a1e26a70SApple OSS Distributions kr = KERN_RESOURCE_SHORTAGE;
758*a1e26a70SApple OSS Distributions } else {
759*a1e26a70SApple OSS Distributions kr = KERN_SUCCESS;
760*a1e26a70SApple OSS Distributions }
761*a1e26a70SApple OSS Distributions
762*a1e26a70SApple OSS Distributions // Save link register and SP at bottom of stack (used for later fixup).
763*a1e26a70SApple OSS Distributions buffer[bufferIndex++] = chudxnu_vm_unslide(currLR, kernel);
764*a1e26a70SApple OSS Distributions if (flags & CS_FLAG_EXTRASP) {
765*a1e26a70SApple OSS Distributions buffer[bufferIndex++] = chudxnu_vm_unslide(currSP, kernel);
766*a1e26a70SApple OSS Distributions }
767*a1e26a70SApple OSS Distributions } else {
768*a1e26a70SApple OSS Distributions struct arm_saved_state32 *state = NULL;
769*a1e26a70SApple OSS Distributions uint32_t *fp = NULL, *nextFramePointer = NULL, *topfp = NULL;
770*a1e26a70SApple OSS Distributions
771*a1e26a70SApple OSS Distributions /* 64-bit kernel stacks, 32-bit user stacks */
772*a1e26a70SApple OSS Distributions uint64_t frame[2];
773*a1e26a70SApple OSS Distributions uint32_t frame32[2];
774*a1e26a70SApple OSS Distributions
775*a1e26a70SApple OSS Distributions state = saved_state32(sstate);
776*a1e26a70SApple OSS Distributions
777*a1e26a70SApple OSS Distributions /* make sure it is safe to dereference before you do it */
778*a1e26a70SApple OSS Distributions kernel = PSR_IS_KERNEL(state->cpsr);
779*a1e26a70SApple OSS Distributions
780*a1e26a70SApple OSS Distributions /* can't take a kernel callstack if we've got a user frame */
781*a1e26a70SApple OSS Distributions if (!user_only && !kernel) {
782*a1e26a70SApple OSS Distributions return KERN_FAILURE;
783*a1e26a70SApple OSS Distributions }
784*a1e26a70SApple OSS Distributions
785*a1e26a70SApple OSS Distributions /*
786*a1e26a70SApple OSS Distributions * Reserve space for saving LR (and sometimes SP) at the end of the
787*a1e26a70SApple OSS Distributions * backtrace.
788*a1e26a70SApple OSS Distributions */
789*a1e26a70SApple OSS Distributions if (flags & CS_FLAG_EXTRASP) {
790*a1e26a70SApple OSS Distributions bufferMaxIndex -= 2;
791*a1e26a70SApple OSS Distributions } else {
792*a1e26a70SApple OSS Distributions bufferMaxIndex -= 1;
793*a1e26a70SApple OSS Distributions }
794*a1e26a70SApple OSS Distributions
795*a1e26a70SApple OSS Distributions if (bufferMaxIndex < 2) {
796*a1e26a70SApple OSS Distributions *count = 0;
797*a1e26a70SApple OSS Distributions return KERN_RESOURCE_SHORTAGE;
798*a1e26a70SApple OSS Distributions }
799*a1e26a70SApple OSS Distributions
800*a1e26a70SApple OSS Distributions currPC = (uint64_t)state->pc; /* r15 */
801*a1e26a70SApple OSS Distributions if (state->cpsr & PSR_TF) {
802*a1e26a70SApple OSS Distributions currPC |= 1ULL; /* encode thumb mode into low bit of PC */
803*a1e26a70SApple OSS Distributions }
804*a1e26a70SApple OSS Distributions currLR = (uint64_t)state->lr; /* r14 */
805*a1e26a70SApple OSS Distributions currSP = (uint64_t)state->sp; /* r13 */
806*a1e26a70SApple OSS Distributions
807*a1e26a70SApple OSS Distributions fp = (uint32_t *)(uintptr_t)state->r[7]; /* frame pointer */
808*a1e26a70SApple OSS Distributions topfp = fp;
809*a1e26a70SApple OSS Distributions
810*a1e26a70SApple OSS Distributions bufferIndex = 0; // start with a stack of size zero
811*a1e26a70SApple OSS Distributions buffer[bufferIndex++] = chudxnu_vm_unslide(currPC, kernel); // save PC in position 0.
812*a1e26a70SApple OSS Distributions
813*a1e26a70SApple OSS Distributions BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_START, kernel, 1);
814*a1e26a70SApple OSS Distributions
815*a1e26a70SApple OSS Distributions // Now, fill buffer with stack backtraces.
816*a1e26a70SApple OSS Distributions while (bufferIndex < bufferMaxIndex) {
817*a1e26a70SApple OSS Distributions pc = 0ULL;
818*a1e26a70SApple OSS Distributions /*
819*a1e26a70SApple OSS Distributions * Below the frame pointer, the following values are saved:
820*a1e26a70SApple OSS Distributions * -> FP
821*a1e26a70SApple OSS Distributions */
822*a1e26a70SApple OSS Distributions
823*a1e26a70SApple OSS Distributions /*
824*a1e26a70SApple OSS Distributions * Note that we read the pc even for the first stack frame
825*a1e26a70SApple OSS Distributions * (which, in theory, is always empty because the callee fills
826*a1e26a70SApple OSS Distributions * it in just before it lowers the stack. However, if we
827*a1e26a70SApple OSS Distributions * catch the program in between filling in the return address
828*a1e26a70SApple OSS Distributions * and lowering the stack, we want to still have a valid
829*a1e26a70SApple OSS Distributions * backtrace. FixupStack correctly disregards this value if
830*a1e26a70SApple OSS Distributions * necessary.
831*a1e26a70SApple OSS Distributions */
832*a1e26a70SApple OSS Distributions
833*a1e26a70SApple OSS Distributions if ((uint32_t)fp == 0 || ((uint32_t)fp & 0x3) != 0) {
834*a1e26a70SApple OSS Distributions /* frame pointer is invalid - stop backtracing */
835*a1e26a70SApple OSS Distributions pc = 0ULL;
836*a1e26a70SApple OSS Distributions break;
837*a1e26a70SApple OSS Distributions }
838*a1e26a70SApple OSS Distributions
839*a1e26a70SApple OSS Distributions if (kernel) {
840*a1e26a70SApple OSS Distributions if (((uint32_t)fp > kernStackMax) ||
841*a1e26a70SApple OSS Distributions ((uint32_t)fp < kernStackMin)) {
842*a1e26a70SApple OSS Distributions kr = KERN_FAILURE;
843*a1e26a70SApple OSS Distributions } else {
844*a1e26a70SApple OSS Distributions kr = chudxnu_kern_read(&frame,
845*a1e26a70SApple OSS Distributions (vm_offset_t)fp,
846*a1e26a70SApple OSS Distributions (vm_size_t)sizeof(frame));
847*a1e26a70SApple OSS Distributions if (kr == KERN_SUCCESS) {
848*a1e26a70SApple OSS Distributions pc = (uint64_t)frame[1];
849*a1e26a70SApple OSS Distributions nextFramePointer = (uint32_t *) (frame[0]);
850*a1e26a70SApple OSS Distributions } else {
851*a1e26a70SApple OSS Distributions pc = 0ULL;
852*a1e26a70SApple OSS Distributions nextFramePointer = 0ULL;
853*a1e26a70SApple OSS Distributions kr = KERN_FAILURE;
854*a1e26a70SApple OSS Distributions }
855*a1e26a70SApple OSS Distributions }
856*a1e26a70SApple OSS Distributions } else {
857*a1e26a70SApple OSS Distributions kr = chudxnu_task_read(task,
858*a1e26a70SApple OSS Distributions &frame32,
859*a1e26a70SApple OSS Distributions (((uint64_t)(uint32_t)fp) & 0x00000000FFFFFFFFULL),
860*a1e26a70SApple OSS Distributions sizeof(frame32));
861*a1e26a70SApple OSS Distributions if (kr == KERN_SUCCESS) {
862*a1e26a70SApple OSS Distributions pc = (uint64_t)frame32[1];
863*a1e26a70SApple OSS Distributions nextFramePointer = (uint32_t *)(uintptr_t)(frame32[0]);
864*a1e26a70SApple OSS Distributions } else {
865*a1e26a70SApple OSS Distributions pc = 0ULL;
866*a1e26a70SApple OSS Distributions nextFramePointer = 0ULL;
867*a1e26a70SApple OSS Distributions kr = KERN_FAILURE;
868*a1e26a70SApple OSS Distributions }
869*a1e26a70SApple OSS Distributions }
870*a1e26a70SApple OSS Distributions
871*a1e26a70SApple OSS Distributions if (kr != KERN_SUCCESS) {
872*a1e26a70SApple OSS Distributions pc = 0ULL;
873*a1e26a70SApple OSS Distributions break;
874*a1e26a70SApple OSS Distributions }
875*a1e26a70SApple OSS Distributions
876*a1e26a70SApple OSS Distributions if (nextFramePointer) {
877*a1e26a70SApple OSS Distributions buffer[bufferIndex++] = chudxnu_vm_unslide(pc, kernel);
878*a1e26a70SApple OSS Distributions prevPC = pc;
879*a1e26a70SApple OSS Distributions }
880*a1e26a70SApple OSS Distributions
881*a1e26a70SApple OSS Distributions if (nextFramePointer < fp) {
882*a1e26a70SApple OSS Distributions break;
883*a1e26a70SApple OSS Distributions } else {
884*a1e26a70SApple OSS Distributions fp = nextFramePointer;
885*a1e26a70SApple OSS Distributions }
886*a1e26a70SApple OSS Distributions }
887*a1e26a70SApple OSS Distributions
888*a1e26a70SApple OSS Distributions BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_END, bufferIndex);
889*a1e26a70SApple OSS Distributions
890*a1e26a70SApple OSS Distributions /* clamp callstack size to max */
891*a1e26a70SApple OSS Distributions if (bufferIndex >= bufferMaxIndex) {
892*a1e26a70SApple OSS Distributions bufferIndex = bufferMaxIndex;
893*a1e26a70SApple OSS Distributions kr = KERN_RESOURCE_SHORTAGE;
894*a1e26a70SApple OSS Distributions } else {
895*a1e26a70SApple OSS Distributions /* ignore all other failures */
896*a1e26a70SApple OSS Distributions kr = KERN_SUCCESS;
897*a1e26a70SApple OSS Distributions }
898*a1e26a70SApple OSS Distributions
899*a1e26a70SApple OSS Distributions // Save link register and R13 (sp) at bottom of stack (used for later fixup).
900*a1e26a70SApple OSS Distributions buffer[bufferIndex++] = chudxnu_vm_unslide(currLR, kernel);
901*a1e26a70SApple OSS Distributions if (flags & CS_FLAG_EXTRASP) {
902*a1e26a70SApple OSS Distributions buffer[bufferIndex++] = chudxnu_vm_unslide(currSP, kernel);
903*a1e26a70SApple OSS Distributions }
904*a1e26a70SApple OSS Distributions }
905*a1e26a70SApple OSS Distributions
906*a1e26a70SApple OSS Distributions *count = bufferIndex;
907*a1e26a70SApple OSS Distributions return kr;
908*a1e26a70SApple OSS Distributions }
909*a1e26a70SApple OSS Distributions
910*a1e26a70SApple OSS Distributions kern_return_t
chudxnu_thread_get_callstack64_kperf(thread_t thread,uint64_t * callStack,mach_msg_type_number_t * count,boolean_t user_only)911*a1e26a70SApple OSS Distributions chudxnu_thread_get_callstack64_kperf(
912*a1e26a70SApple OSS Distributions thread_t thread,
913*a1e26a70SApple OSS Distributions uint64_t *callStack,
914*a1e26a70SApple OSS Distributions mach_msg_type_number_t *count,
915*a1e26a70SApple OSS Distributions boolean_t user_only)
916*a1e26a70SApple OSS Distributions {
917*a1e26a70SApple OSS Distributions return chudxnu_thread_get_callstack64_internal( thread, callStack, count, user_only, 0 );
918*a1e26a70SApple OSS Distributions }
919*a1e26a70SApple OSS Distributions #elif __x86_64__
920*a1e26a70SApple OSS Distributions
921*a1e26a70SApple OSS Distributions #define VALID_STACK_ADDRESS(supervisor, addr, minKernAddr, maxKernAddr) (supervisor ? (addr>=minKernAddr && addr<=maxKernAddr) : TRUE)
922*a1e26a70SApple OSS Distributions // don't try to read in the hole
923*a1e26a70SApple OSS Distributions #define VALID_STACK_ADDRESS64(supervisor, addr, minKernAddr, maxKernAddr) \
924*a1e26a70SApple OSS Distributions (supervisor ? ((uint64_t)addr >= minKernAddr && (uint64_t)addr <= maxKernAddr) : \
925*a1e26a70SApple OSS Distributions ((uint64_t)addr != 0ULL && ((uint64_t)addr <= 0x00007FFFFFFFFFFFULL || (uint64_t)addr >= 0xFFFF800000000000ULL)))
926*a1e26a70SApple OSS Distributions
927*a1e26a70SApple OSS Distributions typedef struct _cframe64_t {
928*a1e26a70SApple OSS Distributions uint64_t prevFP; // can't use a real pointer here until we're a 64 bit kernel
929*a1e26a70SApple OSS Distributions uint64_t caller;
930*a1e26a70SApple OSS Distributions uint64_t args[0];
931*a1e26a70SApple OSS Distributions }cframe64_t;
932*a1e26a70SApple OSS Distributions
933*a1e26a70SApple OSS Distributions
934*a1e26a70SApple OSS Distributions typedef struct _cframe_t {
935*a1e26a70SApple OSS Distributions uint32_t prev; // this is really a user32-space pointer to the previous frame
936*a1e26a70SApple OSS Distributions uint32_t caller;
937*a1e26a70SApple OSS Distributions uint32_t args[0];
938*a1e26a70SApple OSS Distributions } cframe_t;
939*a1e26a70SApple OSS Distributions
940*a1e26a70SApple OSS Distributions extern void * find_user_regs(thread_t);
941*a1e26a70SApple OSS Distributions extern x86_saved_state32_t *find_kern_regs(thread_t);
942*a1e26a70SApple OSS Distributions
943*a1e26a70SApple OSS Distributions static kern_return_t
do_kernel_backtrace(thread_t thread,struct x86_kernel_state * regs,uint64_t * frames,mach_msg_type_number_t * start_idx,mach_msg_type_number_t max_idx)944*a1e26a70SApple OSS Distributions do_kernel_backtrace(
945*a1e26a70SApple OSS Distributions thread_t thread,
946*a1e26a70SApple OSS Distributions struct x86_kernel_state *regs,
947*a1e26a70SApple OSS Distributions uint64_t *frames,
948*a1e26a70SApple OSS Distributions mach_msg_type_number_t *start_idx,
949*a1e26a70SApple OSS Distributions mach_msg_type_number_t max_idx)
950*a1e26a70SApple OSS Distributions {
951*a1e26a70SApple OSS Distributions uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
952*a1e26a70SApple OSS Distributions uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
953*a1e26a70SApple OSS Distributions mach_msg_type_number_t ct = *start_idx;
954*a1e26a70SApple OSS Distributions kern_return_t kr = KERN_FAILURE;
955*a1e26a70SApple OSS Distributions
956*a1e26a70SApple OSS Distributions #if __LP64__
957*a1e26a70SApple OSS Distributions uint64_t currPC = 0ULL;
958*a1e26a70SApple OSS Distributions uint64_t currFP = 0ULL;
959*a1e26a70SApple OSS Distributions uint64_t prevPC = 0ULL;
960*a1e26a70SApple OSS Distributions uint64_t prevFP = 0ULL;
961*a1e26a70SApple OSS Distributions if (KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_rip), sizeof(uint64_t))) {
962*a1e26a70SApple OSS Distributions return KERN_FAILURE;
963*a1e26a70SApple OSS Distributions }
964*a1e26a70SApple OSS Distributions if (KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_rbp), sizeof(uint64_t))) {
965*a1e26a70SApple OSS Distributions return KERN_FAILURE;
966*a1e26a70SApple OSS Distributions }
967*a1e26a70SApple OSS Distributions #else
968*a1e26a70SApple OSS Distributions uint32_t currPC = 0U;
969*a1e26a70SApple OSS Distributions uint32_t currFP = 0U;
970*a1e26a70SApple OSS Distributions uint32_t prevPC = 0U;
971*a1e26a70SApple OSS Distributions uint32_t prevFP = 0U;
972*a1e26a70SApple OSS Distributions if (KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_eip), sizeof(uint32_t))) {
973*a1e26a70SApple OSS Distributions return KERN_FAILURE;
974*a1e26a70SApple OSS Distributions }
975*a1e26a70SApple OSS Distributions if (KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_ebp), sizeof(uint32_t))) {
976*a1e26a70SApple OSS Distributions return KERN_FAILURE;
977*a1e26a70SApple OSS Distributions }
978*a1e26a70SApple OSS Distributions #endif
979*a1e26a70SApple OSS Distributions
980*a1e26a70SApple OSS Distributions if (*start_idx >= max_idx) {
981*a1e26a70SApple OSS Distributions return KERN_RESOURCE_SHORTAGE; // no frames traced
982*a1e26a70SApple OSS Distributions }
983*a1e26a70SApple OSS Distributions if (!currPC) {
984*a1e26a70SApple OSS Distributions return KERN_FAILURE;
985*a1e26a70SApple OSS Distributions }
986*a1e26a70SApple OSS Distributions
987*a1e26a70SApple OSS Distributions frames[ct++] = chudxnu_vm_unslide((uint64_t)currPC, 1);
988*a1e26a70SApple OSS Distributions
989*a1e26a70SApple OSS Distributions // build a backtrace of this kernel state
990*a1e26a70SApple OSS Distributions #if __LP64__
991*a1e26a70SApple OSS Distributions while (VALID_STACK_ADDRESS64(TRUE, currFP, kernStackMin, kernStackMax)) {
992*a1e26a70SApple OSS Distributions // this is the address where caller lives in the user thread
993*a1e26a70SApple OSS Distributions uint64_t caller = currFP + sizeof(uint64_t);
994*a1e26a70SApple OSS Distributions #else
995*a1e26a70SApple OSS Distributions while (VALID_STACK_ADDRESS(TRUE, currFP, kernStackMin, kernStackMax)) {
996*a1e26a70SApple OSS Distributions uint32_t caller = (uint32_t)currFP + sizeof(uint32_t);
997*a1e26a70SApple OSS Distributions #endif
998*a1e26a70SApple OSS Distributions
999*a1e26a70SApple OSS Distributions if (!currFP || !currPC) {
1000*a1e26a70SApple OSS Distributions currPC = 0;
1001*a1e26a70SApple OSS Distributions break;
1002*a1e26a70SApple OSS Distributions }
1003*a1e26a70SApple OSS Distributions
1004*a1e26a70SApple OSS Distributions if (ct >= max_idx) {
1005*a1e26a70SApple OSS Distributions *start_idx = ct;
1006*a1e26a70SApple OSS Distributions return KERN_RESOURCE_SHORTAGE;
1007*a1e26a70SApple OSS Distributions }
1008*a1e26a70SApple OSS Distributions
1009*a1e26a70SApple OSS Distributions /* read our caller */
1010*a1e26a70SApple OSS Distributions kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(currPC));
1011*a1e26a70SApple OSS Distributions
1012*a1e26a70SApple OSS Distributions if (kr != KERN_SUCCESS || !currPC) {
1013*a1e26a70SApple OSS Distributions currPC = 0UL;
1014*a1e26a70SApple OSS Distributions break;
1015*a1e26a70SApple OSS Distributions }
1016*a1e26a70SApple OSS Distributions
1017*a1e26a70SApple OSS Distributions /*
1018*a1e26a70SApple OSS Distributions * retrive contents of the frame pointer and advance to the next stack
1019*a1e26a70SApple OSS Distributions * frame if it's valid
1020*a1e26a70SApple OSS Distributions */
1021*a1e26a70SApple OSS Distributions prevFP = 0;
1022*a1e26a70SApple OSS Distributions kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(currPC));
1023*a1e26a70SApple OSS Distributions
1024*a1e26a70SApple OSS Distributions #if __LP64__
1025*a1e26a70SApple OSS Distributions if (VALID_STACK_ADDRESS64(TRUE, prevFP, kernStackMin, kernStackMax)) {
1026*a1e26a70SApple OSS Distributions #else
1027*a1e26a70SApple OSS Distributions if (VALID_STACK_ADDRESS(TRUE, prevFP, kernStackMin, kernStackMax)) {
1028*a1e26a70SApple OSS Distributions #endif
1029*a1e26a70SApple OSS Distributions frames[ct++] = chudxnu_vm_unslide((uint64_t)currPC, 1);
1030*a1e26a70SApple OSS Distributions prevPC = currPC;
1031*a1e26a70SApple OSS Distributions }
1032*a1e26a70SApple OSS Distributions if (prevFP <= currFP) {
1033*a1e26a70SApple OSS Distributions break;
1034*a1e26a70SApple OSS Distributions } else {
1035*a1e26a70SApple OSS Distributions currFP = prevFP;
1036*a1e26a70SApple OSS Distributions }
1037*a1e26a70SApple OSS Distributions }
1038*a1e26a70SApple OSS Distributions
1039*a1e26a70SApple OSS Distributions *start_idx = ct;
1040*a1e26a70SApple OSS Distributions return KERN_SUCCESS;
1041*a1e26a70SApple OSS Distributions }
1042*a1e26a70SApple OSS Distributions
1043*a1e26a70SApple OSS Distributions
1044*a1e26a70SApple OSS Distributions
1045*a1e26a70SApple OSS Distributions static kern_return_t
1046*a1e26a70SApple OSS Distributions do_backtrace32(
1047*a1e26a70SApple OSS Distributions task_t task,
1048*a1e26a70SApple OSS Distributions thread_t thread,
1049*a1e26a70SApple OSS Distributions x86_saved_state32_t *regs,
1050*a1e26a70SApple OSS Distributions uint64_t *frames,
1051*a1e26a70SApple OSS Distributions mach_msg_type_number_t *start_idx,
1052*a1e26a70SApple OSS Distributions mach_msg_type_number_t max_idx,
1053*a1e26a70SApple OSS Distributions boolean_t supervisor)
1054*a1e26a70SApple OSS Distributions {
1055*a1e26a70SApple OSS Distributions uint32_t tmpWord = 0UL;
1056*a1e26a70SApple OSS Distributions uint64_t currPC = (uint64_t) regs->eip;
1057*a1e26a70SApple OSS Distributions uint64_t currFP = (uint64_t) regs->ebp;
1058*a1e26a70SApple OSS Distributions uint64_t prevPC = 0ULL;
1059*a1e26a70SApple OSS Distributions uint64_t prevFP = 0ULL;
1060*a1e26a70SApple OSS Distributions uint64_t kernStackMin = thread->kernel_stack;
1061*a1e26a70SApple OSS Distributions uint64_t kernStackMax = kernStackMin + kernel_stack_size;
1062*a1e26a70SApple OSS Distributions mach_msg_type_number_t ct = *start_idx;
1063*a1e26a70SApple OSS Distributions kern_return_t kr = KERN_FAILURE;
1064*a1e26a70SApple OSS Distributions
1065*a1e26a70SApple OSS Distributions if (ct >= max_idx) {
1066*a1e26a70SApple OSS Distributions return KERN_RESOURCE_SHORTAGE; // no frames traced
1067*a1e26a70SApple OSS Distributions }
1068*a1e26a70SApple OSS Distributions frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);
1069*a1e26a70SApple OSS Distributions
1070*a1e26a70SApple OSS Distributions // build a backtrace of this 32 bit state.
1071*a1e26a70SApple OSS Distributions while (VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax)) {
1072*a1e26a70SApple OSS Distributions cframe_t *fp = (cframe_t *) (uintptr_t) currFP;
1073*a1e26a70SApple OSS Distributions
1074*a1e26a70SApple OSS Distributions if (!currFP) {
1075*a1e26a70SApple OSS Distributions currPC = 0;
1076*a1e26a70SApple OSS Distributions break;
1077*a1e26a70SApple OSS Distributions }
1078*a1e26a70SApple OSS Distributions
1079*a1e26a70SApple OSS Distributions if (ct >= max_idx) {
1080*a1e26a70SApple OSS Distributions *start_idx = ct;
1081*a1e26a70SApple OSS Distributions return KERN_RESOURCE_SHORTAGE;
1082*a1e26a70SApple OSS Distributions }
1083*a1e26a70SApple OSS Distributions
1084*a1e26a70SApple OSS Distributions /* read our caller */
1085*a1e26a70SApple OSS Distributions if (supervisor) {
1086*a1e26a70SApple OSS Distributions kr = chudxnu_kern_read(&tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
1087*a1e26a70SApple OSS Distributions } else {
1088*a1e26a70SApple OSS Distributions kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
1089*a1e26a70SApple OSS Distributions }
1090*a1e26a70SApple OSS Distributions
1091*a1e26a70SApple OSS Distributions if (kr != KERN_SUCCESS) {
1092*a1e26a70SApple OSS Distributions currPC = 0ULL;
1093*a1e26a70SApple OSS Distributions break;
1094*a1e26a70SApple OSS Distributions }
1095*a1e26a70SApple OSS Distributions
1096*a1e26a70SApple OSS Distributions currPC = (uint64_t) tmpWord; // promote 32 bit address
1097*a1e26a70SApple OSS Distributions
1098*a1e26a70SApple OSS Distributions /*
1099*a1e26a70SApple OSS Distributions * retrive contents of the frame pointer and advance to the next stack
1100*a1e26a70SApple OSS Distributions * frame if it's valid
1101*a1e26a70SApple OSS Distributions */
1102*a1e26a70SApple OSS Distributions prevFP = 0;
1103*a1e26a70SApple OSS Distributions if (supervisor) {
1104*a1e26a70SApple OSS Distributions kr = chudxnu_kern_read(&tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
1105*a1e26a70SApple OSS Distributions } else {
1106*a1e26a70SApple OSS Distributions kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
1107*a1e26a70SApple OSS Distributions }
1108*a1e26a70SApple OSS Distributions prevFP = (uint64_t) tmpWord; // promote 32 bit address
1109*a1e26a70SApple OSS Distributions
1110*a1e26a70SApple OSS Distributions if (prevFP) {
1111*a1e26a70SApple OSS Distributions frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);
1112*a1e26a70SApple OSS Distributions prevPC = currPC;
1113*a1e26a70SApple OSS Distributions }
1114*a1e26a70SApple OSS Distributions if (prevFP < currFP) {
1115*a1e26a70SApple OSS Distributions break;
1116*a1e26a70SApple OSS Distributions } else {
1117*a1e26a70SApple OSS Distributions currFP = prevFP;
1118*a1e26a70SApple OSS Distributions }
1119*a1e26a70SApple OSS Distributions }
1120*a1e26a70SApple OSS Distributions
1121*a1e26a70SApple OSS Distributions *start_idx = ct;
1122*a1e26a70SApple OSS Distributions return KERN_SUCCESS;
1123*a1e26a70SApple OSS Distributions }
1124*a1e26a70SApple OSS Distributions
1125*a1e26a70SApple OSS Distributions static kern_return_t
1126*a1e26a70SApple OSS Distributions do_backtrace64(
1127*a1e26a70SApple OSS Distributions task_t task,
1128*a1e26a70SApple OSS Distributions thread_t thread,
1129*a1e26a70SApple OSS Distributions x86_saved_state64_t *regs,
1130*a1e26a70SApple OSS Distributions uint64_t *frames,
1131*a1e26a70SApple OSS Distributions mach_msg_type_number_t *start_idx,
1132*a1e26a70SApple OSS Distributions mach_msg_type_number_t max_idx,
1133*a1e26a70SApple OSS Distributions boolean_t supervisor)
1134*a1e26a70SApple OSS Distributions {
1135*a1e26a70SApple OSS Distributions uint64_t currPC = regs->isf.rip;
1136*a1e26a70SApple OSS Distributions uint64_t currFP = regs->rbp;
1137*a1e26a70SApple OSS Distributions uint64_t prevPC = 0ULL;
1138*a1e26a70SApple OSS Distributions uint64_t prevFP = 0ULL;
1139*a1e26a70SApple OSS Distributions uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
1140*a1e26a70SApple OSS Distributions uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
1141*a1e26a70SApple OSS Distributions mach_msg_type_number_t ct = *start_idx;
1142*a1e26a70SApple OSS Distributions kern_return_t kr = KERN_FAILURE;
1143*a1e26a70SApple OSS Distributions
1144*a1e26a70SApple OSS Distributions if (*start_idx >= max_idx) {
1145*a1e26a70SApple OSS Distributions return KERN_RESOURCE_SHORTAGE; // no frames traced
1146*a1e26a70SApple OSS Distributions }
1147*a1e26a70SApple OSS Distributions frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);
1148*a1e26a70SApple OSS Distributions
1149*a1e26a70SApple OSS Distributions // build a backtrace of this 32 bit state.
1150*a1e26a70SApple OSS Distributions while (VALID_STACK_ADDRESS64(supervisor, currFP, kernStackMin, kernStackMax)) {
1151*a1e26a70SApple OSS Distributions // this is the address where caller lives in the user thread
1152*a1e26a70SApple OSS Distributions uint64_t caller = currFP + sizeof(uint64_t);
1153*a1e26a70SApple OSS Distributions
1154*a1e26a70SApple OSS Distributions if (!currFP) {
1155*a1e26a70SApple OSS Distributions currPC = 0;
1156*a1e26a70SApple OSS Distributions break;
1157*a1e26a70SApple OSS Distributions }
1158*a1e26a70SApple OSS Distributions
1159*a1e26a70SApple OSS Distributions if (ct >= max_idx) {
1160*a1e26a70SApple OSS Distributions *start_idx = ct;
1161*a1e26a70SApple OSS Distributions return KERN_RESOURCE_SHORTAGE;
1162*a1e26a70SApple OSS Distributions }
1163*a1e26a70SApple OSS Distributions
1164*a1e26a70SApple OSS Distributions /* read our caller */
1165*a1e26a70SApple OSS Distributions if (supervisor) {
1166*a1e26a70SApple OSS Distributions kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(uint64_t));
1167*a1e26a70SApple OSS Distributions } else {
1168*a1e26a70SApple OSS Distributions kr = chudxnu_task_read(task, &currPC, caller, sizeof(uint64_t));
1169*a1e26a70SApple OSS Distributions }
1170*a1e26a70SApple OSS Distributions
1171*a1e26a70SApple OSS Distributions if (kr != KERN_SUCCESS) {
1172*a1e26a70SApple OSS Distributions currPC = 0ULL;
1173*a1e26a70SApple OSS Distributions break;
1174*a1e26a70SApple OSS Distributions }
1175*a1e26a70SApple OSS Distributions
1176*a1e26a70SApple OSS Distributions /*
1177*a1e26a70SApple OSS Distributions * retrive contents of the frame pointer and advance to the next stack
1178*a1e26a70SApple OSS Distributions * frame if it's valid
1179*a1e26a70SApple OSS Distributions */
1180*a1e26a70SApple OSS Distributions prevFP = 0;
1181*a1e26a70SApple OSS Distributions if (supervisor) {
1182*a1e26a70SApple OSS Distributions kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(uint64_t));
1183*a1e26a70SApple OSS Distributions } else {
1184*a1e26a70SApple OSS Distributions kr = chudxnu_task_read(task, &prevFP, currFP, sizeof(uint64_t));
1185*a1e26a70SApple OSS Distributions }
1186*a1e26a70SApple OSS Distributions
1187*a1e26a70SApple OSS Distributions if (VALID_STACK_ADDRESS64(supervisor, prevFP, kernStackMin, kernStackMax)) {
1188*a1e26a70SApple OSS Distributions frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);
1189*a1e26a70SApple OSS Distributions prevPC = currPC;
1190*a1e26a70SApple OSS Distributions }
1191*a1e26a70SApple OSS Distributions if (prevFP < currFP) {
1192*a1e26a70SApple OSS Distributions break;
1193*a1e26a70SApple OSS Distributions } else {
1194*a1e26a70SApple OSS Distributions currFP = prevFP;
1195*a1e26a70SApple OSS Distributions }
1196*a1e26a70SApple OSS Distributions }
1197*a1e26a70SApple OSS Distributions
1198*a1e26a70SApple OSS Distributions *start_idx = ct;
1199*a1e26a70SApple OSS Distributions return KERN_SUCCESS;
1200*a1e26a70SApple OSS Distributions }
1201*a1e26a70SApple OSS Distributions
1202*a1e26a70SApple OSS Distributions static kern_return_t
1203*a1e26a70SApple OSS Distributions chudxnu_thread_get_callstack64_internal(
1204*a1e26a70SApple OSS Distributions thread_t thread,
1205*a1e26a70SApple OSS Distributions uint64_t *callstack,
1206*a1e26a70SApple OSS Distributions mach_msg_type_number_t *count,
1207*a1e26a70SApple OSS Distributions boolean_t user_only,
1208*a1e26a70SApple OSS Distributions boolean_t kern_only)
1209*a1e26a70SApple OSS Distributions {
1210*a1e26a70SApple OSS Distributions kern_return_t kr = KERN_FAILURE;
1211*a1e26a70SApple OSS Distributions task_t task = get_threadtask(thread);
1212*a1e26a70SApple OSS Distributions uint64_t currPC = 0ULL;
1213*a1e26a70SApple OSS Distributions boolean_t supervisor = FALSE;
1214*a1e26a70SApple OSS Distributions mach_msg_type_number_t bufferIndex = 0;
1215*a1e26a70SApple OSS Distributions mach_msg_type_number_t bufferMaxIndex = *count;
1216*a1e26a70SApple OSS Distributions x86_saved_state_t *tagged_regs = NULL; // kernel register state
1217*a1e26a70SApple OSS Distributions x86_saved_state64_t *regs64 = NULL;
1218*a1e26a70SApple OSS Distributions x86_saved_state32_t *regs32 = NULL;
1219*a1e26a70SApple OSS Distributions x86_saved_state32_t *u_regs32 = NULL;
1220*a1e26a70SApple OSS Distributions x86_saved_state64_t *u_regs64 = NULL;
1221*a1e26a70SApple OSS Distributions struct x86_kernel_state *kregs = NULL;
1222*a1e26a70SApple OSS Distributions
1223*a1e26a70SApple OSS Distributions if (ml_at_interrupt_context()) {
1224*a1e26a70SApple OSS Distributions if (user_only) {
1225*a1e26a70SApple OSS Distributions /* can't backtrace user state on interrupt stack. */
1226*a1e26a70SApple OSS Distributions return KERN_FAILURE;
1227*a1e26a70SApple OSS Distributions }
1228*a1e26a70SApple OSS Distributions
1229*a1e26a70SApple OSS Distributions /* backtracing at interrupt context? */
1230*a1e26a70SApple OSS Distributions if (thread == current_thread() && current_cpu_datap()->cpu_int_state) {
1231*a1e26a70SApple OSS Distributions /*
1232*a1e26a70SApple OSS Distributions * Locate the registers for the interrupted thread, assuming it is
1233*a1e26a70SApple OSS Distributions * current_thread().
1234*a1e26a70SApple OSS Distributions */
1235*a1e26a70SApple OSS Distributions tagged_regs = current_cpu_datap()->cpu_int_state;
1236*a1e26a70SApple OSS Distributions
1237*a1e26a70SApple OSS Distributions if (is_saved_state64(tagged_regs)) {
1238*a1e26a70SApple OSS Distributions /* 64 bit registers */
1239*a1e26a70SApple OSS Distributions regs64 = saved_state64(tagged_regs);
1240*a1e26a70SApple OSS Distributions supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
1241*a1e26a70SApple OSS Distributions } else {
1242*a1e26a70SApple OSS Distributions /* 32 bit registers */
1243*a1e26a70SApple OSS Distributions regs32 = saved_state32(tagged_regs);
1244*a1e26a70SApple OSS Distributions supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
1245*a1e26a70SApple OSS Distributions }
1246*a1e26a70SApple OSS Distributions }
1247*a1e26a70SApple OSS Distributions }
1248*a1e26a70SApple OSS Distributions
1249*a1e26a70SApple OSS Distributions if (!ml_at_interrupt_context() && kernel_task == task) {
1250*a1e26a70SApple OSS Distributions if (!thread->kernel_stack) {
1251*a1e26a70SApple OSS Distributions return KERN_FAILURE;
1252*a1e26a70SApple OSS Distributions }
1253*a1e26a70SApple OSS Distributions
1254*a1e26a70SApple OSS Distributions // Kernel thread not at interrupt context
1255*a1e26a70SApple OSS Distributions kregs = (struct x86_kernel_state *)NULL;
1256*a1e26a70SApple OSS Distributions
1257*a1e26a70SApple OSS Distributions // nofault read of the thread->kernel_stack pointer
1258*a1e26a70SApple OSS Distributions if (KERN_SUCCESS != chudxnu_kern_read(&kregs, (vm_offset_t)&(thread->kernel_stack), sizeof(void *))) {
1259*a1e26a70SApple OSS Distributions return KERN_FAILURE;
1260*a1e26a70SApple OSS Distributions }
1261*a1e26a70SApple OSS Distributions
1262*a1e26a70SApple OSS Distributions // Adjust to find the saved kernel state
1263*a1e26a70SApple OSS Distributions kregs = STACK_IKS((vm_offset_t)(uintptr_t)kregs);
1264*a1e26a70SApple OSS Distributions
1265*a1e26a70SApple OSS Distributions supervisor = TRUE;
1266*a1e26a70SApple OSS Distributions } else if (!tagged_regs) {
1267*a1e26a70SApple OSS Distributions /*
1268*a1e26a70SApple OSS Distributions * not at interrupt context, or tracing a different thread than
1269*a1e26a70SApple OSS Distributions * current_thread() at interrupt context
1270*a1e26a70SApple OSS Distributions */
1271*a1e26a70SApple OSS Distributions tagged_regs = USER_STATE(thread);
1272*a1e26a70SApple OSS Distributions if (is_saved_state64(tagged_regs)) {
1273*a1e26a70SApple OSS Distributions /* 64 bit registers */
1274*a1e26a70SApple OSS Distributions regs64 = saved_state64(tagged_regs);
1275*a1e26a70SApple OSS Distributions supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
1276*a1e26a70SApple OSS Distributions } else {
1277*a1e26a70SApple OSS Distributions /* 32 bit registers */
1278*a1e26a70SApple OSS Distributions regs32 = saved_state32(tagged_regs);
1279*a1e26a70SApple OSS Distributions supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
1280*a1e26a70SApple OSS Distributions }
1281*a1e26a70SApple OSS Distributions }
1282*a1e26a70SApple OSS Distributions
1283*a1e26a70SApple OSS Distributions *count = 0;
1284*a1e26a70SApple OSS Distributions
1285*a1e26a70SApple OSS Distributions if (supervisor) {
1286*a1e26a70SApple OSS Distributions // the caller only wants a user callstack.
1287*a1e26a70SApple OSS Distributions if (user_only) {
1288*a1e26a70SApple OSS Distributions // bail - we've only got kernel state
1289*a1e26a70SApple OSS Distributions return KERN_FAILURE;
1290*a1e26a70SApple OSS Distributions }
1291*a1e26a70SApple OSS Distributions } else {
1292*a1e26a70SApple OSS Distributions // regs32(64) is not in supervisor mode.
1293*a1e26a70SApple OSS Distributions u_regs32 = regs32;
1294*a1e26a70SApple OSS Distributions u_regs64 = regs64;
1295*a1e26a70SApple OSS Distributions regs32 = NULL;
1296*a1e26a70SApple OSS Distributions regs64 = NULL;
1297*a1e26a70SApple OSS Distributions }
1298*a1e26a70SApple OSS Distributions
1299*a1e26a70SApple OSS Distributions if (user_only) {
1300*a1e26a70SApple OSS Distributions /* we only want to backtrace the user mode */
1301*a1e26a70SApple OSS Distributions if (!(u_regs32 || u_regs64)) {
1302*a1e26a70SApple OSS Distributions /* no user state to look at */
1303*a1e26a70SApple OSS Distributions return KERN_FAILURE;
1304*a1e26a70SApple OSS Distributions }
1305*a1e26a70SApple OSS Distributions }
1306*a1e26a70SApple OSS Distributions
1307*a1e26a70SApple OSS Distributions /*
1308*a1e26a70SApple OSS Distributions * Order of preference for top of stack:
1309*a1e26a70SApple OSS Distributions * 64 bit kernel state (not likely)
1310*a1e26a70SApple OSS Distributions * 32 bit kernel state
1311*a1e26a70SApple OSS Distributions * 64 bit user land state
1312*a1e26a70SApple OSS Distributions * 32 bit user land state
1313*a1e26a70SApple OSS Distributions */
1314*a1e26a70SApple OSS Distributions
1315*a1e26a70SApple OSS Distributions if (kregs) {
1316*a1e26a70SApple OSS Distributions /*
1317*a1e26a70SApple OSS Distributions * nofault read of the registers from the kernel stack (as they can
1318*a1e26a70SApple OSS Distributions * disappear on the fly).
1319*a1e26a70SApple OSS Distributions */
1320*a1e26a70SApple OSS Distributions
1321*a1e26a70SApple OSS Distributions if (KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(kregs->k_rip), sizeof(uint64_t))) {
1322*a1e26a70SApple OSS Distributions return KERN_FAILURE;
1323*a1e26a70SApple OSS Distributions }
1324*a1e26a70SApple OSS Distributions } else if (regs64) {
1325*a1e26a70SApple OSS Distributions currPC = regs64->isf.rip;
1326*a1e26a70SApple OSS Distributions } else if (regs32) {
1327*a1e26a70SApple OSS Distributions currPC = (uint64_t) regs32->eip;
1328*a1e26a70SApple OSS Distributions } else if (u_regs64) {
1329*a1e26a70SApple OSS Distributions currPC = u_regs64->isf.rip;
1330*a1e26a70SApple OSS Distributions } else if (u_regs32) {
1331*a1e26a70SApple OSS Distributions currPC = (uint64_t) u_regs32->eip;
1332*a1e26a70SApple OSS Distributions }
1333*a1e26a70SApple OSS Distributions
1334*a1e26a70SApple OSS Distributions if (!currPC) {
1335*a1e26a70SApple OSS Distributions /* no top of the stack, bail out */
1336*a1e26a70SApple OSS Distributions return KERN_FAILURE;
1337*a1e26a70SApple OSS Distributions }
1338*a1e26a70SApple OSS Distributions
1339*a1e26a70SApple OSS Distributions bufferIndex = 0;
1340*a1e26a70SApple OSS Distributions
1341*a1e26a70SApple OSS Distributions if (bufferMaxIndex < 1) {
1342*a1e26a70SApple OSS Distributions *count = 0;
1343*a1e26a70SApple OSS Distributions return KERN_RESOURCE_SHORTAGE;
1344*a1e26a70SApple OSS Distributions }
1345*a1e26a70SApple OSS Distributions
1346*a1e26a70SApple OSS Distributions /* backtrace kernel */
1347*a1e26a70SApple OSS Distributions if (kregs) {
1348*a1e26a70SApple OSS Distributions addr64_t address = 0ULL;
1349*a1e26a70SApple OSS Distributions size_t size = 0UL;
1350*a1e26a70SApple OSS Distributions
1351*a1e26a70SApple OSS Distributions // do the backtrace
1352*a1e26a70SApple OSS Distributions kr = do_kernel_backtrace(thread, kregs, callstack, &bufferIndex, bufferMaxIndex);
1353*a1e26a70SApple OSS Distributions
1354*a1e26a70SApple OSS Distributions // and do a nofault read of (r|e)sp
1355*a1e26a70SApple OSS Distributions uint64_t rsp = 0ULL;
1356*a1e26a70SApple OSS Distributions size = sizeof(uint64_t);
1357*a1e26a70SApple OSS Distributions
1358*a1e26a70SApple OSS Distributions if (KERN_SUCCESS != chudxnu_kern_read(&address, (vm_offset_t)&(kregs->k_rsp), size)) {
1359*a1e26a70SApple OSS Distributions address = 0ULL;
1360*a1e26a70SApple OSS Distributions }
1361*a1e26a70SApple OSS Distributions
1362*a1e26a70SApple OSS Distributions if (address && KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t)address, size) && bufferIndex < bufferMaxIndex) {
1363*a1e26a70SApple OSS Distributions callstack[bufferIndex++] = (uint64_t)rsp;
1364*a1e26a70SApple OSS Distributions }
1365*a1e26a70SApple OSS Distributions } else if (regs64) {
1366*a1e26a70SApple OSS Distributions uint64_t rsp = 0ULL;
1367*a1e26a70SApple OSS Distributions
1368*a1e26a70SApple OSS Distributions // backtrace the 64bit side.
1369*a1e26a70SApple OSS Distributions kr = do_backtrace64(task, thread, regs64, callstack, &bufferIndex,
1370*a1e26a70SApple OSS Distributions bufferMaxIndex - 1, TRUE);
1371*a1e26a70SApple OSS Distributions
1372*a1e26a70SApple OSS Distributions if (KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t) regs64->isf.rsp, sizeof(uint64_t)) &&
1373*a1e26a70SApple OSS Distributions bufferIndex < bufferMaxIndex) {
1374*a1e26a70SApple OSS Distributions callstack[bufferIndex++] = rsp;
1375*a1e26a70SApple OSS Distributions }
1376*a1e26a70SApple OSS Distributions } else if (regs32) {
1377*a1e26a70SApple OSS Distributions uint32_t esp = 0UL;
1378*a1e26a70SApple OSS Distributions
1379*a1e26a70SApple OSS Distributions // backtrace the 32bit side.
1380*a1e26a70SApple OSS Distributions kr = do_backtrace32(task, thread, regs32, callstack, &bufferIndex,
1381*a1e26a70SApple OSS Distributions bufferMaxIndex - 1, TRUE);
1382*a1e26a70SApple OSS Distributions
1383*a1e26a70SApple OSS Distributions if (KERN_SUCCESS == chudxnu_kern_read(&esp, (vm_offset_t) regs32->uesp, sizeof(uint32_t)) &&
1384*a1e26a70SApple OSS Distributions bufferIndex < bufferMaxIndex) {
1385*a1e26a70SApple OSS Distributions callstack[bufferIndex++] = (uint64_t) esp;
1386*a1e26a70SApple OSS Distributions }
1387*a1e26a70SApple OSS Distributions } else if (u_regs64 && !kern_only) {
1388*a1e26a70SApple OSS Distributions /* backtrace user land */
1389*a1e26a70SApple OSS Distributions uint64_t rsp = 0ULL;
1390*a1e26a70SApple OSS Distributions
1391*a1e26a70SApple OSS Distributions kr = do_backtrace64(task, thread, u_regs64, callstack, &bufferIndex,
1392*a1e26a70SApple OSS Distributions bufferMaxIndex - 1, FALSE);
1393*a1e26a70SApple OSS Distributions
1394*a1e26a70SApple OSS Distributions if (KERN_SUCCESS == chudxnu_task_read(task, &rsp, (addr64_t) u_regs64->isf.rsp, sizeof(uint64_t)) &&
1395*a1e26a70SApple OSS Distributions bufferIndex < bufferMaxIndex) {
1396*a1e26a70SApple OSS Distributions callstack[bufferIndex++] = rsp;
1397*a1e26a70SApple OSS Distributions }
1398*a1e26a70SApple OSS Distributions } else if (u_regs32 && !kern_only) {
1399*a1e26a70SApple OSS Distributions uint32_t esp = 0UL;
1400*a1e26a70SApple OSS Distributions
1401*a1e26a70SApple OSS Distributions kr = do_backtrace32(task, thread, u_regs32, callstack, &bufferIndex,
1402*a1e26a70SApple OSS Distributions bufferMaxIndex - 1, FALSE);
1403*a1e26a70SApple OSS Distributions
1404*a1e26a70SApple OSS Distributions if (KERN_SUCCESS == chudxnu_task_read(task, &esp, (addr64_t) u_regs32->uesp, sizeof(uint32_t)) &&
1405*a1e26a70SApple OSS Distributions bufferIndex < bufferMaxIndex) {
1406*a1e26a70SApple OSS Distributions callstack[bufferIndex++] = (uint64_t) esp;
1407*a1e26a70SApple OSS Distributions }
1408*a1e26a70SApple OSS Distributions }
1409*a1e26a70SApple OSS Distributions
1410*a1e26a70SApple OSS Distributions *count = bufferIndex;
1411*a1e26a70SApple OSS Distributions return kr;
1412*a1e26a70SApple OSS Distributions }
1413*a1e26a70SApple OSS Distributions
1414*a1e26a70SApple OSS Distributions __private_extern__
1415*a1e26a70SApple OSS Distributions kern_return_t
1416*a1e26a70SApple OSS Distributions chudxnu_thread_get_callstack64_kperf(
1417*a1e26a70SApple OSS Distributions thread_t thread,
1418*a1e26a70SApple OSS Distributions uint64_t *callstack,
1419*a1e26a70SApple OSS Distributions mach_msg_type_number_t *count,
1420*a1e26a70SApple OSS Distributions boolean_t is_user)
1421*a1e26a70SApple OSS Distributions {
1422*a1e26a70SApple OSS Distributions return chudxnu_thread_get_callstack64_internal(thread, callstack, count, is_user, !is_user);
1423*a1e26a70SApple OSS Distributions }
1424*a1e26a70SApple OSS Distributions #else /* !__arm64__ && !__x86_64__ */
1425*a1e26a70SApple OSS Distributions #error kperf: unsupported architecture
1426*a1e26a70SApple OSS Distributions #endif /* !__arm64__ && !__x86_64__ */
1427