1*4d495c6eSApple OSS Distributions /*
2*4d495c6eSApple OSS Distributions * Copyright (c) 2011-2022 Apple Computer, Inc. All rights reserved.
3*4d495c6eSApple OSS Distributions *
4*4d495c6eSApple OSS Distributions * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5*4d495c6eSApple OSS Distributions *
6*4d495c6eSApple OSS Distributions * This file contains Original Code and/or Modifications of Original Code
7*4d495c6eSApple OSS Distributions * as defined in and that are subject to the Apple Public Source License
8*4d495c6eSApple OSS Distributions * Version 2.0 (the 'License'). You may not use this file except in
9*4d495c6eSApple OSS Distributions * compliance with the License. The rights granted to you under the License
10*4d495c6eSApple OSS Distributions * may not be used to create, or enable the creation or redistribution of,
11*4d495c6eSApple OSS Distributions * unlawful or unlicensed copies of an Apple operating system, or to
12*4d495c6eSApple OSS Distributions * circumvent, violate, or enable the circumvention or violation of, any
13*4d495c6eSApple OSS Distributions * terms of an Apple operating system software license agreement.
14*4d495c6eSApple OSS Distributions *
15*4d495c6eSApple OSS Distributions * Please obtain a copy of the License at
16*4d495c6eSApple OSS Distributions * http://www.opensource.apple.com/apsl/ and read it before using this file.
17*4d495c6eSApple OSS Distributions *
18*4d495c6eSApple OSS Distributions * The Original Code and all software distributed under the License are
19*4d495c6eSApple OSS Distributions * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20*4d495c6eSApple OSS Distributions * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21*4d495c6eSApple OSS Distributions * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22*4d495c6eSApple OSS Distributions * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23*4d495c6eSApple OSS Distributions * Please see the License for the specific language governing rights and
24*4d495c6eSApple OSS Distributions * limitations under the License.
25*4d495c6eSApple OSS Distributions *
26*4d495c6eSApple OSS Distributions * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27*4d495c6eSApple OSS Distributions */
28*4d495c6eSApple OSS Distributions
29*4d495c6eSApple OSS Distributions /* Collect kernel callstacks */
30*4d495c6eSApple OSS Distributions
31*4d495c6eSApple OSS Distributions #include <mach/mach_types.h>
32*4d495c6eSApple OSS Distributions #include <kern/thread.h>
33*4d495c6eSApple OSS Distributions #include <kern/backtrace.h>
34*4d495c6eSApple OSS Distributions #include <kern/cambria_layout.h>
35*4d495c6eSApple OSS Distributions #include <vm/vm_map_xnu.h>
36*4d495c6eSApple OSS Distributions #include <kperf/buffer.h>
37*4d495c6eSApple OSS Distributions #include <kperf/context.h>
38*4d495c6eSApple OSS Distributions #include <kperf/callstack.h>
39*4d495c6eSApple OSS Distributions #include <kperf/ast.h>
40*4d495c6eSApple OSS Distributions #include <sys/errno.h>
41*4d495c6eSApple OSS Distributions #include <mach/exclaves.h>
42*4d495c6eSApple OSS Distributions
43*4d495c6eSApple OSS Distributions #if defined(__arm64__)
44*4d495c6eSApple OSS Distributions #include <arm/cpu_data.h>
45*4d495c6eSApple OSS Distributions #include <arm/cpu_data_internal.h>
46*4d495c6eSApple OSS Distributions #endif
47*4d495c6eSApple OSS Distributions
/*
 * Append a "fixup" frame to a sampled user callstack so symbolicators see a
 * complete stack: the word at the saved user stack pointer (x86_64) or the
 * saved link register (arm64).  On 32-bit arm states in Thumb mode, the low
 * bit of frame 0 is set to encode the instruction set.  If no user register
 * state is available, a zero frame is appended instead.
 */
static void
callstack_fixup_user(struct kp_ucallstack *cs, thread_t thread)
{
	uint64_t fixup_val = 0;
	/* the caller must leave one free slot for the fixup frame */
	assert(cs->kpuc_nframes < MAX_UCALLSTACK_FRAMES);

#if defined(__x86_64__)
	user_addr_t sp_user;
	bool user_64;
	x86_saved_state_t *state;

	state = get_user_regs(thread);
	if (!state) {
		goto out;
	}

	user_64 = is_saved_state64(state);
	if (user_64) {
		sp_user = saved_state64(state)->isf.rsp;
	} else {
		sp_user = saved_state32(state)->uesp;
	}

	/*
	 * Read the word on top of the user stack.  The current thread can
	 * copyin directly; other threads are read through their task's VM
	 * map.  Failures are deliberately ignored -- the fixup frame is then
	 * reported as zero.
	 */
	if (thread == current_thread()) {
		(void)copyin(sp_user, (char *)&fixup_val,
		    user_64 ? sizeof(uint64_t) : sizeof(uint32_t));
	} else {
		(void)vm_map_read_user(get_task_map(get_threadtask(thread)), sp_user,
		    &fixup_val, user_64 ? sizeof(uint64_t) : sizeof(uint32_t));
	}

#elif defined(__arm64__)

	struct arm_saved_state *state = get_user_regs(thread);
	if (!state) {
		goto out;
	}

	/* encode thumb mode into low bit of PC */
	if (is_saved_state32(state) && (get_saved_state_cpsr(state) & PSR_TF)) {
		cs->kpuc_frames[0] |= 1ULL;
	}


	fixup_val = get_saved_state_lr(state);

#else
#error "callstack_fixup_user: unsupported architecture"
#endif

out:
	cs->kpuc_frames[cs->kpuc_nframes++] = fixup_val;
}
101*4d495c6eSApple OSS Distributions
102*4d495c6eSApple OSS Distributions #if defined(__x86_64__)
103*4d495c6eSApple OSS Distributions
104*4d495c6eSApple OSS Distributions __attribute__((used))
105*4d495c6eSApple OSS Distributions static kern_return_t
interrupted_kernel_sp_value(uintptr_t * sp_val)106*4d495c6eSApple OSS Distributions interrupted_kernel_sp_value(uintptr_t *sp_val)
107*4d495c6eSApple OSS Distributions {
108*4d495c6eSApple OSS Distributions x86_saved_state_t *state;
109*4d495c6eSApple OSS Distributions uintptr_t sp;
110*4d495c6eSApple OSS Distributions bool state_64;
111*4d495c6eSApple OSS Distributions uint64_t cs;
112*4d495c6eSApple OSS Distributions uintptr_t top, bottom;
113*4d495c6eSApple OSS Distributions
114*4d495c6eSApple OSS Distributions state = current_cpu_datap()->cpu_int_state;
115*4d495c6eSApple OSS Distributions if (!state) {
116*4d495c6eSApple OSS Distributions return KERN_FAILURE;
117*4d495c6eSApple OSS Distributions }
118*4d495c6eSApple OSS Distributions
119*4d495c6eSApple OSS Distributions state_64 = is_saved_state64(state);
120*4d495c6eSApple OSS Distributions
121*4d495c6eSApple OSS Distributions if (state_64) {
122*4d495c6eSApple OSS Distributions cs = saved_state64(state)->isf.cs;
123*4d495c6eSApple OSS Distributions } else {
124*4d495c6eSApple OSS Distributions cs = saved_state32(state)->cs;
125*4d495c6eSApple OSS Distributions }
126*4d495c6eSApple OSS Distributions /* return early if interrupted a thread in user space */
127*4d495c6eSApple OSS Distributions if ((cs & SEL_PL) == SEL_PL_U) {
128*4d495c6eSApple OSS Distributions return KERN_FAILURE;
129*4d495c6eSApple OSS Distributions }
130*4d495c6eSApple OSS Distributions
131*4d495c6eSApple OSS Distributions if (state_64) {
132*4d495c6eSApple OSS Distributions sp = saved_state64(state)->isf.rsp;
133*4d495c6eSApple OSS Distributions } else {
134*4d495c6eSApple OSS Distributions sp = saved_state32(state)->uesp;
135*4d495c6eSApple OSS Distributions }
136*4d495c6eSApple OSS Distributions
137*4d495c6eSApple OSS Distributions /* make sure the stack pointer is pointing somewhere in this stack */
138*4d495c6eSApple OSS Distributions bottom = current_thread()->kernel_stack;
139*4d495c6eSApple OSS Distributions top = bottom + kernel_stack_size;
140*4d495c6eSApple OSS Distributions if (sp >= bottom && sp < top) {
141*4d495c6eSApple OSS Distributions return KERN_FAILURE;
142*4d495c6eSApple OSS Distributions }
143*4d495c6eSApple OSS Distributions
144*4d495c6eSApple OSS Distributions *sp_val = *(uintptr_t *)sp;
145*4d495c6eSApple OSS Distributions return KERN_SUCCESS;
146*4d495c6eSApple OSS Distributions }
147*4d495c6eSApple OSS Distributions
148*4d495c6eSApple OSS Distributions #elif defined(__arm64__)
149*4d495c6eSApple OSS Distributions
150*4d495c6eSApple OSS Distributions __attribute__((used))
151*4d495c6eSApple OSS Distributions static kern_return_t
interrupted_kernel_lr(uintptr_t * lr)152*4d495c6eSApple OSS Distributions interrupted_kernel_lr(uintptr_t *lr)
153*4d495c6eSApple OSS Distributions {
154*4d495c6eSApple OSS Distributions struct arm_saved_state *state;
155*4d495c6eSApple OSS Distributions
156*4d495c6eSApple OSS Distributions state = getCpuDatap()->cpu_int_state;
157*4d495c6eSApple OSS Distributions
158*4d495c6eSApple OSS Distributions /* return early if interrupted a thread in user space */
159*4d495c6eSApple OSS Distributions if (PSR64_IS_USER(get_saved_state_cpsr(state))) {
160*4d495c6eSApple OSS Distributions return KERN_FAILURE;
161*4d495c6eSApple OSS Distributions }
162*4d495c6eSApple OSS Distributions
163*4d495c6eSApple OSS Distributions *lr = get_saved_state_lr(state);
164*4d495c6eSApple OSS Distributions return KERN_SUCCESS;
165*4d495c6eSApple OSS Distributions }
166*4d495c6eSApple OSS Distributions #else /* defined(__arm64__) */
167*4d495c6eSApple OSS Distributions #error "interrupted_kernel_{sp,lr}: unsupported architecture"
168*4d495c6eSApple OSS Distributions #endif /* !defined(__arm64__) */
169*4d495c6eSApple OSS Distributions
170*4d495c6eSApple OSS Distributions
/*
 * Append a fixup frame to an interrupt-sampled kernel callstack.  On release
 * kernels the frame is always zero; on development/debug kernels it carries
 * the word at the interrupted stack pointer (x86_64) or the interrupted link
 * register (arm64), when it can be retrieved safely.
 */
static void
callstack_fixup_interrupted(struct kp_kcallstack *cs)
{
	uintptr_t fixup_val = 0;
	/* the caller must leave one free slot for the fixup frame */
	assert(cs->kpkc_nframes < MAX_KCALLSTACK_FRAMES);

	/*
	 * Only provide arbitrary data on development or debug kernels.
	 */
#if DEVELOPMENT || DEBUG
#if defined(__x86_64__)
	(void)interrupted_kernel_sp_value(&fixup_val);
#elif defined(__arm64__)
	(void)interrupted_kernel_lr(&fixup_val);
#endif /* defined(__x86_64__) */
#endif /* DEVELOPMENT || DEBUG */

	assert(cs->kpkc_flags & CALLSTACK_KERNEL);
	cs->kpkc_frames[cs->kpkc_nframes++] = fixup_val;
}
191*4d495c6eSApple OSS Distributions
192*4d495c6eSApple OSS Distributions void
kperf_continuation_sample(struct kp_kcallstack * cs,struct kperf_context * context)193*4d495c6eSApple OSS Distributions kperf_continuation_sample(struct kp_kcallstack *cs, struct kperf_context *context)
194*4d495c6eSApple OSS Distributions {
195*4d495c6eSApple OSS Distributions thread_t thread;
196*4d495c6eSApple OSS Distributions
197*4d495c6eSApple OSS Distributions assert(cs != NULL);
198*4d495c6eSApple OSS Distributions assert(context != NULL);
199*4d495c6eSApple OSS Distributions
200*4d495c6eSApple OSS Distributions thread = context->cur_thread;
201*4d495c6eSApple OSS Distributions assert(thread != NULL);
202*4d495c6eSApple OSS Distributions assert(thread->continuation != NULL);
203*4d495c6eSApple OSS Distributions
204*4d495c6eSApple OSS Distributions cs->kpkc_flags = CALLSTACK_CONTINUATION | CALLSTACK_VALID | CALLSTACK_KERNEL;
205*4d495c6eSApple OSS Distributions #ifdef __LP64__
206*4d495c6eSApple OSS Distributions cs->kpkc_flags |= CALLSTACK_64BIT;
207*4d495c6eSApple OSS Distributions #endif
208*4d495c6eSApple OSS Distributions
209*4d495c6eSApple OSS Distributions cs->kpkc_nframes = 1;
210*4d495c6eSApple OSS Distributions cs->kpkc_frames[0] = VM_KERNEL_UNSLIDE(thread->continuation);
211*4d495c6eSApple OSS Distributions }
212*4d495c6eSApple OSS Distributions
213*4d495c6eSApple OSS Distributions void
kperf_backtrace_sample(struct kp_kcallstack * cs,struct kperf_context * context)214*4d495c6eSApple OSS Distributions kperf_backtrace_sample(struct kp_kcallstack *cs, struct kperf_context *context)
215*4d495c6eSApple OSS Distributions {
216*4d495c6eSApple OSS Distributions assert(cs != NULL);
217*4d495c6eSApple OSS Distributions assert(context != NULL);
218*4d495c6eSApple OSS Distributions assert(context->cur_thread == current_thread());
219*4d495c6eSApple OSS Distributions
220*4d495c6eSApple OSS Distributions cs->kpkc_flags = CALLSTACK_KERNEL | CALLSTACK_KERNEL_WORDS;
221*4d495c6eSApple OSS Distributions #ifdef __LP64__
222*4d495c6eSApple OSS Distributions cs->kpkc_flags |= CALLSTACK_64BIT;
223*4d495c6eSApple OSS Distributions #endif
224*4d495c6eSApple OSS Distributions
225*4d495c6eSApple OSS Distributions BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_START, 1);
226*4d495c6eSApple OSS Distributions
227*4d495c6eSApple OSS Distributions backtrace_info_t btinfo = BTI_NONE;
228*4d495c6eSApple OSS Distributions struct backtrace_control ctl = {
229*4d495c6eSApple OSS Distributions .btc_frame_addr = (uintptr_t)context->starting_fp,
230*4d495c6eSApple OSS Distributions };
231*4d495c6eSApple OSS Distributions cs->kpkc_nframes = backtrace(cs->kpkc_word_frames, cs->kpkc_nframes - 1,
232*4d495c6eSApple OSS Distributions &ctl, &btinfo);
233*4d495c6eSApple OSS Distributions if (cs->kpkc_nframes > 0) {
234*4d495c6eSApple OSS Distributions cs->kpkc_flags |= CALLSTACK_VALID;
235*4d495c6eSApple OSS Distributions
236*4d495c6eSApple OSS Distributions cs->kpkc_exclaves_offset = 0;
237*4d495c6eSApple OSS Distributions #if CONFIG_EXCLAVES
238*4d495c6eSApple OSS Distributions if ((context->cur_thread->th_exclaves_state & TH_EXCLAVES_RPC) != 0) {
239*4d495c6eSApple OSS Distributions cs->kpkc_exclaves_offset = exclaves_stack_offset(cs->kpkc_word_frames, cs->kpkc_nframes, true);
240*4d495c6eSApple OSS Distributions }
241*4d495c6eSApple OSS Distributions #endif /* CONFIG_EXCLAVES */
242*4d495c6eSApple OSS Distributions
243*4d495c6eSApple OSS Distributions /*
244*4d495c6eSApple OSS Distributions * Fake the value pointed to by the stack pointer or the link
245*4d495c6eSApple OSS Distributions * register for symbolicators.
246*4d495c6eSApple OSS Distributions */
247*4d495c6eSApple OSS Distributions cs->kpkc_word_frames[cs->kpkc_nframes + 1] = 0;
248*4d495c6eSApple OSS Distributions cs->kpkc_nframes += 1;
249*4d495c6eSApple OSS Distributions }
250*4d495c6eSApple OSS Distributions if ((btinfo & BTI_TRUNCATED)) {
251*4d495c6eSApple OSS Distributions cs->kpkc_flags |= CALLSTACK_TRUNCATED;
252*4d495c6eSApple OSS Distributions }
253*4d495c6eSApple OSS Distributions
254*4d495c6eSApple OSS Distributions BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_END, cs->kpkc_nframes);
255*4d495c6eSApple OSS Distributions }
256*4d495c6eSApple OSS Distributions
257*4d495c6eSApple OSS Distributions kern_return_t chudxnu_thread_get_callstack64_kperf(thread_t thread,
258*4d495c6eSApple OSS Distributions uint64_t *callStack, mach_msg_type_number_t *count,
259*4d495c6eSApple OSS Distributions boolean_t user_only);
260*4d495c6eSApple OSS Distributions
261*4d495c6eSApple OSS Distributions void
kperf_kcallstack_sample(struct kp_kcallstack * cs,struct kperf_context * context)262*4d495c6eSApple OSS Distributions kperf_kcallstack_sample(struct kp_kcallstack *cs, struct kperf_context *context)
263*4d495c6eSApple OSS Distributions {
264*4d495c6eSApple OSS Distributions thread_t thread;
265*4d495c6eSApple OSS Distributions
266*4d495c6eSApple OSS Distributions assert(cs != NULL);
267*4d495c6eSApple OSS Distributions assert(context != NULL);
268*4d495c6eSApple OSS Distributions assert(cs->kpkc_nframes <= MAX_KCALLSTACK_FRAMES);
269*4d495c6eSApple OSS Distributions
270*4d495c6eSApple OSS Distributions thread = context->cur_thread;
271*4d495c6eSApple OSS Distributions assert(thread != NULL);
272*4d495c6eSApple OSS Distributions
273*4d495c6eSApple OSS Distributions BUF_INFO(PERF_CS_KSAMPLE | DBG_FUNC_START, (uintptr_t)thread_tid(thread),
274*4d495c6eSApple OSS Distributions cs->kpkc_nframes);
275*4d495c6eSApple OSS Distributions
276*4d495c6eSApple OSS Distributions cs->kpkc_flags = CALLSTACK_KERNEL;
277*4d495c6eSApple OSS Distributions #ifdef __LP64__
278*4d495c6eSApple OSS Distributions cs->kpkc_flags |= CALLSTACK_64BIT;
279*4d495c6eSApple OSS Distributions #endif
280*4d495c6eSApple OSS Distributions
281*4d495c6eSApple OSS Distributions if (ml_at_interrupt_context()) {
282*4d495c6eSApple OSS Distributions assert(thread == current_thread());
283*4d495c6eSApple OSS Distributions cs->kpkc_flags |= CALLSTACK_KERNEL_WORDS;
284*4d495c6eSApple OSS Distributions backtrace_info_t btinfo = BTI_NONE;
285*4d495c6eSApple OSS Distributions struct backtrace_control ctl = { .btc_flags = BTF_KERN_INTERRUPTED, };
286*4d495c6eSApple OSS Distributions cs->kpkc_nframes = backtrace(cs->kpkc_word_frames, cs->kpkc_nframes - 1,
287*4d495c6eSApple OSS Distributions &ctl, &btinfo);
288*4d495c6eSApple OSS Distributions if (cs->kpkc_nframes != 0) {
289*4d495c6eSApple OSS Distributions callstack_fixup_interrupted(cs);
290*4d495c6eSApple OSS Distributions }
291*4d495c6eSApple OSS Distributions if ((btinfo & BTI_TRUNCATED)) {
292*4d495c6eSApple OSS Distributions cs->kpkc_flags |= CALLSTACK_TRUNCATED;
293*4d495c6eSApple OSS Distributions }
294*4d495c6eSApple OSS Distributions
295*4d495c6eSApple OSS Distributions cs->kpkc_exclaves_offset = 0;
296*4d495c6eSApple OSS Distributions #if CONFIG_EXCLAVES
297*4d495c6eSApple OSS Distributions if ((thread->th_exclaves_state & TH_EXCLAVES_RPC) != 0) {
298*4d495c6eSApple OSS Distributions cs->kpkc_exclaves_offset = exclaves_stack_offset(cs->kpkc_word_frames, cs->kpkc_nframes, true);
299*4d495c6eSApple OSS Distributions }
300*4d495c6eSApple OSS Distributions #endif /* CONFIG_EXCLAVES */
301*4d495c6eSApple OSS Distributions } else {
302*4d495c6eSApple OSS Distributions /*
303*4d495c6eSApple OSS Distributions * Rely on legacy CHUD backtracer to backtrace kernel stacks on
304*4d495c6eSApple OSS Distributions * other threads.
305*4d495c6eSApple OSS Distributions */
306*4d495c6eSApple OSS Distributions kern_return_t kr;
307*4d495c6eSApple OSS Distributions kr = chudxnu_thread_get_callstack64_kperf(thread,
308*4d495c6eSApple OSS Distributions cs->kpkc_frames, &cs->kpkc_nframes, FALSE);
309*4d495c6eSApple OSS Distributions if (kr == KERN_SUCCESS) {
310*4d495c6eSApple OSS Distributions cs->kpkc_flags |= CALLSTACK_VALID;
311*4d495c6eSApple OSS Distributions } else if (kr == KERN_RESOURCE_SHORTAGE) {
312*4d495c6eSApple OSS Distributions cs->kpkc_flags |= CALLSTACK_VALID;
313*4d495c6eSApple OSS Distributions cs->kpkc_flags |= CALLSTACK_TRUNCATED;
314*4d495c6eSApple OSS Distributions } else {
315*4d495c6eSApple OSS Distributions cs->kpkc_nframes = 0;
316*4d495c6eSApple OSS Distributions }
317*4d495c6eSApple OSS Distributions }
318*4d495c6eSApple OSS Distributions
319*4d495c6eSApple OSS Distributions if (!(cs->kpkc_flags & CALLSTACK_VALID)) {
320*4d495c6eSApple OSS Distributions BUF_INFO(PERF_CS_ERROR, ERR_GETSTACK);
321*4d495c6eSApple OSS Distributions }
322*4d495c6eSApple OSS Distributions
323*4d495c6eSApple OSS Distributions BUF_INFO(PERF_CS_KSAMPLE | DBG_FUNC_END, (uintptr_t)thread_tid(thread),
324*4d495c6eSApple OSS Distributions cs->kpkc_flags, cs->kpkc_nframes);
325*4d495c6eSApple OSS Distributions }
326*4d495c6eSApple OSS Distributions
/*
 * Sample the user callstack of `context->cur_thread` into `cs`.  On entry,
 * `cs->kpuc_nframes` holds the frame buffer's capacity; on return it is the
 * number of captured frames, including the fixup frame.  If the backtracer
 * reports an async frame address and space remains, the async continuation
 * stack is appended after the main stack.  Must run with interrupts enabled,
 * since backtracing user memory may fault.
 */
void
kperf_ucallstack_sample(struct kp_ucallstack *cs, struct kperf_context *context)
{
	assert(ml_get_interrupts_enabled() == TRUE);

	thread_t thread = context->cur_thread;
	assert(thread != NULL);

	BUF_INFO(PERF_CS_USAMPLE | DBG_FUNC_START,
	    (uintptr_t)thread_tid(thread), cs->kpuc_nframes);

	struct backtrace_user_info btinfo = BTUINFO_INIT;
	/*
	 * Leave space for the fixup information.
	 */
	unsigned int maxnframes = cs->kpuc_nframes - 1;
	struct backtrace_control ctl = { .btc_user_thread = thread, };
	unsigned int nframes = backtrace_user(cs->kpuc_frames, maxnframes, &ctl,
	    &btinfo);
	cs->kpuc_nframes = MIN(maxnframes, nframes);

	/* frames are uintptr_t words; 64-bit-ness comes from the backtracer */
	cs->kpuc_flags |= CALLSTACK_KERNEL_WORDS |
	    ((btinfo.btui_info & BTI_TRUNCATED) ? CALLSTACK_TRUNCATED : 0) |
	    ((btinfo.btui_info & BTI_64_BIT) ? CALLSTACK_64BIT : 0);

	/*
	 * Ignore EFAULT to get as much of the stack as possible.
	 */
	if (btinfo.btui_error == 0 || btinfo.btui_error == EFAULT) {
		callstack_fixup_user(cs, thread);
		cs->kpuc_flags |= CALLSTACK_VALID;

		/*
		 * NOTE(review): btui_async_frame_addr presumably points at a
		 * Swift async continuation frame -- confirm against the
		 * backtrace_user documentation.
		 */
		if (cs->kpuc_nframes < maxnframes &&
		    btinfo.btui_async_frame_addr != 0) {
			cs->kpuc_async_index = btinfo.btui_async_start_index;
			ctl.btc_frame_addr = btinfo.btui_async_frame_addr;
			ctl.btc_addr_offset = BTCTL_ASYNC_ADDR_OFFSET;
			/* remaining capacity after the main stack and fixup frame */
			maxnframes -= cs->kpuc_nframes;
			btinfo = BTUINFO_INIT;
			unsigned int nasync_frames = backtrace_user(
			    &cs->kpuc_frames[cs->kpuc_nframes], maxnframes, &ctl, &btinfo);
			if (btinfo.btui_info & BTI_TRUNCATED) {
				cs->kpuc_flags |= CALLSTACK_TRUNCATED;
			}
			/* only count the async frames if the walk succeeded */
			if (btinfo.btui_error == 0 || btinfo.btui_error == EFAULT) {
				cs->kpuc_flags |= CALLSTACK_HAS_ASYNC;
				cs->kpuc_async_nframes = nasync_frames;
			}
		}
	} else {
		cs->kpuc_nframes = 0;
		BUF_INFO(PERF_CS_ERROR, ERR_GETSTACK, btinfo.btui_error);
	}

	BUF_INFO(PERF_CS_USAMPLE | DBG_FUNC_END, (uintptr_t)thread_tid(thread),
	    cs->kpuc_flags, cs->kpuc_nframes);
}
384*4d495c6eSApple OSS Distributions
/*
 * Return frame `frame` of `bt`, unsliding kernel addresses when `kern` is
 * set.  Frames past the end of the captured backtrace read as zero so the
 * final trace event can be padded safely.
 */
static inline uintptr_t
scrub_word(uintptr_t *bt, int n_frames, int frame, bool kern)
{
	if (frame >= n_frames) {
		return 0;
	}
	return kern ? VM_KERNEL_UNSLIDE(bt[frame]) : bt[frame];
}
398*4d495c6eSApple OSS Distributions
/*
 * Return frame `frame` of `bt` narrowed to a machine word.  Frames past the
 * end of the captured backtrace read as zero so the final trace event can be
 * padded safely.
 */
static inline uintptr_t
scrub_frame(uint64_t *bt, int n_frames, int frame)
{
	return (frame < n_frames) ? (uintptr_t)bt[frame] : 0;
}
408*4d495c6eSApple OSS Distributions
409*4d495c6eSApple OSS Distributions static void
callstack_log(uint32_t hdrid,uint32_t dataid,void * vframes,unsigned int nframes,unsigned int flags,unsigned int async_index,unsigned int async_nframes)410*4d495c6eSApple OSS Distributions callstack_log(uint32_t hdrid, uint32_t dataid, void *vframes,
411*4d495c6eSApple OSS Distributions unsigned int nframes, unsigned int flags, unsigned int async_index,
412*4d495c6eSApple OSS Distributions unsigned int async_nframes)
413*4d495c6eSApple OSS Distributions {
414*4d495c6eSApple OSS Distributions BUF_VERB(PERF_CS_LOG | DBG_FUNC_START, flags, nframes);
415*4d495c6eSApple OSS Distributions BUF_DATA(hdrid, flags, nframes - async_nframes, async_index, async_nframes);
416*4d495c6eSApple OSS Distributions
417*4d495c6eSApple OSS Distributions unsigned int nevts = nframes / 4;
418*4d495c6eSApple OSS Distributions unsigned int ovf = nframes % 4;
419*4d495c6eSApple OSS Distributions if (ovf != 0) {
420*4d495c6eSApple OSS Distributions nevts++;
421*4d495c6eSApple OSS Distributions }
422*4d495c6eSApple OSS Distributions
423*4d495c6eSApple OSS Distributions bool kern = flags & CALLSTACK_KERNEL;
424*4d495c6eSApple OSS Distributions
425*4d495c6eSApple OSS Distributions if (flags & CALLSTACK_KERNEL_WORDS) {
426*4d495c6eSApple OSS Distributions uintptr_t *frames = vframes;
427*4d495c6eSApple OSS Distributions for (unsigned int i = 0; i < nevts; i++) {
428*4d495c6eSApple OSS Distributions unsigned int j = i * 4;
429*4d495c6eSApple OSS Distributions BUF_DATA(dataid,
430*4d495c6eSApple OSS Distributions scrub_word(frames, nframes, j + 0, kern),
431*4d495c6eSApple OSS Distributions scrub_word(frames, nframes, j + 1, kern),
432*4d495c6eSApple OSS Distributions scrub_word(frames, nframes, j + 2, kern),
433*4d495c6eSApple OSS Distributions scrub_word(frames, nframes, j + 3, kern));
434*4d495c6eSApple OSS Distributions }
435*4d495c6eSApple OSS Distributions } else {
436*4d495c6eSApple OSS Distributions for (unsigned int i = 0; i < nevts; i++) {
437*4d495c6eSApple OSS Distributions uint64_t *frames = vframes;
438*4d495c6eSApple OSS Distributions unsigned int j = i * 4;
439*4d495c6eSApple OSS Distributions BUF_DATA(dataid,
440*4d495c6eSApple OSS Distributions scrub_frame(frames, nframes, j + 0),
441*4d495c6eSApple OSS Distributions scrub_frame(frames, nframes, j + 1),
442*4d495c6eSApple OSS Distributions scrub_frame(frames, nframes, j + 2),
443*4d495c6eSApple OSS Distributions scrub_frame(frames, nframes, j + 3));
444*4d495c6eSApple OSS Distributions }
445*4d495c6eSApple OSS Distributions }
446*4d495c6eSApple OSS Distributions
447*4d495c6eSApple OSS Distributions BUF_VERB(PERF_CS_LOG | DBG_FUNC_END, flags, nframes);
448*4d495c6eSApple OSS Distributions }
449*4d495c6eSApple OSS Distributions
/*
 * Emit a sampled kernel callstack to the trace buffer.  Kernel stacks carry
 * no async frames, so the async index and count are zero.
 * NOTE(review): kpkc_frames is passed even when the flags select word-sized
 * frames -- presumably it aliases kpkc_word_frames; confirm in the header.
 */
void
kperf_kcallstack_log(struct kp_kcallstack *cs)
{
	callstack_log(PERF_CS_KHDR, PERF_CS_KDATA, cs->kpkc_frames,
	    cs->kpkc_nframes, cs->kpkc_flags, 0, 0);

	/* report the exclaves stack offset only when one was recorded */
	if (cs->kpkc_exclaves_offset != 0) {
		BUF_DATA(PERF_CS_KEXOFFSET, cs->kpkc_exclaves_offset);
	}
}
460*4d495c6eSApple OSS Distributions
/*
 * Emit a sampled user callstack to the trace buffer, including any async
 * continuation frames appended after the main stack.
 */
void
kperf_ucallstack_log(struct kp_ucallstack *cs)
{
	callstack_log(PERF_CS_UHDR, PERF_CS_UDATA, cs->kpuc_frames,
	    cs->kpuc_nframes + cs->kpuc_async_nframes, cs->kpuc_flags,
	    cs->kpuc_async_index, cs->kpuc_async_nframes);
}
468*4d495c6eSApple OSS Distributions
469*4d495c6eSApple OSS Distributions #if CONFIG_EXCLAVES
/*
 * Log the exclaves portion of a thread's callstack from a stackshot IPC
 * stack entry.  Frames beyond MAX_EXCALLSTACK_FRAMES are dropped and the
 * stack is marked truncated.  Only the address-space ID is logged when the
 * entry carries no stacktrace.
 */
void
kperf_excallstack_log(const stackshottypes_ipcstackentry_s *ipcstack)
{
	/* __block so the visitor block below can update them */
	__block unsigned int nframes = 0;
	__block unsigned int flags = CALLSTACK_VALID;
	uint64_t frames[MAX_EXCALLSTACK_FRAMES] = {};
	/* blocks cannot capture arrays, so capture a pointer to it instead */
	uint64_t *frames_block = frames;

	BUF_DATA(PERF_CS_EXSTACK, ipcstack->asid);

	if (ipcstack->stacktrace.has_value) {
		address__v_visit(&ipcstack->stacktrace.value, ^(size_t i, const stackshottypes_address_s item) {
			if (i >= MAX_EXCALLSTACK_FRAMES) {
				flags |= CALLSTACK_TRUNCATED;
				return;
			}
			frames_block[i] = item;
			nframes += 1;
		});
		callstack_log(PERF_CS_EXHDR, PERF_CS_EXDATA, frames, nframes, flags, 0, 0);
	}
}
492*4d495c6eSApple OSS Distributions
/*
 * Request asynchronous collection of the sampled thread's exclave callstack.
 * Sets the kperf inspection bit and records the action ID on the thread,
 * unless the thread is not in an exclaves RPC or has the no-inspect bit set.
 * Returns true if the collection was pended.
 */
bool
kperf_exclave_callstack_pend(struct kperf_context *context, unsigned int actionid)
{
	if ((context->cur_thread->th_exclaves_state & TH_EXCLAVES_RPC)
	    && (os_atomic_load(&context->cur_thread->th_exclaves_inspection_state, relaxed) & TH_EXCLAVES_INSPECTION_NOINSPECT) == 0) {
		/* mark the thread so the exclaves inspection machinery samples it */
		os_atomic_or(&context->cur_thread->th_exclaves_inspection_state, TH_EXCLAVES_INSPECTION_KPERF, relaxed);
		context->cur_thread->kperf_exclaves_ast |= T_KPERF_SET_ACTIONID(actionid);
		return true;
	}
	return false;
}
504*4d495c6eSApple OSS Distributions #endif /* CONFIG_EXCLAVES */
505*4d495c6eSApple OSS Distributions
506*4d495c6eSApple OSS Distributions int
kperf_ucallstack_pend(struct kperf_context * context,uint32_t depth,unsigned int actionid)507*4d495c6eSApple OSS Distributions kperf_ucallstack_pend(struct kperf_context * context, uint32_t depth,
508*4d495c6eSApple OSS Distributions unsigned int actionid)
509*4d495c6eSApple OSS Distributions {
510*4d495c6eSApple OSS Distributions if (depth < 2) {
511*4d495c6eSApple OSS Distributions panic("HUH");
512*4d495c6eSApple OSS Distributions }
513*4d495c6eSApple OSS Distributions kperf_ast_set_callstack_depth(context->cur_thread, depth);
514*4d495c6eSApple OSS Distributions return kperf_ast_pend(context->cur_thread, T_KPERF_AST_CALLSTACK,
515*4d495c6eSApple OSS Distributions actionid);
516*4d495c6eSApple OSS Distributions }
517*4d495c6eSApple OSS Distributions
518*4d495c6eSApple OSS Distributions static kern_return_t
chudxnu_kern_read(void * dstaddr,vm_offset_t srcaddr,vm_size_t size)519*4d495c6eSApple OSS Distributions chudxnu_kern_read(void *dstaddr, vm_offset_t srcaddr, vm_size_t size)
520*4d495c6eSApple OSS Distributions {
521*4d495c6eSApple OSS Distributions return (ml_nofault_copy(srcaddr, (vm_offset_t)dstaddr, size) == size) ?
522*4d495c6eSApple OSS Distributions KERN_SUCCESS : KERN_FAILURE;
523*4d495c6eSApple OSS Distributions }
524*4d495c6eSApple OSS Distributions
525*4d495c6eSApple OSS Distributions static kern_return_t
chudxnu_task_read(task_t task,void * kernaddr,uint64_t usraddr,vm_size_t size)526*4d495c6eSApple OSS Distributions chudxnu_task_read(
527*4d495c6eSApple OSS Distributions task_t task,
528*4d495c6eSApple OSS Distributions void *kernaddr,
529*4d495c6eSApple OSS Distributions uint64_t usraddr,
530*4d495c6eSApple OSS Distributions vm_size_t size)
531*4d495c6eSApple OSS Distributions {
532*4d495c6eSApple OSS Distributions //ppc version ported to arm
533*4d495c6eSApple OSS Distributions kern_return_t ret = KERN_SUCCESS;
534*4d495c6eSApple OSS Distributions
535*4d495c6eSApple OSS Distributions if (ml_at_interrupt_context()) {
536*4d495c6eSApple OSS Distributions return KERN_FAILURE; // can't look at tasks on interrupt stack
537*4d495c6eSApple OSS Distributions }
538*4d495c6eSApple OSS Distributions
539*4d495c6eSApple OSS Distributions if (current_task() == task) {
540*4d495c6eSApple OSS Distributions if (copyin(usraddr, kernaddr, size)) {
541*4d495c6eSApple OSS Distributions ret = KERN_FAILURE;
542*4d495c6eSApple OSS Distributions }
543*4d495c6eSApple OSS Distributions } else {
544*4d495c6eSApple OSS Distributions vm_map_t map = get_task_map(task);
545*4d495c6eSApple OSS Distributions ret = vm_map_read_user(map, usraddr, kernaddr, size);
546*4d495c6eSApple OSS Distributions }
547*4d495c6eSApple OSS Distributions
548*4d495c6eSApple OSS Distributions return ret;
549*4d495c6eSApple OSS Distributions }
550*4d495c6eSApple OSS Distributions
551*4d495c6eSApple OSS Distributions static inline uint64_t
chudxnu_vm_unslide(uint64_t ptr,int kaddr)552*4d495c6eSApple OSS Distributions chudxnu_vm_unslide( uint64_t ptr, int kaddr )
553*4d495c6eSApple OSS Distributions {
554*4d495c6eSApple OSS Distributions if (!kaddr) {
555*4d495c6eSApple OSS Distributions return ptr;
556*4d495c6eSApple OSS Distributions }
557*4d495c6eSApple OSS Distributions
558*4d495c6eSApple OSS Distributions return VM_KERNEL_UNSLIDE(ptr);
559*4d495c6eSApple OSS Distributions }
560*4d495c6eSApple OSS Distributions
561*4d495c6eSApple OSS Distributions #if __arm64__
562*4d495c6eSApple OSS Distributions
563*4d495c6eSApple OSS Distributions #if defined(HAS_APPLE_PAC)
564*4d495c6eSApple OSS Distributions #include <ptrauth.h>
565*4d495c6eSApple OSS Distributions #endif
566*4d495c6eSApple OSS Distributions
567*4d495c6eSApple OSS Distributions // chudxnu_thread_get_callstack gathers a raw callstack along with any information needed to
568*4d495c6eSApple OSS Distributions // fix it up later (in case we stopped program as it was saving values into prev stack frame, etc.)
569*4d495c6eSApple OSS Distributions // after sampling has finished.
570*4d495c6eSApple OSS Distributions //
571*4d495c6eSApple OSS Distributions // For an N-entry callstack:
572*4d495c6eSApple OSS Distributions //
// [0]      current pc
// [1..N-3] stack frames (including current one)
// [N-2]    current LR (return address if we're in a leaf function)
// [N-1]    current SP (only captured when CS_FLAG_EXTRASP is set)
577*4d495c6eSApple OSS Distributions //
578*4d495c6eSApple OSS Distributions //
579*4d495c6eSApple OSS Distributions #define CS_FLAG_EXTRASP 1 // capture extra sp register
580*4d495c6eSApple OSS Distributions
/*
 * Walk `thread`'s frame-pointer chain and record a raw call stack into
 * `callStack`, in the layout described above (PC first, then frames,
 * then LR, then SP when CS_FLAG_EXTRASP is set in `flags`).
 *
 * On entry *count is the capacity of `callStack` in entries; on return
 * it holds the number of entries written.  user_only selects whether the
 * user or kernel saved register state is walked.  Handles both arm64
 * (saved_state64) and 32-bit arm (saved_state32) register states.
 *
 * Returns KERN_SUCCESS, KERN_FAILURE (no register state, or a user frame
 * when a kernel trace was requested, or buffer too small), or
 * KERN_RESOURCE_SHORTAGE (trace truncated by buffer capacity).
 */
static kern_return_t
chudxnu_thread_get_callstack64_internal(
	thread_t thread,
	uint64_t *callStack,
	mach_msg_type_number_t *count,
	boolean_t user_only,
	int flags)
{
	kern_return_t kr = KERN_SUCCESS;
	task_t task;
	uint64_t currPC = 0ULL, currLR = 0ULL, currSP = 0ULL;
	uint64_t prevPC = 0ULL; /* NOTE(review): assigned below but never read */
	uint64_t kernStackMin = thread->kernel_stack;
	uint64_t kernStackMax = kernStackMin + kernel_stack_size;
	uint64_t *buffer = callStack;
	int bufferIndex = 0;
	int bufferMaxIndex = 0;
	boolean_t kernel = FALSE;
	struct arm_saved_state *sstate = NULL;
	uint64_t pc = 0ULL;

	task = get_threadtask(thread);
	bufferMaxIndex = *count;
	//get thread state
	if (user_only) {
		sstate = find_user_regs(thread);
	} else {
		sstate = find_kern_regs(thread);
	}

	if (!sstate) {
		/* no saved register state -- nothing to walk */
		*count = 0;
		return KERN_FAILURE;
	}

	if (is_saved_state64(sstate)) {
		/* arm64: frame records are { previous FP, return address }, 64-bit each */
		struct arm_saved_state64 *state = NULL;
		uint64_t *fp = NULL, *nextFramePointer = NULL, *topfp = NULL;
		uint64_t frame[2];

		state = saved_state64(sstate);

		/* make sure it is safe to dereference before you do it */
		kernel = PSR64_IS_KERNEL(state->cpsr);

		/* can't take a kernel callstack if we've got a user frame */
		if (!user_only && !kernel) {
			return KERN_FAILURE;
		}

		/*
		 * Reserve space for saving LR (and sometimes SP) at the end of the
		 * backtrace.
		 */
		if (flags & CS_FLAG_EXTRASP) {
			bufferMaxIndex -= 2;
		} else {
			bufferMaxIndex -= 1;
		}

		if (bufferMaxIndex < 2) {
			*count = 0;
			return KERN_RESOURCE_SHORTAGE;
		}

		currPC = state->pc;
		currLR = state->lr;
		currSP = state->sp;

		fp = (uint64_t *)state->fp; /* frame pointer */
#if defined(HAS_APPLE_PAC)
		/* frame pointers on stack will be signed by arm64e ABI */
		fp = ptrauth_strip(fp, ptrauth_key_frame_pointer);
#endif
		topfp = fp;

		bufferIndex = 0; // start with a stack of size zero
		buffer[bufferIndex++] = chudxnu_vm_unslide(currPC, kernel); // save PC in position 0.

		BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_START, kernel, 0);

		// Now, fill buffer with stack backtraces.
		while (bufferIndex < bufferMaxIndex) {
			pc = 0ULL;
			/*
			 * Below the frame pointer, the following values are saved:
			 * -> FP
			 */

			/*
			 * Note that we read the pc even for the first stack frame
			 * (which, in theory, is always empty because the callee fills
			 * it in just before it lowers the stack. However, if we
			 * catch the program in between filling in the return address
			 * and lowering the stack, we want to still have a valid
			 * backtrace. FixupStack correctly disregards this value if
			 * necessary.
			 */

			if ((uint64_t)fp == 0 || ((uint64_t)fp & 0x3) != 0) {
				/* frame pointer is invalid - stop backtracing */
				pc = 0ULL;
				break;
			}

			if (kernel) {
				/* bound the read to the thread's kernel stack */
				if (((uint64_t)fp > kernStackMax) ||
				    ((uint64_t)fp < kernStackMin)) {
					kr = KERN_FAILURE;
				} else {
					kr = chudxnu_kern_read(&frame,
					    (vm_offset_t)fp,
					    (vm_size_t)sizeof(frame));
					if (kr == KERN_SUCCESS) {
#if defined(HAS_APPLE_PAC)
						/* return addresses on stack will be signed by arm64e ABI */
						pc = (uint64_t)ptrauth_strip((void *)frame[1], ptrauth_key_return_address);
#else
						pc = frame[1];
#endif
						nextFramePointer = (uint64_t *)frame[0];
#if defined(HAS_APPLE_PAC)
						/* frame pointers on stack will be signed by arm64e ABI */
						nextFramePointer = ptrauth_strip(nextFramePointer, ptrauth_key_frame_pointer);
#endif
					} else {
						pc = 0ULL;
						nextFramePointer = 0ULL;
						kr = KERN_FAILURE;
					}
				}
			} else {
				/* user frame: read through the task's VM map */
				kr = chudxnu_task_read(task,
				    &frame,
				    (vm_offset_t)fp,
				    (vm_size_t)sizeof(frame));
				if (kr == KERN_SUCCESS) {
#if defined(HAS_APPLE_PAC)
					/* return addresses on stack will be signed by arm64e ABI */
					pc = (uint64_t)ptrauth_strip((void *)frame[1], ptrauth_key_return_address);
#else
					pc = frame[1];
#endif
					nextFramePointer = (uint64_t *)(frame[0]);
#if defined(HAS_APPLE_PAC)
					/* frame pointers on stack will be signed by arm64e ABI */
					nextFramePointer = ptrauth_strip(nextFramePointer, ptrauth_key_frame_pointer);
#endif
				} else {
					pc = 0ULL;
					nextFramePointer = 0ULL;
					kr = KERN_FAILURE;
				}
			}

			if (kr != KERN_SUCCESS) {
				pc = 0ULL;
				break;
			}

			if (nextFramePointer) {
				/* only record a return address when a real next frame exists */
				buffer[bufferIndex++] = chudxnu_vm_unslide(pc, kernel);
				prevPC = pc;
			}

			/* stacks grow down: a next FP that isn't strictly higher ends the walk */
			if (nextFramePointer < fp) {
				break;
			} else {
				fp = nextFramePointer;
			}
		}

		BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_END, bufferIndex);

		/* clamp callstack size to max */
		if (bufferIndex >= bufferMaxIndex) {
			bufferIndex = bufferMaxIndex;
			kr = KERN_RESOURCE_SHORTAGE;
		} else {
			kr = KERN_SUCCESS;
		}

		// Save link register and SP at bottom of stack (used for later fixup).
		buffer[bufferIndex++] = chudxnu_vm_unslide(currLR, kernel);
		if (flags & CS_FLAG_EXTRASP) {
			buffer[bufferIndex++] = chudxnu_vm_unslide(currSP, kernel);
		}
	} else {
		/* 32-bit arm saved state (user) -- frame pointer lives in r7 */
		struct arm_saved_state32 *state = NULL;
		uint32_t *fp = NULL, *nextFramePointer = NULL, *topfp = NULL;

		/* 64-bit kernel stacks, 32-bit user stacks */
		uint64_t frame[2];
		uint32_t frame32[2];

		state = saved_state32(sstate);

		/* make sure it is safe to dereference before you do it */
		kernel = PSR_IS_KERNEL(state->cpsr);

		/* can't take a kernel callstack if we've got a user frame */
		if (!user_only && !kernel) {
			return KERN_FAILURE;
		}

		/*
		 * Reserve space for saving LR (and sometimes SP) at the end of the
		 * backtrace.
		 */
		if (flags & CS_FLAG_EXTRASP) {
			bufferMaxIndex -= 2;
		} else {
			bufferMaxIndex -= 1;
		}

		if (bufferMaxIndex < 2) {
			*count = 0;
			return KERN_RESOURCE_SHORTAGE;
		}

		currPC = (uint64_t)state->pc; /* r15 */
		if (state->cpsr & PSR_TF) {
			currPC |= 1ULL; /* encode thumb mode into low bit of PC */
		}
		currLR = (uint64_t)state->lr; /* r14 */
		currSP = (uint64_t)state->sp; /* r13 */

		fp = (uint32_t *)(uintptr_t)state->r[7]; /* frame pointer */
		topfp = fp;

		bufferIndex = 0; // start with a stack of size zero
		buffer[bufferIndex++] = chudxnu_vm_unslide(currPC, kernel); // save PC in position 0.

		BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_START, kernel, 1);

		// Now, fill buffer with stack backtraces.
		while (bufferIndex < bufferMaxIndex) {
			pc = 0ULL;
			/*
			 * Below the frame pointer, the following values are saved:
			 * -> FP
			 */

			/*
			 * Note that we read the pc even for the first stack frame
			 * (which, in theory, is always empty because the callee fills
			 * it in just before it lowers the stack. However, if we
			 * catch the program in between filling in the return address
			 * and lowering the stack, we want to still have a valid
			 * backtrace. FixupStack correctly disregards this value if
			 * necessary.
			 */

			if ((uint32_t)fp == 0 || ((uint32_t)fp & 0x3) != 0) {
				/* frame pointer is invalid - stop backtracing */
				pc = 0ULL;
				break;
			}

			if (kernel) {
				if (((uint32_t)fp > kernStackMax) ||
				    ((uint32_t)fp < kernStackMin)) {
					kr = KERN_FAILURE;
				} else {
					/* kernel frames are 64-bit even when user space is 32-bit */
					kr = chudxnu_kern_read(&frame,
					    (vm_offset_t)fp,
					    (vm_size_t)sizeof(frame));
					if (kr == KERN_SUCCESS) {
						pc = (uint64_t)frame[1];
						nextFramePointer = (uint32_t *) (frame[0]);
					} else {
						pc = 0ULL;
						nextFramePointer = 0ULL;
						kr = KERN_FAILURE;
					}
				}
			} else {
				/* 32-bit user frame: mask FP to 32 bits before reading */
				kr = chudxnu_task_read(task,
				    &frame32,
				    (((uint64_t)(uint32_t)fp) & 0x00000000FFFFFFFFULL),
				    sizeof(frame32));
				if (kr == KERN_SUCCESS) {
					pc = (uint64_t)frame32[1];
					nextFramePointer = (uint32_t *)(uintptr_t)(frame32[0]);
				} else {
					pc = 0ULL;
					nextFramePointer = 0ULL;
					kr = KERN_FAILURE;
				}
			}

			if (kr != KERN_SUCCESS) {
				pc = 0ULL;
				break;
			}

			if (nextFramePointer) {
				buffer[bufferIndex++] = chudxnu_vm_unslide(pc, kernel);
				prevPC = pc;
			}

			/* stacks grow down: a next FP that isn't strictly higher ends the walk */
			if (nextFramePointer < fp) {
				break;
			} else {
				fp = nextFramePointer;
			}
		}

		BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_END, bufferIndex);

		/* clamp callstack size to max */
		if (bufferIndex >= bufferMaxIndex) {
			bufferIndex = bufferMaxIndex;
			kr = KERN_RESOURCE_SHORTAGE;
		} else {
			/* ignore all other failures */
			kr = KERN_SUCCESS;
		}

		// Save link register and R13 (sp) at bottom of stack (used for later fixup).
		buffer[bufferIndex++] = chudxnu_vm_unslide(currLR, kernel);
		if (flags & CS_FLAG_EXTRASP) {
			buffer[bufferIndex++] = chudxnu_vm_unslide(currSP, kernel);
		}
	}

	*count = bufferIndex;
	return kr;
}
909*4d495c6eSApple OSS Distributions
910*4d495c6eSApple OSS Distributions kern_return_t
chudxnu_thread_get_callstack64_kperf(thread_t thread,uint64_t * callStack,mach_msg_type_number_t * count,boolean_t user_only)911*4d495c6eSApple OSS Distributions chudxnu_thread_get_callstack64_kperf(
912*4d495c6eSApple OSS Distributions thread_t thread,
913*4d495c6eSApple OSS Distributions uint64_t *callStack,
914*4d495c6eSApple OSS Distributions mach_msg_type_number_t *count,
915*4d495c6eSApple OSS Distributions boolean_t user_only)
916*4d495c6eSApple OSS Distributions {
917*4d495c6eSApple OSS Distributions return chudxnu_thread_get_callstack64_internal( thread, callStack, count, user_only, 0 );
918*4d495c6eSApple OSS Distributions }
#elif __x86_64__

/*
 * Stack-address validity checks.  Supervisor (kernel) addresses must lie
 * within the thread's kernel stack bounds; 32-bit user addresses are not
 * range-checked here.
 */
#define VALID_STACK_ADDRESS(supervisor, addr, minKernAddr, maxKernAddr) (supervisor ? (addr>=minKernAddr && addr<=maxKernAddr) : TRUE)
// don't try to read in the hole
/* 64-bit user addresses must also avoid the non-canonical hole in the middle of the address space */
#define VALID_STACK_ADDRESS64(supervisor, addr, minKernAddr, maxKernAddr) \
	(supervisor ? ((uint64_t)addr >= minKernAddr && (uint64_t)addr <= maxKernAddr) : \
	((uint64_t)addr != 0ULL && ((uint64_t)addr <= 0x00007FFFFFFFFFFFULL || (uint64_t)addr >= 0xFFFF800000000000ULL)))

/*
 * In-memory layout of a 64-bit stack frame: saved frame pointer, return
 * address, then the (variable-length) argument area.
 */
typedef struct _cframe64_t {
	uint64_t prevFP;                // can't use a real pointer here until we're a 64 bit kernel
	uint64_t caller;
	uint64_t args[0];               // legacy zero-length array (pre-C99 flexible array member)
}cframe64_t;


/* 32-bit user-space stack frame layout */
typedef struct _cframe_t {
	uint32_t prev;          // this is really a user32-space pointer to the previous frame
	uint32_t caller;
	uint32_t args[0];       // legacy zero-length array (pre-C99 flexible array member)
} cframe_t;

/* register-state lookup helpers provided by the platform layer */
extern void * find_user_regs(thread_t);
extern x86_saved_state32_t *find_kern_regs(thread_t);
942*4d495c6eSApple OSS Distributions
/*
 * Walk the kernel frame-pointer chain captured in `regs` (the x86 kernel
 * switch state) and append unslid kernel PCs to `frames`, starting at
 * *start_idx and never exceeding max_idx entries.  On return *start_idx
 * is advanced past the last entry written.
 *
 * Returns KERN_FAILURE if the saved registers can't be read or hold no
 * PC, KERN_RESOURCE_SHORTAGE if the buffer filled before the walk ended,
 * and KERN_SUCCESS otherwise.
 */
static kern_return_t
do_kernel_backtrace(
	thread_t thread,
	struct x86_kernel_state *regs,
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx)
{
	uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
	uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

#if __LP64__
	uint64_t currPC = 0ULL;
	uint64_t currFP = 0ULL;
	uint64_t prevPC = 0ULL; /* NOTE(review): assigned below but never read */
	uint64_t prevFP = 0ULL;
	/* fetch the saved kernel RIP/RBP with fault-safe reads */
	if (KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_rip), sizeof(uint64_t))) {
		return KERN_FAILURE;
	}
	if (KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_rbp), sizeof(uint64_t))) {
		return KERN_FAILURE;
	}
#else
	uint32_t currPC = 0U;
	uint32_t currFP = 0U;
	uint32_t prevPC = 0U;
	uint32_t prevFP = 0U;
	/* fetch the saved kernel EIP/EBP with fault-safe reads */
	if (KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_eip), sizeof(uint32_t))) {
		return KERN_FAILURE;
	}
	if (KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_ebp), sizeof(uint32_t))) {
		return KERN_FAILURE;
	}
#endif

	if (*start_idx >= max_idx) {
		return KERN_RESOURCE_SHORTAGE;        // no frames traced
	}
	if (!currPC) {
		return KERN_FAILURE;
	}

	/* record the interrupted PC first */
	frames[ct++] = chudxnu_vm_unslide((uint64_t)currPC, 1);

	// build a backtrace of this kernel state
	/*
	 * Each preprocessor branch below opens the while loop and declares
	 * `caller` at the word size in use; the loop body is shared.
	 */
#if __LP64__
	while (VALID_STACK_ADDRESS64(TRUE, currFP, kernStackMin, kernStackMax)) {
		// this is the address where caller lives in the user thread
		uint64_t caller = currFP + sizeof(uint64_t);
#else
	while (VALID_STACK_ADDRESS(TRUE, currFP, kernStackMin, kernStackMax)) {
		uint32_t caller = (uint32_t)currFP + sizeof(uint32_t);
#endif

		if (!currFP || !currPC) {
			currPC = 0;
			break;
		}

		if (ct >= max_idx) {
			/* out of buffer space -- report what we have */
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* read our caller */
		kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(currPC));

		if (kr != KERN_SUCCESS || !currPC) {
			currPC = 0UL;
			break;
		}

		/*
		 * retrieve contents of the frame pointer and advance to the next stack
		 * frame if it's valid
		 */
		prevFP = 0;
		/* NOTE(review): sizeof(currPC) equals sizeof(prevFP) here, but sizeof(prevFP) would be clearer */
		kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(currPC));

#if __LP64__
		if (VALID_STACK_ADDRESS64(TRUE, prevFP, kernStackMin, kernStackMax)) {
#else
		if (VALID_STACK_ADDRESS(TRUE, prevFP, kernStackMin, kernStackMax)) {
#endif
			frames[ct++] = chudxnu_vm_unslide((uint64_t)currPC, 1);
			prevPC = currPC;
		}
		/* stacks grow down: stop unless the chain moves strictly up */
		if (prevFP <= currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}
1042*4d495c6eSApple OSS Distributions
1043*4d495c6eSApple OSS Distributions
1044*4d495c6eSApple OSS Distributions
1045*4d495c6eSApple OSS Distributions static kern_return_t
1046*4d495c6eSApple OSS Distributions do_backtrace32(
1047*4d495c6eSApple OSS Distributions task_t task,
1048*4d495c6eSApple OSS Distributions thread_t thread,
1049*4d495c6eSApple OSS Distributions x86_saved_state32_t *regs,
1050*4d495c6eSApple OSS Distributions uint64_t *frames,
1051*4d495c6eSApple OSS Distributions mach_msg_type_number_t *start_idx,
1052*4d495c6eSApple OSS Distributions mach_msg_type_number_t max_idx,
1053*4d495c6eSApple OSS Distributions boolean_t supervisor)
1054*4d495c6eSApple OSS Distributions {
1055*4d495c6eSApple OSS Distributions uint32_t tmpWord = 0UL;
1056*4d495c6eSApple OSS Distributions uint64_t currPC = (uint64_t) regs->eip;
1057*4d495c6eSApple OSS Distributions uint64_t currFP = (uint64_t) regs->ebp;
1058*4d495c6eSApple OSS Distributions uint64_t prevPC = 0ULL;
1059*4d495c6eSApple OSS Distributions uint64_t prevFP = 0ULL;
1060*4d495c6eSApple OSS Distributions uint64_t kernStackMin = thread->kernel_stack;
1061*4d495c6eSApple OSS Distributions uint64_t kernStackMax = kernStackMin + kernel_stack_size;
1062*4d495c6eSApple OSS Distributions mach_msg_type_number_t ct = *start_idx;
1063*4d495c6eSApple OSS Distributions kern_return_t kr = KERN_FAILURE;
1064*4d495c6eSApple OSS Distributions
1065*4d495c6eSApple OSS Distributions if (ct >= max_idx) {
1066*4d495c6eSApple OSS Distributions return KERN_RESOURCE_SHORTAGE; // no frames traced
1067*4d495c6eSApple OSS Distributions }
1068*4d495c6eSApple OSS Distributions frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);
1069*4d495c6eSApple OSS Distributions
1070*4d495c6eSApple OSS Distributions // build a backtrace of this 32 bit state.
1071*4d495c6eSApple OSS Distributions while (VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax)) {
1072*4d495c6eSApple OSS Distributions cframe_t *fp = (cframe_t *) (uintptr_t) currFP;
1073*4d495c6eSApple OSS Distributions
1074*4d495c6eSApple OSS Distributions if (!currFP) {
1075*4d495c6eSApple OSS Distributions currPC = 0;
1076*4d495c6eSApple OSS Distributions break;
1077*4d495c6eSApple OSS Distributions }
1078*4d495c6eSApple OSS Distributions
1079*4d495c6eSApple OSS Distributions if (ct >= max_idx) {
1080*4d495c6eSApple OSS Distributions *start_idx = ct;
1081*4d495c6eSApple OSS Distributions return KERN_RESOURCE_SHORTAGE;
1082*4d495c6eSApple OSS Distributions }
1083*4d495c6eSApple OSS Distributions
1084*4d495c6eSApple OSS Distributions /* read our caller */
1085*4d495c6eSApple OSS Distributions if (supervisor) {
1086*4d495c6eSApple OSS Distributions kr = chudxnu_kern_read(&tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
1087*4d495c6eSApple OSS Distributions } else {
1088*4d495c6eSApple OSS Distributions kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
1089*4d495c6eSApple OSS Distributions }
1090*4d495c6eSApple OSS Distributions
1091*4d495c6eSApple OSS Distributions if (kr != KERN_SUCCESS) {
1092*4d495c6eSApple OSS Distributions currPC = 0ULL;
1093*4d495c6eSApple OSS Distributions break;
1094*4d495c6eSApple OSS Distributions }
1095*4d495c6eSApple OSS Distributions
1096*4d495c6eSApple OSS Distributions currPC = (uint64_t) tmpWord; // promote 32 bit address
1097*4d495c6eSApple OSS Distributions
1098*4d495c6eSApple OSS Distributions /*
1099*4d495c6eSApple OSS Distributions * retrive contents of the frame pointer and advance to the next stack
1100*4d495c6eSApple OSS Distributions * frame if it's valid
1101*4d495c6eSApple OSS Distributions */
1102*4d495c6eSApple OSS Distributions prevFP = 0;
1103*4d495c6eSApple OSS Distributions if (supervisor) {
1104*4d495c6eSApple OSS Distributions kr = chudxnu_kern_read(&tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
1105*4d495c6eSApple OSS Distributions } else {
1106*4d495c6eSApple OSS Distributions kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
1107*4d495c6eSApple OSS Distributions }
1108*4d495c6eSApple OSS Distributions prevFP = (uint64_t) tmpWord; // promote 32 bit address
1109*4d495c6eSApple OSS Distributions
1110*4d495c6eSApple OSS Distributions if (prevFP) {
1111*4d495c6eSApple OSS Distributions frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);
1112*4d495c6eSApple OSS Distributions prevPC = currPC;
1113*4d495c6eSApple OSS Distributions }
1114*4d495c6eSApple OSS Distributions if (prevFP < currFP) {
1115*4d495c6eSApple OSS Distributions break;
1116*4d495c6eSApple OSS Distributions } else {
1117*4d495c6eSApple OSS Distributions currFP = prevFP;
1118*4d495c6eSApple OSS Distributions }
1119*4d495c6eSApple OSS Distributions }
1120*4d495c6eSApple OSS Distributions
1121*4d495c6eSApple OSS Distributions *start_idx = ct;
1122*4d495c6eSApple OSS Distributions return KERN_SUCCESS;
1123*4d495c6eSApple OSS Distributions }
1124*4d495c6eSApple OSS Distributions
1125*4d495c6eSApple OSS Distributions static kern_return_t
1126*4d495c6eSApple OSS Distributions do_backtrace64(
1127*4d495c6eSApple OSS Distributions task_t task,
1128*4d495c6eSApple OSS Distributions thread_t thread,
1129*4d495c6eSApple OSS Distributions x86_saved_state64_t *regs,
1130*4d495c6eSApple OSS Distributions uint64_t *frames,
1131*4d495c6eSApple OSS Distributions mach_msg_type_number_t *start_idx,
1132*4d495c6eSApple OSS Distributions mach_msg_type_number_t max_idx,
1133*4d495c6eSApple OSS Distributions boolean_t supervisor)
1134*4d495c6eSApple OSS Distributions {
1135*4d495c6eSApple OSS Distributions uint64_t currPC = regs->isf.rip;
1136*4d495c6eSApple OSS Distributions uint64_t currFP = regs->rbp;
1137*4d495c6eSApple OSS Distributions uint64_t prevPC = 0ULL;
1138*4d495c6eSApple OSS Distributions uint64_t prevFP = 0ULL;
1139*4d495c6eSApple OSS Distributions uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
1140*4d495c6eSApple OSS Distributions uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
1141*4d495c6eSApple OSS Distributions mach_msg_type_number_t ct = *start_idx;
1142*4d495c6eSApple OSS Distributions kern_return_t kr = KERN_FAILURE;
1143*4d495c6eSApple OSS Distributions
1144*4d495c6eSApple OSS Distributions if (*start_idx >= max_idx) {
1145*4d495c6eSApple OSS Distributions return KERN_RESOURCE_SHORTAGE; // no frames traced
1146*4d495c6eSApple OSS Distributions }
1147*4d495c6eSApple OSS Distributions frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);
1148*4d495c6eSApple OSS Distributions
1149*4d495c6eSApple OSS Distributions // build a backtrace of this 32 bit state.
1150*4d495c6eSApple OSS Distributions while (VALID_STACK_ADDRESS64(supervisor, currFP, kernStackMin, kernStackMax)) {
1151*4d495c6eSApple OSS Distributions // this is the address where caller lives in the user thread
1152*4d495c6eSApple OSS Distributions uint64_t caller = currFP + sizeof(uint64_t);
1153*4d495c6eSApple OSS Distributions
1154*4d495c6eSApple OSS Distributions if (!currFP) {
1155*4d495c6eSApple OSS Distributions currPC = 0;
1156*4d495c6eSApple OSS Distributions break;
1157*4d495c6eSApple OSS Distributions }
1158*4d495c6eSApple OSS Distributions
1159*4d495c6eSApple OSS Distributions if (ct >= max_idx) {
1160*4d495c6eSApple OSS Distributions *start_idx = ct;
1161*4d495c6eSApple OSS Distributions return KERN_RESOURCE_SHORTAGE;
1162*4d495c6eSApple OSS Distributions }
1163*4d495c6eSApple OSS Distributions
1164*4d495c6eSApple OSS Distributions /* read our caller */
1165*4d495c6eSApple OSS Distributions if (supervisor) {
1166*4d495c6eSApple OSS Distributions kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(uint64_t));
1167*4d495c6eSApple OSS Distributions } else {
1168*4d495c6eSApple OSS Distributions kr = chudxnu_task_read(task, &currPC, caller, sizeof(uint64_t));
1169*4d495c6eSApple OSS Distributions }
1170*4d495c6eSApple OSS Distributions
1171*4d495c6eSApple OSS Distributions if (kr != KERN_SUCCESS) {
1172*4d495c6eSApple OSS Distributions currPC = 0ULL;
1173*4d495c6eSApple OSS Distributions break;
1174*4d495c6eSApple OSS Distributions }
1175*4d495c6eSApple OSS Distributions
1176*4d495c6eSApple OSS Distributions /*
1177*4d495c6eSApple OSS Distributions * retrive contents of the frame pointer and advance to the next stack
1178*4d495c6eSApple OSS Distributions * frame if it's valid
1179*4d495c6eSApple OSS Distributions */
1180*4d495c6eSApple OSS Distributions prevFP = 0;
1181*4d495c6eSApple OSS Distributions if (supervisor) {
1182*4d495c6eSApple OSS Distributions kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(uint64_t));
1183*4d495c6eSApple OSS Distributions } else {
1184*4d495c6eSApple OSS Distributions kr = chudxnu_task_read(task, &prevFP, currFP, sizeof(uint64_t));
1185*4d495c6eSApple OSS Distributions }
1186*4d495c6eSApple OSS Distributions
1187*4d495c6eSApple OSS Distributions if (VALID_STACK_ADDRESS64(supervisor, prevFP, kernStackMin, kernStackMax)) {
1188*4d495c6eSApple OSS Distributions frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);
1189*4d495c6eSApple OSS Distributions prevPC = currPC;
1190*4d495c6eSApple OSS Distributions }
1191*4d495c6eSApple OSS Distributions if (prevFP < currFP) {
1192*4d495c6eSApple OSS Distributions break;
1193*4d495c6eSApple OSS Distributions } else {
1194*4d495c6eSApple OSS Distributions currFP = prevFP;
1195*4d495c6eSApple OSS Distributions }
1196*4d495c6eSApple OSS Distributions }
1197*4d495c6eSApple OSS Distributions
1198*4d495c6eSApple OSS Distributions *start_idx = ct;
1199*4d495c6eSApple OSS Distributions return KERN_SUCCESS;
1200*4d495c6eSApple OSS Distributions }
1201*4d495c6eSApple OSS Distributions
1202*4d495c6eSApple OSS Distributions static kern_return_t
1203*4d495c6eSApple OSS Distributions chudxnu_thread_get_callstack64_internal(
1204*4d495c6eSApple OSS Distributions thread_t thread,
1205*4d495c6eSApple OSS Distributions uint64_t *callstack,
1206*4d495c6eSApple OSS Distributions mach_msg_type_number_t *count,
1207*4d495c6eSApple OSS Distributions boolean_t user_only,
1208*4d495c6eSApple OSS Distributions boolean_t kern_only)
1209*4d495c6eSApple OSS Distributions {
1210*4d495c6eSApple OSS Distributions kern_return_t kr = KERN_FAILURE;
1211*4d495c6eSApple OSS Distributions task_t task = get_threadtask(thread);
1212*4d495c6eSApple OSS Distributions uint64_t currPC = 0ULL;
1213*4d495c6eSApple OSS Distributions boolean_t supervisor = FALSE;
1214*4d495c6eSApple OSS Distributions mach_msg_type_number_t bufferIndex = 0;
1215*4d495c6eSApple OSS Distributions mach_msg_type_number_t bufferMaxIndex = *count;
1216*4d495c6eSApple OSS Distributions x86_saved_state_t *tagged_regs = NULL; // kernel register state
1217*4d495c6eSApple OSS Distributions x86_saved_state64_t *regs64 = NULL;
1218*4d495c6eSApple OSS Distributions x86_saved_state32_t *regs32 = NULL;
1219*4d495c6eSApple OSS Distributions x86_saved_state32_t *u_regs32 = NULL;
1220*4d495c6eSApple OSS Distributions x86_saved_state64_t *u_regs64 = NULL;
1221*4d495c6eSApple OSS Distributions struct x86_kernel_state *kregs = NULL;
1222*4d495c6eSApple OSS Distributions
1223*4d495c6eSApple OSS Distributions if (ml_at_interrupt_context()) {
1224*4d495c6eSApple OSS Distributions if (user_only) {
1225*4d495c6eSApple OSS Distributions /* can't backtrace user state on interrupt stack. */
1226*4d495c6eSApple OSS Distributions return KERN_FAILURE;
1227*4d495c6eSApple OSS Distributions }
1228*4d495c6eSApple OSS Distributions
1229*4d495c6eSApple OSS Distributions /* backtracing at interrupt context? */
1230*4d495c6eSApple OSS Distributions if (thread == current_thread() && current_cpu_datap()->cpu_int_state) {
1231*4d495c6eSApple OSS Distributions /*
1232*4d495c6eSApple OSS Distributions * Locate the registers for the interrupted thread, assuming it is
1233*4d495c6eSApple OSS Distributions * current_thread().
1234*4d495c6eSApple OSS Distributions */
1235*4d495c6eSApple OSS Distributions tagged_regs = current_cpu_datap()->cpu_int_state;
1236*4d495c6eSApple OSS Distributions
1237*4d495c6eSApple OSS Distributions if (is_saved_state64(tagged_regs)) {
1238*4d495c6eSApple OSS Distributions /* 64 bit registers */
1239*4d495c6eSApple OSS Distributions regs64 = saved_state64(tagged_regs);
1240*4d495c6eSApple OSS Distributions supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
1241*4d495c6eSApple OSS Distributions } else {
1242*4d495c6eSApple OSS Distributions /* 32 bit registers */
1243*4d495c6eSApple OSS Distributions regs32 = saved_state32(tagged_regs);
1244*4d495c6eSApple OSS Distributions supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
1245*4d495c6eSApple OSS Distributions }
1246*4d495c6eSApple OSS Distributions }
1247*4d495c6eSApple OSS Distributions }
1248*4d495c6eSApple OSS Distributions
1249*4d495c6eSApple OSS Distributions if (!ml_at_interrupt_context() && kernel_task == task) {
1250*4d495c6eSApple OSS Distributions if (!thread->kernel_stack) {
1251*4d495c6eSApple OSS Distributions return KERN_FAILURE;
1252*4d495c6eSApple OSS Distributions }
1253*4d495c6eSApple OSS Distributions
1254*4d495c6eSApple OSS Distributions // Kernel thread not at interrupt context
1255*4d495c6eSApple OSS Distributions kregs = (struct x86_kernel_state *)NULL;
1256*4d495c6eSApple OSS Distributions
1257*4d495c6eSApple OSS Distributions // nofault read of the thread->kernel_stack pointer
1258*4d495c6eSApple OSS Distributions if (KERN_SUCCESS != chudxnu_kern_read(&kregs, (vm_offset_t)&(thread->kernel_stack), sizeof(void *))) {
1259*4d495c6eSApple OSS Distributions return KERN_FAILURE;
1260*4d495c6eSApple OSS Distributions }
1261*4d495c6eSApple OSS Distributions
1262*4d495c6eSApple OSS Distributions // Adjust to find the saved kernel state
1263*4d495c6eSApple OSS Distributions kregs = STACK_IKS((vm_offset_t)(uintptr_t)kregs);
1264*4d495c6eSApple OSS Distributions
1265*4d495c6eSApple OSS Distributions supervisor = TRUE;
1266*4d495c6eSApple OSS Distributions } else if (!tagged_regs) {
1267*4d495c6eSApple OSS Distributions /*
1268*4d495c6eSApple OSS Distributions * not at interrupt context, or tracing a different thread than
1269*4d495c6eSApple OSS Distributions * current_thread() at interrupt context
1270*4d495c6eSApple OSS Distributions */
1271*4d495c6eSApple OSS Distributions tagged_regs = USER_STATE(thread);
1272*4d495c6eSApple OSS Distributions if (is_saved_state64(tagged_regs)) {
1273*4d495c6eSApple OSS Distributions /* 64 bit registers */
1274*4d495c6eSApple OSS Distributions regs64 = saved_state64(tagged_regs);
1275*4d495c6eSApple OSS Distributions supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
1276*4d495c6eSApple OSS Distributions } else {
1277*4d495c6eSApple OSS Distributions /* 32 bit registers */
1278*4d495c6eSApple OSS Distributions regs32 = saved_state32(tagged_regs);
1279*4d495c6eSApple OSS Distributions supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
1280*4d495c6eSApple OSS Distributions }
1281*4d495c6eSApple OSS Distributions }
1282*4d495c6eSApple OSS Distributions
1283*4d495c6eSApple OSS Distributions *count = 0;
1284*4d495c6eSApple OSS Distributions
1285*4d495c6eSApple OSS Distributions if (supervisor) {
1286*4d495c6eSApple OSS Distributions // the caller only wants a user callstack.
1287*4d495c6eSApple OSS Distributions if (user_only) {
1288*4d495c6eSApple OSS Distributions // bail - we've only got kernel state
1289*4d495c6eSApple OSS Distributions return KERN_FAILURE;
1290*4d495c6eSApple OSS Distributions }
1291*4d495c6eSApple OSS Distributions } else {
1292*4d495c6eSApple OSS Distributions // regs32(64) is not in supervisor mode.
1293*4d495c6eSApple OSS Distributions u_regs32 = regs32;
1294*4d495c6eSApple OSS Distributions u_regs64 = regs64;
1295*4d495c6eSApple OSS Distributions regs32 = NULL;
1296*4d495c6eSApple OSS Distributions regs64 = NULL;
1297*4d495c6eSApple OSS Distributions }
1298*4d495c6eSApple OSS Distributions
1299*4d495c6eSApple OSS Distributions if (user_only) {
1300*4d495c6eSApple OSS Distributions /* we only want to backtrace the user mode */
1301*4d495c6eSApple OSS Distributions if (!(u_regs32 || u_regs64)) {
1302*4d495c6eSApple OSS Distributions /* no user state to look at */
1303*4d495c6eSApple OSS Distributions return KERN_FAILURE;
1304*4d495c6eSApple OSS Distributions }
1305*4d495c6eSApple OSS Distributions }
1306*4d495c6eSApple OSS Distributions
1307*4d495c6eSApple OSS Distributions /*
1308*4d495c6eSApple OSS Distributions * Order of preference for top of stack:
1309*4d495c6eSApple OSS Distributions * 64 bit kernel state (not likely)
1310*4d495c6eSApple OSS Distributions * 32 bit kernel state
1311*4d495c6eSApple OSS Distributions * 64 bit user land state
1312*4d495c6eSApple OSS Distributions * 32 bit user land state
1313*4d495c6eSApple OSS Distributions */
1314*4d495c6eSApple OSS Distributions
1315*4d495c6eSApple OSS Distributions if (kregs) {
1316*4d495c6eSApple OSS Distributions /*
1317*4d495c6eSApple OSS Distributions * nofault read of the registers from the kernel stack (as they can
1318*4d495c6eSApple OSS Distributions * disappear on the fly).
1319*4d495c6eSApple OSS Distributions */
1320*4d495c6eSApple OSS Distributions
1321*4d495c6eSApple OSS Distributions if (KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(kregs->k_rip), sizeof(uint64_t))) {
1322*4d495c6eSApple OSS Distributions return KERN_FAILURE;
1323*4d495c6eSApple OSS Distributions }
1324*4d495c6eSApple OSS Distributions } else if (regs64) {
1325*4d495c6eSApple OSS Distributions currPC = regs64->isf.rip;
1326*4d495c6eSApple OSS Distributions } else if (regs32) {
1327*4d495c6eSApple OSS Distributions currPC = (uint64_t) regs32->eip;
1328*4d495c6eSApple OSS Distributions } else if (u_regs64) {
1329*4d495c6eSApple OSS Distributions currPC = u_regs64->isf.rip;
1330*4d495c6eSApple OSS Distributions } else if (u_regs32) {
1331*4d495c6eSApple OSS Distributions currPC = (uint64_t) u_regs32->eip;
1332*4d495c6eSApple OSS Distributions }
1333*4d495c6eSApple OSS Distributions
1334*4d495c6eSApple OSS Distributions if (!currPC) {
1335*4d495c6eSApple OSS Distributions /* no top of the stack, bail out */
1336*4d495c6eSApple OSS Distributions return KERN_FAILURE;
1337*4d495c6eSApple OSS Distributions }
1338*4d495c6eSApple OSS Distributions
1339*4d495c6eSApple OSS Distributions bufferIndex = 0;
1340*4d495c6eSApple OSS Distributions
1341*4d495c6eSApple OSS Distributions if (bufferMaxIndex < 1) {
1342*4d495c6eSApple OSS Distributions *count = 0;
1343*4d495c6eSApple OSS Distributions return KERN_RESOURCE_SHORTAGE;
1344*4d495c6eSApple OSS Distributions }
1345*4d495c6eSApple OSS Distributions
1346*4d495c6eSApple OSS Distributions /* backtrace kernel */
1347*4d495c6eSApple OSS Distributions if (kregs) {
1348*4d495c6eSApple OSS Distributions addr64_t address = 0ULL;
1349*4d495c6eSApple OSS Distributions size_t size = 0UL;
1350*4d495c6eSApple OSS Distributions
1351*4d495c6eSApple OSS Distributions // do the backtrace
1352*4d495c6eSApple OSS Distributions kr = do_kernel_backtrace(thread, kregs, callstack, &bufferIndex, bufferMaxIndex);
1353*4d495c6eSApple OSS Distributions
1354*4d495c6eSApple OSS Distributions // and do a nofault read of (r|e)sp
1355*4d495c6eSApple OSS Distributions uint64_t rsp = 0ULL;
1356*4d495c6eSApple OSS Distributions size = sizeof(uint64_t);
1357*4d495c6eSApple OSS Distributions
1358*4d495c6eSApple OSS Distributions if (KERN_SUCCESS != chudxnu_kern_read(&address, (vm_offset_t)&(kregs->k_rsp), size)) {
1359*4d495c6eSApple OSS Distributions address = 0ULL;
1360*4d495c6eSApple OSS Distributions }
1361*4d495c6eSApple OSS Distributions
1362*4d495c6eSApple OSS Distributions if (address && KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t)address, size) && bufferIndex < bufferMaxIndex) {
1363*4d495c6eSApple OSS Distributions callstack[bufferIndex++] = (uint64_t)rsp;
1364*4d495c6eSApple OSS Distributions }
1365*4d495c6eSApple OSS Distributions } else if (regs64) {
1366*4d495c6eSApple OSS Distributions uint64_t rsp = 0ULL;
1367*4d495c6eSApple OSS Distributions
1368*4d495c6eSApple OSS Distributions // backtrace the 64bit side.
1369*4d495c6eSApple OSS Distributions kr = do_backtrace64(task, thread, regs64, callstack, &bufferIndex,
1370*4d495c6eSApple OSS Distributions bufferMaxIndex - 1, TRUE);
1371*4d495c6eSApple OSS Distributions
1372*4d495c6eSApple OSS Distributions if (KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t) regs64->isf.rsp, sizeof(uint64_t)) &&
1373*4d495c6eSApple OSS Distributions bufferIndex < bufferMaxIndex) {
1374*4d495c6eSApple OSS Distributions callstack[bufferIndex++] = rsp;
1375*4d495c6eSApple OSS Distributions }
1376*4d495c6eSApple OSS Distributions } else if (regs32) {
1377*4d495c6eSApple OSS Distributions uint32_t esp = 0UL;
1378*4d495c6eSApple OSS Distributions
1379*4d495c6eSApple OSS Distributions // backtrace the 32bit side.
1380*4d495c6eSApple OSS Distributions kr = do_backtrace32(task, thread, regs32, callstack, &bufferIndex,
1381*4d495c6eSApple OSS Distributions bufferMaxIndex - 1, TRUE);
1382*4d495c6eSApple OSS Distributions
1383*4d495c6eSApple OSS Distributions if (KERN_SUCCESS == chudxnu_kern_read(&esp, (vm_offset_t) regs32->uesp, sizeof(uint32_t)) &&
1384*4d495c6eSApple OSS Distributions bufferIndex < bufferMaxIndex) {
1385*4d495c6eSApple OSS Distributions callstack[bufferIndex++] = (uint64_t) esp;
1386*4d495c6eSApple OSS Distributions }
1387*4d495c6eSApple OSS Distributions } else if (u_regs64 && !kern_only) {
1388*4d495c6eSApple OSS Distributions /* backtrace user land */
1389*4d495c6eSApple OSS Distributions uint64_t rsp = 0ULL;
1390*4d495c6eSApple OSS Distributions
1391*4d495c6eSApple OSS Distributions kr = do_backtrace64(task, thread, u_regs64, callstack, &bufferIndex,
1392*4d495c6eSApple OSS Distributions bufferMaxIndex - 1, FALSE);
1393*4d495c6eSApple OSS Distributions
1394*4d495c6eSApple OSS Distributions if (KERN_SUCCESS == chudxnu_task_read(task, &rsp, (addr64_t) u_regs64->isf.rsp, sizeof(uint64_t)) &&
1395*4d495c6eSApple OSS Distributions bufferIndex < bufferMaxIndex) {
1396*4d495c6eSApple OSS Distributions callstack[bufferIndex++] = rsp;
1397*4d495c6eSApple OSS Distributions }
1398*4d495c6eSApple OSS Distributions } else if (u_regs32 && !kern_only) {
1399*4d495c6eSApple OSS Distributions uint32_t esp = 0UL;
1400*4d495c6eSApple OSS Distributions
1401*4d495c6eSApple OSS Distributions kr = do_backtrace32(task, thread, u_regs32, callstack, &bufferIndex,
1402*4d495c6eSApple OSS Distributions bufferMaxIndex - 1, FALSE);
1403*4d495c6eSApple OSS Distributions
1404*4d495c6eSApple OSS Distributions if (KERN_SUCCESS == chudxnu_task_read(task, &esp, (addr64_t) u_regs32->uesp, sizeof(uint32_t)) &&
1405*4d495c6eSApple OSS Distributions bufferIndex < bufferMaxIndex) {
1406*4d495c6eSApple OSS Distributions callstack[bufferIndex++] = (uint64_t) esp;
1407*4d495c6eSApple OSS Distributions }
1408*4d495c6eSApple OSS Distributions }
1409*4d495c6eSApple OSS Distributions
1410*4d495c6eSApple OSS Distributions *count = bufferIndex;
1411*4d495c6eSApple OSS Distributions return kr;
1412*4d495c6eSApple OSS Distributions }
1413*4d495c6eSApple OSS Distributions
1414*4d495c6eSApple OSS Distributions __private_extern__
1415*4d495c6eSApple OSS Distributions kern_return_t
1416*4d495c6eSApple OSS Distributions chudxnu_thread_get_callstack64_kperf(
1417*4d495c6eSApple OSS Distributions thread_t thread,
1418*4d495c6eSApple OSS Distributions uint64_t *callstack,
1419*4d495c6eSApple OSS Distributions mach_msg_type_number_t *count,
1420*4d495c6eSApple OSS Distributions boolean_t is_user)
1421*4d495c6eSApple OSS Distributions {
1422*4d495c6eSApple OSS Distributions return chudxnu_thread_get_callstack64_internal(thread, callstack, count, is_user, !is_user);
1423*4d495c6eSApple OSS Distributions }
1424*4d495c6eSApple OSS Distributions #else /* !__arm64__ && !__x86_64__ */
1425*4d495c6eSApple OSS Distributions #error kperf: unsupported architecture
1426*4d495c6eSApple OSS Distributions #endif /* !__arm64__ && !__x86_64__ */
1427