1 /*
2 * Copyright (c) 2011-2022 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* Collect kernel callstacks */
30
31 #include <mach/mach_types.h>
32 #include <kern/thread.h>
33 #include <kern/backtrace.h>
34 #include <kern/cambria_layout.h>
35 #include <vm/vm_map.h>
36 #include <kperf/buffer.h>
37 #include <kperf/context.h>
38 #include <kperf/callstack.h>
39 #include <kperf/ast.h>
40 #include <sys/errno.h>
41
42 #if defined(__arm64__)
43 #include <arm/cpu_data.h>
44 #include <arm/cpu_data_internal.h>
45 #endif
46
static void
callstack_fixup_user(struct kp_ucallstack *cs, thread_t thread)
{
	/*
	 * Append one "fixup" word to the user callstack so symbolicators can
	 * repair the top frame if the thread was sampled mid-prologue: on
	 * x86 this is the word at the user stack pointer, on arm64 the saved
	 * LR.  On failure to read thread state, a zero is appended instead.
	 */
	uint64_t fixup_val = 0;
	assert(cs->kpuc_nframes < MAX_UCALLSTACK_FRAMES);

#if defined(__x86_64__)
	user_addr_t sp_user;
	bool user_64;
	x86_saved_state_t *state;

	state = get_user_regs(thread);
	if (!state) {
		goto out;
	}

	user_64 = is_saved_state64(state);
	if (user_64) {
		sp_user = saved_state64(state)->isf.rsp;
	} else {
		sp_user = saved_state32(state)->uesp;
	}

	/*
	 * copyin only works against the current address space; go through the
	 * target task's VM map for other threads.  Errors are ignored and
	 * leave fixup_val at 0.
	 */
	if (thread == current_thread()) {
		(void)copyin(sp_user, (char *)&fixup_val,
		    user_64 ? sizeof(uint64_t) : sizeof(uint32_t));
	} else {
		(void)vm_map_read_user(get_task_map(get_threadtask(thread)), sp_user,
		    &fixup_val, user_64 ? sizeof(uint64_t) : sizeof(uint32_t));
	}

#elif defined(__arm64__)

	struct arm_saved_state *state = get_user_regs(thread);
	if (!state) {
		goto out;
	}

	/* encode thumb mode into low bit of PC */
	if (is_saved_state32(state) && (get_saved_state_cpsr(state) & PSR_TF)) {
		cs->kpuc_frames[0] |= 1ULL;
	}


	fixup_val = get_saved_state_lr(state);

#else
#error "callstack_fixup_user: unsupported architecture"
#endif

out:
	cs->kpuc_frames[cs->kpuc_nframes++] = fixup_val;
}
100
101 #if defined(__x86_64__)
102
103 __attribute__((used))
104 static kern_return_t
interrupted_kernel_sp_value(uintptr_t * sp_val)105 interrupted_kernel_sp_value(uintptr_t *sp_val)
106 {
107 x86_saved_state_t *state;
108 uintptr_t sp;
109 bool state_64;
110 uint64_t cs;
111 uintptr_t top, bottom;
112
113 state = current_cpu_datap()->cpu_int_state;
114 if (!state) {
115 return KERN_FAILURE;
116 }
117
118 state_64 = is_saved_state64(state);
119
120 if (state_64) {
121 cs = saved_state64(state)->isf.cs;
122 } else {
123 cs = saved_state32(state)->cs;
124 }
125 /* return early if interrupted a thread in user space */
126 if ((cs & SEL_PL) == SEL_PL_U) {
127 return KERN_FAILURE;
128 }
129
130 if (state_64) {
131 sp = saved_state64(state)->isf.rsp;
132 } else {
133 sp = saved_state32(state)->uesp;
134 }
135
136 /* make sure the stack pointer is pointing somewhere in this stack */
137 bottom = current_thread()->kernel_stack;
138 top = bottom + kernel_stack_size;
139 if (sp >= bottom && sp < top) {
140 return KERN_FAILURE;
141 }
142
143 *sp_val = *(uintptr_t *)sp;
144 return KERN_SUCCESS;
145 }
146
147 #elif defined(__arm64__)
148
149 __attribute__((used))
150 static kern_return_t
interrupted_kernel_lr(uintptr_t * lr)151 interrupted_kernel_lr(uintptr_t *lr)
152 {
153 struct arm_saved_state *state;
154
155 state = getCpuDatap()->cpu_int_state;
156
157 /* return early if interrupted a thread in user space */
158 if (PSR64_IS_USER(get_saved_state_cpsr(state))) {
159 return KERN_FAILURE;
160 }
161
162 *lr = get_saved_state_lr(state);
163 return KERN_SUCCESS;
164 }
165 #else /* defined(__arm64__) */
166 #error "interrupted_kernel_{sp,lr}: unsupported architecture"
167 #endif /* !defined(__arm64__) */
168
169
static void
callstack_fixup_interrupted(struct kp_kcallstack *cs)
{
	/*
	 * Append a fixup entry to an interrupt-time kernel callstack: the
	 * word at the interrupted SP (x86) or the interrupted LR (arm64).
	 * On release kernels the entry is always zero.
	 */
	uintptr_t fixup_val = 0;
	assert(cs->kpkc_nframes < MAX_KCALLSTACK_FRAMES);

	/*
	 * Only provide arbitrary data on development or debug kernels.
	 */
#if DEVELOPMENT || DEBUG
#if defined(__x86_64__)
	(void)interrupted_kernel_sp_value(&fixup_val);
#elif defined(__arm64__)
	(void)interrupted_kernel_lr(&fixup_val);
#endif /* defined(__x86_64__) */
#endif /* DEVELOPMENT || DEBUG */

	assert(cs->kpkc_flags & CALLSTACK_KERNEL);
	cs->kpkc_frames[cs->kpkc_nframes++] = fixup_val;
}
190
191 void
kperf_continuation_sample(struct kp_kcallstack * cs,struct kperf_context * context)192 kperf_continuation_sample(struct kp_kcallstack *cs, struct kperf_context *context)
193 {
194 thread_t thread;
195
196 assert(cs != NULL);
197 assert(context != NULL);
198
199 thread = context->cur_thread;
200 assert(thread != NULL);
201 assert(thread->continuation != NULL);
202
203 cs->kpkc_flags = CALLSTACK_CONTINUATION | CALLSTACK_VALID | CALLSTACK_KERNEL;
204 #ifdef __LP64__
205 cs->kpkc_flags |= CALLSTACK_64BIT;
206 #endif
207
208 cs->kpkc_nframes = 1;
209 cs->kpkc_frames[0] = VM_KERNEL_UNSLIDE(thread->continuation);
210 }
211
212 void
kperf_backtrace_sample(struct kp_kcallstack * cs,struct kperf_context * context)213 kperf_backtrace_sample(struct kp_kcallstack *cs, struct kperf_context *context)
214 {
215 assert(cs != NULL);
216 assert(context != NULL);
217 assert(context->cur_thread == current_thread());
218
219 cs->kpkc_flags = CALLSTACK_KERNEL | CALLSTACK_KERNEL_WORDS;
220 #ifdef __LP64__
221 cs->kpkc_flags |= CALLSTACK_64BIT;
222 #endif
223
224 BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_START, 1);
225
226 backtrace_info_t btinfo = BTI_NONE;
227 struct backtrace_control ctl = {
228 .btc_frame_addr = (uintptr_t)context->starting_fp,
229 };
230 cs->kpkc_nframes = backtrace(cs->kpkc_word_frames, cs->kpkc_nframes - 1,
231 &ctl, &btinfo);
232 if (cs->kpkc_nframes > 0) {
233 cs->kpkc_flags |= CALLSTACK_VALID;
234 /*
235 * Fake the value pointed to by the stack pointer or the link
236 * register for symbolicators.
237 */
238 cs->kpkc_word_frames[cs->kpkc_nframes + 1] = 0;
239 cs->kpkc_nframes += 1;
240 }
241 if ((btinfo & BTI_TRUNCATED)) {
242 cs->kpkc_flags |= CALLSTACK_TRUNCATED;
243 }
244
245 BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_END, cs->kpkc_nframes);
246 }
247
248 kern_return_t chudxnu_thread_get_callstack64_kperf(thread_t thread,
249 uint64_t *callStack, mach_msg_type_number_t *count,
250 boolean_t user_only);
251
void
kperf_kcallstack_sample(struct kp_kcallstack *cs, struct kperf_context *context)
{
	/*
	 * Capture a kernel callstack for the thread in `context`.  On entry
	 * cs->kpkc_nframes is the caller-requested maximum depth; on return
	 * it holds the number of captured frames (0 on failure).  Validity
	 * is signalled through CALLSTACK_VALID in kpkc_flags.
	 */
	thread_t thread;

	assert(cs != NULL);
	assert(context != NULL);
	assert(cs->kpkc_nframes <= MAX_KCALLSTACK_FRAMES);

	thread = context->cur_thread;
	assert(thread != NULL);

	BUF_INFO(PERF_CS_KSAMPLE | DBG_FUNC_START, (uintptr_t)thread_tid(thread),
	    cs->kpkc_nframes);

	cs->kpkc_flags = CALLSTACK_KERNEL;
#ifdef __LP64__
	cs->kpkc_flags |= CALLSTACK_64BIT;
#endif

	if (ml_at_interrupt_context()) {
		/*
		 * Interrupt context: walk the interrupted state directly with
		 * the modern backtracer, reserving one slot for the fixup
		 * entry, and store frames as native words.
		 */
		assert(thread == current_thread());
		cs->kpkc_flags |= CALLSTACK_KERNEL_WORDS;
		backtrace_info_t btinfo = BTI_NONE;
		struct backtrace_control ctl = { .btc_flags = BTF_KERN_INTERRUPTED, };
		cs->kpkc_nframes = backtrace(cs->kpkc_word_frames, cs->kpkc_nframes - 1,
		    &ctl, &btinfo);
		if (cs->kpkc_nframes != 0) {
			callstack_fixup_interrupted(cs);
		}
		if ((btinfo & BTI_TRUNCATED)) {
			cs->kpkc_flags |= CALLSTACK_TRUNCATED;
		}
	} else {
		/*
		 * Rely on legacy CHUD backtracer to backtrace kernel stacks on
		 * other threads.
		 */
		kern_return_t kr;
		kr = chudxnu_thread_get_callstack64_kperf(thread,
		    cs->kpkc_frames, &cs->kpkc_nframes, FALSE);
		if (kr == KERN_SUCCESS) {
			cs->kpkc_flags |= CALLSTACK_VALID;
		} else if (kr == KERN_RESOURCE_SHORTAGE) {
			/* ran out of buffer space: the partial stack is still usable */
			cs->kpkc_flags |= CALLSTACK_VALID;
			cs->kpkc_flags |= CALLSTACK_TRUNCATED;
		} else {
			cs->kpkc_nframes = 0;
		}
	}

	/*
	 * NOTE(review): in the interrupt-context path CALLSTACK_VALID is
	 * never set, so this error event fires for every interrupt-time
	 * sample -- confirm whether that is intended.
	 */
	if (!(cs->kpkc_flags & CALLSTACK_VALID)) {
		BUF_INFO(PERF_CS_ERROR, ERR_GETSTACK);
	}

	BUF_INFO(PERF_CS_KSAMPLE | DBG_FUNC_END, (uintptr_t)thread_tid(thread),
	    cs->kpkc_flags, cs->kpkc_nframes);
}
310
void
kperf_ucallstack_sample(struct kp_ucallstack *cs, struct kperf_context *context)
{
	/*
	 * Capture a user callstack for the thread in `context`, optionally
	 * followed by an async (e.g. Swift continuation) stack appended
	 * after the main frames.  Must run with interrupts enabled because
	 * user memory may fault.  On entry cs->kpuc_nframes is the requested
	 * maximum depth; on return it is the number of main frames captured.
	 */
	assert(ml_get_interrupts_enabled() == TRUE);

	thread_t thread = context->cur_thread;
	assert(thread != NULL);

	BUF_INFO(PERF_CS_USAMPLE | DBG_FUNC_START,
	    (uintptr_t)thread_tid(thread), cs->kpuc_nframes);

	struct backtrace_user_info btinfo = BTUINFO_INIT;
	/*
	 * Leave space for the fixup information.
	 */
	unsigned int maxnframes = cs->kpuc_nframes - 1;
	struct backtrace_control ctl = { .btc_user_thread = thread, };
	unsigned int nframes = backtrace_user(cs->kpuc_frames, maxnframes, &ctl,
	    &btinfo);
	cs->kpuc_nframes = MIN(maxnframes, nframes);

	/*
	 * NOTE(review): CALLSTACK_KERNEL_WORDS here appears to mark the
	 * frames as stored in native word size (as callstack_log's
	 * scrub_word path assumes) even though this is a user stack --
	 * confirm against the flag's definition.
	 */
	cs->kpuc_flags |= CALLSTACK_KERNEL_WORDS |
	    ((btinfo.btui_info & BTI_TRUNCATED) ? CALLSTACK_TRUNCATED : 0) |
	    ((btinfo.btui_info & BTI_64_BIT) ? CALLSTACK_64BIT : 0);

	/*
	 * Ignore EFAULT to get as much of the stack as possible.
	 */
	if (btinfo.btui_error == 0 || btinfo.btui_error == EFAULT) {
		callstack_fixup_user(cs, thread);
		cs->kpuc_flags |= CALLSTACK_VALID;

		/* if room remains and an async frame chain was found, walk it too */
		if (cs->kpuc_nframes < maxnframes &&
		    btinfo.btui_async_frame_addr != 0) {
			cs->kpuc_async_index = btinfo.btui_async_start_index;
			ctl.btc_frame_addr = btinfo.btui_async_frame_addr;
			ctl.btc_addr_offset = BTCTL_ASYNC_ADDR_OFFSET;
			maxnframes -= cs->kpuc_nframes;
			btinfo = BTUINFO_INIT;
			unsigned int nasync_frames = backtrace_user(
				&cs->kpuc_frames[cs->kpuc_nframes], maxnframes, &ctl, &btinfo);
			if (btinfo.btui_info & BTI_TRUNCATED) {
				cs->kpuc_flags |= CALLSTACK_TRUNCATED;
			}
			/* again tolerate EFAULT to keep partial async stacks */
			if (btinfo.btui_error == 0 || btinfo.btui_error == EFAULT) {
				cs->kpuc_flags |= CALLSTACK_HAS_ASYNC;
				cs->kpuc_async_nframes = nasync_frames;
			}
		}
	} else {
		cs->kpuc_nframes = 0;
		BUF_INFO(PERF_CS_ERROR, ERR_GETSTACK, btinfo.btui_error);
	}

	BUF_INFO(PERF_CS_USAMPLE | DBG_FUNC_END, (uintptr_t)thread_tid(thread),
	    cs->kpuc_flags, cs->kpuc_nframes);
}
368
369 static inline uintptr_t
scrub_word(uintptr_t * bt,int n_frames,int frame,bool kern)370 scrub_word(uintptr_t *bt, int n_frames, int frame, bool kern)
371 {
372 if (frame < n_frames) {
373 if (kern) {
374 return VM_KERNEL_UNSLIDE(bt[frame]);
375 } else {
376 return bt[frame];
377 }
378 } else {
379 return 0;
380 }
381 }
382
static inline uintptr_t
scrub_frame(uint64_t *bt, int n_frames, int frame)
{
	/*
	 * Fetch frame `frame` for logging, narrowing to a native word.
	 * Out-of-range slots read as zero to pad the final trace event.
	 */
	return (frame < n_frames) ? (uintptr_t)(bt[frame]) : 0;
}
392
static void
callstack_log(uint32_t hdrid, uint32_t dataid, void *vframes,
    unsigned int nframes, unsigned int flags, unsigned int async_index,
    unsigned int async_nframes)
{
	/*
	 * Emit a callstack to the trace buffer: one header event, then data
	 * events carrying four frames each; the last event is zero-padded by
	 * scrub_word/scrub_frame.  `vframes` points at uintptr_t entries
	 * when CALLSTACK_KERNEL_WORDS is set, uint64_t entries otherwise.
	 */
	BUF_VERB(PERF_CS_LOG | DBG_FUNC_START, flags, nframes);
	BUF_DATA(hdrid, flags, nframes - async_nframes, async_index, async_nframes);

	/* number of four-frame data events, rounding up for a partial tail */
	unsigned int nevts = nframes / 4;
	unsigned int ovf = nframes % 4;
	if (ovf != 0) {
		nevts++;
	}

	/* kernel frames must be unslid before leaving the kernel */
	bool kern = flags & CALLSTACK_KERNEL;

	if (flags & CALLSTACK_KERNEL_WORDS) {
		uintptr_t *frames = vframes;
		for (unsigned int i = 0; i < nevts; i++) {
			unsigned int j = i * 4;
			BUF_DATA(dataid,
			    scrub_word(frames, nframes, j + 0, kern),
			    scrub_word(frames, nframes, j + 1, kern),
			    scrub_word(frames, nframes, j + 2, kern),
			    scrub_word(frames, nframes, j + 3, kern));
		}
	} else {
		/* 64-bit frames (CHUD path) were already unslid when captured */
		for (unsigned int i = 0; i < nevts; i++) {
			uint64_t *frames = vframes;
			unsigned int j = i * 4;
			BUF_DATA(dataid,
			    scrub_frame(frames, nframes, j + 0),
			    scrub_frame(frames, nframes, j + 1),
			    scrub_frame(frames, nframes, j + 2),
			    scrub_frame(frames, nframes, j + 3));
		}
	}

	BUF_VERB(PERF_CS_LOG | DBG_FUNC_END, flags, nframes);
}
433
void
kperf_kcallstack_log(struct kp_kcallstack *cs)
{
	/* Log a kernel callstack; kernel stacks carry no async frames. */
	callstack_log(PERF_CS_KHDR, PERF_CS_KDATA, cs->kpkc_frames,
	    cs->kpkc_nframes, cs->kpkc_flags, 0, 0);
}
440
void
kperf_ucallstack_log(struct kp_ucallstack *cs)
{
	/*
	 * Log a user callstack; any async frames captured by
	 * kperf_ucallstack_sample sit directly after the main frames, so the
	 * total logged count includes both.
	 */
	callstack_log(PERF_CS_UHDR, PERF_CS_UDATA, cs->kpuc_frames,
	    cs->kpuc_nframes + cs->kpuc_async_nframes, cs->kpuc_flags,
	    cs->kpuc_async_index, cs->kpuc_async_nframes);
}
448
449 int
kperf_ucallstack_pend(struct kperf_context * context,uint32_t depth,unsigned int actionid)450 kperf_ucallstack_pend(struct kperf_context * context, uint32_t depth,
451 unsigned int actionid)
452 {
453 if (depth < 2) {
454 panic("HUH");
455 }
456 kperf_ast_set_callstack_depth(context->cur_thread, depth);
457 return kperf_ast_pend(context->cur_thread, T_KPERF_AST_CALLSTACK,
458 actionid);
459 }
460
461 static kern_return_t
chudxnu_kern_read(void * dstaddr,vm_offset_t srcaddr,vm_size_t size)462 chudxnu_kern_read(void *dstaddr, vm_offset_t srcaddr, vm_size_t size)
463 {
464 return (ml_nofault_copy(srcaddr, (vm_offset_t)dstaddr, size) == size) ?
465 KERN_SUCCESS : KERN_FAILURE;
466 }
467
468 static kern_return_t
chudxnu_task_read(task_t task,void * kernaddr,uint64_t usraddr,vm_size_t size)469 chudxnu_task_read(
470 task_t task,
471 void *kernaddr,
472 uint64_t usraddr,
473 vm_size_t size)
474 {
475 //ppc version ported to arm
476 kern_return_t ret = KERN_SUCCESS;
477
478 if (ml_at_interrupt_context()) {
479 return KERN_FAILURE; // can't look at tasks on interrupt stack
480 }
481
482 if (current_task() == task) {
483 if (copyin(usraddr, kernaddr, size)) {
484 ret = KERN_FAILURE;
485 }
486 } else {
487 vm_map_t map = get_task_map(task);
488 ret = vm_map_read_user(map, usraddr, kernaddr, size);
489 }
490
491 return ret;
492 }
493
494 static inline uint64_t
chudxnu_vm_unslide(uint64_t ptr,int kaddr)495 chudxnu_vm_unslide( uint64_t ptr, int kaddr )
496 {
497 if (!kaddr) {
498 return ptr;
499 }
500
501 return VM_KERNEL_UNSLIDE(ptr);
502 }
503
504 #if __arm64__
505
506 #if defined(HAS_APPLE_PAC)
507 #include <ptrauth.h>
508 #endif
509
510 // chudxnu_thread_get_callstack gathers a raw callstack along with any information needed to
511 // fix it up later (in case we stopped program as it was saving values into prev stack frame, etc.)
512 // after sampling has finished.
513 //
514 // For an N-entry callstack:
515 //
516 // [0] current pc
517 // [1..N-3] stack frames (including current one)
518 // [N-2] current LR (return value if we're in a leaf function)
519 // [N-1] current r0 (in case we've saved LR in r0) (optional)
520 //
521 //
522 #define CS_FLAG_EXTRASP 1 // capture extra sp register
523
static kern_return_t
chudxnu_thread_get_callstack64_internal(
	thread_t thread,
	uint64_t *callStack,
	mach_msg_type_number_t *count,
	boolean_t user_only,
	int flags)
{
	/*
	 * Legacy CHUD frame-pointer walk for arm64.  Fills `callStack` with
	 * up to *count unslid addresses (PC first, then return addresses,
	 * then LR and -- with CS_FLAG_EXTRASP -- SP for later fixup) and
	 * writes the final entry count back through `count`.  Returns
	 * KERN_RESOURCE_SHORTAGE when the buffer filled before the walk
	 * finished, KERN_FAILURE when no usable register state exists.
	 */
	kern_return_t kr = KERN_SUCCESS;
	task_t task;
	uint64_t currPC = 0ULL, currLR = 0ULL, currSP = 0ULL;
	uint64_t prevPC = 0ULL;
	uint64_t kernStackMin = thread->kernel_stack;
	uint64_t kernStackMax = kernStackMin + kernel_stack_size;
	uint64_t *buffer = callStack;
	int bufferIndex = 0;
	int bufferMaxIndex = 0;
	boolean_t kernel = FALSE;
	struct arm_saved_state *sstate = NULL;
	uint64_t pc = 0ULL;

	task = get_threadtask(thread);
	bufferMaxIndex = *count;
	//get thread state
	if (user_only) {
		sstate = find_user_regs(thread);
	} else {
		sstate = find_kern_regs(thread);
	}

	if (!sstate) {
		*count = 0;
		return KERN_FAILURE;
	}

	if (is_saved_state64(sstate)) {
		/* 64-bit register state: walk 8-byte frame records. */
		struct arm_saved_state64 *state = NULL;
		uint64_t *fp = NULL, *nextFramePointer = NULL, *topfp = NULL;
		uint64_t frame[2];

		state = saved_state64(sstate);

		/* make sure it is safe to dereference before you do it */
		kernel = PSR64_IS_KERNEL(state->cpsr);

		/* can't take a kernel callstack if we've got a user frame */
		if (!user_only && !kernel) {
			return KERN_FAILURE;
		}

		/*
		 * Reserve space for saving LR (and sometimes SP) at the end of the
		 * backtrace.
		 */
		if (flags & CS_FLAG_EXTRASP) {
			bufferMaxIndex -= 2;
		} else {
			bufferMaxIndex -= 1;
		}

		if (bufferMaxIndex < 2) {
			*count = 0;
			return KERN_RESOURCE_SHORTAGE;
		}

		currPC = state->pc;
		currLR = state->lr;
		currSP = state->sp;

		fp = (uint64_t *)state->fp; /* frame pointer */
		topfp = fp;

		bufferIndex = 0;  // start with a stack of size zero
		buffer[bufferIndex++] = chudxnu_vm_unslide(currPC, kernel);  // save PC in position 0.

		BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_START, kernel, 0);

		// Now, fill buffer with stack backtraces.
		while (bufferIndex < bufferMaxIndex) {
			pc = 0ULL;
			/*
			 * Below the frame pointer, the following values are saved:
			 * -> FP
			 */

			/*
			 * Note that we read the pc even for the first stack frame
			 * (which, in theory, is always empty because the callee fills
			 * it in just before it lowers the stack.  However, if we
			 * catch the program in between filling in the return address
			 * and lowering the stack, we want to still have a valid
			 * backtrace.  FixupStack correctly disregards this value if
			 * necessary.
			 */

			if ((uint64_t)fp == 0 || ((uint64_t)fp & 0x3) != 0) {
				/* frame pointer is invalid - stop backtracing */
				pc = 0ULL;
				break;
			}

			if (kernel) {
				/* kernel frames must lie within the thread's kernel stack */
				if (((uint64_t)fp > kernStackMax) ||
				    ((uint64_t)fp < kernStackMin)) {
					kr = KERN_FAILURE;
				} else {
					kr = chudxnu_kern_read(&frame,
					    (vm_offset_t)fp,
					    (vm_size_t)sizeof(frame));
					if (kr == KERN_SUCCESS) {
#if defined(HAS_APPLE_PAC)
						/* return addresses on stack will be signed by arm64e ABI */
						pc = (uint64_t)ptrauth_strip((void *)frame[1], ptrauth_key_return_address);
#else
						pc = frame[1];
#endif
						nextFramePointer = (uint64_t *)frame[0];
					} else {
						pc = 0ULL;
						nextFramePointer = 0ULL;
						kr = KERN_FAILURE;
					}
				}
			} else {
				/* user frames are read through the task's address space */
				kr = chudxnu_task_read(task,
				    &frame,
				    (vm_offset_t)fp,
				    (vm_size_t)sizeof(frame));
				if (kr == KERN_SUCCESS) {
#if defined(HAS_APPLE_PAC)
					/* return addresses on stack will be signed by arm64e ABI */
					pc = (uint64_t)ptrauth_strip((void *)frame[1], ptrauth_key_return_address);
#else
					pc = frame[1];
#endif
					nextFramePointer = (uint64_t *)(frame[0]);
				} else {
					pc = 0ULL;
					nextFramePointer = 0ULL;
					kr = KERN_FAILURE;
				}
			}

			if (kr != KERN_SUCCESS) {
				pc = 0ULL;
				break;
			}

			if (nextFramePointer) {
				buffer[bufferIndex++] = chudxnu_vm_unslide(pc, kernel);
				prevPC = pc;
			}

			/* frame pointers must strictly increase up the stack */
			if (nextFramePointer < fp) {
				break;
			} else {
				fp = nextFramePointer;
			}
		}

		BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_END, bufferIndex);

		if (bufferIndex >= bufferMaxIndex) {
			bufferIndex = bufferMaxIndex;
			kr = KERN_RESOURCE_SHORTAGE;
		} else {
			kr = KERN_SUCCESS;
		}

		// Save link register and SP at bottom of stack (used for later fixup).
		buffer[bufferIndex++] = chudxnu_vm_unslide(currLR, kernel);
		if (flags & CS_FLAG_EXTRASP) {
			buffer[bufferIndex++] = chudxnu_vm_unslide(currSP, kernel);
		}
	} else {
		/* 32-bit register state (arm): walk 4-byte frame records. */
		struct arm_saved_state32 *state = NULL;
		uint32_t *fp = NULL, *nextFramePointer = NULL, *topfp = NULL;

		/* 64-bit kernel stacks, 32-bit user stacks */
		uint64_t frame[2];
		uint32_t frame32[2];

		state = saved_state32(sstate);

		/* make sure it is safe to dereference before you do it */
		kernel = PSR_IS_KERNEL(state->cpsr);

		/* can't take a kernel callstack if we've got a user frame */
		if (!user_only && !kernel) {
			return KERN_FAILURE;
		}

		/*
		 * Reserve space for saving LR (and sometimes SP) at the end of the
		 * backtrace.
		 */
		if (flags & CS_FLAG_EXTRASP) {
			bufferMaxIndex -= 2;
		} else {
			bufferMaxIndex -= 1;
		}

		if (bufferMaxIndex < 2) {
			*count = 0;
			return KERN_RESOURCE_SHORTAGE;
		}

		currPC = (uint64_t)state->pc; /* r15 */
		if (state->cpsr & PSR_TF) {
			currPC |= 1ULL; /* encode thumb mode into low bit of PC */
		}
		currLR = (uint64_t)state->lr; /* r14 */
		currSP = (uint64_t)state->sp; /* r13 */

		fp = (uint32_t *)(uintptr_t)state->r[7]; /* frame pointer */
		topfp = fp;

		bufferIndex = 0;  // start with a stack of size zero
		buffer[bufferIndex++] = chudxnu_vm_unslide(currPC, kernel);  // save PC in position 0.

		BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_START, kernel, 1);

		// Now, fill buffer with stack backtraces.
		while (bufferIndex < bufferMaxIndex) {
			pc = 0ULL;
			/*
			 * Below the frame pointer, the following values are saved:
			 * -> FP
			 */

			/*
			 * Note that we read the pc even for the first stack frame
			 * (which, in theory, is always empty because the callee fills
			 * it in just before it lowers the stack.  However, if we
			 * catch the program in between filling in the return address
			 * and lowering the stack, we want to still have a valid
			 * backtrace.  FixupStack correctly disregards this value if
			 * necessary.
			 */

			if ((uint32_t)fp == 0 || ((uint32_t)fp & 0x3) != 0) {
				/* frame pointer is invalid - stop backtracing */
				pc = 0ULL;
				break;
			}

			if (kernel) {
				if (((uint32_t)fp > kernStackMax) ||
				    ((uint32_t)fp < kernStackMin)) {
					kr = KERN_FAILURE;
				} else {
					/* kernel frame records are 64-bit wide */
					kr = chudxnu_kern_read(&frame,
					    (vm_offset_t)fp,
					    (vm_size_t)sizeof(frame));
					if (kr == KERN_SUCCESS) {
						pc = (uint64_t)frame[1];
						nextFramePointer = (uint32_t *) (frame[0]);
					} else {
						pc = 0ULL;
						nextFramePointer = 0ULL;
						kr = KERN_FAILURE;
					}
				}
			} else {
				/* user frame records are 32-bit wide */
				kr = chudxnu_task_read(task,
				    &frame32,
				    (((uint64_t)(uint32_t)fp) & 0x00000000FFFFFFFFULL),
				    sizeof(frame32));
				if (kr == KERN_SUCCESS) {
					pc = (uint64_t)frame32[1];
					nextFramePointer = (uint32_t *)(uintptr_t)(frame32[0]);
				} else {
					pc = 0ULL;
					nextFramePointer = 0ULL;
					kr = KERN_FAILURE;
				}
			}

			if (kr != KERN_SUCCESS) {
				pc = 0ULL;
				break;
			}

			if (nextFramePointer) {
				buffer[bufferIndex++] = chudxnu_vm_unslide(pc, kernel);
				prevPC = pc;
			}

			/* frame pointers must strictly increase up the stack */
			if (nextFramePointer < fp) {
				break;
			} else {
				fp = nextFramePointer;
			}
		}

		BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_END, bufferIndex);

		/* clamp callstack size to max */
		if (bufferIndex >= bufferMaxIndex) {
			bufferIndex = bufferMaxIndex;
			kr = KERN_RESOURCE_SHORTAGE;
		} else {
			/* ignore all other failures */
			kr = KERN_SUCCESS;
		}

		// Save link register and R13 (sp) at bottom of stack (used for later fixup).
		buffer[bufferIndex++] = chudxnu_vm_unslide(currLR, kernel);
		if (flags & CS_FLAG_EXTRASP) {
			buffer[bufferIndex++] = chudxnu_vm_unslide(currSP, kernel);
		}
	}

	*count = bufferIndex;
	return kr;
}
840
841 kern_return_t
chudxnu_thread_get_callstack64_kperf(thread_t thread,uint64_t * callStack,mach_msg_type_number_t * count,boolean_t user_only)842 chudxnu_thread_get_callstack64_kperf(
843 thread_t thread,
844 uint64_t *callStack,
845 mach_msg_type_number_t *count,
846 boolean_t user_only)
847 {
848 return chudxnu_thread_get_callstack64_internal( thread, callStack, count, user_only, 0 );
849 }
850 #elif __x86_64__
851
/*
 * Address-sanity predicates for frame-pointer walking.  In supervisor mode
 * the address must lie within the current kernel stack bounds; user-mode
 * 32-bit addresses are accepted as-is, while user-mode 64-bit addresses
 * must be non-zero and canonical (outside the x86-64 address hole).
 */
#define VALID_STACK_ADDRESS(supervisor, addr, minKernAddr, maxKernAddr) (supervisor ? (addr>=minKernAddr && addr<=maxKernAddr) : TRUE)
// don't try to read in the hole
#define VALID_STACK_ADDRESS64(supervisor, addr, minKernAddr, maxKernAddr) \
	(supervisor ? ((uint64_t)addr >= minKernAddr && (uint64_t)addr <= maxKernAddr) : \
	((uint64_t)addr != 0ULL && ((uint64_t)addr <= 0x00007FFFFFFFFFFFULL || (uint64_t)addr >= 0xFFFF800000000000ULL)))

/* 64-bit stack frame record: saved frame pointer, then return address. */
typedef struct _cframe64_t {
	uint64_t        prevFP;         // can't use a real pointer here until we're a 64 bit kernel
	uint64_t        caller;
	uint64_t        args[0];
}cframe64_t;


/* 32-bit stack frame record, as laid out in a user32 address space. */
typedef struct _cframe_t {
	uint32_t        prev;           // this is really a user32-space pointer to the previous frame
	uint32_t        caller;
	uint32_t        args[0];
} cframe_t;

extern void * find_user_regs(thread_t);
extern x86_saved_state32_t *find_kern_regs(thread_t);
873
static kern_return_t
do_kernel_backtrace(
	thread_t thread,
	struct x86_kernel_state *regs,
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx)
{
	/*
	 * Walk the saved kernel register state of a (not currently running)
	 * x86 thread, appending unslid return addresses to `frames` starting
	 * at *start_idx and stopping before max_idx.  *start_idx is advanced
	 * to the index one past the last frame written.  Note the `while`
	 * loop header below is selected by the #if/#else pair, so the loop
	 * body is shared between the 64- and 32-bit variants.
	 */
	uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
	uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

#if __LP64__
	uint64_t currPC = 0ULL;
	uint64_t currFP = 0ULL;
	uint64_t prevPC = 0ULL;
	uint64_t prevFP = 0ULL;
	/* all reads of the saved state go through the fault-safe copier */
	if (KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_rip), sizeof(uint64_t))) {
		return KERN_FAILURE;
	}
	if (KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_rbp), sizeof(uint64_t))) {
		return KERN_FAILURE;
	}
#else
	uint32_t currPC = 0U;
	uint32_t currFP = 0U;
	uint32_t prevPC = 0U;
	uint32_t prevFP = 0U;
	if (KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_eip), sizeof(uint32_t))) {
		return KERN_FAILURE;
	}
	if (KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_ebp), sizeof(uint32_t))) {
		return KERN_FAILURE;
	}
#endif

	if (*start_idx >= max_idx) {
		return KERN_RESOURCE_SHORTAGE;   // no frames traced
	}
	if (!currPC) {
		return KERN_FAILURE;
	}

	/* position 0: the saved PC (always a supervisor address here) */
	frames[ct++] = chudxnu_vm_unslide((uint64_t)currPC, 1);

	// build a backtrace of this kernel state
#if __LP64__
	while (VALID_STACK_ADDRESS64(TRUE, currFP, kernStackMin, kernStackMax)) {
		// this is the address where caller lives in the user thread
		uint64_t caller = currFP + sizeof(uint64_t);
#else
	while (VALID_STACK_ADDRESS(TRUE, currFP, kernStackMin, kernStackMax)) {
		uint32_t caller = (uint32_t)currFP + sizeof(uint32_t);
#endif

		if (!currFP || !currPC) {
			currPC = 0;
			break;
		}

		if (ct >= max_idx) {
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* read our caller */
		kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(currPC));

		if (kr != KERN_SUCCESS || !currPC) {
			currPC = 0UL;
			break;
		}

		/*
		 * retrive contents of the frame pointer and advance to the next stack
		 * frame if it's valid
		 */
		prevFP = 0;
		kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(currPC));

#if __LP64__
		if (VALID_STACK_ADDRESS64(TRUE, prevFP, kernStackMin, kernStackMax)) {
#else
		if (VALID_STACK_ADDRESS(TRUE, prevFP, kernStackMin, kernStackMax)) {
#endif
			frames[ct++] = chudxnu_vm_unslide((uint64_t)currPC, 1);
			prevPC = currPC;
		}
		/* frame pointers must strictly increase, or the walk ends */
		if (prevFP <= currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}
973
974
975
static kern_return_t
do_backtrace32(
	task_t task,
	thread_t thread,
	x86_saved_state32_t *regs,
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx,
	boolean_t supervisor)
{
	/*
	 * Walk a 32-bit x86 saved state (kernel or user, per `supervisor`),
	 * appending unslid return addresses to `frames` from *start_idx up
	 * to max_idx.  Addresses are promoted to 64 bits for the caller.
	 * *start_idx is advanced past the last frame written.
	 */
	uint32_t tmpWord = 0UL;
	uint64_t currPC = (uint64_t) regs->eip;
	uint64_t currFP = (uint64_t) regs->ebp;
	uint64_t prevPC = 0ULL;
	uint64_t prevFP = 0ULL;
	uint64_t kernStackMin = thread->kernel_stack;
	uint64_t kernStackMax = kernStackMin + kernel_stack_size;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

	if (ct >= max_idx) {
		return KERN_RESOURCE_SHORTAGE;  // no frames traced
	}
	/* position 0: the saved PC */
	frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);

	// build a backtrace of this 32 bit state.
	while (VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax)) {
		cframe_t *fp = (cframe_t *) (uintptr_t) currFP;

		if (!currFP) {
			currPC = 0;
			break;
		}

		if (ct >= max_idx) {
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* read our caller */
		if (supervisor) {
			kr = chudxnu_kern_read(&tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
		} else {
			kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
		}

		if (kr != KERN_SUCCESS) {
			currPC = 0ULL;
			break;
		}

		currPC = (uint64_t) tmpWord;    // promote 32 bit address

		/*
		 * retrive contents of the frame pointer and advance to the next stack
		 * frame if it's valid
		 */
		prevFP = 0;
		if (supervisor) {
			kr = chudxnu_kern_read(&tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
		} else {
			kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
		}
		/*
		 * NOTE(review): kr is not checked here; on a failed read tmpWord
		 * keeps its previous value -- confirm this matches the intended
		 * best-effort semantics.
		 */
		prevFP = (uint64_t) tmpWord;    // promote 32 bit address

		if (prevFP) {
			frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);
			prevPC = currPC;
		}
		/* stop once the frame chain no longer moves up the stack */
		if (prevFP < currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}
1055
static kern_return_t
do_backtrace64(
	task_t task,
	thread_t thread,
	x86_saved_state64_t *regs,
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx,
	boolean_t supervisor)
{
	/*
	 * Walk a 64-bit x86 saved state (kernel or user, per `supervisor`),
	 * appending unslid return addresses to `frames` from *start_idx up
	 * to max_idx.  *start_idx is advanced past the last frame written.
	 */
	uint64_t currPC = regs->isf.rip;
	uint64_t currFP = regs->rbp;
	uint64_t prevPC = 0ULL;
	uint64_t prevFP = 0ULL;
	uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
	uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

	/* ct == *start_idx at this point */
	if (*start_idx >= max_idx) {
		return KERN_RESOURCE_SHORTAGE;  // no frames traced
	}
	/* position 0: the saved PC */
	frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);

	// build a backtrace of this 32 bit state.
	while (VALID_STACK_ADDRESS64(supervisor, currFP, kernStackMin, kernStackMax)) {
		// this is the address where caller lives in the user thread
		uint64_t caller = currFP + sizeof(uint64_t);

		if (!currFP) {
			currPC = 0;
			break;
		}

		if (ct >= max_idx) {
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* read our caller */
		if (supervisor) {
			kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(uint64_t));
		} else {
			kr = chudxnu_task_read(task, &currPC, caller, sizeof(uint64_t));
		}

		if (kr != KERN_SUCCESS) {
			currPC = 0ULL;
			break;
		}

		/*
		 * retrive contents of the frame pointer and advance to the next stack
		 * frame if it's valid
		 */
		prevFP = 0;
		if (supervisor) {
			kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(uint64_t));
		} else {
			kr = chudxnu_task_read(task, &prevFP, currFP, sizeof(uint64_t));
		}

		if (VALID_STACK_ADDRESS64(supervisor, prevFP, kernStackMin, kernStackMax)) {
			frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);
			prevPC = currPC;
		}
		/* stop once the frame chain no longer moves up the stack */
		if (prevFP < currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}
1132
/*
 * Capture a callstack of 64-bit-wide entries for `thread` into `callstack`.
 *
 * Selects the most appropriate saved register state to walk, in order of
 * preference: 64-bit kernel state, 32-bit kernel state, 64-bit user state,
 * 32-bit user state.  When readable, the sampled frame's stack pointer is
 * appended as a final extra entry after the PCs.
 *
 * thread:    thread to sample; may be current_thread() at interrupt context.
 * callstack: output buffer of unslid PCs (+ trailing stack pointer value).
 * count:     in: capacity of `callstack`; out: number of entries written.
 * user_only: fail unless user-mode state is available; trace only user mode.
 * kern_only: suppress the user-mode backtrace when only user state exists.
 *
 * Returns the status of the backtrace walk, or KERN_FAILURE when no
 * traceable state exists; KERN_RESOURCE_SHORTAGE when `callstack` is empty.
 */
static kern_return_t
chudxnu_thread_get_callstack64_internal(
	thread_t thread,
	uint64_t *callstack,
	mach_msg_type_number_t *count,
	boolean_t user_only,
	boolean_t kern_only)
{
	kern_return_t kr = KERN_FAILURE;
	task_t task = get_threadtask(thread);
	uint64_t currPC = 0ULL;
	boolean_t supervisor = FALSE;
	mach_msg_type_number_t bufferIndex = 0;
	mach_msg_type_number_t bufferMaxIndex = *count;
	x86_saved_state_t *tagged_regs = NULL; // kernel register state
	x86_saved_state64_t *regs64 = NULL;
	x86_saved_state32_t *regs32 = NULL;
	x86_saved_state32_t *u_regs32 = NULL;
	x86_saved_state64_t *u_regs64 = NULL;
	struct x86_kernel_state *kregs = NULL;

	if (ml_at_interrupt_context()) {
		if (user_only) {
			/* can't backtrace user state on interrupt stack. */
			return KERN_FAILURE;
		}

		/* backtracing at interrupt context? */
		if (thread == current_thread() && current_cpu_datap()->cpu_int_state) {
			/*
			 * Locate the registers for the interrupted thread, assuming it is
			 * current_thread().
			 */
			tagged_regs = current_cpu_datap()->cpu_int_state;

			if (is_saved_state64(tagged_regs)) {
				/* 64 bit registers */
				regs64 = saved_state64(tagged_regs);
				/* privilege level in saved cs selector distinguishes
				 * kernel from user state */
				supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
			} else {
				/* 32 bit registers */
				regs32 = saved_state32(tagged_regs);
				supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
			}
		}
	}

	if (!ml_at_interrupt_context() && kernel_task == task) {
		if (!thread->kernel_stack) {
			return KERN_FAILURE;
		}

		// Kernel thread not at interrupt context
		kregs = (struct x86_kernel_state *)NULL;

		// nofault read of the thread->kernel_stack pointer
		if (KERN_SUCCESS != chudxnu_kern_read(&kregs, (vm_offset_t)&(thread->kernel_stack), sizeof(void *))) {
			return KERN_FAILURE;
		}

		// Adjust to find the saved kernel state
		kregs = STACK_IKS((vm_offset_t)(uintptr_t)kregs);

		supervisor = TRUE;
	} else if (!tagged_regs) {
		/*
		 * not at interrupt context, or tracing a different thread than
		 * current_thread() at interrupt context
		 */
		tagged_regs = USER_STATE(thread);
		if (is_saved_state64(tagged_regs)) {
			/* 64 bit registers */
			regs64 = saved_state64(tagged_regs);
			supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
		} else {
			/* 32 bit registers */
			regs32 = saved_state32(tagged_regs);
			supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
		}
	}

	/* nothing written yet; early-return paths below report an empty stack */
	*count = 0;

	if (supervisor) {
		// the caller only wants a user callstack.
		if (user_only) {
			// bail - we've only got kernel state
			return KERN_FAILURE;
		}
	} else {
		// regs32(64) is not in supervisor mode.
		u_regs32 = regs32;
		u_regs64 = regs64;
		regs32 = NULL;
		regs64 = NULL;
	}

	if (user_only) {
		/* we only want to backtrace the user mode */
		if (!(u_regs32 || u_regs64)) {
			/* no user state to look at */
			return KERN_FAILURE;
		}
	}

	/*
	 * Order of preference for top of stack:
	 * 64 bit kernel state (not likely)
	 * 32 bit kernel state
	 * 64 bit user land state
	 * 32 bit user land state
	 */

	if (kregs) {
		/*
		 * nofault read of the registers from the kernel stack (as they can
		 * disappear on the fly).
		 */

		if (KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(kregs->k_rip), sizeof(uint64_t))) {
			return KERN_FAILURE;
		}
	} else if (regs64) {
		currPC = regs64->isf.rip;
	} else if (regs32) {
		currPC = (uint64_t) regs32->eip;
	} else if (u_regs64) {
		currPC = u_regs64->isf.rip;
	} else if (u_regs32) {
		currPC = (uint64_t) u_regs32->eip;
	}

	if (!currPC) {
		/* no top of the stack, bail out */
		return KERN_FAILURE;
	}

	bufferIndex = 0;

	if (bufferMaxIndex < 1) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}

	/* backtrace kernel */
	if (kregs) {
		addr64_t address = 0ULL;
		size_t size = 0UL;

		// do the backtrace
		kr = do_kernel_backtrace(thread, kregs, callstack, &bufferIndex, bufferMaxIndex);

		// and do a nofault read of (r|e)sp
		uint64_t rsp = 0ULL;
		size = sizeof(uint64_t);

		/* two-step read: first the saved rsp slot's address, then the
		 * stack word it points at */
		if (KERN_SUCCESS != chudxnu_kern_read(&address, (vm_offset_t)&(kregs->k_rsp), size)) {
			address = 0ULL;
		}

		if (address && KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t)address, size) && bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t)rsp;
		}
	} else if (regs64) {
		uint64_t rsp = 0ULL;

		// backtrace the 64bit side.
		kr = do_backtrace64(task, thread, regs64, callstack, &bufferIndex,
		    bufferMaxIndex - 1, TRUE);

		/* append the top-of-stack word as a trailing pseudo-frame */
		if (KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t) regs64->isf.rsp, sizeof(uint64_t)) &&
		    bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = rsp;
		}
	} else if (regs32) {
		uint32_t esp = 0UL;

		// backtrace the 32bit side.
		kr = do_backtrace32(task, thread, regs32, callstack, &bufferIndex,
		    bufferMaxIndex - 1, TRUE);

		if (KERN_SUCCESS == chudxnu_kern_read(&esp, (vm_offset_t) regs32->uesp, sizeof(uint32_t)) &&
		    bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t) esp;
		}
	} else if (u_regs64 && !kern_only) {
		/* backtrace user land */
		uint64_t rsp = 0ULL;

		kr = do_backtrace64(task, thread, u_regs64, callstack, &bufferIndex,
		    bufferMaxIndex - 1, FALSE);

		if (KERN_SUCCESS == chudxnu_task_read(task, &rsp, (addr64_t) u_regs64->isf.rsp, sizeof(uint64_t)) &&
		    bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = rsp;
		}
	} else if (u_regs32 && !kern_only) {
		uint32_t esp = 0UL;

		kr = do_backtrace32(task, thread, u_regs32, callstack, &bufferIndex,
		    bufferMaxIndex - 1, FALSE);

		if (KERN_SUCCESS == chudxnu_task_read(task, &esp, (addr64_t) u_regs32->uesp, sizeof(uint32_t)) &&
		    bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t) esp;
		}
	}

	/* NOTE(review): if u_regs* exist but kern_only suppressed the walk,
	 * kr remains KERN_FAILURE from its initializer — presumably intended. */
	*count = bufferIndex;
	return kr;
}
1344
1345 __private_extern__
1346 kern_return_t
1347 chudxnu_thread_get_callstack64_kperf(
1348 thread_t thread,
1349 uint64_t *callstack,
1350 mach_msg_type_number_t *count,
1351 boolean_t is_user)
1352 {
1353 return chudxnu_thread_get_callstack64_internal(thread, callstack, count, is_user, !is_user);
1354 }
1355 #else /* !__arm64__ && !__x86_64__ */
1356 #error kperf: unsupported architecture
1357 #endif /* !__arm64__ && !__x86_64__ */
1358