/*
 * Copyright (c) 2012-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/host_priv.h>
#include <mach/host_special_ports.h>
#include <mach/mach_types.h>
#include <mach/telemetry_notification_server.h>

#include <kern/assert.h>
#include <kern/clock.h>
#include <kern/coalition.h>
#include <kern/debug.h>
#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/kern_types.h>
#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/telemetry.h>
#include <kern/timer_call.h>
#include <kern/policy_internal.h>
#include <kern/kcdata.h>

#include <pexpert/pexpert.h>

#include <string.h>
#include <vm/vm_kern_xnu.h>
#include <vm/vm_shared_region.h>

#include <kperf/callstack.h>
#include <kern/backtrace.h>
#include <kern/monotonic.h>

#include <security/mac_mach_internal.h>

#include <sys/errno.h>
#include <sys/kdebug.h>
#include <uuid/uuid.h>
#include <kdp/kdp_dyld.h>

#include <libkern/coreanalytics/coreanalytics.h>
#include <kern/thread_call.h>

#define TELEMETRY_DEBUG 0

struct proc;
extern int proc_pid(struct proc *);
extern char *proc_name_address(void *p);
extern char *proc_longname_address(void *p);
extern uint64_t proc_uniqueid(void *p);
extern uint64_t proc_was_throttled(void *p);
extern uint64_t proc_did_throttle(void *p);
extern int proc_selfpid(void);
extern boolean_t task_did_exec(task_t task);
extern boolean_t task_is_exec_copy(task_t task);

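/*
 * A micro_snapshot_buffer is a ring buffer of variable-length records:
 * current_position is the write cursor, and end_point marks where valid data
 * stops whenever a record does not fit and the cursor wraps back to offset 0
 * (see telemetry_process_sample). Readers locate records by scanning for
 * STACKSHOT_MICRO_SNAPSHOT_MAGIC.
 */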
struct micro_snapshot_buffer {
    vm_offset_t buffer;
    uint32_t size;
    uint32_t current_position;
    uint32_t end_point;
};

static bool telemetry_task_ready_for_sample(task_t task);

static void telemetry_instrumentation_begin(
    struct micro_snapshot_buffer *buffer, enum micro_snapshot_flags flags);

static void telemetry_instrumentation_end(struct micro_snapshot_buffer *buffer);

static void telemetry_take_sample(thread_t thread, enum micro_snapshot_flags flags);

#if CONFIG_MACF
static void telemetry_macf_take_sample(thread_t thread, enum micro_snapshot_flags flags);
#endif

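/*
 * Bundles everything telemetry_process_sample() needs to turn a collected
 * user backtrace into a record in a micro_snapshot_buffer. async_start_index
 * is the offset where an inlined async call stack begins within frames, or
 * UINT16_MAX when there is none (see telemetry_take_sample).
 */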
struct telemetry_target {
    thread_t thread;
    uintptr_t *frames;
    size_t frames_count;
    bool user64_regs;
    uint16_t async_start_index;
    enum micro_snapshot_flags microsnapshot_flags;
    struct micro_snapshot_buffer *buffer;
    lck_mtx_t *buffer_mtx;
};

static int telemetry_process_sample(
    const struct telemetry_target *target,
    bool release_buffer_lock,
    uint32_t *out_current_record_start);

static int telemetry_buffer_gather(
    user_addr_t buffer,
    uint32_t *length,
    bool mark,
    struct micro_snapshot_buffer *current_buffer);

#define TELEMETRY_DEFAULT_SAMPLE_RATE (1) /* 1 sample every 1 second */
#define TELEMETRY_DEFAULT_BUFFER_SIZE (16*1024)
#define TELEMETRY_MAX_BUFFER_SIZE (64*1024)

#define TELEMETRY_DEFAULT_NOTIFY_LEEWAY (4*1024) // Userland gets 4k of leeway to collect data after notification
#define TELEMETRY_MAX_UUID_COUNT (128) // Max of 128 non-shared-cache UUIDs to log for symbolication

uint32_t telemetry_sample_rate = 0;
volatile boolean_t telemetry_needs_record = FALSE;
volatile boolean_t telemetry_needs_timer_arming_record = FALSE;

/*
 * If TRUE, record micro-stackshot samples for all tasks.
 * If FALSE, only sample tasks which are marked for telemetry.
 */
bool telemetry_sample_all_tasks = false;
bool telemetry_sample_pmis = false;
uint32_t telemetry_active_tasks = 0; // Number of tasks opted into telemetry

uint32_t telemetry_timestamp = 0;

/*
 * The telemetry_buffer stores the timer and interrupt samples driven by
 * compute_averages(). It notifies its client (if one exists) when it has
 * enough data to be worth flushing.
 */
struct micro_snapshot_buffer telemetry_buffer = {
    .buffer = 0,
    .size = 0,
    .current_position = 0,
    .end_point = 0
};
157
158 #if CONFIG_MACF
159 #define TELEMETRY_MACF_DEFAULT_BUFFER_SIZE (16*1024)
160 /*
161 * The MAC framework uses its own telemetry buffer for the purposes of auditing
162 * security-related work being done by userland threads.
163 */
164 struct micro_snapshot_buffer telemetry_macf_buffer = {
165 .buffer = 0,
166 .size = 0,
167 .current_position = 0,
168 .end_point = 0
169 };
170 #endif
171
172 int telemetry_bytes_since_last_mark = -1; // How much data since buf was last marked?
173 int telemetry_buffer_notify_at = 0;
174
175 LCK_GRP_DECLARE(telemetry_lck_grp, "telemetry group");
176 LCK_MTX_DECLARE(telemetry_mtx, &telemetry_lck_grp);
177 LCK_MTX_DECLARE(telemetry_pmi_mtx, &telemetry_lck_grp);
178 LCK_MTX_DECLARE(telemetry_macf_mtx, &telemetry_lck_grp);
179
180 #define TELEMETRY_LOCK() do { lck_mtx_lock(&telemetry_mtx); } while (0)
181 #define TELEMETRY_TRY_SPIN_LOCK() lck_mtx_try_lock_spin(&telemetry_mtx)
182 #define TELEMETRY_UNLOCK() do { lck_mtx_unlock(&telemetry_mtx); } while (0)
183
184 #define TELEMETRY_PMI_LOCK() do { lck_mtx_lock(&telemetry_pmi_mtx); } while (0)
185 #define TELEMETRY_PMI_UNLOCK() do { lck_mtx_unlock(&telemetry_pmi_mtx); } while (0)
186
187 #define TELEMETRY_MACF_LOCK() do { lck_mtx_lock(&telemetry_macf_mtx); } while (0)
188 #define TELEMETRY_MACF_UNLOCK() do { lck_mtx_unlock(&telemetry_macf_mtx); } while (0)
189
190 #define TELEMETRY_BT_FRAMES (5)
191
192 /*
193 * Telemetry reporting is unsafe in interrupt context, since the CA framework
194 * relies on being able to successfully zalloc some memory for the event.
195 * Therefore we maintain a small buffer that is then flushed by an helper thread.
196 */
#define CA_ENTRIES_SIZE (5)

struct telemetry_ca_entry {
    uint32_t type;
    uint16_t code;
    uint32_t num_frames;
    uintptr_t faulting_address;
    uintptr_t frames[TELEMETRY_BT_FRAMES];
};

LCK_GRP_DECLARE(ca_entries_lock_grp, "ca_entries_lck");
LCK_SPIN_DECLARE(ca_entries_lck, &ca_entries_lock_grp);

static struct telemetry_ca_entry ca_entries[CA_ENTRIES_SIZE];
static uint8_t ca_entries_index = 0;
static struct thread_call *telemetry_ca_send_callout;

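/*
 * CoreAnalytics event schema for kernel breakpoint telemetry, sent from
 * telemetry_flush_ca_events() below. The backtrace field carries the
 * newline-separated offset format produced by telemetry_backtrace_to_string().
 */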
CA_EVENT(kernel_breakpoint_event,
    CA_INT, brk_type,
    CA_INT, brk_code,
    CA_INT, faulting_address,
    CA_STATIC_STRING(CA_UBSANBUF_LEN), backtrace,
    CA_STATIC_STRING(CA_UUID_LEN), uuid);

/* Rate-limit telemetry on last seen faulting address */
static uintptr_t PERCPU_DATA(brk_telemetry_cache_address);
/* Get out from the brk handler if the CPU is already servicing one */
static bool PERCPU_DATA(brk_telemetry_in_handler);

static void telemetry_flush_ca_events(thread_call_param_t, thread_call_param_t);

void
telemetry_init(void)
{
    kern_return_t ret;
    uint32_t telemetry_notification_leeway;

    if (!PE_parse_boot_argn("telemetry_buffer_size",
        &telemetry_buffer.size, sizeof(telemetry_buffer.size))) {
        telemetry_buffer.size = TELEMETRY_DEFAULT_BUFFER_SIZE;
    }

    if (telemetry_buffer.size > TELEMETRY_MAX_BUFFER_SIZE) {
        telemetry_buffer.size = TELEMETRY_MAX_BUFFER_SIZE;
    }

    ret = kmem_alloc(kernel_map, &telemetry_buffer.buffer, telemetry_buffer.size,
        KMA_DATA | KMA_ZERO | KMA_PERMANENT, VM_KERN_MEMORY_DIAG);
    if (ret != KERN_SUCCESS) {
        kprintf("Telemetry: Allocation failed: %d\n", ret);
        return;
    }

    if (!PE_parse_boot_argn("telemetry_notification_leeway",
        &telemetry_notification_leeway, sizeof(telemetry_notification_leeway))) {
        /*
         * By default, notify the user to collect the buffer when there is this much space left in the buffer.
         */
        telemetry_notification_leeway = TELEMETRY_DEFAULT_NOTIFY_LEEWAY;
    }
    if (telemetry_notification_leeway >= telemetry_buffer.size) {
        printf("telemetry: nonsensical telemetry_notification_leeway boot-arg %d changed to %d\n",
            telemetry_notification_leeway, TELEMETRY_DEFAULT_NOTIFY_LEEWAY);
        telemetry_notification_leeway = TELEMETRY_DEFAULT_NOTIFY_LEEWAY;
    }
    telemetry_buffer_notify_at = telemetry_buffer.size - telemetry_notification_leeway;

    if (!PE_parse_boot_argn("telemetry_sample_rate",
        &telemetry_sample_rate, sizeof(telemetry_sample_rate))) {
        telemetry_sample_rate = TELEMETRY_DEFAULT_SAMPLE_RATE;
    }

    telemetry_ca_send_callout = thread_call_allocate_with_options(
        telemetry_flush_ca_events, NULL, THREAD_CALL_PRIORITY_KERNEL,
        THREAD_CALL_OPTIONS_ONCE);

    assert(telemetry_ca_send_callout != NULL);
    /*
     * To enable telemetry for all tasks, include "telemetry_sample_all_tasks=1" in boot-args.
     */
    if (!PE_parse_boot_argn("telemetry_sample_all_tasks",
        &telemetry_sample_all_tasks, sizeof(telemetry_sample_all_tasks))) {
#if !defined(XNU_TARGET_OS_OSX) && !(DEVELOPMENT || DEBUG)
        telemetry_sample_all_tasks = false;
#else
        telemetry_sample_all_tasks = true;
#endif /* !defined(XNU_TARGET_OS_OSX) && !(DEVELOPMENT || DEBUG) */
    }

    kprintf("Telemetry: Sampling %stasks once per %u second%s\n",
        (telemetry_sample_all_tasks) ? "all " : "",
        telemetry_sample_rate, telemetry_sample_rate == 1 ? "" : "s");
}
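
/*
 * Example (hypothetical values): booting with
 *     telemetry_sample_all_tasks=1 telemetry_sample_rate=2 telemetry_buffer_size=32768
 * samples every task once per two seconds into a 32 KiB buffer. The defaults
 * above apply when a boot-arg is absent or out of range.
 */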

/*
 * Enable or disable global microstackshots (i.e., telemetry_sample_all_tasks).
 *
 * enable_disable == 1: turn it on
 * enable_disable == 0: turn it off
 */
void
telemetry_global_ctl(int enable_disable)
{
    if (enable_disable == 1) {
        telemetry_sample_all_tasks = true;
    } else {
        telemetry_sample_all_tasks = false;
    }
}

/*
 * Opt the given task into or out of the telemetry stream.
 *
 * Supported reasons (callers may use any or all of):
 * TF_CPUMON_WARNING
 * TF_WAKEMON_WARNING
 *
 * enable_disable == 1: turn it on
 * enable_disable == 0: turn it off
 */
void
telemetry_task_ctl(task_t task, uint32_t reasons, int enable_disable)
{
    task_lock(task);
    telemetry_task_ctl_locked(task, reasons, enable_disable);
    task_unlock(task);
}

void
telemetry_task_ctl_locked(task_t task, uint32_t reasons, int enable_disable)
{
    uint32_t origflags;

    assert((reasons != 0) && ((reasons | TF_TELEMETRY) == TF_TELEMETRY));

    task_lock_assert_owned(task);

    origflags = task->t_flags;

    if (enable_disable == 1) {
        task->t_flags |= reasons;
        if ((origflags & TF_TELEMETRY) == 0) {
            OSIncrementAtomic(&telemetry_active_tasks);
#if TELEMETRY_DEBUG
            printf("%s: telemetry OFF -> ON (%d active)\n", proc_name_address(get_bsdtask_info(task)), telemetry_active_tasks);
#endif
        }
    } else {
        task->t_flags &= ~reasons;
        if (((origflags & TF_TELEMETRY) != 0) && ((task->t_flags & TF_TELEMETRY) == 0)) {
            /*
             * If this task went from having at least one telemetry bit to having none,
             * the net change was to disable telemetry for the task.
             */
            OSDecrementAtomic(&telemetry_active_tasks);
#if TELEMETRY_DEBUG
            printf("%s: telemetry ON -> OFF (%d active)\n", proc_name_address(get_bsdtask_info(task)), telemetry_active_tasks);
#endif
        }
    }
}

/*
 * Determine if the current thread is eligible for telemetry:
 *
 * telemetry_sample_all_tasks: All threads are eligible. This takes precedence.
 * telemetry_active_tasks: Count of tasks opted in.
 * task->t_flags & TF_TELEMETRY: This task is opted in.
 */
static bool
telemetry_is_active(thread_t thread)
{
    task_t task = get_threadtask(thread);

    if (task == kernel_task) {
        /* Kernel threads never return to an AST boundary, and are ineligible */
        return false;
    }

    if (telemetry_sample_all_tasks || telemetry_sample_pmis) {
        return true;
    }

    if ((telemetry_active_tasks > 0) && ((task->t_flags & TF_TELEMETRY) != 0)) {
        return true;
    }

    return false;
}

/*
 * Userland is arming a timer. If we are eligible for such a record,
 * sample now. No need to do this one at the AST because we're already at
 * a safe place in this system call.
 */
int
telemetry_timer_event(__unused uint64_t deadline, __unused uint64_t interval, __unused uint64_t leeway)
{
    if (telemetry_needs_timer_arming_record == TRUE) {
        telemetry_needs_timer_arming_record = FALSE;
        telemetry_take_sample(current_thread(), (enum micro_snapshot_flags)(kTimerArmingRecord | kUserMode));
    }

    return 0;
}

#if CONFIG_CPU_COUNTERS
static void
telemetry_pmi_handler(bool user_mode, __unused void *ctx)
{
    telemetry_mark_curthread(user_mode, TRUE);
}
#endif /* CONFIG_CPU_COUNTERS */

int
telemetry_pmi_setup(enum telemetry_pmi pmi_ctr, uint64_t period)
{
#if CONFIG_CPU_COUNTERS
    static bool sample_all_tasks_aside = false;
    static uint32_t active_tasks_aside = 0;
    int error = 0;
    const char *name = "?";

    unsigned int ctr = 0;

    TELEMETRY_PMI_LOCK();

    switch (pmi_ctr) {
    case TELEMETRY_PMI_NONE:
        if (!telemetry_sample_pmis) {
            error = 1;
            goto out;
        }

        telemetry_sample_pmis = false;
        telemetry_sample_all_tasks = sample_all_tasks_aside;
        telemetry_active_tasks = active_tasks_aside;
        error = mt_microstackshot_stop();
        if (!error) {
            printf("telemetry: disabling ustackshot on PMI\n");
        }
        goto out;

    case TELEMETRY_PMI_INSTRS:
        ctr = MT_CORE_INSTRS;
        name = "instructions";
        break;

    case TELEMETRY_PMI_CYCLES:
        ctr = MT_CORE_CYCLES;
        name = "cycles";
        break;

    default:
        error = 1;
        goto out;
    }

    telemetry_sample_pmis = true;
    sample_all_tasks_aside = telemetry_sample_all_tasks;
    active_tasks_aside = telemetry_active_tasks;
    telemetry_sample_all_tasks = false;
    telemetry_active_tasks = 0;

    error = mt_microstackshot_start(ctr, period, telemetry_pmi_handler, NULL);
    if (!error) {
        printf("telemetry: ustackshot every %llu %s\n", period, name);
    }

out:
    TELEMETRY_PMI_UNLOCK();
    return error;
#else /* CONFIG_CPU_COUNTERS */
#pragma unused(pmi_ctr, period)
    return 1;
#endif /* !CONFIG_CPU_COUNTERS */
}

/*
 * Mark the current thread for an interrupt-based
 * telemetry record, to be sampled at the next AST boundary.
 */
void
telemetry_mark_curthread(boolean_t interrupted_userspace, boolean_t pmi)
{
    uint32_t ast_bits = 0;
    thread_t thread = current_thread();

    /*
     * If telemetry isn't active for this thread, return and try
     * again next time.
     */
    if (telemetry_is_active(thread) == false) {
        return;
    }

    ast_bits |= (interrupted_userspace ? AST_TELEMETRY_USER : AST_TELEMETRY_KERNEL);
    if (pmi) {
        ast_bits |= AST_TELEMETRY_PMI;
    }

    telemetry_needs_record = FALSE;
    thread_ast_set(thread, ast_bits);
    ast_propagate(thread);
}

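/*
 * Called periodically from the scheduler's averaging code (see the note on
 * telemetry_buffer above): every telemetry_sample_rate invocations, request
 * that eligible threads be sampled at their next AST boundary.
 */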
void
compute_telemetry(void *arg __unused)
{
    if (telemetry_sample_all_tasks || (telemetry_active_tasks > 0)) {
        if ((++telemetry_timestamp) % telemetry_sample_rate == 0) {
            telemetry_needs_record = TRUE;
            telemetry_needs_timer_arming_record = TRUE;
        }
    }
}

/*
 * If userland has registered a port for telemetry notifications, send one now.
 */
static void
telemetry_notify_user(void)
{
    mach_port_t user_port = MACH_PORT_NULL;

    kern_return_t kr = host_get_telemetry_port(host_priv_self(), &user_port);
    if ((kr != KERN_SUCCESS) || !IPC_PORT_VALID(user_port)) {
        return;
    }

    telemetry_notification(user_port, 0);
    ipc_port_release_send(user_port);
}

void
telemetry_ast(thread_t thread, ast_t reasons)
{
    assert((reasons & AST_TELEMETRY_ALL) != 0);

    uint8_t record_type = 0;
    if (reasons & AST_TELEMETRY_IO) {
        record_type |= kIORecord;
    }
    if (reasons & (AST_TELEMETRY_USER | AST_TELEMETRY_KERNEL)) {
        record_type |= (reasons & AST_TELEMETRY_PMI) ? kPMIRecord :
            kInterruptRecord;
    }

    if ((reasons & AST_TELEMETRY_MACF) != 0) {
        record_type |= kMACFRecord;
    }

    enum micro_snapshot_flags user_telemetry = (reasons & AST_TELEMETRY_USER) ? kUserMode : 0;
    enum micro_snapshot_flags microsnapshot_flags = record_type | user_telemetry;

    if ((reasons & AST_TELEMETRY_MACF) != 0) {
        telemetry_macf_take_sample(thread, microsnapshot_flags);
    }

    if ((reasons & (AST_TELEMETRY_IO | AST_TELEMETRY_KERNEL | AST_TELEMETRY_PMI
        | AST_TELEMETRY_USER)) != 0) {
        telemetry_take_sample(thread, microsnapshot_flags);
    }
}

bool
telemetry_task_ready_for_sample(task_t task)
{
    return task != TASK_NULL &&
           task != kernel_task &&
           !task_did_exec(task) &&
           !task_is_exec_copy(task);
}

void
telemetry_instrumentation_begin(
    __unused struct micro_snapshot_buffer *buffer,
    __unused enum micro_snapshot_flags flags)
{
    /* telemetry_XXX accessed outside of lock for instrumentation only */
    KDBG(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_RECORD) | DBG_FUNC_START,
        flags, telemetry_bytes_since_last_mark, 0,
        (&telemetry_buffer != buffer));
}

void
telemetry_instrumentation_end(__unused struct micro_snapshot_buffer *buffer)
{
    /* telemetry_XXX accessed outside of lock for instrumentation only */
    KDBG(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_RECORD) | DBG_FUNC_END,
        (&telemetry_buffer == buffer), telemetry_bytes_since_last_mark,
        buffer->current_position, buffer->end_point);
}

void
telemetry_take_sample(thread_t thread, enum micro_snapshot_flags flags)
{
    task_t task;
    uintptr_t frames[128];
    size_t frames_len = sizeof(frames) / sizeof(frames[0]);
    uint32_t btcount;
    struct backtrace_user_info btinfo = BTUINFO_INIT;
    uint16_t async_start_index = UINT16_MAX;

    if (thread == THREAD_NULL) {
        return;
    }

    /* Ensure task is ready for taking a sample. */
    task = get_threadtask(thread);
    if (!telemetry_task_ready_for_sample(task)) {
        return;
    }

    telemetry_instrumentation_begin(&telemetry_buffer, flags);

    /* Collect backtrace from user thread. */
    btcount = backtrace_user(frames, frames_len, NULL, &btinfo);
    if (btinfo.btui_error != 0) {
        /* Close the instrumentation interval opened above before bailing. */
        telemetry_instrumentation_end(&telemetry_buffer);
        return;
    }
    if (btinfo.btui_async_frame_addr != 0 &&
        btinfo.btui_async_start_index != 0) {
        /*
         * Put the async callstack inline after the frame pointer walk call
         * stack.
         */
        async_start_index = (uint16_t)btinfo.btui_async_start_index;
        uintptr_t frame_addr = btinfo.btui_async_frame_addr;
        unsigned int frames_left = frames_len - async_start_index;
        struct backtrace_control ctl = { .btc_frame_addr = frame_addr, };
        btinfo = BTUINFO_INIT;
        unsigned int async_filled = backtrace_user(frames + async_start_index,
            frames_left, &ctl, &btinfo);
        if (btinfo.btui_error == 0) {
            btcount = MIN(async_start_index + async_filled, frames_len);
        }
    }

    /* Process the backtrace. */
    struct telemetry_target target = {
        .thread = thread,
        .frames = frames,
        .frames_count = btcount,
        .user64_regs = (btinfo.btui_info & BTI_64_BIT) != 0,
        .microsnapshot_flags = flags,
        .buffer = &telemetry_buffer,
        .buffer_mtx = &telemetry_mtx,
        .async_start_index = async_start_index,
    };
    telemetry_process_sample(&target, true, NULL);

    telemetry_instrumentation_end(&telemetry_buffer);
}

#if CONFIG_MACF
void
telemetry_macf_take_sample(thread_t thread, enum micro_snapshot_flags flags)
{
    task_t task;

    uintptr_t frames_stack[128];
    vm_size_t btcapacity = ARRAY_COUNT(frames_stack);
    uint32_t btcount = 0;
    typedef uintptr_t telemetry_user_frame_t __kernel_data_semantics;
    telemetry_user_frame_t *frames = frames_stack;
    bool alloced_frames = false;

    struct backtrace_user_info btinfo = BTUINFO_INIT;
    struct backtrace_control btctl = BTCTL_INIT;

    uint32_t retry_count = 0;
    const uint32_t max_retries = 10;

    bool initialized = false;
    struct micro_snapshot_buffer *telbuf = &telemetry_macf_buffer;
    uint32_t record_start = 0;
    bool did_process = false;
    int rv = 0;

    if (thread == THREAD_NULL) {
        return;
    }

    telemetry_instrumentation_begin(telbuf, flags);

    /* Ensure task is ready for taking a sample. */
    task = get_threadtask(thread);
    if (!telemetry_task_ready_for_sample(task)) {
        rv = EBUSY;
        goto out;
    }

    /* Ensure MACF telemetry buffer was initialized. */
    TELEMETRY_MACF_LOCK();
    initialized = (telbuf->size > 0);
    TELEMETRY_MACF_UNLOCK();

    if (!initialized) {
        rv = ENOMEM;
        goto out;
    }

    /* Collect backtrace from user thread. */
    while (retry_count < max_retries) {
        btcount += backtrace_user(frames + btcount, btcapacity - btcount, &btctl, &btinfo);

        if ((btinfo.btui_info & BTI_TRUNCATED) != 0 && btinfo.btui_next_frame_addr != 0) {
            /*
             * Fast path uses stack memory to avoid an allocation. We must
             * pivot to heap memory in the case where we cannot write the
             * complete backtrace to this buffer.
             */
            if (frames == frames_stack) {
                btcapacity += 128;
                frames = kalloc_data(btcapacity * sizeof(*frames), Z_WAITOK);

                if (frames == NULL) {
                    break;
                }

                alloced_frames = true;

                assert(btcapacity > sizeof(frames_stack) / sizeof(frames_stack[0]));
                memcpy(frames, frames_stack, sizeof(frames_stack));
            } else {
                assert(alloced_frames);
                frames = krealloc_data(frames,
                    btcapacity * sizeof(*frames),
                    (btcapacity + 128) * sizeof(*frames),
                    Z_WAITOK);

                if (frames == NULL) {
                    break;
                }

                btcapacity += 128;
            }

            btctl.btc_frame_addr = btinfo.btui_next_frame_addr;
            ++retry_count;
        } else {
            break;
        }
    }

    if (frames == NULL) {
        rv = ENOMEM;
        goto out;
    } else if (btinfo.btui_error != 0) {
        rv = btinfo.btui_error;
        goto out;
    }

    /* Process the backtrace. */
    struct telemetry_target target = {
        .thread = thread,
        .frames = frames,
        .frames_count = btcount,
        .user64_regs = (btinfo.btui_info & BTI_64_BIT) != 0,
        .microsnapshot_flags = flags,
        .buffer = telbuf,
        .buffer_mtx = &telemetry_macf_mtx
    };
    rv = telemetry_process_sample(&target, false, &record_start);
    did_process = true;

out:
    /* Immediately deliver the collected sample to MAC clients. */
    if (rv == 0) {
        assert(telbuf->current_position >= record_start);
        mac_thread_telemetry(thread,
            0,
            (void *)(telbuf->buffer + record_start),
            telbuf->current_position - record_start);
    } else {
        mac_thread_telemetry(thread, rv, NULL, 0);
    }

    /*
     * The lock was taken by telemetry_process_sample, and we asked it not to
     * unlock upon completion, so we must release the lock here.
     */
    if (did_process) {
        TELEMETRY_MACF_UNLOCK();
    }

    if (alloced_frames && frames != NULL) {
        kfree_data(frames, btcapacity * sizeof(*frames));
    }

    telemetry_instrumentation_end(telbuf);
}
#endif /* CONFIG_MACF */

int
telemetry_process_sample(const struct telemetry_target *target,
    bool release_buffer_lock,
    uint32_t *out_current_record_start)
{
    thread_t thread = target->thread;
    uintptr_t *frames = target->frames;
    size_t btcount = target->frames_count;
    bool user64_regs = target->user64_regs;
    enum micro_snapshot_flags microsnapshot_flags = target->microsnapshot_flags;
    struct micro_snapshot_buffer *current_buffer = target->buffer;
    lck_mtx_t *buffer_mtx = target->buffer_mtx;

    task_t task;
    void *p;
    uint32_t bti;
    struct micro_snapshot *msnap;
    struct task_snapshot *tsnap;
    struct thread_snapshot *thsnap;
    clock_sec_t secs;
    clock_usec_t usecs;
    vm_size_t framesize;
    uint32_t current_record_start;
    uint32_t tmp = 0;
    bool notify = false;
    int rv = 0;

    if (thread == THREAD_NULL) {
        return EINVAL;
    }

    task = get_threadtask(thread);
    p = get_bsdtask_info(task);
    bool user64_va = task_has_64Bit_addr(task);

    /*
     * Retrieve the array of UUID's for binaries used by this task.
     * We reach down into DYLD's data structures to find the array.
     *
     * XXX - make this common with kdp?
     */
    uint32_t uuid_info_count = 0;
    mach_vm_address_t uuid_info_addr = 0;
    uint32_t uuid_info_size = 0;
    if (user64_va) {
        uuid_info_size = sizeof(struct user64_dyld_uuid_info);
        struct user64_dyld_all_image_infos task_image_infos;
        if (copyin(task->all_image_info_addr, (char *)&task_image_infos, sizeof(task_image_infos)) == 0) {
            uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
            uuid_info_addr = task_image_infos.uuidArray;
        }
    } else {
        uuid_info_size = sizeof(struct user32_dyld_uuid_info);
        struct user32_dyld_all_image_infos task_image_infos;
        if (copyin(task->all_image_info_addr, (char *)&task_image_infos, sizeof(task_image_infos)) == 0) {
            uuid_info_count = task_image_infos.uuidArrayCount;
            uuid_info_addr = task_image_infos.uuidArray;
        }
    }

    /*
     * If we get a NULL uuid_info_addr (which can happen when we catch dyld in the middle of updating
     * this data structure), we zero the uuid_info_count so that we won't even try to save load info
     * for this task.
     */
    if (!uuid_info_addr) {
        uuid_info_count = 0;
    }

    /*
     * Don't copy in an unbounded amount of memory. The main binary and interesting
     * non-shared-cache libraries should be in the first few images.
     */
    if (uuid_info_count > TELEMETRY_MAX_UUID_COUNT) {
        uuid_info_count = TELEMETRY_MAX_UUID_COUNT;
    }

    uint32_t uuid_info_array_size = uuid_info_count * uuid_info_size;
    char *uuid_info_array = NULL;

    if (uuid_info_count > 0) {
        uuid_info_array = kalloc_data(uuid_info_array_size, Z_WAITOK);
        if (uuid_info_array == NULL) {
            return ENOMEM;
        }

        /*
         * Copy in the UUID info array.
         * It may be nonresident, in which case just fix up nloadinfos to 0 in the task snapshot.
         */
        if (copyin(uuid_info_addr, uuid_info_array, uuid_info_array_size) != 0) {
            kfree_data(uuid_info_array, uuid_info_array_size);
            uuid_info_array = NULL;
            uuid_info_array_size = 0;
        }
    }

    /*
     * Look for a dispatch queue serial number, and copy it in from userland if present.
     */
    uint64_t dqserialnum = 0;
    int dqserialnum_valid = 0;

    uint64_t dqkeyaddr = thread_dispatchqaddr(thread);
    if (dqkeyaddr != 0) {
        uint64_t dqaddr = 0;
        uint64_t dq_serialno_offset = get_task_dispatchqueue_serialno_offset(task);
        if ((copyin(dqkeyaddr, (char *)&dqaddr, (user64_va ? 8 : 4)) == 0) &&
            (dqaddr != 0) && (dq_serialno_offset != 0)) {
            uint64_t dqserialnumaddr = dqaddr + dq_serialno_offset;
            if (copyin(dqserialnumaddr, (char *)&dqserialnum, (user64_va ? 8 : 4)) == 0) {
                dqserialnum_valid = 1;
            }
        }
    }

    clock_get_calendar_microtime(&secs, &usecs);

    lck_mtx_lock(buffer_mtx);

    /*
     * If our buffer is not backed by any memory, we cannot take the sample.
     * This check is what allows the buffer to be deallocated when sampling
     * is disabled.
     */
    if (!current_buffer->buffer) {
        rv = EINVAL;
        goto cancel_sample;
    }

    /*
     * We do the bulk of the operation under the telemetry lock, on assumption that
     * any page faults during execution will not cause another AST_TELEMETRY_ALL
     * to deadlock; they will just block until we finish. This makes it easier
     * to copy into the buffer directly. As soon as we unlock, userspace can copy
     * out of our buffer.
     */

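    /*
     * Record layout, in buffer order: a micro_snapshot header, a
     * task_snapshot, the UUID load-info array, a thread_snapshot, an optional
     * dispatch queue serial number, and finally the backtrace frames. Any
     * time a piece does not fit, we wrap to offset 0 and restart the whole
     * record; a record that cannot fit even from offset 0 is dropped.
     */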
copytobuffer:

    current_record_start = current_buffer->current_position;

    if ((current_buffer->size - current_buffer->current_position) < sizeof(struct micro_snapshot)) {
        /*
         * We can't fit a record in the space available, so wrap around to the beginning.
         * Save the current position as the known end point of valid data.
         */
        current_buffer->end_point = current_record_start;
        current_buffer->current_position = 0;
        if (current_record_start == 0) {
            /* This sample is too large to fit in the buffer even when we started at 0, so skip it */
            rv = ERANGE;
            goto cancel_sample;
        }
        goto copytobuffer;
    }

    msnap = (struct micro_snapshot *)(uintptr_t)(current_buffer->buffer + current_buffer->current_position);
    msnap->snapshot_magic = STACKSHOT_MICRO_SNAPSHOT_MAGIC;
    msnap->ms_flags = (uint8_t)microsnapshot_flags;
    msnap->ms_opaque_flags = 0; /* namespace managed by userspace */
    msnap->ms_cpu = cpu_number();
    msnap->ms_time = secs;
    msnap->ms_time_microsecs = usecs;

    current_buffer->current_position += sizeof(struct micro_snapshot);

    if ((current_buffer->size - current_buffer->current_position) < sizeof(struct task_snapshot)) {
        current_buffer->end_point = current_record_start;
        current_buffer->current_position = 0;
        if (current_record_start == 0) {
            /* This sample is too large to fit in the buffer even when we started at 0, so skip it */
            rv = ERANGE;
            goto cancel_sample;
        }
        goto copytobuffer;
    }

    tsnap = (struct task_snapshot *)(uintptr_t)(current_buffer->buffer + current_buffer->current_position);
    bzero(tsnap, sizeof(*tsnap));
    tsnap->snapshot_magic = STACKSHOT_TASK_SNAPSHOT_MAGIC;
    tsnap->pid = proc_pid(p);
    tsnap->uniqueid = proc_uniqueid(p);
    struct recount_times_mach times = recount_task_terminated_times(task);
    tsnap->user_time_in_terminated_threads = times.rtm_user;
    tsnap->system_time_in_terminated_threads = times.rtm_system;
    tsnap->suspend_count = task->suspend_count;
    tsnap->task_size = (typeof(tsnap->task_size))(get_task_phys_footprint(task) / PAGE_SIZE);
    tsnap->faults = counter_load(&task->faults);
    tsnap->pageins = counter_load(&task->pageins);
    tsnap->cow_faults = counter_load(&task->cow_faults);
    /*
     * The throttling counters are maintained as 64-bit counters in the proc
     * structure. However, we reserve 32-bits (each) for them in the task_snapshot
     * struct to save space and since we do not expect them to overflow 32-bits. If we
     * find these values overflowing in the future, the fix would be to simply
     * upgrade these counters to 64-bit in the task_snapshot struct
     */
    tsnap->was_throttled = (uint32_t) proc_was_throttled(p);
    tsnap->did_throttle = (uint32_t) proc_did_throttle(p);
#if CONFIG_COALITIONS
    /*
     * These fields are overloaded to represent the resource coalition ID of
     * this task...
     */
    coalition_t rsrc_coal = task->coalition[COALITION_TYPE_RESOURCE];
    tsnap->p_start_sec = rsrc_coal ? coalition_id(rsrc_coal) : 0;
    /*
     * ... and the processes this thread is doing work on behalf of.
     */
    pid_t origin_pid = -1, proximate_pid = -1;
    (void)thread_get_voucher_origin_proximate_pid(thread, &origin_pid, &proximate_pid);
    tsnap->p_start_usec = ((uint64_t)proximate_pid << 32) | (uint32_t)origin_pid;
#endif /* CONFIG_COALITIONS */

    if (task->t_flags & TF_TELEMETRY) {
        tsnap->ss_flags |= kTaskRsrcFlagged;
    }

    if (proc_get_effective_task_policy(task, TASK_POLICY_DARWIN_BG)) {
        tsnap->ss_flags |= kTaskDarwinBG;
    }

    proc_get_darwinbgstate(task, &tmp);

    if (proc_get_effective_task_policy(task, TASK_POLICY_ROLE) == TASK_FOREGROUND_APPLICATION) {
        tsnap->ss_flags |= kTaskIsForeground;
    }

    if (tmp & PROC_FLAG_ADAPTIVE_IMPORTANT) {
        tsnap->ss_flags |= kTaskIsBoosted;
    }

    if (tmp & PROC_FLAG_SUPPRESSED) {
        tsnap->ss_flags |= kTaskIsSuppressed;
    }

    tsnap->latency_qos = task_grab_latency_qos(task);

    strlcpy(tsnap->p_comm, proc_name_address(p), sizeof(tsnap->p_comm));
    const char *longname = proc_longname_address(p);
    if (longname[0] != '\0') {
        /*
         * XXX Stash the rest of the process's name in some unused fields.
         */
        strlcpy((char *)tsnap->io_priority_count, &longname[16], sizeof(tsnap->io_priority_count));
    }
    if (user64_va) {
        tsnap->ss_flags |= kUser64_p;
    }

    if (task->task_shared_region_slide != -1) {
        tsnap->shared_cache_slide = task->task_shared_region_slide;
        bcopy(task->task_shared_region_uuid, tsnap->shared_cache_identifier,
            sizeof(task->task_shared_region_uuid));
    }

    current_buffer->current_position += sizeof(struct task_snapshot);

    /*
     * Directly after the task snapshot, place the array of UUID's corresponding to the binaries
     * used by this task.
     */
    if ((current_buffer->size - current_buffer->current_position) < uuid_info_array_size) {
        current_buffer->end_point = current_record_start;
        current_buffer->current_position = 0;
        if (current_record_start == 0) {
            /* This sample is too large to fit in the buffer even when we started at 0, so skip it */
            rv = ERANGE;
            goto cancel_sample;
        }
        goto copytobuffer;
    }

    /*
     * Copy the UUID info array into our sample.
     */
    if (uuid_info_array_size > 0) {
        bcopy(uuid_info_array, (char *)(current_buffer->buffer + current_buffer->current_position), uuid_info_array_size);
        tsnap->nloadinfos = uuid_info_count;
    }

    current_buffer->current_position += uuid_info_array_size;

    /*
     * After the task snapshot & list of binary UUIDs, we place a thread snapshot.
     */

    if ((current_buffer->size - current_buffer->current_position) < sizeof(struct thread_snapshot)) {
        /* wrap and overwrite */
        current_buffer->end_point = current_record_start;
        current_buffer->current_position = 0;
        if (current_record_start == 0) {
            /* This sample is too large to fit in the buffer even when we started at 0, so skip it */
            rv = ERANGE;
            goto cancel_sample;
        }
        goto copytobuffer;
    }

    thsnap = (struct thread_snapshot *)(uintptr_t)(current_buffer->buffer + current_buffer->current_position);
    bzero(thsnap, sizeof(*thsnap));

    thsnap->snapshot_magic = STACKSHOT_THREAD_SNAPSHOT_MAGIC;
    thsnap->thread_id = thread_tid(thread);
    thsnap->state = thread->state;
    thsnap->priority = thread->base_pri;
    thsnap->sched_pri = thread->sched_pri;
    thsnap->sched_flags = thread->sched_flags;
    thsnap->ss_flags |= kStacksPCOnly;
    thsnap->ts_qos = thread->effective_policy.thep_qos;
    thsnap->ts_rqos = thread->requested_policy.thrp_qos;
    thsnap->ts_rqos_override = MAX(thread->requested_policy.thrp_qos_override,
        thread->requested_policy.thrp_qos_workq_override);
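
    /*
     * Stash the async call-stack start index (telemetry_target.async_start_index)
     * in otherwise-unused reserved space, mirroring the p_comm overflow trick
     * above; readers aware of this convention can split the frame array.
     */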
    memcpy(thsnap->_reserved + 1, &target->async_start_index,
        sizeof(target->async_start_index));

    if (proc_get_effective_thread_policy(thread, TASK_POLICY_DARWIN_BG)) {
        thsnap->ss_flags |= kThreadDarwinBG;
    }

    boolean_t interrupt_state = ml_set_interrupts_enabled(FALSE);
    times = recount_current_thread_times();
    ml_set_interrupts_enabled(interrupt_state);
    thsnap->user_time = times.rtm_user;
    thsnap->system_time = times.rtm_system;

    current_buffer->current_position += sizeof(struct thread_snapshot);

    /*
     * If this thread has a dispatch queue serial number, include it here.
     */
    if (dqserialnum_valid) {
        if ((current_buffer->size - current_buffer->current_position) < sizeof(dqserialnum)) {
            /* wrap and overwrite */
            current_buffer->end_point = current_record_start;
            current_buffer->current_position = 0;
            if (current_record_start == 0) {
                /* This sample is too large to fit in the buffer even when we started at 0, so skip it */
                rv = ERANGE;
                goto cancel_sample;
            }
            goto copytobuffer;
        }

        thsnap->ss_flags |= kHasDispatchSerial;
        bcopy(&dqserialnum, (char *)current_buffer->buffer + current_buffer->current_position, sizeof(dqserialnum));
        current_buffer->current_position += sizeof(dqserialnum);
    }

    if (user64_regs) {
        framesize = 8;
        thsnap->ss_flags |= kUser64_p;
    } else {
        framesize = 4;
    }

    /*
     * If we can't fit this entire stacktrace then cancel this record, wrap to the beginning,
     * and start again there so that we always store a full record.
     */
    if ((current_buffer->size - current_buffer->current_position) / framesize < btcount) {
        current_buffer->end_point = current_record_start;
        current_buffer->current_position = 0;
        if (current_record_start == 0) {
            /* This sample is too large to fit in the buffer even when we started at 0, so skip it */
            rv = ERANGE;
            goto cancel_sample;
        }
        goto copytobuffer;
    }

    for (bti = 0; bti < btcount; bti++, current_buffer->current_position += framesize) {
        if (framesize == 8) {
            *(uint64_t *)(uintptr_t)(current_buffer->buffer + current_buffer->current_position) = frames[bti];
        } else {
            *(uint32_t *)(uintptr_t)(current_buffer->buffer + current_buffer->current_position) = (uint32_t)frames[bti];
        }
    }

    if (current_buffer->end_point < current_buffer->current_position) {
        /*
         * Each time the cursor wraps around to the beginning, we leave a
         * differing amount of unused space at the end of the buffer. Make
         * sure the cursor pushes the end point in case we're making use of
         * more of the buffer than we did the last time we wrapped.
         */
        current_buffer->end_point = current_buffer->current_position;
    }

    thsnap->nuser_frames = btcount;

    /*
     * Now THIS is a hack.
     */
    if (current_buffer == &telemetry_buffer) {
        telemetry_bytes_since_last_mark += (current_buffer->current_position - current_record_start);
        if (telemetry_bytes_since_last_mark > telemetry_buffer_notify_at) {
            notify = true;
        }
    }

    if (out_current_record_start != NULL) {
        *out_current_record_start = current_record_start;
    }

cancel_sample:
    if (release_buffer_lock) {
        lck_mtx_unlock(buffer_mtx);
    }

    if (notify) {
        telemetry_notify_user();
    }

    if (uuid_info_array != NULL) {
        kfree_data(uuid_info_array, uuid_info_array_size);
    }

    return rv;
}

#if TELEMETRY_DEBUG
static void
log_telemetry_output(vm_offset_t buf, uint32_t pos, uint32_t sz)
{
    struct micro_snapshot *p;
    uint32_t offset;

    printf("Copying out %d bytes of telemetry at offset %d\n", sz, pos);

    buf += pos;

    /*
     * Find and log each timestamp in this chunk of buffer.
     */
    for (offset = 0; offset < sz; offset++) {
        p = (struct micro_snapshot *)(buf + offset);
        if (p->snapshot_magic == STACKSHOT_MICRO_SNAPSHOT_MAGIC) {
            printf("telemetry timestamp: %lld\n", p->ms_time);
        }
    }
}
#endif

int
telemetry_gather(user_addr_t buffer, uint32_t *length, bool mark)
{
    return telemetry_buffer_gather(buffer, length, mark, &telemetry_buffer);
}

int
telemetry_buffer_gather(user_addr_t buffer, uint32_t *length, bool mark, struct micro_snapshot_buffer *current_buffer)
{
    int result = 0;
    uint32_t oldest_record_offset;

    KDBG(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_GATHER) | DBG_FUNC_START,
        mark, telemetry_bytes_since_last_mark, 0,
        (&telemetry_buffer != current_buffer));

    TELEMETRY_LOCK();

    if (current_buffer->buffer == 0) {
        *length = 0;
        goto out;
    }

    if (*length < current_buffer->size) {
        result = KERN_NO_SPACE;
        goto out;
    }

    /*
     * Copy the ring buffer out to userland in order sorted by time: least recent to most recent.
     * First, we need to search forward from the cursor to find the oldest record in our buffer.
     */
    oldest_record_offset = current_buffer->current_position;
    do {
        if (((oldest_record_offset + sizeof(uint32_t)) > current_buffer->size) ||
            ((oldest_record_offset + sizeof(uint32_t)) > current_buffer->end_point)) {
            if (*(uint32_t *)(uintptr_t)(current_buffer->buffer) == 0) {
                /*
                 * There is no magic number at the start of the buffer, which means
                 * it's empty; nothing to see here yet.
                 */
                *length = 0;
                goto out;
            }
            /*
             * We've looked through the end of the active buffer without finding a valid
             * record; that means all valid records are in a single chunk, beginning at
             * the very start of the buffer.
             */

            oldest_record_offset = 0;
            assert(*(uint32_t *)(uintptr_t)(current_buffer->buffer) == STACKSHOT_MICRO_SNAPSHOT_MAGIC);
            break;
        }

        if (*(uint32_t *)(uintptr_t)(current_buffer->buffer + oldest_record_offset) == STACKSHOT_MICRO_SNAPSHOT_MAGIC) {
            break;
        }

        /*
         * There are no alignment guarantees for micro-stackshot records, so we must search at each
         * byte offset.
         */
        oldest_record_offset++;
    } while (oldest_record_offset != current_buffer->current_position);

    /*
     * If needed, copyout in two chunks: from the oldest record to the end of the buffer, and then
     * from the beginning of the buffer up to the current position.
     */
    if (oldest_record_offset != 0) {
#if TELEMETRY_DEBUG
        log_telemetry_output(current_buffer->buffer, oldest_record_offset,
            current_buffer->end_point - oldest_record_offset);
#endif
        if ((result = copyout((void *)(current_buffer->buffer + oldest_record_offset), buffer,
            current_buffer->end_point - oldest_record_offset)) != 0) {
            *length = 0;
            goto out;
        }
        *length = current_buffer->end_point - oldest_record_offset;
    } else {
        *length = 0;
    }

#if TELEMETRY_DEBUG
    log_telemetry_output(current_buffer->buffer, 0, current_buffer->current_position);
#endif
    if ((result = copyout((void *)current_buffer->buffer, buffer + *length,
        current_buffer->current_position)) != 0) {
        *length = 0;
        goto out;
    }
    *length += (uint32_t)current_buffer->current_position;

out:

    if (mark && (*length > 0)) {
        telemetry_bytes_since_last_mark = 0;
    }

    TELEMETRY_UNLOCK();

    KDBG(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_GATHER) | DBG_FUNC_END,
        current_buffer->current_position, *length,
        current_buffer->end_point, (&telemetry_buffer != current_buffer));

    return result;
}

#if CONFIG_MACF
static int
telemetry_macf_init_locked(size_t buffer_size)
{
    kern_return_t kr;

    if (buffer_size > TELEMETRY_MAX_BUFFER_SIZE) {
        buffer_size = TELEMETRY_MAX_BUFFER_SIZE;
    }

    telemetry_macf_buffer.size = buffer_size;

    kr = kmem_alloc(kernel_map, &telemetry_macf_buffer.buffer,
        telemetry_macf_buffer.size, KMA_DATA | KMA_ZERO | KMA_PERMANENT,
        VM_KERN_MEMORY_SECURITY);

    if (kr != KERN_SUCCESS) {
        kprintf("Telemetry (MACF): Allocation failed: %d\n", kr);
        return ENOMEM;
    }

    return 0;
}

int
telemetry_macf_mark_curthread(void)
{
    thread_t thread = current_thread();
    task_t task = get_threadtask(thread);
    int rv = 0;

    if (task == kernel_task) {
        /* Kernel threads never return to an AST boundary, and are ineligible */
        return EINVAL;
    }

    /* Initialize the MACF telemetry buffer if needed. */
    TELEMETRY_MACF_LOCK();
    if (__improbable(telemetry_macf_buffer.size == 0)) {
        rv = telemetry_macf_init_locked(TELEMETRY_MACF_DEFAULT_BUFFER_SIZE);

        if (rv != 0) {
            /* Don't leak the mutex on the error path. */
            TELEMETRY_MACF_UNLOCK();
            return rv;
        }
    }
    TELEMETRY_MACF_UNLOCK();

    act_set_macf_telemetry_ast(thread);
    return 0;
}
#endif /* CONFIG_MACF */

static void
telemetry_stash_ca_event(
    kernel_brk_type_t type,
    uint16_t comment,
    uint32_t total_frames,
    uintptr_t *backtrace,
    uintptr_t faulting_address)
{
    /* Skip telemetry if we accidentally took a fault while handling telemetry */
    bool *in_handler = PERCPU_GET(brk_telemetry_in_handler);
    if (*in_handler) {
#if DEVELOPMENT
        panic("Breakpoint trap re-entered from within a spinlock");
#endif
        return;
    }

    /* Rate limit on repeatedly seeing the same address */
    uintptr_t *cache_address = PERCPU_GET(brk_telemetry_cache_address);
    if (*cache_address == faulting_address) {
        return;
    }

    *cache_address = faulting_address;

    lck_spin_lock(&ca_entries_lck);
    *in_handler = true;

    if (__improbable(ca_entries_index > CA_ENTRIES_SIZE)) {
        panic("Invalid CA interrupt buffer index %d > %d",
            ca_entries_index, CA_ENTRIES_SIZE);
    }

    /* We're full, just drop the event */
    if (ca_entries_index == CA_ENTRIES_SIZE) {
        *in_handler = false;
        lck_spin_unlock(&ca_entries_lck);
        return;
    }

    ca_entries[ca_entries_index].type = type;
    ca_entries[ca_entries_index].code = comment;
    ca_entries[ca_entries_index].faulting_address = faulting_address;

    assert(total_frames <= TELEMETRY_BT_FRAMES);

    if (total_frames <= TELEMETRY_BT_FRAMES) {
        ca_entries[ca_entries_index].num_frames = total_frames;
        memcpy(ca_entries[ca_entries_index].frames, backtrace,
            total_frames * sizeof(uintptr_t));
    }

    ca_entries_index++;

    *in_handler = false;
    lck_spin_unlock(&ca_entries_lck);

    thread_call_enter(telemetry_ca_send_callout);
}

static int
telemetry_backtrace_add_kernel(
    char *buf,
    size_t buflen)
{
    int rc = 0;
#if defined(__arm__) || defined(__arm64__)
    extern vm_offset_t segTEXTEXECB;
    extern unsigned long segSizeTEXTEXEC;
    vm_address_t unslid = segTEXTEXECB - vm_kernel_stext;

    rc += scnprintf(buf, buflen, "%s@%lx:%lx\n",
        kernel_uuid_string, unslid, unslid + segSizeTEXTEXEC - 1);
#elif defined(__x86_64__)
    rc += scnprintf(buf, buflen, "%s@0:%lx\n",
        kernel_uuid_string, vm_kernel_etext - vm_kernel_stext);
#else
#pragma unused(buf, buflen)
#endif
    return rc;
}

void
telemetry_backtrace_to_string(
    char *buf,
    size_t buflen,
    uint32_t tot,
    uintptr_t *frames)
{
    size_t l = 0;

    for (uint32_t i = 0; i < tot; i++) {
        l += scnprintf(buf + l, buflen - l, "%lx\n",
            frames[i] - vm_kernel_stext);
    }
    l += telemetry_backtrace_add_kernel(buf + l, buflen - l);
    telemetry_backtrace_add_kexts(buf + l, buflen - l, frames, tot);
}

static void
telemetry_flush_ca_events(
    __unused thread_call_param_t p0,
    __unused thread_call_param_t p1)
{
    struct telemetry_ca_entry local_entries[CA_ENTRIES_SIZE] = {0};
    uint8_t entry_cnt = 0;
    bool *in_handler = PERCPU_GET(brk_telemetry_in_handler);

    lck_spin_lock(&ca_entries_lck);
    *in_handler = true;

    if (__improbable(ca_entries_index > CA_ENTRIES_SIZE)) {
        panic("Invalid CA interrupt buffer index %d > %d", ca_entries_index,
            CA_ENTRIES_SIZE);
    }

    if (ca_entries_index == 0) {
        *in_handler = false;
        lck_spin_unlock(&ca_entries_lck);
        return;
    } else {
        memcpy(local_entries, ca_entries, sizeof(local_entries));
        entry_cnt = ca_entries_index;
        ca_entries_index = 0;
    }

    *in_handler = false;
    lck_spin_unlock(&ca_entries_lck);

    /*
     * All addresses (faulting_address and backtrace) are relative to
     * vm_kernel_stext, which means all offsets will typically be <= 50M
     * and use 7 hex digits.
     *
     * We allow up to TELEMETRY_BT_FRAMES (5) entries, formatted like this:
     *
     * <OFFSET1>\n
     * <OFFSET2>\n
     * ...
     * <UUID_a>@<TEXT_EXEC_BASE_OFFSET>:<TEXT_EXEC_END_OFFSET>\n
     * <UUID_b>@<TEXT_EXEC_BASE_OFFSET>:<TEXT_EXEC_END_OFFSET>\n
     * ...
     *
     * In general this backtrace takes 8 bytes per "frame",
     * with an extra 52 bytes per unique UUID referenced.
     *
     * The buffer we have is CA_UBSANBUF_LEN (256 bytes) long, which
     * accommodates 4 full unique UUIDs; that should be sufficient.
     */

    /* Send the events */
    for (uint8_t i = 0; i < entry_cnt; i++) {
        ca_event_t ca_event = CA_EVENT_ALLOCATE(kernel_breakpoint_event);
        CA_EVENT_TYPE(kernel_breakpoint_event) *event = ca_event->data;

        event->brk_type = local_entries[i].type;
        event->brk_code = local_entries[i].code;
        event->faulting_address = local_entries[i].faulting_address;

        telemetry_backtrace_to_string(event->backtrace,
            sizeof(event->backtrace),
            local_entries[i].num_frames,
            local_entries[i].frames);
        strlcpy(event->uuid, kernel_uuid_string, CA_UUID_LEN);

        CA_EVENT_SEND(ca_event);
    }
}

void
telemetry_kernel_brk(
    kernel_brk_type_t type,
    kernel_brk_options_t options,
    void *tstate,
    uint16_t comment)
{
#if __arm64__
    arm_saved_state_t *state = (arm_saved_state_t *)tstate;

    uintptr_t faulting_address = get_saved_state_pc(state);
    uintptr_t saved_fp = get_saved_state_fp(state);
#else
    x86_saved_state64_t *state = (x86_saved_state64_t *)tstate;

    uintptr_t faulting_address = state->isf.rip;
    uintptr_t saved_fp = state->rbp;
#endif

    assert(options & KERNEL_BRK_TELEMETRY_OPTIONS);

    if (startup_phase < STARTUP_SUB_THREAD_CALL) {
#if DEVELOPMENT || DEBUG
        panic("Attempting kernel breakpoint telemetry in early boot.");
#endif
        return;
    }

    if (options & KERNEL_BRK_CORE_ANALYTICS) {
        uintptr_t frames[TELEMETRY_BT_FRAMES];

        struct backtrace_control ctl = {
            .btc_frame_addr = (uintptr_t)saved_fp,
        };

        uint32_t total_frames = backtrace(frames, TELEMETRY_BT_FRAMES, &ctl, NULL);

        telemetry_stash_ca_event(type, comment, total_frames,
            frames, faulting_address - vm_kernel_stext);
    }
}

/************************/
/* BOOT PROFILE SUPPORT */
/************************/
/*
 * Boot Profiling
 *
 * The boot-profiling support is a mechanism to sample activity happening on the
 * system during boot. This mechanism sets up a periodic timer and on every timer fire,
 * captures a full backtrace into the boot profiling buffer. This buffer can be pulled
 * out and analyzed from user-space. It is turned on using the following boot-args:
 * "bootprofile_buffer_size" specifies the size of the boot profile buffer
 * "bootprofile_interval_ms" specifies the interval for the profiling timer
 *
 * Process Specific Boot Profiling
 *
 * The boot-arg "bootprofile_proc_name" can be used to specify a certain
 * process that needs to be profiled during boot. Setting this boot-arg changes
 * the way stackshots are captured. At every timer fire, the code looks at the
 * currently running process and takes a stackshot only if the requested process
 * is on-core (which makes it unsuitable for MP systems).
 *
 * Trigger Events
 *
 * The boot-arg "bootprofile_type=boot" starts the timer during early boot. Using
 * "wake" starts the timer at AP wake from suspend-to-RAM.
 */
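
/*
 * Example (hypothetical values): booting with
 *     bootprofile_buffer_size=4194304 bootprofile_interval_ms=100 bootprofile_type=boot
 * captures a stackshot of all processes every 100 ms into a 4 MiB buffer,
 * starting at early boot. Both a buffer size and an interval must be given,
 * or profiling stays disabled (see bootprofile_init below).
 */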

#define BOOTPROFILE_MAX_BUFFER_SIZE (64*1024*1024) /* see also COPYSIZELIMIT_PANIC */

vm_offset_t bootprofile_buffer = 0;
uint32_t bootprofile_buffer_size = 0;
uint32_t bootprofile_buffer_current_position = 0;
uint32_t bootprofile_interval_ms = 0;
uint64_t bootprofile_stackshot_flags = 0;
uint64_t bootprofile_interval_abs = 0;
uint64_t bootprofile_next_deadline = 0;
uint32_t bootprofile_all_procs = 0;
char bootprofile_proc_name[17];
uint64_t bootprofile_delta_since_timestamp = 0;
LCK_GRP_DECLARE(bootprofile_lck_grp, "bootprofile_group");
LCK_MTX_DECLARE(bootprofile_mtx, &bootprofile_lck_grp);

enum {
    kBootProfileDisabled = 0,
    kBootProfileStartTimerAtBoot,
    kBootProfileStartTimerAtWake
} bootprofile_type = kBootProfileDisabled;

static timer_call_data_t bootprofile_timer_call_entry;

#define BOOTPROFILE_LOCK() do { lck_mtx_lock(&bootprofile_mtx); } while(0)
#define BOOTPROFILE_TRY_SPIN_LOCK() lck_mtx_try_lock_spin(&bootprofile_mtx)
#define BOOTPROFILE_UNLOCK() do { lck_mtx_unlock(&bootprofile_mtx); } while(0)

static void bootprofile_timer_call(
    timer_call_param_t param0,
    timer_call_param_t param1);

void
bootprofile_init(void)
{
    kern_return_t ret;
    char type[32];

    if (!PE_parse_boot_argn("bootprofile_buffer_size",
        &bootprofile_buffer_size, sizeof(bootprofile_buffer_size))) {
        bootprofile_buffer_size = 0;
    }

    if (bootprofile_buffer_size > BOOTPROFILE_MAX_BUFFER_SIZE) {
        bootprofile_buffer_size = BOOTPROFILE_MAX_BUFFER_SIZE;
    }

    if (!PE_parse_boot_argn("bootprofile_interval_ms",
        &bootprofile_interval_ms, sizeof(bootprofile_interval_ms))) {
        bootprofile_interval_ms = 0;
    }

    if (!PE_parse_boot_argn("bootprofile_stackshot_flags",
        &bootprofile_stackshot_flags, sizeof(bootprofile_stackshot_flags))) {
        bootprofile_stackshot_flags = 0;
    }

    if (!PE_parse_boot_argn("bootprofile_proc_name",
        &bootprofile_proc_name, sizeof(bootprofile_proc_name))) {
        bootprofile_all_procs = 1;
        bootprofile_proc_name[0] = '\0';
    }

    if (PE_parse_boot_argn("bootprofile_type", type, sizeof(type))) {
        if (0 == strcmp(type, "boot")) {
            bootprofile_type = kBootProfileStartTimerAtBoot;
        } else if (0 == strcmp(type, "wake")) {
            bootprofile_type = kBootProfileStartTimerAtWake;
        } else {
            bootprofile_type = kBootProfileDisabled;
        }
    } else {
        bootprofile_type = kBootProfileDisabled;
    }

    clock_interval_to_absolutetime_interval(bootprofile_interval_ms, NSEC_PER_MSEC, &bootprofile_interval_abs);

    /* Both boot args must be set to enable */
    if ((bootprofile_type == kBootProfileDisabled) || (bootprofile_buffer_size == 0) || (bootprofile_interval_abs == 0)) {
        return;
    }

    ret = kmem_alloc(kernel_map, &bootprofile_buffer, bootprofile_buffer_size,
        KMA_DATA | KMA_ZERO | KMA_PERMANENT, VM_KERN_MEMORY_DIAG);
    if (ret != KERN_SUCCESS) {
        kprintf("Boot profile: Allocation failed: %d\n", ret);
        return;
    }

    kprintf("Boot profile: Sampling %s once per %u ms at %s\n",
        bootprofile_all_procs ? "all procs" : bootprofile_proc_name, bootprofile_interval_ms,
        bootprofile_type == kBootProfileStartTimerAtBoot ? "boot" : (bootprofile_type == kBootProfileStartTimerAtWake ? "wake" : "unknown"));

    timer_call_setup(&bootprofile_timer_call_entry,
        bootprofile_timer_call,
        NULL);

    if (bootprofile_type == kBootProfileStartTimerAtBoot) {
        bootprofile_next_deadline = mach_absolute_time() + bootprofile_interval_abs;
        timer_call_enter_with_leeway(&bootprofile_timer_call_entry,
            NULL,
            bootprofile_next_deadline,
            0,
            TIMER_CALL_SYS_NORMAL,
            false);
    }
}

void
bootprofile_wake_from_sleep(void)
{
    if (bootprofile_type == kBootProfileStartTimerAtWake) {
        bootprofile_next_deadline = mach_absolute_time() + bootprofile_interval_abs;
        timer_call_enter_with_leeway(&bootprofile_timer_call_entry,
            NULL,
            bootprofile_next_deadline,
            0,
            TIMER_CALL_SYS_NORMAL,
            false);
    }
}

static void
bootprofile_timer_call(
    timer_call_param_t param0 __unused,
    timer_call_param_t param1 __unused)
{
    unsigned retbytes = 0;
    int pid_to_profile = -1;

    if (!BOOTPROFILE_TRY_SPIN_LOCK()) {
        goto reprogram;
    }

    /* Check if process-specific boot profiling is turned on */
    if (!bootprofile_all_procs) {
        /*
         * Since boot profiling initializes really early in boot, it is
         * possible that at this point, the task/proc is not initialized.
         * Nothing to do in that case.
         */

        if ((current_task() != NULL) && (get_bsdtask_info(current_task()) != NULL) &&
            (0 == strncmp(bootprofile_proc_name, proc_name_address(get_bsdtask_info(current_task())), 17))) {
            pid_to_profile = proc_selfpid();
        } else {
            /*
             * Process-specific boot profiling requested but the on-core process is
             * something else. Nothing to do here.
             */
            BOOTPROFILE_UNLOCK();
            goto reprogram;
        }
    }

    /* initiate a stackshot with whatever portion of the buffer is left */
    if (bootprofile_buffer_current_position < bootprofile_buffer_size) {
        uint64_t flags = STACKSHOT_KCDATA_FORMAT | STACKSHOT_TRYLOCK | STACKSHOT_SAVE_LOADINFO
            | STACKSHOT_GET_GLOBAL_MEM_STATS;
#if defined(XNU_TARGET_OS_OSX)
        flags |= STACKSHOT_SAVE_KEXT_LOADINFO;
#endif

        /* OR on flags specified in boot-args */
        flags |= bootprofile_stackshot_flags;
        if ((flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) && (bootprofile_delta_since_timestamp == 0)) {
            /* Can't take deltas until the first one */
            flags &= ~STACKSHOT_COLLECT_DELTA_SNAPSHOT;
        }

        uint64_t timestamp = 0;
        if (bootprofile_stackshot_flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) {
            timestamp = mach_absolute_time();
        }

        kern_return_t r = stack_snapshot_from_kernel(
            pid_to_profile, (void *)(bootprofile_buffer + bootprofile_buffer_current_position),
            bootprofile_buffer_size - bootprofile_buffer_current_position,
            flags, bootprofile_delta_since_timestamp, 0, &retbytes);

        /*
         * We call with STACKSHOT_TRYLOCK because the stackshot lock is coarser
         * than the bootprofile lock. If someone else has the lock we'll just
         * try again later.
         */
        if (r == KERN_LOCK_OWNED) {
            BOOTPROFILE_UNLOCK();
            goto reprogram;
        }

        if (bootprofile_stackshot_flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT &&
            r == KERN_SUCCESS) {
            bootprofile_delta_since_timestamp = timestamp;
        }

        bootprofile_buffer_current_position += retbytes;
    }

    BOOTPROFILE_UNLOCK();

    /* If we didn't get any data or have run out of buffer space, stop profiling */
    if ((retbytes == 0) || (bootprofile_buffer_current_position == bootprofile_buffer_size)) {
        return;
    }

reprogram:
    /* If the user gathered the buffer, no need to keep profiling */
    if (bootprofile_interval_abs == 0) {
        return;
    }

    clock_deadline_for_periodic_event(bootprofile_interval_abs,
        mach_absolute_time(),
        &bootprofile_next_deadline);
    timer_call_enter_with_leeway(&bootprofile_timer_call_entry,
        NULL,
        bootprofile_next_deadline,
        0,
        TIMER_CALL_SYS_NORMAL,
        false);
}

void
bootprofile_get(void **buffer, uint32_t *length)
{
    BOOTPROFILE_LOCK();
    *buffer = (void *) bootprofile_buffer;
    *length = bootprofile_buffer_current_position;
    BOOTPROFILE_UNLOCK();
}

int
bootprofile_gather(user_addr_t buffer, uint32_t *length)
{
    int result = 0;

    BOOTPROFILE_LOCK();

    if (bootprofile_buffer == 0) {
        *length = 0;
        goto out;
    }

    if (*length < bootprofile_buffer_current_position) {
        result = KERN_NO_SPACE;
        goto out;
    }

    if ((result = copyout((void *)bootprofile_buffer, buffer,
        bootprofile_buffer_current_position)) != 0) {
        *length = 0;
        goto out;
    }
    *length = bootprofile_buffer_current_position;

    /* cancel future timers */
    bootprofile_interval_abs = 0;

out:

    BOOTPROFILE_UNLOCK();

    return result;
}