/*
 * Copyright (c) 2012-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/host_priv.h>
#include <mach/host_special_ports.h>
#include <mach/mach_types.h>
#include <mach/telemetry_notification_server.h>

#include <kern/assert.h>
#include <kern/clock.h>
#include <kern/coalition.h>
#include <kern/debug.h>
#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/kern_types.h>
#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/telemetry.h>
#include <kern/timer_call.h>
#include <kern/policy_internal.h>
#include <kern/kcdata.h>

#include <pexpert/pexpert.h>

#include <string.h>
#include <vm/vm_kern.h>
#include <vm/vm_shared_region.h>

#include <kperf/callstack.h>
#include <kern/backtrace.h>
#include <kern/monotonic.h>

#include <security/mac_mach_internal.h>

#include <sys/errno.h>
#include <sys/kdebug.h>
#include <uuid/uuid.h>
#include <kdp/kdp_dyld.h>

#include <libkern/coreanalytics/coreanalytics.h>
#include <kern/thread_call.h>

#define TELEMETRY_DEBUG 0

struct proc;
extern int proc_pid(struct proc *);
extern char *proc_name_address(void *p);
extern uint64_t proc_uniqueid(void *p);
extern uint64_t proc_was_throttled(void *p);
extern uint64_t proc_did_throttle(void *p);
extern int proc_selfpid(void);
extern boolean_t task_did_exec(task_t task);
extern boolean_t task_is_exec_copy(task_t task);

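/*
 * A micro-stackshot ring buffer: `buffer` is a backing allocation of `size`
 * bytes, `current_position` is the write cursor for the next record, and
 * `end_point` marks the end of valid data (pushed forward as records are
 * appended, pulled back to the last record boundary when the cursor wraps).
 */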
struct micro_snapshot_buffer {
	vm_offset_t buffer;
	uint32_t size;
	uint32_t current_position;
	uint32_t end_point;
};

static bool telemetry_task_ready_for_sample(task_t task);

static void telemetry_instrumentation_begin(
	struct micro_snapshot_buffer *buffer, enum micro_snapshot_flags flags);

static void telemetry_instrumentation_end(struct micro_snapshot_buffer *buffer);

static void telemetry_take_sample(thread_t thread, enum micro_snapshot_flags flags);

#if CONFIG_MACF
static void telemetry_macf_take_sample(thread_t thread, enum micro_snapshot_flags flags);
#endif

struct telemetry_target {
	thread_t thread;
	uintptr_t *frames;
	size_t frames_count;
	bool user64_regs;
	uint16_t async_start_index;
	enum micro_snapshot_flags microsnapshot_flags;
	struct micro_snapshot_buffer *buffer;
	lck_mtx_t *buffer_mtx;
};

static int telemetry_process_sample(
	const struct telemetry_target *target,
	bool release_buffer_lock,
	uint32_t *out_current_record_start);

static int telemetry_buffer_gather(
	user_addr_t buffer,
	uint32_t *length,
	bool mark,
	struct micro_snapshot_buffer *current_buffer);

#define TELEMETRY_DEFAULT_SAMPLE_RATE (1) /* 1 sample every 1 second */
#define TELEMETRY_DEFAULT_BUFFER_SIZE (16*1024)
#define TELEMETRY_MAX_BUFFER_SIZE (64*1024)

#define TELEMETRY_DEFAULT_NOTIFY_LEEWAY (4*1024) // Userland gets 4k of leeway to collect data after notification
#define TELEMETRY_MAX_UUID_COUNT (128) // Max of 128 non-shared-cache UUIDs to log for symbolication

uint32_t telemetry_sample_rate = 0;
volatile boolean_t telemetry_needs_record = FALSE;
volatile boolean_t telemetry_needs_timer_arming_record = FALSE;

/*
 * If TRUE, record micro-stackshot samples for all tasks.
 * If FALSE, only sample tasks which are marked for telemetry.
 */
bool telemetry_sample_all_tasks = false;
bool telemetry_sample_pmis = false;
uint32_t telemetry_active_tasks = 0; // Number of tasks opted into telemetry

uint32_t telemetry_timestamp = 0;

/*
 * The telemetry_buffer is responsible
 * for timer samples and interrupt samples that are driven by
 * compute_averages(). It will notify its client (if one
 * exists) when it has enough data to be worth flushing.
 */
struct micro_snapshot_buffer telemetry_buffer = {
	.buffer = 0,
	.size = 0,
	.current_position = 0,
	.end_point = 0
};

#if CONFIG_MACF
#define TELEMETRY_MACF_DEFAULT_BUFFER_SIZE (16*1024)
/*
 * The MAC framework uses its own telemetry buffer for the purposes of auditing
 * security-related work being done by userland threads.
 */
struct micro_snapshot_buffer telemetry_macf_buffer = {
	.buffer = 0,
	.size = 0,
	.current_position = 0,
	.end_point = 0
};
#endif

int telemetry_bytes_since_last_mark = -1; // How much data since buf was last marked?
int telemetry_buffer_notify_at = 0;

LCK_GRP_DECLARE(telemetry_lck_grp, "telemetry group");
LCK_MTX_DECLARE(telemetry_mtx, &telemetry_lck_grp);
LCK_MTX_DECLARE(telemetry_pmi_mtx, &telemetry_lck_grp);
LCK_MTX_DECLARE(telemetry_macf_mtx, &telemetry_lck_grp);

#define TELEMETRY_LOCK() do { lck_mtx_lock(&telemetry_mtx); } while (0)
#define TELEMETRY_TRY_SPIN_LOCK() lck_mtx_try_lock_spin(&telemetry_mtx)
#define TELEMETRY_UNLOCK() do { lck_mtx_unlock(&telemetry_mtx); } while (0)

#define TELEMETRY_PMI_LOCK() do { lck_mtx_lock(&telemetry_pmi_mtx); } while (0)
#define TELEMETRY_PMI_UNLOCK() do { lck_mtx_unlock(&telemetry_pmi_mtx); } while (0)

#define TELEMETRY_MACF_LOCK() do { lck_mtx_lock(&telemetry_macf_mtx); } while (0)
#define TELEMETRY_MACF_UNLOCK() do { lck_mtx_unlock(&telemetry_macf_mtx); } while (0)

#define TELEMETRY_BT_FRAMES (5)
#define BACKTRACE_FRAMES_BUF (((TELEMETRY_BT_FRAMES) * 17) + 1)
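/*
 * Sizing note: telemetry_backtrace_to_string() below renders each frame with
 * "%lx\n", i.e. at most 16 hex digits plus a newline (17 bytes) per frame,
 * plus one byte for the NUL terminator -- hence (5 * 17) + 1.
 */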

_Static_assert(BACKTRACE_FRAMES_BUF == CA_UBSANBUF_LEN, "Telemetry buffer size should match.");
/*
 * Telemetry reporting is unsafe in interrupt context, since the CA framework
 * relies on being able to successfully zalloc some memory for the event.
 * Therefore we maintain a small buffer that is then flushed by a helper thread.
 */
#define CA_ENTRIES_SIZE (5)

struct telemetry_ca_entry {
	uint32_t type;
	uint16_t code;
	uint32_t num_frames;
	uintptr_t faulting_address;
	uintptr_t frames[TELEMETRY_BT_FRAMES];
};

LCK_GRP_DECLARE(ca_entries_lock_grp, "ca_entries_lck");
LCK_SPIN_DECLARE(ca_entries_lck, &ca_entries_lock_grp);

static struct telemetry_ca_entry ca_entries[CA_ENTRIES_SIZE];
static uint8_t ca_entries_index = 0;
static struct thread_call *telemetry_ca_send_callout;

CA_EVENT(kernel_breakpoint_event,
    CA_INT, brk_type,
    CA_INT, brk_code,
    CA_INT, faulting_address,
    CA_STATIC_STRING(CA_UBSANBUF_LEN), backtrace,
    CA_STATIC_STRING(CA_UUID_LEN), uuid);

/* Rate-limit telemetry on last seen faulting address */
static uintptr_t PERCPU_DATA(brk_telemetry_cache_address);
/* Get out from the brk handler if the CPU is already servicing one */
static bool PERCPU_DATA(brk_telemetry_in_handler);

static void telemetry_flush_ca_events(thread_call_param_t, thread_call_param_t);

void
telemetry_init(void)
{
	kern_return_t ret;
	uint32_t telemetry_notification_leeway;

	if (!PE_parse_boot_argn("telemetry_buffer_size",
	    &telemetry_buffer.size, sizeof(telemetry_buffer.size))) {
		telemetry_buffer.size = TELEMETRY_DEFAULT_BUFFER_SIZE;
	}

	if (telemetry_buffer.size > TELEMETRY_MAX_BUFFER_SIZE) {
		telemetry_buffer.size = TELEMETRY_MAX_BUFFER_SIZE;
	}

	ret = kmem_alloc(kernel_map, &telemetry_buffer.buffer, telemetry_buffer.size,
	    KMA_DATA | KMA_ZERO | KMA_PERMANENT, VM_KERN_MEMORY_DIAG);
	if (ret != KERN_SUCCESS) {
		kprintf("Telemetry: Allocation failed: %d\n", ret);
		return;
	}

	if (!PE_parse_boot_argn("telemetry_notification_leeway",
	    &telemetry_notification_leeway, sizeof(telemetry_notification_leeway))) {
		/*
		 * By default, notify the user to collect the buffer when there is this much space left in the buffer.
		 */
		telemetry_notification_leeway = TELEMETRY_DEFAULT_NOTIFY_LEEWAY;
	}
	if (telemetry_notification_leeway >= telemetry_buffer.size) {
		printf("telemetry: nonsensical telemetry_notification_leeway boot-arg %d changed to %d\n",
		    telemetry_notification_leeway, TELEMETRY_DEFAULT_NOTIFY_LEEWAY);
		telemetry_notification_leeway = TELEMETRY_DEFAULT_NOTIFY_LEEWAY;
	}
	telemetry_buffer_notify_at = telemetry_buffer.size - telemetry_notification_leeway;

	if (!PE_parse_boot_argn("telemetry_sample_rate",
	    &telemetry_sample_rate, sizeof(telemetry_sample_rate))) {
		telemetry_sample_rate = TELEMETRY_DEFAULT_SAMPLE_RATE;
	}

	telemetry_ca_send_callout = thread_call_allocate_with_options(
		telemetry_flush_ca_events, NULL, THREAD_CALL_PRIORITY_KERNEL,
		THREAD_CALL_OPTIONS_ONCE);

	assert(telemetry_ca_send_callout != NULL);
	/*
	 * To enable telemetry for all tasks, include "telemetry_sample_all_tasks=1" in boot-args.
	 */
	if (!PE_parse_boot_argn("telemetry_sample_all_tasks",
	    &telemetry_sample_all_tasks, sizeof(telemetry_sample_all_tasks))) {
#if !defined(XNU_TARGET_OS_OSX) && !(DEVELOPMENT || DEBUG)
		telemetry_sample_all_tasks = false;
#else
		telemetry_sample_all_tasks = true;
#endif /* !defined(XNU_TARGET_OS_OSX) && !(DEVELOPMENT || DEBUG) */
	}

	kprintf("Telemetry: Sampling %stasks once per %u second%s\n",
	    (telemetry_sample_all_tasks) ? "all " : "",
	    telemetry_sample_rate, telemetry_sample_rate == 1 ? "" : "s");
}

/*
 * Enable or disable global microstackshots (i.e., telemetry_sample_all_tasks).
 *
 * enable_disable == 1: turn it on
 * enable_disable == 0: turn it off
 */
void
telemetry_global_ctl(int enable_disable)
{
	if (enable_disable == 1) {
		telemetry_sample_all_tasks = true;
	} else {
		telemetry_sample_all_tasks = false;
	}
}

/*
 * Opt the given task into or out of the telemetry stream.
 *
 * Supported reasons (callers may use any or all of):
 * TF_CPUMON_WARNING
 * TF_WAKEMON_WARNING
 *
 * enable_disable == 1: turn it on
 * enable_disable == 0: turn it off
 */
void
telemetry_task_ctl(task_t task, uint32_t reasons, int enable_disable)
{
	task_lock(task);
	telemetry_task_ctl_locked(task, reasons, enable_disable);
	task_unlock(task);
}

void
telemetry_task_ctl_locked(task_t task, uint32_t reasons, int enable_disable)
{
	uint32_t origflags;

	assert((reasons != 0) && ((reasons | TF_TELEMETRY) == TF_TELEMETRY));

	task_lock_assert_owned(task);

	origflags = task->t_flags;

	if (enable_disable == 1) {
		task->t_flags |= reasons;
		if ((origflags & TF_TELEMETRY) == 0) {
			OSIncrementAtomic(&telemetry_active_tasks);
#if TELEMETRY_DEBUG
			printf("%s: telemetry OFF -> ON (%d active)\n", proc_name_address(get_bsdtask_info(task)), telemetry_active_tasks);
#endif
		}
	} else {
		task->t_flags &= ~reasons;
		if (((origflags & TF_TELEMETRY) != 0) && ((task->t_flags & TF_TELEMETRY) == 0)) {
			/*
			 * If this task went from having at least one telemetry bit to having none,
			 * the net change was to disable telemetry for the task.
			 */
			OSDecrementAtomic(&telemetry_active_tasks);
#if TELEMETRY_DEBUG
			printf("%s: telemetry ON -> OFF (%d active)\n", proc_name_address(get_bsdtask_info(task)), telemetry_active_tasks);
#endif
		}
	}
}

/*
 * Determine if the current thread is eligible for telemetry:
 *
 * telemetry_sample_all_tasks: All threads are eligible. This takes precedence.
 * telemetry_active_tasks: Count of tasks opted in.
 * task->t_flags & TF_TELEMETRY: This task is opted in.
 */
static bool
telemetry_is_active(thread_t thread)
{
	task_t task = get_threadtask(thread);

	if (task == kernel_task) {
		/* Kernel threads never return to an AST boundary, and are ineligible */
		return false;
	}

	if (telemetry_sample_all_tasks || telemetry_sample_pmis) {
		return true;
	}

	if ((telemetry_active_tasks > 0) && ((task->t_flags & TF_TELEMETRY) != 0)) {
		return true;
	}

	return false;
}

/*
 * Userland is arming a timer. If we are eligible for such a record,
 * sample now. No need to do this one at the AST because we're already at
 * a safe place in this system call.
 */
int
telemetry_timer_event(__unused uint64_t deadline, __unused uint64_t interval, __unused uint64_t leeway)
{
	if (telemetry_needs_timer_arming_record == TRUE) {
		telemetry_needs_timer_arming_record = FALSE;
		telemetry_take_sample(current_thread(), (enum micro_snapshot_flags)(kTimerArmingRecord | kUserMode));
	}

	return 0;
}

#if MONOTONIC
static void
telemetry_pmi_handler(bool user_mode, __unused void *ctx)
{
	telemetry_mark_curthread(user_mode, TRUE);
}
#endif /* MONOTONIC */

int
telemetry_pmi_setup(enum telemetry_pmi pmi_ctr, uint64_t period)
{
#if MONOTONIC
	static bool sample_all_tasks_aside = false;
	static uint32_t active_tasks_aside = false;
	int error = 0;
	const char *name = "?";

	unsigned int ctr = 0;

	TELEMETRY_PMI_LOCK();

	switch (pmi_ctr) {
	case TELEMETRY_PMI_NONE:
		if (!telemetry_sample_pmis) {
			error = 1;
			goto out;
		}

		telemetry_sample_pmis = false;
		telemetry_sample_all_tasks = sample_all_tasks_aside;
		telemetry_active_tasks = active_tasks_aside;
		error = mt_microstackshot_stop();
		if (!error) {
			printf("telemetry: disabling ustackshot on PMI\n");
		}
		goto out;

	case TELEMETRY_PMI_INSTRS:
		ctr = MT_CORE_INSTRS;
		name = "instructions";
		break;

	case TELEMETRY_PMI_CYCLES:
		ctr = MT_CORE_CYCLES;
		name = "cycles";
		break;

	default:
		error = 1;
		goto out;
	}

	telemetry_sample_pmis = true;
	sample_all_tasks_aside = telemetry_sample_all_tasks;
	active_tasks_aside = telemetry_active_tasks;
	telemetry_sample_all_tasks = false;
	telemetry_active_tasks = 0;

	error = mt_microstackshot_start(ctr, period, telemetry_pmi_handler, NULL);
	if (!error) {
		printf("telemetry: ustackshot every %llu %s\n", period, name);
	}

out:
	TELEMETRY_PMI_UNLOCK();
	return error;
#else /* MONOTONIC */
#pragma unused(pmi_ctr, period)
	return 1;
#endif /* !MONOTONIC */
}

/*
 * Mark the current thread for an interrupt-based
 * telemetry record, to be sampled at the next AST boundary.
 */
void
telemetry_mark_curthread(boolean_t interrupted_userspace, boolean_t pmi)
{
	uint32_t ast_bits = 0;
	thread_t thread = current_thread();

	/*
	 * If telemetry isn't active for this thread, return and try
	 * again next time.
	 */
	if (telemetry_is_active(thread) == false) {
		return;
	}

	ast_bits |= (interrupted_userspace ? AST_TELEMETRY_USER : AST_TELEMETRY_KERNEL);
	if (pmi) {
		ast_bits |= AST_TELEMETRY_PMI;
	}

	telemetry_needs_record = FALSE;
	thread_ast_set(thread, ast_bits);
	ast_propagate(thread);
}

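/*
 * Driven from the scheduler's periodic accounting (compute_averages(); see
 * the telemetry_buffer comment above). Flags a new record once every
 * telemetry_sample_rate invocations.
 */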
void
compute_telemetry(void *arg __unused)
{
	if (telemetry_sample_all_tasks || (telemetry_active_tasks > 0)) {
		if ((++telemetry_timestamp) % telemetry_sample_rate == 0) {
			telemetry_needs_record = TRUE;
			telemetry_needs_timer_arming_record = TRUE;
		}
	}
}

/*
 * If userland has registered a port for telemetry notifications, send one now.
 */
static void
telemetry_notify_user(void)
{
	mach_port_t user_port = MACH_PORT_NULL;

	kern_return_t kr = host_get_telemetry_port(host_priv_self(), &user_port);
	if ((kr != KERN_SUCCESS) || !IPC_PORT_VALID(user_port)) {
		return;
	}

	telemetry_notification(user_port, 0);
	ipc_port_release_send(user_port);
}

void
telemetry_ast(thread_t thread, ast_t reasons)
{
	assert((reasons & AST_TELEMETRY_ALL) != 0);

	uint8_t record_type = 0;
	if (reasons & AST_TELEMETRY_IO) {
		record_type |= kIORecord;
	}
	if (reasons & (AST_TELEMETRY_USER | AST_TELEMETRY_KERNEL)) {
		record_type |= (reasons & AST_TELEMETRY_PMI) ? kPMIRecord :
		    kInterruptRecord;
	}

	if ((reasons & AST_TELEMETRY_MACF) != 0) {
		record_type |= kMACFRecord;
	}

	enum micro_snapshot_flags user_telemetry = (reasons & AST_TELEMETRY_USER) ? kUserMode : 0;
	enum micro_snapshot_flags microsnapshot_flags = record_type | user_telemetry;

	if ((reasons & AST_TELEMETRY_MACF) != 0) {
		telemetry_macf_take_sample(thread, microsnapshot_flags);
	}

	if ((reasons & (AST_TELEMETRY_IO | AST_TELEMETRY_KERNEL | AST_TELEMETRY_PMI
	    | AST_TELEMETRY_USER)) != 0) {
		telemetry_take_sample(thread, microsnapshot_flags);
	}
}

bool
telemetry_task_ready_for_sample(task_t task)
{
	return task != TASK_NULL &&
	       task != kernel_task &&
	       !task_did_exec(task) &&
	       !task_is_exec_copy(task);
}

void
telemetry_instrumentation_begin(
	__unused struct micro_snapshot_buffer *buffer,
	__unused enum micro_snapshot_flags flags)
{
	/* telemetry_XXX accessed outside of lock for instrumentation only */
	KDBG(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_RECORD) | DBG_FUNC_START,
	    flags, telemetry_bytes_since_last_mark, 0,
	    (&telemetry_buffer != buffer));
}

void
telemetry_instrumentation_end(__unused struct micro_snapshot_buffer *buffer)
{
	/* telemetry_XXX accessed outside of lock for instrumentation only */
	KDBG(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_RECORD) | DBG_FUNC_END,
	    (&telemetry_buffer == buffer), telemetry_bytes_since_last_mark,
	    buffer->current_position, buffer->end_point);
}

void
telemetry_take_sample(thread_t thread, enum micro_snapshot_flags flags)
{
	task_t task;
	uintptr_t frames[128];
	size_t frames_len = sizeof(frames) / sizeof(frames[0]);
	uint32_t btcount;
	struct backtrace_user_info btinfo = BTUINFO_INIT;
	uint16_t async_start_index = UINT16_MAX;

	if (thread == THREAD_NULL) {
		return;
	}

	/* Ensure task is ready for taking a sample. */
	task = get_threadtask(thread);
	if (!telemetry_task_ready_for_sample(task)) {
		return;
	}

	telemetry_instrumentation_begin(&telemetry_buffer, flags);

	/* Collect backtrace from user thread. */
	btcount = backtrace_user(frames, frames_len, NULL, &btinfo);
	if (btinfo.btui_error != 0) {
		return;
	}
	if (btinfo.btui_async_frame_addr != 0 &&
	    btinfo.btui_async_start_index != 0) {
		/*
		 * Put the async callstack inline after the frame pointer walk call
		 * stack.
		 */
		async_start_index = (uint16_t)btinfo.btui_async_start_index;
		uintptr_t frame_addr = btinfo.btui_async_frame_addr;
		unsigned int frames_left = frames_len - async_start_index;
		struct backtrace_control ctl = { .btc_frame_addr = frame_addr, };
		btinfo = BTUINFO_INIT;
		unsigned int async_filled = backtrace_user(frames + async_start_index,
		    frames_left, &ctl, &btinfo);
		if (btinfo.btui_error == 0) {
			btcount = MIN(async_start_index + async_filled, frames_len);
		}
	}

	/* Process the backtrace. */
	struct telemetry_target target = {
		.thread = thread,
		.frames = frames,
		.frames_count = btcount,
		.user64_regs = (btinfo.btui_info & BTI_64_BIT) != 0,
		.microsnapshot_flags = flags,
		.buffer = &telemetry_buffer,
		.buffer_mtx = &telemetry_mtx,
		.async_start_index = async_start_index,
	};
	telemetry_process_sample(&target, true, NULL);

	telemetry_instrumentation_end(&telemetry_buffer);
}

#if CONFIG_MACF
void
telemetry_macf_take_sample(thread_t thread, enum micro_snapshot_flags flags)
{
	task_t task;

	vm_size_t btcapacity = 128;
	uintptr_t frames_stack[btcapacity];
	uint32_t btcount = 0;
	typedef uintptr_t telemetry_user_frame_t __kernel_data_semantics;
	telemetry_user_frame_t *frames = frames_stack;
	bool alloced_frames = false;

	struct backtrace_user_info btinfo = BTUINFO_INIT;
	struct backtrace_control btctl = BTCTL_INIT;

	uint32_t retry_count = 0;
	const uint32_t max_retries = 10;

	bool initialized = false;
	struct micro_snapshot_buffer *telbuf = &telemetry_macf_buffer;
	uint32_t record_start = 0;
	bool did_process = false;
	int rv = 0;

	if (thread == THREAD_NULL) {
		return;
	}

	telemetry_instrumentation_begin(telbuf, flags);

	/* Ensure task is ready for taking a sample. */
	task = get_threadtask(thread);
	if (!telemetry_task_ready_for_sample(task)) {
		rv = EBUSY;
		goto out;
	}

	/* Ensure MACF telemetry buffer was initialized. */
	TELEMETRY_MACF_LOCK();
	initialized = (telbuf->size > 0);
	TELEMETRY_MACF_UNLOCK();

	if (!initialized) {
		rv = ENOMEM;
		goto out;
	}

	/* Collect backtrace from user thread. */
	while (retry_count < max_retries) {
		btcount += backtrace_user(frames + btcount, btcapacity - btcount, &btctl, &btinfo);

		if ((btinfo.btui_info & BTI_TRUNCATED) != 0 && btinfo.btui_next_frame_addr != 0) {
			/*
			 * Fast path uses stack memory to avoid an allocation. We must
			 * pivot to heap memory in the case where we cannot write the
			 * complete backtrace to this buffer.
			 */
			if (frames == frames_stack) {
				btcapacity += 128;
				frames = kalloc_data(btcapacity * sizeof(*frames), Z_WAITOK);

				if (frames == NULL) {
					break;
				}

				alloced_frames = true;

				assert(btcapacity > sizeof(frames_stack) / sizeof(frames_stack[0]));
				memcpy(frames, frames_stack, sizeof(frames_stack));
			} else {
				assert(alloced_frames);
				frames = krealloc_data(frames,
				    btcapacity * sizeof(*frames),
				    (btcapacity + 128) * sizeof(*frames),
				    Z_WAITOK);

				if (frames == NULL) {
					break;
				}

				btcapacity += 128;
			}

			btctl.btc_frame_addr = btinfo.btui_next_frame_addr;
			++retry_count;
		} else {
			break;
		}
	}

	if (frames == NULL) {
		rv = ENOMEM;
		goto out;
	} else if (btinfo.btui_error != 0) {
		rv = btinfo.btui_error;
		goto out;
	}

	/* Process the backtrace. */
	struct telemetry_target target = {
		.thread = thread,
		.frames = frames,
		.frames_count = btcount,
		.user64_regs = (btinfo.btui_info & BTI_64_BIT) != 0,
		.microsnapshot_flags = flags,
		.buffer = telbuf,
		.buffer_mtx = &telemetry_macf_mtx
	};
	rv = telemetry_process_sample(&target, false, &record_start);
	did_process = true;

out:
	/* Immediately deliver the collected sample to MAC clients. */
	if (rv == 0) {
		assert(telbuf->current_position >= record_start);
		mac_thread_telemetry(thread,
		    0,
		    (void *)(telbuf->buffer + record_start),
		    telbuf->current_position - record_start);
	} else {
		mac_thread_telemetry(thread, rv, NULL, 0);
	}

	/*
	 * The lock was taken by telemetry_process_sample, and we asked it not to
	 * unlock upon completion, so we must release the lock here.
	 */
	if (did_process) {
		TELEMETRY_MACF_UNLOCK();
	}

	if (alloced_frames && frames != NULL) {
		kfree_data(frames, btcapacity * sizeof(*frames));
	}

	telemetry_instrumentation_end(telbuf);
}
#endif /* CONFIG_MACF */

int
telemetry_process_sample(const struct telemetry_target *target,
    bool release_buffer_lock,
    uint32_t *out_current_record_start)
{
	thread_t thread = target->thread;
	uintptr_t *frames = target->frames;
	size_t btcount = target->frames_count;
	bool user64_regs = target->user64_regs;
	enum micro_snapshot_flags microsnapshot_flags = target->microsnapshot_flags;
	struct micro_snapshot_buffer *current_buffer = target->buffer;
	lck_mtx_t *buffer_mtx = target->buffer_mtx;

	task_t task;
	void *p;
	uint32_t bti;
	struct micro_snapshot *msnap;
	struct task_snapshot *tsnap;
	struct thread_snapshot *thsnap;
	clock_sec_t secs;
	clock_usec_t usecs;
	vm_size_t framesize;
	uint32_t current_record_start;
	uint32_t tmp = 0;
	bool notify = false;
	int rv = 0;

	if (thread == THREAD_NULL) {
		return EINVAL;
	}

	task = get_threadtask(thread);
	p = get_bsdtask_info(task);
	bool user64_va = task_has_64Bit_addr(task);

	/*
	 * Retrieve the array of UUID's for binaries used by this task.
	 * We reach down into DYLD's data structures to find the array.
	 *
	 * XXX - make this common with kdp?
	 */
	uint32_t uuid_info_count = 0;
	mach_vm_address_t uuid_info_addr = 0;
	uint32_t uuid_info_size = 0;
	if (user64_va) {
		uuid_info_size = sizeof(struct user64_dyld_uuid_info);
		struct user64_dyld_all_image_infos task_image_infos;
		if (copyin(task->all_image_info_addr, (char *)&task_image_infos, sizeof(task_image_infos)) == 0) {
			uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
			uuid_info_addr = task_image_infos.uuidArray;
		}
	} else {
		uuid_info_size = sizeof(struct user32_dyld_uuid_info);
		struct user32_dyld_all_image_infos task_image_infos;
		if (copyin(task->all_image_info_addr, (char *)&task_image_infos, sizeof(task_image_infos)) == 0) {
			uuid_info_count = task_image_infos.uuidArrayCount;
			uuid_info_addr = task_image_infos.uuidArray;
		}
	}

	/*
	 * If we get a NULL uuid_info_addr (which can happen when we catch dyld in the middle of updating
	 * this data structure), we zero the uuid_info_count so that we won't even try to save load info
	 * for this task.
	 */
	if (!uuid_info_addr) {
		uuid_info_count = 0;
	}

	/*
	 * Don't copy in an unbounded amount of memory. The main binary and interesting
	 * non-shared-cache libraries should be in the first few images.
	 */
	if (uuid_info_count > TELEMETRY_MAX_UUID_COUNT) {
		uuid_info_count = TELEMETRY_MAX_UUID_COUNT;
	}

	uint32_t uuid_info_array_size = uuid_info_count * uuid_info_size;
	char *uuid_info_array = NULL;

	if (uuid_info_count > 0) {
		uuid_info_array = kalloc_data(uuid_info_array_size, Z_WAITOK);
		if (uuid_info_array == NULL) {
			return ENOMEM;
		}

		/*
		 * Copy in the UUID info array.
		 * It may be nonresident, in which case just fix up nloadinfos to 0 in the task snapshot.
		 */
		if (copyin(uuid_info_addr, uuid_info_array, uuid_info_array_size) != 0) {
			kfree_data(uuid_info_array, uuid_info_array_size);
			uuid_info_array = NULL;
			uuid_info_array_size = 0;
		}
	}

	/*
	 * Look for a dispatch queue serial number, and copy it in from userland if present.
	 */
	uint64_t dqserialnum = 0;
	int dqserialnum_valid = 0;

	uint64_t dqkeyaddr = thread_dispatchqaddr(thread);
	if (dqkeyaddr != 0) {
		uint64_t dqaddr = 0;
		uint64_t dq_serialno_offset = get_task_dispatchqueue_serialno_offset(task);
		if ((copyin(dqkeyaddr, (char *)&dqaddr, (user64_va ? 8 : 4)) == 0) &&
		    (dqaddr != 0) && (dq_serialno_offset != 0)) {
			uint64_t dqserialnumaddr = dqaddr + dq_serialno_offset;
			if (copyin(dqserialnumaddr, (char *)&dqserialnum, (user64_va ? 8 : 4)) == 0) {
				dqserialnum_valid = 1;
			}
		}
	}

	clock_get_calendar_microtime(&secs, &usecs);

	lck_mtx_lock(buffer_mtx);

	/*
	 * If our buffer is not backed by anything, we cannot take the sample.
	 * This allows the window buffer to be deallocated when it is disabled.
	 */
	if (!current_buffer->buffer) {
		rv = EINVAL;
		goto cancel_sample;
	}

	/*
	 * We do the bulk of the operation under the telemetry lock, on the
	 * assumption that any page faults during execution will not cause another
	 * AST_TELEMETRY_ALL to deadlock; they will just block until we finish.
	 * This makes it easier to copy into the buffer directly. As soon as we
	 * unlock, userspace can copy out of our buffer.
	 */

copytobuffer:

	current_record_start = current_buffer->current_position;

	if ((current_buffer->size - current_buffer->current_position) < sizeof(struct micro_snapshot)) {
		/*
		 * We can't fit a record in the space available, so wrap around to the beginning.
		 * Save the current position as the known end point of valid data.
		 */
		current_buffer->end_point = current_record_start;
		current_buffer->current_position = 0;
		if (current_record_start == 0) {
			/* This sample is too large to fit in the buffer even when we started at 0, so skip it */
			rv = ERANGE;
			goto cancel_sample;
		}
		goto copytobuffer;
	}

	msnap = (struct micro_snapshot *)(uintptr_t)(current_buffer->buffer + current_buffer->current_position);
	msnap->snapshot_magic = STACKSHOT_MICRO_SNAPSHOT_MAGIC;
	msnap->ms_flags = (uint8_t)microsnapshot_flags;
	msnap->ms_opaque_flags = 0; /* namespace managed by userspace */
	msnap->ms_cpu = cpu_number();
	msnap->ms_time = secs;
	msnap->ms_time_microsecs = usecs;

	current_buffer->current_position += sizeof(struct micro_snapshot);

	if ((current_buffer->size - current_buffer->current_position) < sizeof(struct task_snapshot)) {
		current_buffer->end_point = current_record_start;
		current_buffer->current_position = 0;
		if (current_record_start == 0) {
			/* This sample is too large to fit in the buffer even when we started at 0, so skip it */
			rv = ERANGE;
			goto cancel_sample;
		}
		goto copytobuffer;
	}

	tsnap = (struct task_snapshot *)(uintptr_t)(current_buffer->buffer + current_buffer->current_position);
	bzero(tsnap, sizeof(*tsnap));
	tsnap->snapshot_magic = STACKSHOT_TASK_SNAPSHOT_MAGIC;
	tsnap->pid = proc_pid(p);
	tsnap->uniqueid = proc_uniqueid(p);
	struct recount_times_mach times = recount_task_terminated_times(task);
	tsnap->user_time_in_terminated_threads = times.rtm_user;
	tsnap->system_time_in_terminated_threads = times.rtm_system;
	tsnap->suspend_count = task->suspend_count;
	tsnap->task_size = (typeof(tsnap->task_size))(get_task_phys_footprint(task) / PAGE_SIZE);
	tsnap->faults = counter_load(&task->faults);
	tsnap->pageins = counter_load(&task->pageins);
	tsnap->cow_faults = counter_load(&task->cow_faults);
	/*
	 * The throttling counters are maintained as 64-bit counters in the proc
	 * structure. However, we reserve 32-bits (each) for them in the task_snapshot
	 * struct to save space and since we do not expect them to overflow 32-bits. If we
	 * find these values overflowing in the future, the fix would be to simply
	 * upgrade these counters to 64-bit in the task_snapshot struct
	 */
	tsnap->was_throttled = (uint32_t) proc_was_throttled(p);
	tsnap->did_throttle = (uint32_t) proc_did_throttle(p);
#if CONFIG_COALITIONS
	/*
	 * These fields are overloaded to represent the resource coalition ID of
	 * this task...
	 */
	coalition_t rsrc_coal = task->coalition[COALITION_TYPE_RESOURCE];
	tsnap->p_start_sec = rsrc_coal ? coalition_id(rsrc_coal) : 0;
	/*
	 * ... and the process this thread is doing work on behalf of.
	 */
	pid_t origin_pid = -1;
	if (thread_get_voucher_origin_pid(thread, &origin_pid) != KERN_SUCCESS) {
		origin_pid = -1;
	}
	tsnap->p_start_usec = origin_pid;
#endif /* CONFIG_COALITIONS */

	if (task->t_flags & TF_TELEMETRY) {
		tsnap->ss_flags |= kTaskRsrcFlagged;
	}

	if (proc_get_effective_task_policy(task, TASK_POLICY_DARWIN_BG)) {
		tsnap->ss_flags |= kTaskDarwinBG;
	}

	proc_get_darwinbgstate(task, &tmp);

	if (proc_get_effective_task_policy(task, TASK_POLICY_ROLE) == TASK_FOREGROUND_APPLICATION) {
		tsnap->ss_flags |= kTaskIsForeground;
	}

	if (tmp & PROC_FLAG_ADAPTIVE_IMPORTANT) {
		tsnap->ss_flags |= kTaskIsBoosted;
	}

	if (tmp & PROC_FLAG_SUPPRESSED) {
		tsnap->ss_flags |= kTaskIsSuppressed;
	}


	tsnap->latency_qos = task_grab_latency_qos(task);

	strlcpy(tsnap->p_comm, proc_name_address(p), sizeof(tsnap->p_comm));
	if (user64_va) {
		tsnap->ss_flags |= kUser64_p;
	}

	if (task->task_shared_region_slide != -1) {
		tsnap->shared_cache_slide = task->task_shared_region_slide;
		bcopy(task->task_shared_region_uuid, tsnap->shared_cache_identifier,
		    sizeof(task->task_shared_region_uuid));
	}

	current_buffer->current_position += sizeof(struct task_snapshot);

	/*
	 * Directly after the task snapshot, place the array of UUID's corresponding to the binaries
	 * used by this task.
	 */
	if ((current_buffer->size - current_buffer->current_position) < uuid_info_array_size) {
		current_buffer->end_point = current_record_start;
		current_buffer->current_position = 0;
		if (current_record_start == 0) {
			/* This sample is too large to fit in the buffer even when we started at 0, so skip it */
			rv = ERANGE;
			goto cancel_sample;
		}
		goto copytobuffer;
	}

	/*
	 * Copy the UUID info array into our sample.
	 */
	if (uuid_info_array_size > 0) {
		bcopy(uuid_info_array, (char *)(current_buffer->buffer + current_buffer->current_position), uuid_info_array_size);
		tsnap->nloadinfos = uuid_info_count;
	}

	current_buffer->current_position += uuid_info_array_size;

	/*
	 * After the task snapshot & list of binary UUIDs, we place a thread snapshot.
	 */

	if ((current_buffer->size - current_buffer->current_position) < sizeof(struct thread_snapshot)) {
		/* wrap and overwrite */
		current_buffer->end_point = current_record_start;
		current_buffer->current_position = 0;
		if (current_record_start == 0) {
			/* This sample is too large to fit in the buffer even when we started at 0, so skip it */
			rv = ERANGE;
			goto cancel_sample;
		}
		goto copytobuffer;
	}

	thsnap = (struct thread_snapshot *)(uintptr_t)(current_buffer->buffer + current_buffer->current_position);
	bzero(thsnap, sizeof(*thsnap));

	thsnap->snapshot_magic = STACKSHOT_THREAD_SNAPSHOT_MAGIC;
	thsnap->thread_id = thread_tid(thread);
	thsnap->state = thread->state;
	thsnap->priority = thread->base_pri;
	thsnap->sched_pri = thread->sched_pri;
	thsnap->sched_flags = thread->sched_flags;
	thsnap->ss_flags |= kStacksPCOnly;
	thsnap->ts_qos = thread->effective_policy.thep_qos;
	thsnap->ts_rqos = thread->requested_policy.thrp_qos;
	thsnap->ts_rqos_override = MAX(thread->requested_policy.thrp_qos_override,
	    thread->requested_policy.thrp_qos_workq_override);
	memcpy(thsnap->_reserved + 1, &target->async_start_index,
	    sizeof(target->async_start_index));

	if (proc_get_effective_thread_policy(thread, TASK_POLICY_DARWIN_BG)) {
		thsnap->ss_flags |= kThreadDarwinBG;
	}

	boolean_t interrupt_state = ml_set_interrupts_enabled(FALSE);
	times = recount_current_thread_times();
	ml_set_interrupts_enabled(interrupt_state);
	thsnap->user_time = times.rtm_user;
	thsnap->system_time = times.rtm_system;

	current_buffer->current_position += sizeof(struct thread_snapshot);

	/*
	 * If this thread has a dispatch queue serial number, include it here.
	 */
	if (dqserialnum_valid) {
		if ((current_buffer->size - current_buffer->current_position) < sizeof(dqserialnum)) {
			/* wrap and overwrite */
			current_buffer->end_point = current_record_start;
			current_buffer->current_position = 0;
			if (current_record_start == 0) {
				/* This sample is too large to fit in the buffer even when we started at 0, so skip it */
				rv = ERANGE;
				goto cancel_sample;
			}
			goto copytobuffer;
		}

		thsnap->ss_flags |= kHasDispatchSerial;
		bcopy(&dqserialnum, (char *)current_buffer->buffer + current_buffer->current_position, sizeof(dqserialnum));
		current_buffer->current_position += sizeof(dqserialnum);
	}

	if (user64_regs) {
		framesize = 8;
		thsnap->ss_flags |= kUser64_p;
	} else {
		framesize = 4;
	}

	/*
	 * If we can't fit this entire stacktrace then cancel this record, wrap to the beginning,
	 * and start again there so that we always store a full record.
	 */
	if ((current_buffer->size - current_buffer->current_position) / framesize < btcount) {
		current_buffer->end_point = current_record_start;
		current_buffer->current_position = 0;
		if (current_record_start == 0) {
			/* This sample is too large to fit in the buffer even when we started at 0, so skip it */
			rv = ERANGE;
			goto cancel_sample;
		}
		goto copytobuffer;
	}

	for (bti = 0; bti < btcount; bti++, current_buffer->current_position += framesize) {
		if (framesize == 8) {
			*(uint64_t *)(uintptr_t)(current_buffer->buffer + current_buffer->current_position) = frames[bti];
		} else {
			*(uint32_t *)(uintptr_t)(current_buffer->buffer + current_buffer->current_position) = (uint32_t)frames[bti];
		}
	}

	if (current_buffer->end_point < current_buffer->current_position) {
		/*
		 * Each time the cursor wraps around to the beginning, we leave a
		 * differing amount of unused space at the end of the buffer. Make
		 * sure the cursor pushes the end point in case we're making use of
		 * more of the buffer than we did the last time we wrapped.
		 */
		current_buffer->end_point = current_buffer->current_position;
	}

	thsnap->nuser_frames = btcount;

	/*
	 * Now THIS is a hack.
	 */
	if (current_buffer == &telemetry_buffer) {
		telemetry_bytes_since_last_mark += (current_buffer->current_position - current_record_start);
		if (telemetry_bytes_since_last_mark > telemetry_buffer_notify_at) {
			notify = true;
		}
	}

	if (out_current_record_start != NULL) {
		*out_current_record_start = current_record_start;
	}

cancel_sample:
	if (release_buffer_lock) {
		lck_mtx_unlock(buffer_mtx);
	}

	if (notify) {
		telemetry_notify_user();
	}

	if (uuid_info_array != NULL) {
		kfree_data(uuid_info_array, uuid_info_array_size);
	}

	return rv;
}

#if TELEMETRY_DEBUG
static void
log_telemetry_output(vm_offset_t buf, uint32_t pos, uint32_t sz)
{
	struct micro_snapshot *p;
	uint32_t offset;

	printf("Copying out %d bytes of telemetry at offset %d\n", sz, pos);

	buf += pos;

	/*
	 * Find and log each timestamp in this chunk of buffer.
	 */
	for (offset = 0; offset < sz; offset++) {
		p = (struct micro_snapshot *)(buf + offset);
		if (p->snapshot_magic == STACKSHOT_MICRO_SNAPSHOT_MAGIC) {
			printf("telemetry timestamp: %lld\n", p->ms_time);
		}
	}
}
#endif

int
telemetry_gather(user_addr_t buffer, uint32_t *length, bool mark)
{
	return telemetry_buffer_gather(buffer, length, mark, &telemetry_buffer);
}

int
telemetry_buffer_gather(user_addr_t buffer, uint32_t *length, bool mark, struct micro_snapshot_buffer *current_buffer)
{
	int result = 0;
	uint32_t oldest_record_offset;

	KDBG(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_GATHER) | DBG_FUNC_START,
	    mark, telemetry_bytes_since_last_mark, 0,
	    (&telemetry_buffer != current_buffer));

	TELEMETRY_LOCK();

	if (current_buffer->buffer == 0) {
		*length = 0;
		goto out;
	}

	if (*length < current_buffer->size) {
		result = KERN_NO_SPACE;
		goto out;
	}

	/*
	 * Copy the ring buffer out to userland in order sorted by time: least recent to most recent.
	 * First, we need to search forward from the cursor to find the oldest record in our buffer.
	 */
	oldest_record_offset = current_buffer->current_position;
	do {
		if (((oldest_record_offset + sizeof(uint32_t)) > current_buffer->size) ||
		    ((oldest_record_offset + sizeof(uint32_t)) > current_buffer->end_point)) {
			if (*(uint32_t *)(uintptr_t)(current_buffer->buffer) == 0) {
				/*
				 * There is no magic number at the start of the buffer, which means
				 * it's empty; nothing to see here yet.
				 */
				*length = 0;
				goto out;
			}
			/*
			 * We've looked through the end of the active buffer without finding a valid
			 * record; that means all valid records are in a single chunk, beginning at
			 * the very start of the buffer.
			 */

			oldest_record_offset = 0;
			assert(*(uint32_t *)(uintptr_t)(current_buffer->buffer) == STACKSHOT_MICRO_SNAPSHOT_MAGIC);
			break;
		}

		if (*(uint32_t *)(uintptr_t)(current_buffer->buffer + oldest_record_offset) == STACKSHOT_MICRO_SNAPSHOT_MAGIC) {
			break;
		}

		/*
		 * There are no alignment guarantees for micro-stackshot records, so we must search at each
		 * byte offset.
		 */
		oldest_record_offset++;
	} while (oldest_record_offset != current_buffer->current_position);

	/*
	 * If needed, copyout in two chunks: from the oldest record to the end of the buffer, and then
	 * from the beginning of the buffer up to the current position.
	 */
	if (oldest_record_offset != 0) {
#if TELEMETRY_DEBUG
		log_telemetry_output(current_buffer->buffer, oldest_record_offset,
		    current_buffer->end_point - oldest_record_offset);
#endif
		if ((result = copyout((void *)(current_buffer->buffer + oldest_record_offset), buffer,
		    current_buffer->end_point - oldest_record_offset)) != 0) {
			*length = 0;
			goto out;
		}
		*length = current_buffer->end_point - oldest_record_offset;
	} else {
		*length = 0;
	}

#if TELEMETRY_DEBUG
	log_telemetry_output(current_buffer->buffer, 0, current_buffer->current_position);
#endif
	if ((result = copyout((void *)current_buffer->buffer, buffer + *length,
	    current_buffer->current_position)) != 0) {
		*length = 0;
		goto out;
	}
	*length += (uint32_t)current_buffer->current_position;

out:

	if (mark && (*length > 0)) {
		telemetry_bytes_since_last_mark = 0;
	}

	TELEMETRY_UNLOCK();

	KDBG(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_GATHER) | DBG_FUNC_END,
	    current_buffer->current_position, *length,
	    current_buffer->end_point, (&telemetry_buffer != current_buffer));

	return result;
}

#if CONFIG_MACF
static int
telemetry_macf_init_locked(size_t buffer_size)
{
	kern_return_t kr;

	if (buffer_size > TELEMETRY_MAX_BUFFER_SIZE) {
		buffer_size = TELEMETRY_MAX_BUFFER_SIZE;
	}

	telemetry_macf_buffer.size = buffer_size;

	kr = kmem_alloc(kernel_map, &telemetry_macf_buffer.buffer,
	    telemetry_macf_buffer.size, KMA_DATA | KMA_ZERO | KMA_PERMANENT,
	    VM_KERN_MEMORY_SECURITY);

	if (kr != KERN_SUCCESS) {
		kprintf("Telemetry (MACF): Allocation failed: %d\n", kr);
		return ENOMEM;
	}

	return 0;
}

int
telemetry_macf_mark_curthread(void)
{
	thread_t thread = current_thread();
	task_t task = get_threadtask(thread);
	int rv = 0;

	if (task == kernel_task) {
		/* Kernel threads never return to an AST boundary, and are ineligible */
		return EINVAL;
	}

	/* Initialize the MACF telemetry buffer if needed. */
	TELEMETRY_MACF_LOCK();
	if (__improbable(telemetry_macf_buffer.size == 0)) {
		rv = telemetry_macf_init_locked(TELEMETRY_MACF_DEFAULT_BUFFER_SIZE);

		if (rv != 0) {
			/* Don't leak the MACF telemetry lock on the error path. */
			TELEMETRY_MACF_UNLOCK();
			return rv;
		}
	}
	TELEMETRY_MACF_UNLOCK();

	act_set_macf_telemetry_ast(thread);
	return 0;
}
#endif /* CONFIG_MACF */


static void
telemetry_stash_ca_event(
	kernel_brk_type_t type,
	uint16_t comment,
	uint32_t total_frames,
	uintptr_t *backtrace,
	uintptr_t faulting_address)
{
	/* Skip telemetry if we accidentally took a fault while handling telemetry */
	bool *in_handler = PERCPU_GET(brk_telemetry_in_handler);
	if (*in_handler) {
#if DEVELOPMENT
		panic("Breakpoint trap re-entered from within a spinlock");
#endif
		return;
	}

	/* Rate limit on repeatedly seeing the same address */
	uintptr_t *cache_address = PERCPU_GET(brk_telemetry_cache_address);
	if (*cache_address == faulting_address) {
		return;
	}

	*cache_address = faulting_address;

	lck_spin_lock(&ca_entries_lck);
	*in_handler = true;

	if (__improbable(ca_entries_index > CA_ENTRIES_SIZE)) {
		panic("Invalid CA interrupt buffer index %d > %d",
		    ca_entries_index, CA_ENTRIES_SIZE);
	}

	/* We're full, just drop the event */
	if (ca_entries_index == CA_ENTRIES_SIZE) {
		*in_handler = false;
		lck_spin_unlock(&ca_entries_lck);
		return;
	}

	ca_entries[ca_entries_index].type = type;
	ca_entries[ca_entries_index].code = comment;
	ca_entries[ca_entries_index].faulting_address = faulting_address;

	assert(total_frames <= TELEMETRY_BT_FRAMES);

	if (total_frames <= TELEMETRY_BT_FRAMES) {
		ca_entries[ca_entries_index].num_frames = total_frames;
		memcpy(ca_entries[ca_entries_index].frames, backtrace,
		    total_frames * sizeof(uintptr_t));
	}

	ca_entries_index++;

	*in_handler = false;
	lck_spin_unlock(&ca_entries_lck);

	thread_call_enter(telemetry_ca_send_callout);
}

static void
telemetry_backtrace_to_string(
	char *buf,
	size_t buflen,
	uint32_t tot,
	uintptr_t *frames)
{
	size_t l = 0;

	for (uint32_t i = 0; i < tot; i++) {
		l += scnprintf(buf + l, buflen - l, "%lx\n", VM_KERNEL_UNSLIDE(frames[i]));
	}
}

static void
telemetry_flush_ca_events(
	__unused thread_call_param_t p0,
	__unused thread_call_param_t p1)
{
	struct telemetry_ca_entry local_entries[CA_ENTRIES_SIZE] = {0};
	uint8_t entry_cnt = 0;
	bool *in_handler = PERCPU_GET(brk_telemetry_in_handler);

	lck_spin_lock(&ca_entries_lck);
	*in_handler = true;

	if (__improbable(ca_entries_index > CA_ENTRIES_SIZE)) {
		panic("Invalid CA interrupt buffer index %d > %d", ca_entries_index,
		    CA_ENTRIES_SIZE);
	}

	if (ca_entries_index == 0) {
		*in_handler = false;
		lck_spin_unlock(&ca_entries_lck);
		return;
	} else {
		memcpy(local_entries, ca_entries, sizeof(local_entries));
		entry_cnt = ca_entries_index;
		ca_entries_index = 0;
	}

	*in_handler = false;
	lck_spin_unlock(&ca_entries_lck);

	/* Send the events */
	for (uint8_t i = 0; i < entry_cnt; i++) {
		ca_event_t ca_event = CA_EVENT_ALLOCATE(kernel_breakpoint_event);
		CA_EVENT_TYPE(kernel_breakpoint_event) * event = ca_event->data;

		event->brk_type = local_entries[i].type;
		event->brk_code = local_entries[i].code;
		event->faulting_address = local_entries[i].faulting_address;

		telemetry_backtrace_to_string(event->backtrace, BACKTRACE_FRAMES_BUF,
		    local_entries[i].num_frames, local_entries[i].frames);
		strlcpy(event->uuid, kernel_uuid_string, CA_UUID_LEN);

		CA_EVENT_SEND(ca_event);
	}
}

void
telemetry_kernel_brk(
	kernel_brk_type_t type,
	kernel_brk_options_t options,
	void *tstate,
	uint16_t comment)
{
#if __arm64__
	arm_saved_state_t *state = (arm_saved_state_t *)tstate;

	uintptr_t faulting_address = get_saved_state_pc(state);
	uintptr_t saved_fp = get_saved_state_fp(state);
#else
	x86_saved_state64_t *state = (x86_saved_state64_t *)tstate;

	uintptr_t faulting_address = state->isf.rip;
	uintptr_t saved_fp = state->rbp;
#endif

	assert(options & KERNEL_BRK_TELEMETRY_OPTIONS);

	if (startup_phase < STARTUP_SUB_THREAD_CALL) {
#if DEVELOPMENT || DEBUG
		panic("Attempting kernel breakpoint telemetry in early boot.");
#endif
		return;
	}

	if (options & KERNEL_BRK_CORE_ANALYTICS) {
		uintptr_t frames[TELEMETRY_BT_FRAMES];

		struct backtrace_control ctl = {
			.btc_frame_addr = (uintptr_t)saved_fp,
		};

		uint32_t total_frames = backtrace(frames, TELEMETRY_BT_FRAMES, &ctl, NULL);

		telemetry_stash_ca_event(type, comment, total_frames,
		    frames, VM_KERNEL_UNSLIDE(faulting_address));
	}
}

/************************/
/* BOOT PROFILE SUPPORT */
/************************/
/*
 * Boot Profiling
 *
 * The boot-profiling support is a mechanism to sample activity happening on the
 * system during boot. This mechanism sets up a periodic timer and on every timer fire,
 * captures a full backtrace into the boot profiling buffer. This buffer can be pulled
 * out and analyzed from user-space. It is turned on using the following boot-args:
 * "bootprofile_buffer_size" specifies the size of the boot profile buffer
 * "bootprofile_interval_ms" specifies the interval for the profiling timer
 *
 * Process Specific Boot Profiling
 *
 * The boot-arg "bootprofile_proc_name" can be used to specify a certain
 * process that needs to be profiled during boot. Setting this boot-arg changes
 * the way stackshots are captured. At every timer fire, the code looks at the
 * currently running process and takes a stackshot only if the requested process
 * is on-core (which makes it unsuitable for MP systems).
 *
 * Trigger Events
 *
 * The boot-arg "bootprofile_type=boot" starts the timer during early boot. Using
 * "wake" starts the timer at AP wake from suspend-to-RAM.
 */
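
/*
 * Example (values are illustrative, not defaults): to sample all processes
 * every 10 ms from early boot into a 4 MB buffer, one might boot with:
 *
 *   bootprofile_buffer_size=0x400000 bootprofile_interval_ms=10 bootprofile_type=boot
 */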

#define BOOTPROFILE_MAX_BUFFER_SIZE (64*1024*1024) /* see also COPYSIZELIMIT_PANIC */

vm_offset_t bootprofile_buffer = 0;
uint32_t bootprofile_buffer_size = 0;
uint32_t bootprofile_buffer_current_position = 0;
uint32_t bootprofile_interval_ms = 0;
uint64_t bootprofile_stackshot_flags = 0;
uint64_t bootprofile_interval_abs = 0;
uint64_t bootprofile_next_deadline = 0;
uint32_t bootprofile_all_procs = 0;
char bootprofile_proc_name[17];
uint64_t bootprofile_delta_since_timestamp = 0;
LCK_GRP_DECLARE(bootprofile_lck_grp, "bootprofile_group");
LCK_MTX_DECLARE(bootprofile_mtx, &bootprofile_lck_grp);


enum {
	kBootProfileDisabled = 0,
	kBootProfileStartTimerAtBoot,
	kBootProfileStartTimerAtWake
} bootprofile_type = kBootProfileDisabled;


static timer_call_data_t bootprofile_timer_call_entry;

#define BOOTPROFILE_LOCK() do { lck_mtx_lock(&bootprofile_mtx); } while(0)
#define BOOTPROFILE_TRY_SPIN_LOCK() lck_mtx_try_lock_spin(&bootprofile_mtx)
#define BOOTPROFILE_UNLOCK() do { lck_mtx_unlock(&bootprofile_mtx); } while(0)

static void bootprofile_timer_call(
	timer_call_param_t param0,
	timer_call_param_t param1);

void
bootprofile_init(void)
{
	kern_return_t ret;
	char type[32];

	if (!PE_parse_boot_argn("bootprofile_buffer_size",
	    &bootprofile_buffer_size, sizeof(bootprofile_buffer_size))) {
		bootprofile_buffer_size = 0;
	}

	if (bootprofile_buffer_size > BOOTPROFILE_MAX_BUFFER_SIZE) {
		bootprofile_buffer_size = BOOTPROFILE_MAX_BUFFER_SIZE;
	}

	if (!PE_parse_boot_argn("bootprofile_interval_ms",
	    &bootprofile_interval_ms, sizeof(bootprofile_interval_ms))) {
		bootprofile_interval_ms = 0;
	}

	if (!PE_parse_boot_argn("bootprofile_stackshot_flags",
	    &bootprofile_stackshot_flags, sizeof(bootprofile_stackshot_flags))) {
		bootprofile_stackshot_flags = 0;
	}

	if (!PE_parse_boot_argn("bootprofile_proc_name",
	    &bootprofile_proc_name, sizeof(bootprofile_proc_name))) {
		bootprofile_all_procs = 1;
		bootprofile_proc_name[0] = '\0';
	}

	if (PE_parse_boot_argn("bootprofile_type", type, sizeof(type))) {
		if (0 == strcmp(type, "boot")) {
			bootprofile_type = kBootProfileStartTimerAtBoot;
		} else if (0 == strcmp(type, "wake")) {
			bootprofile_type = kBootProfileStartTimerAtWake;
		} else {
			bootprofile_type = kBootProfileDisabled;
		}
	} else {
		bootprofile_type = kBootProfileDisabled;
	}

	clock_interval_to_absolutetime_interval(bootprofile_interval_ms, NSEC_PER_MSEC, &bootprofile_interval_abs);

	/* Both boot args must be set to enable */
	if ((bootprofile_type == kBootProfileDisabled) || (bootprofile_buffer_size == 0) || (bootprofile_interval_abs == 0)) {
		return;
	}

	ret = kmem_alloc(kernel_map, &bootprofile_buffer, bootprofile_buffer_size,
	    KMA_DATA | KMA_ZERO | KMA_PERMANENT, VM_KERN_MEMORY_DIAG);
	if (ret != KERN_SUCCESS) {
		kprintf("Boot profile: Allocation failed: %d\n", ret);
		return;
	}

	kprintf("Boot profile: Sampling %s once per %u ms at %s\n",
	    bootprofile_all_procs ? "all procs" : bootprofile_proc_name, bootprofile_interval_ms,
	    bootprofile_type == kBootProfileStartTimerAtBoot ? "boot" : (bootprofile_type == kBootProfileStartTimerAtWake ? "wake" : "unknown"));

	timer_call_setup(&bootprofile_timer_call_entry,
	    bootprofile_timer_call,
	    NULL);

	if (bootprofile_type == kBootProfileStartTimerAtBoot) {
		bootprofile_next_deadline = mach_absolute_time() + bootprofile_interval_abs;
		timer_call_enter_with_leeway(&bootprofile_timer_call_entry,
		    NULL,
		    bootprofile_next_deadline,
		    0,
		    TIMER_CALL_SYS_NORMAL,
		    false);
	}
}

void
bootprofile_wake_from_sleep(void)
{
	if (bootprofile_type == kBootProfileStartTimerAtWake) {
		bootprofile_next_deadline = mach_absolute_time() + bootprofile_interval_abs;
		timer_call_enter_with_leeway(&bootprofile_timer_call_entry,
		    NULL,
		    bootprofile_next_deadline,
		    0,
		    TIMER_CALL_SYS_NORMAL,
		    false);
	}
}


static void
bootprofile_timer_call(
	timer_call_param_t param0 __unused,
	timer_call_param_t param1 __unused)
{
	unsigned retbytes = 0;
	int pid_to_profile = -1;

	if (!BOOTPROFILE_TRY_SPIN_LOCK()) {
		goto reprogram;
	}

	/* Check if process-specific boot profiling is turned on */
	if (!bootprofile_all_procs) {
		/*
		 * Since boot profiling initializes really early in boot, it is
		 * possible that at this point, the task/proc is not initialized.
		 * Nothing to do in that case.
		 */

		if ((current_task() != NULL) && (get_bsdtask_info(current_task()) != NULL) &&
		    (0 == strncmp(bootprofile_proc_name, proc_name_address(get_bsdtask_info(current_task())), 17))) {
			pid_to_profile = proc_selfpid();
		} else {
			/*
			 * Process-specific boot profiling requested but the on-core process is
			 * something else. Nothing to do here.
			 */
			BOOTPROFILE_UNLOCK();
			goto reprogram;
		}
	}

	/* initiate a stackshot with whatever portion of the buffer is left */
	if (bootprofile_buffer_current_position < bootprofile_buffer_size) {
		uint64_t flags = STACKSHOT_KCDATA_FORMAT | STACKSHOT_TRYLOCK | STACKSHOT_SAVE_LOADINFO
		    | STACKSHOT_GET_GLOBAL_MEM_STATS;
#if defined(XNU_TARGET_OS_OSX)
		flags |= STACKSHOT_SAVE_KEXT_LOADINFO;
#endif


		/* OR on flags specified in boot-args */
		flags |= bootprofile_stackshot_flags;
		if ((flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) && (bootprofile_delta_since_timestamp == 0)) {
			/* Can't take deltas until the first one */
			flags &= ~STACKSHOT_COLLECT_DELTA_SNAPSHOT;
		}

		uint64_t timestamp = 0;
		if (bootprofile_stackshot_flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) {
			timestamp = mach_absolute_time();
		}

		kern_return_t r = stack_snapshot_from_kernel(
			pid_to_profile, (void *)(bootprofile_buffer + bootprofile_buffer_current_position),
			bootprofile_buffer_size - bootprofile_buffer_current_position,
			flags, bootprofile_delta_since_timestamp, 0, &retbytes);

		/*
		 * We call with STACKSHOT_TRYLOCK because the stackshot lock is coarser
		 * than the bootprofile lock. If someone else has the lock we'll just
		 * try again later.
		 */

		if (r == KERN_LOCK_OWNED) {
			BOOTPROFILE_UNLOCK();
			goto reprogram;
		}

		if (bootprofile_stackshot_flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT &&
		    r == KERN_SUCCESS) {
			bootprofile_delta_since_timestamp = timestamp;
		}

		bootprofile_buffer_current_position += retbytes;
	}

	BOOTPROFILE_UNLOCK();

	/* If we didn't get any data or have run out of buffer space, stop profiling */
	if ((retbytes == 0) || (bootprofile_buffer_current_position == bootprofile_buffer_size)) {
		return;
	}


reprogram:
	/* If the user gathered the buffer, no need to keep profiling */
	if (bootprofile_interval_abs == 0) {
		return;
	}

	clock_deadline_for_periodic_event(bootprofile_interval_abs,
	    mach_absolute_time(),
	    &bootprofile_next_deadline);
	timer_call_enter_with_leeway(&bootprofile_timer_call_entry,
	    NULL,
	    bootprofile_next_deadline,
	    0,
	    TIMER_CALL_SYS_NORMAL,
	    false);
}

void
bootprofile_get(void **buffer, uint32_t *length)
{
	BOOTPROFILE_LOCK();
	*buffer = (void*) bootprofile_buffer;
	*length = bootprofile_buffer_current_position;
	BOOTPROFILE_UNLOCK();
}

int
bootprofile_gather(user_addr_t buffer, uint32_t *length)
{
	int result = 0;

	BOOTPROFILE_LOCK();

	if (bootprofile_buffer == 0) {
		*length = 0;
		goto out;
	}

	if (*length < bootprofile_buffer_current_position) {
		result = KERN_NO_SPACE;
		goto out;
	}

	if ((result = copyout((void *)bootprofile_buffer, buffer,
	    bootprofile_buffer_current_position)) != 0) {
		*length = 0;
		goto out;
	}
	*length = bootprofile_buffer_current_position;

	/* cancel future timers */
	bootprofile_interval_abs = 0;

out:

	BOOTPROFILE_UNLOCK();

	return result;
}