/*
 * Copyright (c) 2012-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/host_priv.h>
#include <mach/host_special_ports.h>
#include <mach/mach_types.h>
#include <mach/telemetry_notification_server.h>

#include <kern/assert.h>
#include <kern/clock.h>
#include <kern/coalition.h>
#include <kern/debug.h>
#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/kern_types.h>
#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/telemetry.h>
#include <kern/timer_call.h>
#include <kern/policy_internal.h>
#include <kern/kcdata.h>

#include <pexpert/pexpert.h>

#include <string.h>
#include <vm/vm_kern_xnu.h>
#include <vm/vm_shared_region.h>

#include <kperf/callstack.h>
#include <kern/backtrace.h>
#include <kern/monotonic.h>

#include <security/mac_mach_internal.h>

#include <sys/errno.h>
#include <sys/kdebug.h>
#include <uuid/uuid.h>
#include <kdp/kdp_dyld.h>

#include <libkern/coreanalytics/coreanalytics.h>
#include <kern/thread_call.h>

#define TELEMETRY_DEBUG 0

struct proc;
extern int proc_pid(struct proc *);
extern char *proc_name_address(void *p);
extern char *proc_longname_address(void *p);
extern uint64_t proc_uniqueid(void *p);
extern uint64_t proc_was_throttled(void *p);
extern uint64_t proc_did_throttle(void *p);
extern int proc_selfpid(void);
extern boolean_t task_did_exec(task_t task);
extern boolean_t task_is_exec_copy(task_t task);

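/*
 * A micro_snapshot_buffer is a simple ring of variable-length records:
 * current_position is the write cursor and end_point marks the end of valid
 * data. Records never wrap mid-record; the writer restarts at offset 0 and
 * overwrites instead (see telemetry_process_sample()).
 */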
struct micro_snapshot_buffer {
	vm_offset_t buffer;
	uint32_t size;
	uint32_t current_position;
	uint32_t end_point;
};

static bool telemetry_task_ready_for_sample(task_t task);

static void telemetry_instrumentation_begin(
	struct micro_snapshot_buffer *buffer, enum micro_snapshot_flags flags);

static void telemetry_instrumentation_end(struct micro_snapshot_buffer *buffer);

static void telemetry_take_sample(thread_t thread, enum micro_snapshot_flags flags);

#if CONFIG_MACF
static void telemetry_macf_take_sample(thread_t thread, enum micro_snapshot_flags flags);
#endif

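/*
 * Bundles everything telemetry_process_sample() needs: the thread being
 * sampled, its user backtrace, and the destination buffer plus the mutex
 * protecting it.
 */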
struct telemetry_target {
	thread_t thread;
	uintptr_t *frames;
	size_t frames_count;
	bool user64_regs;
	uint16_t async_start_index;
	enum micro_snapshot_flags microsnapshot_flags;
	bool include_metadata;
	struct micro_snapshot_buffer *buffer;
	lck_mtx_t *buffer_mtx;
};

static int telemetry_process_sample(
	const struct telemetry_target *target,
	bool release_buffer_lock,
	uint32_t *out_current_record_start);

static int telemetry_buffer_gather(
	user_addr_t buffer,
	uint32_t *length,
	bool mark,
	struct micro_snapshot_buffer *current_buffer);

#define TELEMETRY_DEFAULT_SAMPLE_RATE (1) /* 1 sample every 1 second */
#define TELEMETRY_DEFAULT_BUFFER_SIZE (16*1024)
#define TELEMETRY_MAX_BUFFER_SIZE (64*1024)

#define TELEMETRY_DEFAULT_NOTIFY_LEEWAY (4*1024) // Userland gets 4k of leeway to collect data after notification
#define TELEMETRY_MAX_UUID_COUNT (128) // Max of 128 non-shared-cache UUIDs to log for symbolication

uint32_t telemetry_sample_rate = 0;
volatile boolean_t telemetry_needs_record = FALSE;
volatile boolean_t telemetry_needs_timer_arming_record = FALSE;

bool telemetry_sample_pmis = false;

uint32_t telemetry_timestamp = 0;

struct telemetry_metadata {
	/*
	 * The current generation of microstackshot-based telemetry.
	 * Incremented whenever the settings change.
	 */
	uint32_t tm_generation;
	/*
	 * The total number of samples recorded.
	 */
	uint64_t tm_samples_recorded;
	/*
	 * The total number of samples that were skipped.
	 */
	uint64_t tm_samples_skipped;
	/*
	 * What's triggering the microstackshot samples.
	 */
	enum telemetry_source {
		TMSRC_NONE = 0,
		TMSRC_UNKNOWN,
		TMSRC_TIME,
		TMSRC_INSTRUCTIONS,
		TMSRC_CYCLES,
	} tm_source;
	/*
	 * The interval used for periodic sampling.
	 */
	uint64_t tm_period;
};

/*
 * The telemetry_buffer is responsible for timer samples and interrupt
 * samples that are driven by compute_averages(). It will notify its
 * client (if one exists) when it has enough data to be worth flushing.
 */
struct micro_snapshot_buffer telemetry_buffer = {
	.buffer = 0,
	.size = 0,
	.current_position = 0,
	.end_point = 0
};

#if CONFIG_MACF
#define TELEMETRY_MACF_DEFAULT_BUFFER_SIZE (16*1024)
/*
 * The MAC framework uses its own telemetry buffer for the purposes of auditing
 * security-related work being done by userland threads.
 */
struct micro_snapshot_buffer telemetry_macf_buffer = {
	.buffer = 0,
	.size = 0,
	.current_position = 0,
	.end_point = 0
};
#endif

int telemetry_bytes_since_last_mark = -1; // How much data since buf was last marked?
int telemetry_buffer_notify_at = 0;

LCK_GRP_DECLARE(telemetry_lck_grp, "telemetry group");
LCK_MTX_DECLARE(telemetry_mtx, &telemetry_lck_grp);
LCK_MTX_DECLARE(telemetry_pmi_mtx, &telemetry_lck_grp);
LCK_MTX_DECLARE(telemetry_macf_mtx, &telemetry_lck_grp);
LCK_SPIN_DECLARE(telemetry_metadata_lck, &telemetry_lck_grp);

#define TELEMETRY_LOCK() do { lck_mtx_lock(&telemetry_mtx); } while (0)
#define TELEMETRY_TRY_SPIN_LOCK() lck_mtx_try_lock_spin(&telemetry_mtx)
#define TELEMETRY_UNLOCK() do { lck_mtx_unlock(&telemetry_mtx); } while (0)

#define TELEMETRY_PMI_LOCK() do { lck_mtx_lock(&telemetry_pmi_mtx); } while (0)
#define TELEMETRY_PMI_UNLOCK() do { lck_mtx_unlock(&telemetry_pmi_mtx); } while (0)

#define TELEMETRY_MACF_LOCK() do { lck_mtx_lock(&telemetry_macf_mtx); } while (0)
#define TELEMETRY_MACF_UNLOCK() do { lck_mtx_unlock(&telemetry_macf_mtx); } while (0)

/*
 * Protected by the telemetry_metadata_lck spinlock.
 */
struct telemetry_metadata telemetry_metadata = { 0 };

#define TELEMETRY_BT_FRAMES (5)

/*
 * Telemetry reporting is unsafe in interrupt context, since the CA framework
 * relies on being able to successfully zalloc some memory for the event.
 * Therefore we maintain a small buffer that is then flushed by a helper thread.
 */
#define CA_ENTRIES_SIZE (5)

struct telemetry_ca_entry {
	uint32_t type;
	uint16_t code;
	uint32_t num_frames;
	uintptr_t faulting_address;
	uintptr_t frames[TELEMETRY_BT_FRAMES];
};

LCK_GRP_DECLARE(ca_entries_lock_grp, "ca_entries_lck");
LCK_SPIN_DECLARE(ca_entries_lck, &ca_entries_lock_grp);

static struct telemetry_ca_entry ca_entries[CA_ENTRIES_SIZE];
static uint8_t ca_entries_index = 0;
static struct thread_call *telemetry_ca_send_callout;

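/*
 * CoreAnalytics event emitted for each stashed kernel breakpoint. The
 * backtrace field carries newline-separated hex offsets followed by
 * UUID@range lines; see the format notes in telemetry_flush_ca_events().
 */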
CA_EVENT(kernel_breakpoint_event,
    CA_INT, brk_type,
    CA_INT, brk_code,
    CA_INT, faulting_address,
    CA_STATIC_STRING(CA_UBSANBUF_LEN), backtrace,
    CA_STATIC_STRING(CA_UUID_LEN), uuid);

/* Rate-limit telemetry on last seen faulting address */
static uintptr_t PERCPU_DATA(brk_telemetry_cache_address);
/* Get out from the brk handler if the CPU is already servicing one */
static bool PERCPU_DATA(brk_telemetry_in_handler);

static void telemetry_flush_ca_events(thread_call_param_t, thread_call_param_t);

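/*
 * Telemetry is configured via the boot-args parsed below; for example
 * (illustrative values only):
 *     telemetry_buffer_size=32768 telemetry_notification_leeway=8192 telemetry_sample_rate=1
 */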
void
telemetry_init(void)
{
	kern_return_t ret;
	uint32_t telemetry_notification_leeway;

	if (!PE_parse_boot_argn("telemetry_buffer_size",
	    &telemetry_buffer.size, sizeof(telemetry_buffer.size))) {
		telemetry_buffer.size = TELEMETRY_DEFAULT_BUFFER_SIZE;
	}

	if (telemetry_buffer.size > TELEMETRY_MAX_BUFFER_SIZE) {
		telemetry_buffer.size = TELEMETRY_MAX_BUFFER_SIZE;
	}

	ret = kmem_alloc(kernel_map, &telemetry_buffer.buffer, telemetry_buffer.size,
	    KMA_DATA | KMA_ZERO | KMA_PERMANENT, VM_KERN_MEMORY_DIAG);
	if (ret != KERN_SUCCESS) {
		kprintf("Telemetry: Allocation failed: %d\n", ret);
		return;
	}

	if (!PE_parse_boot_argn("telemetry_notification_leeway",
	    &telemetry_notification_leeway, sizeof(telemetry_notification_leeway))) {
		/*
		 * By default, notify the user to collect the buffer when there is this much space left in the buffer.
		 */
		telemetry_notification_leeway = TELEMETRY_DEFAULT_NOTIFY_LEEWAY;
	}
	if (telemetry_notification_leeway >= telemetry_buffer.size) {
		printf("telemetry: nonsensical telemetry_notification_leeway boot-arg %d changed to %d\n",
		    telemetry_notification_leeway, TELEMETRY_DEFAULT_NOTIFY_LEEWAY);
		telemetry_notification_leeway = TELEMETRY_DEFAULT_NOTIFY_LEEWAY;
	}
	telemetry_buffer_notify_at = telemetry_buffer.size - telemetry_notification_leeway;

	if (!PE_parse_boot_argn("telemetry_sample_rate",
	    &telemetry_sample_rate, sizeof(telemetry_sample_rate))) {
		telemetry_sample_rate = TELEMETRY_DEFAULT_SAMPLE_RATE;
	}

	telemetry_ca_send_callout = thread_call_allocate_with_options(
		telemetry_flush_ca_events, NULL, THREAD_CALL_PRIORITY_KERNEL,
		THREAD_CALL_OPTIONS_ONCE);

	assert(telemetry_ca_send_callout != NULL);
}

/*
 * Determine if the current thread is eligible for telemetry:
 */
static bool
telemetry_is_active(thread_t thread)
{
	task_t task = get_threadtask(thread);

	if (task == kernel_task) {
		/* Kernel threads are currently exempted from PMI-based sampling. */
		return false;
	}

	return telemetry_sample_pmis;
}

#if CONFIG_CPU_COUNTERS
static void
telemetry_pmi_handler(bool user_mode, __unused void *ctx)
{
	telemetry_mark_curthread(user_mode, TRUE);
}
#endif /* CONFIG_CPU_COUNTERS */

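/*
 * Configure PMI-driven microstackshots: take a sample every 'period'
 * instructions or cycles, or pass TELEMETRY_PMI_NONE to disable sampling.
 * Returns 0 on success, nonzero otherwise.
 */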
int
telemetry_pmi_setup(enum telemetry_pmi pmi_ctr, uint64_t period)
{
#if CONFIG_CPU_COUNTERS
	enum telemetry_source source = TMSRC_NONE;
	int error = 0;
	const char *name = "?";

	unsigned int ctr = 0;

	TELEMETRY_PMI_LOCK();

	switch (pmi_ctr) {
	case TELEMETRY_PMI_NONE:
		if (!telemetry_sample_pmis) {
			error = 1;
			goto out;
		}

		telemetry_sample_pmis = false;
		error = mt_microstackshot_stop();
		if (!error) {
			printf("telemetry: disabling ustackshot on PMI\n");
			int intrs_en = ml_set_interrupts_enabled(FALSE);
			lck_spin_lock(&telemetry_metadata_lck);
			telemetry_metadata.tm_period = 0;
			telemetry_metadata.tm_source = TMSRC_NONE;
			lck_spin_unlock(&telemetry_metadata_lck);
			ml_set_interrupts_enabled(intrs_en);
		}
		goto out;

	case TELEMETRY_PMI_INSTRS:
		ctr = MT_CORE_INSTRS;
		name = "instructions";
		source = TMSRC_INSTRUCTIONS;
		break;

	case TELEMETRY_PMI_CYCLES:
		ctr = MT_CORE_CYCLES;
		name = "cycles";
		source = TMSRC_CYCLES;
		break;

	default:
		error = 1;
		goto out;
	}

	telemetry_sample_pmis = true;

	error = mt_microstackshot_start(ctr, period, telemetry_pmi_handler, NULL);
	if (!error) {
		printf("telemetry: ustackshot every %llu %s\n", period, name);

		int intrs_en = ml_set_interrupts_enabled(FALSE);
		lck_spin_lock(&telemetry_metadata_lck);
		telemetry_metadata.tm_period = period;
		telemetry_metadata.tm_source = source;
		telemetry_metadata.tm_generation += 1;
		lck_spin_unlock(&telemetry_metadata_lck);
		ml_set_interrupts_enabled(intrs_en);
	}

out:
	TELEMETRY_PMI_UNLOCK();
	return error;
#else /* CONFIG_CPU_COUNTERS */
#pragma unused(pmi_ctr, period)
	return 1;
#endif /* !CONFIG_CPU_COUNTERS */
}

/*
 * Mark the current thread for an interrupt-based
 * telemetry record, to be sampled at the next AST boundary.
 */
void
telemetry_mark_curthread(boolean_t interrupted_userspace, boolean_t pmi)
{
	uint32_t ast_bits = 0;
	thread_t thread = current_thread();

	/*
	 * If telemetry isn't active for this thread, return and try
	 * again next time.
	 */
	if (telemetry_is_active(thread) == false) {
		if (pmi) {
			int intrs_en = ml_set_interrupts_enabled(FALSE);
			lck_spin_lock(&telemetry_metadata_lck);
			telemetry_metadata.tm_samples_skipped += 1;
			lck_spin_unlock(&telemetry_metadata_lck);
			ml_set_interrupts_enabled(intrs_en);
		}
		return;
	}

	ast_bits |= (interrupted_userspace ? AST_TELEMETRY_USER : AST_TELEMETRY_KERNEL);
	if (pmi) {
		ast_bits |= AST_TELEMETRY_PMI;
	}

	telemetry_needs_record = FALSE;
	thread_ast_set(thread, ast_bits);
	ast_propagate(thread);
}

/*
 * If userland has registered a port for telemetry notifications, send one now.
 */
static void
telemetry_notify_user(void)
{
	mach_port_t user_port = MACH_PORT_NULL;

	kern_return_t kr = host_get_telemetry_port(host_priv_self(), &user_port);
	if ((kr != KERN_SUCCESS) || !IPC_PORT_VALID(user_port)) {
		return;
	}

	telemetry_notification(user_port, 0);
	ipc_port_release_send(user_port);
}

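/*
 * Called at the AST boundary to translate the pending AST_TELEMETRY_* reasons
 * into a micro-snapshot record type and take the corresponding sample(s).
 */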
void
telemetry_ast(thread_t thread, ast_t reasons)
{
	assert((reasons & AST_TELEMETRY_ALL) != 0);

	uint8_t record_type = 0;
	if (reasons & AST_TELEMETRY_IO) {
		record_type |= kIORecord;
	}
	if (reasons & (AST_TELEMETRY_USER | AST_TELEMETRY_KERNEL)) {
		record_type |= (reasons & AST_TELEMETRY_PMI) ? kPMIRecord :
		    kInterruptRecord;
	}

	if ((reasons & AST_TELEMETRY_MACF) != 0) {
		record_type |= kMACFRecord;
	}

	enum micro_snapshot_flags user_telemetry = (reasons & AST_TELEMETRY_USER) ? kUserMode : 0;
	enum micro_snapshot_flags microsnapshot_flags = record_type | user_telemetry;

	if ((reasons & AST_TELEMETRY_MACF) != 0) {
		telemetry_macf_take_sample(thread, microsnapshot_flags);
	}

	if ((reasons & (AST_TELEMETRY_IO | AST_TELEMETRY_KERNEL | AST_TELEMETRY_PMI
	    | AST_TELEMETRY_USER)) != 0) {
		telemetry_take_sample(thread, microsnapshot_flags);
	}
}

bool
telemetry_task_ready_for_sample(task_t task)
{
	return task != TASK_NULL &&
	       task != kernel_task &&
	       !task_did_exec(task) &&
	       !task_is_exec_copy(task);
}

void
telemetry_instrumentation_begin(
	__unused struct micro_snapshot_buffer *buffer,
	__unused enum micro_snapshot_flags flags)
{
	/* telemetry_XXX accessed outside of lock for instrumentation only */
	KDBG(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_RECORD) | DBG_FUNC_START,
	    flags, telemetry_bytes_since_last_mark, 0,
	    (&telemetry_buffer != buffer));
}

void
telemetry_instrumentation_end(__unused struct micro_snapshot_buffer *buffer)
{
	/* telemetry_XXX accessed outside of lock for instrumentation only */
	KDBG(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_RECORD) | DBG_FUNC_END,
	    (&telemetry_buffer == buffer), telemetry_bytes_since_last_mark,
	    buffer->current_position, buffer->end_point);
}

void
telemetry_take_sample(thread_t thread, enum micro_snapshot_flags flags)
{
	task_t task;
	uintptr_t frames[128];
	size_t frames_len = sizeof(frames) / sizeof(frames[0]);
	uint32_t btcount;
	struct backtrace_user_info btinfo = BTUINFO_INIT;
	uint16_t async_start_index = UINT16_MAX;

	if (thread == THREAD_NULL) {
		return;
	}

	/* Ensure task is ready for taking a sample. */
	task = get_threadtask(thread);
	if (!telemetry_task_ready_for_sample(task)) {
		return;
	}

	telemetry_instrumentation_begin(&telemetry_buffer, flags);

	/* Collect backtrace from user thread. */
	btcount = backtrace_user(frames, frames_len, NULL, &btinfo);
	if (btinfo.btui_error != 0) {
		return;
	}
	if (btinfo.btui_async_frame_addr != 0 &&
	    btinfo.btui_async_start_index != 0) {
		/*
		 * Put the async callstack inline after the frame pointer walk call
		 * stack.
		 */
		async_start_index = (uint16_t)btinfo.btui_async_start_index;
		uintptr_t frame_addr = btinfo.btui_async_frame_addr;
		unsigned int frames_left = frames_len - async_start_index;
		struct backtrace_control ctl = { .btc_frame_addr = frame_addr, };
		btinfo = BTUINFO_INIT;
		unsigned int async_filled = backtrace_user(frames + async_start_index,
		    frames_left, &ctl, &btinfo);
		if (btinfo.btui_error == 0) {
			btcount = MIN(async_start_index + async_filled, frames_len);
		}
	}

	/* Process the backtrace. */
	struct telemetry_target target = {
		.thread = thread,
		.frames = frames,
		.frames_count = btcount,
		.user64_regs = (btinfo.btui_info & BTI_64_BIT) != 0,
		.microsnapshot_flags = flags,
		.include_metadata = flags & kPMIRecord,
		.buffer = &telemetry_buffer,
		.buffer_mtx = &telemetry_mtx,
		.async_start_index = async_start_index,
	};
	telemetry_process_sample(&target, true, NULL);

	telemetry_instrumentation_end(&telemetry_buffer);
}

#if CONFIG_MACF
void
telemetry_macf_take_sample(thread_t thread, enum micro_snapshot_flags flags)
{
	task_t task;

	uintptr_t frames_stack[128];
	vm_size_t btcapacity = ARRAY_COUNT(frames_stack);
	uint32_t btcount = 0;
	typedef uintptr_t telemetry_user_frame_t __kernel_data_semantics;
	telemetry_user_frame_t *frames = frames_stack;
	bool alloced_frames = false;

	struct backtrace_user_info btinfo = BTUINFO_INIT;
	struct backtrace_control btctl = BTCTL_INIT;

	uint32_t retry_count = 0;
	const uint32_t max_retries = 10;

	bool initialized = false;
	struct micro_snapshot_buffer *telbuf = &telemetry_macf_buffer;
	uint32_t record_start = 0;
	bool did_process = false;
	int rv = 0;

	if (thread == THREAD_NULL) {
		return;
	}

	telemetry_instrumentation_begin(telbuf, flags);

	/* Ensure task is ready for taking a sample. */
	task = get_threadtask(thread);
	if (!telemetry_task_ready_for_sample(task)) {
		rv = EBUSY;
		goto out;
	}

	/* Ensure MACF telemetry buffer was initialized. */
	TELEMETRY_MACF_LOCK();
	initialized = (telbuf->size > 0);
	TELEMETRY_MACF_UNLOCK();

	if (!initialized) {
		rv = ENOMEM;
		goto out;
	}

	/* Collect backtrace from user thread. */
	while (retry_count < max_retries) {
		btcount += backtrace_user(frames + btcount, btcapacity - btcount, &btctl, &btinfo);

		if ((btinfo.btui_info & BTI_TRUNCATED) != 0 && btinfo.btui_next_frame_addr != 0) {
			/*
			 * Fast path uses stack memory to avoid an allocation. We must
			 * pivot to heap memory in the case where we cannot write the
			 * complete backtrace to this buffer.
			 */
			if (frames == frames_stack) {
				btcapacity += 128;
				frames = kalloc_data(btcapacity * sizeof(*frames), Z_WAITOK);

				if (frames == NULL) {
					break;
				}

				alloced_frames = true;

				assert(btcapacity > sizeof(frames_stack) / sizeof(frames_stack[0]));
				memcpy(frames, frames_stack, sizeof(frames_stack));
			} else {
				assert(alloced_frames);
				frames = krealloc_data(frames,
				    btcapacity * sizeof(*frames),
				    (btcapacity + 128) * sizeof(*frames),
				    Z_WAITOK);

				if (frames == NULL) {
					break;
				}

				btcapacity += 128;
			}

			btctl.btc_frame_addr = btinfo.btui_next_frame_addr;
			++retry_count;
		} else {
			break;
		}
	}

	if (frames == NULL) {
		rv = ENOMEM;
		goto out;
	} else if (btinfo.btui_error != 0) {
		rv = btinfo.btui_error;
		goto out;
	}

	/* Process the backtrace. */
	struct telemetry_target target = {
		.thread = thread,
		.frames = frames,
		.frames_count = btcount,
		.user64_regs = (btinfo.btui_info & BTI_64_BIT) != 0,
		.microsnapshot_flags = flags,
		.include_metadata = false,
		.buffer = telbuf,
		.buffer_mtx = &telemetry_macf_mtx
	};
	rv = telemetry_process_sample(&target, false, &record_start);
	did_process = true;

out:
	/* Immediately deliver the collected sample to MAC clients. */
	if (rv == 0) {
		assert(telbuf->current_position >= record_start);
		mac_thread_telemetry(thread,
		    0,
		    (void *)(telbuf->buffer + record_start),
		    telbuf->current_position - record_start);
	} else {
		mac_thread_telemetry(thread, rv, NULL, 0);
	}

	/*
	 * The lock was taken by telemetry_process_sample, and we asked it not to
	 * unlock upon completion, so we must release the lock here.
	 */
	if (did_process) {
		TELEMETRY_MACF_UNLOCK();
	}

	if (alloced_frames && frames != NULL) {
		kfree_data(frames, btcapacity * sizeof(*frames));
	}

	telemetry_instrumentation_end(telbuf);
}
#endif /* CONFIG_MACF */

int
telemetry_process_sample(const struct telemetry_target *target,
    bool release_buffer_lock,
    uint32_t *out_current_record_start)
{
	thread_t thread = target->thread;
	uintptr_t *frames = target->frames;
	size_t btcount = target->frames_count;
	bool user64_regs = target->user64_regs;
	enum micro_snapshot_flags microsnapshot_flags = target->microsnapshot_flags;
	struct micro_snapshot_buffer *current_buffer = target->buffer;
	lck_mtx_t *buffer_mtx = target->buffer_mtx;

	task_t task;
	void *p;
	uint32_t bti;
	struct micro_snapshot *msnap;
	struct task_snapshot *tsnap;
	struct thread_snapshot *thsnap;
	clock_sec_t secs;
	clock_usec_t usecs;
	vm_size_t framesize;
	uint32_t current_record_start;
	uint32_t tmp = 0;
	bool notify = false;
	int rv = 0;

	if (thread == THREAD_NULL) {
		return EINVAL;
	}

	task = get_threadtask(thread);
	p = get_bsdtask_info(task);
	bool user64_va = task_has_64Bit_addr(task);

	/*
	 * Retrieve the array of UUIDs for binaries used by this task.
	 * We reach down into DYLD's data structures to find the array.
	 *
	 * XXX - make this common with kdp?
	 */
	uint32_t uuid_info_count = 0;
	mach_vm_address_t uuid_info_addr = 0;
	uint32_t uuid_info_size = 0;
	if (user64_va) {
		uuid_info_size = sizeof(struct user64_dyld_uuid_info);
		struct user64_dyld_all_image_infos task_image_infos;
		if (copyin(task->all_image_info_addr, (char *)&task_image_infos, sizeof(task_image_infos)) == 0) {
			uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
			uuid_info_addr = task_image_infos.uuidArray;
		}
	} else {
		uuid_info_size = sizeof(struct user32_dyld_uuid_info);
		struct user32_dyld_all_image_infos task_image_infos;
		if (copyin(task->all_image_info_addr, (char *)&task_image_infos, sizeof(task_image_infos)) == 0) {
			uuid_info_count = task_image_infos.uuidArrayCount;
			uuid_info_addr = task_image_infos.uuidArray;
		}
	}

	/*
	 * If we get a NULL uuid_info_addr (which can happen when we catch dyld in the middle of updating
	 * this data structure), we zero the uuid_info_count so that we won't even try to save load info
	 * for this task.
	 */
	if (!uuid_info_addr) {
		uuid_info_count = 0;
	}

	/*
	 * Don't copy in an unbounded amount of memory. The main binary and interesting
	 * non-shared-cache libraries should be in the first few images.
	 */
	if (uuid_info_count > TELEMETRY_MAX_UUID_COUNT) {
		uuid_info_count = TELEMETRY_MAX_UUID_COUNT;
	}

	uint32_t uuid_info_array_size = uuid_info_count * uuid_info_size;
	char *uuid_info_array = NULL;

	if (uuid_info_count > 0) {
		uuid_info_array = kalloc_data(uuid_info_array_size, Z_WAITOK);
		if (uuid_info_array == NULL) {
			return ENOMEM;
		}

		/*
		 * Copy in the UUID info array.
		 * It may be nonresident, in which case just fix up nloadinfos to 0 in the task snapshot.
		 */
		if (copyin(uuid_info_addr, uuid_info_array, uuid_info_array_size) != 0) {
			kfree_data(uuid_info_array, uuid_info_array_size);
			uuid_info_array = NULL;
			uuid_info_array_size = 0;
		}
	}

	/*
	 * Look for a dispatch queue serial number, and copy it in from userland if present.
	 */
	uint64_t dqserialnum = 0;
	int dqserialnum_valid = 0;

	uint64_t dqkeyaddr = thread_dispatchqaddr(thread);
	if (dqkeyaddr != 0) {
		uint64_t dqaddr = 0;
		uint64_t dq_serialno_offset = get_task_dispatchqueue_serialno_offset(task);
		if ((copyin(dqkeyaddr, (char *)&dqaddr, (user64_va ? 8 : 4)) == 0) &&
		    (dqaddr != 0) && (dq_serialno_offset != 0)) {
			uint64_t dqserialnumaddr = dqaddr + dq_serialno_offset;
			if (copyin(dqserialnumaddr, (char *)&dqserialnum, (user64_va ? 8 : 4)) == 0) {
				dqserialnum_valid = 1;
			}
		}
	}

	clock_get_calendar_microtime(&secs, &usecs);

	lck_mtx_lock(buffer_mtx);

	if (target->include_metadata) {
		int intrs_en = ml_set_interrupts_enabled(FALSE);
		lck_spin_lock(&telemetry_metadata_lck);
		telemetry_metadata.tm_samples_recorded += 1;
		lck_spin_unlock(&telemetry_metadata_lck);
		ml_set_interrupts_enabled(intrs_en);
	}

	/*
	 * If our buffer is not backed by anything, then we cannot take the
	 * sample. This is what lets us deallocate the buffer when telemetry
	 * is disabled.
	 */
	if (!current_buffer->buffer) {
		rv = EINVAL;
		goto cancel_sample;
	}

	/*
	 * We do the bulk of the operation under the telemetry lock, on assumption that
	 * any page faults during execution will not cause another AST_TELEMETRY_ALL
	 * to deadlock; they will just block until we finish. This makes it easier
	 * to copy into the buffer directly. As soon as we unlock, userspace can copy
	 * out of our buffer.
	 */

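	/*
	 * Each record is laid out contiguously, in buffer order: a struct
	 * micro_snapshot, a struct task_snapshot, the UUID load-info array, a
	 * struct thread_snapshot, an optional dispatch queue serial number, and
	 * finally the user frames. If any piece does not fit, the whole record
	 * is restarted at offset 0.
	 */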
copytobuffer:

	current_record_start = current_buffer->current_position;

	if ((current_buffer->size - current_buffer->current_position) < sizeof(struct micro_snapshot)) {
		/*
		 * We can't fit a record in the space available, so wrap around to the beginning.
		 * Save the current position as the known end point of valid data.
		 */
		current_buffer->end_point = current_record_start;
		current_buffer->current_position = 0;
		if (current_record_start == 0) {
			/* This sample is too large to fit in the buffer even when we started at 0, so skip it */
			rv = ERANGE;
			goto cancel_sample;
		}
		goto copytobuffer;
	}

	msnap = (struct micro_snapshot *)(uintptr_t)(current_buffer->buffer + current_buffer->current_position);
	msnap->snapshot_magic = STACKSHOT_MICRO_SNAPSHOT_MAGIC;
	msnap->ms_flags = (uint8_t)microsnapshot_flags;
	msnap->ms_opaque_flags = 0; /* namespace managed by userspace */
	msnap->ms_cpu = cpu_number();
	msnap->ms_time = secs;
	msnap->ms_time_microsecs = usecs;

	current_buffer->current_position += sizeof(struct micro_snapshot);

	if ((current_buffer->size - current_buffer->current_position) < sizeof(struct task_snapshot)) {
		current_buffer->end_point = current_record_start;
		current_buffer->current_position = 0;
		if (current_record_start == 0) {
			/* This sample is too large to fit in the buffer even when we started at 0, so skip it */
			rv = ERANGE;
			goto cancel_sample;
		}
		goto copytobuffer;
	}

	tsnap = (struct task_snapshot *)(uintptr_t)(current_buffer->buffer + current_buffer->current_position);
	bzero(tsnap, sizeof(*tsnap));
	tsnap->snapshot_magic = STACKSHOT_TASK_SNAPSHOT_MAGIC;
	tsnap->pid = proc_pid(p);
	tsnap->uniqueid = proc_uniqueid(p);
	struct recount_times_mach times = recount_task_terminated_times(task);
	tsnap->user_time_in_terminated_threads = times.rtm_user;
	tsnap->system_time_in_terminated_threads = times.rtm_system;
	tsnap->suspend_count = task->suspend_count;
	tsnap->task_size = (typeof(tsnap->task_size))(get_task_phys_footprint(task) / PAGE_SIZE);
	tsnap->faults = counter_load(&task->faults);
	tsnap->pageins = counter_load(&task->pageins);
	tsnap->cow_faults = counter_load(&task->cow_faults);
	/*
	 * The throttling counters are maintained as 64-bit counters in the proc
	 * structure. However, we reserve 32-bits (each) for them in the task_snapshot
	 * struct to save space and since we do not expect them to overflow 32-bits. If we
	 * find these values overflowing in the future, the fix would be to simply
	 * upgrade these counters to 64-bit in the task_snapshot struct
	 */
	tsnap->was_throttled = (uint32_t) proc_was_throttled(p);
	tsnap->did_throttle = (uint32_t) proc_did_throttle(p);
#if CONFIG_COALITIONS
	/*
	 * These fields are overloaded to represent the resource coalition ID of
	 * this task...
	 */
	coalition_t rsrc_coal = task->coalition[COALITION_TYPE_RESOURCE];
	tsnap->p_start_sec = rsrc_coal ? coalition_id(rsrc_coal) : 0;
	/*
	 * ... and the processes this thread is doing work on behalf of.
	 */
	pid_t origin_pid = -1, proximate_pid = -1;
	(void)thread_get_voucher_origin_proximate_pid(thread, &origin_pid, &proximate_pid);
	tsnap->p_start_usec = ((uint64_t)proximate_pid << 32) | (uint32_t)origin_pid;
#endif /* CONFIG_COALITIONS */

	if (task->t_flags & TF_TELEMETRY) {
		tsnap->ss_flags |= kTaskRsrcFlagged;
	}

	if (proc_get_effective_task_policy(task, TASK_POLICY_DARWIN_BG)) {
		tsnap->ss_flags |= kTaskDarwinBG;
	}

	proc_get_darwinbgstate(task, &tmp);

	if (proc_get_effective_task_policy(task, TASK_POLICY_ROLE) == TASK_FOREGROUND_APPLICATION) {
		tsnap->ss_flags |= kTaskIsForeground;
	}

	if (tmp & PROC_FLAG_ADAPTIVE_IMPORTANT) {
		tsnap->ss_flags |= kTaskIsBoosted;
	}

	if (tmp & PROC_FLAG_SUPPRESSED) {
		tsnap->ss_flags |= kTaskIsSuppressed;
	}

	tsnap->latency_qos = task_grab_latency_qos(task);

	strlcpy(tsnap->p_comm, proc_name_address(p), sizeof(tsnap->p_comm));
	const char *longname = proc_longname_address(p);
	if (longname[0] != '\0') {
		/*
		 * XXX Stash the rest of the process's name in some unused fields.
		 */
		strlcpy((char *)tsnap->io_priority_count, &longname[16], sizeof(tsnap->io_priority_count));
	}
	if (target->include_metadata) {
		int intrs_en = ml_set_interrupts_enabled(FALSE);
		lck_spin_lock(&telemetry_metadata_lck);
		tsnap->io_priority_size[0] = ((uint64_t)telemetry_metadata.tm_source << 32) | telemetry_metadata.tm_generation;
		tsnap->io_priority_size[1] = telemetry_metadata.tm_period;
		tsnap->io_priority_size[2] = telemetry_metadata.tm_samples_recorded;
		tsnap->io_priority_size[3] = telemetry_metadata.tm_samples_skipped;
		lck_spin_unlock(&telemetry_metadata_lck);
		ml_set_interrupts_enabled(intrs_en);
	}
	if (user64_va) {
		tsnap->ss_flags |= kUser64_p;
	}

	if (task->task_shared_region_slide != -1) {
		tsnap->shared_cache_slide = task->task_shared_region_slide;
		bcopy(task->task_shared_region_uuid, tsnap->shared_cache_identifier,
		    sizeof(task->task_shared_region_uuid));
	}

	current_buffer->current_position += sizeof(struct task_snapshot);

	/*
	 * Directly after the task snapshot, place the array of UUIDs
	 * corresponding to the binaries used by this task.
	 */
	if ((current_buffer->size - current_buffer->current_position) < uuid_info_array_size) {
		current_buffer->end_point = current_record_start;
		current_buffer->current_position = 0;
		if (current_record_start == 0) {
			/* This sample is too large to fit in the buffer even when we started at 0, so skip it */
			rv = ERANGE;
			goto cancel_sample;
		}
		goto copytobuffer;
	}

	/*
	 * Copy the UUID info array into our sample.
	 */
	if (uuid_info_array_size > 0) {
		bcopy(uuid_info_array, (char *)(current_buffer->buffer + current_buffer->current_position), uuid_info_array_size);
		tsnap->nloadinfos = uuid_info_count;
	}

	current_buffer->current_position += uuid_info_array_size;

	/*
	 * After the task snapshot & list of binary UUIDs, we place a thread snapshot.
	 */

	if ((current_buffer->size - current_buffer->current_position) < sizeof(struct thread_snapshot)) {
		/* wrap and overwrite */
		current_buffer->end_point = current_record_start;
		current_buffer->current_position = 0;
		if (current_record_start == 0) {
			/* This sample is too large to fit in the buffer even when we started at 0, so skip it */
			rv = ERANGE;
			goto cancel_sample;
		}
		goto copytobuffer;
	}

	thsnap = (struct thread_snapshot *)(uintptr_t)(current_buffer->buffer + current_buffer->current_position);
	bzero(thsnap, sizeof(*thsnap));

	thsnap->snapshot_magic = STACKSHOT_THREAD_SNAPSHOT_MAGIC;
	thsnap->thread_id = thread_tid(thread);
	thsnap->state = thread->state;
	thsnap->priority = thread->base_pri;
	thsnap->sched_pri = thread->sched_pri;
	thsnap->sched_flags = thread->sched_flags;
	thsnap->ss_flags |= kStacksPCOnly;
	thsnap->ts_qos = thread->effective_policy.thep_qos;
	thsnap->ts_rqos = thread->requested_policy.thrp_qos;
	thsnap->ts_rqos_override = MAX(thread->requested_policy.thrp_qos_override,
	    thread->requested_policy.thrp_qos_workq_override);
	memcpy(thsnap->_reserved + 1, &target->async_start_index,
	    sizeof(target->async_start_index));

	if (proc_get_effective_thread_policy(thread, TASK_POLICY_DARWIN_BG)) {
		thsnap->ss_flags |= kThreadDarwinBG;
	}

	boolean_t interrupt_state = ml_set_interrupts_enabled(FALSE);
	times = recount_current_thread_times();
	ml_set_interrupts_enabled(interrupt_state);
	thsnap->user_time = times.rtm_user;
	thsnap->system_time = times.rtm_system;

	current_buffer->current_position += sizeof(struct thread_snapshot);

	/*
	 * If this thread has a dispatch queue serial number, include it here.
	 */
	if (dqserialnum_valid) {
		if ((current_buffer->size - current_buffer->current_position) < sizeof(dqserialnum)) {
			/* wrap and overwrite */
			current_buffer->end_point = current_record_start;
			current_buffer->current_position = 0;
			if (current_record_start == 0) {
				/* This sample is too large to fit in the buffer even when we started at 0, so skip it */
				rv = ERANGE;
				goto cancel_sample;
			}
			goto copytobuffer;
		}

		thsnap->ss_flags |= kHasDispatchSerial;
		bcopy(&dqserialnum, (char *)current_buffer->buffer + current_buffer->current_position, sizeof(dqserialnum));
		current_buffer->current_position += sizeof(dqserialnum);
	}

	if (user64_regs) {
		framesize = 8;
		thsnap->ss_flags |= kUser64_p;
	} else {
		framesize = 4;
	}

	/*
	 * If we can't fit this entire stacktrace then cancel this record, wrap to the beginning,
	 * and start again there so that we always store a full record.
	 */
	if ((current_buffer->size - current_buffer->current_position) / framesize < btcount) {
		current_buffer->end_point = current_record_start;
		current_buffer->current_position = 0;
		if (current_record_start == 0) {
			/* This sample is too large to fit in the buffer even when we started at 0, so skip it */
			rv = ERANGE;
			goto cancel_sample;
		}
		goto copytobuffer;
	}

	for (bti = 0; bti < btcount; bti++, current_buffer->current_position += framesize) {
		if (framesize == 8) {
			*(uint64_t *)(uintptr_t)(current_buffer->buffer + current_buffer->current_position) = frames[bti];
		} else {
			*(uint32_t *)(uintptr_t)(current_buffer->buffer + current_buffer->current_position) = (uint32_t)frames[bti];
		}
	}

	if (current_buffer->end_point < current_buffer->current_position) {
		/*
		 * Each time the cursor wraps around to the beginning, we leave a
		 * differing amount of unused space at the end of the buffer. Make
		 * sure the cursor pushes the end point in case we're making use of
		 * more of the buffer than we did the last time we wrapped.
		 */
		current_buffer->end_point = current_buffer->current_position;
	}

	thsnap->nuser_frames = btcount;

	/*
	 * Now THIS is a hack.
	 */
	if (current_buffer == &telemetry_buffer) {
		telemetry_bytes_since_last_mark += (current_buffer->current_position - current_record_start);
		if (telemetry_bytes_since_last_mark > telemetry_buffer_notify_at) {
			notify = true;
		}
	}

	if (out_current_record_start != NULL) {
		*out_current_record_start = current_record_start;
	}

cancel_sample:
	if (release_buffer_lock) {
		lck_mtx_unlock(buffer_mtx);
	}

	if (notify) {
		telemetry_notify_user();
	}

	if (uuid_info_array != NULL) {
		kfree_data(uuid_info_array, uuid_info_array_size);
	}

	return rv;
}

#if TELEMETRY_DEBUG
static void
log_telemetry_output(vm_offset_t buf, uint32_t pos, uint32_t sz)
{
	struct micro_snapshot *p;
	uint32_t offset;

	printf("Copying out %d bytes of telemetry at offset %d\n", sz, pos);

	buf += pos;

	/*
	 * Find and log each timestamp in this chunk of buffer.
	 */
	for (offset = 0; offset < sz; offset++) {
		p = (struct micro_snapshot *)(buf + offset);
		if (p->snapshot_magic == STACKSHOT_MICRO_SNAPSHOT_MAGIC) {
			printf("telemetry timestamp: %lld\n", p->ms_time);
		}
	}
}
#endif

int
telemetry_gather(user_addr_t buffer, uint32_t *length, bool mark)
{
	return telemetry_buffer_gather(buffer, length, mark, &telemetry_buffer);
}

int
telemetry_buffer_gather(user_addr_t buffer, uint32_t *length, bool mark, struct micro_snapshot_buffer *current_buffer)
{
	int result = 0;
	uint32_t oldest_record_offset;

	KDBG(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_GATHER) | DBG_FUNC_START,
	    mark, telemetry_bytes_since_last_mark, 0,
	    (&telemetry_buffer != current_buffer));

	TELEMETRY_LOCK();

	if (current_buffer->buffer == 0) {
		*length = 0;
		goto out;
	}

	if (*length < current_buffer->size) {
		result = KERN_NO_SPACE;
		goto out;
	}

	/*
	 * Copy the ring buffer out to userland in order sorted by time: least recent to most recent.
	 * First, we need to search forward from the cursor to find the oldest record in our buffer.
	 */
	oldest_record_offset = current_buffer->current_position;
	do {
		if (((oldest_record_offset + sizeof(uint32_t)) > current_buffer->size) ||
		    ((oldest_record_offset + sizeof(uint32_t)) > current_buffer->end_point)) {
			if (*(uint32_t *)(uintptr_t)(current_buffer->buffer) == 0) {
				/*
				 * There is no magic number at the start of the buffer, which means
				 * it's empty; nothing to see here yet.
				 */
				*length = 0;
				goto out;
			}
			/*
			 * We've looked through the end of the active buffer without finding a valid
			 * record; that means all valid records are in a single chunk, beginning at
			 * the very start of the buffer.
			 */

			oldest_record_offset = 0;
			assert(*(uint32_t *)(uintptr_t)(current_buffer->buffer) == STACKSHOT_MICRO_SNAPSHOT_MAGIC);
			break;
		}

		if (*(uint32_t *)(uintptr_t)(current_buffer->buffer + oldest_record_offset) == STACKSHOT_MICRO_SNAPSHOT_MAGIC) {
			break;
		}

		/*
		 * There are no alignment guarantees for micro-stackshot records, so we must search at each
		 * byte offset.
		 */
		oldest_record_offset++;
	} while (oldest_record_offset != current_buffer->current_position);

	/*
	 * If needed, copyout in two chunks: from the oldest record to the end of the buffer, and then
	 * from the beginning of the buffer up to the current position.
	 */
	if (oldest_record_offset != 0) {
#if TELEMETRY_DEBUG
		log_telemetry_output(current_buffer->buffer, oldest_record_offset,
		    current_buffer->end_point - oldest_record_offset);
#endif
		if ((result = copyout((void *)(current_buffer->buffer + oldest_record_offset), buffer,
		    current_buffer->end_point - oldest_record_offset)) != 0) {
			*length = 0;
			goto out;
		}
		*length = current_buffer->end_point - oldest_record_offset;
	} else {
		*length = 0;
	}

#if TELEMETRY_DEBUG
	log_telemetry_output(current_buffer->buffer, 0, current_buffer->current_position);
#endif
	if ((result = copyout((void *)current_buffer->buffer, buffer + *length,
	    current_buffer->current_position)) != 0) {
		*length = 0;
		goto out;
	}
	*length += (uint32_t)current_buffer->current_position;

out:
	if (mark && (*length > 0)) {
		telemetry_bytes_since_last_mark = 0;
	}

	TELEMETRY_UNLOCK();

	KDBG(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_GATHER) | DBG_FUNC_END,
	    current_buffer->current_position, *length,
	    current_buffer->end_point, (&telemetry_buffer != current_buffer));

	return result;
}

#if CONFIG_MACF
static int
telemetry_macf_init_locked(size_t buffer_size)
{
	kern_return_t kr;

	if (buffer_size > TELEMETRY_MAX_BUFFER_SIZE) {
		buffer_size = TELEMETRY_MAX_BUFFER_SIZE;
	}

	telemetry_macf_buffer.size = buffer_size;

	kr = kmem_alloc(kernel_map, &telemetry_macf_buffer.buffer,
	    telemetry_macf_buffer.size, KMA_DATA | KMA_ZERO | KMA_PERMANENT,
	    VM_KERN_MEMORY_SECURITY);

	if (kr != KERN_SUCCESS) {
		kprintf("Telemetry (MACF): Allocation failed: %d\n", kr);
		return ENOMEM;
	}

	return 0;
}

int
telemetry_macf_mark_curthread(void)
{
	thread_t thread = current_thread();
	task_t task = get_threadtask(thread);
	int rv = 0;

	if (task == kernel_task) {
		/* Kernel threads never return to an AST boundary, and are ineligible */
		return EINVAL;
	}

	/* Initialize the MACF telemetry buffer if needed. */
	TELEMETRY_MACF_LOCK();
	if (__improbable(telemetry_macf_buffer.size == 0)) {
		rv = telemetry_macf_init_locked(TELEMETRY_MACF_DEFAULT_BUFFER_SIZE);

		if (rv != 0) {
			/* Don't leak the mutex on the failure path. */
			TELEMETRY_MACF_UNLOCK();
			return rv;
		}
	}
	TELEMETRY_MACF_UNLOCK();

	act_set_macf_telemetry_ast(thread);
	return 0;
}
#endif /* CONFIG_MACF */

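/*
 * Stash one breakpoint event from trap context into the fixed-size ca_entries
 * buffer. telemetry_flush_ca_events() later drains it from a thread call,
 * since the CoreAnalytics framework cannot allocate memory at interrupt time.
 */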
static void
telemetry_stash_ca_event(
	kernel_brk_type_t type,
	uint16_t comment,
	uint32_t total_frames,
	uintptr_t *backtrace,
	uintptr_t faulting_address)
{
	/* Skip telemetry if we accidentally took a fault while handling telemetry */
	bool *in_handler = PERCPU_GET(brk_telemetry_in_handler);
	if (*in_handler) {
#if DEVELOPMENT
		panic("Breakpoint trap re-entered from within a spinlock");
#endif
		return;
	}

	/* Rate limit on repeatedly seeing the same address */
	uintptr_t *cache_address = PERCPU_GET(brk_telemetry_cache_address);
	if (*cache_address == faulting_address) {
		return;
	}

	*cache_address = faulting_address;

	lck_spin_lock(&ca_entries_lck);
	*in_handler = true;

	if (__improbable(ca_entries_index > CA_ENTRIES_SIZE)) {
		panic("Invalid CA interrupt buffer index %d > %d",
		    ca_entries_index, CA_ENTRIES_SIZE);
	}

	/* We're full, just drop the event */
	if (ca_entries_index == CA_ENTRIES_SIZE) {
		*in_handler = false;
		lck_spin_unlock(&ca_entries_lck);
		return;
	}

	ca_entries[ca_entries_index].type = type;
	ca_entries[ca_entries_index].code = comment;
	ca_entries[ca_entries_index].faulting_address = faulting_address;

	assert(total_frames <= TELEMETRY_BT_FRAMES);

	if (total_frames <= TELEMETRY_BT_FRAMES) {
		ca_entries[ca_entries_index].num_frames = total_frames;
		memcpy(ca_entries[ca_entries_index].frames, backtrace,
		    total_frames * sizeof(uintptr_t));
	}

	ca_entries_index++;

	*in_handler = false;
	lck_spin_unlock(&ca_entries_lck);

	thread_call_enter(telemetry_ca_send_callout);
}

static int
telemetry_backtrace_add_kernel(
	char *buf,
	size_t buflen)
{
	int rc = 0;
#if defined(__arm__) || defined(__arm64__)
	extern vm_offset_t segTEXTEXECB;
	extern unsigned long segSizeTEXTEXEC;
	vm_address_t unslid = segTEXTEXECB - vm_kernel_stext;

	rc += scnprintf(buf, buflen, "%s@%lx:%lx\n",
	    kernel_uuid_string, unslid, unslid + segSizeTEXTEXEC - 1);
#elif defined(__x86_64__)
	rc += scnprintf(buf, buflen, "%s@0:%lx\n",
	    kernel_uuid_string, vm_kernel_etext - vm_kernel_stext);
#else
#pragma unused(buf, buflen)
#endif
	return rc;
}

void
telemetry_backtrace_to_string(
	char *buf,
	size_t buflen,
	uint32_t tot,
	uintptr_t *frames)
{
	size_t l = 0;

	for (uint32_t i = 0; i < tot; i++) {
		l += scnprintf(buf + l, buflen - l, "%lx\n",
		    frames[i] - vm_kernel_stext);
	}
	l += telemetry_backtrace_add_kernel(buf + l, buflen - l);
	telemetry_backtrace_add_kexts(buf + l, buflen - l, frames, tot);
}

static void
telemetry_flush_ca_events(
	__unused thread_call_param_t p0,
	__unused thread_call_param_t p1)
{
	struct telemetry_ca_entry local_entries[CA_ENTRIES_SIZE] = {0};
	uint8_t entry_cnt = 0;
	bool *in_handler = PERCPU_GET(brk_telemetry_in_handler);

	lck_spin_lock(&ca_entries_lck);
	*in_handler = true;

	if (__improbable(ca_entries_index > CA_ENTRIES_SIZE)) {
		panic("Invalid CA interrupt buffer index %d > %d", ca_entries_index,
		    CA_ENTRIES_SIZE);
	}

	if (ca_entries_index == 0) {
		*in_handler = false;
		lck_spin_unlock(&ca_entries_lck);
		return;
	} else {
		memcpy(local_entries, ca_entries, sizeof(local_entries));
		entry_cnt = ca_entries_index;
		ca_entries_index = 0;
	}

	*in_handler = false;
	lck_spin_unlock(&ca_entries_lck);

	/*
	 * All addresses (faulting_address and backtrace) are relative to
	 * vm_kernel_stext, which means that all offsets will typically be
	 * <= 50M and use 7 hex digits.
	 *
	 * We allow up to TELEMETRY_BT_FRAMES (5) entries, formatted like this:
	 *
	 * <OFFSET1>\n
	 * <OFFSET2>\n
	 * ...
	 * <UUID_a>@<TEXT_EXEC_BASE_OFFSET>:<TEXT_EXEC_END_OFFSET>\n
	 * <UUID_b>@<TEXT_EXEC_BASE_OFFSET>:<TEXT_EXEC_END_OFFSET>\n
	 * ...
	 *
	 * In general this backtrace takes 8 bytes per "frame",
	 * with an extra 52 bytes per unique UUID referenced.
	 *
	 * The buffer we have is CA_UBSANBUF_LEN (256 bytes) long, which
	 * accommodates 4 full unique UUIDs and should be sufficient.
	 */

	/* Send the events */
	for (uint8_t i = 0; i < entry_cnt; i++) {
		ca_event_t ca_event = CA_EVENT_ALLOCATE(kernel_breakpoint_event);
		CA_EVENT_TYPE(kernel_breakpoint_event) * event = ca_event->data;

		event->brk_type = local_entries[i].type;
		event->brk_code = local_entries[i].code;
		event->faulting_address = local_entries[i].faulting_address;

		telemetry_backtrace_to_string(event->backtrace,
		    sizeof(event->backtrace),
		    local_entries[i].num_frames,
		    local_entries[i].frames);
		strlcpy(event->uuid, kernel_uuid_string, CA_UUID_LEN);

		CA_EVENT_SEND(ca_event);
	}
}

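/*
 * Entry point from the kernel breakpoint trap handlers: backtrace from the
 * saved frame pointer and, if KERNEL_BRK_CORE_ANALYTICS is requested, stash a
 * CoreAnalytics event keyed on the unslid faulting PC.
 */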
void
telemetry_kernel_brk(
	kernel_brk_type_t type,
	kernel_brk_options_t options,
	void *tstate,
	uint16_t comment)
{
#if __arm64__
	arm_saved_state_t *state = (arm_saved_state_t *)tstate;

	uintptr_t faulting_address = get_saved_state_pc(state);
	uintptr_t saved_fp = get_saved_state_fp(state);
#else
	x86_saved_state64_t *state = (x86_saved_state64_t *)tstate;

	uintptr_t faulting_address = state->isf.rip;
	uintptr_t saved_fp = state->rbp;
#endif

	assert(options & KERNEL_BRK_TELEMETRY_OPTIONS);

	if (startup_phase < STARTUP_SUB_THREAD_CALL) {
#if DEVELOPMENT || DEBUG
		panic("Attempting kernel breakpoint telemetry in early boot.");
#endif
		return;
	}

	if (options & KERNEL_BRK_CORE_ANALYTICS) {
		uintptr_t frames[TELEMETRY_BT_FRAMES];

		struct backtrace_control ctl = {
			.btc_frame_addr = (uintptr_t)saved_fp,
		};

		uint32_t total_frames = backtrace(frames, TELEMETRY_BT_FRAMES, &ctl, NULL);

		telemetry_stash_ca_event(type, comment, total_frames,
		    frames, faulting_address - vm_kernel_stext);
	}
}

/************************/
/* BOOT PROFILE SUPPORT */
/************************/
/*
 * Boot Profiling
 *
 * The boot-profiling support is a mechanism to sample activity happening on the
 * system during boot. This mechanism sets up a periodic timer and on every timer fire,
 * captures a full backtrace into the boot profiling buffer. This buffer can be pulled
 * out and analyzed from user-space. It is turned on using the following boot-args:
 * "bootprofile_buffer_size" specifies the size of the boot profile buffer
 * "bootprofile_interval_ms" specifies the interval for the profiling timer
 *
 * Process Specific Boot Profiling
 *
 * The boot-arg "bootprofile_proc_name" can be used to specify a certain
 * process that needs to be profiled during boot. Setting this boot-arg changes
 * the way stackshots are captured. At every timer fire, the code looks at the
 * currently running process and takes a stackshot only if the requested process
 * is on-core (which makes it unsuitable for MP systems).
 *
 * Trigger Events
 *
 * The boot-arg "bootprofile_type=boot" starts the timer during early boot. Using
 * "wake" starts the timer at AP wake from suspend-to-RAM.
 */
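/*
 * Example boot-args (illustrative values):
 *     bootprofile_buffer_size=4194304 bootprofile_interval_ms=10 bootprofile_type=boot
 * Optionally restrict sampling to a single process by name:
 *     bootprofile_proc_name=launchd
 */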

#define BOOTPROFILE_MAX_BUFFER_SIZE (64*1024*1024) /* see also COPYSIZELIMIT_PANIC */

vm_offset_t bootprofile_buffer = 0;
uint32_t bootprofile_buffer_size = 0;
uint32_t bootprofile_buffer_current_position = 0;
uint32_t bootprofile_interval_ms = 0;
uint64_t bootprofile_stackshot_flags = 0;
uint64_t bootprofile_interval_abs = 0;
uint64_t bootprofile_next_deadline = 0;
uint32_t bootprofile_all_procs = 0;
char bootprofile_proc_name[17];
uint64_t bootprofile_delta_since_timestamp = 0;
LCK_GRP_DECLARE(bootprofile_lck_grp, "bootprofile_group");
LCK_MTX_DECLARE(bootprofile_mtx, &bootprofile_lck_grp);

enum {
	kBootProfileDisabled = 0,
	kBootProfileStartTimerAtBoot,
	kBootProfileStartTimerAtWake
} bootprofile_type = kBootProfileDisabled;

static timer_call_data_t bootprofile_timer_call_entry;

#define BOOTPROFILE_LOCK() do { lck_mtx_lock(&bootprofile_mtx); } while(0)
#define BOOTPROFILE_TRY_SPIN_LOCK() lck_mtx_try_lock_spin(&bootprofile_mtx)
#define BOOTPROFILE_UNLOCK() do { lck_mtx_unlock(&bootprofile_mtx); } while(0)

static void bootprofile_timer_call(
	timer_call_param_t param0,
	timer_call_param_t param1);

void
bootprofile_init(void)
{
	kern_return_t ret;
	char type[32];

	if (!PE_parse_boot_argn("bootprofile_buffer_size",
	    &bootprofile_buffer_size, sizeof(bootprofile_buffer_size))) {
		bootprofile_buffer_size = 0;
	}

	if (bootprofile_buffer_size > BOOTPROFILE_MAX_BUFFER_SIZE) {
		bootprofile_buffer_size = BOOTPROFILE_MAX_BUFFER_SIZE;
	}

	if (!PE_parse_boot_argn("bootprofile_interval_ms",
	    &bootprofile_interval_ms, sizeof(bootprofile_interval_ms))) {
		bootprofile_interval_ms = 0;
	}

	if (!PE_parse_boot_argn("bootprofile_stackshot_flags",
	    &bootprofile_stackshot_flags, sizeof(bootprofile_stackshot_flags))) {
		bootprofile_stackshot_flags = 0;
	}

	if (!PE_parse_boot_argn("bootprofile_proc_name",
	    &bootprofile_proc_name, sizeof(bootprofile_proc_name))) {
		bootprofile_all_procs = 1;
		bootprofile_proc_name[0] = '\0';
	}

	if (PE_parse_boot_argn("bootprofile_type", type, sizeof(type))) {
		if (0 == strcmp(type, "boot")) {
			bootprofile_type = kBootProfileStartTimerAtBoot;
		} else if (0 == strcmp(type, "wake")) {
			bootprofile_type = kBootProfileStartTimerAtWake;
		} else {
			bootprofile_type = kBootProfileDisabled;
		}
	} else {
		bootprofile_type = kBootProfileDisabled;
	}

	clock_interval_to_absolutetime_interval(bootprofile_interval_ms, NSEC_PER_MSEC, &bootprofile_interval_abs);

	/* The type, buffer size, and interval boot-args must all be set to enable profiling */
	if ((bootprofile_type == kBootProfileDisabled) || (bootprofile_buffer_size == 0) || (bootprofile_interval_abs == 0)) {
		return;
	}

	ret = kmem_alloc(kernel_map, &bootprofile_buffer, bootprofile_buffer_size,
	    KMA_DATA | KMA_ZERO | KMA_PERMANENT, VM_KERN_MEMORY_DIAG);
	if (ret != KERN_SUCCESS) {
		kprintf("Boot profile: Allocation failed: %d\n", ret);
		return;
	}

	kprintf("Boot profile: Sampling %s once per %u ms at %s\n",
	    bootprofile_all_procs ? "all procs" : bootprofile_proc_name, bootprofile_interval_ms,
	    bootprofile_type == kBootProfileStartTimerAtBoot ? "boot" : (bootprofile_type == kBootProfileStartTimerAtWake ? "wake" : "unknown"));

	timer_call_setup(&bootprofile_timer_call_entry,
	    bootprofile_timer_call,
	    NULL);

	if (bootprofile_type == kBootProfileStartTimerAtBoot) {
		bootprofile_next_deadline = mach_absolute_time() + bootprofile_interval_abs;
		timer_call_enter_with_leeway(&bootprofile_timer_call_entry,
		    NULL,
		    bootprofile_next_deadline,
		    0,
		    TIMER_CALL_SYS_NORMAL,
		    false);
	}
}

void
bootprofile_wake_from_sleep(void)
{
	if (bootprofile_type == kBootProfileStartTimerAtWake) {
		bootprofile_next_deadline = mach_absolute_time() + bootprofile_interval_abs;
		timer_call_enter_with_leeway(&bootprofile_timer_call_entry,
		    NULL,
		    bootprofile_next_deadline,
		    0,
		    TIMER_CALL_SYS_NORMAL,
		    false);
	}
}

static void
bootprofile_timer_call(
	timer_call_param_t param0 __unused,
	timer_call_param_t param1 __unused)
{
	unsigned retbytes = 0;
	int pid_to_profile = -1;

	if (!BOOTPROFILE_TRY_SPIN_LOCK()) {
		goto reprogram;
	}

	/* Check if process-specific boot profiling is turned on */
	if (!bootprofile_all_procs) {
		/*
		 * Since boot profiling initializes really early in boot, it is
		 * possible that at this point, the task/proc is not initialized.
		 * Nothing to do in that case.
		 */

		if ((current_task() != NULL) && (get_bsdtask_info(current_task()) != NULL) &&
		    (0 == strncmp(bootprofile_proc_name, proc_name_address(get_bsdtask_info(current_task())), 17))) {
			pid_to_profile = proc_selfpid();
		} else {
			/*
			 * Process-specific boot profiling requested but the on-core process is
			 * something else. Nothing to do here.
			 */
			BOOTPROFILE_UNLOCK();
			goto reprogram;
		}
	}

	/* initiate a stackshot with whatever portion of the buffer is left */
	if (bootprofile_buffer_current_position < bootprofile_buffer_size) {
		uint64_t flags = STACKSHOT_KCDATA_FORMAT | STACKSHOT_TRYLOCK | STACKSHOT_SAVE_LOADINFO
		    | STACKSHOT_GET_GLOBAL_MEM_STATS;
#if defined(XNU_TARGET_OS_OSX)
		flags |= STACKSHOT_SAVE_KEXT_LOADINFO;
#endif

		/* OR on flags specified in boot-args */
		flags |= bootprofile_stackshot_flags;
		if ((flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) && (bootprofile_delta_since_timestamp == 0)) {
			/* Can't take deltas until the first one */
			flags &= ~STACKSHOT_COLLECT_DELTA_SNAPSHOT;
		}

		uint64_t timestamp = 0;
		if (bootprofile_stackshot_flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) {
			timestamp = mach_absolute_time();
		}

		kern_return_t r = stack_snapshot_from_kernel(
			pid_to_profile, (void *)(bootprofile_buffer + bootprofile_buffer_current_position),
			bootprofile_buffer_size - bootprofile_buffer_current_position,
			flags, bootprofile_delta_since_timestamp, 0, &retbytes);

		/*
		 * We call with STACKSHOT_TRYLOCK because the stackshot lock is coarser
		 * than the bootprofile lock. If someone else has the lock we'll just
		 * try again later.
		 */

		if (r == KERN_LOCK_OWNED) {
			BOOTPROFILE_UNLOCK();
			goto reprogram;
		}

		if (bootprofile_stackshot_flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT &&
		    r == KERN_SUCCESS) {
			bootprofile_delta_since_timestamp = timestamp;
		}

		bootprofile_buffer_current_position += retbytes;
	}

	BOOTPROFILE_UNLOCK();

	/* If we didn't get any data or have run out of buffer space, stop profiling */
	if ((retbytes == 0) || (bootprofile_buffer_current_position == bootprofile_buffer_size)) {
		return;
	}

reprogram:
	/* If the user gathered the buffer, no need to keep profiling */
	if (bootprofile_interval_abs == 0) {
		return;
	}

	clock_deadline_for_periodic_event(bootprofile_interval_abs,
	    mach_absolute_time(),
	    &bootprofile_next_deadline);
	timer_call_enter_with_leeway(&bootprofile_timer_call_entry,
	    NULL,
	    bootprofile_next_deadline,
	    0,
	    TIMER_CALL_SYS_NORMAL,
	    false);
}

void
bootprofile_get(void **buffer, uint32_t *length)
{
	BOOTPROFILE_LOCK();
	*buffer = (void*) bootprofile_buffer;
	*length = bootprofile_buffer_current_position;
	BOOTPROFILE_UNLOCK();
}

int
bootprofile_gather(user_addr_t buffer, uint32_t *length)
{
	int result = 0;

	BOOTPROFILE_LOCK();

	if (bootprofile_buffer == 0) {
		*length = 0;
		goto out;
	}

	if (*length < bootprofile_buffer_current_position) {
		result = KERN_NO_SPACE;
		goto out;
	}

	if ((result = copyout((void *)bootprofile_buffer, buffer,
	    bootprofile_buffer_current_position)) != 0) {
		*length = 0;
		goto out;
	}
	*length = bootprofile_buffer_current_position;

	/* cancel future timers */
	bootprofile_interval_abs = 0;

out:
	BOOTPROFILE_UNLOCK();

	return result;
}