/*
 * Copyright (c) 2012-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/host_priv.h>
#include <mach/host_special_ports.h>
#include <mach/mach_types.h>
#include <mach/telemetry_notification_server.h>

#include <kern/assert.h>
#include <kern/clock.h>
#include <kern/coalition.h>
#include <kern/counter.h>
#include <kern/debug.h>
#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/kern_types.h>
#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/telemetry.h>
#include <kern/timer_call.h>
#include <kern/policy_internal.h>
#include <kern/kcdata.h>
#include <kern/percpu.h>
#include <kern/mpsc_ring.h>
#include <kern/kern_stackshot.h>

#include <pexpert/pexpert.h>

#include <string.h>
#include <vm/vm_kern_xnu.h>
#include <vm/vm_shared_region.h>

#include <kperf/callstack.h>
#include <kern/backtrace.h>
#include <kern/monotonic.h>

#include <security/mac_mach_internal.h>

#include <sys/errno.h>
#include <sys/kdebug.h>
#include <uuid/uuid.h>
#include <kdp/kdp_dyld.h>

#include <libkern/OSAtomic.h>
#include <libkern/coreanalytics/coreanalytics.h>
#include <kern/thread_call.h>

struct proc;
extern int proc_pid(struct proc *);
extern char *proc_name_address(void *p);
extern char *proc_longname_address(void *p);
extern uint64_t proc_uniqueid(void *p);
extern uint64_t proc_was_throttled(void *p);
extern uint64_t proc_did_throttle(void *p);
extern boolean_t task_did_exec(task_t task);
extern boolean_t task_is_exec_copy(task_t task);

#if CONFIG_CPU_COUNTERS
#define HAS_PMI_MICROSTACKSHOTS 1
#endif /* CONFIG_CPU_COUNTERS */

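/*
 * A simple ring buffer of micro-stackshot records.  `buffer` is the kernel VA
 * of the storage and `size` its total length in bytes.  `current_position` is
 * the write cursor, and `end_point` marks the end of valid data written
 * before the most recent wrap-around.
 */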
struct micro_snapshot_buffer {
	vm_offset_t buffer;
	uint32_t size;
	uint32_t current_position;
	uint32_t end_point;
};

static const size_t _telemetry_sample_size_static = sizeof(struct micro_snapshot) +
    sizeof(struct task_snapshot) +
    sizeof(struct thread_snapshot);

static void telemetry_instrumentation_begin(
	struct micro_snapshot_buffer *buffer, enum micro_snapshot_flags flags);

static void telemetry_instrumentation_end(struct micro_snapshot_buffer *buffer);

static void telemetry_take_sample(thread_t thread, enum micro_snapshot_flags flags);

#if HAS_PMI_MICROSTACKSHOTS
static void _telemetry_take_sample_kernel(thread_t thread, enum micro_snapshot_flags flags);
static void _telemetry_mark_curthread(bool interrupted_userspace);
#endif /* HAS_PMI_MICROSTACKSHOTS */

#if CONFIG_MACF
static void telemetry_macf_take_sample(thread_t thread, enum micro_snapshot_flags flags);
#endif

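/*
 * Bundles everything needed to turn a collected user backtrace into a record
 * in a micro-snapshot buffer: the sampled thread, its frames, and the
 * destination buffer along with the mutex that guards it.
 */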
struct telemetry_target {
	thread_t thread;
	uintptr_t *frames;
	size_t frames_count;
	bool user64_regs;
	uint16_t async_start_index;
	enum micro_snapshot_flags microsnapshot_flags;
	bool include_metadata;
	struct micro_snapshot_buffer *buffer;
	lck_mtx_t *buffer_mtx;
};

static int telemetry_process_sample(
	const struct telemetry_target *target,
	bool release_buffer_lock,
	uint32_t *out_current_record_start);

static int telemetry_buffer_gather(
	user_addr_t buffer,
	uint32_t *length,
	bool mark,
	struct micro_snapshot_buffer *current_buffer);

#define TELEMETRY_DEFAULT_BUFFER_SIZE (16 * 1024)
#define TELEMETRY_MAX_BUFFER_SIZE (64 * 1024)

#define TELEMETRY_DEFAULT_NOTIFY_LEEWAY (4*1024) // Userland gets 4k of leeway to collect data after notification
#define TELEMETRY_MAX_UUID_COUNT (128) // Max of 128 non-shared-cache UUIDs to log for symbolication

bool telemetry_sample_pmis = false;

uint32_t telemetry_timestamp = 0;

struct telemetry_metadata {
	/*
	 * The current generation of microstackshot-based telemetry.
	 * Incremented whenever the settings change.
	 */
	uint32_t tm_generation;
	/*
	 * The total number of samples recorded.
	 */
	uint64_t tm_samples_recorded;
	/*
	 * The total number of samples that were skipped.
	 */
	uint64_t tm_samples_skipped;
	/*
	 * What's triggering the microstackshot samples.
	 */
	enum telemetry_source {
		TMSRC_NONE = 0,
		TMSRC_UNKNOWN,
		TMSRC_TIME,
		TMSRC_INSTRUCTIONS,
		TMSRC_CYCLES,
	} tm_source;
	/*
	 * The interval used for periodic sampling.
	 */
	uint64_t tm_period;
};

/*
 * The telemetry_buffer holds the timer- and interrupt-driven samples taken
 * by compute_averages().  It notifies its client (if one exists) once it has
 * accumulated enough data to be worth flushing.
 */
struct micro_snapshot_buffer telemetry_buffer = {
	.buffer = 0,
	.size = 0,
	.current_position = 0,
	.end_point = 0
};

#if CONFIG_MACF
#define TELEMETRY_MACF_DEFAULT_BUFFER_SIZE (16*1024)
/*
 * The MAC framework uses its own telemetry buffer for the purposes of auditing
 * security-related work being done by userland threads.
 */
struct micro_snapshot_buffer telemetry_macf_buffer = {
	.buffer = 0,
	.size = 0,
	.current_position = 0,
	.end_point = 0
};
#endif /* CONFIG_MACF */

int telemetry_bytes_since_last_mark = -1; // How much data since buf was last marked?
int telemetry_buffer_notify_at = 0;

LCK_GRP_DECLARE(telemetry_lck_grp, "telemetry group");
LCK_MTX_DECLARE(telemetry_mtx, &telemetry_lck_grp);
LCK_MTX_DECLARE(telemetry_pmi_mtx, &telemetry_lck_grp);
LCK_MTX_DECLARE(telemetry_macf_mtx, &telemetry_lck_grp);
LCK_SPIN_DECLARE(telemetry_metadata_lck, &telemetry_lck_grp);

#define TELEMETRY_LOCK() do { lck_mtx_lock(&telemetry_mtx); } while (0)
#define TELEMETRY_TRY_SPIN_LOCK() lck_mtx_try_lock_spin(&telemetry_mtx)
#define TELEMETRY_UNLOCK() do { lck_mtx_unlock(&telemetry_mtx); } while (0)

#define TELEMETRY_PMI_LOCK() do { lck_mtx_lock(&telemetry_pmi_mtx); } while (0)
#define TELEMETRY_PMI_UNLOCK() do { lck_mtx_unlock(&telemetry_pmi_mtx); } while (0)

#define TELEMETRY_MACF_LOCK() do { lck_mtx_lock(&telemetry_macf_mtx); } while (0)
#define TELEMETRY_MACF_UNLOCK() do { lck_mtx_unlock(&telemetry_macf_mtx); } while (0)

/*
 * Protected by the telemetry_metadata_lck spinlock.
 */
struct telemetry_metadata telemetry_metadata = { 0 };

#if HAS_PMI_MICROSTACKSHOTS
static __security_const_late thread_call_t _telemetry_kernel_notify_thread;
_Atomic bool _telemetry_kernel_notified = false;
static struct mpsc_ring _telemetry_kernel_ring;

static void _telemetry_kernel_notify(void *, void *);
#endif /* HAS_PMI_MICROSTACKSHOTS */

TUNABLE(uint32_t, telemetry_buffer_size, "telemetry_buffer_size", TELEMETRY_DEFAULT_BUFFER_SIZE);
TUNABLE(uint8_t, telemetry_kernel_buffer_size_pow_2, "telemetry_kernel_buffer_size_pow_2", 16);
TUNABLE(uint32_t, telemetry_notification_leeway, "telemetry_notification_leeway", TELEMETRY_DEFAULT_NOTIFY_LEEWAY);

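/*
 * Allocate the global telemetry buffer at startup, honoring the
 * telemetry_buffer_size and telemetry_notification_leeway boot-args, and set
 * up the ringbuffer and thread call used for kernel-thread samples when PMI
 * microstackshots are supported.
 */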
__startup_func
static void
_telemetry_init(void)
{
	telemetry_buffer.size = MIN(telemetry_buffer_size, TELEMETRY_MAX_BUFFER_SIZE);

	kern_return_t ret = kmem_alloc(kernel_map, &telemetry_buffer.buffer, telemetry_buffer.size,
	    KMA_DATA | KMA_ZERO | KMA_PERMANENT, VM_KERN_MEMORY_DIAG);
	if (ret != KERN_SUCCESS) {
		printf("telemetry: allocation failed: %d\n", ret);
		return;
	}

	if (telemetry_notification_leeway >= telemetry_buffer.size) {
		printf("telemetry: nonsensical telemetry_notification_leeway boot-arg %d changed to %d\n",
		    telemetry_notification_leeway, TELEMETRY_DEFAULT_NOTIFY_LEEWAY);
		telemetry_notification_leeway = TELEMETRY_DEFAULT_NOTIFY_LEEWAY;
	}
	telemetry_buffer_notify_at = telemetry_buffer.size - telemetry_notification_leeway;

#if HAS_PMI_MICROSTACKSHOTS
#if __arm__ || __arm64__
	unsigned int cpu_count = ml_get_cpu_count();
#else // __arm__ || __arm64__
	unsigned int cpu_count = ml_early_cpu_max_number() + 1;
#endif // !__arm__ && !__arm64__

	mpsc_ring_init(&_telemetry_kernel_ring, telemetry_kernel_buffer_size_pow_2, (uint8_t)cpu_count);

	_telemetry_kernel_notify_thread = thread_call_allocate_with_options(
		_telemetry_kernel_notify, NULL, THREAD_CALL_PRIORITY_USER,
		THREAD_CALL_OPTIONS_ONCE);
	if (!_telemetry_kernel_notify_thread) {
		panic("telemetry_init: failed to allocate kernel notification thread call");
	}
#endif /* HAS_PMI_MICROSTACKSHOTS */
}

STARTUP(MACH_IPC, STARTUP_RANK_FIRST, _telemetry_init);

/*
 * If userland has registered a port for telemetry notifications, send one now.
 */
static void
_telemetry_notify_user(telemetry_notice_t flags)
{
	mach_port_t user_port = MACH_PORT_NULL;

	kern_return_t kr = host_get_telemetry_port(host_priv_self(), &user_port);
	if ((kr != KERN_SUCCESS) || !IPC_PORT_VALID(user_port)) {
		return;
	}

	telemetry_notification(user_port, flags);
	ipc_port_release_send(user_port);
}

#if HAS_PMI_MICROSTACKSHOTS

static void
telemetry_pmi_handler(bool user_mode, __unused void *ctx)
{
	thread_t thread = current_thread();
	if (get_threadtask(thread) == kernel_task) {
		_telemetry_take_sample_kernel(thread, kPMIRecord);
	} else {
		_telemetry_mark_curthread(user_mode);
	}
}

#endif /* HAS_PMI_MICROSTACKSHOTS */

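/*
 * Configure PMI-driven microstackshots: arrange to sample whenever the chosen
 * counter (instructions or cycles) advances by `period`, or tear sampling
 * down when passed TELEMETRY_PMI_NONE.  For example, a caller might request a
 * sample every ten million cycles with
 * telemetry_pmi_setup(TELEMETRY_PMI_CYCLES, 10000000).  Returns 0 on success
 * and a non-zero value otherwise.
 */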
int
telemetry_pmi_setup(enum telemetry_pmi pmi_ctr, uint64_t period)
{
#if HAS_PMI_MICROSTACKSHOTS
	enum telemetry_source source = TMSRC_NONE;
	int error = 0;
	const char *name = "?";

	unsigned int ctr = 0;

	TELEMETRY_PMI_LOCK();

	switch (pmi_ctr) {
	case TELEMETRY_PMI_NONE:
		if (!telemetry_sample_pmis) {
			error = 1;
			goto out;
		}

		telemetry_sample_pmis = false;
		error = mt_microstackshot_stop();
		if (!error) {
			printf("telemetry: disabling ustackshot on PMI\n");
			int intrs_en = ml_set_interrupts_enabled(FALSE);
			lck_spin_lock(&telemetry_metadata_lck);
			telemetry_metadata.tm_period = 0;
			telemetry_metadata.tm_source = TMSRC_NONE;
			lck_spin_unlock(&telemetry_metadata_lck);
			ml_set_interrupts_enabled(intrs_en);
		}
		goto out;

	case TELEMETRY_PMI_INSTRS:
		ctr = MT_CORE_INSTRS;
		name = "instructions";
		source = TMSRC_INSTRUCTIONS;
		break;

	case TELEMETRY_PMI_CYCLES:
		ctr = MT_CORE_CYCLES;
		name = "cycles";
		source = TMSRC_CYCLES;
		break;

	default:
		error = 1;
		goto out;
	}

	telemetry_sample_pmis = true;

	error = mt_microstackshot_start(ctr, period, telemetry_pmi_handler, NULL);
	if (!error) {
		printf("telemetry: ustackshot every %llu %s\n", period, name);

		int intrs_en = ml_set_interrupts_enabled(FALSE);
		lck_spin_lock(&telemetry_metadata_lck);
		telemetry_metadata.tm_period = period;
		telemetry_metadata.tm_source = source;
		telemetry_metadata.tm_generation += 1;
		lck_spin_unlock(&telemetry_metadata_lck);
		ml_set_interrupts_enabled(intrs_en);
	}

out:
	TELEMETRY_PMI_UNLOCK();
	return error;
#else /* HAS_PMI_MICROSTACKSHOTS */
#pragma unused(pmi_ctr, period)
	return 1;
#endif /* !HAS_PMI_MICROSTACKSHOTS */
}

#if HAS_PMI_MICROSTACKSHOTS

/*
 * Mark the current thread for an interrupt-based
 * telemetry record, to be sampled at the next AST boundary.
 */
static void
_telemetry_mark_curthread(bool interrupted_userspace)
{
	uint32_t ast_bits = AST_TELEMETRY_PMI;
	thread_t thread = current_thread();

	/*
	 * PMI handler was called but microstackshot expected sampling to be
	 * disabled; log it for telemetry and ignore the sample.
	 */
	if (!telemetry_sample_pmis) {
		os_atomic_inc(&telemetry_metadata.tm_samples_skipped, relaxed);
		return;
	}

	ast_bits |= (interrupted_userspace ? AST_TELEMETRY_USER : AST_TELEMETRY_KERNEL);
	thread_ast_set(thread, ast_bits);
	ast_propagate(thread);
}

static void
_telemetry_kernel_notify(void * __unused p1, void * __unused p2)
{
	_telemetry_notify_user(TELEMETRY_NOTICE_KERNEL_MICROSTACKSHOT);
}

#endif /* HAS_PMI_MICROSTACKSHOTS */

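/*
 * Handle a telemetry AST on the given thread: translate the AST reasons into
 * micro-snapshot record flags and take the requested samples.  Runs at the
 * AST boundary, where it is safe to fault on user memory.
 */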
void
telemetry_ast(thread_t thread, ast_t reasons)
{
	assert((reasons & AST_TELEMETRY_ALL) != 0);

	uint8_t record_type = 0;
	if (reasons & AST_TELEMETRY_IO) {
		record_type |= kIORecord;
	}
	if (reasons & (AST_TELEMETRY_USER | AST_TELEMETRY_KERNEL)) {
		record_type |= (reasons & AST_TELEMETRY_PMI) ? kPMIRecord :
		    kInterruptRecord;
	}

	if ((reasons & AST_TELEMETRY_MACF) != 0) {
		record_type |= kMACFRecord;
	}

	enum micro_snapshot_flags user_telemetry = (reasons & AST_TELEMETRY_USER) ? kUserMode : 0;
	enum micro_snapshot_flags microsnapshot_flags = record_type | user_telemetry;

	if ((reasons & AST_TELEMETRY_MACF) != 0) {
		telemetry_macf_take_sample(thread, microsnapshot_flags);
	}

	if ((reasons & (AST_TELEMETRY_IO | AST_TELEMETRY_KERNEL | AST_TELEMETRY_PMI
	    | AST_TELEMETRY_USER)) != 0) {
		telemetry_take_sample(thread, microsnapshot_flags);
	}
}

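/*
 * A task can only be sampled once it is fully formed: skip tasks that are
 * mid-exec or are the temporary copy created by exec.
 */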
static bool
_telemetry_task_can_sample(task_t task)
{
	return (task != TASK_NULL) && !task_did_exec(task) && !task_is_exec_copy(task);
}

/*
 * Kernel Thread Microstackshot Support
 */
#if HAS_PMI_MICROSTACKSHOTS

static const uint32_t TKS_MAGIC = 0x83a83f29;

/*
 * The bare minimum needed to record a sample from interrupt context, stored in
 * a ringbuffer for later collection.
 */
struct _telemetry_kernel_sample {
	clock_sec_t tks_time_secs;
	uint64_t tks_serial_number;
	uint64_t tks_telemetry_skipped;
	uint64_t tks_telemetry_period;

	uint64_t tks_system_time_in_terminated_threads;
	uint64_t tks_task_size;
	uint64_t tks_pageins;
	uint64_t tks_faults;
	uint64_t tks_cow_faults;

	uint64_t tks_thread_id;
	uint64_t tks_system_time;
	clock_usec_t tks_time_usecs;
	uint32_t tks_magic;
	uint32_t tks_thread_state;
	uint32_t tks_sched_pri;
	uint32_t tks_base_pri;
	uint32_t tks_sched_flags;
	uint32_t tks_call_stack_size;
	uint32_t tks_telemetry_source;
	uint32_t tks_telemetry_generation;
	uint8_t tks_cpu;
	uint8_t tks_io_tier;
	char tks_thread_name[MAXTHREADNAMESIZE];
};

/*
 * Only collect call stacks up to this maximum length.
 */
#define TELEMETRY_KERNEL_FRAMES_MAX (128)

/*
 * A scratch buffer that mirrors the format of data stored in the ringbuffer so
 * it can be written contiguously in a single update.
 */
struct _telemetry_scratch {
	struct _telemetry_kernel_sample ts_sample;
	uintptr_t ts_call_stack[TELEMETRY_KERNEL_FRAMES_MAX];
};

/*
 * Each writer in interrupt context needs a place off the stack to store these
 * scratch buffers.
 */
static struct _telemetry_scratch PERCPU_DATA(_telemetry_pcpu);

/*
 * Collect a sample for the current kernel thread. Must be called in interrupt
 * context.
 */
static void
_telemetry_take_sample_kernel(thread_t thread, enum micro_snapshot_flags __unused flags)
{
	assert(ml_at_interrupt_context());
	struct _telemetry_scratch *scratch = PERCPU_GET(_telemetry_pcpu);

	/*
	 * Collect the call stack in a packed representation to fit more of these
	 * samples into the ringbuffer.
	 */
	struct backtrace_control ctl = {
		.btc_flags = BTF_KERN_INTERRUPTED,
	};
	backtrace_info_t info = BTI_NONE;
	unsigned int call_stack_count = backtrace(scratch->ts_call_stack,
	    TELEMETRY_KERNEL_FRAMES_MAX,
	    &ctl,
	    &info);
	unsigned int call_stack_size = call_stack_count * sizeof(scratch->ts_call_stack[0]);

	/*
	 * Relaxed here, which allows the samples to be non-monotonically
	 * increasing, but avoids any further synchronization with writers.
	 */
	uint64_t serial_number = os_atomic_inc(&telemetry_metadata.tm_samples_recorded, relaxed);

	struct recount_times_mach term_times = recount_task_terminated_times(kernel_task);
	struct recount_times_mach thread_times = recount_current_thread_times();

	clock_sec_t secs = 0;
	clock_usec_t usecs = 0;
	clock_get_calendar_microtime(&secs, &usecs);
	uint8_t cpu = (uint8_t)cpu_number();
	scratch->ts_sample = (struct _telemetry_kernel_sample){
		.tks_magic = TKS_MAGIC,
		.tks_serial_number = serial_number,
		.tks_telemetry_skipped = os_atomic_load(&telemetry_metadata.tm_samples_skipped, relaxed),
		.tks_telemetry_period = telemetry_metadata.tm_period,
		.tks_telemetry_source = telemetry_metadata.tm_source,
		.tks_telemetry_generation = telemetry_metadata.tm_generation,
		.tks_cpu = cpu,
		.tks_time_secs = secs,
		.tks_time_usecs = usecs,
		.tks_thread_id = thread_tid(thread),
		.tks_pageins = counter_load(&kernel_task->pageins),
		.tks_faults = counter_load(&kernel_task->faults),
		.tks_cow_faults = counter_load(&kernel_task->cow_faults),
		.tks_system_time_in_terminated_threads = term_times.rtm_system,
		.tks_system_time = thread_times.rtm_system,
		.tks_thread_state = thread->state,
		.tks_sched_pri = thread->sched_pri,
		.tks_base_pri = thread->base_pri,
		.tks_io_tier = (uint8_t)proc_get_effective_thread_policy(thread, TASK_POLICY_IO),
		.tks_call_stack_size = call_stack_size,
	};
	thread_get_thread_name(thread, scratch->ts_sample.tks_thread_name);

	/*
	 * Write just the amount needed to store the sample information and call
	 * stack.
	 */
	uint32_t size_needed = sizeof(struct _telemetry_kernel_sample) + call_stack_size;
	uint32_t available =
	    mpsc_ring_write(&_telemetry_kernel_ring, cpu, scratch, size_needed);

	/*
	 * Check that there was enough space to store the sample.
	 */
	bool skipped = available < size_needed;
	/*
	 * The samples-recorded counter was already incremented above, so user
	 * space can still detect that this sample is missing.
	 */
	if (skipped || available - size_needed <= telemetry_notification_leeway) {
		if (os_atomic_cmpxchg(&_telemetry_kernel_notified, false, true, relaxed)) {
			thread_call_enter(_telemetry_kernel_notify_thread);
		}
	}
}

/*
 * The format of sample data that user space can parse, with no UUIDs present,
 * as is the case for kernel samples.
 */
struct _telemetry_kernel_snapshots {
	struct micro_snapshot tkse_micro_snap;
	struct task_snapshot tkse_task_snap;
	struct thread_snapshot tkse_thread_snap;
};

/*
 * Convert a kernel sample into the trio of snapshots that user space can parse.
 */
static void
_telemetry_kernel_snapshot(
	struct _telemetry_kernel_snapshots *snaps,
	struct _telemetry_kernel_sample *sample)
{
	snaps->tkse_micro_snap = (struct micro_snapshot){
		.snapshot_magic = STACKSHOT_MICRO_SNAPSHOT_MAGIC,
		.ms_flags = (uint8_t)(kPMIRecord | kKernelThread),
		.ms_cpu = sample->tks_cpu,
		.ms_time = sample->tks_time_secs,
		.ms_time_microsecs = sample->tks_time_usecs,
	};
	snaps->tkse_task_snap = (struct task_snapshot){
		.snapshot_magic = STACKSHOT_TASK_SNAPSHOT_MAGIC,
		.ss_flags = kKernel64_p,
		.pid = 0,
		.uniqueid = 0,
		.system_time_in_terminated_threads =
	    sample->tks_system_time_in_terminated_threads,
		.task_size = sample->tks_task_size,
		.faults = sample->tks_faults,
		.pageins = sample->tks_pageins,
		.cow_faults = sample->tks_cow_faults,
		.p_comm = "kernel_task",
		.was_throttled = 0,
		.did_throttle = 0,
		.p_start_sec = coalition_id(kernel_task->coalition[COALITION_TYPE_RESOURCE]),
		/* Set the on-behalf-of pids to -1. */
		.p_start_usec = UINT64_MAX,
		.latency_qos = LATENCY_QOS_TIER_UNSPECIFIED,
		.io_priority_size = {
			[0] = ((uint64_t)sample->tks_telemetry_source << 32) | sample->tks_telemetry_generation,
			[1] = sample->tks_telemetry_period,
			[2] = sample->tks_serial_number,
			[3] = sample->tks_telemetry_skipped,
		},
	};
	snaps->tkse_thread_snap = (struct thread_snapshot){
		.snapshot_magic = STACKSHOT_THREAD_SNAPSHOT_MAGIC,
		.ss_flags = kKernel64_p,
		.nkern_frames = sample->tks_call_stack_size / sizeof(uintptr_t),
		.wait_event = 0,
		.continuation = 0,
		.thread_id = sample->tks_thread_id,
		.system_time = sample->tks_system_time,
		.state = sample->tks_thread_state,
		.priority = sample->tks_base_pri,
		.sched_pri = sample->tks_sched_pri,
		.io_tier = sample->tks_io_tier,
	};
	memset(snaps->tkse_thread_snap.pth_name, 0, sizeof(snaps->tkse_thread_snap.pth_name));
	strlcpy(snaps->tkse_thread_snap.pth_name,
	    sample->tks_thread_name,
	    sizeof(snaps->tkse_thread_snap.pth_name));
}

#endif /* HAS_PMI_MICROSTACKSHOTS */

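/*
 * Drain kernel-thread samples from the ringbuffer into `user_buffer`,
 * converting each one into the micro/task/thread snapshot trio along the way.
 * On entry, `*user_length` is the capacity of the buffer; on success it is
 * updated to the number of bytes copied out.  Returns an errno-style value.
 */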
int
telemetry_kernel_gather(user_addr_t user_buffer, uint32_t *user_length)
{
#if HAS_PMI_MICROSTACKSHOTS
	int result = 0;
	/*
	 * Track how much data has been copied out to the user buffer.
	 */
	uint32_t copied = 0;
	uint32_t copy_length = *user_length;

	*user_length = 0;

	/*
	 * Get a cursor to read from the ringbuffer.
	 */
	mpsc_ring_cursor_t cursor = mpsc_ring_read_start(&_telemetry_kernel_ring);

	while (copied < copy_length) {
		/*
		 * This function is called directly off a syscall, so it can afford to
		 * use some stack space.
		 */
		struct _telemetry_kernel_snapshots snaps = { 0 };

		/*
		 * Check that the user buffer still has enough space for at least the
		 * snapshot structures.
		 */
		if (sizeof(snaps) > copy_length - copied) {
			break;
		}

		/*
		 * Read the sample from the ringbuffer.
		 */
		struct _telemetry_kernel_sample sample = { 0 };
		bool advanced = mpsc_ring_cursor_advance(
			&_telemetry_kernel_ring,
			&cursor,
			&sample,
			sizeof(sample));
		/*
		 * If there's no more data, return to user space.
		 */
		if (!advanced) {
			break;
		}

		if (sample.tks_magic != TKS_MAGIC) {
			panic("microstackshot: kernel sample magic is invalid");
		}
		/*
		 * Compute the size needed for the snapshots and call stack and bail
		 * out if there's not enough room in the user's buffer.
		 */
		assert3u(sample.tks_call_stack_size, <=, sizeof(uintptr_t) * TELEMETRY_KERNEL_FRAMES_MAX);
		uint32_t size_needed = sizeof(snaps) + sample.tks_call_stack_size;
		if (size_needed > copy_length - copied) {
			break;
		}

		/*
		 * Convert the sample into snapshots suitable for user space and copy
		 * them out.
		 */
		_telemetry_kernel_snapshot(&snaps, &sample);
		result = copyout(&snaps, user_buffer + copied, sizeof(snaps));
		if (result != 0) {
			break;
		}
		copied += sizeof(snaps);

		/*
		 * Copy the call stack out of the ringbuffer.
		 */
		uintptr_t call_stack[TELEMETRY_KERNEL_FRAMES_MAX] = { 0 };
		assert3u(sizeof(call_stack), >=, sample.tks_call_stack_size);
		advanced = mpsc_ring_cursor_advance(
			&_telemetry_kernel_ring,
			&cursor,
			&call_stack,
			sample.tks_call_stack_size);
		/*
		 * There must be a call stack after the sample, otherwise something got
		 * corrupted and there's no more framing information for the reader.
		 */
		assert(advanced);
		uint32_t call_stack_count = sample.tks_call_stack_size / sizeof(uintptr_t);
		for (uint32_t i = 0; i < call_stack_count; i++) {
			/*
			 * The last frame of the call stack can sometimes be 0, ignore it.
			 */
			if (call_stack[i] != 0) {
				call_stack[i] = VM_KERNEL_UNSLIDE(call_stack[i]);
			}
		}

		/*
		 * Copy the unpacked call stack out to user space.
		 */
		result = copyout(&call_stack, user_buffer + copied,
		    sample.tks_call_stack_size);
		if (result != 0) {
			break;
		}
		copied += sample.tks_call_stack_size;
		mpsc_ring_cursor_commit(&_telemetry_kernel_ring, &cursor);
	}

	/*
	 * On success, store the number of bytes copied.
	 *
	 * Some partial data may have been copied out, but user space shouldn't
	 * try to inspect it.
	 */
	if (result == 0) {
		/*
		 * Complete the read operation and sync any progress back to the ringbuffer.
		 */
		mpsc_ring_read_finish(&_telemetry_kernel_ring, cursor);
		os_atomic_store(&_telemetry_kernel_notified, false, relaxed);
		*user_length = copied;
	} else {
		mpsc_ring_read_cancel(&_telemetry_kernel_ring, cursor);
	}
	return result;
#else /* HAS_PMI_MICROSTACKSHOTS */
#pragma unused(user_buffer, user_length)
	return ENOTSUP;
#endif /* !HAS_PMI_MICROSTACKSHOTS */
}

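/*
 * Bracket sample collection with kdebug tracepoints, recording which buffer
 * is in use and how much data has accumulated since the last mark.
 */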
void
telemetry_instrumentation_begin(
	__unused struct micro_snapshot_buffer *buffer,
	__unused enum micro_snapshot_flags flags)
{
	/* telemetry_XXX accessed outside of lock for instrumentation only */
	KDBG(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_RECORD) | DBG_FUNC_START,
	    flags, telemetry_bytes_since_last_mark, 0,
	    (&telemetry_buffer != buffer));
}

void
telemetry_instrumentation_end(__unused struct micro_snapshot_buffer *buffer)
{
	/* telemetry_XXX accessed outside of lock for instrumentation only */
	KDBG(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_RECORD) | DBG_FUNC_END,
	    (&telemetry_buffer == buffer), telemetry_bytes_since_last_mark,
	    buffer->current_position, buffer->end_point);
}

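/*
 * Backtrace the given user thread, append any async call stack inline after
 * the frame-pointer walk, and record the result into the global telemetry
 * buffer.
 */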
static void
_telemetry_take_sample_user(thread_t thread, enum micro_snapshot_flags flags)
{
	uintptr_t frames[128];
	size_t frames_len = sizeof(frames) / sizeof(frames[0]);
	uint32_t btcount;
	struct backtrace_user_info btinfo = BTUINFO_INIT;
	uint16_t async_start_index = UINT16_MAX;

	/* Collect backtrace from user thread. */
	btcount = backtrace_user(frames, frames_len, NULL, &btinfo);
	if (btinfo.btui_error != 0) {
		return;
	}
	if (btinfo.btui_async_frame_addr != 0 &&
	    btinfo.btui_async_start_index != 0) {
		/*
		 * Put the async callstack inline after the frame pointer walk call
		 * stack.
		 */
		async_start_index = (uint16_t)btinfo.btui_async_start_index;
		uintptr_t frame_addr = btinfo.btui_async_frame_addr;
		unsigned int frames_left = frames_len - async_start_index;
		struct backtrace_control ctl = { .btc_frame_addr = frame_addr, };
		btinfo = BTUINFO_INIT;
		unsigned int async_filled = backtrace_user(frames + async_start_index,
		    frames_left, &ctl, &btinfo);
		if (btinfo.btui_error == 0) {
			btcount = MIN(async_start_index + async_filled, frames_len);
		}
	}

	/*
	 * Capture any other metadata and write it to the telemetry buffer.
	 */
	struct telemetry_target target = {
		.thread = thread,
		.frames = frames,
		.frames_count = btcount,
		.user64_regs = (btinfo.btui_info & BTI_64_BIT) != 0,
		.microsnapshot_flags = flags,
		.include_metadata = flags & kPMIRecord,
		.buffer = &telemetry_buffer,
		.buffer_mtx = &telemetry_mtx,
		.async_start_index = async_start_index,
	};
	telemetry_process_sample(&target, true, NULL);
}

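/*
 * Take a microstackshot sample of a user thread, provided its task is in a
 * sampleable state.  Skipped samples are counted in the telemetry metadata.
 */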
void
telemetry_take_sample(thread_t thread, enum micro_snapshot_flags flags)
{
	if (thread == THREAD_NULL) {
		return;
	}

	/* Ensure task is ready for taking a sample. */
	task_t task = get_threadtask(thread);
	if (!_telemetry_task_can_sample(task)) {
		os_atomic_inc(&telemetry_metadata.tm_samples_skipped, relaxed);
		return;
	}

	telemetry_instrumentation_begin(&telemetry_buffer, flags);
	_telemetry_take_sample_user(thread, flags);
	telemetry_instrumentation_end(&telemetry_buffer);
}

#if CONFIG_MACF
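/*
 * Take a sample on behalf of the MAC framework, retrying with a larger heap
 * buffer if the backtrace is truncated, and deliver the record (or the error)
 * synchronously to MAC clients via mac_thread_telemetry().
 */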
void
telemetry_macf_take_sample(thread_t thread, enum micro_snapshot_flags flags)
{
	task_t task;

	uintptr_t frames_stack[128];
	vm_size_t btcapacity = ARRAY_COUNT(frames_stack);
	uint32_t btcount = 0;
	typedef uintptr_t telemetry_user_frame_t __kernel_data_semantics;
	telemetry_user_frame_t *frames = frames_stack;
	bool alloced_frames = false;

	struct backtrace_user_info btinfo = BTUINFO_INIT;
	struct backtrace_control btctl = BTCTL_INIT;

	uint32_t retry_count = 0;
	const uint32_t max_retries = 10;

	bool initialized = false;
	struct micro_snapshot_buffer *telbuf = &telemetry_macf_buffer;
	uint32_t record_start = 0;
	bool did_process = false;
	int rv = 0;

	if (thread == THREAD_NULL) {
		return;
	}

	telemetry_instrumentation_begin(telbuf, flags);

	/* Ensure task is ready for taking a sample. */
	task = get_threadtask(thread);
	if (!_telemetry_task_can_sample(task) || task == kernel_task) {
		rv = EBUSY;
		goto out;
	}

	/* Ensure MACF telemetry buffer was initialized. */
	TELEMETRY_MACF_LOCK();
	initialized = (telbuf->size > 0);
	TELEMETRY_MACF_UNLOCK();

	if (!initialized) {
		rv = ENOMEM;
		goto out;
	}

	/* Collect backtrace from user thread. */
	while (retry_count < max_retries) {
		btcount += backtrace_user(frames + btcount, btcapacity - btcount, &btctl, &btinfo);

		if ((btinfo.btui_info & BTI_TRUNCATED) != 0 && btinfo.btui_next_frame_addr != 0) {
			/*
			 * Fast path uses stack memory to avoid an allocation. We must
			 * pivot to heap memory in the case where we cannot write the
			 * complete backtrace to this buffer.
			 */
			if (frames == frames_stack) {
				btcapacity += 128;
				frames = kalloc_data(btcapacity * sizeof(*frames), Z_WAITOK);

				if (frames == NULL) {
					break;
				}

				alloced_frames = true;

				assert(btcapacity > sizeof(frames_stack) / sizeof(frames_stack[0]));
				memcpy(frames, frames_stack, sizeof(frames_stack));
			} else {
				assert(alloced_frames);
				frames = krealloc_data(frames,
				    btcapacity * sizeof(*frames),
				    (btcapacity + 128) * sizeof(*frames),
				    Z_WAITOK);

				if (frames == NULL) {
					break;
				}

				btcapacity += 128;
			}

			btctl.btc_frame_addr = btinfo.btui_next_frame_addr;
			++retry_count;
		} else {
			break;
		}
	}

	if (frames == NULL) {
		rv = ENOMEM;
		goto out;
	} else if (btinfo.btui_error != 0) {
		rv = btinfo.btui_error;
		goto out;
	}

	/* Process the backtrace. */
	struct telemetry_target target = {
		.thread = thread,
		.frames = frames,
		.frames_count = btcount,
		.user64_regs = (btinfo.btui_info & BTI_64_BIT) != 0,
		.microsnapshot_flags = flags,
		.include_metadata = false,
		.buffer = telbuf,
		.buffer_mtx = &telemetry_macf_mtx
	};
	rv = telemetry_process_sample(&target, false, &record_start);
	did_process = true;

out:
	/* Immediately deliver the collected sample to MAC clients. */
	if (rv == 0) {
		assert(telbuf->current_position >= record_start);
		mac_thread_telemetry(thread,
		    0,
		    (void *)(telbuf->buffer + record_start),
		    telbuf->current_position - record_start);
	} else {
		mac_thread_telemetry(thread, rv, NULL, 0);
	}

	/*
	 * The lock was taken by telemetry_process_sample, and we asked it not to
	 * unlock upon completion, so we must release the lock here.
	 */
	if (did_process) {
		TELEMETRY_MACF_UNLOCK();
	}

	if (alloced_frames && frames != NULL) {
		kfree_data(frames, btcapacity * sizeof(*frames));
	}

	telemetry_instrumentation_end(telbuf);
}
#endif /* CONFIG_MACF */

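/*
 * Fill in a task_snapshot for the sampled thread's task.  Several legacy
 * fields are overloaded here to carry extra data to user space; see the
 * comments on the individual fields below.
 */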
static void
_write_task_snapshot(
	struct task_snapshot *tsnap,
	const struct telemetry_target *target)
{
	struct task *task = get_threadtask(target->thread);
	struct proc *p = get_bsdtask_info(task);

	tsnap->snapshot_magic = STACKSHOT_TASK_SNAPSHOT_MAGIC;
	tsnap->pid = proc_pid(p);
	tsnap->uniqueid = proc_uniqueid(p);
	struct recount_times_mach times = recount_task_terminated_times(task);
	tsnap->user_time_in_terminated_threads = times.rtm_user;
	tsnap->system_time_in_terminated_threads = times.rtm_system;
	tsnap->suspend_count = task->suspend_count;
	tsnap->task_size = (typeof(tsnap->task_size))(get_task_phys_footprint(task) / PAGE_SIZE);
	tsnap->faults = counter_load(&task->faults);
	tsnap->pageins = counter_load(&task->pageins);
	tsnap->cow_faults = counter_load(&task->cow_faults);
	/*
	 * The throttling counters are maintained as 64-bit counters in the proc
	 * structure. However, we reserve 32 bits (each) for them in the task_snapshot
	 * struct to save space, since we do not expect them to overflow 32 bits. If we
	 * find these values overflowing in the future, the fix would be to simply
	 * upgrade these counters to 64-bit in the task_snapshot struct.
	 */
	tsnap->was_throttled = (uint32_t) proc_was_throttled(p);
	tsnap->did_throttle = (uint32_t) proc_did_throttle(p);

#if CONFIG_COALITIONS
	/*
	 * These fields are overloaded to represent the resource coalition ID of
	 * this task...
	 */
	coalition_t rsrc_coal = task->coalition[COALITION_TYPE_RESOURCE];
	tsnap->p_start_sec = rsrc_coal ? coalition_id(rsrc_coal) : 0;
	/*
	 * ... and the processes this thread is doing work on behalf of.
	 */
	pid_t origin_pid = -1, proximate_pid = -1;
	(void)thread_get_voucher_origin_proximate_pid(target->thread, &origin_pid, &proximate_pid);
	tsnap->p_start_usec = ((uint64_t)proximate_pid << 32) | (uint32_t)origin_pid;
#endif /* CONFIG_COALITIONS */

	uint64_t ss_flags = kcdata_get_task_ss_flags(task, false);

	/*
	 * Sadly the original ss_flags field is not big enough; replicate the
	 * full flags in the unused disk_reads_count field.
	 */
	tsnap->ss_flags = (uint32_t)ss_flags;
	tsnap->disk_reads_count = ss_flags;

	tsnap->latency_qos = task_grab_latency_qos(task);

	strlcpy(tsnap->p_comm, proc_name_address(p), sizeof(tsnap->p_comm));
	const char *longname = proc_longname_address(p);
	if (longname[0] != '\0') {
		/*
		 * XXX Stash the rest of the process's name in some unused fields.
		 */
		strlcpy((char *)tsnap->io_priority_count, &longname[16], sizeof(tsnap->io_priority_count));
	}
	if (target->include_metadata) {
		tsnap->io_priority_size[0] = ((uint64_t)telemetry_metadata.tm_source << 32) | telemetry_metadata.tm_generation;
		tsnap->io_priority_size[1] = telemetry_metadata.tm_period;
		tsnap->io_priority_size[2] = os_atomic_inc(&telemetry_metadata.tm_samples_recorded, relaxed);
		tsnap->io_priority_size[3] = telemetry_metadata.tm_samples_skipped;
	}
	if (task->task_shared_region_slide != -1) {
		tsnap->shared_cache_slide = task->task_shared_region_slide;
		bcopy(task->task_shared_region_uuid, tsnap->shared_cache_identifier,
		    sizeof(task->task_shared_region_uuid));
	}
}

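/*
 * Fill in a thread_snapshot for the sampled thread: identity, scheduling
 * state, QoS values, frame count, and user/system times.
 */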
static void
_write_thread_snapshot(struct thread_snapshot *thsnap, const struct telemetry_target *target)
{
	struct thread *thread = target->thread;

	thsnap->snapshot_magic = STACKSHOT_THREAD_SNAPSHOT_MAGIC;
	thsnap->thread_id = thread_tid(thread);
	thsnap->state = thread->state;
	thsnap->priority = thread->base_pri;
	thsnap->sched_pri = thread->sched_pri;
	thsnap->sched_flags = thread->sched_flags;
	thsnap->ss_flags |= kStacksPCOnly;
	thsnap->ts_qos = thread->effective_policy.thep_qos;
	thsnap->ts_rqos = thread->requested_policy.thrp_qos;
	thsnap->ts_rqos_override = MAX(thread->requested_policy.thrp_qos_override,
	    thread->requested_policy.thrp_qos_workq_override);
	thsnap->nuser_frames = target->frames_count;
	memcpy(thsnap->_reserved + 1, &target->async_start_index,
	    sizeof(target->async_start_index));

	if (proc_get_effective_thread_policy(thread, TASK_POLICY_DARWIN_BG)) {
		thsnap->ss_flags |= kThreadDarwinBG;
	}
	if (target->user64_regs) {
		thsnap->ss_flags |= kUser64_p;
	}

	boolean_t interrupt_state = ml_set_interrupts_enabled(FALSE);
	struct recount_times_mach times = recount_current_thread_times();
	ml_set_interrupts_enabled(interrupt_state);
	thsnap->user_time = times.rtm_user;
	thsnap->system_time = times.rtm_system;
}

struct _telemetry_uuids {
	errno_t error;
	void *uuid_info;
	uint32_t uuid_info_count;
	uint32_t uuid_info_size;
};

/*
 * Retrieve the array of UUIDs for binaries used by this task.
 */
static struct _telemetry_uuids
_telemetry_sample_uuids(task_t task)
{
	bool const user64_va = task_has_64Bit_addr(task);
	uint32_t uuid_info_count = 0;
	mach_vm_address_t uuid_info_addr = 0;
	uint32_t uuid_info_size = 0;
	if (user64_va) {
		uuid_info_size = sizeof(struct user64_dyld_uuid_info);
		struct user64_dyld_all_image_infos task_image_infos;
		if (copyin(task->all_image_info_addr, &task_image_infos, sizeof(task_image_infos)) == 0) {
			uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
			uuid_info_addr = task_image_infos.uuidArray;
		}
	} else {
		uuid_info_size = sizeof(struct user32_dyld_uuid_info);
		struct user32_dyld_all_image_infos task_image_infos;
		if (copyin(task->all_image_info_addr, &task_image_infos, sizeof(task_image_infos)) == 0) {
			uuid_info_count = task_image_infos.uuidArrayCount;
			uuid_info_addr = task_image_infos.uuidArray;
		}
	}

	/*
	 * If dyld is updating the data structure (indicated by a NULL uuidArray field),
	 * do not provide any UUIDs with the sample.
	 */
	if (uuid_info_addr == USER_ADDR_NULL) {
		return (struct _telemetry_uuids){};
	}

	/*
	 * The main binary and interesting non-shared-cache libraries should be in the first few images.
	 */
	uuid_info_count = MIN(uuid_info_count, TELEMETRY_MAX_UUID_COUNT);
	uint32_t uuid_info_array_size = uuid_info_count * uuid_info_size;
	char *uuid_info_array = kalloc_data(uuid_info_array_size, Z_WAITOK);
	if (uuid_info_array == NULL) {
		return (struct _telemetry_uuids){
			.error = ENOMEM,
		};
	}

	/*
	 * Copy in the UUID info array. Ignore any failures to copyin.
	 */
	if (copyin(uuid_info_addr, uuid_info_array, uuid_info_array_size) != 0) {
		kfree_data(uuid_info_array, uuid_info_array_size);
		uuid_info_array = NULL;
		uuid_info_array_size = 0;
	}

	return (struct _telemetry_uuids){
		.uuid_info = uuid_info_array,
		.uuid_info_count = uuid_info_count,
		.uuid_info_size = uuid_info_array_size,
	};
}

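/*
 * Copy in the serial number of the dispatch queue that the thread is
 * currently servicing, if any.  Returns true if `*serialno_out` was filled in.
 */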
static bool
_telemetry_sample_dispatch_serialno(task_t task, thread_t thread, uint64_t *serialno_out)
{
	uint64_t const dqkeyaddr = thread_dispatchqaddr(thread);
	if (dqkeyaddr != 0) {
		uint64_t dqaddr = 0;
		size_t const user_ptr_size = task_has_64Bit_addr(task) ? 8 : 4;

		uint64_t const dq_serialno_offset = get_task_dispatchqueue_serialno_offset(task);
		if ((copyin(dqkeyaddr, (char *)&dqaddr, user_ptr_size) == 0) &&
		    (dqaddr != 0) && (dq_serialno_offset != 0)) {
			uint64_t dqserialnumaddr = dqaddr + dq_serialno_offset;
			if (copyin(dqserialnumaddr, serialno_out, user_ptr_size) == 0) {
				return true;
			}
		}
	}

	return false;
}

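/*
 * Carve out and zero `size` bytes at the buffer's write cursor.  The caller
 * must hold the buffer's lock and have already verified that the record fits.
 */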
static void *
_telemetry_buffer_alloc(struct micro_snapshot_buffer *buf, size_t size)
{
	void *alloc = (void *)(uintptr_t)(buf->buffer + buf->current_position);
	memset(alloc, 0, size);
	buf->current_position += size;
	assert3u(buf->current_position, <=, buf->size);
	return alloc;
}

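/*
 * Write a complete record (micro-snapshot header, task and thread snapshots,
 * UUIDs, optional dispatch serial number, and frames) into the target buffer
 * under its mutex.  If `release_buffer_lock` is false, the caller is
 * responsible for dropping the lock once it is done with the record, whose
 * starting offset is returned via `out_current_record_start`.
 */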
int
telemetry_process_sample(const struct telemetry_target *target,
    bool release_buffer_lock,
    uint32_t *out_current_record_start)
{
	thread_t const thread = target->thread;
	size_t const btcount = target->frames_count;
	bool const user64_regs = target->user64_regs;
	struct micro_snapshot_buffer * const current_buffer = target->buffer;
	lck_mtx_t * const buffer_mtx = target->buffer_mtx;

	clock_sec_t secs;
	clock_usec_t usecs;
	bool notify = false;
	int rv = 0;

	if (thread == THREAD_NULL) {
		return EINVAL;
	}

	task_t const task = get_threadtask(thread);

	struct _telemetry_uuids uuids = _telemetry_sample_uuids(task);

	/*
	 * Look for a dispatch queue serial number, and copy it in from userland if present.
	 */
	uint64_t dqserial = 0;
	bool dqserial_valid = _telemetry_sample_dispatch_serialno(task, thread, &dqserial);

	size_t const frames_size = btcount * (user64_regs ? 8 : 4);
	size_t const sample_size = _telemetry_sample_size_static +
	    uuids.uuid_info_size + (dqserial_valid ? sizeof(dqserial) : 0) + frames_size;

	clock_get_calendar_microtime(&secs, &usecs);

	/*
	 * We do the bulk of the operation under the telemetry lock, on the assumption
	 * that any page faults during execution will not cause another AST_TELEMETRY_ALL
	 * to deadlock; they will just block until we finish. This makes it easier
	 * to copy into the buffer directly. As soon as we unlock, userspace can copy
	 * out of our buffer.
	 */
	lck_mtx_lock(buffer_mtx);

	/*
	 * If the buffer has been deallocated, there's no way to take a sample.
	 */
	if (!current_buffer->buffer) {
		rv = EINVAL;
	}

	/*
	 * If the sample would be larger than the entire buffer, ignore it.
	 */
	if (rv == 0 && current_buffer->size < sample_size) {
		rv = ERANGE;
	}

	if (rv == 0) {
		if ((current_buffer->size - current_buffer->current_position) < sample_size) {
			/*
			 * We can't fit a record in the space available, so wrap around to the beginning.
			 * Save the current position as the known end point of valid data.
			 */
			current_buffer->end_point = current_buffer->current_position;
			current_buffer->current_position = 0;
		}
		uint32_t current_record_start = current_buffer->current_position;

		/*
		 * Write the snapshots and variable-length arrays into the telemetry buffer.
		 */

		struct micro_snapshot *msnap = _telemetry_buffer_alloc(current_buffer, sizeof(*msnap));
		*msnap = (struct micro_snapshot){
			.snapshot_magic = STACKSHOT_MICRO_SNAPSHOT_MAGIC,
			.ms_flags = (uint8_t)target->microsnapshot_flags,
			.ms_cpu = cpu_number(),
			.ms_time = secs,
			.ms_time_microsecs = usecs,
		};

		struct task_snapshot *tsnap = _telemetry_buffer_alloc(current_buffer, sizeof(*tsnap));
		_write_task_snapshot(tsnap, target);

		if (uuids.uuid_info_size > 0) {
			void *uuid_info_buf = _telemetry_buffer_alloc(current_buffer, uuids.uuid_info_size);
			memcpy(uuid_info_buf, uuids.uuid_info, uuids.uuid_info_size);
			tsnap->nloadinfos = uuids.uuid_info_count;
		}

		struct thread_snapshot *thsnap = _telemetry_buffer_alloc(current_buffer, sizeof(*thsnap));
		_write_thread_snapshot(thsnap, target);

		if (dqserial_valid) {
			thsnap->ss_flags |= kHasDispatchSerial;
			uint64_t *dqserial_buf = _telemetry_buffer_alloc(current_buffer, sizeof(*dqserial_buf));
			memcpy(dqserial_buf, &dqserial, sizeof(dqserial));
		}

		void *frames_buf = _telemetry_buffer_alloc(current_buffer, frames_size);
		if (user64_regs) {
			memcpy(frames_buf, target->frames, frames_size);
		} else {
			uint32_t *frames_32 = frames_buf;
			for (size_t i = 0; i < btcount; i++) {
				frames_32[i] = (uint32_t)target->frames[i];
			}
		}

		if (current_buffer->end_point < current_buffer->current_position) {
			/*
			 * Each time the cursor wraps around to the beginning, we leave a
			 * differing amount of unused space at the end of the buffer. Make
			 * sure the cursor pushes the end point in case we're making use of
			 * more of the buffer than we did the last time we wrapped.
			 */
			current_buffer->end_point = current_buffer->current_position;
		}

		/*
		 * Now THIS is a hack.
		 */
		if (current_buffer == &telemetry_buffer) {
			telemetry_bytes_since_last_mark += (current_buffer->current_position - current_record_start);
			if (telemetry_bytes_since_last_mark > telemetry_buffer_notify_at) {
				notify = true;
			}
		}

		if (out_current_record_start != NULL) {
			*out_current_record_start = current_record_start;
		}
	}

	if (release_buffer_lock) {
		lck_mtx_unlock(buffer_mtx);
	}

	if (notify) {
		_telemetry_notify_user(TELEMETRY_NOTICE_BASE);
	}

	if (uuids.uuid_info != NULL) {
		kfree_data(uuids.uuid_info, uuids.uuid_info_size);
	}

	return rv;
}

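/*
 * Gather samples from the default telemetry buffer; see
 * telemetry_buffer_gather() for the semantics of the arguments.
 */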
int
telemetry_gather(user_addr_t buffer, uint32_t *length, bool mark)
{
	return telemetry_buffer_gather(buffer, length, mark, &telemetry_buffer);
}

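/*
 * Copy a micro-snapshot buffer out to user space, ordered from the oldest
 * record to the newest.  On entry, `*length` is the capacity of `buffer` and
 * must be at least the buffer's size; on return it holds the number of bytes
 * copied out.  If `mark` is set and data was returned, the
 * bytes-since-last-mark counter is reset so notification accounting starts
 * fresh.
 */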
int
telemetry_buffer_gather(user_addr_t buffer, uint32_t *length, bool mark, struct micro_snapshot_buffer *current_buffer)
{
	int result = 0;
	uint32_t oldest_record_offset;

	KDBG(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_GATHER) | DBG_FUNC_START,
	    mark, telemetry_bytes_since_last_mark, 0,
	    (&telemetry_buffer != current_buffer));

	TELEMETRY_LOCK();

	if (current_buffer->buffer == 0) {
		*length = 0;
		goto out;
	}

	if (*length < current_buffer->size) {
		result = KERN_NO_SPACE;
		goto out;
	}

	/*
	 * Copy the ring buffer out to userland in order sorted by time: least recent to most recent.
	 * First, we need to search forward from the cursor to find the oldest record in our buffer.
	 */
	oldest_record_offset = current_buffer->current_position;
	do {
		if (((oldest_record_offset + sizeof(uint32_t)) > current_buffer->size) ||
		    ((oldest_record_offset + sizeof(uint32_t)) > current_buffer->end_point)) {
			if (*(uint32_t *)(uintptr_t)(current_buffer->buffer) == 0) {
				/*
				 * There is no magic number at the start of the buffer, which means
				 * it's empty; nothing to see here yet.
				 */
				*length = 0;
				goto out;
			}
			/*
			 * We've looked through the end of the active buffer without finding a valid
			 * record; that means all valid records are in a single chunk, beginning at
			 * the very start of the buffer.
			 */

			oldest_record_offset = 0;
			assert(*(uint32_t *)(uintptr_t)(current_buffer->buffer) == STACKSHOT_MICRO_SNAPSHOT_MAGIC);
			break;
		}

		if (*(uint32_t *)(uintptr_t)(current_buffer->buffer + oldest_record_offset) == STACKSHOT_MICRO_SNAPSHOT_MAGIC) {
			break;
		}

		/*
		 * There are no alignment guarantees for micro-stackshot records, so we must search at each
		 * byte offset.
		 */
		oldest_record_offset++;
	} while (oldest_record_offset != current_buffer->current_position);

	/*
	 * If needed, copyout in two chunks: from the oldest record to the end of the buffer, and then
	 * from the beginning of the buffer up to the current position.
	 */
	if (oldest_record_offset != 0) {
		if ((result = copyout((void *)(current_buffer->buffer + oldest_record_offset), buffer,
		    current_buffer->end_point - oldest_record_offset)) != 0) {
			*length = 0;
			goto out;
		}
		*length = current_buffer->end_point - oldest_record_offset;
	} else {
		*length = 0;
	}

	if ((result = copyout((void *)current_buffer->buffer, buffer + *length,
	    current_buffer->current_position)) != 0) {
		*length = 0;
		goto out;
	}
	*length += (uint32_t)current_buffer->current_position;

out:

	if (mark && (*length > 0)) {
		telemetry_bytes_since_last_mark = 0;
	}

	TELEMETRY_UNLOCK();

	KDBG(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_GATHER) | DBG_FUNC_END,
	    current_buffer->current_position, *length,
	    current_buffer->end_point, (&telemetry_buffer != current_buffer));

	return result;
}

#if CONFIG_MACF
static int
telemetry_macf_init_locked(size_t buffer_size)
{
	kern_return_t kr;

	if (buffer_size > TELEMETRY_MAX_BUFFER_SIZE) {
		buffer_size = TELEMETRY_MAX_BUFFER_SIZE;
	}

	telemetry_macf_buffer.size = buffer_size;

	kr = kmem_alloc(kernel_map, &telemetry_macf_buffer.buffer,
	    telemetry_macf_buffer.size, KMA_DATA | KMA_ZERO | KMA_PERMANENT,
	    VM_KERN_MEMORY_SECURITY);

	if (kr != KERN_SUCCESS) {
		kprintf("Telemetry (MACF): Allocation failed: %d\n", kr);
		return ENOMEM;
	}

	return 0;
}

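/*
 * Mark the current thread to take a MACF telemetry sample at the next AST
 * boundary, initializing the MACF buffer on first use.  Kernel threads are
 * ineligible because they never return to an AST boundary.
 */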
int
telemetry_macf_mark_curthread(void)
{
	thread_t thread = current_thread();
	task_t task = get_threadtask(thread);
	int rv = 0;

	if (task == kernel_task) {
		/* Kernel threads never return to an AST boundary, and are ineligible */
		return EINVAL;
	}

	/* Initialize the MACF telemetry buffer if needed. */
	TELEMETRY_MACF_LOCK();
	if (__improbable(telemetry_macf_buffer.size == 0)) {
		rv = telemetry_macf_init_locked(TELEMETRY_MACF_DEFAULT_BUFFER_SIZE);

		if (rv != 0) {
			/* Drop the lock before returning on initialization failure. */
			TELEMETRY_MACF_UNLOCK();
			return rv;
		}
	}
	TELEMETRY_MACF_UNLOCK();

	act_set_macf_telemetry_ast(thread);
	return 0;
}
#endif /* CONFIG_MACF */

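/*
 * Append the kernel's UUID and unslid __TEXT_EXEC range to `buf` in the
 * "<UUID>@<base>:<end>" form that the off-system symbolication format below
 * expects.  Returns the number of characters written.
 */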
static int
telemetry_backtrace_add_kernel(
	char *buf,
	size_t buflen)
{
	int rc = 0;
#if defined(__arm__) || defined(__arm64__)
	extern vm_offset_t segTEXTEXECB;
	extern unsigned long segSizeTEXTEXEC;
	vm_address_t unslid = segTEXTEXECB - vm_kernel_stext;

	rc += scnprintf(buf, buflen, "%s@%lx:%lx\n",
	    kernel_uuid_string, unslid, unslid + segSizeTEXTEXEC - 1);
#elif defined(__x86_64__)
	rc += scnprintf(buf, buflen, "%s@0:%lx\n",
	    kernel_uuid_string, vm_kernel_etext - vm_kernel_stext);
#else
#pragma unused(buf, buflen)
#endif
	return rc;
}

/**
 * Generate a backtrace string which can be symbolicated off-system.
 *
 * All addresses are relative to vm_kernel_stext, which means that all
 * offsets will typically be <= 50M, fitting in 7 hex digits.
 *
 * We allow up to TOT entries from FRAMES. The result is formatted into BUF
 * (up to BUFLEN-1 characters) with the following format:
 *
 * <OFFSET1>\n
 * <OFFSET2>\n
 * ...
 * <UUID_a>@<TEXT_EXEC_BASE_OFFSET>:<TEXT_EXEC_END_OFFSET>\n
 * <UUID_b>@<TEXT_EXEC_BASE_OFFSET>:<TEXT_EXEC_END_OFFSET>\n
 * ...
 *
 * In general this backtrace takes 8 bytes per "frame", with an extra 52 bytes
 * per unique UUID referenced. As a rule of thumb, with a 256-byte output
 * buffer, at least five entries from four unique UUIDs will generally fit.
 */
void
telemetry_backtrace_to_string(
	char *buf,
	size_t buflen,
	uint32_t tot,
	uintptr_t *frames)
{
	size_t l = 0;

	for (uint32_t i = 0; i < tot; i++) {
		l += scnprintf(buf + l, buflen - l, "%lx\n",
		    frames[i] - vm_kernel_stext);
	}
	l += telemetry_backtrace_add_kernel(buf + l, buflen - l);
	telemetry_backtrace_add_kexts(buf + l, buflen - l, frames, tot);
}