xref: /xnu-11417.101.15/osfmk/kern/telemetry.c (revision e3723e1f17661b24996789d8afc084c0c3303b26)
1 /*
2  * Copyright (c) 2012-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <mach/host_priv.h>
30 #include <mach/host_special_ports.h>
31 #include <mach/mach_types.h>
32 #include <mach/telemetry_notification_server.h>
33 
34 #include <kern/assert.h>
35 #include <kern/clock.h>
36 #include <kern/coalition.h>
37 #include <kern/counter.h>
38 #include <kern/debug.h>
39 #include <kern/host.h>
40 #include <kern/kalloc.h>
41 #include <kern/kern_types.h>
42 #include <kern/locks.h>
43 #include <kern/misc_protos.h>
44 #include <kern/sched.h>
45 #include <kern/sched_prim.h>
46 #include <kern/thread.h>
47 #include <kern/telemetry.h>
48 #include <kern/timer_call.h>
49 #include <kern/policy_internal.h>
50 #include <kern/kcdata.h>
51 #include <kern/percpu.h>
52 #include <kern/mpsc_ring.h>
53 
54 #include <pexpert/pexpert.h>
55 
56 #include <string.h>
57 #include <vm/vm_kern_xnu.h>
58 #include <vm/vm_shared_region.h>
59 
60 #include <kperf/callstack.h>
61 #include <kern/backtrace.h>
62 #include <kern/monotonic.h>
63 
64 #include <security/mac_mach_internal.h>
65 
66 #include <sys/errno.h>
67 #include <sys/kdebug.h>
68 #include <uuid/uuid.h>
69 #include <kdp/kdp_dyld.h>
70 
71 #include <libkern/coreanalytics/coreanalytics.h>
72 #include <kern/thread_call.h>
73 
74 struct proc;
75 extern int proc_pid(struct proc *);
76 extern char *proc_name_address(void *p);
77 extern char *proc_longname_address(void *p);
78 extern uint64_t proc_uniqueid(void *p);
79 extern uint64_t proc_was_throttled(void *p);
80 extern uint64_t proc_did_throttle(void *p);
81 extern boolean_t task_did_exec(task_t task);
82 extern boolean_t task_is_exec_copy(task_t task);
83 
84 #if CONFIG_CPU_COUNTERS
85 #define HAS_PMI_MICROSTACKSHOTS 1
86 #endif /* CONFIG_CPU_COUNTERS */
87 
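/*
 * A micro_snapshot_buffer is a wrapping record buffer: current_position is
 * the write cursor and end_point marks the end of valid data from the
 * previous lap around the buffer (see telemetry_process_sample below).
 */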
88 struct micro_snapshot_buffer {
89 	vm_offset_t             buffer;
90 	uint32_t                size;
91 	uint32_t                current_position;
92 	uint32_t                end_point;
93 };
94 
95 static const size_t _telemetry_sample_size_static = sizeof(struct micro_snapshot) +
96     sizeof(struct task_snapshot) +
97     sizeof(struct thread_snapshot);
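/*
 * Each record is this static trio followed by variable-length data.  A
 * sketch of the full record size, mirroring telemetry_process_sample below:
 *
 *	size_t record_size = _telemetry_sample_size_static +
 *	    uuid_info_size +                           // binary UUIDs, if any
 *	    (dqserial_valid ? sizeof(uint64_t) : 0) +  // dispatch queue serial
 *	    btcount * (user64_regs ? 8 : 4);           // packed user frames
 */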
98 
99 static void telemetry_instrumentation_begin(
100 	struct micro_snapshot_buffer *buffer, enum micro_snapshot_flags flags);
101 
102 static void telemetry_instrumentation_end(struct micro_snapshot_buffer *buffer);
103 
104 static void telemetry_take_sample(thread_t thread, enum micro_snapshot_flags flags);
105 
106 #if HAS_PMI_MICROSTACKSHOTS
107 static void _telemetry_take_sample_kernel(thread_t thread, enum micro_snapshot_flags flags);
108 static void _telemetry_mark_curthread(bool interrupted_userspace);
109 #endif /* HAS_PMI_MICROSTACKSHOTS */
110 
111 #if CONFIG_MACF
112 static void telemetry_macf_take_sample(thread_t thread, enum micro_snapshot_flags flags);
113 #endif
114 
115 struct telemetry_target {
116 	thread_t                         thread;
117 	uintptr_t                       *frames;
118 	size_t                           frames_count;
119 	bool                             user64_regs;
120 	uint16_t                         async_start_index;
121 	enum micro_snapshot_flags        microsnapshot_flags;
122 	bool                             include_metadata;
123 	struct micro_snapshot_buffer    *buffer;
124 	lck_mtx_t                       *buffer_mtx;
125 };
126 
127 static int telemetry_process_sample(
128 	const struct telemetry_target *target,
129 	bool release_buffer_lock,
130 	uint32_t *out_current_record_start);
131 
132 static int telemetry_buffer_gather(
133 	user_addr_t buffer,
134 	uint32_t *length,
135 	bool mark,
136 	struct micro_snapshot_buffer *current_buffer);
137 
138 #define TELEMETRY_DEFAULT_BUFFER_SIZE (16 * 1024)
139 #define TELEMETRY_MAX_BUFFER_SIZE (64 * 1024)
140 
141 #define TELEMETRY_DEFAULT_NOTIFY_LEEWAY (4*1024) // Userland gets 4k of leeway to collect data after notification
142 #define TELEMETRY_MAX_UUID_COUNT (128) // Max of 128 non-shared-cache UUIDs to log for symbolication
143 
144 bool telemetry_sample_pmis = false;
145 
146 uint32_t telemetry_timestamp = 0;
147 
148 struct telemetry_metadata {
149 	/*
150 	 * The current generation of microstackshot-based telemetry.
151 	 * Incremented whenever the settings change.
152 	 */
153 	uint32_t tm_generation;
154 	/*
155 	 * The total number of samples recorded.
156 	 */
157 	uint64_t tm_samples_recorded;
158 	/*
159 	 * The total number of samples that were skipped.
160 	 */
161 	uint64_t tm_samples_skipped;
162 	/*
163 	 * What's triggering the microstackshot samples.
164 	 */
165 	enum telemetry_source {
166 		TMSRC_NONE = 0,
167 		TMSRC_UNKNOWN,
168 		TMSRC_TIME,
169 		TMSRC_INSTRUCTIONS,
170 		TMSRC_CYCLES,
171 	} tm_source;
172 	/*
173 	 * The interval used for periodic sampling.
174 	 */
175 	uint64_t tm_period;
176 };
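/*
 * This metadata reaches user space stashed in the io_priority_size words of
 * the task snapshot (see _write_task_snapshot), packed as:
 *
 *	io_priority_size[0] = ((uint64_t)tm_source << 32) | tm_generation;
 *	io_priority_size[1] = tm_period;
 *	io_priority_size[2] = tm_samples_recorded;
 *	io_priority_size[3] = tm_samples_skipped;
 */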
177 
178 /*
179  * The telemetry_buffer is responsible
180  * for timer samples and interrupt samples that are driven by
181  * compute_averages().  It will notify its client (if one
182  * exists) when it has enough data to be worth flushing.
183  */
184 struct micro_snapshot_buffer telemetry_buffer = {
185 	.buffer = 0,
186 	.size = 0,
187 	.current_position = 0,
188 	.end_point = 0
189 };
190 
191 #if CONFIG_MACF
192 #define TELEMETRY_MACF_DEFAULT_BUFFER_SIZE (16*1024)
193 /*
194  * The MAC framework uses its own telemetry buffer for the purposes of auditing
195  * security-related work being done by userland threads.
196  */
197 struct micro_snapshot_buffer telemetry_macf_buffer = {
198 	.buffer = 0,
199 	.size = 0,
200 	.current_position = 0,
201 	.end_point = 0
202 };
203 #endif /* CONFIG_MACF */
204 
205 int telemetry_bytes_since_last_mark = -1; // How much data since buf was last marked?
206 int telemetry_buffer_notify_at = 0;
207 
208 LCK_GRP_DECLARE(telemetry_lck_grp, "telemetry group");
209 LCK_MTX_DECLARE(telemetry_mtx, &telemetry_lck_grp);
210 LCK_MTX_DECLARE(telemetry_pmi_mtx, &telemetry_lck_grp);
211 LCK_MTX_DECLARE(telemetry_macf_mtx, &telemetry_lck_grp);
212 LCK_SPIN_DECLARE(telemetry_metadata_lck, &telemetry_lck_grp);
213 
214 #define TELEMETRY_LOCK() do { lck_mtx_lock(&telemetry_mtx); } while (0)
215 #define TELEMETRY_TRY_SPIN_LOCK() lck_mtx_try_lock_spin(&telemetry_mtx)
216 #define TELEMETRY_UNLOCK() do { lck_mtx_unlock(&telemetry_mtx); } while (0)
217 
218 #define TELEMETRY_PMI_LOCK() do { lck_mtx_lock(&telemetry_pmi_mtx); } while (0)
219 #define TELEMETRY_PMI_UNLOCK() do { lck_mtx_unlock(&telemetry_pmi_mtx); } while (0)
220 
221 #define TELEMETRY_MACF_LOCK() do { lck_mtx_lock(&telemetry_macf_mtx); } while (0)
222 #define TELEMETRY_MACF_UNLOCK() do { lck_mtx_unlock(&telemetry_macf_mtx); } while (0)
223 
224 /*
225  * Protected by the telemetry_metadata_lck spinlock.
226  */
227 struct telemetry_metadata telemetry_metadata = { 0 };
228 
229 #if HAS_PMI_MICROSTACKSHOTS
230 static __security_const_late thread_call_t _telemetry_kernel_notify_thread;
231 _Atomic bool _telemetry_kernel_notified = false;
232 static struct mpsc_ring _telemetry_kernel_ring;
233 
234 static void _telemetry_kernel_notify(void *, void *);
235 #endif /* HAS_PMI_MICROSTACKSHOTS */
236 
237 TUNABLE(uint32_t, telemetry_buffer_size, "telemetry_buffer_size", TELEMETRY_DEFAULT_BUFFER_SIZE);
238 TUNABLE(uint8_t, telemetry_kernel_buffer_size_pow_2, "telemetry_kernel_buffer_size_pow_2", 16);
239 TUNABLE(uint32_t, telemetry_notification_leeway, "telemetry_notification_leeway", TELEMETRY_DEFAULT_NOTIFY_LEEWAY);
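/*
 * These tunables can be overridden with boot-args of the same name, e.g.
 * (illustrative values):
 *
 *	telemetry_buffer_size=32768 telemetry_notification_leeway=8192
 */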
240 
241 __startup_func
242 static void
243 _telemetry_init(void)
244 {
245 	telemetry_buffer.size = MIN(telemetry_buffer_size, TELEMETRY_MAX_BUFFER_SIZE);
246 
247 	kern_return_t ret = kmem_alloc(kernel_map, &telemetry_buffer.buffer, telemetry_buffer.size,
248 	    KMA_DATA | KMA_ZERO | KMA_PERMANENT, VM_KERN_MEMORY_DIAG);
249 	if (ret != KERN_SUCCESS) {
250 		printf("telemetry: allocation failed: %d\n", ret);
251 		return;
252 	}
253 
254 	if (telemetry_notification_leeway >= telemetry_buffer.size) {
255 		printf("telemetry: nonsensical telemetry_notification_leeway boot-arg %d changed to %d\n",
256 		    telemetry_notification_leeway, TELEMETRY_DEFAULT_NOTIFY_LEEWAY);
257 		telemetry_notification_leeway = TELEMETRY_DEFAULT_NOTIFY_LEEWAY;
258 	}
259 	telemetry_buffer_notify_at = telemetry_buffer.size - telemetry_notification_leeway;
260 
261 #if HAS_PMI_MICROSTACKSHOTS
262 #if __arm__ || __arm64__
263 	unsigned int cpu_count = ml_get_cpu_count();
264 #else // __arm__ || __arm64__
265 	unsigned int cpu_count = ml_early_cpu_max_number() + 1;
266 #endif // !__arm__ && !__arm64__
267 
268 	mpsc_ring_init(&_telemetry_kernel_ring, telemetry_kernel_buffer_size_pow_2, (uint8_t)cpu_count);
269 
270 	_telemetry_kernel_notify_thread = thread_call_allocate_with_options(
271 		_telemetry_kernel_notify, NULL, THREAD_CALL_PRIORITY_USER,
272 		THREAD_CALL_OPTIONS_ONCE);
273 	if (!_telemetry_kernel_notify_thread) {
274 		panic("telemetry_init: failed to allocate kernel notification thread call");
275 	}
276 #endif /* HAS_PMI_MICROSTACKSHOTS */
277 }
278 
279 STARTUP(MACH_IPC, STARTUP_RANK_FIRST, _telemetry_init);
280 
281 /*
282  * If userland has registered a port for telemetry notifications, send one now.
283  */
284 static void
285 _telemetry_notify_user(telemetry_notice_t flags)
286 {
287 	mach_port_t user_port = MACH_PORT_NULL;
288 
289 	kern_return_t kr = host_get_telemetry_port(host_priv_self(), &user_port);
290 	if ((kr != KERN_SUCCESS) || !IPC_PORT_VALID(user_port)) {
291 		return;
292 	}
293 
294 	telemetry_notification(user_port, flags);
295 	ipc_port_release_send(user_port);
296 }
297 
298 #if HAS_PMI_MICROSTACKSHOTS
299 
300 static void
301 telemetry_pmi_handler(bool user_mode, __unused void *ctx)
302 {
303 	thread_t thread = current_thread();
304 	if (get_threadtask(thread) == kernel_task) {
305 		_telemetry_take_sample_kernel(thread, kPMIRecord);
306 	} else {
307 		_telemetry_mark_curthread(user_mode);
308 	}
309 }
310 
311 #endif /* HAS_PMI_MICROSTACKSHOTS */
312 
313 int
314 telemetry_pmi_setup(enum telemetry_pmi pmi_ctr, uint64_t period)
315 {
316 #if HAS_PMI_MICROSTACKSHOTS
317 	enum telemetry_source source = TMSRC_NONE;
318 	int error = 0;
319 	const char *name = "?";
320 
321 	unsigned int ctr = 0;
322 
323 	TELEMETRY_PMI_LOCK();
324 
325 	switch (pmi_ctr) {
326 	case TELEMETRY_PMI_NONE:
327 		if (!telemetry_sample_pmis) {
328 			error = 1;
329 			goto out;
330 		}
331 
332 		telemetry_sample_pmis = false;
333 		error = mt_microstackshot_stop();
334 		if (!error) {
335 			printf("telemetry: disabling ustackshot on PMI\n");
336 			int intrs_en = ml_set_interrupts_enabled(FALSE);
337 			lck_spin_lock(&telemetry_metadata_lck);
338 			telemetry_metadata.tm_period = 0;
339 			telemetry_metadata.tm_source = TMSRC_NONE;
340 			lck_spin_unlock(&telemetry_metadata_lck);
341 			ml_set_interrupts_enabled(intrs_en);
342 		}
343 		goto out;
344 
345 	case TELEMETRY_PMI_INSTRS:
346 		ctr = MT_CORE_INSTRS;
347 		name = "instructions";
348 		source = TMSRC_INSTRUCTIONS;
349 		break;
350 
351 	case TELEMETRY_PMI_CYCLES:
352 		ctr = MT_CORE_CYCLES;
353 		name = "cycles";
354 		source = TMSRC_CYCLES;
355 		break;
356 
357 	default:
358 		error = 1;
359 		goto out;
360 	}
361 
362 	telemetry_sample_pmis = true;
363 
364 	error = mt_microstackshot_start(ctr, period, telemetry_pmi_handler, NULL);
365 	if (!error) {
366 		printf("telemetry: ustackshot every %llu %s\n", period, name);
367 
368 		int intrs_en = ml_set_interrupts_enabled(FALSE);
369 		lck_spin_lock(&telemetry_metadata_lck);
370 		telemetry_metadata.tm_period = period;
371 		telemetry_metadata.tm_source = source;
372 		telemetry_metadata.tm_generation += 1;
373 		lck_spin_unlock(&telemetry_metadata_lck);
374 		ml_set_interrupts_enabled(intrs_en);
375 	}
376 
377 out:
378 	TELEMETRY_PMI_UNLOCK();
379 	return error;
380 #else /* HAS_PMI_MICROSTACKSHOTS */
381 #pragma unused(pmi_ctr, period)
382 	return 1;
383 #endif /* !HAS_PMI_MICROSTACKSHOTS */
384 }
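/*
 * Sketch of a hypothetical kernel caller enabling and later disabling
 * PMI-driven sampling:
 *
 *	int err = telemetry_pmi_setup(TELEMETRY_PMI_CYCLES, 1000000);
 *	if (err == 0) {
 *		// ... samples accumulate on each PMI ...
 *		(void)telemetry_pmi_setup(TELEMETRY_PMI_NONE, 0);
 *	}
 */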
385 
386 #if HAS_PMI_MICROSTACKSHOTS
387 
388 /*
389  * Mark the current thread for an interrupt-based
390  * telemetry record, to be sampled at the next AST boundary.
391  */
392 static void
393 _telemetry_mark_curthread(bool interrupted_userspace)
394 {
395 	uint32_t ast_bits = AST_TELEMETRY_PMI;
396 	thread_t thread = current_thread();
397 
398 	/*
399 	 * The PMI handler fired even though microstackshot sampling is supposed
400 	 * to be disabled; count the skipped sample and ignore it.
401 	 */
402 	if (!telemetry_sample_pmis) {
403 		os_atomic_inc(&telemetry_metadata.tm_samples_skipped, relaxed);
404 		return;
405 	}
406 
407 	ast_bits |= (interrupted_userspace ? AST_TELEMETRY_USER : AST_TELEMETRY_KERNEL);
408 	thread_ast_set(thread, ast_bits);
409 	ast_propagate(thread);
410 }
411 
412 static void
413 _telemetry_kernel_notify(void * __unused p1, void * __unused p2)
414 {
415 	_telemetry_notify_user(TELEMETRY_NOTICE_KERNEL_MICROSTACKSHOT);
416 }
417 
418 #endif /* HAS_PMI_MICROSTACKSHOTS */
419 
420 void
421 telemetry_ast(thread_t thread, ast_t reasons)
422 {
423 	assert((reasons & AST_TELEMETRY_ALL) != 0);
424 
425 	uint8_t record_type = 0;
426 	if (reasons & AST_TELEMETRY_IO) {
427 		record_type |= kIORecord;
428 	}
429 	if (reasons & (AST_TELEMETRY_USER | AST_TELEMETRY_KERNEL)) {
430 		record_type |= (reasons & AST_TELEMETRY_PMI) ? kPMIRecord :
431 		    kInterruptRecord;
432 	}
433 
434 	if ((reasons & AST_TELEMETRY_MACF) != 0) {
435 		record_type |= kMACFRecord;
436 	}
437 
438 	enum micro_snapshot_flags user_telemetry = (reasons & AST_TELEMETRY_USER) ? kUserMode : 0;
439 	enum micro_snapshot_flags microsnapshot_flags = record_type | user_telemetry;
440 
441 	if ((reasons & AST_TELEMETRY_MACF) != 0) {
442 		telemetry_macf_take_sample(thread, microsnapshot_flags);
443 	}
444 
445 	if ((reasons & (AST_TELEMETRY_IO | AST_TELEMETRY_KERNEL | AST_TELEMETRY_PMI
446 	    | AST_TELEMETRY_USER)) != 0) {
447 		telemetry_take_sample(thread, microsnapshot_flags);
448 	}
449 }
450 
451 static bool
452 _telemetry_task_can_sample(task_t task)
453 {
454 	return (task != TASK_NULL) && !task_did_exec(task) && !task_is_exec_copy(task);
455 }
456 
457 /*
458  * Kernel Thread Microstackshot Support
459  */
460 
463 #if HAS_PMI_MICROSTACKSHOTS
464 
465 static const uint32_t TKS_MAGIC = 0x83a83f29;
466 
467 /*
468  * The bare minimum needed to record a sample from interrupt context, stored in
469  * a ringbuffer for later collection.
470  */
471 struct _telemetry_kernel_sample {
472 	clock_sec_t tks_time_secs;
473 	uint64_t tks_serial_number;
474 	uint64_t tks_telemetry_skipped;
475 	uint64_t tks_telemetry_period;
476 
477 	uint64_t tks_system_time_in_terminated_threads;
478 	uint64_t tks_task_size;
479 	uint64_t tks_pageins;
480 	uint64_t tks_faults;
481 	uint64_t tks_cow_faults;
482 
483 	uint64_t tks_thread_id;
484 	uint64_t tks_system_time;
485 	clock_usec_t tks_time_usecs;
486 	uint32_t tks_magic;
487 	uint32_t tks_thread_state;
488 	uint32_t tks_sched_pri;
489 	uint32_t tks_base_pri;
490 	uint32_t tks_sched_flags;
491 	uint32_t tks_call_stack_size;
492 	uint32_t tks_telemetry_source;
493 	uint32_t tks_telemetry_generation;
494 	uint8_t tks_cpu;
495 	uint8_t tks_io_tier;
496 	char tks_thread_name[MAXTHREADNAMESIZE];
497 };
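/*
 * On the ring, a record is the sample header immediately followed by its
 * packed call stack (see _telemetry_take_sample_kernel):
 *
 *	[struct _telemetry_kernel_sample][frame 0][frame 1]...[frame N-1]
 *	       sizeof(sample) bytes        tks_call_stack_size bytes
 */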
498 
499 /*
500  * Only collect call stacks up to this maximum length.
501  */
502 #define TELEMETRY_KERNEL_FRAMES_MAX (128)
503 
504 /*
505  * A scratch buffer that mirrors the format of data stored in the ringbuffer so
506  * it can be written contiguously in a single update.
507  */
508 struct _telemetry_scratch {
509 	struct _telemetry_kernel_sample ts_sample;
510 	uintptr_t ts_call_stack[TELEMETRY_KERNEL_FRAMES_MAX];
511 };
512 
513 /*
514  * Each writer in interrupt context needs a place off the stack to store these
515  * scratch buffers.
516  */
517 static struct _telemetry_scratch PERCPU_DATA(_telemetry_pcpu);
518 
519 /*
520  * Collect a sample for the current kernel thread.  Must be called in interrupt
521  * context.
522  */
523 static void
524 _telemetry_take_sample_kernel(thread_t thread, enum micro_snapshot_flags __unused flags)
525 {
526 	assert(ml_at_interrupt_context());
527 	struct _telemetry_scratch *scratch = PERCPU_GET(_telemetry_pcpu);
528 
529 	/*
530 	 * Collect the call stack in a packed representation to fit more of these
531 	 * samples into the ringbuffer.
532 	 */
533 	struct backtrace_control ctl = {
534 		.btc_flags = BTF_KERN_INTERRUPTED,
535 	};
536 	backtrace_info_t info = BTI_NONE;
537 	unsigned int call_stack_count = backtrace(scratch->ts_call_stack,
538 	    TELEMETRY_KERNEL_FRAMES_MAX,
539 	    &ctl,
540 	    &info);
541 	unsigned int call_stack_size = call_stack_count * sizeof(scratch->ts_call_stack[0]);
542 
543 	/*
544 	 * Relaxed ordering allows the serial numbers to appear non-monotonically,
545 	 * but avoids any further synchronization with writers.
546 	 */
547 	uint64_t serial_number = os_atomic_inc(&telemetry_metadata.tm_samples_recorded, relaxed);
548 
549 	struct recount_times_mach term_times = recount_task_terminated_times(kernel_task);
550 	struct recount_times_mach thread_times = recount_current_thread_times();
551 
552 	clock_sec_t secs = 0;
553 	clock_usec_t usecs = 0;
554 	clock_get_calendar_microtime(&secs, &usecs);
555 	uint8_t cpu = (uint8_t)cpu_number();
556 	scratch->ts_sample = (struct _telemetry_kernel_sample){
557 		.tks_magic = TKS_MAGIC,
558 		.tks_serial_number = serial_number,
559 		.tks_telemetry_skipped = os_atomic_load(&telemetry_metadata.tm_samples_skipped, relaxed),
560 		.tks_telemetry_period = telemetry_metadata.tm_period,
561 		.tks_telemetry_source = telemetry_metadata.tm_source,
562 		.tks_telemetry_generation = telemetry_metadata.tm_generation,
563 		.tks_cpu = cpu,
564 		.tks_time_secs = secs,
565 		.tks_time_usecs = usecs,
566 		.tks_thread_id = thread_tid(thread),
567 		.tks_pageins = counter_load(&kernel_task->pageins),
568 		.tks_faults = counter_load(&kernel_task->faults),
569 		.tks_cow_faults = counter_load(&kernel_task->cow_faults),
570 		.tks_system_time_in_terminated_threads = term_times.rtm_system,
571 		.tks_system_time = thread_times.rtm_system,
572 		.tks_thread_state = thread->state,
573 		.tks_sched_pri = thread->sched_pri,
574 		.tks_base_pri = thread->base_pri,
575 		.tks_io_tier = (uint8_t)proc_get_effective_thread_policy(thread, TASK_POLICY_IO),
576 		.tks_call_stack_size = call_stack_size,
577 	};
578 	thread_get_thread_name(thread, scratch->ts_sample.tks_thread_name);
579 
580 	/*
581 	 * Write just the amount needed to store the sample information and call
582 	 * stack.
583 	 */
584 	uint32_t size_needed = sizeof(struct _telemetry_kernel_sample) + call_stack_size;
585 	uint32_t available =
586 	    mpsc_ring_write(&_telemetry_kernel_ring, cpu, scratch, size_needed);
587 
588 	/*
589 	 * Check that there was enough space to store the sample.
590 	 */
591 	bool skipped = available < size_needed;
592 	/*
593 	 * The samples-recorded counter was already incremented, so a gap in the
594 	 * serial numbers tells user space that this sample went missing.
595 	 */
596 	if (skipped || available - size_needed <= telemetry_notification_leeway) {
597 		if (os_atomic_cmpxchg(&_telemetry_kernel_notified, false, true, relaxed)) {
598 			thread_call_enter(_telemetry_kernel_notify_thread);
599 		}
600 	}
601 }
602 
603 /*
604  * The format of sample data that user space can parse, with no UUIDs present,
605  * as is the case for kernel samples.
606  */
607 struct _telemetry_kernel_snapshots {
608 	struct micro_snapshot tkse_micro_snap;
609 	struct task_snapshot tkse_task_snap;
610 	struct thread_snapshot tkse_thread_snap;
611 };
612 
613 /*
614  * Convert a kernel sample into the trio of snapshots that user space can parse.
615  */
616 static void
617 _telemetry_kernel_snapshot(
618 	struct _telemetry_kernel_snapshots *snaps,
619 	struct _telemetry_kernel_sample *sample)
620 {
621 	snaps->tkse_micro_snap = (struct micro_snapshot){
622 		.snapshot_magic = STACKSHOT_MICRO_SNAPSHOT_MAGIC,
623 		.ms_flags = (uint8_t)(kPMIRecord | kKernelThread),
624 		.ms_cpu = sample->tks_cpu,
625 		.ms_time = sample->tks_time_secs,
626 		.ms_time_microsecs = sample->tks_time_usecs,
627 	};
628 	snaps->tkse_task_snap = (struct task_snapshot){
629 		.snapshot_magic = STACKSHOT_TASK_SNAPSHOT_MAGIC,
630 		.ss_flags = kKernel64_p,
631 		.pid = 0,
632 		.uniqueid = 0,
633 		.system_time_in_terminated_threads =
634 	    sample->tks_system_time_in_terminated_threads,
635 		.task_size = sample->tks_task_size,
636 		.faults = sample->tks_faults,
637 		.pageins = sample->tks_pageins,
638 		.cow_faults = sample->tks_cow_faults,
639 		.p_comm = "kernel_task",
640 		.was_throttled = 0,
641 		.did_throttle = 0,
642 		.p_start_sec = coalition_id(kernel_task->coalition[COALITION_TYPE_RESOURCE]),
643 		/* Set the on-behalf-of pids to -1. */
644 		.p_start_usec = UINT64_MAX,
645 		.latency_qos = LATENCY_QOS_TIER_UNSPECIFIED,
646 		.io_priority_size = {
647 			[0] = ((uint64_t)sample->tks_telemetry_source << 32) | sample->tks_telemetry_generation,
648 			[1] = sample->tks_telemetry_period,
649 			[2] = sample->tks_serial_number,
650 			[3] = sample->tks_telemetry_skipped,
651 		},
652 	};
653 	snaps->tkse_thread_snap = (struct thread_snapshot){
654 		.snapshot_magic = STACKSHOT_THREAD_SNAPSHOT_MAGIC,
655 		.ss_flags = kKernel64_p,
656 		.nkern_frames = sample->tks_call_stack_size / sizeof(uintptr_t),
657 		.wait_event = 0,
658 		.continuation = 0,
659 		.thread_id = sample->tks_thread_id,
660 		.system_time = sample->tks_system_time,
661 		.state = sample->tks_thread_state,
662 		.priority = sample->tks_base_pri,
663 		.sched_pri = sample->tks_sched_pri,
664 		.io_tier = sample->tks_io_tier,
665 	};
666 	memset(snaps->tkse_thread_snap.pth_name, 0, sizeof(snaps->tkse_thread_snap.pth_name));
667 	strlcpy(snaps->tkse_thread_snap.pth_name,
668 	    sample->tks_thread_name,
669 	    sizeof(snaps->tkse_thread_snap.pth_name));
670 }
671 
672 #endif /* HAS_PMI_MICROSTACKSHOTS */
673 
674 int
675 telemetry_kernel_gather(user_addr_t user_buffer, uint32_t *user_length)
676 {
677 #if HAS_PMI_MICROSTACKSHOTS
678 	int result = 0;
679 	/*
680 	 * Track how much data has been copied out to the user buffer.
681 	 */
682 	uint32_t copied = 0;
683 	uint32_t copy_length = *user_length;
684 
685 	*user_length = 0;
686 
687 	/*
688 	 * Get a cursor to read from the ringbuffer.
689 	 */
690 	mpsc_ring_cursor_t cursor = mpsc_ring_read_start(&_telemetry_kernel_ring);
691 
692 	while (copied < copy_length) {
693 		/*
694 		 * This function is called directly off a syscall, so it can afford to
695 		 * use some stack space.
696 		 */
697 		struct _telemetry_kernel_snapshots snaps = { 0 };
698 
699 		/*
700 		 * Check that the user buffer still has enough space for at least the
701 		 * snapshot structures.
702 		 */
703 		if (sizeof(snaps) > copy_length - copied) {
704 			break;
705 		}
706 
707 		/*
708 		 * Read the sample from the ringbuffer.
709 		 */
710 		struct _telemetry_kernel_sample sample = { 0 };
711 		bool advanced = mpsc_ring_cursor_advance(
712 			&_telemetry_kernel_ring,
713 			&cursor,
714 			&sample,
715 			sizeof(sample));
716 		/*
717 		 * If there's no more data, return to user space.
718 		 */
719 		if (!advanced) {
720 			break;
721 		}
722 
723 		if (sample.tks_magic != TKS_MAGIC) {
724 			panic("microstackshot: kernel sample magic is invalid");
725 		}
726 		/*
727 		 * Compute the size needed for the snapshots and call stack and bail
728 		 * out if there's not enough room in the user's buffer.
729 		 */
730 		assert3u(sample.tks_call_stack_size, <=, sizeof(uintptr_t) * TELEMETRY_KERNEL_FRAMES_MAX);
731 		uint32_t size_needed = sizeof(snaps) + sample.tks_call_stack_size;
732 		if (size_needed > copy_length - copied) {
733 			break;
734 		}
735 
736 		/*
737 		 * Convert the sample into snapshots suitable for user space and copy
738 		 * them out.
739 		 */
740 		_telemetry_kernel_snapshot(&snaps, &sample);
741 		result = copyout(&snaps, user_buffer + copied, sizeof(snaps));
742 		if (result != 0) {
743 			break;
744 		}
745 		copied += sizeof(snaps);
746 
747 		/*
748 		 * Copy the call stack out of the ringbuffer.
749 		 */
750 		uintptr_t call_stack[TELEMETRY_KERNEL_FRAMES_MAX] = { 0 };
751 		assert3u(sizeof(call_stack), >=, sample.tks_call_stack_size);
752 		advanced = mpsc_ring_cursor_advance(
753 			&_telemetry_kernel_ring,
754 			&cursor,
755 			&call_stack,
756 			sample.tks_call_stack_size);
757 		/*
758 		 * There must be a call stack after the sample, otherwise something got
759 		 * corrupted and there's no more framing information for the reader.
760 		 */
761 		assert(advanced);
762 		uint32_t call_stack_count = sample.tks_call_stack_size / sizeof(uintptr_t);
763 		for (uint32_t i = 0; i < call_stack_count; i++) {
764 			/*
765 			 * The last frame of the call stack can sometimes be 0, ignore it.
766 			 */
767 			if (call_stack[i] != 0) {
768 				call_stack[i] = VM_KERNEL_UNSLIDE(call_stack[i]);
769 			}
770 		}
771 
772 		/*
773 		 * Copy the unpacked call stack out to user space.
774 		 */
775 		result = copyout(&call_stack, user_buffer + copied,
776 		    sample.tks_call_stack_size);
777 		if (result != 0) {
778 			break;
779 		}
780 		copied += sample.tks_call_stack_size;
781 		mpsc_ring_cursor_commit(&_telemetry_kernel_ring, &cursor);
782 	}
783 
784 	/*
785 	 * On success, store the number of bytes copied.
786 	 *
787 	 * Some partial data may have been copied out, but user space shouldn't
788 	 * try to inspect it.
789 	 */
790 	if (result == 0) {
791 		/*
792 		 * Complete the read operation and sync any progress back to the ringbuffer.
793 		 */
794 		mpsc_ring_read_finish(&_telemetry_kernel_ring, cursor);
795 		os_atomic_store(&_telemetry_kernel_notified, false, relaxed);
796 		*user_length = copied;
797 	} else {
798 		mpsc_ring_read_cancel(&_telemetry_kernel_ring, cursor);
799 	}
800 	return result;
801 #else /* HAS_PMI_MICROSTACKSHOTS */
802 #pragma unused(user_buffer, user_length)
803 	return ENOTSUP;
804 #endif /* !HAS_PMI_MICROSTACKSHOTS */
805 }
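/*
 * A user-space reader of the gathered bytes would walk them roughly like
 * this (sketch; assumes user space mirrors the kernel's snapshot struct
 * layouts and that buf/len were filled by telemetry_kernel_gather):
 *
 *	size_t off = 0;
 *	while (off + sizeof(struct _telemetry_kernel_snapshots) <= len) {
 *		struct _telemetry_kernel_snapshots *s = (void *)(buf + off);
 *		off += sizeof(*s);
 *		uintptr_t *frames = (void *)(buf + off);
 *		off += s->tkse_thread_snap.nkern_frames * sizeof(uintptr_t);
 *		// consume *s and frames[0..nkern_frames) ...
 *	}
 */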
806 
807 void
808 telemetry_instrumentation_begin(
809 	__unused struct micro_snapshot_buffer *buffer,
810 	__unused enum micro_snapshot_flags flags)
811 {
812 	/* telemetry_XXX accessed outside of lock for instrumentation only */
813 	KDBG(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_RECORD) | DBG_FUNC_START,
814 	    flags, telemetry_bytes_since_last_mark, 0,
815 	    (&telemetry_buffer != buffer));
816 }
817 
818 void
819 telemetry_instrumentation_end(__unused struct micro_snapshot_buffer *buffer)
820 {
821 	/* telemetry_XXX accessed outside of lock for instrumentation only */
822 	KDBG(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_RECORD) | DBG_FUNC_END,
823 	    (&telemetry_buffer != buffer), telemetry_bytes_since_last_mark,
824 	    buffer->current_position, buffer->end_point);
825 }
826 
827 static void
828 _telemetry_take_sample_user(thread_t thread, enum micro_snapshot_flags flags)
829 {
830 	uintptr_t                   frames[128];
831 	size_t                      frames_len = sizeof(frames) / sizeof(frames[0]);
832 	uint32_t                    btcount;
833 	struct backtrace_user_info  btinfo = BTUINFO_INIT;
834 	uint16_t                    async_start_index = UINT16_MAX;
835 
836 	/* Collect backtrace from user thread. */
837 	btcount = backtrace_user(frames, frames_len, NULL, &btinfo);
838 	if (btinfo.btui_error != 0) {
839 		return;
840 	}
841 	if (btinfo.btui_async_frame_addr != 0 &&
842 	    btinfo.btui_async_start_index != 0) {
843 		/*
844 		 * Put the async callstack inline after the frame pointer walk call
845 		 * stack.
846 		 */
847 		async_start_index = (uint16_t)btinfo.btui_async_start_index;
848 		uintptr_t frame_addr = btinfo.btui_async_frame_addr;
849 		unsigned int frames_left = frames_len - async_start_index;
850 		struct backtrace_control ctl = { .btc_frame_addr = frame_addr, };
851 		btinfo = BTUINFO_INIT;
852 		unsigned int async_filled = backtrace_user(frames + async_start_index,
853 		    frames_left, &ctl, &btinfo);
854 		if (btinfo.btui_error == 0) {
855 			btcount = MIN(async_start_index + async_filled, frames_len);
856 		}
857 	}
858 
859 	/*
860 	 * Capture any other metadata and write it to the telemetry buffer.
861 	 */
862 	struct telemetry_target target = {
863 		.thread = thread,
864 		.frames = frames,
865 		.frames_count = btcount,
866 		.user64_regs = (btinfo.btui_info & BTI_64_BIT) != 0,
867 		.microsnapshot_flags = flags,
868 		.include_metadata = flags & kPMIRecord,
869 		.buffer = &telemetry_buffer,
870 		.buffer_mtx = &telemetry_mtx,
871 		.async_start_index = async_start_index,
872 	};
873 	telemetry_process_sample(&target, true, NULL);
874 }
875 
876 void
877 telemetry_take_sample(thread_t thread, enum micro_snapshot_flags flags)
878 {
879 	if (thread == THREAD_NULL) {
880 		return;
881 	}
882 
883 	/* Ensure task is ready for taking a sample. */
884 	task_t task = get_threadtask(thread);
885 	if (!_telemetry_task_can_sample(task)) {
886 		os_atomic_inc(&telemetry_metadata.tm_samples_skipped, relaxed);
887 		return;
888 	}
889 
890 	telemetry_instrumentation_begin(&telemetry_buffer, flags);
891 	_telemetry_take_sample_user(thread, flags);
892 	telemetry_instrumentation_end(&telemetry_buffer);
893 }
894 
895 #if CONFIG_MACF
896 void
897 telemetry_macf_take_sample(thread_t thread, enum micro_snapshot_flags flags)
898 {
899 	task_t                        task;
900 
901 	uintptr_t                     frames_stack[128];
902 	vm_size_t                     btcapacity     = ARRAY_COUNT(frames_stack);
903 	uint32_t                      btcount        = 0;
904 	typedef uintptr_t             telemetry_user_frame_t __kernel_data_semantics;
905 	telemetry_user_frame_t        *frames        = frames_stack;
906 	bool                          alloced_frames = false;
907 
908 	struct backtrace_user_info    btinfo         = BTUINFO_INIT;
909 	struct backtrace_control      btctl          = BTCTL_INIT;
910 
911 	uint32_t                      retry_count    = 0;
912 	const uint32_t                max_retries    = 10;
913 
914 	bool                          initialized    = false;
915 	struct micro_snapshot_buffer *telbuf         = &telemetry_macf_buffer;
916 	uint32_t                      record_start   = 0;
917 	bool                          did_process    = false;
918 	int                           rv             = 0;
919 
920 	if (thread == THREAD_NULL) {
921 		return;
922 	}
923 
924 	telemetry_instrumentation_begin(telbuf, flags);
925 
926 	/* Ensure task is ready for taking a sample. */
927 	task = get_threadtask(thread);
928 	if (!_telemetry_task_can_sample(task) || task == kernel_task) {
929 		rv = EBUSY;
930 		goto out;
931 	}
932 
933 	/* Ensure MACF telemetry buffer was initialized. */
934 	TELEMETRY_MACF_LOCK();
935 	initialized = (telbuf->size > 0);
936 	TELEMETRY_MACF_UNLOCK();
937 
938 	if (!initialized) {
939 		rv = ENOMEM;
940 		goto out;
941 	}
942 
943 	/* Collect backtrace from user thread. */
944 	while (retry_count < max_retries) {
945 		btcount += backtrace_user(frames + btcount, btcapacity - btcount, &btctl, &btinfo);
946 
947 		if ((btinfo.btui_info & BTI_TRUNCATED) != 0 && btinfo.btui_next_frame_addr != 0) {
948 			/*
949 			 * Fast path uses stack memory to avoid an allocation. We must
950 			 * pivot to heap memory in the case where we cannot write the
951 			 * complete backtrace to this buffer.
952 			 */
953 			if (frames == frames_stack) {
954 				btcapacity += 128;
955 				frames = kalloc_data(btcapacity * sizeof(*frames), Z_WAITOK);
956 
957 				if (frames == NULL) {
958 					break;
959 				}
960 
961 				alloced_frames = true;
962 
963 				assert(btcapacity > sizeof(frames_stack) / sizeof(frames_stack[0]));
964 				memcpy(frames, frames_stack, sizeof(frames_stack));
965 			} else {
966 				assert(alloced_frames);
967 				frames = krealloc_data(frames,
968 				    btcapacity * sizeof(*frames),
969 				    (btcapacity + 128) * sizeof(*frames),
970 				    Z_WAITOK);
971 
972 				if (frames == NULL) {
973 					break;
974 				}
975 
976 				btcapacity += 128;
977 			}
978 
979 			btctl.btc_frame_addr = btinfo.btui_next_frame_addr;
980 			++retry_count;
981 		} else {
982 			break;
983 		}
984 	}
985 
986 	if (frames == NULL) {
987 		rv = ENOMEM;
988 		goto out;
989 	} else if (btinfo.btui_error != 0) {
990 		rv = btinfo.btui_error;
991 		goto out;
992 	}
993 
994 	/* Process the backtrace. */
995 	struct telemetry_target target = {
996 		.thread = thread,
997 		.frames = frames,
998 		.frames_count = btcount,
999 		.user64_regs = (btinfo.btui_info & BTI_64_BIT) != 0,
1000 		.microsnapshot_flags = flags,
1001 		.include_metadata = false,
1002 		.buffer = telbuf,
1003 		.buffer_mtx = &telemetry_macf_mtx
1004 	};
1005 	rv = telemetry_process_sample(&target, false, &record_start);
1006 	did_process = true;
1007 
1008 out:
1009 	/* Immediately deliver the collected sample to MAC clients. */
1010 	if (rv == 0) {
1011 		assert(telbuf->current_position >= record_start);
1012 		mac_thread_telemetry(thread,
1013 		    0,
1014 		    (void *)(telbuf->buffer + record_start),
1015 		    telbuf->current_position - record_start);
1016 	} else {
1017 		mac_thread_telemetry(thread, rv, NULL, 0);
1018 	}
1019 
1020 	/*
1021 	 * The lock was taken by telemetry_process_sample, and we asked it not to
1022 	 * unlock upon completion, so we must release the lock here.
1023 	 */
1024 	if (did_process) {
1025 		TELEMETRY_MACF_UNLOCK();
1026 	}
1027 
1028 	if (alloced_frames && frames != NULL) {
1029 		kfree_data(frames, btcapacity * sizeof(*frames));
1030 	}
1031 
1032 	telemetry_instrumentation_end(telbuf);
1033 }
1034 #endif /* CONFIG_MACF */
1035 
1036 static void
1037 _write_task_snapshot(
1038 	struct task_snapshot *tsnap,
1039 	const struct telemetry_target *target)
1040 {
1041 	struct task *task = get_threadtask(target->thread);
1042 	struct proc *p = get_bsdtask_info(task);
1043 	bool user64_va = task_has_64Bit_addr(task);
1044 
1045 	tsnap->snapshot_magic = STACKSHOT_TASK_SNAPSHOT_MAGIC;
1046 	tsnap->pid = proc_pid(p);
1047 	tsnap->uniqueid = proc_uniqueid(p);
1048 	struct recount_times_mach times = recount_task_terminated_times(task);
1049 	tsnap->user_time_in_terminated_threads = times.rtm_user;
1050 	tsnap->system_time_in_terminated_threads = times.rtm_system;
1051 	tsnap->suspend_count = task->suspend_count;
1052 	tsnap->task_size = (typeof(tsnap->task_size))(get_task_phys_footprint(task) / PAGE_SIZE);
1053 	tsnap->faults = counter_load(&task->faults);
1054 	tsnap->pageins = counter_load(&task->pageins);
1055 	tsnap->cow_faults = counter_load(&task->cow_faults);
1056 	/*
1057 	 * The throttling counters are maintained as 64-bit counters in the proc
1058 	 * structure. However, we reserve 32-bits (each) for them in the task_snapshot
1059 	 * struct to save space and since we do not expect them to overflow 32-bits. If we
1060 	 * find these values overflowing in the future, the fix would be to simply
1061 	 * upgrade these counters to 64-bit in the task_snapshot struct
1062 	 */
1063 	tsnap->was_throttled = (uint32_t) proc_was_throttled(p);
1064 	tsnap->did_throttle = (uint32_t) proc_did_throttle(p);
1065 
1066 #if CONFIG_COALITIONS
1067 	/*
1068 	 * These fields are overloaded to represent the resource coalition ID of
1069 	 * this task...
1070 	 */
1071 	coalition_t rsrc_coal = task->coalition[COALITION_TYPE_RESOURCE];
1072 	tsnap->p_start_sec = rsrc_coal ? coalition_id(rsrc_coal) : 0;
1073 	/*
1074 	 * ... and the processes this thread is doing work on behalf of.
1075 	 */
1076 	pid_t origin_pid = -1, proximate_pid = -1;
1077 	(void)thread_get_voucher_origin_proximate_pid(target->thread, &origin_pid, &proximate_pid);
1078 	tsnap->p_start_usec = ((uint64_t)proximate_pid << 32) | (uint32_t)origin_pid;
1079 #endif /* CONFIG_COALITIONS */
1080 
1081 	if (task->t_flags & TF_TELEMETRY) {
1082 		tsnap->ss_flags |= kTaskRsrcFlagged;
1083 	}
1084 
1085 	if (proc_get_effective_task_policy(task, TASK_POLICY_DARWIN_BG)) {
1086 		tsnap->ss_flags |= kTaskDarwinBG;
1087 	}
1088 
1089 	if (proc_get_effective_task_policy(task, TASK_POLICY_ROLE) == TASK_FOREGROUND_APPLICATION) {
1090 		tsnap->ss_flags |= kTaskIsForeground;
1091 	}
1092 	if (user64_va) {
1093 		tsnap->ss_flags |= kUser64_p;
1094 	}
1095 
1096 	uint32_t bgstate = 0;
1097 	proc_get_darwinbgstate(task, &bgstate);
1098 
1099 	if (bgstate & PROC_FLAG_ADAPTIVE_IMPORTANT) {
1100 		tsnap->ss_flags |= kTaskIsBoosted;
1101 	}
1102 	if (bgstate & PROC_FLAG_SUPPRESSED) {
1103 		tsnap->ss_flags |= kTaskIsSuppressed;
1104 	}
1105 
1106 
1107 	tsnap->latency_qos = task_grab_latency_qos(task);
1108 
1109 	strlcpy(tsnap->p_comm, proc_name_address(p), sizeof(tsnap->p_comm));
1110 	const char *longname = proc_longname_address(p);
1111 	if (longname[0] != '\0') {
1112 		/*
1113 		 * XXX Stash the rest of the process's name in some unused fields.
1114 		 */
1115 		strlcpy((char *)tsnap->io_priority_count, &longname[16], sizeof(tsnap->io_priority_count));
1116 	}
1117 	if (target->include_metadata) {
1118 		tsnap->io_priority_size[0] = ((uint64_t)telemetry_metadata.tm_source << 32) | telemetry_metadata.tm_generation;
1119 		tsnap->io_priority_size[1] = telemetry_metadata.tm_period;
1120 		tsnap->io_priority_size[2] = os_atomic_inc(&telemetry_metadata.tm_samples_recorded, relaxed);
1121 		tsnap->io_priority_size[3] = telemetry_metadata.tm_samples_skipped;
1122 	}
1123 	if (task->task_shared_region_slide != -1) {
1124 		tsnap->shared_cache_slide = task->task_shared_region_slide;
1125 		bcopy(task->task_shared_region_uuid, tsnap->shared_cache_identifier,
1126 		    sizeof(task->task_shared_region_uuid));
1127 	}
1128 }
1129 
1130 static void
1131 _write_thread_snapshot(struct thread_snapshot *thsnap, const struct telemetry_target *target)
1132 {
1133 	struct thread *thread = target->thread;
1134 
1135 	thsnap->snapshot_magic = STACKSHOT_THREAD_SNAPSHOT_MAGIC;
1136 	thsnap->thread_id = thread_tid(thread);
1137 	thsnap->state = thread->state;
1138 	thsnap->priority = thread->base_pri;
1139 	thsnap->sched_pri = thread->sched_pri;
1140 	thsnap->sched_flags = thread->sched_flags;
1141 	thsnap->ss_flags |= kStacksPCOnly;
1142 	thsnap->ts_qos = thread->effective_policy.thep_qos;
1143 	thsnap->ts_rqos = thread->requested_policy.thrp_qos;
1144 	thsnap->ts_rqos_override = MAX(thread->requested_policy.thrp_qos_override,
1145 	    thread->requested_policy.thrp_qos_workq_override);
1146 	thsnap->nuser_frames = target->frames_count;
1147 	memcpy(thsnap->_reserved + 1, &target->async_start_index,
1148 	    sizeof(target->async_start_index));
1149 
1150 	if (proc_get_effective_thread_policy(thread, TASK_POLICY_DARWIN_BG)) {
1151 		thsnap->ss_flags |= kThreadDarwinBG;
1152 	}
1153 	if (target->user64_regs) {
1154 		thsnap->ss_flags |= kUser64_p;
1155 	}
1156 
1157 	boolean_t interrupt_state = ml_set_interrupts_enabled(FALSE);
1158 	struct recount_times_mach times = recount_current_thread_times();
1159 	ml_set_interrupts_enabled(interrupt_state);
1160 	thsnap->user_time = times.rtm_user;
1161 	thsnap->system_time = times.rtm_system;
1162 }
1163 
1164 struct _telemetry_uuids {
1165 	errno_t error;
1166 	void *uuid_info;
1167 	uint32_t uuid_info_count;
1168 	uint32_t uuid_info_size;
1169 };
1170 
1171 /*
1172  * Retrieve the array of UUIDs for binaries used by this task.
1173  */
1174 static struct _telemetry_uuids
1175 _telemetry_sample_uuids(task_t task)
1176 {
1177 	bool const user64_va = task_has_64Bit_addr(task);
1178 	uint32_t uuid_info_count = 0;
1179 	mach_vm_address_t uuid_info_addr = 0;
1180 	uint32_t uuid_info_size = 0;
1181 	if (user64_va) {
1182 		uuid_info_size = sizeof(struct user64_dyld_uuid_info);
1183 		struct user64_dyld_all_image_infos task_image_infos;
1184 		if (copyin(task->all_image_info_addr, &task_image_infos, sizeof(task_image_infos)) == 0) {
1185 			uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
1186 			uuid_info_addr = task_image_infos.uuidArray;
1187 		}
1188 	} else {
1189 		uuid_info_size = sizeof(struct user32_dyld_uuid_info);
1190 		struct user32_dyld_all_image_infos task_image_infos;
1191 		if (copyin(task->all_image_info_addr, &task_image_infos, sizeof(task_image_infos)) == 0) {
1192 			uuid_info_count = task_image_infos.uuidArrayCount;
1193 			uuid_info_addr = task_image_infos.uuidArray;
1194 		}
1195 	}
1196 
1197 	/*
1198 	 * If dyld is updating the data structure (indicated by a NULL uuidArray field),
1199 	 * do not provide any UUIDs with the sample.
1200 	 */
1201 	if (uuid_info_addr == USER_ADDR_NULL) {
1202 		return (struct _telemetry_uuids){};
1203 	}
1204 
1205 	/*
1206 	 * The main binary and interesting non-shared-cache libraries should be in the first few images.
1207 	 */
1208 	uuid_info_count = MIN(uuid_info_count, TELEMETRY_MAX_UUID_COUNT);
1209 	uint32_t uuid_info_array_size = uuid_info_count * uuid_info_size;
1210 	char *uuid_info_array = kalloc_data(uuid_info_array_size, Z_WAITOK);
1211 	if (uuid_info_array == NULL) {
1212 		return (struct _telemetry_uuids){
1213 			       .error = ENOMEM,
1214 		};
1215 	}
1216 
1217 	/*
1218 	 * Copy in the UUID info array.  Ignore any failures to copyin.
1219 	 */
1220 	if (copyin(uuid_info_addr, uuid_info_array, uuid_info_array_size) != 0) {
1221 		kfree_data(uuid_info_array, uuid_info_array_size);
1222 		uuid_info_array = NULL;
1223 		uuid_info_array_size = 0;
1224 	}
1225 
1226 	return (struct _telemetry_uuids){
1227 		       .uuid_info = uuid_info_array,
1228 		       .uuid_info_count = uuid_info_count,
1229 		       .uuid_info_size = uuid_info_array_size,
1230 	};
1231 }
1232 
1233 static bool
1234 _telemetry_sample_dispatch_serialno(task_t task, thread_t thread, uint64_t *serialno_out)
1235 {
1236 	uint64_t const dqkeyaddr = thread_dispatchqaddr(thread);
1237 	if (dqkeyaddr != 0) {
1238 		uint64_t dqaddr = 0;
1239 		size_t const user_ptr_size = task_has_64Bit_addr(task) ? 8 : 4;
1240 
1241 		uint64_t const dq_serialno_offset = get_task_dispatchqueue_serialno_offset(task);
1242 		if ((copyin(dqkeyaddr, (char *)&dqaddr, user_ptr_size) == 0) &&
1243 		    (dqaddr != 0) && (dq_serialno_offset != 0)) {
1244 			uint64_t dqserialnumaddr = dqaddr + dq_serialno_offset;
1245 			if (copyin(dqserialnumaddr, serialno_out, user_ptr_size) == 0) {
1246 				return true;
1247 			}
1248 		}
1249 	}
1250 
1251 	return false;
1252 }
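/*
 * The serial-number lookup above chases two user pointers (sketch):
 *
 *	thread_dispatchqaddr(thread)  ->  TSD slot holding the queue pointer
 *	dqaddr + dq_serialno_offset   ->  the queue's 64-bit serial number
 *
 * Either copyin failing simply drops the field from the sample.
 */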
1253 
1254 static void *
1255 _telemetry_buffer_alloc(struct micro_snapshot_buffer *buf, size_t size)
1256 {
1257 	void *alloc = (void *)(uintptr_t)(buf->buffer + buf->current_position);
1258 	memset(alloc, 0, size);
1259 	buf->current_position += size;
1260 	assert3u(buf->current_position, <=, buf->size);
1261 	return alloc;
1262 }
1263 
1264 int
1265 telemetry_process_sample(const struct telemetry_target *target,
1266     bool release_buffer_lock,
1267     uint32_t *out_current_record_start)
1268 {
1269 	thread_t const thread = target->thread;
1270 	size_t const btcount = target->frames_count;
1271 	bool const user64_regs = target->user64_regs;
1272 	struct micro_snapshot_buffer * const current_buffer = target->buffer;
1273 	lck_mtx_t * const buffer_mtx = target->buffer_mtx;
1274 
1275 	clock_sec_t secs;
1276 	clock_usec_t usecs;
1277 	bool notify = false;
1278 	int rv = 0;
1279 
1280 	if (thread == THREAD_NULL) {
1281 		return EINVAL;
1282 	}
1283 
1284 	task_t const task = get_threadtask(thread);
1285 
1286 	struct _telemetry_uuids uuids = _telemetry_sample_uuids(task);
1287 
1288 	/*
1289 	 * Look for a dispatch queue serial number, and copy it in from userland if present.
1290 	 */
1291 	uint64_t dqserial = 0;
1292 	bool dqserial_valid = _telemetry_sample_dispatch_serialno(task, thread, &dqserial);
1293 
1294 	size_t const frames_size = btcount * (user64_regs ? 8 : 4);
1295 	size_t const sample_size = _telemetry_sample_size_static +
1296 	    uuids.uuid_info_size + (dqserial_valid ? sizeof(dqserial) : 0) + frames_size;
1297 
1298 	clock_get_calendar_microtime(&secs, &usecs);
1299 
1300 	/*
1301 	 * We do the bulk of the operation under the telemetry lock, on assumption that
1302 	 * any page faults during execution will not cause another AST_TELEMETRY_ALL
1303 	 * to deadlock; they will just block until we finish. This makes it easier
1304 	 * to copy into the buffer directly. As soon as we unlock, userspace can copy
1305 	 * out of our buffer.
1306 	 */
1307 	lck_mtx_lock(buffer_mtx);
1308 
1309 	/*
1310 	 * If the buffer has been deallocated, there's no way to take a sample.
1311 	 */
1312 	if (!current_buffer->buffer) {
1313 		rv = EINVAL;
1314 	}
1315 
1316 	/*
1317 	 * If the sample would be larger than the entire buffer, ignore it.
1318 	 */
1319 	if (rv == 0 && current_buffer->size < sample_size) {
1320 		rv = ERANGE;
1321 	}
1322 
1323 	if (rv == 0) {
1324 		if ((current_buffer->size - current_buffer->current_position) < sample_size) {
1325 			/*
1326 			 * We can't fit a record in the space available, so wrap around to the beginning.
1327 			 * Save the current position as the known end point of valid data.
1328 			 */
1329 			current_buffer->end_point = current_buffer->current_position;
1330 			current_buffer->current_position = 0;
1331 		}
1332 		uint32_t current_record_start = current_buffer->current_position;
1333 
1334 		/*
1335 		 * Write the snapshots and variable-length arrays into the telemetry buffer.
1336 		 */
1337 
1338 		struct micro_snapshot *msnap = _telemetry_buffer_alloc(current_buffer, sizeof(*msnap));
1339 		*msnap = (struct micro_snapshot){
1340 			.snapshot_magic = STACKSHOT_MICRO_SNAPSHOT_MAGIC,
1341 			.ms_flags = (uint8_t)target->microsnapshot_flags,
1342 			.ms_cpu = cpu_number(),
1343 			.ms_time = secs,
1344 			.ms_time_microsecs = usecs,
1345 		};
1346 
1347 		struct task_snapshot *tsnap = _telemetry_buffer_alloc(current_buffer, sizeof(*tsnap));
1348 		_write_task_snapshot(tsnap, target);
1349 
1350 		if (uuids.uuid_info_size > 0) {
1351 			void *uuid_info_buf = _telemetry_buffer_alloc(current_buffer, uuids.uuid_info_size);
1352 			memcpy(uuid_info_buf, uuids.uuid_info, uuids.uuid_info_size);
1353 			tsnap->nloadinfos = uuids.uuid_info_count;
1354 		}
1355 
1356 		struct thread_snapshot *thsnap = _telemetry_buffer_alloc(current_buffer, sizeof(*thsnap));
1357 		_write_thread_snapshot(thsnap, target);
1358 
1359 		if (dqserial_valid) {
1360 			thsnap->ss_flags |= kHasDispatchSerial;
1361 			uint64_t *dqserial_buf = _telemetry_buffer_alloc(current_buffer, sizeof(*dqserial_buf));
1362 			memcpy(dqserial_buf, &dqserial, sizeof(dqserial));
1363 		}
1364 
1365 		void *frames_buf = _telemetry_buffer_alloc(current_buffer, frames_size);
1366 		if (user64_regs) {
1367 			memcpy(frames_buf, target->frames, frames_size);
1368 		} else {
1369 			uint32_t *frames_32 = frames_buf;
1370 			for (size_t i = 0; i < btcount; i++) {
1371 				frames_32[i] = (uint32_t)target->frames[i];
1372 			}
1373 		}
1374 
1375 		if (current_buffer->end_point < current_buffer->current_position) {
1376 			/*
1377 			 * Each time the cursor wraps around to the beginning, we leave a
1378 			 * differing amount of unused space at the end of the buffer. Make
1379 			 * sure the cursor pushes the end point in case we're making use of
1380 			 * more of the buffer than we did the last time we wrapped.
1381 			 */
1382 			current_buffer->end_point = current_buffer->current_position;
1383 		}
1384 
1385 		/*
1386 		 * Now THIS is a hack.
1387 		 */
1388 		if (current_buffer == &telemetry_buffer) {
1389 			telemetry_bytes_since_last_mark += (current_buffer->current_position - current_record_start);
1390 			if (telemetry_bytes_since_last_mark > telemetry_buffer_notify_at) {
1391 				notify = true;
1392 			}
1393 		}
1394 
1395 		if (out_current_record_start != NULL) {
1396 			*out_current_record_start = current_record_start;
1397 		}
1398 	}
1399 
1400 	if (release_buffer_lock) {
1401 		lck_mtx_unlock(buffer_mtx);
1402 	}
1403 
1404 	if (notify) {
1405 		_telemetry_notify_user(TELEMETRY_NOTICE_BASE);
1406 	}
1407 
1408 	if (uuids.uuid_info != NULL) {
1409 		kfree_data(uuids.uuid_info, uuids.uuid_info_size);
1410 	}
1411 
1412 	return rv;
1413 }
1414 
1415 int
1416 telemetry_gather(user_addr_t buffer, uint32_t *length, bool mark)
1417 {
1418 	return telemetry_buffer_gather(buffer, length, mark, &telemetry_buffer);
1419 }
1420 
1421 int
1422 telemetry_buffer_gather(user_addr_t buffer, uint32_t *length, bool mark, struct micro_snapshot_buffer *current_buffer)
1423 {
1424 	int result = 0;
1425 	uint32_t oldest_record_offset;
1426 
1427 	KDBG(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_GATHER) | DBG_FUNC_START,
1428 	    mark, telemetry_bytes_since_last_mark, 0,
1429 	    (&telemetry_buffer != current_buffer));
1430 
1431 	TELEMETRY_LOCK();
1432 
1433 	if (current_buffer->buffer == 0) {
1434 		*length = 0;
1435 		goto out;
1436 	}
1437 
1438 	if (*length < current_buffer->size) {
1439 		result = KERN_NO_SPACE;
1440 		goto out;
1441 	}
1442 
1443 	/*
1444 	 * Copy the ring buffer out to userland in order sorted by time: least recent to most recent.
1445 	 * First, we need to search forward from the cursor to find the oldest record in our buffer.
1446 	 */
1447 	oldest_record_offset = current_buffer->current_position;
1448 	do {
1449 		if (((oldest_record_offset + sizeof(uint32_t)) > current_buffer->size) ||
1450 		    ((oldest_record_offset + sizeof(uint32_t)) > current_buffer->end_point)) {
1451 			if (*(uint32_t *)(uintptr_t)(current_buffer->buffer) == 0) {
1452 				/*
1453 				 * There is no magic number at the start of the buffer, which means
1454 				 * it's empty; nothing to see here yet.
1455 				 */
1456 				*length = 0;
1457 				goto out;
1458 			}
1459 			/*
1460 			 * We've looked through the end of the active buffer without finding a valid
1461 			 * record; that means all valid records are in a single chunk, beginning at
1462 			 * the very start of the buffer.
1463 			 */
1464 
1465 			oldest_record_offset = 0;
1466 			assert(*(uint32_t *)(uintptr_t)(current_buffer->buffer) == STACKSHOT_MICRO_SNAPSHOT_MAGIC);
1467 			break;
1468 		}
1469 
1470 		if (*(uint32_t *)(uintptr_t)(current_buffer->buffer + oldest_record_offset) == STACKSHOT_MICRO_SNAPSHOT_MAGIC) {
1471 			break;
1472 		}
1473 
1474 		/*
1475 		 * There are no alignment guarantees for micro-stackshot records, so we must search at each
1476 		 * byte offset.
1477 		 */
1478 		oldest_record_offset++;
1479 	} while (oldest_record_offset != current_buffer->current_position);
1480 
1481 	/*
1482 	 * If needed, copyout in two chunks: from the oldest record to the end of the buffer, and then
1483 	 * from the beginning of the buffer up to the current position.
1484 	 */
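	/*
	 * Worked example: in a 16KB buffer with oldest_record_offset at 12KB,
	 * end_point at 15KB, and current_position at 4KB, bytes [12KB, 15KB)
	 * are copied out first (the oldest records), then bytes [0, 4KB), for
	 * a total *length of 7KB in oldest-to-newest order.
	 */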
1485 	if (oldest_record_offset != 0) {
1486 		if ((result = copyout((void *)(current_buffer->buffer + oldest_record_offset), buffer,
1487 		    current_buffer->end_point - oldest_record_offset)) != 0) {
1488 			*length = 0;
1489 			goto out;
1490 		}
1491 		*length = current_buffer->end_point - oldest_record_offset;
1492 	} else {
1493 		*length = 0;
1494 	}
1495 
1496 	if ((result = copyout((void *)current_buffer->buffer, buffer + *length,
1497 	    current_buffer->current_position)) != 0) {
1498 		*length = 0;
1499 		goto out;
1500 	}
1501 	*length += (uint32_t)current_buffer->current_position;
1502 
1503 out:
1504 
1505 	if (mark && (*length > 0)) {
1506 		telemetry_bytes_since_last_mark = 0;
1507 	}
1508 
1509 	TELEMETRY_UNLOCK();
1510 
1511 	KDBG(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_GATHER) | DBG_FUNC_END,
1512 	    current_buffer->current_position, *length,
1513 	    current_buffer->end_point, (&telemetry_buffer != current_buffer));
1514 
1515 	return result;
1516 }
1517 
1518 #if CONFIG_MACF
1519 static int
1520 telemetry_macf_init_locked(size_t buffer_size)
1521 {
1522 	kern_return_t   kr;
1523 
1524 	if (buffer_size > TELEMETRY_MAX_BUFFER_SIZE) {
1525 		buffer_size = TELEMETRY_MAX_BUFFER_SIZE;
1526 	}
1527 
1528 	telemetry_macf_buffer.size = buffer_size;
1529 
1530 	kr = kmem_alloc(kernel_map, &telemetry_macf_buffer.buffer,
1531 	    telemetry_macf_buffer.size, KMA_DATA | KMA_ZERO | KMA_PERMANENT,
1532 	    VM_KERN_MEMORY_SECURITY);
1533 
1534 	if (kr != KERN_SUCCESS) {
1535 		kprintf("Telemetry (MACF): Allocation failed: %d\n", kr);
1536 		return ENOMEM;
1537 	}
1538 
1539 	return 0;
1540 }
1541 
1542 int
1543 telemetry_macf_mark_curthread(void)
1544 {
1545 	thread_t thread = current_thread();
1546 	task_t   task   = get_threadtask(thread);
1547 	int      rv     = 0;
1548 
1549 	if (task == kernel_task) {
1550 		/* Kernel threads never return to an AST boundary, and are ineligible */
1551 		return EINVAL;
1552 	}
1553 
1554 	/* Initialize the MACF telemetry buffer if needed. */
1555 	TELEMETRY_MACF_LOCK();
1556 	if (__improbable(telemetry_macf_buffer.size == 0)) {
1557 		rv = telemetry_macf_init_locked(TELEMETRY_MACF_DEFAULT_BUFFER_SIZE);
1558 
1559 		if (rv != 0) {
			TELEMETRY_MACF_UNLOCK();
1560 			return rv;
1561 		}
1562 	}
1563 	TELEMETRY_MACF_UNLOCK();
1564 
1565 	act_set_macf_telemetry_ast(thread);
1566 	return 0;
1567 }
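/*
 * Expected flow for a MAC policy (sketch): call telemetry_macf_mark_curthread()
 * from a hook running on the thread of interest; at the next AST boundary,
 * telemetry_ast() calls telemetry_macf_take_sample(), which hands the record
 * to the policy via mac_thread_telemetry().
 */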
1568 #endif /* CONFIG_MACF */
1569 
1570 static int
1571 telemetry_backtrace_add_kernel(
1572 	char        *buf,
1573 	size_t       buflen)
1574 {
1575 	int rc = 0;
1576 #if defined(__arm__) || defined(__arm64__)
1577 	extern vm_offset_t   segTEXTEXECB;
1578 	extern unsigned long segSizeTEXTEXEC;
1579 	vm_address_t unslid = segTEXTEXECB - vm_kernel_stext;
1580 
1581 	rc += scnprintf(buf, buflen, "%s@%lx:%lx\n",
1582 	    kernel_uuid_string, unslid, unslid + segSizeTEXTEXEC - 1);
1583 #elif defined(__x86_64__)
1584 	rc += scnprintf(buf, buflen, "%s@0:%lx\n",
1585 	    kernel_uuid_string, vm_kernel_etext - vm_kernel_stext);
1586 #else
1587 #pragma unused(buf, buflen)
1588 #endif
1589 	return rc;
1590 }
1591 
1592 /**
1593  * Generate a backtrace string which can be symbolicated off system
1594  *
1595  * All addresses are relative to the vm_kernel_stext which means that all
1596  * offsets will be typically <= 50M which uses 7 hex digits.
1597  *
1598  * We allow up to TOT entries from FRAMES. The result will be formatted into BUF
1599  * (up to BUFLEN-1 characters) with the following format:
1600  *
1601  *     <OFFSET1>\n
1602  *     <OFFSET2>\n
1603  *     ...
1604  *     <UUID_a>@<TEXT_EXEC_BASE_OFFSET>:<TEXT_EXEC_END_OFFSET>\n
1605  *     <UUID_b>@<TEXT_EXEC_BASE_OFFSET>:<TEXT_EXEC_END_OFFSET>\n
1606  *     ...
1607  *
1608  * In general this backtrace takes 8 bytes per "frame", with an extra 52 bytes
1609  * per unique UUID referenced. As a rule of thumb, with a 256 byte long output
1610  * buffer, at least five entries from four unique UUIDs will generally fit.
1611  */
1612 void
1613 telemetry_backtrace_to_string(
1614 	char        *buf,
1615 	size_t       buflen,
1616 	uint32_t     tot,
1617 	uintptr_t   *frames)
1618 {
1619 	size_t l = 0;
1620 
1621 	for (uint32_t i = 0; i < tot; i++) {
1622 		l += scnprintf(buf + l, buflen - l, "%lx\n",
1623 		    frames[i] - vm_kernel_stext);
1624 	}
1625 	l += telemetry_backtrace_add_kernel(buf + l, buflen - l);
1626 	telemetry_backtrace_add_kexts(buf + l, buflen - l, frames, tot);
1627 }
1628