xref: /xnu-10063.141.1/osfmk/kern/telemetry.c (revision d8b80295118ef25ac3a784134bcf95cd8e88109f)
1 /*
2  * Copyright (c) 2012-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #include <mach/host_priv.h>
29 #include <mach/host_special_ports.h>
30 #include <mach/mach_types.h>
31 #include <mach/telemetry_notification_server.h>
32 
33 #include <kern/assert.h>
34 #include <kern/clock.h>
35 #include <kern/coalition.h>
36 #include <kern/debug.h>
37 #include <kern/host.h>
38 #include <kern/kalloc.h>
39 #include <kern/kern_types.h>
40 #include <kern/locks.h>
41 #include <kern/misc_protos.h>
42 #include <kern/sched.h>
43 #include <kern/sched_prim.h>
44 #include <kern/telemetry.h>
45 #include <kern/timer_call.h>
46 #include <kern/policy_internal.h>
47 #include <kern/kcdata.h>
48 
49 #include <pexpert/pexpert.h>
50 
51 #include <string.h>
52 #include <vm/vm_kern.h>
53 #include <vm/vm_shared_region.h>
54 
55 #include <kperf/callstack.h>
56 #include <kern/backtrace.h>
57 #include <kern/monotonic.h>
58 
59 #include <security/mac_mach_internal.h>
60 
61 #include <sys/errno.h>
62 #include <sys/kdebug.h>
63 #include <uuid/uuid.h>
64 #include <kdp/kdp_dyld.h>
65 
66 #include <libkern/coreanalytics/coreanalytics.h>
67 #include <kern/thread_call.h>
68 
69 #define TELEMETRY_DEBUG 0
70 
71 struct proc;
72 extern int      proc_pid(struct proc *);
73 extern char     *proc_name_address(void *p);
74 extern uint64_t proc_uniqueid(void *p);
75 extern uint64_t proc_was_throttled(void *p);
76 extern uint64_t proc_did_throttle(void *p);
77 extern int      proc_selfpid(void);
78 extern boolean_t task_did_exec(task_t task);
79 extern boolean_t task_is_exec_copy(task_t task);
80 
81 struct micro_snapshot_buffer {
82 	vm_offset_t             buffer;
83 	uint32_t                size;
84 	uint32_t                current_position;
85 	uint32_t                end_point;
86 };
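/*
 * A note on the ring semantics (a sketch, based on the wrap logic in
 * telemetry_process_sample below): current_position is the write cursor,
 * and end_point marks the end of valid data prior to the most recent wrap.
 * Records never straddle the physical end of the buffer; the writer wraps
 * early and records the cut point in end_point instead.
 */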
87 
88 static bool telemetry_task_ready_for_sample(task_t task);
89 
90 static void telemetry_instrumentation_begin(
91 	struct micro_snapshot_buffer *buffer, enum micro_snapshot_flags flags);
92 
93 static void telemetry_instrumentation_end(struct micro_snapshot_buffer *buffer);
94 
95 static void telemetry_take_sample(thread_t thread, enum micro_snapshot_flags flags);
96 
97 #if CONFIG_MACF
98 static void telemetry_macf_take_sample(thread_t thread, enum micro_snapshot_flags flags);
99 #endif
100 
101 struct telemetry_target {
102 	thread_t                         thread;
103 	uintptr_t                       *frames;
104 	size_t                           frames_count;
105 	bool                             user64_regs;
106 	uint16_t                         async_start_index;
107 	enum micro_snapshot_flags        microsnapshot_flags;
108 	struct micro_snapshot_buffer    *buffer;
109 	lck_mtx_t                       *buffer_mtx;
110 };
111 
112 static int telemetry_process_sample(
113 	const struct telemetry_target *target,
114 	bool release_buffer_lock,
115 	uint32_t *out_current_record_start);
116 
117 static int telemetry_buffer_gather(
118 	user_addr_t buffer,
119 	uint32_t *length,
120 	bool mark,
121 	struct micro_snapshot_buffer *current_buffer);
122 
123 #define TELEMETRY_DEFAULT_SAMPLE_RATE (1) /* 1 sample every 1 second */
124 #define TELEMETRY_DEFAULT_BUFFER_SIZE (16*1024)
125 #define TELEMETRY_MAX_BUFFER_SIZE (64*1024)
126 
127 #define TELEMETRY_DEFAULT_NOTIFY_LEEWAY (4*1024) // Userland gets 4k of leeway to collect data after notification
128 #define TELEMETRY_MAX_UUID_COUNT (128) // Max of 128 non-shared-cache UUIDs to log for symbolication
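/*
 * Worked example with the defaults above (no overriding boot-args): a 16 KiB
 * buffer minus the 4 KiB leeway puts telemetry_buffer_notify_at at
 * 16384 - 4096 = 12288 bytes, so userland is notified once unmarked data
 * crosses 12 KiB (see telemetry_init and telemetry_process_sample).
 */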
129 
130 uint32_t                telemetry_sample_rate = 0;
131 volatile boolean_t      telemetry_needs_record = FALSE;
132 volatile boolean_t      telemetry_needs_timer_arming_record = FALSE;
133 
134 /*
135  * If TRUE, record micro-stackshot samples for all tasks.
136  * If FALSE, only sample tasks which are marked for telemetry.
137  */
138 bool     telemetry_sample_all_tasks = false;
139 bool     telemetry_sample_pmis = false;
140 uint32_t telemetry_active_tasks = 0; // Number of tasks opted into telemetry
141 
142 uint32_t telemetry_timestamp = 0;
143 
144 /*
145  * The telemetry_buffer is responsible
146  * for timer samples and interrupt samples that are driven by
147  * compute_averages().  It will notify its client (if one
148  * exists) when it has enough data to be worth flushing.
149  */
150 struct micro_snapshot_buffer telemetry_buffer = {
151 	.buffer = 0,
152 	.size = 0,
153 	.current_position = 0,
154 	.end_point = 0
155 };
156 
157 #if CONFIG_MACF
158 #define TELEMETRY_MACF_DEFAULT_BUFFER_SIZE (16*1024)
159 /*
160  * The MAC framework uses its own telemetry buffer for the purposes of auditing
161  * security-related work being done by userland threads.
162  */
163 struct micro_snapshot_buffer telemetry_macf_buffer = {
164 	.buffer = 0,
165 	.size = 0,
166 	.current_position = 0,
167 	.end_point = 0
168 };
169 #endif
170 
171 int                                     telemetry_bytes_since_last_mark = -1; // How much data since buf was last marked?
172 int                                     telemetry_buffer_notify_at = 0;
173 
174 LCK_GRP_DECLARE(telemetry_lck_grp, "telemetry group");
175 LCK_MTX_DECLARE(telemetry_mtx, &telemetry_lck_grp);
176 LCK_MTX_DECLARE(telemetry_pmi_mtx, &telemetry_lck_grp);
177 LCK_MTX_DECLARE(telemetry_macf_mtx, &telemetry_lck_grp);
178 
179 #define TELEMETRY_LOCK() do { lck_mtx_lock(&telemetry_mtx); } while (0)
180 #define TELEMETRY_TRY_SPIN_LOCK() lck_mtx_try_lock_spin(&telemetry_mtx)
181 #define TELEMETRY_UNLOCK() do { lck_mtx_unlock(&telemetry_mtx); } while (0)
182 
183 #define TELEMETRY_PMI_LOCK() do { lck_mtx_lock(&telemetry_pmi_mtx); } while (0)
184 #define TELEMETRY_PMI_UNLOCK() do { lck_mtx_unlock(&telemetry_pmi_mtx); } while (0)
185 
186 #define TELEMETRY_MACF_LOCK() do { lck_mtx_lock(&telemetry_macf_mtx); } while (0)
187 #define TELEMETRY_MACF_UNLOCK() do { lck_mtx_unlock(&telemetry_macf_mtx); } while (0)
188 
189 #define TELEMETRY_BT_FRAMES  (5)
190 
191 /*
192  * Telemetry reporting is unsafe in interrupt context, since the CA framework
193  * relies on being able to successfully zalloc some memory for the event.
194  * Therefore we maintain a small buffer that is then flushed by a helper thread.
195  */
196 #define CA_ENTRIES_SIZE                           (5)
197 
198 struct telemetry_ca_entry {
199 	uint32_t        type;
200 	uint16_t        code;
201 	uint32_t        num_frames;
202 	uintptr_t       faulting_address;
203 	uintptr_t       frames[TELEMETRY_BT_FRAMES];
204 };
205 
206 LCK_GRP_DECLARE(ca_entries_lock_grp, "ca_entries_lck");
207 LCK_SPIN_DECLARE(ca_entries_lck, &ca_entries_lock_grp);
208 
209 static struct telemetry_ca_entry ca_entries[CA_ENTRIES_SIZE];
210 static uint8_t ca_entries_index = 0;
211 static struct thread_call *telemetry_ca_send_callout;
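/*
 * Minimal sketch of the stash-and-flush pattern implemented by
 * telemetry_stash_ca_event/telemetry_flush_ca_events below: the trap handler
 * appends under the spinlock and kicks the thread call, and the callout later
 * drains the array at thread level, where CA_EVENT_ALLOCATE can safely
 * allocate. (The field-by-field copy is elided here; `entry` is illustrative.)
 *
 *     lck_spin_lock(&ca_entries_lck);
 *     ca_entries[ca_entries_index++] = entry;   // bounded by CA_ENTRIES_SIZE
 *     lck_spin_unlock(&ca_entries_lck);
 *     thread_call_enter(telemetry_ca_send_callout);
 */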
212 
213 CA_EVENT(kernel_breakpoint_event,
214     CA_INT, brk_type,
215     CA_INT, brk_code,
216     CA_INT, faulting_address,
217     CA_STATIC_STRING(CA_UBSANBUF_LEN), backtrace,
218     CA_STATIC_STRING(CA_UUID_LEN), uuid);
219 
220 /* Rate-limit telemetry on last seen faulting address */
221 static uintptr_t PERCPU_DATA(brk_telemetry_cache_address);
222 /* Get out from the brk handler if the CPU is already servicing one */
223 static bool PERCPU_DATA(brk_telemetry_in_handler);
224 
225 static void telemetry_flush_ca_events(thread_call_param_t, thread_call_param_t);
226 
227 void
228 telemetry_init(void)
229 {
230 	kern_return_t ret;
231 	uint32_t          telemetry_notification_leeway;
232 
233 	if (!PE_parse_boot_argn("telemetry_buffer_size",
234 	    &telemetry_buffer.size, sizeof(telemetry_buffer.size))) {
235 		telemetry_buffer.size = TELEMETRY_DEFAULT_BUFFER_SIZE;
236 	}
237 
238 	if (telemetry_buffer.size > TELEMETRY_MAX_BUFFER_SIZE) {
239 		telemetry_buffer.size = TELEMETRY_MAX_BUFFER_SIZE;
240 	}
241 
242 	ret = kmem_alloc(kernel_map, &telemetry_buffer.buffer, telemetry_buffer.size,
243 	    KMA_DATA | KMA_ZERO | KMA_PERMANENT, VM_KERN_MEMORY_DIAG);
244 	if (ret != KERN_SUCCESS) {
245 		kprintf("Telemetry: Allocation failed: %d\n", ret);
246 		return;
247 	}
248 
249 	if (!PE_parse_boot_argn("telemetry_notification_leeway",
250 	    &telemetry_notification_leeway, sizeof(telemetry_notification_leeway))) {
251 		/*
252 		 * By default, notify the user to collect the buffer when there is this much space left in the buffer.
253 		 */
254 		telemetry_notification_leeway = TELEMETRY_DEFAULT_NOTIFY_LEEWAY;
255 	}
256 	if (telemetry_notification_leeway >= telemetry_buffer.size) {
257 		printf("telemetry: nonsensical telemetry_notification_leeway boot-arg %d changed to %d\n",
258 		    telemetry_notification_leeway, TELEMETRY_DEFAULT_NOTIFY_LEEWAY);
259 		telemetry_notification_leeway = TELEMETRY_DEFAULT_NOTIFY_LEEWAY;
260 	}
261 	telemetry_buffer_notify_at = telemetry_buffer.size - telemetry_notification_leeway;
262 
263 	if (!PE_parse_boot_argn("telemetry_sample_rate",
264 	    &telemetry_sample_rate, sizeof(telemetry_sample_rate))) {
265 		telemetry_sample_rate = TELEMETRY_DEFAULT_SAMPLE_RATE;
266 	}
267 
268 	telemetry_ca_send_callout = thread_call_allocate_with_options(
269 		telemetry_flush_ca_events, NULL, THREAD_CALL_PRIORITY_KERNEL,
270 		THREAD_CALL_OPTIONS_ONCE);
271 
272 	assert(telemetry_ca_send_callout != NULL);
273 	/*
274 	 * To enable telemetry for all tasks, include "telemetry_sample_all_tasks=1" in boot-args.
275 	 */
276 	if (!PE_parse_boot_argn("telemetry_sample_all_tasks",
277 	    &telemetry_sample_all_tasks, sizeof(telemetry_sample_all_tasks))) {
278 #if !defined(XNU_TARGET_OS_OSX) && !(DEVELOPMENT || DEBUG)
279 		telemetry_sample_all_tasks = false;
280 #else
281 		telemetry_sample_all_tasks = true;
282 #endif /* !defined(XNU_TARGET_OS_OSX) && !(DEVELOPMENT || DEBUG) */
283 	}
284 
285 	kprintf("Telemetry: Sampling %stasks once per %u second%s\n",
286 	    (telemetry_sample_all_tasks) ? "all " : "",
287 	    telemetry_sample_rate, telemetry_sample_rate == 1 ? "" : "s");
288 }
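/*
 * Example (hypothetical values): booting with
 *
 *     telemetry_buffer_size=32768 telemetry_sample_rate=2 telemetry_sample_all_tasks=1
 *
 * allocates a 32 KiB buffer, samples once every 2 seconds, and opts every
 * task into micro-stackshots; all three boot-arg names are parsed above.
 */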
289 
290 /*
291  * Enable or disable global microstackshots (i.e., telemetry_sample_all_tasks).
292  *
293  * enable_disable == 1: turn it on
294  * enable_disable == 0: turn it off
295  */
296 void
297 telemetry_global_ctl(int enable_disable)
298 {
299 	if (enable_disable == 1) {
300 		telemetry_sample_all_tasks = true;
301 	} else {
302 		telemetry_sample_all_tasks = false;
303 	}
304 }
305 
306 /*
307  * Opt the given task into or out of the telemetry stream.
308  *
309  * Supported reasons (callers may use any or all of):
310  *     TF_CPUMON_WARNING
311  *     TF_WAKEMON_WARNING
312  *
313  * enable_disable == 1: turn it on
314  * enable_disable == 0: turn it off
315  */
316 void
317 telemetry_task_ctl(task_t task, uint32_t reasons, int enable_disable)
318 {
319 	task_lock(task);
320 	telemetry_task_ctl_locked(task, reasons, enable_disable);
321 	task_unlock(task);
322 }
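/*
 * Usage sketch: a resource monitor might opt a task in when its CPU usage
 * warning fires and opt it back out once the warning clears
 * (TF_CPUMON_WARNING is one of the supported reasons listed above):
 *
 *     telemetry_task_ctl(task, TF_CPUMON_WARNING, 1);   // opt in
 *     telemetry_task_ctl(task, TF_CPUMON_WARNING, 0);   // opt out
 */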
323 
324 void
325 telemetry_task_ctl_locked(task_t task, uint32_t reasons, int enable_disable)
326 {
327 	uint32_t origflags;
328 
329 	assert((reasons != 0) && ((reasons | TF_TELEMETRY) == TF_TELEMETRY));
330 
331 	task_lock_assert_owned(task);
332 
333 	origflags = task->t_flags;
334 
335 	if (enable_disable == 1) {
336 		task->t_flags |= reasons;
337 		if ((origflags & TF_TELEMETRY) == 0) {
338 			OSIncrementAtomic(&telemetry_active_tasks);
339 #if TELEMETRY_DEBUG
340 			printf("%s: telemetry OFF -> ON (%d active)\n", proc_name_address(get_bsdtask_info(task)), telemetry_active_tasks);
341 #endif
342 		}
343 	} else {
344 		task->t_flags &= ~reasons;
345 		if (((origflags & TF_TELEMETRY) != 0) && ((task->t_flags & TF_TELEMETRY) == 0)) {
346 			/*
347 			 * If this task went from having at least one telemetry bit to having none,
348 			 * the net change was to disable telemetry for the task.
349 			 */
350 			OSDecrementAtomic(&telemetry_active_tasks);
351 #if TELEMETRY_DEBUG
352 			printf("%s: telemetry ON -> OFF (%d active)\n", proc_name_address(get_bsdtask_info(task)), telemetry_active_tasks);
353 #endif
354 		}
355 	}
356 }
357 
358 /*
359  * Determine if the current thread is eligible for telemetry:
360  *
361  * telemetry_sample_all_tasks: All threads are eligible. This takes precedence.
362  * telemetry_active_tasks: Count of tasks opted in.
363  * task->t_flags & TF_TELEMETRY: This task is opted in.
364  */
365 static bool
366 telemetry_is_active(thread_t thread)
367 {
368 	task_t task = get_threadtask(thread);
369 
370 	if (task == kernel_task) {
371 		/* Kernel threads never return to an AST boundary, and are ineligible */
372 		return false;
373 	}
374 
375 	if (telemetry_sample_all_tasks || telemetry_sample_pmis) {
376 		return true;
377 	}
378 
379 	if ((telemetry_active_tasks > 0) && ((task->t_flags & TF_TELEMETRY) != 0)) {
380 		return true;
381 	}
382 
383 	return false;
384 }
385 
386 /*
387  * Userland is arming a timer. If we are eligible for such a record,
388  * sample now. No need to do this one at the AST because we're already at
389  * a safe place in this system call.
390  */
391 int
392 telemetry_timer_event(__unused uint64_t deadline, __unused uint64_t interval, __unused uint64_t leeway)
393 {
394 	if (telemetry_needs_timer_arming_record == TRUE) {
395 		telemetry_needs_timer_arming_record = FALSE;
396 		telemetry_take_sample(current_thread(), (enum micro_snapshot_flags)(kTimerArmingRecord | kUserMode));
397 	}
398 
399 	return 0;
400 }
401 
402 #if CONFIG_CPU_COUNTERS
403 static void
404 telemetry_pmi_handler(bool user_mode, __unused void *ctx)
405 {
406 	telemetry_mark_curthread(user_mode, TRUE);
407 }
408 #endif /* CONFIG_CPU_COUNTERS */
409 
410 int
411 telemetry_pmi_setup(enum telemetry_pmi pmi_ctr, uint64_t period)
412 {
413 #if CONFIG_CPU_COUNTERS
414 	static bool sample_all_tasks_aside = false;
415 	static uint32_t active_tasks_aside = 0;
416 	int error = 0;
417 	const char *name = "?";
418 
419 	unsigned int ctr = 0;
420 
421 	TELEMETRY_PMI_LOCK();
422 
423 	switch (pmi_ctr) {
424 	case TELEMETRY_PMI_NONE:
425 		if (!telemetry_sample_pmis) {
426 			error = 1;
427 			goto out;
428 		}
429 
430 		telemetry_sample_pmis = false;
431 		telemetry_sample_all_tasks = sample_all_tasks_aside;
432 		telemetry_active_tasks = active_tasks_aside;
433 		error = mt_microstackshot_stop();
434 		if (!error) {
435 			printf("telemetry: disabling ustackshot on PMI\n");
436 		}
437 		goto out;
438 
439 	case TELEMETRY_PMI_INSTRS:
440 		ctr = MT_CORE_INSTRS;
441 		name = "instructions";
442 		break;
443 
444 	case TELEMETRY_PMI_CYCLES:
445 		ctr = MT_CORE_CYCLES;
446 		name = "cycles";
447 		break;
448 
449 	default:
450 		error = 1;
451 		goto out;
452 	}
453 
454 	telemetry_sample_pmis = true;
455 	sample_all_tasks_aside = telemetry_sample_all_tasks;
456 	active_tasks_aside = telemetry_active_tasks;
457 	telemetry_sample_all_tasks = false;
458 	telemetry_active_tasks = 0;
459 
460 	error = mt_microstackshot_start(ctr, period, telemetry_pmi_handler, NULL);
461 	if (!error) {
462 		printf("telemetry: ustackshot every %llu %s\n", period, name);
463 	}
464 
465 out:
466 	TELEMETRY_PMI_UNLOCK();
467 	return error;
468 #else /* CONFIG_CPU_COUNTERS */
469 #pragma unused(pmi_ctr, period)
470 	return 1;
471 #endif /* !CONFIG_CPU_COUNTERS */
472 }
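/*
 * Usage sketch (hypothetical period): sample on every millionth retired
 * instruction, then tear PMI sampling back down:
 *
 *     telemetry_pmi_setup(TELEMETRY_PMI_INSTRS, 1000000);
 *     ...
 *     telemetry_pmi_setup(TELEMETRY_PMI_NONE, 0);
 *
 * While PMI sampling is active, the previous sample-all-tasks and
 * active-task state is parked in the *_aside variables above and restored
 * on teardown.
 */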
473 
474 /*
475  * Mark the current thread for an interrupt-based
476  * telemetry record, to be sampled at the next AST boundary.
477  */
478 void
479 telemetry_mark_curthread(boolean_t interrupted_userspace, boolean_t pmi)
480 {
481 	uint32_t ast_bits = 0;
482 	thread_t thread = current_thread();
483 
484 	/*
485 	 * If telemetry isn't active for this thread, return and try
486 	 * again next time.
487 	 */
488 	if (telemetry_is_active(thread) == false) {
489 		return;
490 	}
491 
492 	ast_bits |= (interrupted_userspace ? AST_TELEMETRY_USER : AST_TELEMETRY_KERNEL);
493 	if (pmi) {
494 		ast_bits |= AST_TELEMETRY_PMI;
495 	}
496 
497 	telemetry_needs_record = FALSE;
498 	thread_ast_set(thread, ast_bits);
499 	ast_propagate(thread);
500 }
501 
502 void
503 compute_telemetry(void *arg __unused)
504 {
505 	if (telemetry_sample_all_tasks || (telemetry_active_tasks > 0)) {
506 		if ((++telemetry_timestamp) % telemetry_sample_rate == 0) {
507 			telemetry_needs_record = TRUE;
508 			telemetry_needs_timer_arming_record = TRUE;
509 		}
510 	}
511 }
512 
513 /*
514  * If userland has registered a port for telemetry notifications, send one now.
515  */
516 static void
517 telemetry_notify_user(void)
518 {
519 	mach_port_t user_port = MACH_PORT_NULL;
520 
521 	kern_return_t kr = host_get_telemetry_port(host_priv_self(), &user_port);
522 	if ((kr != KERN_SUCCESS) || !IPC_PORT_VALID(user_port)) {
523 		return;
524 	}
525 
526 	telemetry_notification(user_port, 0);
527 	ipc_port_release_send(user_port);
528 }
529 
530 void
531 telemetry_ast(thread_t thread, ast_t reasons)
532 {
533 	assert((reasons & AST_TELEMETRY_ALL) != 0);
534 
535 	uint8_t record_type = 0;
536 	if (reasons & AST_TELEMETRY_IO) {
537 		record_type |= kIORecord;
538 	}
539 	if (reasons & (AST_TELEMETRY_USER | AST_TELEMETRY_KERNEL)) {
540 		record_type |= (reasons & AST_TELEMETRY_PMI) ? kPMIRecord :
541 		    kInterruptRecord;
542 	}
543 
544 	if ((reasons & AST_TELEMETRY_MACF) != 0) {
545 		record_type |= kMACFRecord;
546 	}
547 
548 	enum micro_snapshot_flags user_telemetry = (reasons & AST_TELEMETRY_USER) ? kUserMode : 0;
549 	enum micro_snapshot_flags microsnapshot_flags = record_type | user_telemetry;
550 
551 	if ((reasons & AST_TELEMETRY_MACF) != 0) {
552 		telemetry_macf_take_sample(thread, microsnapshot_flags);
553 	}
554 
555 	if ((reasons & (AST_TELEMETRY_IO | AST_TELEMETRY_KERNEL | AST_TELEMETRY_PMI
556 	    | AST_TELEMETRY_USER)) != 0) {
557 		telemetry_take_sample(thread, microsnapshot_flags);
558 	}
559 }
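/*
 * Worked example: reasons == (AST_TELEMETRY_USER | AST_TELEMETRY_PMI) yields
 * microsnapshot_flags == (kPMIRecord | kUserMode) and takes only the
 * telemetry_take_sample path; the MACF path runs solely for
 * AST_TELEMETRY_MACF.
 */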
560 
561 bool
562 telemetry_task_ready_for_sample(task_t task)
563 {
564 	return task != TASK_NULL &&
565 	       task != kernel_task &&
566 	       !task_did_exec(task) &&
567 	       !task_is_exec_copy(task);
568 }
569 
570 void
571 telemetry_instrumentation_begin(
572 	__unused struct micro_snapshot_buffer *buffer,
573 	__unused enum micro_snapshot_flags flags)
574 {
575 	/* telemetry_XXX accessed outside of lock for instrumentation only */
576 	KDBG(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_RECORD) | DBG_FUNC_START,
577 	    flags, telemetry_bytes_since_last_mark, 0,
578 	    (&telemetry_buffer != buffer));
579 }
580 
581 void
582 telemetry_instrumentation_end(__unused struct micro_snapshot_buffer *buffer)
583 {
584 	/* telemetry_XXX accessed outside of lock for instrumentation only */
585 	KDBG(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_RECORD) | DBG_FUNC_END,
586 	    (&telemetry_buffer == buffer), telemetry_bytes_since_last_mark,
587 	    buffer->current_position, buffer->end_point);
588 }
589 
590 void
591 telemetry_take_sample(thread_t thread, enum micro_snapshot_flags flags)
592 {
593 	task_t                      task;
594 	uintptr_t                   frames[128];
595 	size_t                      frames_len = sizeof(frames) / sizeof(frames[0]);
596 	uint32_t                    btcount;
597 	struct backtrace_user_info  btinfo = BTUINFO_INIT;
598 	uint16_t                    async_start_index = UINT16_MAX;
599 
600 	if (thread == THREAD_NULL) {
601 		return;
602 	}
603 
604 	/* Ensure task is ready for taking a sample. */
605 	task = get_threadtask(thread);
606 	if (!telemetry_task_ready_for_sample(task)) {
607 		return;
608 	}
609 
610 	telemetry_instrumentation_begin(&telemetry_buffer, flags);
611 
612 	/* Collect backtrace from user thread. */
613 	btcount = backtrace_user(frames, frames_len, NULL, &btinfo);
614 	if (btinfo.btui_error != 0) {
615 		return;
616 	}
617 	if (btinfo.btui_async_frame_addr != 0 &&
618 	    btinfo.btui_async_start_index != 0) {
619 		/*
620 		 * Put the async callstack inline after the frame pointer walk call
621 		 * stack.
622 		 */
623 		async_start_index = (uint16_t)btinfo.btui_async_start_index;
624 		uintptr_t frame_addr = btinfo.btui_async_frame_addr;
625 		unsigned int frames_left = frames_len - async_start_index;
626 		struct backtrace_control ctl = { .btc_frame_addr = frame_addr, };
627 		btinfo = BTUINFO_INIT;
628 		unsigned int async_filled = backtrace_user(frames + async_start_index,
629 		    frames_left, &ctl, &btinfo);
630 		if (btinfo.btui_error == 0) {
631 			btcount = MIN(async_start_index + async_filled, frames_len);
632 		}
633 	}
634 
635 	/* Process the backtrace. */
636 	struct telemetry_target target = {
637 		.thread = thread,
638 		.frames = frames,
639 		.frames_count = btcount,
640 		.user64_regs = (btinfo.btui_info & BTI_64_BIT) != 0,
641 		.microsnapshot_flags = flags,
642 		.buffer = &telemetry_buffer,
643 		.buffer_mtx = &telemetry_mtx,
644 		.async_start_index = async_start_index,
645 	};
646 	telemetry_process_sample(&target, true, NULL);
647 
648 	telemetry_instrumentation_end(&telemetry_buffer);
649 }
650 
651 #if CONFIG_MACF
652 void
653 telemetry_macf_take_sample(thread_t thread, enum micro_snapshot_flags flags)
654 {
655 	task_t                        task;
656 
657 	vm_size_t                     btcapacity     = 128;
658 	uintptr_t                     frames_stack[btcapacity];
659 	uint32_t                      btcount        = 0;
660 	typedef uintptr_t             telemetry_user_frame_t __kernel_data_semantics;
661 	telemetry_user_frame_t        *frames        = frames_stack;
662 	bool                          alloced_frames = false;
663 
664 	struct backtrace_user_info    btinfo         = BTUINFO_INIT;
665 	struct backtrace_control      btctl          = BTCTL_INIT;
666 
667 	uint32_t                      retry_count    = 0;
668 	const uint32_t                max_retries    = 10;
669 
670 	bool                          initialized    = false;
671 	struct micro_snapshot_buffer *telbuf         = &telemetry_macf_buffer;
672 	uint32_t                      record_start   = 0;
673 	bool                          did_process    = false;
674 	int                           rv             = 0;
675 
676 	if (thread == THREAD_NULL) {
677 		return;
678 	}
679 
680 	telemetry_instrumentation_begin(telbuf, flags);
681 
682 	/* Ensure task is ready for taking a sample. */
683 	task = get_threadtask(thread);
684 	if (!telemetry_task_ready_for_sample(task)) {
685 		rv = EBUSY;
686 		goto out;
687 	}
688 
689 	/* Ensure MACF telemetry buffer was initialized. */
690 	TELEMETRY_MACF_LOCK();
691 	initialized = (telbuf->size > 0);
692 	TELEMETRY_MACF_UNLOCK();
693 
694 	if (!initialized) {
695 		rv = ENOMEM;
696 		goto out;
697 	}
698 
699 	/* Collect backtrace from user thread. */
700 	while (retry_count < max_retries) {
701 		btcount += backtrace_user(frames + btcount, btcapacity - btcount, &btctl, &btinfo);
702 
703 		if ((btinfo.btui_info & BTI_TRUNCATED) != 0 && btinfo.btui_next_frame_addr != 0) {
704 			/*
705 			 * Fast path uses stack memory to avoid an allocation. We must
706 			 * pivot to heap memory in the case where we cannot write the
707 			 * complete backtrace to this buffer.
708 			 */
709 			if (frames == frames_stack) {
710 				btcapacity += 128;
711 				frames = kalloc_data(btcapacity * sizeof(*frames), Z_WAITOK);
712 
713 				if (frames == NULL) {
714 					break;
715 				}
716 
717 				alloced_frames = true;
718 
719 				assert(btcapacity > sizeof(frames_stack) / sizeof(frames_stack[0]));
720 				memcpy(frames, frames_stack, sizeof(frames_stack));
721 			} else {
722 				assert(alloced_frames);
723 				frames = krealloc_data(frames,
724 				    btcapacity * sizeof(*frames),
725 				    (btcapacity + 128) * sizeof(*frames),
726 				    Z_WAITOK);
727 
728 				if (frames == NULL) {
729 					break;
730 				}
731 
732 				btcapacity += 128;
733 			}
734 
735 			btctl.btc_frame_addr = btinfo.btui_next_frame_addr;
736 			++retry_count;
737 		} else {
738 			break;
739 		}
740 	}
741 
742 	if (frames == NULL) {
743 		rv = ENOMEM;
744 		goto out;
745 	} else if (btinfo.btui_error != 0) {
746 		rv = btinfo.btui_error;
747 		goto out;
748 	}
749 
750 	/* Process the backtrace. */
751 	struct telemetry_target target = {
752 		.thread = thread,
753 		.frames = frames,
754 		.frames_count = btcount,
755 		.user64_regs = (btinfo.btui_info & BTI_64_BIT) != 0,
756 		.microsnapshot_flags = flags,
757 		.buffer = telbuf,
758 		.buffer_mtx = &telemetry_macf_mtx
759 	};
760 	rv = telemetry_process_sample(&target, false, &record_start);
761 	did_process = true;
762 
763 out:
764 	/* Immediately deliver the collected sample to MAC clients. */
765 	if (rv == 0) {
766 		assert(telbuf->current_position >= record_start);
767 		mac_thread_telemetry(thread,
768 		    0,
769 		    (void *)(telbuf->buffer + record_start),
770 		    telbuf->current_position - record_start);
771 	} else {
772 		mac_thread_telemetry(thread, rv, NULL, 0);
773 	}
774 
775 	/*
776 	 * The lock was taken by telemetry_process_sample, and we asked it not to
777 	 * unlock upon completion, so we must release the lock here.
778 	 */
779 	if (did_process) {
780 		TELEMETRY_MACF_UNLOCK();
781 	}
782 
783 	if (alloced_frames && frames != NULL) {
784 		kfree_data(frames, btcapacity * sizeof(*frames));
785 	}
786 
787 	telemetry_instrumentation_end(telbuf);
788 }
789 #endif /* CONFIG_MACF */
790 
791 int
792 telemetry_process_sample(const struct telemetry_target *target,
793     bool release_buffer_lock,
794     uint32_t *out_current_record_start)
795 {
796 	thread_t thread = target->thread;
797 	uintptr_t *frames = target->frames;
798 	size_t btcount = target->frames_count;
799 	bool user64_regs = target->user64_regs;
800 	enum micro_snapshot_flags microsnapshot_flags = target->microsnapshot_flags;
801 	struct micro_snapshot_buffer *current_buffer = target->buffer;
802 	lck_mtx_t *buffer_mtx = target->buffer_mtx;
803 
804 	task_t task;
805 	void *p;
806 	uint32_t bti;
807 	struct micro_snapshot *msnap;
808 	struct task_snapshot *tsnap;
809 	struct thread_snapshot *thsnap;
810 	clock_sec_t secs;
811 	clock_usec_t usecs;
812 	vm_size_t framesize;
813 	uint32_t current_record_start;
814 	uint32_t tmp = 0;
815 	bool notify = false;
816 	int     rv = 0;
817 
818 	if (thread == THREAD_NULL) {
819 		return EINVAL;
820 	}
821 
822 	task = get_threadtask(thread);
823 	p = get_bsdtask_info(task);
824 	bool user64_va = task_has_64Bit_addr(task);
825 
826 	/*
827 	 * Retrieve the array of UUID's for binaries used by this task.
828 	 * We reach down into DYLD's data structures to find the array.
829 	 *
830 	 * XXX - make this common with kdp?
831 	 */
832 	uint32_t uuid_info_count = 0;
833 	mach_vm_address_t uuid_info_addr = 0;
834 	uint32_t uuid_info_size = 0;
835 	if (user64_va) {
836 		uuid_info_size = sizeof(struct user64_dyld_uuid_info);
837 		struct user64_dyld_all_image_infos task_image_infos;
838 		if (copyin(task->all_image_info_addr, (char *)&task_image_infos, sizeof(task_image_infos)) == 0) {
839 			uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
840 			uuid_info_addr = task_image_infos.uuidArray;
841 		}
842 	} else {
843 		uuid_info_size = sizeof(struct user32_dyld_uuid_info);
844 		struct user32_dyld_all_image_infos task_image_infos;
845 		if (copyin(task->all_image_info_addr, (char *)&task_image_infos, sizeof(task_image_infos)) == 0) {
846 			uuid_info_count = task_image_infos.uuidArrayCount;
847 			uuid_info_addr = task_image_infos.uuidArray;
848 		}
849 	}
850 
851 	/*
852 	 * If we get a NULL uuid_info_addr (which can happen when we catch dyld in the middle of updating
853 	 * this data structure), we zero the uuid_info_count so that we won't even try to save load info
854 	 * for this task.
855 	 */
856 	if (!uuid_info_addr) {
857 		uuid_info_count = 0;
858 	}
859 
860 	/*
861 	 * Don't copy in an unbounded amount of memory. The main binary and interesting
862 	 * non-shared-cache libraries should be in the first few images.
863 	 */
864 	if (uuid_info_count > TELEMETRY_MAX_UUID_COUNT) {
865 		uuid_info_count = TELEMETRY_MAX_UUID_COUNT;
866 	}
867 
868 	uint32_t uuid_info_array_size = uuid_info_count * uuid_info_size;
869 	char     *uuid_info_array = NULL;
870 
871 	if (uuid_info_count > 0) {
872 		uuid_info_array = kalloc_data(uuid_info_array_size, Z_WAITOK);
873 		if (uuid_info_array == NULL) {
874 			return ENOMEM;
875 		}
876 
877 		/*
878 		 * Copy in the UUID info array.
879 		 * It may be nonresident, in which case just fix up nloadinfos to 0 in the task snapshot.
880 		 */
881 		if (copyin(uuid_info_addr, uuid_info_array, uuid_info_array_size) != 0) {
882 			kfree_data(uuid_info_array, uuid_info_array_size);
883 			uuid_info_array = NULL;
884 			uuid_info_array_size = 0;
885 		}
886 	}
887 
888 	/*
889 	 * Look for a dispatch queue serial number, and copy it in from userland if present.
890 	 */
891 	uint64_t dqserialnum = 0;
892 	int              dqserialnum_valid = 0;
893 
894 	uint64_t dqkeyaddr = thread_dispatchqaddr(thread);
895 	if (dqkeyaddr != 0) {
896 		uint64_t dqaddr = 0;
897 		uint64_t dq_serialno_offset = get_task_dispatchqueue_serialno_offset(task);
898 		if ((copyin(dqkeyaddr, (char *)&dqaddr, (user64_va ? 8 : 4)) == 0) &&
899 		    (dqaddr != 0) && (dq_serialno_offset != 0)) {
900 			uint64_t dqserialnumaddr = dqaddr + dq_serialno_offset;
901 			if (copyin(dqserialnumaddr, (char *)&dqserialnum, (user64_va ? 8 : 4)) == 0) {
902 				dqserialnum_valid = 1;
903 			}
904 		}
905 	}
906 
907 	clock_get_calendar_microtime(&secs, &usecs);
908 
909 	lck_mtx_lock(buffer_mtx);
910 
911 	/*
912 	 * If our buffer is not backed by anything, then we cannot take the
913 	 * sample. This is meant to allow the buffer to be deallocated when
914 	 * telemetry is disabled.
915 	 */
916 	if (!current_buffer->buffer) {
917 		rv = EINVAL;
918 		goto cancel_sample;
919 	}
920 
921 	/*
922 	 * We do the bulk of the operation under the telemetry lock, on assumption that
923 	 * any page faults during execution will not cause another AST_TELEMETRY_ALL
924 	 * to deadlock; they will just block until we finish. This makes it easier
925 	 * to copy into the buffer directly. As soon as we unlock, userspace can copy
926 	 * out of our buffer.
927 	 */
928 
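	/*
	 * Layout of one record as written below, in order (a sketch; records
	 * never straddle the end of the buffer, hence the wrap-and-retry):
	 *
	 *     struct micro_snapshot
	 *     struct task_snapshot
	 *     UUID info array (uuid_info_count entries of uuid_info_size bytes)
	 *     struct thread_snapshot
	 *     optional dispatch queue serial number (4 or 8 bytes)
	 *     btcount user frames (framesize bytes each)
	 */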
929 copytobuffer:
930 
931 	current_record_start = current_buffer->current_position;
932 
933 	if ((current_buffer->size - current_buffer->current_position) < sizeof(struct micro_snapshot)) {
934 		/*
935 		 * We can't fit a record in the space available, so wrap around to the beginning.
936 		 * Save the current position as the known end point of valid data.
937 		 */
938 		current_buffer->end_point = current_record_start;
939 		current_buffer->current_position = 0;
940 		if (current_record_start == 0) {
941 			/* This sample is too large to fit in the buffer even when we started at 0, so skip it */
942 			rv = ERANGE;
943 			goto cancel_sample;
944 		}
945 		goto copytobuffer;
946 	}
947 
948 	msnap = (struct micro_snapshot *)(uintptr_t)(current_buffer->buffer + current_buffer->current_position);
949 	msnap->snapshot_magic = STACKSHOT_MICRO_SNAPSHOT_MAGIC;
950 	msnap->ms_flags = (uint8_t)microsnapshot_flags;
951 	msnap->ms_opaque_flags = 0; /* namespace managed by userspace */
952 	msnap->ms_cpu = cpu_number();
953 	msnap->ms_time = secs;
954 	msnap->ms_time_microsecs = usecs;
955 
956 	current_buffer->current_position += sizeof(struct micro_snapshot);
957 
958 	if ((current_buffer->size - current_buffer->current_position) < sizeof(struct task_snapshot)) {
959 		current_buffer->end_point = current_record_start;
960 		current_buffer->current_position = 0;
961 		if (current_record_start == 0) {
962 			/* This sample is too large to fit in the buffer even when we started at 0, so skip it */
963 			rv = ERANGE;
964 			goto cancel_sample;
965 		}
966 		goto copytobuffer;
967 	}
968 
969 	tsnap = (struct task_snapshot *)(uintptr_t)(current_buffer->buffer + current_buffer->current_position);
970 	bzero(tsnap, sizeof(*tsnap));
971 	tsnap->snapshot_magic = STACKSHOT_TASK_SNAPSHOT_MAGIC;
972 	tsnap->pid = proc_pid(p);
973 	tsnap->uniqueid = proc_uniqueid(p);
974 	struct recount_times_mach times = recount_task_terminated_times(task);
975 	tsnap->user_time_in_terminated_threads = times.rtm_user;
976 	tsnap->system_time_in_terminated_threads = times.rtm_system;
977 	tsnap->suspend_count = task->suspend_count;
978 	tsnap->task_size = (typeof(tsnap->task_size))(get_task_phys_footprint(task) / PAGE_SIZE);
979 	tsnap->faults = counter_load(&task->faults);
980 	tsnap->pageins = counter_load(&task->pageins);
981 	tsnap->cow_faults = counter_load(&task->cow_faults);
982 	/*
983 	 * The throttling counters are maintained as 64-bit counters in the proc
984 	 * structure. However, we reserve 32-bits (each) for them in the task_snapshot
985 	 * struct to save space and since we do not expect them to overflow 32-bits. If we
986 	 * find these values overflowing in the future, the fix would be to simply
987 	 * upgrade these counters to 64-bit in the task_snapshot struct
988 	 */
989 	tsnap->was_throttled = (uint32_t) proc_was_throttled(p);
990 	tsnap->did_throttle = (uint32_t) proc_did_throttle(p);
991 #if CONFIG_COALITIONS
992 	/*
993 	 * These fields are overloaded to represent the resource coalition ID of
994 	 * this task...
995 	 */
996 	coalition_t rsrc_coal = task->coalition[COALITION_TYPE_RESOURCE];
997 	tsnap->p_start_sec = rsrc_coal ? coalition_id(rsrc_coal) : 0;
998 	/*
999 	 * ... and the processes this thread is doing work on behalf of.
1000 	 */
1001 	pid_t origin_pid = -1, proximate_pid = -1;
1002 	(void)thread_get_voucher_origin_proximate_pid(thread, &origin_pid, &proximate_pid);
1003 	tsnap->p_start_usec = ((uint64_t)proximate_pid << 32) | (uint32_t)origin_pid;
1004 #endif /* CONFIG_COALITIONS */
1005 
1006 	if (task->t_flags & TF_TELEMETRY) {
1007 		tsnap->ss_flags |= kTaskRsrcFlagged;
1008 	}
1009 
1010 	if (proc_get_effective_task_policy(task, TASK_POLICY_DARWIN_BG)) {
1011 		tsnap->ss_flags |= kTaskDarwinBG;
1012 	}
1013 
1014 	proc_get_darwinbgstate(task, &tmp);
1015 
1016 	if (proc_get_effective_task_policy(task, TASK_POLICY_ROLE) == TASK_FOREGROUND_APPLICATION) {
1017 		tsnap->ss_flags |= kTaskIsForeground;
1018 	}
1019 
1020 	if (tmp & PROC_FLAG_ADAPTIVE_IMPORTANT) {
1021 		tsnap->ss_flags |= kTaskIsBoosted;
1022 	}
1023 
1024 	if (tmp & PROC_FLAG_SUPPRESSED) {
1025 		tsnap->ss_flags |= kTaskIsSuppressed;
1026 	}
1027 
1028 
1029 	tsnap->latency_qos = task_grab_latency_qos(task);
1030 
1031 	strlcpy(tsnap->p_comm, proc_name_address(p), sizeof(tsnap->p_comm));
1032 	if (user64_va) {
1033 		tsnap->ss_flags |= kUser64_p;
1034 	}
1035 
1036 	if (task->task_shared_region_slide != -1) {
1037 		tsnap->shared_cache_slide = task->task_shared_region_slide;
1038 		bcopy(task->task_shared_region_uuid, tsnap->shared_cache_identifier,
1039 		    sizeof(task->task_shared_region_uuid));
1040 	}
1041 
1042 	current_buffer->current_position += sizeof(struct task_snapshot);
1043 
1044 	/*
1045 	 * Directly after the task snapshot, place the array of UUID's corresponding to the binaries
1046 	 * used by this task.
1047 	 */
1048 	if ((current_buffer->size - current_buffer->current_position) < uuid_info_array_size) {
1049 		current_buffer->end_point = current_record_start;
1050 		current_buffer->current_position = 0;
1051 		if (current_record_start == 0) {
1052 			/* This sample is too large to fit in the buffer even when we started at 0, so skip it */
1053 			rv = ERANGE;
1054 			goto cancel_sample;
1055 		}
1056 		goto copytobuffer;
1057 	}
1058 
1059 	/*
1060 	 * Copy the UUID info array into our sample.
1061 	 */
1062 	if (uuid_info_array_size > 0) {
1063 		bcopy(uuid_info_array, (char *)(current_buffer->buffer + current_buffer->current_position), uuid_info_array_size);
1064 		tsnap->nloadinfos = uuid_info_count;
1065 	}
1066 
1067 	current_buffer->current_position += uuid_info_array_size;
1068 
1069 	/*
1070 	 * After the task snapshot & list of binary UUIDs, we place a thread snapshot.
1071 	 */
1072 
1073 	if ((current_buffer->size - current_buffer->current_position) < sizeof(struct thread_snapshot)) {
1074 		/* wrap and overwrite */
1075 		current_buffer->end_point = current_record_start;
1076 		current_buffer->current_position = 0;
1077 		if (current_record_start == 0) {
1078 			/* This sample is too large to fit in the buffer even when we started at 0, so skip it */
1079 			rv = ERANGE;
1080 			goto cancel_sample;
1081 		}
1082 		goto copytobuffer;
1083 	}
1084 
1085 	thsnap = (struct thread_snapshot *)(uintptr_t)(current_buffer->buffer + current_buffer->current_position);
1086 	bzero(thsnap, sizeof(*thsnap));
1087 
1088 	thsnap->snapshot_magic = STACKSHOT_THREAD_SNAPSHOT_MAGIC;
1089 	thsnap->thread_id = thread_tid(thread);
1090 	thsnap->state = thread->state;
1091 	thsnap->priority = thread->base_pri;
1092 	thsnap->sched_pri = thread->sched_pri;
1093 	thsnap->sched_flags = thread->sched_flags;
1094 	thsnap->ss_flags |= kStacksPCOnly;
1095 	thsnap->ts_qos = thread->effective_policy.thep_qos;
1096 	thsnap->ts_rqos = thread->requested_policy.thrp_qos;
1097 	thsnap->ts_rqos_override = MAX(thread->requested_policy.thrp_qos_override,
1098 	    thread->requested_policy.thrp_qos_workq_override);
1099 	memcpy(thsnap->_reserved + 1, &target->async_start_index,
1100 	    sizeof(target->async_start_index));
1101 
1102 	if (proc_get_effective_thread_policy(thread, TASK_POLICY_DARWIN_BG)) {
1103 		thsnap->ss_flags |= kThreadDarwinBG;
1104 	}
1105 
1106 	boolean_t interrupt_state = ml_set_interrupts_enabled(FALSE);
1107 	times = recount_current_thread_times();
1108 	ml_set_interrupts_enabled(interrupt_state);
1109 	thsnap->user_time = times.rtm_user;
1110 	thsnap->system_time = times.rtm_system;
1111 
1112 	current_buffer->current_position += sizeof(struct thread_snapshot);
1113 
1114 	/*
1115 	 * If this thread has a dispatch queue serial number, include it here.
1116 	 */
1117 	if (dqserialnum_valid) {
1118 		if ((current_buffer->size - current_buffer->current_position) < sizeof(dqserialnum)) {
1119 			/* wrap and overwrite */
1120 			current_buffer->end_point = current_record_start;
1121 			current_buffer->current_position = 0;
1122 			if (current_record_start == 0) {
1123 				/* This sample is too large to fit in the buffer even when we started at 0, so skip it */
1124 				rv = ERANGE;
1125 				goto cancel_sample;
1126 			}
1127 			goto copytobuffer;
1128 		}
1129 
1130 		thsnap->ss_flags |= kHasDispatchSerial;
1131 		bcopy(&dqserialnum, (char *)current_buffer->buffer + current_buffer->current_position, sizeof(dqserialnum));
1132 		current_buffer->current_position += sizeof(dqserialnum);
1133 	}
1134 
1135 	if (user64_regs) {
1136 		framesize = 8;
1137 		thsnap->ss_flags |= kUser64_p;
1138 	} else {
1139 		framesize = 4;
1140 	}
1141 
1142 	/*
1143 	 * If we can't fit this entire stacktrace then cancel this record, wrap to the beginning,
1144 	 * and start again there so that we always store a full record.
1145 	 */
1146 	if ((current_buffer->size - current_buffer->current_position) / framesize < btcount) {
1147 		current_buffer->end_point = current_record_start;
1148 		current_buffer->current_position = 0;
1149 		if (current_record_start == 0) {
1150 			/* This sample is too large to fit in the buffer even when we started at 0, so skip it */
1151 			rv = ERANGE;
1152 			goto cancel_sample;
1153 		}
1154 		goto copytobuffer;
1155 	}
1156 
1157 	for (bti = 0; bti < btcount; bti++, current_buffer->current_position += framesize) {
1158 		if (framesize == 8) {
1159 			*(uint64_t *)(uintptr_t)(current_buffer->buffer + current_buffer->current_position) = frames[bti];
1160 		} else {
1161 			*(uint32_t *)(uintptr_t)(current_buffer->buffer + current_buffer->current_position) = (uint32_t)frames[bti];
1162 		}
1163 	}
1164 
1165 	if (current_buffer->end_point < current_buffer->current_position) {
1166 		/*
1167 		 * Each time the cursor wraps around to the beginning, we leave a
1168 		 * differing amount of unused space at the end of the buffer. Make
1169 		 * sure the cursor pushes the end point in case we're making use of
1170 		 * more of the buffer than we did the last time we wrapped.
1171 		 */
1172 		current_buffer->end_point = current_buffer->current_position;
1173 	}
1174 
1175 	thsnap->nuser_frames = btcount;
1176 
1177 	/*
1178 	 * Now THIS is a hack.
1179 	 */
1180 	if (current_buffer == &telemetry_buffer) {
1181 		telemetry_bytes_since_last_mark += (current_buffer->current_position - current_record_start);
1182 		if (telemetry_bytes_since_last_mark > telemetry_buffer_notify_at) {
1183 			notify = true;
1184 		}
1185 	}
1186 
1187 	if (out_current_record_start != NULL) {
1188 		*out_current_record_start = current_record_start;
1189 	}
1190 
1191 cancel_sample:
1192 	if (release_buffer_lock) {
1193 		lck_mtx_unlock(buffer_mtx);
1194 	}
1195 
1196 	if (notify) {
1197 		telemetry_notify_user();
1198 	}
1199 
1200 	if (uuid_info_array != NULL) {
1201 		kfree_data(uuid_info_array, uuid_info_array_size);
1202 	}
1203 
1204 	return rv;
1205 }
1206 
1207 #if TELEMETRY_DEBUG
1208 static void
1209 log_telemetry_output(vm_offset_t buf, uint32_t pos, uint32_t sz)
1210 {
1211 	struct micro_snapshot *p;
1212 	uint32_t offset;
1213 
1214 	printf("Copying out %d bytes of telemetry at offset %d\n", sz, pos);
1215 
1216 	buf += pos;
1217 
1218 	/*
1219 	 * Find and log each timestamp in this chunk of buffer.
1220 	 */
1221 	for (offset = 0; offset < sz; offset++) {
1222 		p = (struct micro_snapshot *)(buf + offset);
1223 		if (p->snapshot_magic == STACKSHOT_MICRO_SNAPSHOT_MAGIC) {
1224 			printf("telemetry timestamp: %lld\n", p->ms_time);
1225 		}
1226 	}
1227 }
1228 #endif
1229 
1230 int
1231 telemetry_gather(user_addr_t buffer, uint32_t *length, bool mark)
1232 {
1233 	return telemetry_buffer_gather(buffer, length, mark, &telemetry_buffer);
1234 }
1235 
1236 int
1237 telemetry_buffer_gather(user_addr_t buffer, uint32_t *length, bool mark, struct micro_snapshot_buffer *current_buffer)
1238 {
1239 	int result = 0;
1240 	uint32_t oldest_record_offset;
1241 
1242 	KDBG(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_GATHER) | DBG_FUNC_START,
1243 	    mark, telemetry_bytes_since_last_mark, 0,
1244 	    (&telemetry_buffer != current_buffer));
1245 
1246 	TELEMETRY_LOCK();
1247 
1248 	if (current_buffer->buffer == 0) {
1249 		*length = 0;
1250 		goto out;
1251 	}
1252 
1253 	if (*length < current_buffer->size) {
1254 		result = KERN_NO_SPACE;
1255 		goto out;
1256 	}
1257 
1258 	/*
1259 	 * Copy the ring buffer out to userland in order sorted by time: least recent to most recent.
1260 	 * First, we need to search forward from the cursor to find the oldest record in our buffer.
1261 	 */
1262 	oldest_record_offset = current_buffer->current_position;
1263 	do {
1264 		if (((oldest_record_offset + sizeof(uint32_t)) > current_buffer->size) ||
1265 		    ((oldest_record_offset + sizeof(uint32_t)) > current_buffer->end_point)) {
1266 			if (*(uint32_t *)(uintptr_t)(current_buffer->buffer) == 0) {
1267 				/*
1268 				 * There is no magic number at the start of the buffer, which means
1269 				 * it's empty; nothing to see here yet.
1270 				 */
1271 				*length = 0;
1272 				goto out;
1273 			}
1274 			/*
1275 			 * We've looked through the end of the active buffer without finding a valid
1276 			 * record; that means all valid records are in a single chunk, beginning at
1277 			 * the very start of the buffer.
1278 			 */
1279 
1280 			oldest_record_offset = 0;
1281 			assert(*(uint32_t *)(uintptr_t)(current_buffer->buffer) == STACKSHOT_MICRO_SNAPSHOT_MAGIC);
1282 			break;
1283 		}
1284 
1285 		if (*(uint32_t *)(uintptr_t)(current_buffer->buffer + oldest_record_offset) == STACKSHOT_MICRO_SNAPSHOT_MAGIC) {
1286 			break;
1287 		}
1288 
1289 		/*
1290 		 * There are no alignment guarantees for micro-stackshot records, so we must search at each
1291 		 * byte offset.
1292 		 */
1293 		oldest_record_offset++;
1294 	} while (oldest_record_offset != current_buffer->current_position);
1295 
1296 	/*
1297 	 * If needed, copyout in two chunks: from the oldest record to the end of the buffer, and then
1298 	 * from the beginning of the buffer up to the current position.
1299 	 */
1300 	if (oldest_record_offset != 0) {
1301 #if TELEMETRY_DEBUG
1302 		log_telemetry_output(current_buffer->buffer, oldest_record_offset,
1303 		    current_buffer->end_point - oldest_record_offset);
1304 #endif
1305 		if ((result = copyout((void *)(current_buffer->buffer + oldest_record_offset), buffer,
1306 		    current_buffer->end_point - oldest_record_offset)) != 0) {
1307 			*length = 0;
1308 			goto out;
1309 		}
1310 		*length = current_buffer->end_point - oldest_record_offset;
1311 	} else {
1312 		*length = 0;
1313 	}
1314 
1315 #if TELEMETRY_DEBUG
1316 	log_telemetry_output(current_buffer->buffer, 0, current_buffer->current_position);
1317 #endif
1318 	if ((result = copyout((void *)current_buffer->buffer, buffer + *length,
1319 	    current_buffer->current_position)) != 0) {
1320 		*length = 0;
1321 		goto out;
1322 	}
1323 	*length += (uint32_t)current_buffer->current_position;
1324 
1325 out:
1326 
1327 	if (mark && (*length > 0)) {
1328 		telemetry_bytes_since_last_mark = 0;
1329 	}
1330 
1331 	TELEMETRY_UNLOCK();
1332 
1333 	KDBG(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_GATHER) | DBG_FUNC_END,
1334 	    current_buffer->current_position, *length,
1335 	    current_buffer->end_point, (&telemetry_buffer != current_buffer));
1336 
1337 	return result;
1338 }
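/*
 * Worked example (hypothetical offsets): with current_position == 0x1000 and
 * end_point == 0x3f00, the search above starts at 0x1000, finds the oldest
 * record's magic, copies [oldest, 0x3f00) out first and then [0, 0x1000), so
 * userland receives records ordered least recent to most recent.
 */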
1339 
1340 #if CONFIG_MACF
1341 static int
1342 telemetry_macf_init_locked(size_t buffer_size)
1343 {
1344 	kern_return_t   kr;
1345 
1346 	if (buffer_size > TELEMETRY_MAX_BUFFER_SIZE) {
1347 		buffer_size = TELEMETRY_MAX_BUFFER_SIZE;
1348 	}
1349 
1350 	telemetry_macf_buffer.size = buffer_size;
1351 
1352 	kr = kmem_alloc(kernel_map, &telemetry_macf_buffer.buffer,
1353 	    telemetry_macf_buffer.size, KMA_DATA | KMA_ZERO | KMA_PERMANENT,
1354 	    VM_KERN_MEMORY_SECURITY);
1355 
1356 	if (kr != KERN_SUCCESS) {
1357 		kprintf("Telemetry (MACF): Allocation failed: %d\n", kr);
1358 		return ENOMEM;
1359 	}
1360 
1361 	return 0;
1362 }
1363 
1364 int
1365 telemetry_macf_mark_curthread(void)
1366 {
1367 	thread_t thread = current_thread();
1368 	task_t   task   = get_threadtask(thread);
1369 	int      rv     = 0;
1370 
1371 	if (task == kernel_task) {
1372 		/* Kernel threads never return to an AST boundary, and are ineligible */
1373 		return EINVAL;
1374 	}
1375 
1376 	/* Initialize the MACF telemetry buffer if needed. */
1377 	TELEMETRY_MACF_LOCK();
1378 	if (__improbable(telemetry_macf_buffer.size == 0)) {
1379 		rv = telemetry_macf_init_locked(TELEMETRY_MACF_DEFAULT_BUFFER_SIZE);
1380 	}
1381 	/* Drop the lock before checking rv so an init failure can't leak it. */
1382 	TELEMETRY_MACF_UNLOCK();
1383 	if (rv != 0) {
1384 		return rv;
1385 	}
1386 
1387 	act_set_macf_telemetry_ast(thread);
1388 	return 0;
1389 }
1390 #endif /* CONFIG_MACF */
1391 
1392 
1393 static void
1394 telemetry_stash_ca_event(
1395 	kernel_brk_type_t    type,
1396 	uint16_t             comment,
1397 	uint32_t             total_frames,
1398 	uintptr_t            *backtrace,
1399 	uintptr_t            faulting_address)
1400 {
1401 	/* Skip telemetry if we accidentally took a fault while handling telemetry */
1402 	bool *in_handler = PERCPU_GET(brk_telemetry_in_handler);
1403 	if (*in_handler) {
1404 #if DEVELOPMENT
1405 		panic("Breakpoint trap re-entered from within a spinlock");
1406 #endif
1407 		return;
1408 	}
1409 
1410 	/* Rate limit on repeatedly seeing the same address */
1411 	uintptr_t *cache_address = PERCPU_GET(brk_telemetry_cache_address);
1412 	if (*cache_address == faulting_address) {
1413 		return;
1414 	}
1415 
1416 	*cache_address = faulting_address;
1417 
1418 	lck_spin_lock(&ca_entries_lck);
1419 	*in_handler = true;
1420 
1421 	if (__improbable(ca_entries_index > CA_ENTRIES_SIZE)) {
1422 		panic("Invalid CA interrupt buffer index %d > %d",
1423 		    ca_entries_index, CA_ENTRIES_SIZE);
1424 	}
1425 
1426 	/* We're full, just drop the event */
1427 	if (ca_entries_index == CA_ENTRIES_SIZE) {
1428 		*in_handler = false;
1429 		lck_spin_unlock(&ca_entries_lck);
1430 		return;
1431 	}
1432 
1433 	ca_entries[ca_entries_index].type = type;
1434 	ca_entries[ca_entries_index].code = comment;
1435 	ca_entries[ca_entries_index].faulting_address = faulting_address;
1436 
1437 	assert(total_frames <= TELEMETRY_BT_FRAMES);
1438 
1439 	if (total_frames <= TELEMETRY_BT_FRAMES) {
1440 		ca_entries[ca_entries_index].num_frames = total_frames;
1441 		memcpy(ca_entries[ca_entries_index].frames, backtrace,
1442 		    total_frames * sizeof(uintptr_t));
1443 	}
1444 
1445 	ca_entries_index++;
1446 
1447 	*in_handler = false;
1448 	lck_spin_unlock(&ca_entries_lck);
1449 
1450 	thread_call_enter(telemetry_ca_send_callout);
1451 }
1452 
1453 static int
1454 telemetry_backtrace_add_kernel(
1455 	char        *buf,
1456 	size_t       buflen)
1457 {
1458 	int rc = 0;
1459 #if defined(__arm__) || defined(__arm64__)
1460 	extern vm_offset_t   segTEXTEXECB;
1461 	extern unsigned long segSizeTEXTEXEC;
1462 	vm_address_t unslid = segTEXTEXECB - vm_kernel_stext;
1463 
1464 	rc += scnprintf(buf, buflen, "%s@%lx:%lx\n",
1465 	    kernel_uuid_string, unslid, unslid + segSizeTEXTEXEC - 1);
1466 #elif defined(__x86_64__)
1467 	rc += scnprintf(buf, buflen, "%s@0:%lx\n",
1468 	    kernel_uuid_string, vm_kernel_etext - vm_kernel_stext);
1469 #else
1470 #pragma unused(buf, buflen)
1471 #endif
1472 	return rc;
1473 }
1474 
1475 static void
1476 telemetry_backtrace_to_string(
1477 	char        *buf,
1478 	size_t       buflen,
1479 	uint32_t     tot,
1480 	uintptr_t   *frames)
1481 {
1482 	size_t l = 0;
1483 
1484 	for (uint32_t i = 0; i < tot; i++) {
1485 		l += scnprintf(buf + l, buflen - l, "%lx\n",
1486 		    frames[i] - vm_kernel_stext);
1487 	}
1488 	l += telemetry_backtrace_add_kernel(buf + l, buflen - l);
1489 	telemetry_backtrace_add_kexts(buf + l, buflen - l, frames, tot);
1490 }
1491 
1492 static void
1493 telemetry_flush_ca_events(
1494 	__unused thread_call_param_t p0,
1495 	__unused thread_call_param_t p1)
1496 {
1497 	struct telemetry_ca_entry local_entries[CA_ENTRIES_SIZE] = {0};
1498 	uint8_t entry_cnt = 0;
1499 	bool *in_handler = PERCPU_GET(brk_telemetry_in_handler);
1500 
1501 	lck_spin_lock(&ca_entries_lck);
1502 	*in_handler = true;
1503 
1504 	if (__improbable(ca_entries_index > CA_ENTRIES_SIZE)) {
1505 		panic("Invalid CA interrupt buffer index %d > %d", ca_entries_index,
1506 		    CA_ENTRIES_SIZE);
1507 	}
1508 
1509 	if (ca_entries_index == 0) {
1510 		*in_handler = false;
1511 		lck_spin_unlock(&ca_entries_lck);
1512 		return;
1513 	} else {
1514 		memcpy(local_entries, ca_entries, sizeof(local_entries));
1515 		entry_cnt = ca_entries_index;
1516 		ca_entries_index = 0;
1517 	}
1518 
1519 	*in_handler = false;
1520 	lck_spin_unlock(&ca_entries_lck);
1521 
1522 	/*
1523 	 * All addresses (faulting_address and backtrace) are relative to the
1524 	 * vm_kernel_stext which means that all offsets will be typically <=
1525 	 * 50M which uses 7 hex digits.
1526 	 *
1527 	 * We allow up to TELEMETRY_BT_FRAMES (5) entries,
1528 	 * and be formatted like this:
1529 	 *
1530 	 *     <OFFSET1>\n
1531 	 *     <OFFSET2>\n
1532 	 *     ...
1533 	 *     <UUID_a>@<TEXT_EXEC_BASE_OFFSET>:<TEXT_EXEC_END_OFFSET>\n
1534 	 *     <UUID_b>@<TEXT_EXEC_BASE_OFFSET>:<TEXT_EXEC_END_OFFSET>\n
1535 	 *     ...
1536 	 *
1537 	 * In general this backtrace takes 8 bytes per "frame",
1538 	 * with an extra 52 bytes per unique UUID referenced.
1539 	 *
1540 	 * The buffer we have is CA_UBSANBUF_LEN (256 bytes) long, which
1541 	 * accommodates 4 full unique UUIDs, which should be sufficient.
1542 	 */
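	/*
	 * For instance (hypothetical offsets and an elided UUID), a two-frame
	 * trace might render as:
	 *
	 *     12f0a4
	 *     13d9c8
	 *     6F3C...-UUID@1c000:1d4fff
	 */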
1543 
1544 	/* Send the events */
1545 	for (uint8_t i = 0; i < entry_cnt; i++) {
1546 		ca_event_t ca_event = CA_EVENT_ALLOCATE(kernel_breakpoint_event);
1547 		CA_EVENT_TYPE(kernel_breakpoint_event) * event = ca_event->data;
1548 
1549 		event->brk_type = local_entries[i].type;
1550 		event->brk_code = local_entries[i].code;
1551 		event->faulting_address = local_entries[i].faulting_address;
1552 
1553 		telemetry_backtrace_to_string(event->backtrace,
1554 		    sizeof(event->backtrace),
1555 		    local_entries[i].num_frames,
1556 		    local_entries[i].frames);
1557 		strlcpy(event->uuid, kernel_uuid_string, CA_UUID_LEN);
1558 
1559 		CA_EVENT_SEND(ca_event);
1560 	}
1561 }
1562 
1563 void
1564 telemetry_kernel_brk(
1565 	kernel_brk_type_t     type,
1566 	kernel_brk_options_t  options,
1567 	void                  *tstate,
1568 	uint16_t              comment)
1569 {
1570 #if __arm64__
1571 	arm_saved_state_t *state = (arm_saved_state_t *)tstate;
1572 
1573 	uintptr_t faulting_address = get_saved_state_pc(state);
1574 	uintptr_t saved_fp = get_saved_state_fp(state);
1575 #else
1576 	x86_saved_state64_t *state = (x86_saved_state64_t *)tstate;
1577 
1578 	uintptr_t faulting_address = state->isf.rip;
1579 	uintptr_t saved_fp = state->rbp;
1580 #endif
1581 
1582 	assert(options & KERNEL_BRK_TELEMETRY_OPTIONS);
1583 
1584 	if (startup_phase < STARTUP_SUB_THREAD_CALL) {
1585 #if DEVELOPMENT || DEBUG
1586 		panic("Attempting kernel breakpoint telemetry in early boot.");
1587 #endif
1588 		return;
1589 	}
1590 
1591 	if (options & KERNEL_BRK_CORE_ANALYTICS) {
1592 		uintptr_t frames[TELEMETRY_BT_FRAMES];
1593 
1594 		struct backtrace_control ctl = {
1595 			.btc_frame_addr = (uintptr_t)saved_fp,
1596 		};
1597 
1598 		uint32_t total_frames = backtrace(frames, TELEMETRY_BT_FRAMES, &ctl, NULL);
1599 
1600 		telemetry_stash_ca_event(type, comment, total_frames,
1601 		    frames, faulting_address - vm_kernel_stext);
1602 	}
1603 }

/************************/
/* BOOT PROFILE SUPPORT */
/************************/
/*
 * Boot Profiling
 *
 * The boot-profiling support is a mechanism to sample activity happening on the
 * system during boot. This mechanism sets up a periodic timer and on every timer
 * fire captures a full backtrace into the boot profiling buffer. This buffer can
 * be pulled out and analyzed from user-space. It is turned on using the following
 * boot-args:
 * "bootprofile_buffer_size" specifies the size of the boot profile buffer
 * "bootprofile_interval_ms" specifies the interval for the profiling timer
 *
 * Process Specific Boot Profiling
 *
 * The boot-arg "bootprofile_proc_name" can be used to specify a certain
 * process that needs to be profiled during boot. Setting this boot-arg changes
 * the way stackshots are captured. At every timer fire, the code looks at the
 * currently running process and takes a stackshot only if the requested process
 * is on-core (which makes it unsuitable for MP systems).
 *
 * Trigger Events
 *
 * The boot-arg "bootprofile_type=boot" starts the timer during early boot. Using
 * "wake" starts the timer at AP wake from suspend-to-RAM.
 */
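
/*
 * Example boot-args (illustrative values only; nothing in this file sets
 * defaults like these):
 *
 *     bootprofile_buffer_size=4194304 bootprofile_interval_ms=100 bootprofile_type=boot
 *
 * This would sample all processes every 100 ms from early boot into a 4MB
 * buffer, since "bootprofile_proc_name" is left unset.
 */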

#define BOOTPROFILE_MAX_BUFFER_SIZE (64*1024*1024) /* see also COPYSIZELIMIT_PANIC */

vm_offset_t         bootprofile_buffer = 0;
uint32_t            bootprofile_buffer_size = 0;
uint32_t            bootprofile_buffer_current_position = 0;
uint32_t            bootprofile_interval_ms = 0;
uint64_t            bootprofile_stackshot_flags = 0;
uint64_t            bootprofile_interval_abs = 0;
uint64_t            bootprofile_next_deadline = 0;
uint32_t            bootprofile_all_procs = 0;
char                bootprofile_proc_name[17];
uint64_t            bootprofile_delta_since_timestamp = 0;
LCK_GRP_DECLARE(bootprofile_lck_grp, "bootprofile_group");
LCK_MTX_DECLARE(bootprofile_mtx, &bootprofile_lck_grp);


enum {
	kBootProfileDisabled = 0,
	kBootProfileStartTimerAtBoot,
	kBootProfileStartTimerAtWake
} bootprofile_type = kBootProfileDisabled;


static timer_call_data_t        bootprofile_timer_call_entry;

#define BOOTPROFILE_LOCK() do { lck_mtx_lock(&bootprofile_mtx); } while(0)
#define BOOTPROFILE_TRY_SPIN_LOCK() lck_mtx_try_lock_spin(&bootprofile_mtx)
#define BOOTPROFILE_UNLOCK() do { lck_mtx_unlock(&bootprofile_mtx); } while(0)

static void bootprofile_timer_call(
	timer_call_param_t      param0,
	timer_call_param_t      param1);

void
bootprofile_init(void)
{
	kern_return_t ret;
	char type[32];

	if (!PE_parse_boot_argn("bootprofile_buffer_size",
	    &bootprofile_buffer_size, sizeof(bootprofile_buffer_size))) {
		bootprofile_buffer_size = 0;
	}

	if (bootprofile_buffer_size > BOOTPROFILE_MAX_BUFFER_SIZE) {
		bootprofile_buffer_size = BOOTPROFILE_MAX_BUFFER_SIZE;
	}

	if (!PE_parse_boot_argn("bootprofile_interval_ms",
	    &bootprofile_interval_ms, sizeof(bootprofile_interval_ms))) {
		bootprofile_interval_ms = 0;
	}

	if (!PE_parse_boot_argn("bootprofile_stackshot_flags",
	    &bootprofile_stackshot_flags, sizeof(bootprofile_stackshot_flags))) {
		bootprofile_stackshot_flags = 0;
	}

	if (!PE_parse_boot_argn("bootprofile_proc_name",
	    &bootprofile_proc_name, sizeof(bootprofile_proc_name))) {
		bootprofile_all_procs = 1;
		bootprofile_proc_name[0] = '\0';
	}

	if (PE_parse_boot_argn("bootprofile_type", type, sizeof(type))) {
		if (0 == strcmp(type, "boot")) {
			bootprofile_type = kBootProfileStartTimerAtBoot;
		} else if (0 == strcmp(type, "wake")) {
			bootprofile_type = kBootProfileStartTimerAtWake;
		} else {
			bootprofile_type = kBootProfileDisabled;
		}
	} else {
		bootprofile_type = kBootProfileDisabled;
	}

	clock_interval_to_absolutetime_interval(bootprofile_interval_ms, NSEC_PER_MSEC, &bootprofile_interval_abs);

	/* The type, buffer size, and interval boot-args must all be set to enable */
	if ((bootprofile_type == kBootProfileDisabled) || (bootprofile_buffer_size == 0) || (bootprofile_interval_abs == 0)) {
		return;
	}

	ret = kmem_alloc(kernel_map, &bootprofile_buffer, bootprofile_buffer_size,
	    KMA_DATA | KMA_ZERO | KMA_PERMANENT, VM_KERN_MEMORY_DIAG);
	if (ret != KERN_SUCCESS) {
		kprintf("Boot profile: Allocation failed: %d\n", ret);
		return;
	}

	kprintf("Boot profile: Sampling %s once per %u ms at %s\n",
	    bootprofile_all_procs ? "all procs" : bootprofile_proc_name, bootprofile_interval_ms,
	    bootprofile_type == kBootProfileStartTimerAtBoot ? "boot" : (bootprofile_type == kBootProfileStartTimerAtWake ? "wake" : "unknown"));

	timer_call_setup(&bootprofile_timer_call_entry,
	    bootprofile_timer_call,
	    NULL);

	if (bootprofile_type == kBootProfileStartTimerAtBoot) {
		bootprofile_next_deadline = mach_absolute_time() + bootprofile_interval_abs;
		timer_call_enter_with_leeway(&bootprofile_timer_call_entry,
		    NULL,
		    bootprofile_next_deadline,
		    0,
		    TIMER_CALL_SYS_NORMAL,
		    false);
	}
}
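
/*
 * With the illustrative boot-args shown above, this init path would log
 * something like the following (derived from the kprintf format string,
 * not captured from a real boot):
 *
 *     Boot profile: Sampling all procs once per 100 ms at boot
 */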

void
bootprofile_wake_from_sleep(void)
{
	if (bootprofile_type == kBootProfileStartTimerAtWake) {
		bootprofile_next_deadline = mach_absolute_time() + bootprofile_interval_abs;
		timer_call_enter_with_leeway(&bootprofile_timer_call_entry,
		    NULL,
		    bootprofile_next_deadline,
		    0,
		    TIMER_CALL_SYS_NORMAL,
		    false);
	}
}


static void
bootprofile_timer_call(
	timer_call_param_t      param0 __unused,
	timer_call_param_t      param1 __unused)
{
	unsigned retbytes = 0;
	int pid_to_profile = -1;

	if (!BOOTPROFILE_TRY_SPIN_LOCK()) {
		goto reprogram;
	}

	/* Check if process-specific boot profiling is turned on */
	if (!bootprofile_all_procs) {
		/*
		 * Since boot profiling initializes really early in boot, it is
		 * possible that at this point, the task/proc is not initialized.
		 * Nothing to do in that case.
		 */

		if ((current_task() != NULL) && (get_bsdtask_info(current_task()) != NULL) &&
		    (0 == strncmp(bootprofile_proc_name, proc_name_address(get_bsdtask_info(current_task())), 17))) {
			pid_to_profile = proc_selfpid();
		} else {
			/*
			 * Process-specific boot profiling requested but the on-core process is
			 * something else. Nothing to do here.
			 */
			BOOTPROFILE_UNLOCK();
			goto reprogram;
		}
	}

	/* initiate a stackshot with whatever portion of the buffer is left */
	if (bootprofile_buffer_current_position < bootprofile_buffer_size) {
		uint64_t flags = STACKSHOT_KCDATA_FORMAT | STACKSHOT_TRYLOCK | STACKSHOT_SAVE_LOADINFO
		    | STACKSHOT_GET_GLOBAL_MEM_STATS;
#if defined(XNU_TARGET_OS_OSX)
		flags |= STACKSHOT_SAVE_KEXT_LOADINFO;
#endif

		/* OR on flags specified in boot-args */
		flags |= bootprofile_stackshot_flags;
		if ((flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) && (bootprofile_delta_since_timestamp == 0)) {
			/* Can't take deltas until the first one */
			flags &= ~STACKSHOT_COLLECT_DELTA_SNAPSHOT;
		}

		uint64_t timestamp = 0;
		if (bootprofile_stackshot_flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) {
			timestamp = mach_absolute_time();
		}

		kern_return_t r = stack_snapshot_from_kernel(
			pid_to_profile, (void *)(bootprofile_buffer + bootprofile_buffer_current_position),
			bootprofile_buffer_size - bootprofile_buffer_current_position,
			flags, bootprofile_delta_since_timestamp, 0, &retbytes);

		/*
		 * We call with STACKSHOT_TRYLOCK because the stackshot lock is coarser
		 * than the bootprofile lock.  If someone else has the lock we'll just
		 * try again later.
		 */

		if (r == KERN_LOCK_OWNED) {
			BOOTPROFILE_UNLOCK();
			goto reprogram;
		}

		if (bootprofile_stackshot_flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT &&
		    r == KERN_SUCCESS) {
			bootprofile_delta_since_timestamp = timestamp;
		}

		bootprofile_buffer_current_position += retbytes;
	}

	BOOTPROFILE_UNLOCK();

	/* If we didn't get any data or have run out of buffer space, stop profiling */
	if ((retbytes == 0) || (bootprofile_buffer_current_position == bootprofile_buffer_size)) {
		return;
	}

reprogram:
	/* If the user gathered the buffer, no need to keep profiling */
	if (bootprofile_interval_abs == 0) {
		return;
	}

	clock_deadline_for_periodic_event(bootprofile_interval_abs,
	    mach_absolute_time(),
	    &bootprofile_next_deadline);
	timer_call_enter_with_leeway(&bootprofile_timer_call_entry,
	    NULL,
	    bootprofile_next_deadline,
	    0,
	    TIMER_CALL_SYS_NORMAL,
	    false);
}

void
bootprofile_get(void **buffer, uint32_t *length)
{
	BOOTPROFILE_LOCK();
	*buffer = (void*) bootprofile_buffer;
	*length = bootprofile_buffer_current_position;
	BOOTPROFILE_UNLOCK();
}

int
bootprofile_gather(user_addr_t buffer, uint32_t *length)
{
	int result = 0;

	BOOTPROFILE_LOCK();

	if (bootprofile_buffer == 0) {
		*length = 0;
		goto out;
	}

	if (*length < bootprofile_buffer_current_position) {
		result = KERN_NO_SPACE;
		goto out;
	}

	if ((result = copyout((void *)bootprofile_buffer, buffer,
	    bootprofile_buffer_current_position)) != 0) {
		*length = 0;
		goto out;
	}
	*length = bootprofile_buffer_current_position;

	/* cancel future timers */
	bootprofile_interval_abs = 0;

out:

	BOOTPROFILE_UNLOCK();

	return result;
}
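
/*
 * Hypothetical usage sketch (for exposition only; the real caller lives
 * outside this file, and the names below are made up):
 *
 *     uint32_t len = user_buffer_size;
 *     int err = bootprofile_gather(user_buffer, &len);
 *     if (err == 0) {
 *         // len now holds the number of stackshot bytes copied out, and
 *         // future profiling timer fires have been cancelled
 *     } else if (err == KERN_NO_SPACE) {
 *         // the supplied buffer was smaller than the data captured so far;
 *         // retry with at least bootprofile_buffer_current_position bytes
 *     }
 */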