/*
 * Copyright (c) 2011-2018 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Profile Every Thread (PET) provides a profile of all threads on the system
 * when a timer fires.  PET supports the "record waiting threads" mode in
 * Instruments, and used to be called All Thread States (ATS).  New tools should
 * adopt the lightweight PET mode, which provides the same information, but with
 * much less overhead.
 *
 * When traditional (non-lightweight) PET is active, a migrating timer call
 * causes the PET thread to wake up.  The timer handler also issues a broadcast
 * IPI to the other CPUs, to provide a (somewhat) synchronized set of on-core
 * samples.  This is provided for backwards-compatibility with clients that
 * expect on-core samples, from when PET's timer was based on the on-core
 * timers.  Because PET sampling can take on the order of milliseconds, the
 * PET thread will enter a new timer deadline after it finishes sampling.
 * This perturbs the timer cadence by the duration of PET sampling, but it
 * leaves the system free to work on non-profiling tasks for the duration of
 * the timer period.
 *
 * Lightweight PET samples the system less intrusively than normal PET
 * mode.  Instead of iterating tasks and threads on each sample, it increments
 * a global generation count, `kppet_gencount`, which is checked as threads are
 * context switched on-core.  If the thread's local generation count is older
 * than the global generation, the thread samples itself.
 *
 *            |  |
 * thread A   +--+---------|
 *            |  |
 * thread B   |--+---------------|
 *            |  |
 * thread C   |  |         |-------------------------------------
 *            |  |         |
 * thread D   |  |         |     |-------------------------------
 *            |  |         |     |
 *            +--+---------+-----+--------------------------------> time
 *               |         |     |
 *               |         +-----+--- threads sampled when they come on-core in
 *               |                    kperf_pet_switch_context
 *               |
 *               +--- PET timer fire, sample on-core threads A and B,
 *                    increment kppet_gencount
 */

#include <mach/mach_types.h>
#include <sys/errno.h>

#include <kperf/kperf.h>
#include <kperf/buffer.h>
#include <kperf/sample.h>
#include <kperf/context.h>
#include <kperf/action.h>
#include <kperf/pet.h>
#include <kperf/kptimer.h>

#include <kern/task.h>
#include <kern/kalloc.h>
#if defined(__x86_64__)
#include <i386/mp.h>
#endif /* defined(__x86_64__) */

static LCK_MTX_DECLARE(kppet_mtx, &kperf_lck_grp);

static struct {
	unsigned int g_actionid;
	/*
	 * The idle rate controls how many sampling periods to skip if a thread
	 * is idle.
	 */
	uint32_t g_idle_rate;
	bool g_setup:1;
	bool g_lightweight:1;
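	/*
	 * The buffer that the PET thread samples into and the thread itself,
	 * both created lazily the first time PET is configured.
	 */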
	struct kperf_sample *g_sample;

	thread_t g_sample_thread;

	/*
	 * Used by the PET thread to manage which threads and tasks to sample.
	 */
	thread_t *g_threads;
	unsigned int g_nthreads;
	size_t g_threads_count;

	task_t *g_tasks;
	unsigned int g_ntasks;
	size_t g_tasks_count;
} kppet = {
	.g_actionid = 0,
	.g_idle_rate = KPERF_PET_DEFAULT_IDLE_RATE,
};

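/*
 * Lightweight PET state, checked in the context-switch path (see
 * kppet_on_cpu below) rather than under `kppet_mtx`.
 */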
bool kppet_lightweight_active = false;
_Atomic uint32_t kppet_gencount = 0;

static uint64_t kppet_sample_tasks(uint32_t idle_rate);
static void kppet_thread(void * param, wait_result_t wr);

static void
kppet_lock_assert_owned(void)
{
	lck_mtx_assert(&kppet_mtx, LCK_MTX_ASSERT_OWNED);
}

static void
kppet_lock(void)
{
	lck_mtx_lock(&kppet_mtx);
}

static void
kppet_unlock(void)
{
	lck_mtx_unlock(&kppet_mtx);
}

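/*
 * Called as threads are context switched on-core (see the diagram at the
 * top of this file).  If the thread hasn't been sampled for the current
 * `kppet_gencount` generation, sample it in place, using a per-CPU buffer
 * since interrupts are disabled.
 */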
void
kppet_on_cpu(thread_t thread, thread_continue_t continuation,
    uintptr_t *starting_fp)
{
	assert(thread != NULL);
	assert(ml_get_interrupts_enabled() == FALSE);

	uint32_t actionid = kppet.g_actionid;
	if (actionid == 0) {
		return;
	}

	if (thread->kperf_pet_gen != atomic_load(&kppet_gencount)) {
		BUF_VERB(PERF_PET_SAMPLE_THREAD | DBG_FUNC_START,
		    atomic_load_explicit(&kppet_gencount,
		    memory_order_relaxed), thread->kperf_pet_gen);

		task_t task = get_threadtask(thread);
		struct kperf_context ctx = {
			.cur_thread = thread,
			.cur_task = task,
			.cur_pid = task_pid(task),
			.starting_fp = starting_fp,
		};
		/*
		 * Use a per-CPU interrupt buffer, since this is only called
		 * while interrupts are disabled, from the scheduler.
		 */
		struct kperf_sample *sample = kperf_intr_sample_buffer();
		if (!sample) {
			BUF_VERB(PERF_PET_SAMPLE_THREAD | DBG_FUNC_END, 1);
			return;
		}

		unsigned int flags = SAMPLE_FLAG_NON_INTERRUPT | SAMPLE_FLAG_PEND_USER;
		if (continuation != NULL) {
			flags |= SAMPLE_FLAG_CONTINUATION;
		}
		kperf_sample(sample, &ctx, actionid, flags);

		BUF_VERB(PERF_PET_SAMPLE_THREAD | DBG_FUNC_END);
	} else {
		BUF_VERB(PERF_PET_SAMPLE_THREAD,
		    os_atomic_load(&kppet_gencount, relaxed), thread->kperf_pet_gen);
	}
}

#pragma mark - state transitions

/*
 * Lazily initialize PET.  Once the PET thread has been created, it never
 * exits.
 */
static void
kppet_setup(void)
{
	if (kppet.g_setup) {
		return;
	}

	kern_return_t kr = kernel_thread_start(kppet_thread, NULL,
	    &kppet.g_sample_thread);
	if (kr != KERN_SUCCESS) {
		panic("kperf: failed to create PET thread %d", kr);
	}

	thread_set_thread_name(kppet.g_sample_thread, "kperf-pet-sampling");
	kppet.g_setup = true;
}

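/*
 * Set the action for PET to sample with, allocating the sample buffer on
 * first use and freeing all sampling resources when `actionid` is 0.
 */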
void
kppet_config(unsigned int actionid)
{
	/*
	 * Resetting kperf shouldn't get the PET thread started.
	 */
	if (actionid == 0 && !kppet.g_setup) {
		return;
	}

	kppet_setup();

	kppet_lock();

	kppet.g_actionid = actionid;

	if (actionid > 0) {
		if (!kppet.g_sample) {
			kppet.g_sample = kalloc_type_tag(struct kperf_sample,
			    Z_WAITOK | Z_NOFAIL, VM_KERN_MEMORY_DIAG);
			kppet.g_sample->usample.usample_min = kalloc_type_tag(
				struct kperf_usample_min, Z_WAITOK | Z_NOFAIL, VM_KERN_MEMORY_DIAG);
		}
	} else {
		if (kppet.g_tasks) {
			assert(kppet.g_tasks_count != 0);
			kfree_type(task_t, kppet.g_tasks_count, kppet.g_tasks);
			kppet.g_tasks = NULL;
			kppet.g_tasks_count = 0;
			kppet.g_ntasks = 0;
		}
		if (kppet.g_threads) {
			assert(kppet.g_threads_count != 0);
			kfree_type(thread_t, kppet.g_threads_count, kppet.g_threads);
			kppet.g_threads = NULL;
			kppet.g_threads_count = 0;
			kppet.g_nthreads = 0;
		}
		if (kppet.g_sample != NULL) {
			kfree_type(struct kperf_usample_min,
			    kppet.g_sample->usample.usample_min);
			kfree_type(struct kperf_sample, kppet.g_sample);
		}
	}

	kppet_unlock();
}

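/*
 * Return PET to its default, inactive state.
 */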
void
kppet_reset(void)
{
	kppet_config(0);
	kppet_set_idle_rate(KPERF_PET_DEFAULT_IDLE_RATE);
	kppet_set_lightweight_pet(0);
}

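/*
 * Called when the PET timer fires, to wake the sampling thread.
 */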
void
kppet_wake_thread(void)
{
	thread_wakeup(&kppet);
}

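/*
 * The main loop of the PET thread: sleep until an action is configured and
 * the timer fires, sample all tasks, and report the sampling duration back
 * to the timer system so the next deadline starts after sampling finishes
 * (see the overview at the top of this file).
 */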
__attribute__((noreturn))
static void
kppet_thread(void * __unused param, wait_result_t __unused wr)
{
	kppet_lock();

	for (;;) {
		BUF_INFO(PERF_PET_IDLE);

		do {
			(void)lck_mtx_sleep(&kppet_mtx, LCK_SLEEP_DEFAULT, &kppet,
			    THREAD_UNINT);
		} while (kppet.g_actionid == 0);

		BUF_INFO(PERF_PET_RUN);

		uint64_t sampledur_abs = kppet_sample_tasks(kppet.g_idle_rate);

		kptimer_pet_enter(sampledur_abs);
	}
}

#pragma mark - sampling

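/*
 * Sample a single thread of a task, using `idle_rate` to limit how often
 * the callstacks of idle threads are sampled.
 */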
static void
kppet_sample_thread(int pid, task_t task, thread_t thread, uint32_t idle_rate)
{
	kppet_lock_assert_owned();

	uint32_t sample_flags = SAMPLE_FLAG_IDLE_THREADS |
	    SAMPLE_FLAG_THREAD_ONLY;

	BUF_VERB(PERF_PET_SAMPLE_THREAD | DBG_FUNC_START);

	struct kperf_context ctx = {
		.cur_thread = thread,
		.cur_task = task,
		.cur_pid = pid,
	};

	boolean_t thread_dirty = kperf_thread_get_dirty(thread);

	/*
	 * Clean a dirty thread, or skip the callstack sample if the thread
	 * was not dirty and has skipped fewer than `idle_rate` samples.
	 */
	if (thread_dirty) {
		kperf_thread_set_dirty(thread, FALSE);
	} else if ((thread->kperf_pet_cnt % idle_rate) != 0) {
		sample_flags |= SAMPLE_FLAG_EMPTY_CALLSTACK;
	}
	thread->kperf_pet_cnt++;

	kperf_sample(kppet.g_sample, &ctx, kppet.g_actionid, sample_flags);
	kperf_sample_user(&kppet.g_sample->usample, &ctx, kppet.g_actionid,
	    sample_flags);

	BUF_VERB(PERF_PET_SAMPLE_THREAD | DBG_FUNC_END);
}

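/*
 * Store and reference all threads of `task` in `kppet.g_threads`, growing
 * the array as needed, so they can be sampled after the task lock is
 * dropped.  Mirrors kppet_tasks_prepare, below.
 */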
static kern_return_t
kppet_threads_prepare(task_t task)
{
	kppet_lock_assert_owned();

	vm_size_t count_needed;

	for (;;) {
		task_lock(task);

		if (!task->active) {
			task_unlock(task);
			return KERN_FAILURE;
		}

		/*
		 * With the task locked, figure out if enough space has been allocated to
		 * contain all of the thread references.
		 */
		count_needed = task->thread_count;
		if (count_needed <= kppet.g_threads_count) {
			break;
		}

		/*
		 * Otherwise, allocate more and try again.
		 */
		task_unlock(task);

		kfree_type(thread_t, kppet.g_threads_count, kppet.g_threads);

		assert(count_needed > 0);
		kppet.g_threads_count = count_needed;

		kppet.g_threads = kalloc_type_tag(thread_t, kppet.g_threads_count,
		    Z_WAITOK | Z_ZERO, VM_KERN_MEMORY_DIAG);
		if (kppet.g_threads == NULL) {
			kppet.g_threads_count = 0;
			return KERN_RESOURCE_SHORTAGE;
		}
	}

	thread_t thread;
	kppet.g_nthreads = 0;
	queue_iterate(&(task->threads), thread, thread_t, task_threads) {
		thread_reference(thread);
		kppet.g_threads[kppet.g_nthreads++] = thread;
	}

	task_unlock(task);

	return (kppet.g_nthreads > 0) ? KERN_SUCCESS : KERN_FAILURE;
}

/*
 * Sample a `task`, using `idle_rate` to control whether idle threads need to be
 * re-sampled.
 *
 * The task must be referenced.
 */
static void
kppet_sample_task(task_t task, uint32_t idle_rate)
{
	kppet_lock_assert_owned();
	assert(task != kernel_task);
	if (task == kernel_task) {
		return;
	}

	BUF_VERB(PERF_PET_SAMPLE_TASK | DBG_FUNC_START);

	int pid = task_pid(task);
	if (kperf_action_has_task(kppet.g_actionid)) {
		struct kperf_context ctx = {
			.cur_task = task,
			.cur_pid = pid,
		};

		kperf_sample(kppet.g_sample, &ctx, kppet.g_actionid,
		    SAMPLE_FLAG_TASK_ONLY);
	}

	if (!kperf_action_has_thread(kppet.g_actionid)) {
		BUF_VERB(PERF_PET_SAMPLE_TASK | DBG_FUNC_END);
		return;
	}

	/*
	 * Suspend the task to see an atomic snapshot of all its threads.  This
	 * is expensive and disruptive.
	 */
	kern_return_t kr = task_suspend_internal(task);
	if (kr != KERN_SUCCESS) {
		BUF_VERB(PERF_PET_SAMPLE_TASK | DBG_FUNC_END, 1);
		return;
	}

	kr = kppet_threads_prepare(task);
	if (kr != KERN_SUCCESS) {
		BUF_INFO(PERF_PET_ERROR, ERR_THREAD, kr);
		goto out;
	}

	for (unsigned int i = 0; i < kppet.g_nthreads; i++) {
		thread_t thread = kppet.g_threads[i];
		assert(thread != THREAD_NULL);

		kppet_sample_thread(pid, task, thread, idle_rate);

		thread_deallocate(kppet.g_threads[i]);
	}

out:
	task_resume_internal(task);

	BUF_VERB(PERF_PET_SAMPLE_TASK | DBG_FUNC_END, kppet.g_nthreads);
}

/*
 * Store and reference all tasks on the system, so they can be safely inspected
 * outside the `tasks_threads_lock`.
 */
static kern_return_t
kppet_tasks_prepare(void)
{
	kppet_lock_assert_owned();

	vm_size_t count_needed = 0;

	for (;;) {
		lck_mtx_lock(&tasks_threads_lock);

		/*
		 * With the lock held, break out of the lock/unlock loop if
		 * there's enough space to store all the tasks.
		 */
		count_needed = tasks_count;
		if (count_needed <= kppet.g_tasks_count) {
			break;
		}

		/*
		 * Otherwise, allocate more memory outside of the lock.
		 */
		lck_mtx_unlock(&tasks_threads_lock);

		if (count_needed > kppet.g_tasks_count) {
			if (kppet.g_tasks_count != 0) {
				kfree_type(task_t, kppet.g_tasks_count, kppet.g_tasks);
			}

			assert(count_needed > 0);
			kppet.g_tasks_count = count_needed;

			kppet.g_tasks = kalloc_type_tag(task_t, kppet.g_tasks_count,
			    Z_WAITOK | Z_ZERO, VM_KERN_MEMORY_DIAG);
			if (!kppet.g_tasks) {
				kppet.g_tasks_count = 0;
				return KERN_RESOURCE_SHORTAGE;
			}
		}
	}

	task_t task = TASK_NULL;
	kppet.g_ntasks = 0;
	queue_iterate(&tasks, task, task_t, tasks) {
		bool eligible_task = task != kernel_task;
		if (eligible_task) {
			task_reference(task);
			kppet.g_tasks[kppet.g_ntasks++] = task;
		}
	}

	lck_mtx_unlock(&tasks_threads_lock);

	return KERN_SUCCESS;
}

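/*
 * Sample every eligible task on the system and return how long that took,
 * in Mach absolute time units, so the PET thread can adjust the next timer
 * deadline.
 */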
static uint64_t
kppet_sample_tasks(uint32_t idle_rate)
{
	kppet_lock_assert_owned();
	assert(kppet.g_actionid > 0);

	uint64_t start_abs = mach_absolute_time();

	BUF_INFO(PERF_PET_SAMPLE | DBG_FUNC_START);

	kern_return_t kr = kppet_tasks_prepare();
	if (kr != KERN_SUCCESS) {
		BUF_INFO(PERF_PET_ERROR, ERR_TASK, kr);
		BUF_INFO(PERF_PET_SAMPLE | DBG_FUNC_END);
		return mach_absolute_time() - start_abs;
	}

	for (unsigned int i = 0; i < kppet.g_ntasks; i++) {
		task_t task = kppet.g_tasks[i];
		assert(task != TASK_NULL);
		kppet_sample_task(task, idle_rate);
		task_deallocate(task);
		kppet.g_tasks[i] = TASK_NULL;
	}

	BUF_INFO(PERF_PET_SAMPLE | DBG_FUNC_END, kppet.g_ntasks);
	kppet.g_ntasks = 0;
	return mach_absolute_time() - start_abs;
}

#pragma mark - sysctl accessors

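/*
 * These accessors back kperf's sysctl interface; the idle rate and
 * lightweight flag are read and written without taking `kppet_mtx`.
 */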
int
kppet_get_idle_rate(void)
{
	return kppet.g_idle_rate;
}

int
kppet_set_idle_rate(int new_idle_rate)
{
	kppet.g_idle_rate = new_idle_rate;
	return 0;
}

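/*
 * Recompute whether lightweight PET should be consulted on context switch
 * and notify kperf that its on-CPU hooks may need to change.
 */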
void
kppet_lightweight_active_update(void)
{
	kppet_lightweight_active = (kperf_is_sampling() && kppet.g_lightweight);
	kperf_on_cpu_update();
}

int
kppet_get_lightweight_pet(void)
{
	return kppet.g_lightweight;
}

int
kppet_set_lightweight_pet(int on)
{
	if (kperf_is_sampling()) {
		return EBUSY;
	}

	kppet.g_lightweight = (on == 1);
	kppet_lightweight_active_update();
	return 0;
}