/* xref: /xnu-8796.101.5/osfmk/kperf/kperf.c (revision aca3beaa3dfbd42498b42c5e5ce20a938e6554e5) */
1 /*
2  * Copyright (c) 2011-2018 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <kern/ipc_tt.h> /* port_name_to_task */
30 #include <kern/thread.h>
31 #include <kern/machine.h>
32 #include <kern/kalloc.h>
33 #include <mach/mach_types.h>
34 #include <sys/errno.h>
35 #include <sys/ktrace.h>
36 
37 #include <kperf/action.h>
38 #include <kperf/buffer.h>
39 #include <kperf/kdebug_trigger.h>
40 #include <kperf/kperf.h>
41 #include <kperf/kptimer.h>
42 #include <kperf/lazy.h>
43 #include <kperf/pet.h>
44 #include <kperf/sample.h>
45 
/* from libkern/libkern.h */
extern uint64_t strtouq(const char *, char **, int);

/* lock group shared by kperf's subsystems */
LCK_GRP_DECLARE(kperf_lck_grp, "kperf");

/* one wired sample buffer per CPU */
static struct kperf_sample *__zpercpu intr_samplev;

/* current sampling status */
enum kperf_sampling _Atomic kperf_status = KPERF_SAMPLING_OFF;

/*
 * Only set up kperf once.
 */
static bool kperf_is_setup = false;

/* whether or not to callback to kperf on context switch */
boolean_t kperf_on_cpu_active = FALSE;

/* action IDs fired by the corresponding triggers (0 means disabled) */
unsigned int kperf_thread_blocked_action;
unsigned int kperf_cpu_sample_action;
67 
68 struct kperf_sample *
kperf_intr_sample_buffer(void)69 kperf_intr_sample_buffer(void)
70 {
71 	assert(ml_get_interrupts_enabled() == FALSE);
72 
73 	return zpercpu_get(intr_samplev);
74 }
75 
/*
 * Early boot initialization: apply any "kperf" boot-arg configuration.
 */
void
kperf_init_early(void)
{
	/*
	 * kperf allocates based on the number of CPUs and requires them to all be
	 * accounted for.
	 */
	ml_wait_max_cpus();

	char config_str[64];
	if (PE_parse_boot_arg_str("kperf", config_str, sizeof(config_str)) &&
	    config_str[0] != '\0') {
		kperf_kernel_configure(config_str);
	}
}
92 
/*
 * Later-stage initialization: bring up kperf's timer subsystem.
 */
void
kperf_init(void)
{
	kptimer_init();
}
98 
99 void
kperf_setup(void)100 kperf_setup(void)
101 {
102 	if (kperf_is_setup) {
103 		return;
104 	}
105 
106 	intr_samplev = zalloc_percpu_permanent_type(struct kperf_sample);
107 
108 	kperf_kdebug_setup();
109 	kperf_is_setup = true;
110 }
111 
/*
 * Tear down all kperf state: stop sampling, then reset each trigger
 * subsystem, and finally the actions they reference.
 */
void
kperf_reset(void)
{
	/*
	 * Make sure samples aren't being taken before tearing everything down.
	 */
	(void)kperf_disable_sampling();

	/* reset the trigger sources before the actions they fire */
	kperf_lazy_reset();
	(void)kperf_kdbg_cswitch_set(0);
	kperf_kdebug_reset();
	kptimer_reset();
	kppet_reset();

	/*
	 * Most of the other systems call into actions, so reset them last.
	 */
	kperf_action_reset();
}
131 
/*
 * Configure kperf from a boot-arg string and start sampling.
 *
 * The format, as implemented below, is a comma-separated list of pairs:
 *
 *     [p]<action samplers><separator><timer period in ns>[,<samplers><sep><period>...]
 *
 * A leading 'p' requests PET: the first timer becomes the PET timer and
 * lightweight PET is enabled. For each pair N (1-based), action N is given
 * the parsed sampler set and timer N-1 (0-based) fires action N with the
 * parsed period. On any parse failure, sampling is not enabled.
 */
void
kperf_kernel_configure(const char *config)
{
	int pairs = 0;
	char *end;
	bool pet = false;

	assert(config != NULL);

	ktrace_start_single_threaded();

	ktrace_kernel_configure(KTRACE_KPERF);

	/* optional leading 'p' selects PET for the first timer */
	if (config[0] == 'p') {
		pet = true;
		config++;
	}

	do {
		uint32_t action_samplers;
		uint64_t timer_period_ns;
		uint64_t timer_period;

		/* grow the action and timer tables to hold this pair */
		pairs += 1;
		kperf_action_set_count(pairs);
		kptimer_set_count(pairs);

		action_samplers = (uint32_t)strtouq(config, &end, 0);
		if (config == end) {
			kprintf("kperf: unable to parse '%s' as action sampler\n", config);
			goto out;
		}
		config = end;

		kperf_action_set_samplers(pairs, action_samplers);

		/* skip the single separator character between samplers and period */
		if (config[0] == '\0') {
			kprintf("kperf: missing timer period in config\n");
			goto out;
		}
		config++;

		timer_period_ns = strtouq(config, &end, 0);
		if (config == end) {
			kprintf("kperf: unable to parse '%s' as timer period\n", config);
			goto out;
		}
		nanoseconds_to_absolutetime(timer_period_ns, &timer_period);
		config = end;

		/* timers are 0-based; actions are 1-based */
		kptimer_set_period(pairs - 1, timer_period);
		kptimer_set_action(pairs - 1, pairs);

		if (pet) {
			/* only the first timer can be the PET timer */
			kptimer_set_pet_timerid(pairs - 1);
			kppet_set_lightweight_pet(1);
			pet = false;
		}
	} while (*(config++) == ',');

	int error = kperf_enable_sampling();
	if (error) {
		printf("kperf: cannot enable sampling at boot: %d\n", error);
	}

out:
	ktrace_end_single_threaded();
}
200 
201 void kperf_on_cpu_internal(thread_t thread, thread_continue_t continuation,
202     uintptr_t *starting_fp);
203 void
kperf_on_cpu_internal(thread_t thread,thread_continue_t continuation,uintptr_t * starting_fp)204 kperf_on_cpu_internal(thread_t thread, thread_continue_t continuation,
205     uintptr_t *starting_fp)
206 {
207 	if (kperf_kdebug_cswitch) {
208 		/* trace the new thread's PID for Instruments */
209 		int pid = task_pid(get_threadtask(thread));
210 		BUF_DATA(PERF_TI_CSWITCH, thread_tid(thread), pid);
211 	}
212 	if (kppet_lightweight_active) {
213 		kppet_on_cpu(thread, continuation, starting_fp);
214 	}
215 	if (kperf_lazy_wait_action != 0) {
216 		kperf_lazy_wait_sample(thread, continuation, starting_fp);
217 	}
218 }
219 
220 void
kperf_on_cpu_update(void)221 kperf_on_cpu_update(void)
222 {
223 	kperf_on_cpu_active = kperf_kdebug_cswitch ||
224 	    kppet_lightweight_active ||
225 	    kperf_lazy_wait_action != 0;
226 }
227 
228 bool
kperf_is_sampling(void)229 kperf_is_sampling(void)
230 {
231 	return os_atomic_load(&kperf_status, acquire) == KPERF_SAMPLING_ON;
232 }
233 
/*
 * Enable sampling by transitioning the status atomically from OFF to ON.
 *
 * Returns 0 on success (or if sampling was already on), ECANCELED if kperf
 * has not been set up or no actions are configured. Panics if a concurrent
 * shutdown is observed.
 */
int
kperf_enable_sampling(void)
{
	if (!kperf_is_setup || kperf_action_get_count() == 0) {
		return ECANCELED;
	}

	enum kperf_sampling prev_status = KPERF_SAMPLING_ON;
	int ok = os_atomic_cmpxchgv(&kperf_status, KPERF_SAMPLING_OFF,
	    KPERF_SAMPLING_ON, &prev_status, seq_cst);
	if (!ok) {
		if (prev_status == KPERF_SAMPLING_ON) {
			/* already sampling -- nothing to do */
			return 0;
		}
		/* saw KPERF_SAMPLING_SHUTDOWN (or garbage) -- a state machine bug */
		panic("kperf: sampling was %d when asked to enable", prev_status);
	}

	/* only start the timers once the status is visible as ON */
	kppet_lightweight_active_update();
	kptimer_start();

	return 0;
}
256 
/*
 * Disable sampling with a two-phase handshake: ON -> SHUTDOWN while the
 * timers are stopped, then SHUTDOWN -> OFF. The intermediate state keeps a
 * concurrent enable from racing with the teardown.
 *
 * Returns 0 on success (or if sampling was already off); panics on an
 * unexpected state.
 */
int
kperf_disable_sampling(void)
{
	enum kperf_sampling prev_status = KPERF_SAMPLING_ON;
	int ok = os_atomic_cmpxchgv(&kperf_status, KPERF_SAMPLING_ON,
	    KPERF_SAMPLING_SHUTDOWN, &prev_status, seq_cst);
	if (!ok) {
		if (prev_status == KPERF_SAMPLING_OFF) {
			/* already off -- nothing to do */
			return 0;
		}
		panic("kperf: sampling was %d when asked to disable", prev_status);
	}

	/* stop the timers while in the SHUTDOWN state */
	kptimer_stop();

	ok = os_atomic_cmpxchgv(&kperf_status, KPERF_SAMPLING_SHUTDOWN,
	    KPERF_SAMPLING_OFF, &prev_status, seq_cst);
	if (!ok) {
		/* nothing else should touch the status during SHUTDOWN */
		panic("kperf: sampling was %d during disable", prev_status);
	}
	kppet_lightweight_active_update();

	return 0;
}
281 
282 void
kperf_timer_expire(void * param0,void * __unused param1)283 kperf_timer_expire(void *param0, void * __unused param1)
284 {
285 	processor_t processor = param0;
286 	int cpuid = processor->cpu_id;
287 
288 	kptimer_expire(processor, cpuid, mach_absolute_time());
289 }
290 
291 boolean_t
kperf_thread_get_dirty(thread_t thread)292 kperf_thread_get_dirty(thread_t thread)
293 {
294 	return thread->c_switch != thread->kperf_c_switch;
295 }
296 
297 void
kperf_thread_set_dirty(thread_t thread,boolean_t dirty)298 kperf_thread_set_dirty(thread_t thread, boolean_t dirty)
299 {
300 	if (dirty) {
301 		thread->kperf_c_switch = thread->c_switch - 1;
302 	} else {
303 		thread->kperf_c_switch = thread->c_switch;
304 	}
305 }
306 
307 int
kperf_port_to_pid(mach_port_name_t portname)308 kperf_port_to_pid(mach_port_name_t portname)
309 {
310 	if (!MACH_PORT_VALID(portname)) {
311 		return -1;
312 	}
313 
314 	task_t task = port_name_to_task(portname);
315 	if (task == TASK_NULL) {
316 		return -1;
317 	}
318 
319 	pid_t pid = task_pid(task);
320 
321 
322 	assert(os_ref_get_count(&task->ref_count) > 1);
323 	task_deallocate(task);
324 
325 	return pid;
326 }
327