1 /*
2 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /*
30 * Called from a trigger. Actually takes the data from the different
31 * modules and puts them in a buffer
32 */
33
34 #include <mach/mach_types.h>
35 #include <machine/machine_routines.h>
36 #include <kern/kalloc.h>
37 #include <kern/debug.h> /* panic */
38 #include <kern/thread.h>
39 #include <sys/errno.h>
40 #include <sys/vm.h>
41 #include <vm/vm_object_xnu.h>
42 #include <vm/vm_page.h>
43 #include <vm/vm_pageout_xnu.h>
44
45 #ifdef CONFIG_EXCLAVES
46 #include <kern/exclaves.tightbeam.h>
47 #endif /* CONFIG_EXCLAVES */
48
49 #include <kperf/action.h>
50 #include <kperf/ast.h>
51 #include <kperf/buffer.h>
52 #include <kperf/callstack.h>
53 #include <kperf/context.h>
54 #include <kperf/kdebug_trigger.h>
55 #include <kperf/kperf.h>
56 #include <kperf/kperf_kpc.h>
57 #include <kperf/kptimer.h>
58 #include <kperf/pet.h>
59 #include <kperf/sample.h>
60 #include <kperf/thread_samplers.h>
61
/* hard upper bound on the number of configurable actions */
#define ACTION_MAX (32)

/* the list of different actions to take */
struct action {
	uint32_t sample;           /* bitmask of SAMPLER_* work to perform */
	uint32_t ucallstack_depth; /* max user callstack frames to record */
	uint32_t kcallstack_depth; /* max kernel callstack frames to record */
	uint32_t userdata;         /* opaque tag logged with each sample */
	int pid_filter;            /* only sample this pid; -1 means any pid */
};

/* the list of actions; actionid N maps to actionv[N - 1], 0 is "NULL" */
static unsigned int actionc = 0;
static struct action *actionv = NULL;

/* should emit tracepoint on context switch */
int kperf_kdebug_cswitch = 0;

/* advertised cap on actions (see kperf_action_set_count) */
int kperf_max_actions = ACTION_MAX;
81 bool
kperf_action_has_non_system(unsigned int actionid)82 kperf_action_has_non_system(unsigned int actionid)
83 {
84 if (actionid > actionc) {
85 return false;
86 }
87
88 if (actionv[actionid - 1].sample & ~SAMPLER_SYS_MEM) {
89 return true;
90 } else {
91 return false;
92 }
93 }
94
95 bool
kperf_action_has_task(unsigned int actionid)96 kperf_action_has_task(unsigned int actionid)
97 {
98 if (actionid > actionc) {
99 return false;
100 }
101
102 return actionv[actionid - 1].sample & SAMPLER_TASK_MASK;
103 }
104
105 bool
kperf_action_has_thread(unsigned int actionid)106 kperf_action_has_thread(unsigned int actionid)
107 {
108 if (actionid > actionc) {
109 return false;
110 }
111
112 return actionv[actionid - 1].sample & SAMPLER_THREAD_MASK;
113 }
114
/*
 * Emit tracepoints carrying system-wide VM page counters and the
 * memorystatus level, split across three PERF_MI_SYS_DATA* events.
 */
static void
kperf_system_memory_log(void)
{
	extern unsigned int memorystatus_level;

	/* free, wired, external, and total in-use (active+inactive+speculative) */
	BUF_DATA(PERF_MI_SYS_DATA, (uintptr_t)vm_page_free_count,
	    (uintptr_t)vm_page_wire_count, (uintptr_t)vm_page_external_count,
	    (uintptr_t)(vm_page_active_count + vm_page_inactive_count +
	    vm_page_speculative_count));
	/* anonymous, internal, cumulative compressions, compressor-held pages */
	BUF_DATA(PERF_MI_SYS_DATA_2, (uintptr_t)vm_page_anonymous_count,
	    (uintptr_t)vm_page_internal_count,
	    (uintptr_t)vm_pageout_vminfo.vm_pageout_compressions,
	    (uintptr_t)VM_PAGE_COMPRESSOR_COUNT);
	/* secluded (0 when unsupported), purgeable, and memorystatus level */
	BUF_DATA(PERF_MI_SYS_DATA_3,
#if CONFIG_SECLUDED_MEMORY
	    (uintptr_t)vm_page_secluded_count,
#else // CONFIG_SECLUDED_MEMORY
	    0,
#endif // !CONFIG_SECLUDED_MEMORY
	    (uintptr_t)vm_page_purgeable_count,
	    memorystatus_level);
}
137
/*
 * Sample and log the user-phase samplers (user callstack and thread info)
 * for `actionid`.  Thread-dispatch data is only logged here; it is expected
 * to have been captured earlier into sbuf->usample_min (see
 * kperf_ast_sample_min_stack_phase).  Logging runs with interrupts disabled
 * so the START/END event pair is not split by other trace activity.
 */
static void
kperf_sample_user_internal(struct kperf_usample *sbuf,
    struct kperf_context *context, unsigned int actionid,
    unsigned int sample_what)
{
	/* capture phase: gather data before taking interrupts off */
	if (sample_what & SAMPLER_USTACK) {
		kperf_ucallstack_sample(&sbuf->ucallstack, context);
	}
	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_sample(&sbuf->th_info, context);
	}

	boolean_t intren = ml_set_interrupts_enabled(FALSE);

	/*
	 * No userdata or sample_flags for this one.
	 */
	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_START, sample_what, actionid);

	/* log phase: emit everything inside the START/END bracket */
	if (sample_what & SAMPLER_USTACK) {
		kperf_ucallstack_log(&sbuf->ucallstack);
	}
	if (sample_what & SAMPLER_TH_DISPATCH) {
		kperf_thread_dispatch_log(&sbuf->usample_min->th_dispatch);
	}
	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_log(&sbuf->th_info);
	}

	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_END, sample_what);

	ml_set_interrupts_enabled(intren);
}
171
172 static unsigned int
kperf_prepare_sample_what(unsigned int sample_what,unsigned int sample_flags)173 kperf_prepare_sample_what(unsigned int sample_what, unsigned int sample_flags)
174 {
175 /* callstacks should be explicitly ignored */
176 if (sample_flags & SAMPLE_FLAG_EMPTY_CALLSTACK) {
177 sample_what &= ~(SAMPLER_KSTACK | SAMPLER_USTACK | SAMPLER_EXSTACK);
178 }
179 if (sample_flags & SAMPLE_FLAG_ONLY_SYSTEM) {
180 sample_what &= SAMPLER_SYS_MEM;
181 }
182 assert((sample_flags & (SAMPLE_FLAG_THREAD_ONLY | SAMPLE_FLAG_TASK_ONLY))
183 != (SAMPLE_FLAG_THREAD_ONLY | SAMPLE_FLAG_TASK_ONLY));
184 if (sample_flags & SAMPLE_FLAG_THREAD_ONLY) {
185 sample_what &= SAMPLER_THREAD_MASK;
186 }
187 if (sample_flags & SAMPLE_FLAG_TASK_ONLY) {
188 sample_what &= SAMPLER_TASK_MASK;
189 }
190
191 return sample_what;
192 }
193
194 void
kperf_sample_user(struct kperf_usample * sbuf,struct kperf_context * context,unsigned int actionid,unsigned int sample_flags)195 kperf_sample_user(struct kperf_usample *sbuf, struct kperf_context *context,
196 unsigned int actionid, unsigned int sample_flags)
197 {
198 if (actionid == 0 || actionid > actionc) {
199 return;
200 }
201
202 unsigned int sample_what = kperf_prepare_sample_what(
203 actionv[actionid - 1].sample, sample_flags);
204 if (sample_what == 0) {
205 return;
206 }
207
208 unsigned int ucallstack_depth = actionv[actionid - 1].ucallstack_depth;
209 sbuf->ucallstack.kpuc_nframes = ucallstack_depth ?: MAX_UCALLSTACK_FRAMES;
210
211 kperf_sample_user_internal(sbuf, context, actionid, sample_what);
212 }
213
/*
 * Core sampling routine: run the samplers in `sample_what` (as filtered by
 * `sample_flags`) against the thread/task described by `context`, staging
 * results in `sbuf`, then log them to the trace buffer with interrupts
 * disabled.  Samplers that must run in the target thread's own context
 * (user callstack, thread dispatch, exclave callstack) are pended to an AST
 * rather than sampled here.  Always returns SAMPLE_CONTINUE.
 */
static kern_return_t
kperf_sample_internal(struct kperf_sample *sbuf,
    struct kperf_context *context,
    unsigned sample_what, unsigned sample_flags,
    unsigned actionid, unsigned ucallstack_depth)
{
	int pended_ucallstack = 0;
	int pended_th_dispatch = 0;
	/* userdata defaults to the actionid if no tag is configured */
	uint32_t userdata = actionid;
	bool task_only = (sample_flags & SAMPLE_FLAG_TASK_ONLY) != 0;
	bool pended_exclave_callstack = false;
	uint64_t sample_meta_flags = 0;

	sample_what = kperf_prepare_sample_what(sample_what, sample_flags);
	if (sample_what == 0) {
		return SAMPLE_CONTINUE;
	}

	/* tell PET this thread was sampled, unless only task data is wanted */
	if (!task_only) {
		kppet_mark_sampled(context->cur_thread);
	}
	bool is_kernel = (context->cur_pid == 0);

	/* kernel callstack depth comes from the action, if it exists */
	if (actionid && actionid <= actionc) {
		sbuf->kcallstack.kpkc_nframes =
		    actionv[actionid - 1].kcallstack_depth;
	} else {
		sbuf->kcallstack.kpkc_nframes = MAX_KCALLSTACK_FRAMES;
	}

	ucallstack_depth = ucallstack_depth ?: MAX_UCALLSTACK_FRAMES;
	sbuf->kcallstack.kpkc_flags = 0;
	sbuf->usample.ucallstack.kpuc_flags = 0;

	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_sample(&sbuf->th_info, context);

		/* See if we should drop idle thread samples */
		if (!(sample_flags & SAMPLE_FLAG_IDLE_THREADS)) {
			/* NOTE(review): 0x40 appears to be the idle run-mode bit -- confirm */
			if (sbuf->th_info.kpthi_runmode & 0x40) {
				sample_meta_flags |= SAMPLE_META_THREAD_WAS_IDLE;
				goto log_sample;
			}
		}
	}

	if (sample_what & SAMPLER_TH_SNAPSHOT) {
		kperf_thread_snapshot_sample(&(sbuf->th_snapshot), context);
	}
	if (sample_what & SAMPLER_TH_SCHEDULING) {
		kperf_thread_scheduling_sample(&(sbuf->th_scheduling), context);
	}
	if (sample_what & SAMPLER_KSTACK) {
		if (sample_flags & SAMPLE_FLAG_CONTINUATION) {
			kperf_continuation_sample(&(sbuf->kcallstack), context);
		} else if (sample_flags & SAMPLE_FLAG_NON_INTERRUPT) {
			/* outside of interrupt context, backtrace the current thread */
			kperf_backtrace_sample(&(sbuf->kcallstack), context);
		} else {
			kperf_kcallstack_sample(&(sbuf->kcallstack), context);
		}
	}
	if (sample_what & SAMPLER_TK_SNAPSHOT) {
		kperf_task_snapshot_sample(context->cur_task, &(sbuf->tk_snapshot));
	}

	/* user-space data only exists for non-kernel contexts */
	if (!is_kernel) {
		if (sample_what & SAMPLER_MEMINFO) {
			kperf_meminfo_sample(context->cur_task, &(sbuf->meminfo));
		}

		if (sample_flags & SAMPLE_FLAG_PEND_USER) {
			if (sample_what & SAMPLER_USTACK) {
				pended_ucallstack = kperf_ucallstack_pend(context,
				    ucallstack_depth, actionid);
			}

			if (sample_what & SAMPLER_TH_DISPATCH) {
				pended_th_dispatch =
				    kperf_thread_dispatch_pend(context, actionid);
			}
		}
	}

#if CONFIG_EXCLAVES
	/* exclave callstacks are collected later by the inspection thread */
	if (sample_what & SAMPLER_EXSTACK) {
		pended_exclave_callstack = kperf_exclave_callstack_pend(context, actionid);
	}
#endif /* CONFIG_EXCLAVES */

#if CONFIG_CPU_COUNTERS
	if (sample_what & SAMPLER_PMC_THREAD) {
		kperf_kpc_thread_sample(&(sbuf->kpcdata), sample_what);
	} else if (sample_what & SAMPLER_PMC_CPU) {
		kperf_kpc_cpu_sample(&(sbuf->kpcdata), sample_what);
	}
#endif /* CONFIG_CPU_COUNTERS */

log_sample:
	/* lookup the user tag, if any */
	if (actionid && (actionid <= actionc)) {
		userdata = actionv[actionid - 1].userdata;
	}

	/* avoid logging if this sample only pended samples */
	if (sample_flags & SAMPLE_FLAG_PEND_USER &&
	    !(sample_what & ~(SAMPLER_USTACK | SAMPLER_TH_DISPATCH))) {
		return SAMPLE_CONTINUE;
	}

	/* stash the data into the buffer
	 * interrupts off to ensure we don't get split
	 */
	boolean_t enabled = ml_set_interrupts_enabled(FALSE);

	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_START, sample_what,
	    actionid, userdata, sample_flags);

	if (sample_flags & SAMPLE_FLAG_SYSTEM) {
		if (sample_what & SAMPLER_SYS_MEM) {
			kperf_system_memory_log();
		}
	}
	/* idle threads log only the system data and the END event */
	if (sample_meta_flags & SAMPLE_META_THREAD_WAS_IDLE) {
		goto log_sample_end;
	}

	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_log(&sbuf->th_info);
	}
	if (sample_what & SAMPLER_TH_SCHEDULING) {
		kperf_thread_scheduling_log(&(sbuf->th_scheduling));
	}
	if (sample_what & SAMPLER_TH_SNAPSHOT) {
		kperf_thread_snapshot_log(&(sbuf->th_snapshot));
	}
	if (sample_what & SAMPLER_KSTACK) {
		kperf_kcallstack_log(&sbuf->kcallstack);
	}
	if (sample_what & SAMPLER_TH_INSCYC) {
		kperf_thread_inscyc_log(context);
	}
	if (sample_what & SAMPLER_TK_SNAPSHOT) {
		kperf_task_snapshot_log(&(sbuf->tk_snapshot));
	}
	if (sample_what & SAMPLER_TK_INFO) {
		kperf_task_info_log(context);
	}

	/* dump user stuff */
	if (!is_kernel) {
		/* dump meminfo */
		if (sample_what & SAMPLER_MEMINFO) {
			kperf_meminfo_log(&(sbuf->meminfo));
		}

		if (sample_flags & SAMPLE_FLAG_PEND_USER) {
			if (pended_ucallstack) {
				BUF_INFO(PERF_CS_UPEND);
				sample_meta_flags |= SAMPLE_META_UPEND;
			}

			if (pended_th_dispatch) {
				BUF_INFO(PERF_TI_DISPPEND);
			}
		}
	}

	if (pended_exclave_callstack) {
		sample_meta_flags |= SAMPLE_META_EXPEND;
	}

#if CONFIG_CPU_COUNTERS
	if (sample_what & SAMPLER_PMC_CONFIG) {
		kperf_kpc_config_log(&(sbuf->kpcdata));
	}
	if (sample_what & SAMPLER_PMC_THREAD) {
		kperf_kpc_thread_log(&(sbuf->kpcdata));
	} else if (sample_what & SAMPLER_PMC_CPU) {
		kperf_kpc_cpu_log(&(sbuf->kpcdata));
	}
#endif /* CONFIG_CPU_COUNTERS */

log_sample_end:
	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_END, sample_what, sample_meta_flags);

	/* intrs back on */
	ml_set_interrupts_enabled(enabled);

	return SAMPLE_CONTINUE;
}
404
405 /* Translate actionid into sample bits and take a sample */
406 kern_return_t
kperf_sample(struct kperf_sample * sbuf,struct kperf_context * context,unsigned actionid,unsigned sample_flags)407 kperf_sample(struct kperf_sample *sbuf,
408 struct kperf_context *context,
409 unsigned actionid, unsigned sample_flags)
410 {
411 /* work out what to sample, if anything */
412 if ((actionid > actionc) || (actionid == 0)) {
413 return SAMPLE_SHUTDOWN;
414 }
415
416 /* check the pid filter against the context's current pid.
417 * filter pid == -1 means any pid
418 */
419 int pid_filter = actionv[actionid - 1].pid_filter;
420 if ((pid_filter != -1) && (pid_filter != context->cur_pid)) {
421 return SAMPLE_CONTINUE;
422 }
423
424 /* the samplers to run */
425 unsigned int sample_what = actionv[actionid - 1].sample;
426 unsigned int ucallstack_depth = actionv[actionid - 1].ucallstack_depth;
427
428 /* do the actual sample operation */
429 return kperf_sample_internal(sbuf, context, sample_what,
430 sample_flags, actionid, ucallstack_depth);
431 }
432
/*
 * Trigger handler for kdebug tracepoints: if `debugid` matches the kdebug
 * filter, take the configured action's sample for the current thread,
 * pending user-space work to an AST.  `starting_fp` is the frame pointer
 * to begin backtracing from.
 */
void
kperf_kdebug_handler(uint32_t debugid, uintptr_t *starting_fp)
{
	uint32_t sample_flags = SAMPLE_FLAG_NON_INTERRUPT | SAMPLE_FLAG_PEND_USER;
	struct kperf_sample *sample = NULL;
	kern_return_t kr = KERN_SUCCESS;
	int s;

	if (!kperf_kdebug_should_trigger(debugid)) {
		return;
	}

	BUF_VERB(PERF_KDBG_HNDLR | DBG_FUNC_START, debugid);

	thread_t thread = current_thread();
	task_t task = get_threadtask(thread);
	struct kperf_context ctx = {
		.cur_thread = thread,
		.cur_task = task,
		.cur_pid = task_pid(task),
		.trigger_type = TRIGGER_TYPE_KDEBUG,
		.trigger_id = 0,
		.starting_fp = starting_fp,
	};

	/* sample with interrupts disabled, using the per-CPU interrupt buffer */
	s = ml_set_interrupts_enabled(0);

	sample = kperf_intr_sample_buffer();

	kr = kperf_sample(sample, &ctx, kperf_kdebug_get_action(), sample_flags);

	ml_set_interrupts_enabled(s);
	BUF_VERB(PERF_KDBG_HNDLR | DBG_FUNC_END, kr);
}
467
468 /*
469 * Sample using a minimum of stack space during this phase.
470 */
471 static void
kperf_ast_sample_min_stack_phase(struct kperf_usample_min * sbuf_min,struct kperf_context * context,unsigned int sample_what)472 kperf_ast_sample_min_stack_phase(struct kperf_usample_min *sbuf_min,
473 struct kperf_context *context, unsigned int sample_what)
474 {
475 if (sample_what & SAMPLER_TH_DISPATCH) {
476 kperf_thread_dispatch_sample(&sbuf_min->th_dispatch, context);
477 }
478 }
479
480 /*
481 * This function should not be inlined with its caller, which would pollute
482 * the stack usage of the minimum stack phase, above.
483 */
484 __attribute__((noinline))
485 static void
kperf_ast_sample_max_stack_phase(struct kperf_usample_min * sbuf_min,struct kperf_context * context,uint32_t actionid,unsigned int sample_what,unsigned int nframes)486 kperf_ast_sample_max_stack_phase(struct kperf_usample_min *sbuf_min,
487 struct kperf_context *context, uint32_t actionid, unsigned int sample_what,
488 unsigned int nframes)
489 {
490 struct kperf_usample sbuf = { .usample_min = sbuf_min };
491 sbuf.ucallstack.kpuc_nframes = nframes;
492
493 kperf_sample_user_internal(&sbuf, context, actionid, sample_what);
494 }
495
496 /*
497 * This function allocates >2.3KB of the stack. Prevent the compiler from
498 * inlining this function into ast_taken and ensure the stack memory is only
499 * allocated for the kperf AST.
500 */
501 __attribute__((noinline))
502 void
kperf_thread_ast_handler(thread_t thread)503 kperf_thread_ast_handler(thread_t thread)
504 {
505 uint32_t ast = thread->kperf_ast;
506
507 BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_START, thread, ast);
508
509 task_t task = get_threadtask(thread);
510
511 if (task_did_exec(task) || task_is_exec_copy(task)) {
512 BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_END, SAMPLE_CONTINUE);
513 return;
514 }
515
516 struct kperf_context ctx = {
517 .cur_thread = thread,
518 .cur_task = task,
519 .cur_pid = task_pid(task),
520 };
521
522 unsigned int sample_what = 0;
523 if (ast & T_KPERF_AST_DISPATCH) {
524 sample_what |= SAMPLER_TH_DISPATCH;
525 }
526 if (ast & T_KPERF_AST_CALLSTACK) {
527 /* TH_INFO for backwards compatibility */
528 sample_what |= SAMPLER_USTACK | SAMPLER_TH_INFO;
529 }
530
531 unsigned int actionid = T_KPERF_GET_ACTIONID(ast);
532
533 struct kperf_usample_min sbuf_min = { 0 };
534 kperf_ast_sample_min_stack_phase(&sbuf_min, &ctx, sample_what);
535 kperf_ast_sample_max_stack_phase(&sbuf_min, &ctx, actionid, sample_what,
536 T_KPERF_GET_CALLSTACK_DEPTH(ast) ?: MAX_UCALLSTACK_FRAMES);
537
538 BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_END);
539 }
540
541
542 #if CONFIG_EXCLAVES
/* Called from Exclave inspection thread after collecting a sample */
__attribute__((noinline))
void kperf_thread_exclaves_ast_handler(thread_t thread, const stackshot_stackshotentry_s * _Nonnull entry);

/*
 * Log the exclave IPC callstack collected for `thread` into the trace
 * buffer.  `entry` is the tightbeam stackshot entry for the thread's
 * secure context (scid); the whole stack is logged with interrupts
 * disabled so the event bracket is not split.
 */
__attribute__((noinline))
void
kperf_thread_exclaves_ast_handler(thread_t thread, const stackshot_stackshotentry_s * _Nonnull entry)
{
	/* the entry must belong to this thread's exclave IPC context */
	assert3u(entry->scid, ==, thread->th_exclaves_ipc_ctx.scid);
	uint32_t ast = thread->kperf_exclaves_ast;

	BUF_INFO(PERF_AST_EXCLAVES | DBG_FUNC_START, thread, ast);
	unsigned int actionid = T_KPERF_GET_ACTIONID(ast);

	boolean_t intren = ml_set_interrupts_enabled(false);

	__block size_t ipcstack_count = 0;

	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_START, SAMPLER_EXSTACK, actionid);
	if (entry->ipcstack.has_value) {
		/* first pass: count the frames for the header event */
		stackshottypes_ipcstackentry__v_visit(&entry->ipcstack.value, ^(size_t __unused i, const stackshottypes_ipcstackentry_s * _Nonnull __unused ipcstack) {
			ipcstack_count += 1;
		});

		BUF_DATA(PERF_CS_EXSTACKHDR, ipcstack_count, thread->thread_id, entry->scid);

		/* second pass: log each frame */
		stackshottypes_ipcstackentry__v_visit(&entry->ipcstack.value, ^(size_t __unused j, const stackshottypes_ipcstackentry_s * _Nonnull ipcstack) {
			kperf_excallstack_log(ipcstack);
		});
	}
	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_END, SAMPLER_EXSTACK);

	ml_set_interrupts_enabled(intren);

	BUF_INFO(PERF_AST_EXCLAVES | DBG_FUNC_END);
}
579 #endif /* CONFIG_EXCLAVES */
580
581 int
kperf_ast_pend(thread_t thread,uint32_t set_flags,unsigned int set_actionid)582 kperf_ast_pend(thread_t thread, uint32_t set_flags, unsigned int set_actionid)
583 {
584 if (thread != current_thread()) {
585 panic("kperf: pending AST to non-current thread");
586 }
587
588 uint32_t ast = thread->kperf_ast;
589 unsigned int actionid = T_KPERF_GET_ACTIONID(ast);
590 uint32_t flags = ast & T_KPERF_AST_ALL;
591
592 if ((flags | set_flags) != flags || actionid != set_actionid) {
593 ast &= ~T_KPERF_SET_ACTIONID(actionid);
594 ast |= T_KPERF_SET_ACTIONID(set_actionid);
595 ast |= set_flags;
596
597 thread->kperf_ast = ast;
598
599 /* set the actual AST */
600 act_set_kperf(thread);
601 return 1;
602 }
603
604 return 0;
605 }
606
607 void
kperf_ast_set_callstack_depth(thread_t thread,uint32_t depth)608 kperf_ast_set_callstack_depth(thread_t thread, uint32_t depth)
609 {
610 uint32_t ast = thread->kperf_ast;
611 uint32_t existing_depth = T_KPERF_GET_CALLSTACK_DEPTH(ast);
612 if (existing_depth < depth) {
613 ast &= ~T_KPERF_SET_CALLSTACK_DEPTH(existing_depth);
614 ast |= T_KPERF_SET_CALLSTACK_DEPTH(depth);
615 thread->kperf_ast = ast;
616 }
617 }
618
619 int
kperf_kdbg_cswitch_get(void)620 kperf_kdbg_cswitch_get(void)
621 {
622 return kperf_kdebug_cswitch;
623 }
624
625 int
kperf_kdbg_cswitch_set(int newval)626 kperf_kdbg_cswitch_set(int newval)
627 {
628 kperf_kdebug_cswitch = newval;
629 kperf_on_cpu_update();
630
631 return 0;
632 }
633
634 /*
635 * Action configuration
636 */
637 unsigned int
kperf_action_get_count(void)638 kperf_action_get_count(void)
639 {
640 return actionc;
641 }
642
643 int
kperf_action_set_samplers(unsigned actionid,uint32_t samplers)644 kperf_action_set_samplers(unsigned actionid, uint32_t samplers)
645 {
646 if ((actionid > actionc) || (actionid == 0)) {
647 return EINVAL;
648 }
649
650 /* disallow both CPU and thread counters to be sampled in the same
651 * action */
652 if ((samplers & SAMPLER_PMC_THREAD) && (samplers & SAMPLER_PMC_CPU)) {
653 return EINVAL;
654 }
655
656 actionv[actionid - 1].sample = samplers;
657
658 return 0;
659 }
660
661 int
kperf_action_get_samplers(unsigned actionid,uint32_t * samplers_out)662 kperf_action_get_samplers(unsigned actionid, uint32_t *samplers_out)
663 {
664 if ((actionid > actionc)) {
665 return EINVAL;
666 }
667
668 if (actionid == 0) {
669 *samplers_out = 0; /* "NULL" action */
670 } else {
671 *samplers_out = actionv[actionid - 1].sample;
672 }
673
674 return 0;
675 }
676
677 int
kperf_action_set_userdata(unsigned actionid,uint32_t userdata)678 kperf_action_set_userdata(unsigned actionid, uint32_t userdata)
679 {
680 if ((actionid > actionc) || (actionid == 0)) {
681 return EINVAL;
682 }
683
684 actionv[actionid - 1].userdata = userdata;
685
686 return 0;
687 }
688
689 int
kperf_action_get_userdata(unsigned actionid,uint32_t * userdata_out)690 kperf_action_get_userdata(unsigned actionid, uint32_t *userdata_out)
691 {
692 if ((actionid > actionc)) {
693 return EINVAL;
694 }
695
696 if (actionid == 0) {
697 *userdata_out = 0; /* "NULL" action */
698 } else {
699 *userdata_out = actionv[actionid - 1].userdata;
700 }
701
702 return 0;
703 }
704
705 int
kperf_action_set_filter(unsigned actionid,int pid)706 kperf_action_set_filter(unsigned actionid, int pid)
707 {
708 if ((actionid > actionc) || (actionid == 0)) {
709 return EINVAL;
710 }
711
712 actionv[actionid - 1].pid_filter = pid;
713
714 return 0;
715 }
716
717 int
kperf_action_get_filter(unsigned actionid,int * pid_out)718 kperf_action_get_filter(unsigned actionid, int *pid_out)
719 {
720 if ((actionid > actionc)) {
721 return EINVAL;
722 }
723
724 if (actionid == 0) {
725 *pid_out = -1; /* "NULL" action */
726 } else {
727 *pid_out = actionv[actionid - 1].pid_filter;
728 }
729
730 return 0;
731 }
732
733 void
kperf_action_reset(void)734 kperf_action_reset(void)
735 {
736 for (unsigned int i = 0; i < actionc; i++) {
737 kperf_action_set_samplers(i + 1, 0);
738 kperf_action_set_userdata(i + 1, 0);
739 kperf_action_set_filter(i + 1, -1);
740 kperf_action_set_ucallstack_depth(i + 1, MAX_UCALLSTACK_FRAMES);
741 kperf_action_set_kcallstack_depth(i + 1, MAX_KCALLSTACK_FRAMES);
742 }
743 }
744
/*
 * Grow the action array to `count` entries (shrinking is not supported).
 * New entries are zeroed and given default pid filter and callstack depths.
 * Returns 0 on success, EINVAL for a shrink or a count above ACTION_MAX,
 * ENOMEM if allocation fails.  May block (Z_WAITOK).
 */
int
kperf_action_set_count(unsigned count)
{
	struct action *new_actionv = NULL, *old_actionv = NULL;
	unsigned old_count;

	/* easy no-op */
	if (count == actionc) {
		return 0;
	}

	/* TODO: allow shrinking? */
	if (count < actionc) {
		return EINVAL;
	}

	/* cap it for good measure */
	if (count > ACTION_MAX) {
		return EINVAL;
	}

	/* creating the action array for the first time. create a few
	 * more things, too.
	 */
	if (actionc == 0) {
		kperf_setup();
	}

	/* create a new array */
	new_actionv = kalloc_data_tag(count * sizeof(*new_actionv),
	    Z_WAITOK, VM_KERN_MEMORY_DIAG);
	if (new_actionv == NULL) {
		return ENOMEM;
	}

	old_actionv = actionv;
	old_count = actionc;

	/* carry over the existing configuration */
	if (old_actionv != NULL) {
		memcpy(new_actionv, actionv, actionc * sizeof(*actionv));
	}

	/* zero the new tail, then apply per-entry defaults */
	memset(&(new_actionv[actionc]), 0, (count - old_count) * sizeof(*actionv));

	for (unsigned int i = old_count; i < count; i++) {
		new_actionv[i].pid_filter = -1;
		new_actionv[i].ucallstack_depth = MAX_UCALLSTACK_FRAMES;
		new_actionv[i].kcallstack_depth = MAX_KCALLSTACK_FRAMES;
	}

	/* publish the new array before freeing the old one */
	actionv = new_actionv;
	actionc = count;

	kfree_data(old_actionv, old_count * sizeof(*actionv));

	return 0;
}
802
803 int
kperf_action_set_ucallstack_depth(unsigned action_id,uint32_t depth)804 kperf_action_set_ucallstack_depth(unsigned action_id, uint32_t depth)
805 {
806 if ((action_id > actionc) || (action_id == 0)) {
807 return EINVAL;
808 }
809
810 if (depth > MAX_UCALLSTACK_FRAMES) {
811 return EINVAL;
812 }
813 if (depth < 2) {
814 return EINVAL;
815 }
816
817 actionv[action_id - 1].ucallstack_depth = depth;
818
819 return 0;
820 }
821
822 int
kperf_action_set_kcallstack_depth(unsigned action_id,uint32_t depth)823 kperf_action_set_kcallstack_depth(unsigned action_id, uint32_t depth)
824 {
825 if ((action_id > actionc) || (action_id == 0)) {
826 return EINVAL;
827 }
828
829 if (depth > MAX_KCALLSTACK_FRAMES) {
830 return EINVAL;
831 }
832 if (depth < 1) {
833 return EINVAL;
834 }
835
836 actionv[action_id - 1].kcallstack_depth = depth;
837
838 return 0;
839 }
840
841 int
kperf_action_get_ucallstack_depth(unsigned action_id,uint32_t * depth_out)842 kperf_action_get_ucallstack_depth(unsigned action_id, uint32_t * depth_out)
843 {
844 if ((action_id > actionc)) {
845 return EINVAL;
846 }
847
848 assert(depth_out);
849
850 if (action_id == 0) {
851 *depth_out = MAX_UCALLSTACK_FRAMES;
852 } else {
853 *depth_out = actionv[action_id - 1].ucallstack_depth;
854 }
855
856 return 0;
857 }
858
859 int
kperf_action_get_kcallstack_depth(unsigned action_id,uint32_t * depth_out)860 kperf_action_get_kcallstack_depth(unsigned action_id, uint32_t * depth_out)
861 {
862 if ((action_id > actionc)) {
863 return EINVAL;
864 }
865
866 assert(depth_out);
867
868 if (action_id == 0) {
869 *depth_out = MAX_KCALLSTACK_FRAMES;
870 } else {
871 *depth_out = actionv[action_id - 1].kcallstack_depth;
872 }
873
874 return 0;
875 }
876