/*
 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Called from a trigger: collects the data from the various sampler
 * modules and emits it into the trace buffer.
 */

#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/kalloc.h>
#include <kern/debug.h> /* panic */
#include <kern/thread.h>
#include <sys/errno.h>
#include <sys/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <kperf/action.h>
#include <kperf/ast.h>
#include <kperf/buffer.h>
#include <kperf/callstack.h>
#include <kperf/context.h>
#include <kperf/kdebug_trigger.h>
#include <kperf/kperf.h>
#include <kperf/kperf_kpc.h>
#include <kperf/kptimer.h>
#include <kperf/pet.h>
#include <kperf/sample.h>
#include <kperf/thread_samplers.h>

#define ACTION_MAX (32)

/* the configuration of a single action */
struct action {
	uint32_t sample;
	uint32_t ucallstack_depth;
	uint32_t kcallstack_depth;
	uint32_t userdata;
	int pid_filter;
};
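
/*
 * Action IDs are 1-based indices into the actionv array; an actionid of
 * 0 means "no action" and is treated as a NULL action by the accessors
 * below.
 */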

/* the global array of actions */
static unsigned int actionc = 0;
static struct action *actionv = NULL;

/* whether to emit a tracepoint on context switch */
int kperf_kdebug_cswitch = 0;

int kperf_max_actions = ACTION_MAX;
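
/*
 * Predicates describing which samplers a configured action will run.
 */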
bool
kperf_action_has_non_system(unsigned int actionid)
{
	if (actionid == 0 || actionid > actionc) {
		return false;
	}

	/* does the action sample anything beyond system memory? */
	return (actionv[actionid - 1].sample & ~SAMPLER_SYS_MEM) != 0;
}

bool
kperf_action_has_task(unsigned int actionid)
{
	if (actionid == 0 || actionid > actionc) {
		return false;
	}

	return (actionv[actionid - 1].sample & SAMPLER_TASK_MASK) != 0;
}

bool
kperf_action_has_thread(unsigned int actionid)
{
	if (actionid == 0 || actionid > actionc) {
		return false;
	}

	return (actionv[actionid - 1].sample & SAMPLER_THREAD_MASK) != 0;
}

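/*
 * Emit a snapshot of the system-wide memory counters into the trace
 * buffer.
 */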
static void
kperf_system_memory_log(void)
{
	extern unsigned int memorystatus_level;

	BUF_DATA(PERF_MI_SYS_DATA, (uintptr_t)vm_page_free_count,
	    (uintptr_t)vm_page_wire_count, (uintptr_t)vm_page_external_count,
	    (uintptr_t)(vm_page_active_count + vm_page_inactive_count +
	    vm_page_speculative_count));
	BUF_DATA(PERF_MI_SYS_DATA_2, (uintptr_t)vm_page_anonymous_count,
	    (uintptr_t)vm_page_internal_count,
	    (uintptr_t)vm_pageout_vminfo.vm_pageout_compressions,
	    (uintptr_t)VM_PAGE_COMPRESSOR_COUNT);
	BUF_DATA(PERF_MI_SYS_DATA_3,
#if CONFIG_SECLUDED_MEMORY
	    (uintptr_t)vm_page_secluded_count,
#else // CONFIG_SECLUDED_MEMORY
	    0,
#endif // !CONFIG_SECLUDED_MEMORY
	    (uintptr_t)vm_page_purgeable_count,
	    memorystatus_level);
}

static void
kperf_sample_user_internal(struct kperf_usample *sbuf,
    struct kperf_context *context, unsigned int actionid,
    unsigned int sample_what)
{
	if (sample_what & SAMPLER_USTACK) {
		kperf_ucallstack_sample(&sbuf->ucallstack, context);
	}
	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_sample(&sbuf->th_info, context);
	}

	boolean_t intren = ml_set_interrupts_enabled(FALSE);

	/*
	 * No userdata or sample_flags for this one.
	 */
	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_START, sample_what, actionid);

	if (sample_what & SAMPLER_USTACK) {
		kperf_ucallstack_log(&sbuf->ucallstack);
	}
	if (sample_what & SAMPLER_TH_DISPATCH) {
		kperf_thread_dispatch_log(&sbuf->usample_min->th_dispatch);
	}
	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_log(&sbuf->th_info);
	}

	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_END, sample_what);

	ml_set_interrupts_enabled(intren);
}

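/*
 * Narrow the set of samplers to run based on the sample flags.
 */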
static unsigned int
kperf_prepare_sample_what(unsigned int sample_what, unsigned int sample_flags)
{
	/* callstacks should be explicitly ignored */
	if (sample_flags & SAMPLE_FLAG_EMPTY_CALLSTACK) {
		sample_what &= ~(SAMPLER_KSTACK | SAMPLER_USTACK);
	}
	if (sample_flags & SAMPLE_FLAG_ONLY_SYSTEM) {
		sample_what &= SAMPLER_SYS_MEM;
	}
	assert((sample_flags & (SAMPLE_FLAG_THREAD_ONLY | SAMPLE_FLAG_TASK_ONLY))
	    != (SAMPLE_FLAG_THREAD_ONLY | SAMPLE_FLAG_TASK_ONLY));
	if (sample_flags & SAMPLE_FLAG_THREAD_ONLY) {
		sample_what &= SAMPLER_THREAD_MASK;
	}
	if (sample_flags & SAMPLE_FLAG_TASK_ONLY) {
		sample_what &= SAMPLER_TASK_MASK;
	}

	return sample_what;
}
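
/*
 * For example, with sample_what = SAMPLER_KSTACK | SAMPLER_USTACK |
 * SAMPLER_TH_INFO and sample_flags = SAMPLE_FLAG_EMPTY_CALLSTACK, only
 * SAMPLER_TH_INFO survives the filtering above.
 */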
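/*
 * Run only the user samplers for an action, using the action's
 * configured user call stack depth (or the maximum, if unset).
 */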
void
kperf_sample_user(struct kperf_usample *sbuf, struct kperf_context *context,
    unsigned int actionid, unsigned int sample_flags)
{
	if (actionid == 0 || actionid > actionc) {
		return;
	}

	unsigned int sample_what = kperf_prepare_sample_what(
		actionv[actionid - 1].sample, sample_flags);
	if (sample_what == 0) {
		return;
	}

	unsigned int ucallstack_depth = actionv[actionid - 1].ucallstack_depth;
	sbuf->ucallstack.kpuc_nframes = ucallstack_depth ?: MAX_UCALLSTACK_FRAMES;

	kperf_sample_user_internal(sbuf, context, actionid, sample_what);
}

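/*
 * The core sampling routine: run the samplers selected by sample_what
 * and sample_flags, then log the results into the trace buffer with
 * interrupts disabled so the record isn't split.
 */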
static kern_return_t
kperf_sample_internal(struct kperf_sample *sbuf,
    struct kperf_context *context,
    unsigned sample_what, unsigned sample_flags,
    unsigned actionid, unsigned ucallstack_depth)
{
	int pended_ucallstack = 0;
	int pended_th_dispatch = 0;
	bool on_idle_thread = false;
	uint32_t userdata = actionid;
	bool task_only = (sample_flags & SAMPLE_FLAG_TASK_ONLY) != 0;

	sample_what = kperf_prepare_sample_what(sample_what, sample_flags);
	if (sample_what == 0) {
		return SAMPLE_CONTINUE;
	}

	if (!task_only) {
		context->cur_thread->kperf_pet_gen =
		    os_atomic_load(&kppet_gencount, relaxed);
	}
	bool is_kernel = (context->cur_pid == 0);

	if (actionid && actionid <= actionc) {
		sbuf->kcallstack.kpkc_nframes =
		    actionv[actionid - 1].kcallstack_depth;
	} else {
		sbuf->kcallstack.kpkc_nframes = MAX_KCALLSTACK_FRAMES;
	}

	ucallstack_depth = ucallstack_depth ?: MAX_UCALLSTACK_FRAMES;
	sbuf->kcallstack.kpkc_flags = 0;
	sbuf->usample.ucallstack.kpuc_flags = 0;

	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_sample(&sbuf->th_info, context);

		if (!(sample_flags & SAMPLE_FLAG_IDLE_THREADS)) {
			/* 0x40 is the idle bit in the runmode flags */
			if (sbuf->th_info.kpthi_runmode & 0x40) {
				on_idle_thread = true;
				goto log_sample;
			}
		}
	}

	if (sample_what & SAMPLER_TH_SNAPSHOT) {
		kperf_thread_snapshot_sample(&(sbuf->th_snapshot), context);
	}
	if (sample_what & SAMPLER_TH_SCHEDULING) {
		kperf_thread_scheduling_sample(&(sbuf->th_scheduling), context);
	}
	if (sample_what & SAMPLER_KSTACK) {
		if (sample_flags & SAMPLE_FLAG_CONTINUATION) {
			kperf_continuation_sample(&(sbuf->kcallstack), context);
		} else if (sample_flags & SAMPLE_FLAG_NON_INTERRUPT) {
			/* outside of interrupt context, backtrace the current thread */
			kperf_backtrace_sample(&(sbuf->kcallstack), context);
		} else {
			kperf_kcallstack_sample(&(sbuf->kcallstack), context);
		}
	}
	if (sample_what & SAMPLER_TK_SNAPSHOT) {
		kperf_task_snapshot_sample(context->cur_task, &(sbuf->tk_snapshot));
	}

	if (!is_kernel) {
		if (sample_what & SAMPLER_MEMINFO) {
			kperf_meminfo_sample(context->cur_task, &(sbuf->meminfo));
		}

		if (sample_flags & SAMPLE_FLAG_PEND_USER) {
			if (sample_what & SAMPLER_USTACK) {
				pended_ucallstack = kperf_ucallstack_pend(context,
				    ucallstack_depth, actionid);
			}

			if (sample_what & SAMPLER_TH_DISPATCH) {
				pended_th_dispatch =
				    kperf_thread_dispatch_pend(context, actionid);
			}
		}
	}

	if (sample_what & SAMPLER_PMC_THREAD) {
		kperf_kpc_thread_sample(&(sbuf->kpcdata), sample_what);
	} else if (sample_what & SAMPLER_PMC_CPU) {
		kperf_kpc_cpu_sample(&(sbuf->kpcdata), sample_what);
	}

log_sample:
	/* lookup the user tag, if any */
	if (actionid && (actionid <= actionc)) {
		userdata = actionv[actionid - 1].userdata;
	}

	/* avoid logging if this sample only pended samples */
	if ((sample_flags & SAMPLE_FLAG_PEND_USER) &&
	    !(sample_what & ~(SAMPLER_USTACK | SAMPLER_TH_DISPATCH))) {
		return SAMPLE_CONTINUE;
	}

	/*
	 * Stash the data into the buffer, with interrupts off to ensure
	 * the record doesn't get split.
	 */
	boolean_t enabled = ml_set_interrupts_enabled(FALSE);

	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_START, sample_what,
	    actionid, userdata, sample_flags);

	if (sample_flags & SAMPLE_FLAG_SYSTEM) {
		if (sample_what & SAMPLER_SYS_MEM) {
			kperf_system_memory_log();
		}
	}
	if (on_idle_thread) {
		goto log_sample_end;
	}

	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_log(&sbuf->th_info);
	}
	if (sample_what & SAMPLER_TH_SCHEDULING) {
		kperf_thread_scheduling_log(&(sbuf->th_scheduling));
	}
	if (sample_what & SAMPLER_TH_SNAPSHOT) {
		kperf_thread_snapshot_log(&(sbuf->th_snapshot));
	}
	if (sample_what & SAMPLER_KSTACK) {
		kperf_kcallstack_log(&sbuf->kcallstack);
	}
	if (sample_what & SAMPLER_TH_INSCYC) {
		kperf_thread_inscyc_log(context);
	}
	if (sample_what & SAMPLER_TK_SNAPSHOT) {
		kperf_task_snapshot_log(&(sbuf->tk_snapshot));
	}
	if (sample_what & SAMPLER_TK_INFO) {
		kperf_task_info_log(context);
	}

	/* dump the user-space samplers */
	if (!is_kernel) {
		if (sample_what & SAMPLER_MEMINFO) {
			kperf_meminfo_log(&(sbuf->meminfo));
		}

		if (sample_flags & SAMPLE_FLAG_PEND_USER) {
			if (pended_ucallstack) {
				BUF_INFO(PERF_CS_UPEND);
			}

			if (pended_th_dispatch) {
				BUF_INFO(PERF_TI_DISPPEND);
			}
		}
	}

	if (sample_what & SAMPLER_PMC_CONFIG) {
		kperf_kpc_config_log(&(sbuf->kpcdata));
	}
	if (sample_what & SAMPLER_PMC_THREAD) {
		kperf_kpc_thread_log(&(sbuf->kpcdata));
	} else if (sample_what & SAMPLER_PMC_CPU) {
		kperf_kpc_cpu_log(&(sbuf->kpcdata));
	}

log_sample_end:
	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_END, sample_what, on_idle_thread ? 1 : 0);

	/* interrupts back on */
	ml_set_interrupts_enabled(enabled);

	return SAMPLE_CONTINUE;
}

/* Translate actionid into sample bits and take a sample */
kern_return_t
kperf_sample(struct kperf_sample *sbuf,
    struct kperf_context *context,
    unsigned actionid, unsigned sample_flags)
{
	/* work out what to sample, if anything */
	if ((actionid > actionc) || (actionid == 0)) {
		return SAMPLE_SHUTDOWN;
	}

	/*
	 * Check the pid filter against the context's current pid;
	 * a filter pid of -1 means any pid.
	 */
	int pid_filter = actionv[actionid - 1].pid_filter;
	if ((pid_filter != -1) && (pid_filter != context->cur_pid)) {
		return SAMPLE_CONTINUE;
	}

	/* the samplers to run */
	unsigned int sample_what = actionv[actionid - 1].sample;
	unsigned int ucallstack_depth = actionv[actionid - 1].ucallstack_depth;

	/* do the actual sample operation */
	return kperf_sample_internal(sbuf, context, sample_what,
	    sample_flags, actionid, ucallstack_depth);
}

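/*
 * Handle a kdebug trigger: if the debugid matches the configured
 * kdebug filter, take a sample in the context of the current thread.
 */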
void
kperf_kdebug_handler(uint32_t debugid, uintptr_t *starting_fp)
{
	uint32_t sample_flags = SAMPLE_FLAG_PEND_USER;
	struct kperf_sample *sample = NULL;
	kern_return_t kr = KERN_SUCCESS;
	int s;

	if (!kperf_kdebug_should_trigger(debugid)) {
		return;
	}

	BUF_VERB(PERF_KDBG_HNDLR | DBG_FUNC_START, debugid);

	thread_t thread = current_thread();
	task_t task = get_threadtask(thread);
	struct kperf_context ctx = {
		.cur_thread = thread,
		.cur_task = task,
		.cur_pid = task_pid(task),
		.trigger_type = TRIGGER_TYPE_KDEBUG,
		.trigger_id = 0,
	};

	s = ml_set_interrupts_enabled(0);

	sample = kperf_intr_sample_buffer();

	if (!ml_at_interrupt_context()) {
		sample_flags |= SAMPLE_FLAG_NON_INTERRUPT;
		ctx.starting_fp = starting_fp;
	}

	kr = kperf_sample(sample, &ctx, kperf_kdebug_get_action(), sample_flags);

	ml_set_interrupts_enabled(s);
	BUF_VERB(PERF_KDBG_HNDLR | DBG_FUNC_END, kr);
}
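
/*
 * The AST handler takes its sample in two phases to limit stack usage:
 * a minimum-stack phase that only gathers the thread dispatch data,
 * followed by a non-inlined maximum-stack phase that allocates the full
 * sample buffer on its stack.
 */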

/*
 * Sample using a minimum of stack space during this phase.
 */
static void
kperf_ast_sample_min_stack_phase(struct kperf_usample_min *sbuf_min,
    struct kperf_context *context, unsigned int sample_what)
{
	if (sample_what & SAMPLER_TH_DISPATCH) {
		kperf_thread_dispatch_sample(&sbuf_min->th_dispatch, context);
	}
}

/*
 * This function should not be inlined into its caller, which would pollute
 * the stack usage of the minimum stack phase, above.
 */
__attribute__((noinline))
static void
kperf_ast_sample_max_stack_phase(struct kperf_usample_min *sbuf_min,
    struct kperf_context *context, uint32_t actionid, unsigned int sample_what,
    unsigned int nframes)
{
	struct kperf_usample sbuf = { .usample_min = sbuf_min };
	sbuf.ucallstack.kpuc_nframes = nframes;

	kperf_sample_user_internal(&sbuf, context, actionid, sample_what);
}

/*
 * This function allocates >2.3KB of stack. Prevent the compiler from
 * inlining this function into ast_taken and ensure the stack memory is only
 * allocated for the kperf AST.
 */
__attribute__((noinline))
void
kperf_thread_ast_handler(thread_t thread)
{
	uint32_t ast = thread->kperf_ast;

	BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_START, thread, ast);

	task_t task = get_threadtask(thread);

	if (task_did_exec(task) || task_is_exec_copy(task)) {
		BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_END, SAMPLE_CONTINUE);
		return;
	}

	struct kperf_context ctx = {
		.cur_thread = thread,
		.cur_task = task,
		.cur_pid = task_pid(task),
	};

	unsigned int sample_what = 0;
	if (ast & T_KPERF_AST_DISPATCH) {
		sample_what |= SAMPLER_TH_DISPATCH;
	}
	if (ast & T_KPERF_AST_CALLSTACK) {
		/* TH_INFO for backwards compatibility */
		sample_what |= SAMPLER_USTACK | SAMPLER_TH_INFO;
	}

	unsigned int actionid = T_KPERF_GET_ACTIONID(ast);

	struct kperf_usample_min sbuf_min = { 0 };
	kperf_ast_sample_min_stack_phase(&sbuf_min, &ctx, sample_what);
	kperf_ast_sample_max_stack_phase(&sbuf_min, &ctx, actionid, sample_what,
	    T_KPERF_GET_CALLSTACK_DEPTH(ast) ?: MAX_UCALLSTACK_FRAMES);

	BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_END);
}

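/*
 * Pend a kperf AST on the current thread, merging set_flags and
 * set_actionid into any flags and actionid already pending. Returns 1
 * if the thread's AST state changed, 0 if the request was already
 * covered.
 */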
int
kperf_ast_pend(thread_t thread, uint32_t set_flags, unsigned int set_actionid)
{
	if (thread != current_thread()) {
		panic("kperf: pending AST to non-current thread");
	}

	uint32_t ast = thread->kperf_ast;
	unsigned int actionid = T_KPERF_GET_ACTIONID(ast);
	uint32_t flags = ast & T_KPERF_AST_ALL;

	if ((flags | set_flags) != flags || actionid != set_actionid) {
		ast &= ~T_KPERF_SET_ACTIONID(actionid);
		ast |= T_KPERF_SET_ACTIONID(set_actionid);
		ast |= set_flags;

		thread->kperf_ast = ast;

		/* set the actual AST */
		act_set_kperf(thread);
		return 1;
	}

	return 0;
}

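/*
 * Raise (but never lower) the call stack depth recorded in the
 * thread's pending kperf AST.
 */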
void
kperf_ast_set_callstack_depth(thread_t thread, uint32_t depth)
{
	uint32_t ast = thread->kperf_ast;
	uint32_t existing_depth = T_KPERF_GET_CALLSTACK_DEPTH(ast);
	if (existing_depth < depth) {
		ast &= ~T_KPERF_SET_CALLSTACK_DEPTH(existing_depth);
		ast |= T_KPERF_SET_CALLSTACK_DEPTH(depth);
		thread->kperf_ast = ast;
	}
}

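/* getter and setter for the context-switch tracepoint flag */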
int
kperf_kdbg_cswitch_get(void)
{
	return kperf_kdebug_cswitch;
}

int
kperf_kdbg_cswitch_set(int newval)
{
	kperf_kdebug_cswitch = newval;
	kperf_on_cpu_update();

	return 0;
}

/*
 * Action configuration
 */
unsigned int
kperf_action_get_count(void)
{
	return actionc;
}

int
kperf_action_set_samplers(unsigned actionid, uint32_t samplers)
{
	if ((actionid > actionc) || (actionid == 0)) {
		return EINVAL;
	}

	/*
	 * Disallow sampling both CPU and thread counters in the same
	 * action.
	 */
	if ((samplers & SAMPLER_PMC_THREAD) && (samplers & SAMPLER_PMC_CPU)) {
		return EINVAL;
	}

	actionv[actionid - 1].sample = samplers;

	return 0;
}

int
kperf_action_get_samplers(unsigned actionid, uint32_t *samplers_out)
{
	if (actionid > actionc) {
		return EINVAL;
	}

	if (actionid == 0) {
		*samplers_out = 0; /* "NULL" action */
	} else {
		*samplers_out = actionv[actionid - 1].sample;
	}

	return 0;
}

int
kperf_action_set_userdata(unsigned actionid, uint32_t userdata)
{
	if ((actionid > actionc) || (actionid == 0)) {
		return EINVAL;
	}

	actionv[actionid - 1].userdata = userdata;

	return 0;
}

int
kperf_action_get_userdata(unsigned actionid, uint32_t *userdata_out)
{
	if (actionid > actionc) {
		return EINVAL;
	}

	if (actionid == 0) {
		*userdata_out = 0; /* "NULL" action */
	} else {
		*userdata_out = actionv[actionid - 1].userdata;
	}

	return 0;
}

int
kperf_action_set_filter(unsigned actionid, int pid)
{
	if ((actionid > actionc) || (actionid == 0)) {
		return EINVAL;
	}

	actionv[actionid - 1].pid_filter = pid;

	return 0;
}

int
kperf_action_get_filter(unsigned actionid, int *pid_out)
{
	if (actionid > actionc) {
		return EINVAL;
	}

	if (actionid == 0) {
		*pid_out = -1; /* "NULL" action */
	} else {
		*pid_out = actionv[actionid - 1].pid_filter;
	}

	return 0;
}

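/* restore every action to its default configuration */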
void
kperf_action_reset(void)
{
	for (unsigned int i = 0; i < actionc; i++) {
		kperf_action_set_samplers(i + 1, 0);
		kperf_action_set_userdata(i + 1, 0);
		kperf_action_set_filter(i + 1, -1);
		kperf_action_set_ucallstack_depth(i + 1, MAX_UCALLSTACK_FRAMES);
		kperf_action_set_kcallstack_depth(i + 1, MAX_KCALLSTACK_FRAMES);
	}
}

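/*
 * Grow the action array to count entries, preserving any existing
 * configuration; new entries get the default settings.
 */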
int
kperf_action_set_count(unsigned count)
{
	struct action *new_actionv = NULL, *old_actionv = NULL;
	unsigned old_count;

	/* easy no-op */
	if (count == actionc) {
		return 0;
	}

	/* TODO: allow shrinking? */
	if (count < actionc) {
		return EINVAL;
	}

	/* cap it for good measure */
	if (count > ACTION_MAX) {
		return EINVAL;
	}

	/*
	 * Creating the action array for the first time. Create a few
	 * more things, too.
	 */
	if (actionc == 0) {
		kperf_setup();
	}

	/* create a new array */
	new_actionv = kalloc_data_tag(count * sizeof(*new_actionv),
	    Z_WAITOK, VM_KERN_MEMORY_DIAG);
	if (new_actionv == NULL) {
		return ENOMEM;
	}

	old_actionv = actionv;
	old_count = actionc;

	if (old_actionv != NULL) {
		memcpy(new_actionv, actionv, actionc * sizeof(*actionv));
	}

	memset(&(new_actionv[old_count]), 0, (count - old_count) * sizeof(*actionv));

	for (unsigned int i = old_count; i < count; i++) {
		new_actionv[i].pid_filter = -1;
		new_actionv[i].ucallstack_depth = MAX_UCALLSTACK_FRAMES;
		new_actionv[i].kcallstack_depth = MAX_KCALLSTACK_FRAMES;
	}

	actionv = new_actionv;
	actionc = count;

	kfree_data(old_actionv, old_count * sizeof(*actionv));

	return 0;
}
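
/*
 * Callstack depth limits: valid depths are 2..MAX_UCALLSTACK_FRAMES for
 * user call stacks and 1..MAX_KCALLSTACK_FRAMES for kernel call stacks.
 */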

int
kperf_action_set_ucallstack_depth(unsigned action_id, uint32_t depth)
{
	if ((action_id > actionc) || (action_id == 0)) {
		return EINVAL;
	}

	if (depth > MAX_UCALLSTACK_FRAMES) {
		return EINVAL;
	}
	if (depth < 2) {
		return EINVAL;
	}

	actionv[action_id - 1].ucallstack_depth = depth;

	return 0;
}

int
kperf_action_set_kcallstack_depth(unsigned action_id, uint32_t depth)
{
	if ((action_id > actionc) || (action_id == 0)) {
		return EINVAL;
	}

	if (depth > MAX_KCALLSTACK_FRAMES) {
		return EINVAL;
	}
	if (depth < 1) {
		return EINVAL;
	}

	actionv[action_id - 1].kcallstack_depth = depth;

	return 0;
}

int
kperf_action_get_ucallstack_depth(unsigned action_id, uint32_t *depth_out)
{
	if (action_id > actionc) {
		return EINVAL;
	}

	assert(depth_out);

	if (action_id == 0) {
		*depth_out = MAX_UCALLSTACK_FRAMES;
	} else {
		*depth_out = actionv[action_id - 1].ucallstack_depth;
	}

	return 0;
}

int
kperf_action_get_kcallstack_depth(unsigned action_id, uint32_t *depth_out)
{
	if (action_id > actionc) {
		return EINVAL;
	}

	assert(depth_out);

	if (action_id == 0) {
		*depth_out = MAX_KCALLSTACK_FRAMES;
	} else {
		*depth_out = actionv[action_id - 1].kcallstack_depth;
	}

	return 0;
}