xref: /xnu-8019.80.24/osfmk/kperf/action.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 /*
30  * Called from a trigger. Actually takes the data from the different
31  * modules and puts them in a buffer
32  */
33 
34 #include <mach/mach_types.h>
35 #include <machine/machine_routines.h>
36 #include <kern/kalloc.h>
37 #include <kern/debug.h> /* panic */
38 #include <kern/thread.h>
39 #include <sys/errno.h>
40 #include <sys/vm.h>
41 #include <vm/vm_object.h>
42 #include <vm/vm_page.h>
43 #include <vm/vm_pageout.h>
44 
45 #include <kperf/action.h>
46 #include <kperf/ast.h>
47 #include <kperf/buffer.h>
48 #include <kperf/callstack.h>
49 #include <kperf/context.h>
50 #include <kperf/kdebug_trigger.h>
51 #include <kperf/kperf.h>
52 #include <kperf/kperf_kpc.h>
53 #include <kperf/kptimer.h>
54 #include <kperf/pet.h>
55 #include <kperf/sample.h>
56 #include <kperf/thread_samplers.h>
57 
/* upper bound on the number of configurable actions */
#define ACTION_MAX (32)

/* the list of different actions to take */
struct action {
	uint32_t sample;           /* bitmask of SAMPLER_* samplers to run */
	uint32_t ucallstack_depth; /* max user callstack frames to record */
	uint32_t kcallstack_depth; /* max kernel callstack frames to record */
	uint32_t userdata;         /* opaque tag logged alongside each sample */
	int pid_filter;            /* only sample this pid; -1 matches any pid */
};

/* the list of actions */
static unsigned int actionc = 0;
static struct action *actionv = NULL;

/* should emit tracepoint on context switch */
int kperf_kdebug_cswitch = 0;
75 
76 bool
kperf_action_has_non_system(unsigned int actionid)77 kperf_action_has_non_system(unsigned int actionid)
78 {
79 	if (actionid > actionc) {
80 		return false;
81 	}
82 
83 	if (actionv[actionid - 1].sample & ~SAMPLER_SYS_MEM) {
84 		return true;
85 	} else {
86 		return false;
87 	}
88 }
89 
90 bool
kperf_action_has_task(unsigned int actionid)91 kperf_action_has_task(unsigned int actionid)
92 {
93 	if (actionid > actionc) {
94 		return false;
95 	}
96 
97 	return actionv[actionid - 1].sample & SAMPLER_TASK_MASK;
98 }
99 
100 bool
kperf_action_has_thread(unsigned int actionid)101 kperf_action_has_thread(unsigned int actionid)
102 {
103 	if (actionid > actionc) {
104 		return false;
105 	}
106 
107 	return actionv[actionid - 1].sample & SAMPLER_THREAD_MASK;
108 }
109 
/*
 * Emit a snapshot of system-wide VM counters to the trace buffer.
 * Reads only global counters; requires no thread or task context.
 */
static void
kperf_system_memory_log(void)
{
	extern unsigned int memorystatus_level;

	/* free / wired / external, plus the total of pageable pages */
	BUF_DATA(PERF_MI_SYS_DATA, (uintptr_t)vm_page_free_count,
	    (uintptr_t)vm_page_wire_count, (uintptr_t)vm_page_external_count,
	    (uintptr_t)(vm_page_active_count + vm_page_inactive_count +
	    vm_page_speculative_count));
	/* anonymous / internal / compressions / compressor-held pages */
	BUF_DATA(PERF_MI_SYS_DATA_2, (uintptr_t)vm_page_anonymous_count,
	    (uintptr_t)vm_page_internal_count,
	    (uintptr_t)vm_pageout_vminfo.vm_pageout_compressions,
	    (uintptr_t)VM_PAGE_COMPRESSOR_COUNT);
	BUF_DATA(PERF_MI_SYS_DATA_3,
#if CONFIG_SECLUDED_MEMORY
	    (uintptr_t)vm_page_secluded_count,
#else // CONFIG_SECLUDED_MEMORY
	    0, /* no secluded memory on this configuration */
#endif // !CONFIG_SECLUDED_MEMORY
	    (uintptr_t)vm_page_purgeable_count,
	    memorystatus_level);
}
132 
/*
 * Run the user-context samplers selected by `sample_what` into `sbuf`,
 * then log them.  Logging happens with interrupts disabled so one
 * sample's trace events are not interleaved with other activity.
 */
static void
kperf_sample_user_internal(struct kperf_usample *sbuf,
    struct kperf_context *context, unsigned int actionid,
    unsigned int sample_what)
{
	/* sampling phase -- gather data before touching the trace buffer */
	if (sample_what & SAMPLER_USTACK) {
		kperf_ucallstack_sample(&sbuf->ucallstack, context);
	}
	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_sample(&sbuf->th_info, context);
	}

	boolean_t intren = ml_set_interrupts_enabled(FALSE);

	/*
	 * No userdata or sample_flags for this one.
	 */
	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_START, sample_what, actionid);

	if (sample_what & SAMPLER_USTACK) {
		kperf_ucallstack_log(&sbuf->ucallstack);
	}
	if (sample_what & SAMPLER_TH_DISPATCH) {
		/* dispatch data was captured earlier, in the min-stack phase */
		kperf_thread_dispatch_log(&sbuf->usample_min->th_dispatch);
	}
	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_log(&sbuf->th_info);
	}

	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_END, sample_what);

	ml_set_interrupts_enabled(intren);
}
166 
167 static unsigned int
kperf_prepare_sample_what(unsigned int sample_what,unsigned int sample_flags)168 kperf_prepare_sample_what(unsigned int sample_what, unsigned int sample_flags)
169 {
170 	/* callstacks should be explicitly ignored */
171 	if (sample_flags & SAMPLE_FLAG_EMPTY_CALLSTACK) {
172 		sample_what &= ~(SAMPLER_KSTACK | SAMPLER_USTACK);
173 	}
174 	if (sample_flags & SAMPLE_FLAG_ONLY_SYSTEM) {
175 		sample_what &= SAMPLER_SYS_MEM;
176 	}
177 	assert((sample_flags & (SAMPLE_FLAG_THREAD_ONLY | SAMPLE_FLAG_TASK_ONLY))
178 	    != (SAMPLE_FLAG_THREAD_ONLY | SAMPLE_FLAG_TASK_ONLY));
179 	if (sample_flags & SAMPLE_FLAG_THREAD_ONLY) {
180 		sample_what &= SAMPLER_THREAD_MASK;
181 	}
182 	if (sample_flags & SAMPLE_FLAG_TASK_ONLY) {
183 		sample_what &= SAMPLER_TASK_MASK;
184 	}
185 
186 	return sample_what;
187 }
188 
189 void
kperf_sample_user(struct kperf_usample * sbuf,struct kperf_context * context,unsigned int actionid,unsigned int sample_flags)190 kperf_sample_user(struct kperf_usample *sbuf, struct kperf_context *context,
191     unsigned int actionid, unsigned int sample_flags)
192 {
193 	if (actionid == 0 || actionid > actionc) {
194 		return;
195 	}
196 
197 	unsigned int sample_what = kperf_prepare_sample_what(
198 		actionv[actionid - 1].sample, sample_flags);
199 	if (sample_what == 0) {
200 		return;
201 	}
202 
203 	unsigned int ucallstack_depth = actionv[actionid - 1].ucallstack_depth;
204 	sbuf->ucallstack.kpuc_nframes = ucallstack_depth ?: MAX_UCALLSTACK_FRAMES;
205 
206 	kperf_sample_user_internal(sbuf, context, actionid, sample_what);
207 }
208 
/*
 * Run the samplers selected by `sample_what` (after masking with
 * `sample_flags`) into `sbuf`, then emit the results to the trace buffer
 * with interrupts disabled so a sample's events are not split.
 * User-space samplers may be pended to an AST instead of run directly.
 * Always returns SAMPLE_CONTINUE.
 */
static kern_return_t
kperf_sample_internal(struct kperf_sample *sbuf,
    struct kperf_context *context,
    unsigned sample_what, unsigned sample_flags,
    unsigned actionid, unsigned ucallstack_depth)
{
	int pended_ucallstack = 0;
	int pended_th_dispatch = 0;
	bool on_idle_thread = false;
	uint32_t userdata = actionid;
	bool task_only = (sample_flags & SAMPLE_FLAG_TASK_ONLY) != 0;

	sample_what = kperf_prepare_sample_what(sample_what, sample_flags);
	if (sample_what == 0) {
		return SAMPLE_CONTINUE;
	}

	if (!task_only) {
		/* record the PET generation this thread was sampled in */
		context->cur_thread->kperf_pet_gen =
		    os_atomic_load(&kppet_gencount, relaxed);
	}
	bool is_kernel = (context->cur_pid == 0);

	/* clamp the kernel callstack depth to the action's configuration */
	if (actionid && actionid <= actionc) {
		sbuf->kcallstack.kpkc_nframes =
		    actionv[actionid - 1].kcallstack_depth;
	} else {
		sbuf->kcallstack.kpkc_nframes = MAX_KCALLSTACK_FRAMES;
	}

	/* a depth of 0 means "use the maximum" */
	ucallstack_depth = ucallstack_depth ?: MAX_UCALLSTACK_FRAMES;
	sbuf->kcallstack.kpkc_flags = 0;
	sbuf->usample.ucallstack.kpuc_flags = 0;

	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_sample(&sbuf->th_info, context);

		/*
		 * Skip idle threads unless explicitly requested.
		 * NOTE(review): 0x40 is treated as the idle bit of
		 * kpthi_runmode here -- confirm against the thread info
		 * sampler's runmode encoding.
		 */
		if (!(sample_flags & SAMPLE_FLAG_IDLE_THREADS)) {
			if (sbuf->th_info.kpthi_runmode & 0x40) {
				on_idle_thread = true;
				goto log_sample;
			}
		}
	}

	if (sample_what & SAMPLER_TH_SNAPSHOT) {
		kperf_thread_snapshot_sample(&(sbuf->th_snapshot), context);
	}
	if (sample_what & SAMPLER_TH_SCHEDULING) {
		kperf_thread_scheduling_sample(&(sbuf->th_scheduling), context);
	}
	if (sample_what & SAMPLER_KSTACK) {
		if (sample_flags & SAMPLE_FLAG_CONTINUATION) {
			kperf_continuation_sample(&(sbuf->kcallstack), context);
		} else if (sample_flags & SAMPLE_FLAG_NON_INTERRUPT) {
			/* outside of interrupt context, backtrace the current thread */
			kperf_backtrace_sample(&(sbuf->kcallstack), context);
		} else {
			kperf_kcallstack_sample(&(sbuf->kcallstack), context);
		}
	}
	if (sample_what & SAMPLER_TK_SNAPSHOT) {
		kperf_task_snapshot_sample(context->cur_task, &(sbuf->tk_snapshot));
	}

	/* user-space samplers only apply to non-kernel tasks */
	if (!is_kernel) {
		if (sample_what & SAMPLER_MEMINFO) {
			kperf_meminfo_sample(context->cur_task, &(sbuf->meminfo));
		}

		/* defer user-space work to an AST rather than sampling here */
		if (sample_flags & SAMPLE_FLAG_PEND_USER) {
			if (sample_what & SAMPLER_USTACK) {
				pended_ucallstack = kperf_ucallstack_pend(context,
				    ucallstack_depth, actionid);
			}

			if (sample_what & SAMPLER_TH_DISPATCH) {
				pended_th_dispatch =
				    kperf_thread_dispatch_pend(context, actionid);
			}
		}
	}

	/* PMC thread and CPU sampling are mutually exclusive (see setter) */
	if (sample_what & SAMPLER_PMC_THREAD) {
		kperf_kpc_thread_sample(&(sbuf->kpcdata), sample_what);
	} else if (sample_what & SAMPLER_PMC_CPU) {
		kperf_kpc_cpu_sample(&(sbuf->kpcdata), sample_what);
	}

log_sample:
	/* lookup the user tag, if any */
	if (actionid && (actionid <= actionc)) {
		userdata = actionv[actionid - 1].userdata;
	}

	/* avoid logging if this sample only pended samples */
	if (sample_flags & SAMPLE_FLAG_PEND_USER &&
	    !(sample_what & ~(SAMPLER_USTACK | SAMPLER_TH_DISPATCH))) {
		return SAMPLE_CONTINUE;
	}

	/* stash the data into the buffer
	 * interrupts off to ensure we don't get split
	 */
	boolean_t enabled = ml_set_interrupts_enabled(FALSE);

	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_START, sample_what,
	    actionid, userdata, sample_flags);

	if (sample_flags & SAMPLE_FLAG_SYSTEM) {
		if (sample_what & SAMPLER_SYS_MEM) {
			kperf_system_memory_log();
		}
	}
	/* idle threads only get the system-wide data logged above */
	if (on_idle_thread) {
		goto log_sample_end;
	}

	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_log(&sbuf->th_info);
	}
	if (sample_what & SAMPLER_TH_SCHEDULING) {
		kperf_thread_scheduling_log(&(sbuf->th_scheduling));
	}
	if (sample_what & SAMPLER_TH_SNAPSHOT) {
		kperf_thread_snapshot_log(&(sbuf->th_snapshot));
	}
	if (sample_what & SAMPLER_KSTACK) {
		kperf_kcallstack_log(&sbuf->kcallstack);
	}
	if (sample_what & SAMPLER_TH_INSCYC) {
		kperf_thread_inscyc_log(context);
	}
	if (sample_what & SAMPLER_TK_SNAPSHOT) {
		kperf_task_snapshot_log(&(sbuf->tk_snapshot));
	}
	if (sample_what & SAMPLER_TK_INFO) {
		kperf_task_info_log(context);
	}

	/* dump user stuff */
	if (!is_kernel) {
		/* dump meminfo */
		if (sample_what & SAMPLER_MEMINFO) {
			kperf_meminfo_log(&(sbuf->meminfo));
		}

		/* note which user-space samplers were deferred to the AST */
		if (sample_flags & SAMPLE_FLAG_PEND_USER) {
			if (pended_ucallstack) {
				BUF_INFO(PERF_CS_UPEND);
			}

			if (pended_th_dispatch) {
				BUF_INFO(PERF_TI_DISPPEND);
			}
		}
	}

	if (sample_what & SAMPLER_PMC_CONFIG) {
		kperf_kpc_config_log(&(sbuf->kpcdata));
	}
	if (sample_what & SAMPLER_PMC_THREAD) {
		kperf_kpc_thread_log(&(sbuf->kpcdata));
	} else if (sample_what & SAMPLER_PMC_CPU) {
		kperf_kpc_cpu_log(&(sbuf->kpcdata));
	}

log_sample_end:
	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_END, sample_what, on_idle_thread ? 1 : 0);

	/* intrs back on */
	ml_set_interrupts_enabled(enabled);

	return SAMPLE_CONTINUE;
}
384 
385 /* Translate actionid into sample bits and take a sample */
386 kern_return_t
kperf_sample(struct kperf_sample * sbuf,struct kperf_context * context,unsigned actionid,unsigned sample_flags)387 kperf_sample(struct kperf_sample *sbuf,
388     struct kperf_context *context,
389     unsigned actionid, unsigned sample_flags)
390 {
391 	/* work out what to sample, if anything */
392 	if ((actionid > actionc) || (actionid == 0)) {
393 		return SAMPLE_SHUTDOWN;
394 	}
395 
396 	/* check the pid filter against the context's current pid.
397 	 * filter pid == -1 means any pid
398 	 */
399 	int pid_filter = actionv[actionid - 1].pid_filter;
400 	if ((pid_filter != -1) && (pid_filter != context->cur_pid)) {
401 		return SAMPLE_CONTINUE;
402 	}
403 
404 	/* the samplers to run */
405 	unsigned int sample_what = actionv[actionid - 1].sample;
406 	unsigned int ucallstack_depth = actionv[actionid - 1].ucallstack_depth;
407 
408 	/* do the actual sample operation */
409 	return kperf_sample_internal(sbuf, context, sample_what,
410 	           sample_flags, actionid, ucallstack_depth);
411 }
412 
/*
 * Called when a kdebug tracepoint matching the configured kperf trigger
 * fires.  Takes a sample in the current context, pending user-space
 * samplers to an AST.
 */
void
kperf_kdebug_handler(uint32_t debugid, uintptr_t *starting_fp)
{
	uint32_t sample_flags = SAMPLE_FLAG_PEND_USER;
	struct kperf_sample *sample = NULL;
	kern_return_t kr = KERN_SUCCESS;
	int s;

	if (!kperf_kdebug_should_trigger(debugid)) {
		return;
	}

	BUF_VERB(PERF_KDBG_HNDLR | DBG_FUNC_START, debugid);

	thread_t thread = current_thread();
	task_t task = get_threadtask(thread);
	struct kperf_context ctx = {
		.cur_thread = thread,
		.cur_task = task,
		.cur_pid = task_pid(task),
		.trigger_type = TRIGGER_TYPE_KDEBUG,
		.trigger_id = 0,
	};

	/* interrupts off while using the per-CPU interrupt sample buffer */
	s = ml_set_interrupts_enabled(0);

	sample = kperf_intr_sample_buffer();

	if (!ml_at_interrupt_context()) {
		/* outside interrupt context, backtrace from the caller's frame */
		sample_flags |= SAMPLE_FLAG_NON_INTERRUPT;
		ctx.starting_fp = starting_fp;
	}

	kr = kperf_sample(sample, &ctx, kperf_kdebug_get_action(), sample_flags);

	ml_set_interrupts_enabled(s);
	BUF_VERB(PERF_KDBG_HNDLR | DBG_FUNC_END, kr);
}
451 
/*
 * Sample using a minimum of stack space during this phase.
 */
static void
kperf_ast_sample_min_stack_phase(struct kperf_usample_min *sbuf_min,
    struct kperf_context *context, unsigned int sample_what)
{
	/* only the dispatch-queue sampler runs in this low-stack phase */
	if (sample_what & SAMPLER_TH_DISPATCH) {
		kperf_thread_dispatch_sample(&sbuf_min->th_dispatch, context);
	}
}
463 
/*
 * This function should not be inlined with its caller, which would pollute
 * the stack usage of the minimum stack phase, above.
 */
__attribute__((noinline))
static void
kperf_ast_sample_max_stack_phase(struct kperf_usample_min *sbuf_min,
    struct kperf_context *context, uint32_t actionid, unsigned int sample_what,
    unsigned int nframes)
{
	/* the large usample buffer lives only in this frame */
	struct kperf_usample sbuf = { .usample_min = sbuf_min };
	sbuf.ucallstack.kpuc_nframes = nframes;

	kperf_sample_user_internal(&sbuf, context, actionid, sample_what);
}
479 
/*
 * This function allocates >2.3KB of the stack.  Prevent the compiler from
 * inlining this function into ast_taken and ensure the stack memory is only
 * allocated for the kperf AST.
 */
__attribute__((noinline))
void
kperf_thread_ast_handler(thread_t thread)
{
	uint32_t ast = thread->kperf_ast;

	BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_START, thread, ast);

	task_t task = get_threadtask(thread);

	/* skip tasks that are mid-exec */
	if (task_did_exec(task) || task_is_exec_copy(task)) {
		BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_END, SAMPLE_CONTINUE);
		return;
	}

	struct kperf_context ctx = {
		.cur_thread = thread,
		.cur_task = task,
		.cur_pid = task_pid(task),
	};

	/* translate the pended AST flags into sampler bits */
	unsigned int sample_what = 0;
	if (ast & T_KPERF_AST_DISPATCH) {
		sample_what |= SAMPLER_TH_DISPATCH;
	}
	if (ast & T_KPERF_AST_CALLSTACK) {
		/* TH_INFO for backwards compatibility */
		sample_what |= SAMPLER_USTACK | SAMPLER_TH_INFO;
	}

	unsigned int actionid = T_KPERF_GET_ACTIONID(ast);

	/*
	 * Sample in two phases: a low-stack phase for the dispatch sampler,
	 * then a separate noinline phase that allocates the large buffer.
	 */
	struct kperf_usample_min sbuf_min = { 0 };
	kperf_ast_sample_min_stack_phase(&sbuf_min, &ctx, sample_what);
	kperf_ast_sample_max_stack_phase(&sbuf_min, &ctx, actionid, sample_what,
	    T_KPERF_GET_CALLSTACK_DEPTH(ast) ?: MAX_UCALLSTACK_FRAMES);

	BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_END);
}
524 
/*
 * Pend a kperf AST on `thread` (must be the current thread) with the given
 * flags and actionid.  Returns 1 if the pending state changed and an AST
 * was set, 0 if an equivalent AST was already pending.
 */
int
kperf_ast_pend(thread_t thread, uint32_t set_flags, unsigned int set_actionid)
{
	if (thread != current_thread()) {
		panic("kperf: pending AST to non-current thread");
	}

	/* decompose the packed AST word into flags and actionid */
	uint32_t ast = thread->kperf_ast;
	unsigned int actionid = T_KPERF_GET_ACTIONID(ast);
	uint32_t flags = ast & T_KPERF_AST_ALL;

	/* only update if new flags or a different actionid are requested */
	if ((flags | set_flags) != flags || actionid != set_actionid) {
		/* swap the stored actionid for the new one, keeping flags */
		ast &= ~T_KPERF_SET_ACTIONID(actionid);
		ast |= T_KPERF_SET_ACTIONID(set_actionid);
		ast |= set_flags;

		thread->kperf_ast = ast;

		/* set the actual AST */
		act_set_kperf(thread);
		return 1;
	}

	return 0;
}
550 
551 void
kperf_ast_set_callstack_depth(thread_t thread,uint32_t depth)552 kperf_ast_set_callstack_depth(thread_t thread, uint32_t depth)
553 {
554 	uint32_t ast = thread->kperf_ast;
555 	uint32_t existing_depth = T_KPERF_GET_CALLSTACK_DEPTH(ast);
556 	if (existing_depth < depth) {
557 		ast &= ~T_KPERF_SET_CALLSTACK_DEPTH(existing_depth);
558 		ast |= T_KPERF_SET_CALLSTACK_DEPTH(depth);
559 		thread->kperf_ast = ast;
560 	}
561 }
562 
/* Return whether kperf emits a tracepoint on context switch. */
int
kperf_kdbg_cswitch_get(void)
{
	return kperf_kdebug_cswitch;
}
568 
/*
 * Enable or disable the context-switch tracepoint and refresh the
 * on-CPU hooks to match.  Always returns 0.
 */
int
kperf_kdbg_cswitch_set(int newval)
{
	kperf_kdebug_cswitch = newval;
	kperf_on_cpu_update();

	return 0;
}
577 
/*
 * Action configuration
 */

/* Return the number of configured actions. */
unsigned int
kperf_action_get_count(void)
{
	return actionc;
}
586 
587 int
kperf_action_set_samplers(unsigned actionid,uint32_t samplers)588 kperf_action_set_samplers(unsigned actionid, uint32_t samplers)
589 {
590 	if ((actionid > actionc) || (actionid == 0)) {
591 		return EINVAL;
592 	}
593 
594 	/* disallow both CPU and thread counters to be sampled in the same
595 	 * action */
596 	if ((samplers & SAMPLER_PMC_THREAD) && (samplers & SAMPLER_PMC_CPU)) {
597 		return EINVAL;
598 	}
599 
600 	actionv[actionid - 1].sample = samplers;
601 
602 	return 0;
603 }
604 
605 int
kperf_action_get_samplers(unsigned actionid,uint32_t * samplers_out)606 kperf_action_get_samplers(unsigned actionid, uint32_t *samplers_out)
607 {
608 	if ((actionid > actionc)) {
609 		return EINVAL;
610 	}
611 
612 	if (actionid == 0) {
613 		*samplers_out = 0; /* "NULL" action */
614 	} else {
615 		*samplers_out = actionv[actionid - 1].sample;
616 	}
617 
618 	return 0;
619 }
620 
621 int
kperf_action_set_userdata(unsigned actionid,uint32_t userdata)622 kperf_action_set_userdata(unsigned actionid, uint32_t userdata)
623 {
624 	if ((actionid > actionc) || (actionid == 0)) {
625 		return EINVAL;
626 	}
627 
628 	actionv[actionid - 1].userdata = userdata;
629 
630 	return 0;
631 }
632 
633 int
kperf_action_get_userdata(unsigned actionid,uint32_t * userdata_out)634 kperf_action_get_userdata(unsigned actionid, uint32_t *userdata_out)
635 {
636 	if ((actionid > actionc)) {
637 		return EINVAL;
638 	}
639 
640 	if (actionid == 0) {
641 		*userdata_out = 0; /* "NULL" action */
642 	} else {
643 		*userdata_out = actionv[actionid - 1].userdata;
644 	}
645 
646 	return 0;
647 }
648 
649 int
kperf_action_set_filter(unsigned actionid,int pid)650 kperf_action_set_filter(unsigned actionid, int pid)
651 {
652 	if ((actionid > actionc) || (actionid == 0)) {
653 		return EINVAL;
654 	}
655 
656 	actionv[actionid - 1].pid_filter = pid;
657 
658 	return 0;
659 }
660 
661 int
kperf_action_get_filter(unsigned actionid,int * pid_out)662 kperf_action_get_filter(unsigned actionid, int *pid_out)
663 {
664 	if ((actionid > actionc)) {
665 		return EINVAL;
666 	}
667 
668 	if (actionid == 0) {
669 		*pid_out = -1; /* "NULL" action */
670 	} else {
671 		*pid_out = actionv[actionid - 1].pid_filter;
672 	}
673 
674 	return 0;
675 }
676 
677 void
kperf_action_reset(void)678 kperf_action_reset(void)
679 {
680 	for (unsigned int i = 0; i < actionc; i++) {
681 		kperf_action_set_samplers(i + 1, 0);
682 		kperf_action_set_userdata(i + 1, 0);
683 		kperf_action_set_filter(i + 1, -1);
684 		kperf_action_set_ucallstack_depth(i + 1, MAX_UCALLSTACK_FRAMES);
685 		kperf_action_set_kcallstack_depth(i + 1, MAX_KCALLSTACK_FRAMES);
686 	}
687 }
688 
/*
 * Grow the action table to `count` entries, preserving the existing
 * configuration and initializing new entries to their defaults.
 * Shrinking is not supported.  Returns 0 or an errno-style error.
 */
int
kperf_action_set_count(unsigned count)
{
	struct action *new_actionv = NULL, *old_actionv = NULL;
	unsigned old_count;

	/* easy no-op */
	if (count == actionc) {
		return 0;
	}

	/* TODO: allow shrinking? */
	if (count < actionc) {
		return EINVAL;
	}

	/* cap it for good measure */
	if (count > ACTION_MAX) {
		return EINVAL;
	}

	/* creating the action array for the first time. create a few
	 * more things, too.
	 */
	if (actionc == 0) {
		kperf_setup();
	}

	/* create a new array; count <= ACTION_MAX, so no size overflow */
	new_actionv = kalloc_data_tag(count * sizeof(*new_actionv),
	    Z_WAITOK, VM_KERN_MEMORY_DIAG);
	if (new_actionv == NULL) {
		return ENOMEM;
	}

	old_actionv = actionv;
	old_count = actionc;

	if (old_actionv != NULL) {
		memcpy(new_actionv, actionv, actionc * sizeof(*actionv));
	}

	/* zero the new tail, then apply per-action defaults */
	memset(&(new_actionv[actionc]), 0, (count - old_count) * sizeof(*actionv));

	for (unsigned int i = old_count; i < count; i++) {
		new_actionv[i].pid_filter = -1;
		new_actionv[i].ucallstack_depth = MAX_UCALLSTACK_FRAMES;
		new_actionv[i].kcallstack_depth = MAX_KCALLSTACK_FRAMES;
	}

	/*
	 * Publish the new table, then free the old one.
	 * NOTE(review): no locking is visible here -- presumably callers are
	 * serialized by the kperf configuration path; confirm before relying
	 * on concurrent use.
	 */
	actionv = new_actionv;
	actionc = count;

	kfree_data(old_actionv, old_count * sizeof(*actionv));

	return 0;
}
746 
747 int
kperf_action_set_ucallstack_depth(unsigned action_id,uint32_t depth)748 kperf_action_set_ucallstack_depth(unsigned action_id, uint32_t depth)
749 {
750 	if ((action_id > actionc) || (action_id == 0)) {
751 		return EINVAL;
752 	}
753 
754 	if (depth > MAX_UCALLSTACK_FRAMES) {
755 		return EINVAL;
756 	}
757 	if (depth < 2) {
758 		return EINVAL;
759 	}
760 
761 	actionv[action_id - 1].ucallstack_depth = depth;
762 
763 	return 0;
764 }
765 
766 int
kperf_action_set_kcallstack_depth(unsigned action_id,uint32_t depth)767 kperf_action_set_kcallstack_depth(unsigned action_id, uint32_t depth)
768 {
769 	if ((action_id > actionc) || (action_id == 0)) {
770 		return EINVAL;
771 	}
772 
773 	if (depth > MAX_KCALLSTACK_FRAMES) {
774 		return EINVAL;
775 	}
776 	if (depth < 1) {
777 		return EINVAL;
778 	}
779 
780 	actionv[action_id - 1].kcallstack_depth = depth;
781 
782 	return 0;
783 }
784 
785 int
kperf_action_get_ucallstack_depth(unsigned action_id,uint32_t * depth_out)786 kperf_action_get_ucallstack_depth(unsigned action_id, uint32_t * depth_out)
787 {
788 	if ((action_id > actionc)) {
789 		return EINVAL;
790 	}
791 
792 	assert(depth_out);
793 
794 	if (action_id == 0) {
795 		*depth_out = MAX_UCALLSTACK_FRAMES;
796 	} else {
797 		*depth_out = actionv[action_id - 1].ucallstack_depth;
798 	}
799 
800 	return 0;
801 }
802 
803 int
kperf_action_get_kcallstack_depth(unsigned action_id,uint32_t * depth_out)804 kperf_action_get_kcallstack_depth(unsigned action_id, uint32_t * depth_out)
805 {
806 	if ((action_id > actionc)) {
807 		return EINVAL;
808 	}
809 
810 	assert(depth_out);
811 
812 	if (action_id == 0) {
813 		*depth_out = MAX_KCALLSTACK_FRAMES;
814 	} else {
815 		*depth_out = actionv[action_id - 1].kcallstack_depth;
816 	}
817 
818 	return 0;
819 }
820