xref: /xnu-10002.1.13/osfmk/kern/kpc_common.c (revision 1031c584a5e37aff177559b9f69dbd3c8c3fd30a)
1 /*
2  * Copyright (c) 2012 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <mach/mach_types.h>
30 #include <machine/machine_routines.h>
31 #include <kern/processor.h>
32 #include <kern/kalloc.h>
33 #include <sys/errno.h>
34 #include <sys/vm.h>
35 #include <kperf/buffer.h>
36 #if MONOTONIC
37 #include <kern/monotonic.h>
38 #endif /* MONOTONIC */
39 #include <kern/thread.h>
40 #if defined(__arm64__)
41 #include <arm/cpu_data_internal.h>
42 #endif
43 
44 #include <kern/kpc.h>
45 
46 #include <kperf/kperf.h>
47 #include <kperf/sample.h>
48 #include <kperf/context.h>
49 #include <kperf/action.h>
50 
/* kperf action IDs associated with each counter (see kpc_set_actionid()) */
uint32_t kpc_actionid[KPC_MAX_COUNTERS];

/* bytes needed for one CPU's worth of counters, one uint64_t per counter */
#define COUNTERBUF_SIZE_PER_CPU (KPC_MAX_COUNTERS * sizeof(uint64_t))
/* bytes needed to hold every counter for every possible logical CPU */
#define COUNTERBUF_SIZE (machine_info.logical_cpu_max * \
	                 COUNTERBUF_SIZE_PER_CPU)

/* locks */
static LCK_GRP_DECLARE(kpc_config_lckgrp, "kpc");
/* serializes configuration changes: config words, periods, action IDs */
static LCK_MTX_DECLARE(kpc_config_lock, &kpc_config_lckgrp);

/* state specifying if all counters have been requested by kperf */
static boolean_t force_all_ctrs = FALSE;

/* power manager */
static kpc_pm_handler_t kpc_pm_handler;    /* PM callout; non-NULL means the PM is a client */
static boolean_t kpc_pm_has_custom_config; /* PM programs its reserved PMCs itself */
static uint64_t kpc_pm_pmc_mask;           /* configurable PMCs reserved by the PM */

boolean_t kpc_context_switch_active = FALSE;
bool kpc_supported = true;
71 
72 static uint64_t *
kpc_percpu_alloc(void)73 kpc_percpu_alloc(void)
74 {
75 	return kalloc_data_tag(COUNTERBUF_SIZE_PER_CPU, Z_WAITOK | Z_ZERO,
76 	           VM_KERN_MEMORY_DIAG);
77 }
78 
79 static void
kpc_percpu_free(uint64_t * buf)80 kpc_percpu_free(uint64_t *buf)
81 {
82 	kfree_data(buf, COUNTERBUF_SIZE_PER_CPU);
83 }
84 
85 boolean_t
kpc_register_cpu(struct cpu_data * cpu_data)86 kpc_register_cpu(struct cpu_data *cpu_data)
87 {
88 	assert(cpu_data);
89 	assert(cpu_data->cpu_kpc_buf[0] == NULL);
90 	assert(cpu_data->cpu_kpc_buf[1] == NULL);
91 	assert(cpu_data->cpu_kpc_shadow == NULL);
92 	assert(cpu_data->cpu_kpc_reload == NULL);
93 
94 	/*
95 	 * Buffers allocated through kpc_counterbuf_alloc() are large enough to
96 	 * store all PMCs values from all CPUs. This mimics the userspace API.
97 	 * This does not suit well with the per-CPU kpc buffers, since:
98 	 *      1. Buffers don't need to be this large.
99 	 *      2. The actual number of CPUs is not known at this point.
100 	 *
101 	 * CPUs are asked to callout into kpc when being registered, we'll
102 	 * allocate the memory here.
103 	 */
104 
105 	if ((cpu_data->cpu_kpc_buf[0] = kpc_percpu_alloc()) == NULL) {
106 		goto error;
107 	}
108 	if ((cpu_data->cpu_kpc_buf[1] = kpc_percpu_alloc()) == NULL) {
109 		goto error;
110 	}
111 	if ((cpu_data->cpu_kpc_shadow = kpc_percpu_alloc()) == NULL) {
112 		goto error;
113 	}
114 	if ((cpu_data->cpu_kpc_reload = kpc_percpu_alloc()) == NULL) {
115 		goto error;
116 	}
117 
118 	/* success */
119 	return TRUE;
120 
121 error:
122 	kpc_unregister_cpu(cpu_data);
123 	return FALSE;
124 }
125 
126 void
kpc_unregister_cpu(struct cpu_data * cpu_data)127 kpc_unregister_cpu(struct cpu_data *cpu_data)
128 {
129 	assert(cpu_data);
130 	if (cpu_data->cpu_kpc_buf[0] != NULL) {
131 		kpc_percpu_free(cpu_data->cpu_kpc_buf[0]);
132 		cpu_data->cpu_kpc_buf[0] = NULL;
133 	}
134 	if (cpu_data->cpu_kpc_buf[1] != NULL) {
135 		kpc_percpu_free(cpu_data->cpu_kpc_buf[1]);
136 		cpu_data->cpu_kpc_buf[1] = NULL;
137 	}
138 	if (cpu_data->cpu_kpc_shadow != NULL) {
139 		kpc_percpu_free(cpu_data->cpu_kpc_shadow);
140 		cpu_data->cpu_kpc_shadow = NULL;
141 	}
142 	if (cpu_data->cpu_kpc_reload != NULL) {
143 		kpc_percpu_free(cpu_data->cpu_kpc_reload);
144 		cpu_data->cpu_kpc_reload = NULL;
145 	}
146 }
147 
148 static void
kpc_task_set_forced_all_ctrs(task_t task,boolean_t state)149 kpc_task_set_forced_all_ctrs(task_t task, boolean_t state)
150 {
151 	assert(task);
152 
153 	task_lock(task);
154 	if (state) {
155 		task->t_kpc |= TASK_KPC_FORCED_ALL_CTRS;
156 	} else {
157 		task->t_kpc &= ~TASK_KPC_FORCED_ALL_CTRS;
158 	}
159 	task_unlock(task);
160 }
161 
162 bool kpc_task_get_forced_all_ctrs(task_t task);
163 
164 bool
kpc_task_get_forced_all_ctrs(task_t task)165 kpc_task_get_forced_all_ctrs(task_t task)
166 {
167 	return task->t_kpc & TASK_KPC_FORCED_ALL_CTRS;
168 }
169 
/*
 * Acquire (val != 0) or release (val == 0) ownership of all counters on
 * behalf of task.  Returns 0 on success, or EACCES if a different task has
 * already forced all counters.
 */
int
kpc_force_all_ctrs(task_t task, int val)
{
	boolean_t new_state = val ? TRUE : FALSE;
	boolean_t old_state = kpc_get_force_all_ctrs();

	/*
	 * Refuse to do the operation if the counters are already forced by
	 * another task.
	 */
	if (kpc_get_force_all_ctrs() && !kpc_task_get_forced_all_ctrs(task)) {
		return EACCES;
	}

	/* nothing to do if the state is not changing */
	if (old_state == new_state) {
		return 0;
	}

#if MONOTONIC
	/* hand counter ownership between monotonic and kpc */
	mt_ownership_change(new_state);
#endif /* MONOTONIC */

	/*
	 * Notify the power manager: the counters become unavailable to it
	 * while forced (argument is availability, hence the inversion).
	 */
	if (kpc_pm_handler) {
		kpc_pm_handler(new_state ? FALSE : TRUE);
	}

	/*
	 * This is a force -- ensure that counters are forced, even if power
	 * management fails to acknowledge it.
	 */
	if (force_all_ctrs != new_state) {
		force_all_ctrs = new_state;
	}

	/* update the task bits */
	kpc_task_set_forced_all_ctrs(task, new_state);

	return 0;
}
211 
/*
 * Callout for power management to acknowledge a change in counter
 * availability (triggered via kpc_pm_handler in kpc_force_all_ctrs()).
 */
void
kpc_pm_acknowledge(boolean_t available_to_pm)
{
	/*
	 * Force-all-counters should still be true when the counters are being
	 * made available to power management and false when counters are going
	 * to be taken away.  Make sure power management isn't playing games
	 * with us.
	 */
	assert(force_all_ctrs == available_to_pm);

	/*
	 * Counters being available means no one is forcing all counters.
	 */
	force_all_ctrs = available_to_pm ? FALSE : TRUE;
}
230 
231 int
kpc_get_force_all_ctrs(void)232 kpc_get_force_all_ctrs(void)
233 {
234 	return force_all_ctrs;
235 }
236 
237 boolean_t
kpc_multiple_clients(void)238 kpc_multiple_clients(void)
239 {
240 	return kpc_pm_handler != NULL;
241 }
242 
243 boolean_t
kpc_controls_fixed_counters(void)244 kpc_controls_fixed_counters(void)
245 {
246 	return !kpc_pm_handler || force_all_ctrs || !kpc_pm_has_custom_config;
247 }
248 
/*
 * Whether kpc (rather than the power manager) controls counter number ctr.
 * ctr is a global index: fixed counters first, then configurable ones.
 */
boolean_t
kpc_controls_counter(uint32_t ctr)
{
	uint64_t pmc_mask = 0ULL;

	assert(ctr < (kpc_fixed_count() + kpc_configurable_count()));

	/* fixed counters are controlled as a group */
	if (ctr < kpc_fixed_count()) {
		return kpc_controls_fixed_counters();
	}

	/*
	 * By default kpc manages all PMCs, but if the Power Manager registered
	 * with custom_config=TRUE, the Power Manager manages its reserved PMCs.
	 * However, kpc takes ownership back if a task acquired all PMCs via
	 * force_all_ctrs.
	 */
	pmc_mask = (1ULL << (ctr - kpc_fixed_count()));
	if ((pmc_mask & kpc_pm_pmc_mask) && kpc_pm_has_custom_config && !force_all_ctrs) {
		return FALSE;
	}

	return TRUE;
}
273 
274 uint32_t
kpc_get_running(void)275 kpc_get_running(void)
276 {
277 	uint64_t pmc_mask = 0;
278 	uint32_t cur_state = 0;
279 
280 	if (kpc_is_running_fixed()) {
281 		cur_state |= KPC_CLASS_FIXED_MASK;
282 	}
283 
284 	pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
285 	if (kpc_is_running_configurable(pmc_mask)) {
286 		cur_state |= KPC_CLASS_CONFIGURABLE_MASK;
287 	}
288 
289 	pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
290 	if ((pmc_mask != 0) && kpc_is_running_configurable(pmc_mask)) {
291 		cur_state |= KPC_CLASS_POWER_MASK;
292 	}
293 
294 	return cur_state;
295 }
296 
/*
 * Read the current counter values of the requested classes on the calling
 * CPU into buf, returning the number of values written.  Values are packed
 * in class order: fixed, configurable, power.
 *
 * Interrupts are disabled so the CPU number (optionally returned through
 * curcpu) and the values all come from the same CPU.  May be called from
 * an IPI.
 */
int
kpc_get_curcpu_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
	int enabled = 0, offset = 0;
	uint64_t pmc_mask = 0ULL;

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	/* grab counters and CPU number as close as possible */
	if (curcpu) {
		*curcpu = cpu_number();
	}

	if (classes & KPC_CLASS_FIXED_MASK) {
		kpc_get_fixed_counters(&buf[offset]);
		offset += kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	ml_set_interrupts_enabled(enabled);

	return offset;
}
334 
335 /* generic counter reading function, public api */
336 int
kpc_get_cpu_counters(boolean_t all_cpus,uint32_t classes,int * curcpu,uint64_t * buf)337 kpc_get_cpu_counters(boolean_t all_cpus, uint32_t classes,
338     int *curcpu, uint64_t *buf)
339 {
340 	assert(buf);
341 
342 	/*
343 	 * Unlike reading the current CPU counters, reading counters from all
344 	 * CPUs is architecture dependent. This allows kpc to make the most of
345 	 * the platform if memory mapped registers is supported.
346 	 */
347 	if (all_cpus) {
348 		return kpc_get_all_cpus_counters(classes, curcpu, buf);
349 	} else {
350 		return kpc_get_curcpu_counters(classes, curcpu, buf);
351 	}
352 }
353 
354 int
kpc_get_shadow_counters(boolean_t all_cpus,uint32_t classes,int * curcpu,uint64_t * buf)355 kpc_get_shadow_counters(boolean_t all_cpus, uint32_t classes,
356     int *curcpu, uint64_t *buf)
357 {
358 	int curcpu_id = cpu_number();
359 	uint32_t cfg_count = kpc_configurable_count(), offset = 0;
360 	uint64_t pmc_mask = 0ULL;
361 	boolean_t enabled;
362 
363 	assert(buf);
364 
365 	enabled = ml_set_interrupts_enabled(FALSE);
366 
367 	curcpu_id = cpu_number();
368 	if (curcpu) {
369 		*curcpu = curcpu_id;
370 	}
371 
372 	for (int cpu = 0; cpu < machine_info.logical_cpu_max; ++cpu) {
373 		/* filter if the caller did not request all cpus */
374 		if (!all_cpus && (cpu != curcpu_id)) {
375 			continue;
376 		}
377 
378 		if (classes & KPC_CLASS_FIXED_MASK) {
379 			uint32_t count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
380 			memcpy(&buf[offset], &FIXED_SHADOW_CPU(cpu, 0), count * sizeof(uint64_t));
381 			offset += count;
382 		}
383 
384 		if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
385 			pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
386 
387 			for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr) {
388 				if ((1ULL << cfg_ctr) & pmc_mask) {
389 					buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
390 				}
391 			}
392 		}
393 
394 		if (classes & KPC_CLASS_POWER_MASK) {
395 			pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
396 
397 			for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr) {
398 				if ((1ULL << cfg_ctr) & pmc_mask) {
399 					buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
400 				}
401 			}
402 		}
403 	}
404 
405 	ml_set_interrupts_enabled(enabled);
406 
407 	return offset;
408 }
409 
410 uint32_t
kpc_get_counter_count(uint32_t classes)411 kpc_get_counter_count(uint32_t classes)
412 {
413 	uint32_t count = 0;
414 
415 	if (classes & KPC_CLASS_FIXED_MASK) {
416 		count += kpc_fixed_count();
417 	}
418 
419 	if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
420 		uint64_t pmc_msk = kpc_get_configurable_pmc_mask(classes);
421 		uint32_t pmc_cnt = kpc_popcount(pmc_msk);
422 		count += pmc_cnt;
423 	}
424 
425 	return count;
426 }
427 
428 uint32_t
kpc_get_config_count(uint32_t classes)429 kpc_get_config_count(uint32_t classes)
430 {
431 	uint32_t count = 0;
432 
433 	if (classes & KPC_CLASS_FIXED_MASK) {
434 		count += kpc_fixed_config_count();
435 	}
436 
437 	if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
438 		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(classes);
439 		count += kpc_configurable_config_count(pmc_mask);
440 	}
441 
442 	if ((classes & KPC_CLASS_RAWPMU_MASK) &&
443 	    (!kpc_multiple_clients() || force_all_ctrs)) {
444 		count += kpc_rawpmu_config_count();
445 	}
446 
447 	return count;
448 }
449 
/*
 * Copy the current configuration words of the requested classes into
 * current_config, packed in class order (fixed, configurable, power,
 * rawpmu).  Returns 0, or EPERM when RAWPMU config is requested while the
 * counters are shared with the power manager and not forced.
 */
int
kpc_get_config(uint32_t classes, kpc_config_t *current_config)
{
	uint32_t count = 0;

	assert(current_config);

	if (classes & KPC_CLASS_FIXED_MASK) {
		kpc_get_fixed_config(&current_config[count]);
		count += kpc_get_config_count(KPC_CLASS_FIXED_MASK);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
		kpc_get_configurable_config(&current_config[count], pmc_mask);
		count += kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
		kpc_get_configurable_config(&current_config[count], pmc_mask);
		count += kpc_get_config_count(KPC_CLASS_POWER_MASK);
	}

	if (classes & KPC_CLASS_RAWPMU_MASK) {
		/*
		 * Client shouldn't ask for config words that aren't available.
		 * Most likely, they'd misinterpret the returned buffer if we
		 * allowed this.
		 */
		if (kpc_multiple_clients() && !force_all_ctrs) {
			return EPERM;
		}
		kpc_get_rawpmu_config(&current_config[count]);
		count += kpc_get_config_count(KPC_CLASS_RAWPMU_MASK);
	}

	return 0;
}
487 
/*
 * Apply new configuration words for the requested classes.
 *
 * Returns 0 on success (or the machine layer's error), EPERM when RAWPMU
 * configuration is requested while the counters are shared with the power
 * manager and not forced, or when both the configurable and power classes
 * are requested at once.
 */
int
kpc_set_config(uint32_t classes, kpc_config_t *configv)
{
	int ret = 0;
	struct kpc_config_remote mp_config = {
		.classes = classes, .configv = configv,
		.pmc_mask = kpc_get_configurable_pmc_mask(classes)
	};

	assert(configv);

	/* don't allow RAWPMU configuration when sharing counters */
	if ((classes & KPC_CLASS_RAWPMU_MASK) && kpc_multiple_clients() &&
	    !force_all_ctrs) {
		return EPERM;
	}

	/* no clients have the right to modify both classes */
	if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) &&
	    (classes & (KPC_CLASS_POWER_MASK))) {
		return EPERM;
	}

	lck_mtx_lock(&kpc_config_lock);

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK) {
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;
	}

	ret = kpc_set_config_arch( &mp_config );

	lck_mtx_unlock(&kpc_config_lock);

	return ret;
}
524 
525 uint32_t
kpc_get_counterbuf_size(void)526 kpc_get_counterbuf_size(void)
527 {
528 	return COUNTERBUF_SIZE;
529 }
530 
531 /* allocate a buffer large enough for all possible counters */
532 uint64_t *
kpc_counterbuf_alloc(void)533 kpc_counterbuf_alloc(void)
534 {
535 	return kalloc_data_tag(COUNTERBUF_SIZE, Z_WAITOK | Z_ZERO,
536 	           VM_KERN_MEMORY_DIAG);
537 }
538 
539 void
kpc_counterbuf_free(uint64_t * buf)540 kpc_counterbuf_free(uint64_t *buf)
541 {
542 	kfree_data(buf, COUNTERBUF_SIZE);
543 }
544 
/*
 * Run the kperf action `actionid` in response to a counter interrupt (PMI).
 * counter/config/flags identify which counter fired and how it was
 * configured; count is the counter value and pc the interrupted
 * instruction address.  Emits kdebug start/end events around the sample.
 */
void
kpc_sample_kperf(uint32_t actionid, uint32_t counter, uint64_t config,
    uint64_t count, uintptr_t pc, kperf_kpc_flags_t flags)
{
	struct kperf_sample sbuf;

	/*
	 * Pack config, counter (<< 32) and flags (<< 48) into one descriptor
	 * word for tracing; assumes config fits in 32 bits — TODO confirm.
	 */
	uint64_t desc = config | (uint64_t)counter << 32 | (uint64_t)flags << 48;

	BUF_DATA(PERF_KPC_HNDLR | DBG_FUNC_START, desc, count, pc);

	thread_t thread = current_thread();
	task_t task = get_threadtask(thread);

	/* sample in the context of the interrupted thread and its task */
	struct kperf_context ctx = {
		.cur_thread = thread,
		.cur_task = task,
		.cur_pid = task_pid(task),
		.trigger_type = TRIGGER_TYPE_PMI,
		.trigger_id = 0,
	};

	int r = kperf_sample(&sbuf, &ctx, actionid, SAMPLE_FLAG_PEND_USER);

	BUF_INFO(PERF_KPC_HNDLR | DBG_FUNC_END, r);
}
570 
571 
/*
 * Program PMI periods for the requested classes.  val holds one period per
 * counter, in class order; the machine layer stores them as reload values
 * (counter max - period, see kpc_get_period()).
 *
 * Returns EPERM when both the configurable and power classes are requested
 * at once, or when fixed-counter periods are requested but kpc does not
 * control the fixed counters; EINVAL when fixed periods are requested on a
 * build without FIXED_COUNTER_SHADOW.
 */
int
kpc_set_period(uint32_t classes, uint64_t *val)
{
	struct kpc_config_remote mp_config = {
		.classes = classes, .configv = val,
		.pmc_mask = kpc_get_configurable_pmc_mask(classes)
	};

	assert(val);

	/* no clients have the right to modify both classes */
	if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) &&
	    (classes & (KPC_CLASS_POWER_MASK))) {
		return EPERM;
	}

	lck_mtx_lock(&kpc_config_lock);

#ifdef FIXED_COUNTER_SHADOW
	if ((classes & KPC_CLASS_FIXED_MASK) && !kpc_controls_fixed_counters()) {
		lck_mtx_unlock(&kpc_config_lock);
		return EPERM;
	}
# else
	if (classes & KPC_CLASS_FIXED_MASK) {
		lck_mtx_unlock(&kpc_config_lock);
		return EINVAL;
	}
#endif

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK) {
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;
	}

	/* NOTE(review): logs the class mask, not a period — debug leftover? */
	kprintf("setting period %u\n", classes);
	kpc_set_period_arch( &mp_config );

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}
614 
/*
 * Read back the PMI periods of the requested classes into val, one per
 * counter, in class order (fixed, configurable, power).  Periods are
 * reconstructed from the stored reload values (period = max - reload).
 * Always returns 0.
 */
int
kpc_get_period(uint32_t classes, uint64_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		/* convert reload values to periods */
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		for (uint32_t i = 0; i < count; ++i) {
			*val++ = kpc_fixed_max() - FIXED_RELOAD(i);
		}
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		/* only the PMCs assigned to this class are reported */
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		/* convert reload values to periods */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				*val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
			}
		}
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		/* convert reload values to periods */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				*val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
			}
		}
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}
661 
/*
 * Associate kperf action IDs with the counters of the requested classes.
 * val holds one action ID per counter, packed in class order (fixed,
 * configurable, power).  Always returns 0.
 */
int
kpc_set_actionid(uint32_t classes, uint32_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	/* NOTE: what happens if a pmi occurs while actionids are being
	 * set is undefined. */
	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		memcpy(&FIXED_ACTIONID(0), val, count * sizeof(uint32_t));
		val += count;
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		/* only the PMCs assigned to this class consume input values */
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				CONFIGURABLE_ACTIONID(i) = *val++;
			}
		}
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				CONFIGURABLE_ACTIONID(i) = *val++;
			}
		}
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}
706 
/*
 * Read back the kperf action IDs of the counters in the requested classes
 * into val, packed in class order (fixed, configurable, power).  Always
 * returns 0.
 */
int
kpc_get_actionid(uint32_t classes, uint32_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		memcpy(val, &FIXED_ACTIONID(0), count * sizeof(uint32_t));
		val += count;
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		/* only the PMCs assigned to this class are reported */
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				*val++ = CONFIGURABLE_ACTIONID(i);
			}
		}
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				*val++ = CONFIGURABLE_ACTIONID(i);
			}
		}
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}
749 
750 int
kpc_set_running(uint32_t classes)751 kpc_set_running(uint32_t classes)
752 {
753 	uint32_t all_cfg_classes = KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK;
754 	struct kpc_running_remote mp_config = {
755 		.classes = classes, .cfg_target_mask = 0ULL, .cfg_state_mask = 0ULL
756 	};
757 
758 	/* target all available PMCs */
759 	mp_config.cfg_target_mask = kpc_get_configurable_pmc_mask(all_cfg_classes);
760 
761 	/* translate the power class for the machine layer */
762 	if (classes & KPC_CLASS_POWER_MASK) {
763 		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;
764 	}
765 
766 	/* generate the state of each configurable PMCs */
767 	mp_config.cfg_state_mask = kpc_get_configurable_pmc_mask(classes);
768 
769 	return kpc_set_running_arch(&mp_config);
770 }
771 
/*
 * Register the power manager's callout, reserving its default set of
 * configurable PMCs with a custom configuration.
 *
 * NOTE(review): 0x38 selects configurable PMCs 3-5 (bits 3, 4, 5) —
 * presumably the PM's fixed allocation on this platform; confirm against
 * the power manager's expectations.
 */
boolean_t
kpc_register_pm_handler(kpc_pm_handler_t handler)
{
	return kpc_reserve_pm_counters(0x38, handler, TRUE);
}
777 
/*
 * Reserve the configurable PMCs in pmc_mask for the power manager and
 * install its handler.  custom_config indicates the PM programs its
 * reserved PMCs itself rather than through kpc.  Only one PM client may
 * register (asserted).
 *
 * Returns TRUE when the reserved counters are free for the PM to use,
 * FALSE when a task currently holds them via force-all-counters.
 */
boolean_t
kpc_reserve_pm_counters(uint64_t pmc_mask, kpc_pm_handler_t handler,
    boolean_t custom_config)
{
	uint64_t all_mask = (1ULL << kpc_configurable_count()) - 1;
	uint64_t req_mask = 0ULL;

	/* pre-condition */
	assert(handler != NULL);
	assert(kpc_pm_handler == NULL);

	/* check number of counters requested */
	req_mask = (pmc_mask & all_mask); /* clamp to PMCs that exist */
	assert(kpc_popcount(req_mask) <= kpc_configurable_count());

	/* save the power manager states */
	kpc_pm_has_custom_config = custom_config;
	kpc_pm_pmc_mask = req_mask;
	kpc_pm_handler = handler;

	printf("kpc: pm registered pmc_mask=%llx custom_config=%d\n",
	    req_mask, custom_config);

	/* post-condition: every configurable PMC is owned by exactly one side */
	{
		uint32_t cfg_count = kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);
		uint32_t pwr_count = kpc_popcount(kpc_pm_pmc_mask);
#pragma unused(cfg_count, pwr_count)
		assert((cfg_count + pwr_count) == kpc_configurable_count());
	}

	return force_all_ctrs ? FALSE : TRUE;
}
811 
812 void
kpc_release_pm_counters(void)813 kpc_release_pm_counters(void)
814 {
815 	/* pre-condition */
816 	assert(kpc_pm_handler != NULL);
817 
818 	/* release the counters */
819 	kpc_pm_has_custom_config = FALSE;
820 	kpc_pm_pmc_mask = 0ULL;
821 	kpc_pm_handler = NULL;
822 
823 	printf("kpc: pm released counters\n");
824 
825 	/* post-condition */
826 	assert(kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK) == kpc_configurable_count());
827 }
828 
/* Count the number of set bits in value (Kernighan's method). */
uint8_t
kpc_popcount(uint64_t value)
{
	uint8_t bits = 0;

	while (value != 0) {
		value &= value - 1;     /* clear the lowest set bit */
		bits++;
	}
	return bits;
}
834 
/*
 * Compute the mask of configurable PMCs belonging to the requested classes
 * (KPC_CLASS_CONFIGURABLE_MASK and/or KPC_CLASS_POWER_MASK).  The split
 * between the two classes follows the power manager's reservation
 * (kpc_pm_pmc_mask) and force_all_ctrs; the resulting per-class masks are
 * always disjoint (asserted below).
 */
uint64_t
kpc_get_configurable_pmc_mask(uint32_t classes)
{
	uint32_t configurable_count = kpc_configurable_count();
	uint64_t cfg_mask = 0ULL, pwr_mask = 0ULL, all_cfg_pmcs_mask = 0ULL;

	/* not configurable classes or no configurable counters */
	if (((classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) == 0) ||
	    (configurable_count == 0)) {
		goto exit;
	}

	/* < 64 so the shift below cannot overflow the 64-bit mask */
	assert(configurable_count < 64);
	all_cfg_pmcs_mask = (1ULL << configurable_count) - 1;

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		/* forcing gives the configurable class every PMC; otherwise
		 * it gets everything the PM has not reserved */
		if (force_all_ctrs == TRUE) {
			cfg_mask |= all_cfg_pmcs_mask;
		} else {
			cfg_mask |= (~kpc_pm_pmc_mask) & all_cfg_pmcs_mask;
		}
	}

	/*
	 * The power class exists iff:
	 *      - No tasks acquired all PMCs
	 *      - PM registered and uses kpc to interact with PMCs
	 */
	if ((force_all_ctrs == FALSE) &&
	    (kpc_pm_handler != NULL) &&
	    (kpc_pm_has_custom_config == FALSE) &&
	    (classes & KPC_CLASS_POWER_MASK)) {
		pwr_mask |= kpc_pm_pmc_mask & all_cfg_pmcs_mask;
	}

exit:
	/* post-conditions */
	assert(((cfg_mask | pwr_mask) & (~all_cfg_pmcs_mask)) == 0 );
	assert( kpc_popcount(cfg_mask | pwr_mask) <= kpc_configurable_count());
	assert((cfg_mask & pwr_mask) == 0ULL );

	return cfg_mask | pwr_mask;
}
878