xref: /xnu-11215.41.3/osfmk/kern/kpc_common.c (revision 33de042d024d46de5ff4e89f2471de6608e37fa4)
1 /*
2  * Copyright (c) 2012 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <mach/mach_types.h>
30 #include <machine/machine_routines.h>
31 #include <kern/processor.h>
32 #include <kern/kalloc.h>
33 #include <sys/errno.h>
34 #include <sys/vm.h>
35 #include <kperf/buffer.h>
36 #include <kern/monotonic.h>
37 #include <kern/thread.h>
38 
39 #include <kern/kpc.h>
40 
41 #include <kperf/kperf.h>
42 #include <kperf/sample.h>
43 #include <kperf/context.h>
44 #include <kperf/action.h>
45 
#if CONFIG_CPU_COUNTERS

/*
 * kperf action IDs associated with counters; written via the
 * FIXED_ACTIONID()/CONFIGURABLE_ACTIONID() macros (see kpc_set_actionid).
 */
uint32_t kpc_actionid[KPC_MAX_COUNTERS];

/* bytes needed to hold one CPU's worth of 64-bit counter values */
#define COUNTERBUF_SIZE_PER_CPU (KPC_MAX_COUNTERS * sizeof(uint64_t))
/* bytes needed to hold every counter on every possible logical CPU */
#define COUNTERBUF_SIZE (machine_info.logical_cpu_max * \
	                 COUNTERBUF_SIZE_PER_CPU)

/* locks */
static LCK_GRP_DECLARE(kpc_config_lckgrp, "kpc");
/* serializes configuration, period, and action-ID updates */
static LCK_MTX_DECLARE(kpc_config_lock, &kpc_config_lckgrp);

/* state specifying if all counters have been requested by kperf */
static boolean_t force_all_ctrs = FALSE;

/* power manager registration state (see kpc_reserve_pm_counters) */
static kpc_pm_handler_t kpc_pm_handler;     /* non-NULL once PM registers */
static boolean_t kpc_pm_has_custom_config;  /* PM configures its PMCs itself */
static uint64_t kpc_pm_pmc_mask;            /* configurable PMCs reserved by PM */

/* NOTE(review): set/read by context-switch code outside this file */
boolean_t kpc_context_switch_active = FALSE;
bool kpc_supported = true;
68 
/*
 * Allocate one CPU's worth of zero-filled counter storage
 * (KPC_MAX_COUNTERS 64-bit slots).  Returns NULL on failure.
 */
static uint64_t *
kpc_percpu_alloc(void)
{
	return kalloc_data_tag(COUNTERBUF_SIZE_PER_CPU, Z_WAITOK | Z_ZERO,
	           VM_KERN_MEMORY_DIAG);
}
75 
/* Free a buffer obtained from kpc_percpu_alloc(); NULL is a no-op. */
static void
kpc_percpu_free(uint64_t *buf)
{
	kfree_data(buf, COUNTERBUF_SIZE_PER_CPU);
}
81 
82 void
kpc_register_cpu(struct cpu_data * cpu_data)83 kpc_register_cpu(struct cpu_data *cpu_data)
84 {
85 	assert(cpu_data);
86 	assert(cpu_data->cpu_kpc_buf[0] == NULL);
87 	assert(cpu_data->cpu_kpc_buf[1] == NULL);
88 	assert(cpu_data->cpu_kpc_shadow == NULL);
89 	assert(cpu_data->cpu_kpc_reload == NULL);
90 
91 	/*
92 	 * Buffers allocated through kpc_counterbuf_alloc() are large enough to
93 	 * store all PMCs values from all CPUs. This mimics the userspace API.
94 	 * This does not suit well with the per-CPU kpc buffers, since:
95 	 *      1. Buffers don't need to be this large.
96 	 *      2. The actual number of CPUs is not known at this point.
97 	 *
98 	 * CPUs are asked to callout into kpc when being registered, we'll
99 	 * allocate the memory here.
100 	 */
101 
102 	if ((cpu_data->cpu_kpc_buf[0] = kpc_percpu_alloc()) == NULL) {
103 		goto error;
104 	}
105 	if ((cpu_data->cpu_kpc_buf[1] = kpc_percpu_alloc()) == NULL) {
106 		goto error;
107 	}
108 	if ((cpu_data->cpu_kpc_shadow = kpc_percpu_alloc()) == NULL) {
109 		goto error;
110 	}
111 	if ((cpu_data->cpu_kpc_reload = kpc_percpu_alloc()) == NULL) {
112 		goto error;
113 	}
114 
115 	/* success */
116 	return;
117 
118 error:
119 	panic("kpc_percpu_alloc failed");
120 }
121 
122 void
kpc_unregister_cpu(struct cpu_data * cpu_data)123 kpc_unregister_cpu(struct cpu_data *cpu_data)
124 {
125 	assert(cpu_data);
126 	if (cpu_data->cpu_kpc_buf[0] != NULL) {
127 		kpc_percpu_free(cpu_data->cpu_kpc_buf[0]);
128 		cpu_data->cpu_kpc_buf[0] = NULL;
129 	}
130 	if (cpu_data->cpu_kpc_buf[1] != NULL) {
131 		kpc_percpu_free(cpu_data->cpu_kpc_buf[1]);
132 		cpu_data->cpu_kpc_buf[1] = NULL;
133 	}
134 	if (cpu_data->cpu_kpc_shadow != NULL) {
135 		kpc_percpu_free(cpu_data->cpu_kpc_shadow);
136 		cpu_data->cpu_kpc_shadow = NULL;
137 	}
138 	if (cpu_data->cpu_kpc_reload != NULL) {
139 		kpc_percpu_free(cpu_data->cpu_kpc_reload);
140 		cpu_data->cpu_kpc_reload = NULL;
141 	}
142 }
143 
/*
 * Set or clear the forced-all-counters bit on a task's t_kpc flags,
 * under the task lock.
 */
static void
kpc_task_set_forced_all_ctrs(task_t task, boolean_t state)
{
	assert(task);

	task_lock(task);
	if (state) {
		task->t_kpc |= TASK_KPC_FORCED_ALL_CTRS;
	} else {
		task->t_kpc &= ~TASK_KPC_FORCED_ALL_CTRS;
	}
	task_unlock(task);
}
157 
158 bool kpc_task_get_forced_all_ctrs(task_t task);
159 
160 bool
kpc_task_get_forced_all_ctrs(task_t task)161 kpc_task_get_forced_all_ctrs(task_t task)
162 {
163 	return task->t_kpc & TASK_KPC_FORCED_ALL_CTRS;
164 }
165 
166 int
kpc_force_all_ctrs(task_t task,int val)167 kpc_force_all_ctrs(task_t task, int val)
168 {
169 	boolean_t new_state = val ? TRUE : FALSE;
170 	boolean_t old_state = kpc_get_force_all_ctrs();
171 
172 	/*
173 	 * Refuse to do the operation if the counters are already forced by
174 	 * another task.
175 	 */
176 	if (kpc_get_force_all_ctrs() && !kpc_task_get_forced_all_ctrs(task)) {
177 		return EACCES;
178 	}
179 
180 	/* nothing to do if the state is not changing */
181 	if (old_state == new_state) {
182 		return 0;
183 	}
184 
185 #if CONFIG_CPU_COUNTERS
186 	mt_ownership_change(new_state);
187 #endif /* CONFIG_CPU_COUNTERS */
188 
189 	/* notify the power manager */
190 	if (kpc_pm_handler) {
191 		kpc_pm_handler(new_state ? FALSE : TRUE);
192 	}
193 
194 	/*
195 	 * This is a force -- ensure that counters are forced, even if power
196 	 * management fails to acknowledge it.
197 	 */
198 	if (force_all_ctrs != new_state) {
199 		force_all_ctrs = new_state;
200 	}
201 
202 	/* update the task bits */
203 	kpc_task_set_forced_all_ctrs(task, new_state);
204 
205 	return 0;
206 }
207 
/*
 * Called by the power manager to acknowledge a counter-ownership change
 * initiated by kpc_force_all_ctrs().  `available_to_pm` is TRUE when the
 * counters are being handed back to PM, FALSE when they are being taken
 * away from it.
 */
void
kpc_pm_acknowledge(boolean_t available_to_pm)
{
	/*
	 * Force-all-counters should still be true when the counters are being
	 * made available to power management and false when counters are going
	 * to be taken away.
	 */
	assert(force_all_ctrs == available_to_pm);
	/*
	 * Make sure power management isn't playing games with us.
	 */

	/*
	 * Counters being available means no one is forcing all counters.
	 */
	force_all_ctrs = available_to_pm ? FALSE : TRUE;
}
226 
/* Report whether some task currently forces ownership of all counters. */
int
kpc_get_force_all_ctrs(void)
{
	return force_all_ctrs;
}
232 
/*
 * TRUE when the power manager has registered a handler, i.e. kpc is
 * sharing the PMCs with another client.
 */
boolean_t
kpc_multiple_clients(void)
{
	return kpc_pm_handler != NULL;
}
238 
239 boolean_t
kpc_controls_fixed_counters(void)240 kpc_controls_fixed_counters(void)
241 {
242 	return !kpc_pm_handler || force_all_ctrs || !kpc_pm_has_custom_config;
243 }
244 
245 boolean_t
kpc_controls_counter(uint32_t ctr)246 kpc_controls_counter(uint32_t ctr)
247 {
248 	uint64_t pmc_mask = 0ULL;
249 
250 	assert(ctr < (kpc_fixed_count() + kpc_configurable_count()));
251 
252 	if (ctr < kpc_fixed_count()) {
253 		return kpc_controls_fixed_counters();
254 	}
255 
256 	/*
257 	 * By default kpc manages all PMCs, but if the Power Manager registered
258 	 * with custom_config=TRUE, the Power Manager manages its reserved PMCs.
259 	 * However, kpc takes ownership back if a task acquired all PMCs via
260 	 * force_all_ctrs.
261 	 */
262 	pmc_mask = (1ULL << (ctr - kpc_fixed_count()));
263 	if ((pmc_mask & kpc_pm_pmc_mask) && kpc_pm_has_custom_config && !force_all_ctrs) {
264 		return FALSE;
265 	}
266 
267 	return TRUE;
268 }
269 
270 uint32_t
kpc_get_running(void)271 kpc_get_running(void)
272 {
273 	uint64_t pmc_mask = 0;
274 	uint32_t cur_state = 0;
275 
276 	if (kpc_is_running_fixed()) {
277 		cur_state |= KPC_CLASS_FIXED_MASK;
278 	}
279 
280 	pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
281 	if (kpc_is_running_configurable(pmc_mask)) {
282 		cur_state |= KPC_CLASS_CONFIGURABLE_MASK;
283 	}
284 
285 	pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
286 	if ((pmc_mask != 0) && kpc_is_running_configurable(pmc_mask)) {
287 		cur_state |= KPC_CLASS_POWER_MASK;
288 	}
289 
290 	return cur_state;
291 }
292 
/* may be called from an IPI */
/*
 * Read the current CPU's counters for the requested classes into buf, in
 * class order (fixed, configurable, power).  Optionally reports the CPU
 * number through *curcpu.  Returns the number of values written.
 * Interrupts are disabled so the thread cannot migrate mid-read.
 */
int
kpc_get_curcpu_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
	int enabled = 0, offset = 0;
	uint64_t pmc_mask = 0ULL;

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	/* grab counters and CPU number as close as possible */
	if (curcpu) {
		*curcpu = cpu_number();
	}

	if (classes & KPC_CLASS_FIXED_MASK) {
		kpc_get_fixed_counters(&buf[offset]);
		offset += kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	ml_set_interrupts_enabled(enabled);

	return offset;
}
330 
331 /* generic counter reading function */
332 int
kpc_get_cpu_counters(boolean_t all_cpus,uint32_t classes,int * curcpu,uint64_t * buf)333 kpc_get_cpu_counters(boolean_t all_cpus, uint32_t classes,
334     int *curcpu, uint64_t *buf)
335 {
336 	assert(buf);
337 
338 	/*
339 	 * Unlike reading the current CPU counters, reading counters from all
340 	 * CPUs is architecture dependent. This allows kpc to make the most of
341 	 * the platform if memory mapped registers is supported.
342 	 */
343 	if (all_cpus) {
344 		return kpc_get_all_cpus_counters(classes, curcpu, buf);
345 	} else {
346 		return kpc_get_curcpu_counters(classes, curcpu, buf);
347 	}
348 }
349 
350 int
kpc_get_shadow_counters(boolean_t all_cpus,uint32_t classes,int * curcpu,uint64_t * buf)351 kpc_get_shadow_counters(boolean_t all_cpus, uint32_t classes,
352     int *curcpu, uint64_t *buf)
353 {
354 	int curcpu_id = cpu_number();
355 	uint32_t cfg_count = kpc_configurable_count(), offset = 0;
356 	uint64_t pmc_mask = 0ULL;
357 	boolean_t enabled;
358 
359 	assert(buf);
360 
361 	enabled = ml_set_interrupts_enabled(FALSE);
362 
363 	curcpu_id = cpu_number();
364 	if (curcpu) {
365 		*curcpu = curcpu_id;
366 	}
367 
368 	for (int cpu = 0; cpu < machine_info.logical_cpu_max; ++cpu) {
369 		/* filter if the caller did not request all cpus */
370 		if (!all_cpus && (cpu != curcpu_id)) {
371 			continue;
372 		}
373 
374 		if (classes & KPC_CLASS_FIXED_MASK) {
375 			uint32_t count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
376 			memcpy(&buf[offset], &FIXED_SHADOW_CPU(cpu, 0), count * sizeof(uint64_t));
377 			offset += count;
378 		}
379 
380 		if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
381 			pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
382 
383 			for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr) {
384 				if ((1ULL << cfg_ctr) & pmc_mask) {
385 					buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
386 				}
387 			}
388 		}
389 
390 		if (classes & KPC_CLASS_POWER_MASK) {
391 			pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
392 
393 			for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr) {
394 				if ((1ULL << cfg_ctr) & pmc_mask) {
395 					buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
396 				}
397 			}
398 		}
399 	}
400 
401 	ml_set_interrupts_enabled(enabled);
402 
403 	return offset;
404 }
405 
406 uint32_t
kpc_get_counter_count(uint32_t classes)407 kpc_get_counter_count(uint32_t classes)
408 {
409 	uint32_t count = 0;
410 
411 	if (classes & KPC_CLASS_FIXED_MASK) {
412 		count += kpc_fixed_count();
413 	}
414 
415 	if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
416 		uint64_t pmc_msk = kpc_get_configurable_pmc_mask(classes);
417 		uint32_t pmc_cnt = kpc_popcount(pmc_msk);
418 		count += pmc_cnt;
419 	}
420 
421 	return count;
422 }
423 
424 uint32_t
kpc_get_config_count(uint32_t classes)425 kpc_get_config_count(uint32_t classes)
426 {
427 	uint32_t count = 0;
428 
429 	if (classes & KPC_CLASS_FIXED_MASK) {
430 		count += kpc_fixed_config_count();
431 	}
432 
433 	if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
434 		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(classes);
435 		count += kpc_configurable_config_count(pmc_mask);
436 	}
437 
438 	if ((classes & KPC_CLASS_RAWPMU_MASK) &&
439 	    (!kpc_multiple_clients() || force_all_ctrs)) {
440 		count += kpc_rawpmu_config_count();
441 	}
442 
443 	return count;
444 }
445 
/*
 * Copy the current configuration words for the requested classes into
 * current_config, in class order (fixed, configurable, power, rawpmu).
 * Returns EPERM if RAWPMU config is requested while counters are shared
 * with the power manager and not forced; 0 otherwise.
 */
int
kpc_get_config(uint32_t classes, kpc_config_t *current_config)
{
	uint32_t count = 0;

	assert(current_config);

	if (classes & KPC_CLASS_FIXED_MASK) {
		kpc_get_fixed_config(&current_config[count]);
		count += kpc_get_config_count(KPC_CLASS_FIXED_MASK);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
		kpc_get_configurable_config(&current_config[count], pmc_mask);
		count += kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
		kpc_get_configurable_config(&current_config[count], pmc_mask);
		count += kpc_get_config_count(KPC_CLASS_POWER_MASK);
	}

	if (classes & KPC_CLASS_RAWPMU_MASK) {
		// Client shouldn't ask for config words that aren't available.
		// Most likely, they'd misinterpret the returned buffer if we
		// allowed this.
		if (kpc_multiple_clients() && !force_all_ctrs) {
			return EPERM;
		}
		kpc_get_rawpmu_config(&current_config[count]);
		count += kpc_get_config_count(KPC_CLASS_RAWPMU_MASK);
	}

	return 0;
}
483 
/*
 * Common implementation for setting counter configuration.
 *
 * `secure` is forwarded to the machine layer in mp_config: kernel callers
 * pass true, external (userspace-driven) callers pass false.
 * Returns EPERM when RAWPMU is requested while counters are shared (and
 * not forced), or when both the configurable and power classes are
 * targeted at once; otherwise the machine layer's result.
 */
static int
_kpc_set_config_internal(uint32_t classes, kpc_config_t *configv, bool secure)
{
	int ret = 0;
	struct kpc_config_remote mp_config = {
		.classes = classes, .configv = configv,
		.pmc_mask = kpc_get_configurable_pmc_mask(classes),
		.secure = secure,
	};

	assert(configv);

	/* don't allow RAWPMU configuration when sharing counters */
	if ((classes & KPC_CLASS_RAWPMU_MASK) && kpc_multiple_clients() &&
	    !force_all_ctrs) {
		return EPERM;
	}

	/* no clients have the right to modify both classes */
	if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) &&
	    (classes & (KPC_CLASS_POWER_MASK))) {
		return EPERM;
	}

	lck_mtx_lock(&kpc_config_lock);

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK) {
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;
	}

	ret = kpc_set_config_arch( &mp_config );

	lck_mtx_unlock(&kpc_config_lock);

	return ret;
}
521 
/* Set counter configuration on behalf of a kernel client (secure = true). */
int
kpc_set_config_kernel(uint32_t classes, kpc_config_t * configv)
{
	return _kpc_set_config_internal(classes, configv, true);
}
527 
int kpc_set_config_external(uint32_t classes, kpc_config_t *configv);
/* Set counter configuration for an external client (secure = false). */
int
kpc_set_config_external(uint32_t classes, kpc_config_t *configv)
{
	return _kpc_set_config_internal(classes, configv, false);
}
534 
/* Size in bytes of a buffer able to hold all counters for all CPUs. */
uint32_t
kpc_get_counterbuf_size(void)
{
	return COUNTERBUF_SIZE;
}
540 
/* allocate a buffer large enough for all possible counters */
/* Returns a zero-filled buffer of COUNTERBUF_SIZE bytes, or NULL. */
uint64_t *
kpc_counterbuf_alloc(void)
{
	return kalloc_data_tag(COUNTERBUF_SIZE, Z_WAITOK | Z_ZERO,
	           VM_KERN_MEMORY_DIAG);
}
548 
/* Free a buffer obtained from kpc_counterbuf_alloc(); NULL is a no-op. */
void
kpc_counterbuf_free(uint64_t *buf)
{
	kfree_data(buf, COUNTERBUF_SIZE);
}
554 
/*
 * Deliver a kperf sample in response to a PMI on `counter`, running the
 * kperf action `actionid` for the current thread/task.  `desc` packs the
 * counter config (low 32 bits), counter number (bits 32..47), and flags
 * (bits 48 and up) into a single tracepoint argument.
 */
void
kpc_sample_kperf(uint32_t actionid, uint32_t counter, uint64_t config,
    uint64_t count, uintptr_t pc, kperf_kpc_flags_t flags)
{
	/* stack sample buffer, filled in by kperf_sample() */
	struct kperf_sample sbuf;

	uint64_t desc = config | (uint64_t)counter << 32 | (uint64_t)flags << 48;

	BUF_DATA(PERF_KPC_HNDLR | DBG_FUNC_START, desc, count, pc);

	thread_t thread = current_thread();
	task_t task = get_threadtask(thread);

	struct kperf_context ctx = {
		.cur_thread = thread,
		.cur_task = task,
		.cur_pid = task_pid(task),
		.trigger_type = TRIGGER_TYPE_PMI,
		.trigger_id = 0,
	};

	int r = kperf_sample(&sbuf, &ctx, actionid, SAMPLE_FLAG_PEND_USER);

	BUF_INFO(PERF_KPC_HNDLR | DBG_FUNC_END, r);
}
580 
581 
582 int
kpc_set_period(uint32_t classes,uint64_t * val)583 kpc_set_period(uint32_t classes, uint64_t *val)
584 {
585 	struct kpc_config_remote mp_config = {
586 		.classes = classes, .configv = val,
587 		.pmc_mask = kpc_get_configurable_pmc_mask(classes)
588 	};
589 
590 	assert(val);
591 
592 	/* no clients have the right to modify both classes */
593 	if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) &&
594 	    (classes & (KPC_CLASS_POWER_MASK))) {
595 		return EPERM;
596 	}
597 
598 	lck_mtx_lock(&kpc_config_lock);
599 
600 #ifdef FIXED_COUNTER_SHADOW
601 	if ((classes & KPC_CLASS_FIXED_MASK) && !kpc_controls_fixed_counters()) {
602 		lck_mtx_unlock(&kpc_config_lock);
603 		return EPERM;
604 	}
605 # else
606 	if (classes & KPC_CLASS_FIXED_MASK) {
607 		lck_mtx_unlock(&kpc_config_lock);
608 		return EINVAL;
609 	}
610 #endif
611 
612 	/* translate the power class for the machine layer */
613 	if (classes & KPC_CLASS_POWER_MASK) {
614 		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;
615 	}
616 
617 	kprintf("setting period %u\n", classes);
618 	kpc_set_period_arch( &mp_config );
619 
620 	lck_mtx_unlock(&kpc_config_lock);
621 
622 	return 0;
623 }
624 
625 int
kpc_get_period(uint32_t classes,uint64_t * val)626 kpc_get_period(uint32_t classes, uint64_t *val)
627 {
628 	uint32_t count = 0;
629 	uint64_t pmc_mask = 0ULL;
630 
631 	assert(val);
632 
633 	lck_mtx_lock(&kpc_config_lock);
634 
635 	if (classes & KPC_CLASS_FIXED_MASK) {
636 		/* convert reload values to periods */
637 		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
638 		for (uint32_t i = 0; i < count; ++i) {
639 			*val++ = kpc_fixed_max() - FIXED_RELOAD(i);
640 		}
641 	}
642 
643 	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
644 		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
645 
646 		/* convert reload values to periods */
647 		count = kpc_configurable_count();
648 		for (uint32_t i = 0; i < count; ++i) {
649 			if ((1ULL << i) & pmc_mask) {
650 				*val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
651 			}
652 		}
653 	}
654 
655 	if (classes & KPC_CLASS_POWER_MASK) {
656 		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
657 
658 		/* convert reload values to periods */
659 		count = kpc_configurable_count();
660 		for (uint32_t i = 0; i < count; ++i) {
661 			if ((1ULL << i) & pmc_mask) {
662 				*val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
663 			}
664 		}
665 	}
666 
667 	lck_mtx_unlock(&kpc_config_lock);
668 
669 	return 0;
670 }
671 
672 int
kpc_set_actionid(uint32_t classes,uint32_t * val)673 kpc_set_actionid(uint32_t classes, uint32_t *val)
674 {
675 	uint32_t count = 0;
676 	uint64_t pmc_mask = 0ULL;
677 
678 	assert(val);
679 
680 	/* NOTE: what happens if a pmi occurs while actionids are being
681 	 * set is undefined. */
682 	lck_mtx_lock(&kpc_config_lock);
683 
684 	if (classes & KPC_CLASS_FIXED_MASK) {
685 		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
686 		memcpy(&FIXED_ACTIONID(0), val, count * sizeof(uint32_t));
687 		val += count;
688 	}
689 
690 	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
691 		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
692 
693 		count = kpc_configurable_count();
694 		for (uint32_t i = 0; i < count; ++i) {
695 			if ((1ULL << i) & pmc_mask) {
696 				CONFIGURABLE_ACTIONID(i) = *val++;
697 			}
698 		}
699 	}
700 
701 	if (classes & KPC_CLASS_POWER_MASK) {
702 		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
703 
704 		count = kpc_configurable_count();
705 		for (uint32_t i = 0; i < count; ++i) {
706 			if ((1ULL << i) & pmc_mask) {
707 				CONFIGURABLE_ACTIONID(i) = *val++;
708 			}
709 		}
710 	}
711 
712 	lck_mtx_unlock(&kpc_config_lock);
713 
714 	return 0;
715 }
716 
717 int
kpc_get_actionid(uint32_t classes,uint32_t * val)718 kpc_get_actionid(uint32_t classes, uint32_t *val)
719 {
720 	uint32_t count = 0;
721 	uint64_t pmc_mask = 0ULL;
722 
723 	assert(val);
724 
725 	lck_mtx_lock(&kpc_config_lock);
726 
727 	if (classes & KPC_CLASS_FIXED_MASK) {
728 		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
729 		memcpy(val, &FIXED_ACTIONID(0), count * sizeof(uint32_t));
730 		val += count;
731 	}
732 
733 	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
734 		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
735 
736 		count = kpc_configurable_count();
737 		for (uint32_t i = 0; i < count; ++i) {
738 			if ((1ULL << i) & pmc_mask) {
739 				*val++ = CONFIGURABLE_ACTIONID(i);
740 			}
741 		}
742 	}
743 
744 	if (classes & KPC_CLASS_POWER_MASK) {
745 		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
746 
747 		count = kpc_configurable_count();
748 		for (uint32_t i = 0; i < count; ++i) {
749 			if ((1ULL << i) & pmc_mask) {
750 				*val++ = CONFIGURABLE_ACTIONID(i);
751 			}
752 		}
753 	}
754 
755 	lck_mtx_unlock(&kpc_config_lock);
756 
757 	return 0;
758 }
759 
760 int
kpc_set_running(uint32_t classes)761 kpc_set_running(uint32_t classes)
762 {
763 	uint32_t all_cfg_classes = KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK;
764 	struct kpc_running_remote mp_config = {
765 		.classes = classes, .cfg_target_mask = 0ULL, .cfg_state_mask = 0ULL
766 	};
767 
768 	/* target all available PMCs */
769 	mp_config.cfg_target_mask = kpc_get_configurable_pmc_mask(all_cfg_classes);
770 
771 	/* translate the power class for the machine layer */
772 	if (classes & KPC_CLASS_POWER_MASK) {
773 		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;
774 	}
775 
776 	/* generate the state of each configurable PMCs */
777 	mp_config.cfg_state_mask = kpc_get_configurable_pmc_mask(classes);
778 
779 	return kpc_set_running_arch(&mp_config);
780 }
781 
/*
 * Convenience entry point for the power manager: reserves the PMCs in mask
 * 0x38 (configurable counters 3-5) with a custom configuration.
 * NOTE(review): the 0x38 mask is hard-coded; confirm it matches PM's
 * expectations on all supported CPU configurations.
 */
boolean_t
kpc_register_pm_handler(kpc_pm_handler_t handler)
{
	return kpc_reserve_pm_counters(0x38, handler, TRUE);
}
787 
/*
 * Reserve a set of configurable PMCs for the power manager.
 *
 * pmc_mask: configurable PMCs requested (clipped to the PMCs that exist).
 * handler: PM callback invoked on counter-ownership changes; must be
 *          non-NULL, and only one handler may ever be registered.
 * custom_config: TRUE if PM configures its reserved PMCs itself.
 *
 * Returns TRUE when the counters are currently available to PM, FALSE when
 * a task has forced ownership of all counters.
 */
boolean_t
kpc_reserve_pm_counters(uint64_t pmc_mask, kpc_pm_handler_t handler,
    boolean_t custom_config)
{
	uint64_t all_mask = (1ULL << kpc_configurable_count()) - 1;
	uint64_t req_mask = 0ULL;

	/* pre-condition */
	assert(handler != NULL);
	assert(kpc_pm_handler == NULL);

	/* check number of counters requested */
	req_mask = (pmc_mask & all_mask);
	assert(kpc_popcount(req_mask) <= kpc_configurable_count());

	/* save the power manager states */
	kpc_pm_has_custom_config = custom_config;
	kpc_pm_pmc_mask = req_mask;
	kpc_pm_handler = handler;

	printf("kpc: pm registered pmc_mask=%llx custom_config=%d\n",
	    req_mask, custom_config);

	/* post-condition: kpc's counters and PM's counters partition the PMCs */
	{
		uint32_t cfg_count = kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);
		uint32_t pwr_count = kpc_popcount(kpc_pm_pmc_mask);
#pragma unused(cfg_count, pwr_count)
		assert((cfg_count + pwr_count) == kpc_configurable_count());
	}

	return force_all_ctrs ? FALSE : TRUE;
}
821 
/*
 * Undo kpc_reserve_pm_counters(): return all PMCs to kpc and forget the
 * power manager's handler and reservation state.
 */
void
kpc_release_pm_counters(void)
{
	/* pre-condition */
	assert(kpc_pm_handler != NULL);

	/* release the counters */
	kpc_pm_has_custom_config = FALSE;
	kpc_pm_pmc_mask = 0ULL;
	kpc_pm_handler = NULL;

	printf("kpc: pm released counters\n");

	/* post-condition: kpc owns every configurable PMC again */
	assert(kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK) == kpc_configurable_count());
}
838 
/* Count the set bits in value (Kernighan's clear-lowest-bit loop). */
uint8_t
kpc_popcount(uint64_t value)
{
	uint8_t bits = 0;

	while (value != 0) {
		value &= value - 1; /* clear the lowest set bit */
		bits++;
	}

	return bits;
}
844 
/*
 * Compute the mask of configurable PMCs belonging to the requested
 * class(es).  The configurable and power classes partition the
 * configurable PMCs: PM's reserved counters form the power class, and the
 * rest belong to the configurable class -- unless a task forced ownership
 * of everything, in which case the configurable class covers all PMCs.
 */
uint64_t
kpc_get_configurable_pmc_mask(uint32_t classes)
{
	uint32_t configurable_count = kpc_configurable_count();
	uint64_t cfg_mask = 0ULL, pwr_mask = 0ULL, all_cfg_pmcs_mask = 0ULL;

	/* not configurable classes or no configurable counters */
	if (((classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) == 0) ||
	    (configurable_count == 0)) {
		goto exit;
	}

	/* shift below would overflow at 64 counters */
	assert(configurable_count < 64);
	all_cfg_pmcs_mask = (1ULL << configurable_count) - 1;

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		if (force_all_ctrs == TRUE) {
			cfg_mask |= all_cfg_pmcs_mask;
		} else {
			cfg_mask |= (~kpc_pm_pmc_mask) & all_cfg_pmcs_mask;
		}
	}

	/*
	 * The power class exists iff:
	 *      - No tasks acquired all PMCs
	 *      - PM registered and uses kpc to interact with PMCs
	 */
	if ((force_all_ctrs == FALSE) &&
	    (kpc_pm_handler != NULL) &&
	    (kpc_pm_has_custom_config == FALSE) &&
	    (classes & KPC_CLASS_POWER_MASK)) {
		pwr_mask |= kpc_pm_pmc_mask & all_cfg_pmcs_mask;
	}

exit:
	/* post-conditions: masks stay in range and never overlap */
	assert(((cfg_mask | pwr_mask) & (~all_cfg_pmcs_mask)) == 0 );
	assert( kpc_popcount(cfg_mask | pwr_mask) <= kpc_configurable_count());
	assert((cfg_mask & pwr_mask) == 0ULL );

	return cfg_mask | pwr_mask;
}
888 
889 #else // CONFIG_CPU_COUNTERS
890 
891 /*
892  * Ensure there are stubs available for kexts, even if xnu isn't built to
893  * support CPU counters.
894  */
895 
/* PM acknowledgements are meaningless without CPU counters: no-op. */
void
kpc_pm_acknowledge(boolean_t __unused available_to_pm)
{
}

/* Refuse PM handler registration when kpc is compiled out. */
boolean_t
kpc_register_pm_handler(kpc_pm_handler_t __unused handler)
{
	return FALSE;
}

/* Nothing is reserved; report TRUE (counters "available"). */
boolean_t
kpc_reserve_pm_counters(
	uint64_t __unused pmc_mask,
	kpc_pm_handler_t __unused handler,
	boolean_t __unused custom_config)
{
	return TRUE;
}

/* Nothing was reserved, so nothing to release. */
void
kpc_release_pm_counters(void)
{
}

/* Counters can never be forced when kpc is compiled out. */
int
kpc_get_force_all_ctrs(void)
{
	return 0;
}

/* Counter reads are unsupported without CPU counters. */
int
kpc_get_cpu_counters(
	boolean_t __unused all_cpus,
	uint32_t __unused classes,
	int * __unused curcpu,
	uint64_t * __unused buf)
{
	return ENOTSUP;
}

/* Shadow counter reads are unsupported without CPU counters. */
int
kpc_get_shadow_counters(
	boolean_t __unused all_cpus,
	uint32_t __unused classes,
	int * __unused curcpu,
	uint64_t * __unused buf)
{
	return ENOTSUP;
}

/* No counter class can be running. */
uint32_t
kpc_get_running(void)
{
	return 0;
}

/* Starting counters is unsupported without CPU counters. */
int
kpc_set_running(uint32_t __unused classes)
{
	return ENOTSUP;
}

/* Reading configuration is unsupported without CPU counters. */
int
kpc_get_config(
	uint32_t __unused classes,
	kpc_config_t * __unused current_config)
{
	return ENOTSUP;
}

int kpc_set_config_external(uint32_t classes, kpc_config_t *configv);
/* Setting configuration is unsupported without CPU counters. */
int
kpc_set_config_external(
	uint32_t __unused classes,
	kpc_config_t * __unused configv)
{
	return ENOTSUP;
}
975 
976 #endif // !CONFIG_CPU_COUNTERS
977