1 /*
2 * Copyright (c) 2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach/mach_types.h>
30 #include <machine/machine_routines.h>
31 #include <kern/processor.h>
32 #include <kern/kalloc.h>
33 #include <sys/errno.h>
34 #include <sys/vm.h>
35 #include <kperf/buffer.h>
36 #if MONOTONIC
37 #include <kern/monotonic.h>
38 #endif /* MONOTONIC */
39 #include <kern/thread.h>
40 #if defined(__arm64__) || defined(__arm__)
41 #include <arm/cpu_data_internal.h>
42 #endif
43
44 #include <kern/kpc.h>
45
46 #include <kperf/kperf.h>
47 #include <kperf/sample.h>
48 #include <kperf/context.h>
49 #include <kperf/action.h>
50
/* Per-counter kperf action IDs, indexed by counter number. */
uint32_t kpc_actionid[KPC_MAX_COUNTERS];

/* Bytes needed to hold every counter value for one CPU. */
#define COUNTERBUF_SIZE_PER_CPU (KPC_MAX_COUNTERS * sizeof(uint64_t))
/* Bytes needed to hold every counter value for every logical CPU. */
#define COUNTERBUF_SIZE         (machine_info.logical_cpu_max * \
	                                COUNTERBUF_SIZE_PER_CPU)

/* locks */
static LCK_GRP_DECLARE(kpc_config_lckgrp, "kpc");
/* serializes configuration changes: config words, periods, action IDs */
static LCK_MTX_DECLARE(kpc_config_lock, &kpc_config_lckgrp);

/* state specifying if all counters have been requested by kperf */
static boolean_t force_all_ctrs = FALSE;

/* power manager */
static kpc_pm_handler_t kpc_pm_handler;    /* PM callout; NULL when PM not registered */
static boolean_t kpc_pm_has_custom_config; /* PM manages its reserved PMCs itself */
static uint64_t kpc_pm_pmc_mask;           /* configurable PMCs reserved by PM */

/* NOTE(review): presumably set while counters are handled across a context
 * switch — confirm at the users of this flag. */
boolean_t kpc_context_switch_active = FALSE;
bool kpc_supported = true;
71
72 static uint64_t *
kpc_percpu_alloc(void)73 kpc_percpu_alloc(void)
74 {
75 return kalloc_data_tag(COUNTERBUF_SIZE_PER_CPU, Z_WAITOK | Z_ZERO,
76 VM_KERN_MEMORY_DIAG);
77 }
78
79 static void
kpc_percpu_free(uint64_t * buf)80 kpc_percpu_free(uint64_t *buf)
81 {
82 kfree_data(buf, COUNTERBUF_SIZE_PER_CPU);
83 }
84
85 boolean_t
kpc_register_cpu(struct cpu_data * cpu_data)86 kpc_register_cpu(struct cpu_data *cpu_data)
87 {
88 assert(cpu_data);
89 assert(cpu_data->cpu_kpc_buf[0] == NULL);
90 assert(cpu_data->cpu_kpc_buf[1] == NULL);
91 assert(cpu_data->cpu_kpc_shadow == NULL);
92 assert(cpu_data->cpu_kpc_reload == NULL);
93
94 /*
95 * Buffers allocated through kpc_counterbuf_alloc() are large enough to
96 * store all PMCs values from all CPUs. This mimics the userspace API.
97 * This does not suit well with the per-CPU kpc buffers, since:
98 * 1. Buffers don't need to be this large.
99 * 2. The actual number of CPUs is not known at this point.
100 *
101 * CPUs are asked to callout into kpc when being registered, we'll
102 * allocate the memory here.
103 */
104
105 if ((cpu_data->cpu_kpc_buf[0] = kpc_percpu_alloc()) == NULL) {
106 goto error;
107 }
108 if ((cpu_data->cpu_kpc_buf[1] = kpc_percpu_alloc()) == NULL) {
109 goto error;
110 }
111 if ((cpu_data->cpu_kpc_shadow = kpc_percpu_alloc()) == NULL) {
112 goto error;
113 }
114 if ((cpu_data->cpu_kpc_reload = kpc_percpu_alloc()) == NULL) {
115 goto error;
116 }
117
118 /* success */
119 return TRUE;
120
121 error:
122 kpc_unregister_cpu(cpu_data);
123 return FALSE;
124 }
125
126 void
kpc_unregister_cpu(struct cpu_data * cpu_data)127 kpc_unregister_cpu(struct cpu_data *cpu_data)
128 {
129 assert(cpu_data);
130 if (cpu_data->cpu_kpc_buf[0] != NULL) {
131 kpc_percpu_free(cpu_data->cpu_kpc_buf[0]);
132 cpu_data->cpu_kpc_buf[0] = NULL;
133 }
134 if (cpu_data->cpu_kpc_buf[1] != NULL) {
135 kpc_percpu_free(cpu_data->cpu_kpc_buf[1]);
136 cpu_data->cpu_kpc_buf[1] = NULL;
137 }
138 if (cpu_data->cpu_kpc_shadow != NULL) {
139 kpc_percpu_free(cpu_data->cpu_kpc_shadow);
140 cpu_data->cpu_kpc_shadow = NULL;
141 }
142 if (cpu_data->cpu_kpc_reload != NULL) {
143 kpc_percpu_free(cpu_data->cpu_kpc_reload);
144 cpu_data->cpu_kpc_reload = NULL;
145 }
146 }
147
/*
 * Record in the task whether it currently holds the forced-all-counters
 * claim.  The t_kpc bit is updated under the task lock.
 */
static void
kpc_task_set_forced_all_ctrs(task_t task, boolean_t state)
{
	assert(task);

	task_lock(task);
	if (state) {
		task->t_kpc |= TASK_KPC_FORCED_ALL_CTRS;
	} else {
		task->t_kpc &= ~TASK_KPC_FORCED_ALL_CTRS;
	}
	task_unlock(task);
}
161
162 static boolean_t
kpc_task_get_forced_all_ctrs(task_t task)163 kpc_task_get_forced_all_ctrs(task_t task)
164 {
165 assert(task);
166 return task->t_kpc & TASK_KPC_FORCED_ALL_CTRS ? TRUE : FALSE;
167 }
168
/*
 * Grant (val != 0) or release (val == 0) ownership of all PMCs to `task`,
 * overriding any power-manager reservation.
 *
 * Returns 0 on success, EACCES if a different task already holds the
 * counters.  NOTE(review): the check of force_all_ctrs and the later store
 * are not performed under a lock here — presumably callers serialize;
 * confirm.
 */
int
kpc_force_all_ctrs(task_t task, int val)
{
	boolean_t new_state = val ? TRUE : FALSE;
	boolean_t old_state = kpc_get_force_all_ctrs();

	/*
	 * Refuse to do the operation if the counters are already forced by
	 * another task.
	 */
	if (kpc_get_force_all_ctrs() && !kpc_task_get_forced_all_ctrs(task)) {
		return EACCES;
	}

	/* nothing to do if the state is not changing */
	if (old_state == new_state) {
		return 0;
	}

#if MONOTONIC
	mt_ownership_change(new_state);
#endif /* MONOTONIC */

	/* notify the power manager: counters become unavailable to PM when forced */
	if (kpc_pm_handler) {
		kpc_pm_handler(new_state ? FALSE : TRUE);
	}

	/*
	 * This is a force -- ensure that counters are forced, even if power
	 * management fails to acknowledge it.
	 */
	if (force_all_ctrs != new_state) {
		force_all_ctrs = new_state;
	}

	/* update the task bits */
	kpc_task_set_forced_all_ctrs(task, new_state);

	return 0;
}
210
211 void
kpc_pm_acknowledge(boolean_t available_to_pm)212 kpc_pm_acknowledge(boolean_t available_to_pm)
213 {
214 /*
215 * Force-all-counters should still be true when the counters are being
216 * made available to power management and false when counters are going
217 * to be taken away.
218 */
219 assert(force_all_ctrs == available_to_pm);
220 /*
221 * Make sure power management isn't playing games with us.
222 */
223
224 /*
225 * Counters being available means no one is forcing all counters.
226 */
227 force_all_ctrs = available_to_pm ? FALSE : TRUE;
228 }
229
230 int
kpc_get_force_all_ctrs(void)231 kpc_get_force_all_ctrs(void)
232 {
233 return force_all_ctrs;
234 }
235
236 boolean_t
kpc_multiple_clients(void)237 kpc_multiple_clients(void)
238 {
239 return kpc_pm_handler != NULL;
240 }
241
242 boolean_t
kpc_controls_fixed_counters(void)243 kpc_controls_fixed_counters(void)
244 {
245 return !kpc_pm_handler || force_all_ctrs || !kpc_pm_has_custom_config;
246 }
247
248 boolean_t
kpc_controls_counter(uint32_t ctr)249 kpc_controls_counter(uint32_t ctr)
250 {
251 uint64_t pmc_mask = 0ULL;
252
253 assert(ctr < (kpc_fixed_count() + kpc_configurable_count()));
254
255 if (ctr < kpc_fixed_count()) {
256 return kpc_controls_fixed_counters();
257 }
258
259 /*
260 * By default kpc manages all PMCs, but if the Power Manager registered
261 * with custom_config=TRUE, the Power Manager manages its reserved PMCs.
262 * However, kpc takes ownership back if a task acquired all PMCs via
263 * force_all_ctrs.
264 */
265 pmc_mask = (1ULL << (ctr - kpc_fixed_count()));
266 if ((pmc_mask & kpc_pm_pmc_mask) && kpc_pm_has_custom_config && !force_all_ctrs) {
267 return FALSE;
268 }
269
270 return TRUE;
271 }
272
273 uint32_t
kpc_get_running(void)274 kpc_get_running(void)
275 {
276 uint64_t pmc_mask = 0;
277 uint32_t cur_state = 0;
278
279 if (kpc_is_running_fixed()) {
280 cur_state |= KPC_CLASS_FIXED_MASK;
281 }
282
283 pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
284 if (kpc_is_running_configurable(pmc_mask)) {
285 cur_state |= KPC_CLASS_CONFIGURABLE_MASK;
286 }
287
288 pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
289 if ((pmc_mask != 0) && kpc_is_running_configurable(pmc_mask)) {
290 cur_state |= KPC_CLASS_POWER_MASK;
291 }
292
293 return cur_state;
294 }
295
/*
 * Read the current CPU's counter values for the requested classes into
 * `buf`, optionally reporting the CPU number through `curcpu`.
 *
 * Values are packed per class in the order: fixed, configurable, power.
 * Returns the number of uint64_t entries written.
 *
 * Interrupts are disabled so the counter reads and the CPU number are
 * taken on the same CPU; may be called from an IPI.
 */
int
kpc_get_curcpu_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
	int enabled = 0, offset = 0;
	uint64_t pmc_mask = 0ULL;

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	/* grab counters and CPU number as close as possible */
	if (curcpu) {
		*curcpu = cpu_number();
	}

	if (classes & KPC_CLASS_FIXED_MASK) {
		kpc_get_fixed_counters(&buf[offset]);
		offset += kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	/* restore the caller's interrupt state */
	ml_set_interrupts_enabled(enabled);

	return offset;
}
333
334 /* generic counter reading function, public api */
335 int
kpc_get_cpu_counters(boolean_t all_cpus,uint32_t classes,int * curcpu,uint64_t * buf)336 kpc_get_cpu_counters(boolean_t all_cpus, uint32_t classes,
337 int *curcpu, uint64_t *buf)
338 {
339 assert(buf);
340
341 /*
342 * Unlike reading the current CPU counters, reading counters from all
343 * CPUs is architecture dependent. This allows kpc to make the most of
344 * the platform if memory mapped registers is supported.
345 */
346 if (all_cpus) {
347 return kpc_get_all_cpus_counters(classes, curcpu, buf);
348 } else {
349 return kpc_get_curcpu_counters(classes, curcpu, buf);
350 }
351 }
352
353 int
kpc_get_shadow_counters(boolean_t all_cpus,uint32_t classes,int * curcpu,uint64_t * buf)354 kpc_get_shadow_counters(boolean_t all_cpus, uint32_t classes,
355 int *curcpu, uint64_t *buf)
356 {
357 int curcpu_id = cpu_number();
358 uint32_t cfg_count = kpc_configurable_count(), offset = 0;
359 uint64_t pmc_mask = 0ULL;
360 boolean_t enabled;
361
362 assert(buf);
363
364 enabled = ml_set_interrupts_enabled(FALSE);
365
366 curcpu_id = cpu_number();
367 if (curcpu) {
368 *curcpu = curcpu_id;
369 }
370
371 for (int cpu = 0; cpu < machine_info.logical_cpu_max; ++cpu) {
372 /* filter if the caller did not request all cpus */
373 if (!all_cpus && (cpu != curcpu_id)) {
374 continue;
375 }
376
377 if (classes & KPC_CLASS_FIXED_MASK) {
378 uint32_t count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
379 memcpy(&buf[offset], &FIXED_SHADOW_CPU(cpu, 0), count * sizeof(uint64_t));
380 offset += count;
381 }
382
383 if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
384 pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
385
386 for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr) {
387 if ((1ULL << cfg_ctr) & pmc_mask) {
388 buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
389 }
390 }
391 }
392
393 if (classes & KPC_CLASS_POWER_MASK) {
394 pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
395
396 for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr) {
397 if ((1ULL << cfg_ctr) & pmc_mask) {
398 buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
399 }
400 }
401 }
402 }
403
404 ml_set_interrupts_enabled(enabled);
405
406 return offset;
407 }
408
409 uint32_t
kpc_get_counter_count(uint32_t classes)410 kpc_get_counter_count(uint32_t classes)
411 {
412 uint32_t count = 0;
413
414 if (classes & KPC_CLASS_FIXED_MASK) {
415 count += kpc_fixed_count();
416 }
417
418 if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
419 uint64_t pmc_msk = kpc_get_configurable_pmc_mask(classes);
420 uint32_t pmc_cnt = kpc_popcount(pmc_msk);
421 count += pmc_cnt;
422 }
423
424 return count;
425 }
426
427 uint32_t
kpc_get_config_count(uint32_t classes)428 kpc_get_config_count(uint32_t classes)
429 {
430 uint32_t count = 0;
431
432 if (classes & KPC_CLASS_FIXED_MASK) {
433 count += kpc_fixed_config_count();
434 }
435
436 if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
437 uint64_t pmc_mask = kpc_get_configurable_pmc_mask(classes);
438 count += kpc_configurable_config_count(pmc_mask);
439 }
440
441 if ((classes & KPC_CLASS_RAWPMU_MASK) &&
442 (!kpc_multiple_clients() || force_all_ctrs)) {
443 count += kpc_rawpmu_config_count();
444 }
445
446 return count;
447 }
448
449 int
kpc_get_config(uint32_t classes,kpc_config_t * current_config)450 kpc_get_config(uint32_t classes, kpc_config_t *current_config)
451 {
452 uint32_t count = 0;
453
454 assert(current_config);
455
456 if (classes & KPC_CLASS_FIXED_MASK) {
457 kpc_get_fixed_config(¤t_config[count]);
458 count += kpc_get_config_count(KPC_CLASS_FIXED_MASK);
459 }
460
461 if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
462 uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
463 kpc_get_configurable_config(¤t_config[count], pmc_mask);
464 count += kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
465 }
466
467 if (classes & KPC_CLASS_POWER_MASK) {
468 uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
469 kpc_get_configurable_config(¤t_config[count], pmc_mask);
470 count += kpc_get_config_count(KPC_CLASS_POWER_MASK);
471 }
472
473 if (classes & KPC_CLASS_RAWPMU_MASK) {
474 // Client shouldn't ask for config words that aren't available.
475 // Most likely, they'd misinterpret the returned buffer if we
476 // allowed this.
477 if (kpc_multiple_clients() && !force_all_ctrs) {
478 return EPERM;
479 }
480 kpc_get_rawpmu_config(¤t_config[count]);
481 count += kpc_get_config_count(KPC_CLASS_RAWPMU_MASK);
482 }
483
484 return 0;
485 }
486
487 int
kpc_set_config(uint32_t classes,kpc_config_t * configv)488 kpc_set_config(uint32_t classes, kpc_config_t *configv)
489 {
490 int ret = 0;
491 struct kpc_config_remote mp_config = {
492 .classes = classes, .configv = configv,
493 .pmc_mask = kpc_get_configurable_pmc_mask(classes)
494 };
495
496 assert(configv);
497
498 /* don't allow RAWPMU configuration when sharing counters */
499 if ((classes & KPC_CLASS_RAWPMU_MASK) && kpc_multiple_clients() &&
500 !force_all_ctrs) {
501 return EPERM;
502 }
503
504 /* no clients have the right to modify both classes */
505 if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) &&
506 (classes & (KPC_CLASS_POWER_MASK))) {
507 return EPERM;
508 }
509
510 lck_mtx_lock(&kpc_config_lock);
511
512 /* translate the power class for the machine layer */
513 if (classes & KPC_CLASS_POWER_MASK) {
514 mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;
515 }
516
517 ret = kpc_set_config_arch( &mp_config );
518
519 lck_mtx_unlock(&kpc_config_lock);
520
521 return ret;
522 }
523
524 uint32_t
kpc_get_counterbuf_size(void)525 kpc_get_counterbuf_size(void)
526 {
527 return COUNTERBUF_SIZE;
528 }
529
530 /* allocate a buffer large enough for all possible counters */
531 uint64_t *
kpc_counterbuf_alloc(void)532 kpc_counterbuf_alloc(void)
533 {
534 return kalloc_data_tag(COUNTERBUF_SIZE, Z_WAITOK | Z_ZERO,
535 VM_KERN_MEMORY_DIAG);
536 }
537
538 void
kpc_counterbuf_free(uint64_t * buf)539 kpc_counterbuf_free(uint64_t *buf)
540 {
541 kfree_data(buf, COUNTERBUF_SIZE);
542 }
543
/*
 * Take a kperf sample on behalf of a PMI on `counter` (trigger type is
 * TRIGGER_TYPE_PMI), running kperf action `actionid` for the current
 * thread/task, and emit start/end tracepoints around it.
 */
void
kpc_sample_kperf(uint32_t actionid, uint32_t counter, uint64_t config,
    uint64_t count, uintptr_t pc, kperf_kpc_flags_t flags)
{
	struct kperf_sample sbuf;

	/* pack config, counter number and flags into one tracepoint argument */
	uint64_t desc = config | (uint64_t)counter << 32 | (uint64_t)flags << 48;

	BUF_DATA(PERF_KPC_HNDLR | DBG_FUNC_START, desc, count, pc);

	thread_t thread = current_thread();
	task_t task = get_threadtask(thread);

	struct kperf_context ctx = {
		.cur_thread = thread,
		.cur_task = task,
		.cur_pid = task_pid(task),
		.trigger_type = TRIGGER_TYPE_PMI,
		.trigger_id = 0,
	};

	/* SAMPLE_FLAG_PEND_USER defers the user-space portion of the sample */
	int r = kperf_sample(&sbuf, &ctx, actionid, SAMPLE_FLAG_PEND_USER);

	BUF_INFO(PERF_KPC_HNDLR | DBG_FUNC_END, r);
}
569
570
571 int
kpc_set_period(uint32_t classes,uint64_t * val)572 kpc_set_period(uint32_t classes, uint64_t *val)
573 {
574 struct kpc_config_remote mp_config = {
575 .classes = classes, .configv = val,
576 .pmc_mask = kpc_get_configurable_pmc_mask(classes)
577 };
578
579 assert(val);
580
581 /* no clients have the right to modify both classes */
582 if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) &&
583 (classes & (KPC_CLASS_POWER_MASK))) {
584 return EPERM;
585 }
586
587 lck_mtx_lock(&kpc_config_lock);
588
589 #ifdef FIXED_COUNTER_SHADOW
590 if ((classes & KPC_CLASS_FIXED_MASK) && !kpc_controls_fixed_counters()) {
591 lck_mtx_unlock(&kpc_config_lock);
592 return EPERM;
593 }
594 # else
595 if (classes & KPC_CLASS_FIXED_MASK) {
596 lck_mtx_unlock(&kpc_config_lock);
597 return EINVAL;
598 }
599 #endif
600
601 /* translate the power class for the machine layer */
602 if (classes & KPC_CLASS_POWER_MASK) {
603 mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;
604 }
605
606 kprintf("setting period %u\n", classes);
607 kpc_set_period_arch( &mp_config );
608
609 lck_mtx_unlock(&kpc_config_lock);
610
611 return 0;
612 }
613
614 int
kpc_get_period(uint32_t classes,uint64_t * val)615 kpc_get_period(uint32_t classes, uint64_t *val)
616 {
617 uint32_t count = 0;
618 uint64_t pmc_mask = 0ULL;
619
620 assert(val);
621
622 lck_mtx_lock(&kpc_config_lock);
623
624 if (classes & KPC_CLASS_FIXED_MASK) {
625 /* convert reload values to periods */
626 count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
627 for (uint32_t i = 0; i < count; ++i) {
628 *val++ = kpc_fixed_max() - FIXED_RELOAD(i);
629 }
630 }
631
632 if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
633 pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
634
635 /* convert reload values to periods */
636 count = kpc_configurable_count();
637 for (uint32_t i = 0; i < count; ++i) {
638 if ((1ULL << i) & pmc_mask) {
639 *val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
640 }
641 }
642 }
643
644 if (classes & KPC_CLASS_POWER_MASK) {
645 pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
646
647 /* convert reload values to periods */
648 count = kpc_configurable_count();
649 for (uint32_t i = 0; i < count; ++i) {
650 if ((1ULL << i) & pmc_mask) {
651 *val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
652 }
653 }
654 }
655
656 lck_mtx_unlock(&kpc_config_lock);
657
658 return 0;
659 }
660
661 int
kpc_set_actionid(uint32_t classes,uint32_t * val)662 kpc_set_actionid(uint32_t classes, uint32_t *val)
663 {
664 uint32_t count = 0;
665 uint64_t pmc_mask = 0ULL;
666
667 assert(val);
668
669 /* NOTE: what happens if a pmi occurs while actionids are being
670 * set is undefined. */
671 lck_mtx_lock(&kpc_config_lock);
672
673 if (classes & KPC_CLASS_FIXED_MASK) {
674 count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
675 memcpy(&FIXED_ACTIONID(0), val, count * sizeof(uint32_t));
676 val += count;
677 }
678
679 if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
680 pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
681
682 count = kpc_configurable_count();
683 for (uint32_t i = 0; i < count; ++i) {
684 if ((1ULL << i) & pmc_mask) {
685 CONFIGURABLE_ACTIONID(i) = *val++;
686 }
687 }
688 }
689
690 if (classes & KPC_CLASS_POWER_MASK) {
691 pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
692
693 count = kpc_configurable_count();
694 for (uint32_t i = 0; i < count; ++i) {
695 if ((1ULL << i) & pmc_mask) {
696 CONFIGURABLE_ACTIONID(i) = *val++;
697 }
698 }
699 }
700
701 lck_mtx_unlock(&kpc_config_lock);
702
703 return 0;
704 }
705
706 int
kpc_get_actionid(uint32_t classes,uint32_t * val)707 kpc_get_actionid(uint32_t classes, uint32_t *val)
708 {
709 uint32_t count = 0;
710 uint64_t pmc_mask = 0ULL;
711
712 assert(val);
713
714 lck_mtx_lock(&kpc_config_lock);
715
716 if (classes & KPC_CLASS_FIXED_MASK) {
717 count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
718 memcpy(val, &FIXED_ACTIONID(0), count * sizeof(uint32_t));
719 val += count;
720 }
721
722 if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
723 pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
724
725 count = kpc_configurable_count();
726 for (uint32_t i = 0; i < count; ++i) {
727 if ((1ULL << i) & pmc_mask) {
728 *val++ = CONFIGURABLE_ACTIONID(i);
729 }
730 }
731 }
732
733 if (classes & KPC_CLASS_POWER_MASK) {
734 pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
735
736 count = kpc_configurable_count();
737 for (uint32_t i = 0; i < count; ++i) {
738 if ((1ULL << i) & pmc_mask) {
739 *val++ = CONFIGURABLE_ACTIONID(i);
740 }
741 }
742 }
743
744 lck_mtx_unlock(&kpc_config_lock);
745
746 return 0;
747 }
748
749 int
kpc_set_running(uint32_t classes)750 kpc_set_running(uint32_t classes)
751 {
752 uint32_t all_cfg_classes = KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK;
753 struct kpc_running_remote mp_config = {
754 .classes = classes, .cfg_target_mask = 0ULL, .cfg_state_mask = 0ULL
755 };
756
757 /* target all available PMCs */
758 mp_config.cfg_target_mask = kpc_get_configurable_pmc_mask(all_cfg_classes);
759
760 /* translate the power class for the machine layer */
761 if (classes & KPC_CLASS_POWER_MASK) {
762 mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;
763 }
764
765 /* generate the state of each configurable PMCs */
766 mp_config.cfg_state_mask = kpc_get_configurable_pmc_mask(classes);
767
768 return kpc_set_running_arch(&mp_config);
769 }
770
771 boolean_t
kpc_register_pm_handler(kpc_pm_handler_t handler)772 kpc_register_pm_handler(kpc_pm_handler_t handler)
773 {
774 return kpc_reserve_pm_counters(0x38, handler, TRUE);
775 }
776
/*
 * Reserve the configurable PMCs in `pmc_mask` for the power manager.
 *
 * `handler` is the PM callout invoked on ownership changes; only one PM
 * registration is allowed at a time.  With custom_config, PM manages its
 * reserved PMCs itself rather than through kpc.
 *
 * Returns FALSE if the counters are currently force-owned by a task (PM
 * cannot use them yet), TRUE otherwise.
 */
boolean_t
kpc_reserve_pm_counters(uint64_t pmc_mask, kpc_pm_handler_t handler,
    boolean_t custom_config)
{
	uint64_t all_mask = (1ULL << kpc_configurable_count()) - 1;
	uint64_t req_mask = 0ULL;

	/* pre-condition */
	assert(handler != NULL);
	assert(kpc_pm_handler == NULL);

	/* check number of counters requested: clamp to existing PMCs */
	req_mask = (pmc_mask & all_mask);
	assert(kpc_popcount(req_mask) <= kpc_configurable_count());

	/* save the power manager states */
	kpc_pm_has_custom_config = custom_config;
	kpc_pm_pmc_mask = req_mask;
	kpc_pm_handler = handler;

	printf("kpc: pm registered pmc_mask=%llx custom_config=%d\n",
	    req_mask, custom_config);

	/* post-condition: kpc's share plus PM's share covers every PMC */
	{
		uint32_t cfg_count = kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);
		uint32_t pwr_count = kpc_popcount(kpc_pm_pmc_mask);
#pragma unused(cfg_count, pwr_count)
		assert((cfg_count + pwr_count) == kpc_configurable_count());
	}

	return force_all_ctrs ? FALSE : TRUE;
}
810
811 void
kpc_release_pm_counters(void)812 kpc_release_pm_counters(void)
813 {
814 /* pre-condition */
815 assert(kpc_pm_handler != NULL);
816
817 /* release the counters */
818 kpc_pm_has_custom_config = FALSE;
819 kpc_pm_pmc_mask = 0ULL;
820 kpc_pm_handler = NULL;
821
822 printf("kpc: pm released counters\n");
823
824 /* post-condition */
825 assert(kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK) == kpc_configurable_count());
826 }
827
/* Number of set bits in `value` (population count). */
uint8_t
kpc_popcount(uint64_t value)
{
	int bits = __builtin_popcountll(value);
	return (uint8_t)bits;
}
833
/*
 * Compute which configurable PMCs belong to the requested classes.
 *
 * The configurable class owns every PMC not reserved by the power
 * manager (or all of them under force_all_ctrs).  The power class owns
 * the PM-reserved PMCs, and only exists when no task forced all counters
 * and PM registered without a custom configuration.  The two masks are
 * disjoint by construction (see post-condition asserts).
 */
uint64_t
kpc_get_configurable_pmc_mask(uint32_t classes)
{
	uint32_t configurable_count = kpc_configurable_count();
	uint64_t cfg_mask = 0ULL, pwr_mask = 0ULL, all_cfg_pmcs_mask = 0ULL;

	/* not configurable classes or no configurable counters */
	if (((classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) == 0) ||
	    (configurable_count == 0)) {
		goto exit;
	}

	/* guard the shift below against undefined behavior */
	assert(configurable_count < 64);
	all_cfg_pmcs_mask = (1ULL << configurable_count) - 1;

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		if (force_all_ctrs == TRUE) {
			cfg_mask |= all_cfg_pmcs_mask;
		} else {
			cfg_mask |= (~kpc_pm_pmc_mask) & all_cfg_pmcs_mask;
		}
	}

	/*
	 * The power class exists iff:
	 *      - No tasks acquired all PMCs
	 *      - PM registered and uses kpc to interact with PMCs
	 */
	if ((force_all_ctrs == FALSE) &&
	    (kpc_pm_handler != NULL) &&
	    (kpc_pm_has_custom_config == FALSE) &&
	    (classes & KPC_CLASS_POWER_MASK)) {
		pwr_mask |= kpc_pm_pmc_mask & all_cfg_pmcs_mask;
	}

exit:
	/* post-conditions */
	assert(((cfg_mask | pwr_mask) & (~all_cfg_pmcs_mask)) == 0 );
	assert( kpc_popcount(cfg_mask | pwr_mask) <= kpc_configurable_count());
	assert((cfg_mask & pwr_mask) == 0ULL );

	return cfg_mask | pwr_mask;
}
877