1 /*
2 * Copyright (c) 2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach/mach_types.h>
30 #include <machine/machine_routines.h>
31 #include <kern/processor.h>
32 #include <kern/kalloc.h>
33 #include <sys/errno.h>
34 #include <sys/vm.h>
35 #include <kperf/buffer.h>
36 #if MONOTONIC
37 #include <kern/monotonic.h>
38 #endif /* MONOTONIC */
39 #include <kern/thread.h>
40 #if defined(__arm64__)
41 #include <arm/cpu_data_internal.h>
42 #endif
43
44 #include <kern/kpc.h>
45
46 #include <kperf/kperf.h>
47 #include <kperf/sample.h>
48 #include <kperf/context.h>
49 #include <kperf/action.h>
50
/* action ID to trigger for each counter's PMI; indexed by counter number */
uint32_t kpc_actionid[KPC_MAX_COUNTERS];

/* bytes needed to snapshot every counter on a single CPU */
#define COUNTERBUF_SIZE_PER_CPU (KPC_MAX_COUNTERS * sizeof(uint64_t))
/* bytes needed to snapshot every counter on every possible CPU */
#define COUNTERBUF_SIZE (machine_info.logical_cpu_max * \
	COUNTERBUF_SIZE_PER_CPU)

/* locks */
static LCK_GRP_DECLARE(kpc_config_lckgrp, "kpc");
static LCK_MTX_DECLARE(kpc_config_lock, &kpc_config_lckgrp);

/* state specifying if all counters have been requested by kperf */
static boolean_t force_all_ctrs = FALSE;

/* power manager */
static kpc_pm_handler_t kpc_pm_handler;    /* PM callout; NULL when PM not registered */
static boolean_t kpc_pm_has_custom_config; /* TRUE when PM programs its reserved PMCs itself */
static uint64_t kpc_pm_pmc_mask;           /* configurable PMCs reserved by PM */

boolean_t kpc_context_switch_active = FALSE;
bool kpc_supported = true;
71
/*
 * Allocate a zero-filled buffer large enough to hold one CPU's worth of
 * counter values (KPC_MAX_COUNTERS 64-bit slots).  Returns NULL on failure.
 */
static uint64_t *
kpc_percpu_alloc(void)
{
	return kalloc_data_tag(COUNTERBUF_SIZE_PER_CPU, Z_WAITOK | Z_ZERO,
	    VM_KERN_MEMORY_DIAG);
}
78
/* Release a buffer obtained from kpc_percpu_alloc(). */
static void
kpc_percpu_free(uint64_t *buf)
{
	kfree_data(buf, COUNTERBUF_SIZE_PER_CPU);
}
84
85 boolean_t
kpc_register_cpu(struct cpu_data * cpu_data)86 kpc_register_cpu(struct cpu_data *cpu_data)
87 {
88 assert(cpu_data);
89 assert(cpu_data->cpu_kpc_buf[0] == NULL);
90 assert(cpu_data->cpu_kpc_buf[1] == NULL);
91 assert(cpu_data->cpu_kpc_shadow == NULL);
92 assert(cpu_data->cpu_kpc_reload == NULL);
93
94 /*
95 * Buffers allocated through kpc_counterbuf_alloc() are large enough to
96 * store all PMCs values from all CPUs. This mimics the userspace API.
97 * This does not suit well with the per-CPU kpc buffers, since:
98 * 1. Buffers don't need to be this large.
99 * 2. The actual number of CPUs is not known at this point.
100 *
101 * CPUs are asked to callout into kpc when being registered, we'll
102 * allocate the memory here.
103 */
104
105 if ((cpu_data->cpu_kpc_buf[0] = kpc_percpu_alloc()) == NULL) {
106 goto error;
107 }
108 if ((cpu_data->cpu_kpc_buf[1] = kpc_percpu_alloc()) == NULL) {
109 goto error;
110 }
111 if ((cpu_data->cpu_kpc_shadow = kpc_percpu_alloc()) == NULL) {
112 goto error;
113 }
114 if ((cpu_data->cpu_kpc_reload = kpc_percpu_alloc()) == NULL) {
115 goto error;
116 }
117
118 /* success */
119 return TRUE;
120
121 error:
122 kpc_unregister_cpu(cpu_data);
123 return FALSE;
124 }
125
126 void
kpc_unregister_cpu(struct cpu_data * cpu_data)127 kpc_unregister_cpu(struct cpu_data *cpu_data)
128 {
129 assert(cpu_data);
130 if (cpu_data->cpu_kpc_buf[0] != NULL) {
131 kpc_percpu_free(cpu_data->cpu_kpc_buf[0]);
132 cpu_data->cpu_kpc_buf[0] = NULL;
133 }
134 if (cpu_data->cpu_kpc_buf[1] != NULL) {
135 kpc_percpu_free(cpu_data->cpu_kpc_buf[1]);
136 cpu_data->cpu_kpc_buf[1] = NULL;
137 }
138 if (cpu_data->cpu_kpc_shadow != NULL) {
139 kpc_percpu_free(cpu_data->cpu_kpc_shadow);
140 cpu_data->cpu_kpc_shadow = NULL;
141 }
142 if (cpu_data->cpu_kpc_reload != NULL) {
143 kpc_percpu_free(cpu_data->cpu_kpc_reload);
144 cpu_data->cpu_kpc_reload = NULL;
145 }
146 }
147
/*
 * Mark or clear `task` as the holder of the force-all-counters claim.
 * The task lock serializes updates to t_kpc.
 */
static void
kpc_task_set_forced_all_ctrs(task_t task, boolean_t state)
{
	assert(task);

	task_lock(task);
	if (state) {
		task->t_kpc |= TASK_KPC_FORCED_ALL_CTRS;
	} else {
		task->t_kpc &= ~TASK_KPC_FORCED_ALL_CTRS;
	}
	task_unlock(task);
}
161
162 bool kpc_task_get_forced_all_ctrs(task_t task);
163
164 bool
kpc_task_get_forced_all_ctrs(task_t task)165 kpc_task_get_forced_all_ctrs(task_t task)
166 {
167 return task->t_kpc & TASK_KPC_FORCED_ALL_CTRS;
168 }
169
/*
 * Acquire (val != 0) or release (val == 0) exclusive ownership of every PMC
 * on behalf of `task`.  Returns 0 on success, EACCES if a different task has
 * already forced all counters.
 */
int
kpc_force_all_ctrs(task_t task, int val)
{
	boolean_t new_state = val ? TRUE : FALSE;
	boolean_t old_state = kpc_get_force_all_ctrs();

	/*
	 * Refuse to do the operation if the counters are already forced by
	 * another task.
	 */
	if (kpc_get_force_all_ctrs() && !kpc_task_get_forced_all_ctrs(task)) {
		return EACCES;
	}

	/* nothing to do if the state is not changing */
	if (old_state == new_state) {
		return 0;
	}

#if MONOTONIC
	/* hand counter ownership between monotonic and this client */
	mt_ownership_change(new_state);
#endif /* MONOTONIC */

	/* notify the power manager: counters become unavailable to PM when forced */
	if (kpc_pm_handler) {
		kpc_pm_handler(new_state ? FALSE : TRUE);
	}

	/*
	 * This is a force -- ensure that counters are forced, even if power
	 * management fails to acknowledge it.
	 */
	if (force_all_ctrs != new_state) {
		force_all_ctrs = new_state;
	}

	/* update the task bits */
	kpc_task_set_forced_all_ctrs(task, new_state);

	return 0;
}
211
212 void
kpc_pm_acknowledge(boolean_t available_to_pm)213 kpc_pm_acknowledge(boolean_t available_to_pm)
214 {
215 /*
216 * Force-all-counters should still be true when the counters are being
217 * made available to power management and false when counters are going
218 * to be taken away.
219 */
220 assert(force_all_ctrs == available_to_pm);
221 /*
222 * Make sure power management isn't playing games with us.
223 */
224
225 /*
226 * Counters being available means no one is forcing all counters.
227 */
228 force_all_ctrs = available_to_pm ? FALSE : TRUE;
229 }
230
/* Return non-zero when some task currently has all counters forced. */
int
kpc_get_force_all_ctrs(void)
{
	return force_all_ctrs;
}
236
/* TRUE when the power manager is also a client of kpc. */
boolean_t
kpc_multiple_clients(void)
{
	return kpc_pm_handler != NULL;
}
242
/*
 * kpc controls the fixed counters unless the power manager registered with
 * a custom configuration -- and even then, force-all-counters takes the
 * fixed counters back for kpc.
 */
boolean_t
kpc_controls_fixed_counters(void)
{
	return !kpc_pm_handler || force_all_ctrs || !kpc_pm_has_custom_config;
}
248
249 boolean_t
kpc_controls_counter(uint32_t ctr)250 kpc_controls_counter(uint32_t ctr)
251 {
252 uint64_t pmc_mask = 0ULL;
253
254 assert(ctr < (kpc_fixed_count() + kpc_configurable_count()));
255
256 if (ctr < kpc_fixed_count()) {
257 return kpc_controls_fixed_counters();
258 }
259
260 /*
261 * By default kpc manages all PMCs, but if the Power Manager registered
262 * with custom_config=TRUE, the Power Manager manages its reserved PMCs.
263 * However, kpc takes ownership back if a task acquired all PMCs via
264 * force_all_ctrs.
265 */
266 pmc_mask = (1ULL << (ctr - kpc_fixed_count()));
267 if ((pmc_mask & kpc_pm_pmc_mask) && kpc_pm_has_custom_config && !force_all_ctrs) {
268 return FALSE;
269 }
270
271 return TRUE;
272 }
273
274 uint32_t
kpc_get_running(void)275 kpc_get_running(void)
276 {
277 uint64_t pmc_mask = 0;
278 uint32_t cur_state = 0;
279
280 if (kpc_is_running_fixed()) {
281 cur_state |= KPC_CLASS_FIXED_MASK;
282 }
283
284 pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
285 if (kpc_is_running_configurable(pmc_mask)) {
286 cur_state |= KPC_CLASS_CONFIGURABLE_MASK;
287 }
288
289 pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
290 if ((pmc_mask != 0) && kpc_is_running_configurable(pmc_mask)) {
291 cur_state |= KPC_CLASS_POWER_MASK;
292 }
293
294 return cur_state;
295 }
296
/*
 * Read the current CPU's counters for the requested classes into `buf`,
 * optionally reporting the CPU number through `curcpu`.  Returns the number
 * of counter values written.  May be called from an IPI; interrupts are
 * disabled while sampling so the CPU number and values stay coherent.
 */
int
kpc_get_curcpu_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
	int enabled = 0, offset = 0;
	uint64_t pmc_mask = 0ULL;

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	/* grab counters and CPU number as close as possible */
	if (curcpu) {
		*curcpu = cpu_number();
	}

	if (classes & KPC_CLASS_FIXED_MASK) {
		kpc_get_fixed_counters(&buf[offset]);
		offset += kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	/* restore the caller's interrupt state */
	ml_set_interrupts_enabled(enabled);

	return offset;
}
334
335 /* generic counter reading function, public api */
336 int
kpc_get_cpu_counters(boolean_t all_cpus,uint32_t classes,int * curcpu,uint64_t * buf)337 kpc_get_cpu_counters(boolean_t all_cpus, uint32_t classes,
338 int *curcpu, uint64_t *buf)
339 {
340 assert(buf);
341
342 /*
343 * Unlike reading the current CPU counters, reading counters from all
344 * CPUs is architecture dependent. This allows kpc to make the most of
345 * the platform if memory mapped registers is supported.
346 */
347 if (all_cpus) {
348 return kpc_get_all_cpus_counters(classes, curcpu, buf);
349 } else {
350 return kpc_get_curcpu_counters(classes, curcpu, buf);
351 }
352 }
353
354 int
kpc_get_shadow_counters(boolean_t all_cpus,uint32_t classes,int * curcpu,uint64_t * buf)355 kpc_get_shadow_counters(boolean_t all_cpus, uint32_t classes,
356 int *curcpu, uint64_t *buf)
357 {
358 int curcpu_id = cpu_number();
359 uint32_t cfg_count = kpc_configurable_count(), offset = 0;
360 uint64_t pmc_mask = 0ULL;
361 boolean_t enabled;
362
363 assert(buf);
364
365 enabled = ml_set_interrupts_enabled(FALSE);
366
367 curcpu_id = cpu_number();
368 if (curcpu) {
369 *curcpu = curcpu_id;
370 }
371
372 for (int cpu = 0; cpu < machine_info.logical_cpu_max; ++cpu) {
373 /* filter if the caller did not request all cpus */
374 if (!all_cpus && (cpu != curcpu_id)) {
375 continue;
376 }
377
378 if (classes & KPC_CLASS_FIXED_MASK) {
379 uint32_t count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
380 memcpy(&buf[offset], &FIXED_SHADOW_CPU(cpu, 0), count * sizeof(uint64_t));
381 offset += count;
382 }
383
384 if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
385 pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
386
387 for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr) {
388 if ((1ULL << cfg_ctr) & pmc_mask) {
389 buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
390 }
391 }
392 }
393
394 if (classes & KPC_CLASS_POWER_MASK) {
395 pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
396
397 for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr) {
398 if ((1ULL << cfg_ctr) & pmc_mask) {
399 buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
400 }
401 }
402 }
403 }
404
405 ml_set_interrupts_enabled(enabled);
406
407 return offset;
408 }
409
410 uint32_t
kpc_get_counter_count(uint32_t classes)411 kpc_get_counter_count(uint32_t classes)
412 {
413 uint32_t count = 0;
414
415 if (classes & KPC_CLASS_FIXED_MASK) {
416 count += kpc_fixed_count();
417 }
418
419 if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
420 uint64_t pmc_msk = kpc_get_configurable_pmc_mask(classes);
421 uint32_t pmc_cnt = kpc_popcount(pmc_msk);
422 count += pmc_cnt;
423 }
424
425 return count;
426 }
427
428 uint32_t
kpc_get_config_count(uint32_t classes)429 kpc_get_config_count(uint32_t classes)
430 {
431 uint32_t count = 0;
432
433 if (classes & KPC_CLASS_FIXED_MASK) {
434 count += kpc_fixed_config_count();
435 }
436
437 if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
438 uint64_t pmc_mask = kpc_get_configurable_pmc_mask(classes);
439 count += kpc_configurable_config_count(pmc_mask);
440 }
441
442 if ((classes & KPC_CLASS_RAWPMU_MASK) &&
443 (!kpc_multiple_clients() || force_all_ctrs)) {
444 count += kpc_rawpmu_config_count();
445 }
446
447 return count;
448 }
449
450 int
kpc_get_config(uint32_t classes,kpc_config_t * current_config)451 kpc_get_config(uint32_t classes, kpc_config_t *current_config)
452 {
453 uint32_t count = 0;
454
455 assert(current_config);
456
457 if (classes & KPC_CLASS_FIXED_MASK) {
458 kpc_get_fixed_config(¤t_config[count]);
459 count += kpc_get_config_count(KPC_CLASS_FIXED_MASK);
460 }
461
462 if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
463 uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
464 kpc_get_configurable_config(¤t_config[count], pmc_mask);
465 count += kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
466 }
467
468 if (classes & KPC_CLASS_POWER_MASK) {
469 uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
470 kpc_get_configurable_config(¤t_config[count], pmc_mask);
471 count += kpc_get_config_count(KPC_CLASS_POWER_MASK);
472 }
473
474 if (classes & KPC_CLASS_RAWPMU_MASK) {
475 // Client shouldn't ask for config words that aren't available.
476 // Most likely, they'd misinterpret the returned buffer if we
477 // allowed this.
478 if (kpc_multiple_clients() && !force_all_ctrs) {
479 return EPERM;
480 }
481 kpc_get_rawpmu_config(¤t_config[count]);
482 count += kpc_get_config_count(KPC_CLASS_RAWPMU_MASK);
483 }
484
485 return 0;
486 }
487
/*
 * Common implementation behind the kernel and external set-config entry
 * points.  Validates permissions, then hands the configuration to the
 * machine layer under the config lock.  `allow_list` distinguishes
 * kernel-originated requests from external ones.
 */
static int
_kpc_set_config_internal(uint32_t classes, kpc_config_t *configv, bool allow_list)
{
	int ret = 0;
	struct kpc_config_remote mp_config = {
		.classes = classes, .configv = configv,
		.pmc_mask = kpc_get_configurable_pmc_mask(classes),
		.allow_list = allow_list,
	};

	assert(configv);

	/* don't allow RAWPMU configuration when sharing counters */
	if ((classes & KPC_CLASS_RAWPMU_MASK) && kpc_multiple_clients() &&
	    !force_all_ctrs) {
		return EPERM;
	}

	/* no clients have the right to modify both classes */
	if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) &&
	    (classes & (KPC_CLASS_POWER_MASK))) {
		return EPERM;
	}

	lck_mtx_lock(&kpc_config_lock);

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK) {
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;
	}

	ret = kpc_set_config_arch( &mp_config );

	lck_mtx_unlock(&kpc_config_lock);

	return ret;
}
525
/* Set counter configuration on behalf of a kernel client (allow-listed). */
int
kpc_set_config_kernel(uint32_t classes, kpc_config_t * configv)
{
	return _kpc_set_config_internal(classes, configv, true);
}
531
int kpc_set_config_external(uint32_t classes, kpc_config_t *configv);

/* Set counter configuration on behalf of an external (userspace) client. */
int
kpc_set_config_external(uint32_t classes, kpc_config_t *configv)
{
	return _kpc_set_config_internal(classes, configv, false);
}
538
/* Size in bytes of a buffer that can hold all counters of all CPUs. */
uint32_t
kpc_get_counterbuf_size(void)
{
	return COUNTERBUF_SIZE;
}
544
/*
 * Allocate a zero-filled buffer large enough for all possible counters on
 * all possible CPUs.  Returns NULL on failure; free with
 * kpc_counterbuf_free().
 */
uint64_t *
kpc_counterbuf_alloc(void)
{
	return kalloc_data_tag(COUNTERBUF_SIZE, Z_WAITOK | Z_ZERO,
	    VM_KERN_MEMORY_DIAG);
}
552
/* Release a buffer obtained from kpc_counterbuf_alloc(). */
void
kpc_counterbuf_free(uint64_t *buf)
{
	kfree_data(buf, COUNTERBUF_SIZE);
}
558
/*
 * Take a kperf sample in response to a kpc PMI.  `actionid` selects the
 * kperf action to run; `counter`, `config`, `count` and `flags` describe
 * the PMI source; `pc` is the interrupted instruction pointer.
 */
void
kpc_sample_kperf(uint32_t actionid, uint32_t counter, uint64_t config,
    uint64_t count, uintptr_t pc, kperf_kpc_flags_t flags)
{
	struct kperf_sample sbuf;

	/* pack counter number (bits 32..47) and flags (bits 48..) around config */
	uint64_t desc = config | (uint64_t)counter << 32 | (uint64_t)flags << 48;

	BUF_DATA(PERF_KPC_HNDLR | DBG_FUNC_START, desc, count, pc);

	thread_t thread = current_thread();
	task_t task = get_threadtask(thread);

	struct kperf_context ctx = {
		.cur_thread = thread,
		.cur_task = task,
		.cur_pid = task_pid(task),
		.trigger_type = TRIGGER_TYPE_PMI,
		.trigger_id = 0,
	};

	/* kperf_sample fills sbuf; user-space work is pended to the AST */
	int r = kperf_sample(&sbuf, &ctx, actionid, SAMPLE_FLAG_PEND_USER);

	BUF_INFO(PERF_KPC_HNDLR | DBG_FUNC_END, r);
}
584
585
/*
 * Set the PMI period for each counter in `classes` from `val`.  Returns 0
 * on success, EPERM when the caller may not modify the requested counters
 * (or mixes the configurable and power classes), and EINVAL when fixed
 * counters have no reload support on this build.
 */
int
kpc_set_period(uint32_t classes, uint64_t *val)
{
	struct kpc_config_remote mp_config = {
		.classes = classes, .configv = val,
		.pmc_mask = kpc_get_configurable_pmc_mask(classes)
	};

	assert(val);

	/* no clients have the right to modify both classes */
	if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) &&
	    (classes & (KPC_CLASS_POWER_MASK))) {
		return EPERM;
	}

	lck_mtx_lock(&kpc_config_lock);

#ifdef FIXED_COUNTER_SHADOW
	/* fixed counters have reload support; still need ownership */
	if ((classes & KPC_CLASS_FIXED_MASK) && !kpc_controls_fixed_counters()) {
		lck_mtx_unlock(&kpc_config_lock);
		return EPERM;
	}
# else
	/* without reload shadows, fixed counters cannot take a period */
	if (classes & KPC_CLASS_FIXED_MASK) {
		lck_mtx_unlock(&kpc_config_lock);
		return EINVAL;
	}
#endif

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK) {
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;
	}

	/* debug trace of the classes being reprogrammed */
	kprintf("setting period %u\n", classes);
	kpc_set_period_arch( &mp_config );

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}
628
/*
 * Read the PMI period for each counter in `classes` into `val`.  Periods
 * are stored internally as reload values (max - period), so each entry is
 * converted back on the way out.  Always returns 0.
 */
int
kpc_get_period(uint32_t classes, uint64_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		/* convert reload values to periods */
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		for (uint32_t i = 0; i < count; ++i) {
			*val++ = kpc_fixed_max() - FIXED_RELOAD(i);
		}
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		/* convert reload values to periods, only for PMCs in the mask */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				*val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
			}
		}
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		/* convert reload values to periods */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				*val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
			}
		}
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}
675
/*
 * Assign a kperf action ID to each counter in `classes` from `val`.  A PMI
 * on a counter triggers its action.  Always returns 0.
 */
int
kpc_set_actionid(uint32_t classes, uint32_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	/* NOTE: what happens if a pmi occurs while actionids are being
	 * set is undefined. */
	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		memcpy(&FIXED_ACTIONID(0), val, count * sizeof(uint32_t));
		val += count;
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		/* consume one input value per PMC selected by the mask */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				CONFIGURABLE_ACTIONID(i) = *val++;
			}
		}
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				CONFIGURABLE_ACTIONID(i) = *val++;
			}
		}
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}
720
/*
 * Read the kperf action ID of each counter in `classes` into `val`.
 * Always returns 0.
 */
int
kpc_get_actionid(uint32_t classes, uint32_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		memcpy(val, &FIXED_ACTIONID(0), count * sizeof(uint32_t));
		val += count;
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		/* emit one output value per PMC selected by the mask */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				*val++ = CONFIGURABLE_ACTIONID(i);
			}
		}
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				*val++ = CONFIGURABLE_ACTIONID(i);
			}
		}
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}
763
764 int
kpc_set_running(uint32_t classes)765 kpc_set_running(uint32_t classes)
766 {
767 uint32_t all_cfg_classes = KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK;
768 struct kpc_running_remote mp_config = {
769 .classes = classes, .cfg_target_mask = 0ULL, .cfg_state_mask = 0ULL
770 };
771
772 /* target all available PMCs */
773 mp_config.cfg_target_mask = kpc_get_configurable_pmc_mask(all_cfg_classes);
774
775 /* translate the power class for the machine layer */
776 if (classes & KPC_CLASS_POWER_MASK) {
777 mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;
778 }
779
780 /* generate the state of each configurable PMCs */
781 mp_config.cfg_state_mask = kpc_get_configurable_pmc_mask(classes);
782
783 return kpc_set_running_arch(&mp_config);
784 }
785
/*
 * Legacy power-manager registration: reserves PMCs 3-5 (mask 0x38 =
 * 0b111000) with a custom configuration.
 */
boolean_t
kpc_register_pm_handler(kpc_pm_handler_t handler)
{
	return kpc_reserve_pm_counters(0x38, handler, TRUE);
}
791
/*
 * Register the power manager as a kpc client, reserving the configurable
 * PMCs in `pmc_mask` for it.  `custom_config` indicates PM will program its
 * PMCs itself.  Returns FALSE when a task has already forced all counters
 * (PM must wait for kpc_pm_acknowledge), TRUE otherwise.
 */
boolean_t
kpc_reserve_pm_counters(uint64_t pmc_mask, kpc_pm_handler_t handler,
    boolean_t custom_config)
{
	uint64_t all_mask = (1ULL << kpc_configurable_count()) - 1;
	uint64_t req_mask = 0ULL;

	/* pre-condition: exactly one PM registration at a time */
	assert(handler != NULL);
	assert(kpc_pm_handler == NULL);

	/* check number of counters requested; clamp to existing PMCs */
	req_mask = (pmc_mask & all_mask);
	assert(kpc_popcount(req_mask) <= kpc_configurable_count());

	/* save the power manager states */
	kpc_pm_has_custom_config = custom_config;
	kpc_pm_pmc_mask = req_mask;
	kpc_pm_handler = handler;

	printf("kpc: pm registered pmc_mask=%llx custom_config=%d\n",
	    req_mask, custom_config);

	/* post-condition: every configurable PMC is owned by kpc or by PM */
	{
		uint32_t cfg_count = kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);
		uint32_t pwr_count = kpc_popcount(kpc_pm_pmc_mask);
#pragma unused(cfg_count, pwr_count)
		assert((cfg_count + pwr_count) == kpc_configurable_count());
	}

	return force_all_ctrs ? FALSE : TRUE;
}
825
826 void
kpc_release_pm_counters(void)827 kpc_release_pm_counters(void)
828 {
829 /* pre-condition */
830 assert(kpc_pm_handler != NULL);
831
832 /* release the counters */
833 kpc_pm_has_custom_config = FALSE;
834 kpc_pm_pmc_mask = 0ULL;
835 kpc_pm_handler = NULL;
836
837 printf("kpc: pm released counters\n");
838
839 /* post-condition */
840 assert(kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK) == kpc_configurable_count());
841 }
842
/* Count the set bits in `value` (Kernighan's clear-lowest-bit loop). */
uint8_t
kpc_popcount(uint64_t value)
{
	uint8_t bits = 0;

	while (value != 0) {
		value &= value - 1; /* clears the lowest set bit */
		bits++;
	}

	return bits;
}
848
/*
 * Build the bitmask of configurable PMCs belonging to the requested
 * classes.  The configurable class covers the PMCs kpc owns; the power
 * class covers the PMCs reserved by PM (and only exists while PM is
 * registered without a custom config and nobody has forced all counters).
 * The two masks are disjoint by construction.
 */
uint64_t
kpc_get_configurable_pmc_mask(uint32_t classes)
{
	uint32_t configurable_count = kpc_configurable_count();
	uint64_t cfg_mask = 0ULL, pwr_mask = 0ULL, all_cfg_pmcs_mask = 0ULL;

	/* not configurable classes or no configurable counters */
	if (((classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) == 0) ||
	    (configurable_count == 0)) {
		goto exit;
	}

	/* guard the shift below: 1ULL << 64 would be undefined */
	assert(configurable_count < 64);
	all_cfg_pmcs_mask = (1ULL << configurable_count) - 1;

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		if (force_all_ctrs == TRUE) {
			/* a forcing task owns every PMC, including PM's */
			cfg_mask |= all_cfg_pmcs_mask;
		} else {
			cfg_mask |= (~kpc_pm_pmc_mask) & all_cfg_pmcs_mask;
		}
	}

	/*
	 * The power class exists iff:
	 *      - No tasks acquired all PMCs
	 *      - PM registered and uses kpc to interact with PMCs
	 */
	if ((force_all_ctrs == FALSE) &&
	    (kpc_pm_handler != NULL) &&
	    (kpc_pm_has_custom_config == FALSE) &&
	    (classes & KPC_CLASS_POWER_MASK)) {
		pwr_mask |= kpc_pm_pmc_mask & all_cfg_pmcs_mask;
	}

exit:
	/* post-conditions: masks stay in range, disjoint, and within count */
	assert(((cfg_mask | pwr_mask) & (~all_cfg_pmcs_mask)) == 0 );
	assert( kpc_popcount(cfg_mask | pwr_mask) <= kpc_configurable_count());
	assert((cfg_mask & pwr_mask) == 0ULL );

	return cfg_mask | pwr_mask;
}
892