1 /*
2 * Copyright (c) 2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach/mach_types.h>
30 #include <machine/machine_routines.h>
31 #include <kern/processor.h>
32 #include <i386/cpuid.h>
33 #include <i386/proc_reg.h>
34 #include <i386/mp.h>
35 #include <sys/errno.h>
36 #include <kperf/buffer.h>
37
38 #include <kern/kpc.h>
39
40 #include <kperf/kperf.h>
41 #include <kperf/sample.h>
42 #include <kperf/context.h>
43 #include <kperf/action.h>
44
45 #include <kern/monotonic.h>
46
/* Fixed counter mask for each fixed counter -- each with OS and USER */
#define IA32_FIXED_CTR_ENABLE_ALL_RINGS (0x3)
#define IA32_FIXED_CTR_ENABLE_PMI (0x8)

/* IA32_PERFEVTSELx ring-enable bits: bit 16 counts user (CPL > 0), bit 17 counts OS (CPL 0) */
#define IA32_PERFEVT_USER_EN (0x10000)
#define IA32_PERFEVT_OS_EN (0x20000)

/* IA32_PERFEVTSELx control bits: bit 20 enables PMI on overflow, bit 22 enables the counter */
#define IA32_PERFEVTSEL_PMI (1ull << 20)
#define IA32_PERFEVTSEL_EN (1ull << 22)

/* Non-serialising -- read PMCs with the rdpmc instruction instead of rdmsr */
#define USE_RDPMC

/* rdpmc counter-selector bit 30 selects the fixed-function counter bank */
#define RDPMC_FIXED_COUNTER_SELECTOR (1ULL<<30)

/* track the last config we enabled */
static uint64_t kpc_running_cfg_pmc_mask = 0;
static uint32_t kpc_running_classes = 0;
65
66 /* PMC / MSR accesses */
67
/* Read the fixed-counter control MSR (ring enables and PMI bits for the fixed PMCs). */
static uint64_t
IA32_FIXED_CTR_CTRL(void)
{
	return rdmsr64( MSR_IA32_PERF_FIXED_CTR_CTRL );
}
73
#ifdef FIXED_COUNTER_RELOAD
/* Write fixed-function counter `ctr` (MSR_IA32_PERF_FIXED_CTR0 + ctr) with `value`. */
static void
wrIA32_FIXED_CTRx(uint32_t ctr, uint64_t value)
{
	/*
	 * Note: the original `return wrmsr64(...)` returned a void expression
	 * from a void function, which is a constraint violation in ISO C
	 * (GNU extension only) -- call it as a plain statement instead.
	 */
	wrmsr64(MSR_IA32_PERF_FIXED_CTR0 + ctr, value);
}
#endif
81
/*
 * Read configurable counter `ctr`.  With USE_RDPMC the non-serialising
 * rdpmc instruction is used; otherwise fall back to an MSR read of
 * MSR_IA32_PERFCTR0 + ctr.
 */
static uint64_t
IA32_PMCx(uint32_t ctr)
{
#ifdef USE_RDPMC
	return rdpmc64(ctr);
#else /* !USE_RDPMC */
	return rdmsr64(MSR_IA32_PERFCTR0 + ctr);
#endif /* !USE_RDPMC */
}
91
92 static void
wrIA32_PMCx(uint32_t ctr,uint64_t value)93 wrIA32_PMCx(uint32_t ctr, uint64_t value)
94 {
95 return wrmsr64(MSR_IA32_PERFCTR0 + ctr, value);
96 }
97
/* Read the event-select MSR for configurable counter `ctr`. */
static uint64_t
IA32_PERFEVTSELx(uint32_t ctr)
{
	return rdmsr64(MSR_IA32_EVNTSEL0 + ctr);
}
103
/* Write the event-select MSR for configurable counter `ctr`. */
static void
wrIA32_PERFEVTSELx(uint32_t ctr, uint64_t value)
{
	wrmsr64(MSR_IA32_EVNTSEL0 + ctr, value);
}
109
110
111 /* internal functions */
112
113 boolean_t
kpc_is_running_fixed(void)114 kpc_is_running_fixed(void)
115 {
116 return (kpc_running_classes & KPC_CLASS_FIXED_MASK) == KPC_CLASS_FIXED_MASK;
117 }
118
119 boolean_t
kpc_is_running_configurable(uint64_t pmc_mask)120 kpc_is_running_configurable(uint64_t pmc_mask)
121 {
122 assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
123 return ((kpc_running_classes & KPC_CLASS_CONFIGURABLE_MASK) == KPC_CLASS_CONFIGURABLE_MASK) &&
124 ((kpc_running_cfg_pmc_mask & pmc_mask) == pmc_mask);
125 }
126
127 uint32_t
kpc_fixed_count(void)128 kpc_fixed_count(void)
129 {
130 i386_cpu_info_t *info = NULL;
131 info = cpuid_info();
132 return info->cpuid_arch_perf_leaf.fixed_number;
133 }
134
135 uint32_t
kpc_configurable_count(void)136 kpc_configurable_count(void)
137 {
138 i386_cpu_info_t *info = NULL;
139 info = cpuid_info();
140 return info->cpuid_arch_perf_leaf.number;
141 }
142
/* Number of configuration words used by the fixed-counter class. */
uint32_t
kpc_fixed_config_count(void)
{
	return KPC_X86_64_FIXED_CONFIGS;
}
148
149 uint32_t
kpc_configurable_config_count(uint64_t pmc_mask)150 kpc_configurable_config_count(uint64_t pmc_mask)
151 {
152 assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
153 return kpc_popcount(pmc_mask);
154 }
155
/* Raw PMU access is not implemented on x86, so there are no raw config words. */
uint32_t
kpc_rawpmu_config_count(void)
{
	// RAW PMU access not implemented.
	return 0;
}
162
/* Raw PMU access is not implemented on x86 -- nothing to copy out, always succeeds. */
int
kpc_get_rawpmu_config(__unused kpc_config_t *configv)
{
	return 0;
}
168
169 static uint8_t
kpc_fixed_width(void)170 kpc_fixed_width(void)
171 {
172 i386_cpu_info_t *info = NULL;
173
174 info = cpuid_info();
175
176 return info->cpuid_arch_perf_leaf.fixed_width;
177 }
178
179 static uint8_t
kpc_configurable_width(void)180 kpc_configurable_width(void)
181 {
182 i386_cpu_info_t *info = NULL;
183
184 info = cpuid_info();
185
186 return info->cpuid_arch_perf_leaf.width;
187 }
188
189 uint64_t
kpc_fixed_max(void)190 kpc_fixed_max(void)
191 {
192 return (1ULL << kpc_fixed_width()) - 1;
193 }
194
195 uint64_t
kpc_configurable_max(void)196 kpc_configurable_max(void)
197 {
198 return (1ULL << kpc_configurable_width()) - 1;
199 }
200
#ifdef FIXED_COUNTER_SHADOW
/*
 * Re-arm fixed counter `ctr` with its precomputed reload value and return
 * the value the counter held just before the reload.
 */
static uint64_t
kpc_reload_fixed(int ctr)
{
	uint64_t old = IA32_FIXED_CTRx(ctr);
	wrIA32_FIXED_CTRx(ctr, FIXED_RELOAD(ctr));
	return old;
}
#endif
210
/*
 * Re-arm configurable counter `ctr` with its precomputed reload value and
 * return the value the counter held just before the reload.  The event-select
 * enable bit is cleared around the write and then restored, because the
 * counter cannot be written while enabled.
 */
static uint64_t
kpc_reload_configurable(int ctr)
{
	uint64_t cfg = IA32_PERFEVTSELx(ctr);

	/* counters must be disabled before they can be written to */
	uint64_t old = IA32_PMCx(ctr);
	wrIA32_PERFEVTSELx(ctr, cfg & ~IA32_PERFEVTSEL_EN);
	wrIA32_PMCx(ctr, CONFIGURABLE_RELOAD(ctr));
	wrIA32_PERFEVTSELx(ctr, cfg);
	return old;
}
223
224 void kpc_pmi_handler(void);
225
226 static void
set_running_fixed(boolean_t on)227 set_running_fixed(boolean_t on)
228 {
229 uint64_t global = 0, mask = 0, fixed_ctrl = 0;
230 uint32_t i;
231 boolean_t enabled;
232
233 if (on) {
234 /* these are per-thread in SMT */
235 for (i = 0; i < kpc_fixed_count(); i++) {
236 fixed_ctrl |= ((uint64_t)(IA32_FIXED_CTR_ENABLE_ALL_RINGS | IA32_FIXED_CTR_ENABLE_PMI) << (4 * i));
237 }
238 } else {
239 /* don't allow disabling fixed counters */
240 return;
241 }
242
243 wrmsr64( MSR_IA32_PERF_FIXED_CTR_CTRL, fixed_ctrl );
244
245 enabled = ml_set_interrupts_enabled(FALSE);
246
247 /* rmw the global control */
248 global = rdmsr64(MSR_IA32_PERF_GLOBAL_CTRL);
249 for (i = 0; i < kpc_fixed_count(); i++) {
250 mask |= (1ULL << (32 + i));
251 }
252
253 if (on) {
254 global |= mask;
255 } else {
256 global &= ~mask;
257 }
258
259 wrmsr64(MSR_IA32_PERF_GLOBAL_CTRL, global);
260
261 ml_set_interrupts_enabled(enabled);
262 }
263
/*
 * Enable/disable configurable counters on the current CPU.  `target_mask`
 * selects which global-control bits to modify; `state_mask` supplies their
 * new values.  Every counter's event-select MSR gets PMI and EN set; the
 * counter value is saved and restored around that write because
 * reconfiguring resets the counter.
 */
static void
set_running_configurable(uint64_t target_mask, uint64_t state_mask)
{
	uint32_t cfg_count = kpc_configurable_count();
	uint64_t global = 0ULL, cfg = 0ULL, save = 0ULL;
	boolean_t enabled;

	enabled = ml_set_interrupts_enabled(FALSE);

	/* rmw the global control */
	global = rdmsr64(MSR_IA32_PERF_GLOBAL_CTRL);

	/* need to save and restore counter since it resets when reconfigured */
	for (uint32_t i = 0; i < cfg_count; ++i) {
		cfg = IA32_PERFEVTSELx(i);
		save = IA32_PMCx(i);
		wrIA32_PERFEVTSELx(i, cfg | IA32_PERFEVTSEL_PMI | IA32_PERFEVTSEL_EN);
		wrIA32_PMCx(i, save);
	}

	/* update the global control value */
	global &= ~target_mask; /* clear the targeted PMCs bits */
	global |= state_mask;   /* update the targeted PMCs bits with their new states */
	wrmsr64(MSR_IA32_PERF_GLOBAL_CTRL, global);

	ml_set_interrupts_enabled(enabled);
}
291
/*
 * Per-CPU callout for kpc_set_running_arch(): apply the requested running
 * state on the CPU this executes on.  Fixed counters are only touched when
 * kpc owns them (kpc_controls_fixed_counters()).
 */
static void
kpc_set_running_mp_call( void *vstate )
{
	struct kpc_running_remote *mp_config = (struct kpc_running_remote*) vstate;
	assert(mp_config);

	if (kpc_controls_fixed_counters()) {
		set_running_fixed(mp_config->classes & KPC_CLASS_FIXED_MASK);
	}

	set_running_configurable(mp_config->cfg_target_mask,
	    mp_config->cfg_state_mask);
}
305
/*
 * Copy out the fixed-counter configuration: a single word holding the
 * fixed-counter control MSR.  Always succeeds.
 */
int
kpc_get_fixed_config(kpc_config_t *configv)
{
	configv[0] = IA32_FIXED_CTR_CTRL();
	return 0;
}
312
/*
 * Setting the fixed-counter configuration is not yet implemented;
 * always fails with -1.
 */
static int
kpc_set_fixed_config(kpc_config_t *configv)
{
	(void) configv;

	/* NYI */
	return -1;
}
321
/*
 * Read the fixed counters into `counterv`.  Delegates to the monotonic
 * subsystem when it is compiled in; otherwise unsupported.
 */
int
kpc_get_fixed_counters(uint64_t *counterv)
{
#if MONOTONIC
	mt_fixed_counts(counterv);
	return 0;
#else /* MONOTONIC */
#pragma unused(counterv)
	return ENOTSUP;
#endif /* !MONOTONIC */
}
333
334 int
kpc_get_configurable_config(kpc_config_t * configv,uint64_t pmc_mask)335 kpc_get_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
336 {
337 uint32_t cfg_count = kpc_configurable_count();
338
339 assert(configv);
340
341 for (uint32_t i = 0; i < cfg_count; ++i) {
342 if ((1ULL << i) & pmc_mask) {
343 *configv++ = IA32_PERFEVTSELx(i);
344 }
345 }
346 return 0;
347 }
348
/*
 * Program the event-select MSRs for every configurable counter selected by
 * `pmc_mask`, consuming one word of `configv` per selected counter.  The
 * counter value is saved and restored around the write since reconfiguring
 * resets it.  User-supplied config words are sanitized with a bitmask
 * before being written.  Always returns 0.
 */
static int
kpc_set_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count();
	uint64_t save;

	for (uint32_t i = 0; i < cfg_count; i++) {
		if (((1ULL << i) & pmc_mask) == 0) {
			continue;
		}

		/* need to save and restore counter since it resets when reconfigured */
		save = IA32_PMCx(i);

		/*
		 * Some bits are not safe to set from user space.
		 * Allow these bits to be set:
		 *
		 *   0-7    Event select
		 *   8-15   UMASK
		 *   16     USR
		 *   17     OS
		 *   18     E
		 *   22     EN
		 *   23     INV
		 *   24-31  CMASK
		 *
		 * Excluding:
		 *
		 *   19     PC
		 *   20     INT
		 *   21     AnyThread
		 *   32     IN_TX
		 *   33     IN_TXCP
		 *   34-63  Reserved
		 */
		wrIA32_PERFEVTSELx(i, *configv & 0xffc7ffffull);
		wrIA32_PMCx(i, save);

		/* next configuration word */
		configv++;
	}

	return 0;
}
394
/*
 * Read the 64-bit software view of every configurable counter selected by
 * `pmc_mask` into `counterv` (packed contiguously): shadow value plus the
 * hardware counter's progress since its last reload.  Counters that
 * overflowed between the snapshot and the status read are re-read with
 * overflow-aware math.  Always returns 0.
 */
int
kpc_get_configurable_counters(uint64_t *counterv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count();
	uint64_t status, *it_counterv = counterv;

	/* snap the counters */
	for (uint32_t i = 0; i < cfg_count; ++i) {
		if ((1ULL << i) & pmc_mask) {
			*it_counterv++ = CONFIGURABLE_SHADOW(i) +
			    (IA32_PMCx(i) - CONFIGURABLE_RELOAD(i));
		}
	}

	/* Grab the overflow bits */
	status = rdmsr64(MSR_IA32_PERF_GLOBAL_STATUS);

	/* reset the iterator */
	it_counterv = counterv;

	/*
	 * If the overflow bit is set for a counter, our previous read may or may not have been
	 * before the counter overflowed. Re-read any counter with its overflow bit set so
	 * we know for sure that it has overflowed. The reason this matters is that the math
	 * is different for a counter that has overflowed.
	 */
	for (uint32_t i = 0; i < cfg_count; ++i) {
		if (((1ULL << i) & pmc_mask) &&
		    ((1ULL << i) & status)) {
			*it_counterv++ = CONFIGURABLE_SHADOW(i) +
			    (kpc_configurable_max() - CONFIGURABLE_RELOAD(i)) + IA32_PMCx(i);
		}
	}

	return 0;
}
431
432 static void
kpc_get_curcpu_counters_mp_call(void * args)433 kpc_get_curcpu_counters_mp_call(void *args)
434 {
435 struct kpc_get_counters_remote *handler = args;
436 int offset = 0, r = 0;
437
438 assert(handler);
439 assert(handler->buf);
440
441 offset = cpu_number() * handler->buf_stride;
442 r = kpc_get_curcpu_counters(handler->classes, NULL, &handler->buf[offset]);
443
444 /* number of counters added by this CPU, needs to be atomic */
445 os_atomic_add(&(handler->nb_counters), r, relaxed);
446 }
447
/*
 * Gather counters from every CPU into `buf` (one stride-sized slot per CPU)
 * via an mp broadcast.  Optionally reports the calling CPU's number through
 * `curcpu`.  Returns the total number of counter values written.
 */
int
kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
	int enabled = 0;

	struct kpc_get_counters_remote hdl = {
		.classes = classes, .nb_counters = 0,
		.buf_stride = kpc_get_counter_count(classes), .buf = buf
	};

	assert(buf);

	/* interrupts stay disabled so this CPU's slot is read consistently */
	enabled = ml_set_interrupts_enabled(FALSE);

	if (curcpu) {
		*curcpu = cpu_number();
	}
	mp_cpus_call(CPUMASK_ALL, ASYNC, kpc_get_curcpu_counters_mp_call, &hdl);

	ml_set_interrupts_enabled(enabled);

	return hdl.nb_counters;
}
471
/*
 * Per-CPU callout for kpc_set_config_arch(): apply the new fixed and/or
 * configurable class configurations on the CPU this executes on.  `count`
 * walks the packed configv array, class by class.
 */
static void
kpc_set_config_mp_call(void *vmp_config)
{
	struct kpc_config_remote *mp_config = vmp_config;
	kpc_config_t *new_config = NULL;
	uint32_t classes = 0, count = 0;
	boolean_t enabled;

	assert(mp_config);
	assert(mp_config->configv);
	classes = mp_config->classes;
	new_config = mp_config->configv;

	enabled = ml_set_interrupts_enabled(FALSE);

	if (classes & KPC_CLASS_FIXED_MASK) {
		kpc_set_fixed_config(&new_config[count]);
		count += kpc_get_config_count(KPC_CLASS_FIXED_MASK);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		kpc_set_configurable_config(&new_config[count], mp_config->pmc_mask);
		count += kpc_popcount(mp_config->pmc_mask);
	}

	ml_set_interrupts_enabled(enabled);
}
499
500 static void
kpc_set_reload_mp_call(void * vmp_config)501 kpc_set_reload_mp_call(void *vmp_config)
502 {
503 struct kpc_config_remote *mp_config = vmp_config;
504 uint64_t *new_period = NULL, max = kpc_configurable_max();
505 uint32_t classes = 0, count = 0;
506 boolean_t enabled;
507
508 assert(mp_config);
509 assert(mp_config->configv);
510 classes = mp_config->classes;
511 new_period = mp_config->configv;
512
513 enabled = ml_set_interrupts_enabled(FALSE);
514
515 if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
516 /*
517 * Update _all_ shadow counters, this cannot be done for only
518 * selected PMCs. Otherwise, we would corrupt the configurable
519 * shadow buffer since the PMCs are muxed according to the pmc
520 * mask.
521 */
522 uint64_t all_cfg_mask = (1ULL << kpc_configurable_count()) - 1;
523 kpc_get_configurable_counters(&CONFIGURABLE_SHADOW(0), all_cfg_mask);
524
525 /* set the new period */
526 count = kpc_configurable_count();
527 for (uint32_t i = 0; i < count; ++i) {
528 /* ignore the counter */
529 if (((1ULL << i) & mp_config->pmc_mask) == 0) {
530 continue;
531 }
532
533 if (*new_period == 0) {
534 *new_period = kpc_configurable_max();
535 }
536
537 CONFIGURABLE_RELOAD(i) = max - *new_period;
538
539 /* reload the counter */
540 kpc_reload_configurable(i);
541
542 /* clear overflow bit just in case */
543 wrmsr64(MSR_IA32_PERF_GLOBAL_OVF_CTRL, 1ull << i);
544
545 /* next period value */
546 new_period++;
547 }
548 }
549
550 ml_set_interrupts_enabled(enabled);
551 }
552
553 int
kpc_set_period_arch(struct kpc_config_remote * mp_config)554 kpc_set_period_arch( struct kpc_config_remote *mp_config )
555 {
556 mp_cpus_call( CPUMASK_ALL, ASYNC, kpc_set_reload_mp_call, mp_config );
557
558 return 0;
559 }
560
561
562 /* interface functions */
563
/*
 * One-time architectural init: mark kpc unsupported on PMUs older than
 * Intel architectural perfmon version 2 (version read from CPUID).
 */
void
kpc_arch_init(void)
{
	i386_cpu_info_t *info = cpuid_info();
	uint8_t version_id = info->cpuid_arch_perf_leaf.version;
	/*
	 * kpc only supports Intel PMU versions 2 and above.
	 */
	if (version_id < 2) {
		kpc_supported = false;
	}
}
576
/* Counter classes available on x86: fixed and configurable. */
uint32_t
kpc_get_classes(void)
{
	return KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK;
}
582
/*
 * Start/stop counting: broadcast the new running state to all CPUs, then
 * record it in the file-static tracking variables.  Always returns 0.
 */
int
kpc_set_running_arch(struct kpc_running_remote *mp_config)
{
	assert(mp_config);

	/* dispatch to all CPUs */
	mp_cpus_call(CPUMASK_ALL, ASYNC, kpc_set_running_mp_call, mp_config);

	/* remember what we last enabled, for kpc_is_running_* queries */
	kpc_running_cfg_pmc_mask = mp_config->cfg_state_mask;
	kpc_running_classes = mp_config->classes;

	return 0;
}
596
597 int
kpc_set_config_arch(struct kpc_config_remote * mp_config)598 kpc_set_config_arch(struct kpc_config_remote *mp_config)
599 {
600 mp_cpus_call( CPUMASK_ALL, ASYNC, kpc_set_config_mp_call, mp_config );
601
602 return 0;
603 }
604
605 static uintptr_t
get_interrupted_pc(bool * kernel_out)606 get_interrupted_pc(bool *kernel_out)
607 {
608 x86_saved_state_t *state = current_cpu_datap()->cpu_int_state;
609 if (!state) {
610 return 0;
611 }
612
613 bool state_64 = is_saved_state64(state);
614 uint64_t cs;
615 if (state_64) {
616 cs = saved_state64(state)->isf.cs;
617 } else {
618 cs = saved_state32(state)->cs;
619 }
620 bool kernel = (cs & SEL_PL) != SEL_PL_U;
621 *kernel_out = kernel;
622
623 uintptr_t pc = 0;
624 if (state_64) {
625 pc = saved_state64(state)->isf.rip;
626 } else {
627 pc = saved_state32(state)->eip;
628 }
629 if (kernel) {
630 pc = VM_KERNEL_UNSLIDE(pc);
631 }
632 return pc;
633 }
634
/*
 * Forward a counter-overflow event to kperf, tagging it with the
 * interrupted PC and flags derived from the event-select config
 * (which rings were counting, whether the PC is a kernel address).
 */
static void
kpc_sample_kperf_x86(uint32_t ctr, uint32_t actionid, uint64_t count,
    uint64_t config)
{
	bool kernel = false;
	uintptr_t pc = get_interrupted_pc(&kernel);
	kperf_kpc_flags_t flags = kernel ? KPC_KERNEL_PC : 0;
	if ((config) & IA32_PERFEVT_USER_EN) {
		flags |= KPC_USER_COUNTING;
	}
	if ((config) & IA32_PERFEVT_OS_EN) {
		flags |= KPC_KERNEL_COUNTING;
	}
	kpc_sample_kperf(actionid, ctr,
	    config & 0xffff /* just the number and umask */, count, pc, flags);
}
651
/*
 * PMI (performance monitoring interrupt) handler.  For each counter whose
 * overflow bit is set in the global status MSR: re-arm it, fold the elapsed
 * count into its 64-bit shadow value, clear the overflow bit, and trigger
 * the associated kperf action (if any).  Fixed counters occupy status bits
 * 32+, configurable counters bits 0..n-1.
 */
void
kpc_pmi_handler(void)
{
	uint64_t status, extra;
	uint32_t ctr;
	int enabled;

	enabled = ml_set_interrupts_enabled(FALSE);

	status = rdmsr64(MSR_IA32_PERF_GLOBAL_STATUS);

#ifdef FIXED_COUNTER_SHADOW
	for (ctr = 0; ctr < kpc_fixed_count(); ctr++) {
		if ((1ULL << (ctr + 32)) & status) {
			extra = kpc_reload_fixed(ctr);

			FIXED_SHADOW(ctr)
			        += (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* Wrap */) + extra;

			uint32_t actionid = FIXED_ACTIONID(ctr);
			BUF_INFO(PERF_KPC_FCOUNTER, ctr, FIXED_SHADOW(ctr), extra, actionid);

			if (actionid != 0) {
				kpc_sample_kperf_x86(ctr, actionid, FIXED_SHADOW(ctr) + extra, 0);
			}
		}
	}
#endif // FIXED_COUNTER_SHADOW

	for (ctr = 0; ctr < kpc_configurable_count(); ctr++) {
		if ((1ULL << ctr) & status) {
			extra = kpc_reload_configurable(ctr);

			CONFIGURABLE_SHADOW(ctr) += kpc_configurable_max() -
			    CONFIGURABLE_RELOAD(ctr) + extra;

			/* kperf can grab the PMCs when it samples so we need to make sure the overflow
			 * bits are in the correct state before the call to kperf_sample */
			wrmsr64(MSR_IA32_PERF_GLOBAL_OVF_CTRL, 1ull << ctr);

			unsigned int actionid = CONFIGURABLE_ACTIONID(ctr);
			BUF_INFO(PERF_KPC_COUNTER, ctr, CONFIGURABLE_SHADOW(ctr), extra, actionid);

			if (actionid != 0) {
				/* kperf counter numbering puts configurable counters after the fixed ones */
				uint64_t config = IA32_PERFEVTSELx(ctr);
				kpc_sample_kperf_x86(ctr + kpc_fixed_count(), actionid,
				    CONFIGURABLE_SHADOW(ctr) + extra, config);
			}
		}
	}

	ml_set_interrupts_enabled(enabled);
}
705
/* Software-incremented counters are not available on x86. */
int
kpc_set_sw_inc( uint32_t mask __unused )
{
	return ENOTSUP;
}
711
712 int
kpc_get_pmu_version(void)713 kpc_get_pmu_version(void)
714 {
715 i386_cpu_info_t *info = cpuid_info();
716
717 uint8_t version_id = info->cpuid_arch_perf_leaf.version;
718
719 if (version_id == 3) {
720 return KPC_PMU_INTEL_V3;
721 } else if (version_id == 2) {
722 return KPC_PMU_INTEL_V2;
723 }
724
725 return KPC_PMU_ERROR;
726 }
727