/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <sys/errno.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <kern/kpc.h>

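/*
 * Summary of the ARMv7 PMUv2 registers driven by the inline assembly
 * below (all reached through coprocessor p15, CRn = c9):
 *
 *   PMCR         c9, c12, 0   global control; bit 0 is the master enable
 *   PMCNTENSET   c9, c12, 1   per-counter enable, set
 *   PMCNTENCLR   c9, c12, 2   per-counter enable, clear
 *   PMOVSR       c9, c12, 3   overflow status, write-1-to-clear
 *   PMSWINC      c9, c12, 4   software increment
 *   PMSELR       c9, c12, 5   selects the counter PMXEV* accesses target
 *   PMCCNTR      c9, c13, 0   the fixed cycle counter
 *   PMXEVTYPER   c9, c13, 1   event type of the selected counter
 *   PMXEVCNTR    c9, c13, 2   value of the selected counter
 *   PMINTENSET   c9, c14, 1   overflow interrupt enable, set
 *   PMINTENCLR   c9, c14, 2   overflow interrupt enable, clear
 */
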
#ifdef ARMA7
/* PMU v2 based implementation for A7 */
static uint32_t saved_PMXEVTYPER[MAX_CPUS][KPC_ARM_TOTAL_COUNT];
static uint32_t saved_PMCNTENSET[MAX_CPUS];
static uint64_t saved_counter[MAX_CPUS][KPC_ARM_TOTAL_COUNT];
static uint32_t saved_PMOVSR[MAX_CPUS];

static uint32_t kpc_configured = 0;
static uint32_t kpc_xcall_sync;
static uint64_t kpc_running_cfg_pmc_mask = 0;
static uint32_t kpc_running_classes = 0;
static uint32_t kpc_reload_sync;
static uint32_t kpc_enabled_counters = 0;

static int first_time = 1;

/* Private */

static uint64_t get_counter_config(uint32_t counter);

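/*
 * kpc numbers the cycle counter as counter 0 and the configurable event
 * counters as 1..4, while the PMU enable/interrupt/overflow registers
 * keep the cycle counter in bit 31 and event counter n in bit n.  The
 * mapping is therefore (illustrative):
 *
 *   kpc counter 0 (cycles)   -> hardware mask 0x80000000
 *   kpc counter 3 (event 2)  -> hardware mask 0x00000004, 1 << (3 - 1)
 */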
static boolean_t
enable_counter(uint32_t counter)
{
	boolean_t enabled;
	uint32_t PMCNTENSET;
	/* Cycle counter is MSB; configurable counters reside in LSBs */
	uint32_t mask = (counter == 0) ? (1U << 31) : (1U << (counter - 1));

	/* Enabled? */
	__asm__ volatile ("mrc p15, 0, %0, c9, c12, 1;" : "=r" (PMCNTENSET));

	enabled = (PMCNTENSET & mask);
	if (!enabled) {
		/* Counter interrupt enable (PMINTENSET) */
		__asm__ volatile ("mcr p15, 0, %0, c9, c14, 1;" : : "r" (mask));

		/* Individual counter enable set (PMCNTENSET) */
		__asm__ volatile ("mcr p15, 0, %0, c9, c12, 1;" : : "r" (mask));

		kpc_enabled_counters++;

		/* 1st enabled counter? Set the master enable bit in PMCR */
		if (kpc_enabled_counters == 1) {
			uint32_t PMCR = 1;
			__asm__ volatile ("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR));
		}
	}

	return enabled;
}

static boolean_t
disable_counter(uint32_t counter)
{
	boolean_t enabled;
	uint32_t PMCNTENCLR;
	/* Cycle counter is MSB; configurable counters reside in LSBs */
	uint32_t mask = (counter == 0) ? (1U << 31) : (1U << (counter - 1));

	/* Enabled? */
	__asm__ volatile ("mrc p15, 0, %0, c9, c12, 2;" : "=r" (PMCNTENCLR));

	enabled = (PMCNTENCLR & mask);
	if (enabled) {
		/* Individual counter enable clear (PMCNTENCLR) */
		__asm__ volatile ("mcr p15, 0, %0, c9, c12, 2;" : : "r" (mask));

		/* Counter interrupt disable (PMINTENCLR) */
		__asm__ volatile ("mcr p15, 0, %0, c9, c14, 2;" : : "r" (mask));

		kpc_enabled_counters--;

		/* Last enabled counter? Clear the master enable bit in PMCR */
		if (kpc_enabled_counters == 0) {
			uint32_t PMCR = 0;
			__asm__ volatile ("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR));
		}
	}

	return enabled;
}

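/*
 * The event counters are accessed indirectly: software writes an index
 * to PMSELR, then reads or writes the banked PMXEVCNTR/PMXEVTYPER
 * registers, which target whichever counter PMSELR selects (index 31
 * selects the cycle counter's type/filter register).  The cycle count
 * itself is read and written directly through PMCCNTR.  All counters
 * are 32 bits wide, so 64-bit values are truncated on write and
 * zero-extended on read.
 */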
static uint64_t
read_counter(uint32_t counter)
{
	uint32_t low = 0;

	switch (counter) {
	case 0:
		/* Fixed counter */
		__asm__ volatile ("mrc p15, 0, %0, c9, c13, 0;" : "=r" (low));
		break;
	case 1:
	case 2:
	case 3:
	case 4:
		/* Configurable. Set PMSELR... */
		__asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter - 1));
		/* ...then read PMXEVCNTR */
		__asm__ volatile ("mrc p15, 0, %0, c9, c13, 2;" : "=r" (low));
		break;
	default:
		/* Unexpected counter index; return 0 */
		break;
	}

	return (uint64_t)low;
}

static void
write_counter(uint32_t counter, uint64_t value)
{
	uint32_t low = value & 0xFFFFFFFF;

	switch (counter) {
	case 0:
		/* Fixed counter */
		__asm__ volatile ("mcr p15, 0, %0, c9, c13, 0;" : : "r" (low));
		break;
	case 1:
	case 2:
	case 3:
	case 4:
		/* Configurable. Set PMSELR... */
		__asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter - 1));
		/* ...then write PMXEVCNTR */
		__asm__ volatile ("mcr p15, 0, %0, c9, c13, 2;" : : "r" (low));
		break;
	default:
		/* Unexpected counter index; ignore */
		break;
	}
}

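/*
 * Sampling scheme: a counter is preloaded with (max - period) so that it
 * overflows, raising a PMI, after roughly `period` events (the reload
 * values are programmed in kpc_set_reload_xcall below).
 * kpc_reload_counter rearms a counter and returns the value it had
 * reached, i.e. how far past the wrap it had already counted by the
 * time it was reloaded.
 */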
static uint64_t
kpc_reload_counter(int ctr)
{
	uint64_t old = read_counter(ctr);
	write_counter(ctr, FIXED_RELOAD(ctr));
	return old;
}

static void
set_running_fixed(boolean_t on)
{
	int i;
	boolean_t enabled;
	int n = KPC_ARM_FIXED_COUNT;

	enabled = ml_set_interrupts_enabled(FALSE);

	for (i = 0; i < n; i++) {
		if (on) {
			enable_counter(i);
		} else {
			disable_counter(i);
		}
	}

	ml_set_interrupts_enabled(enabled);
}

static void
set_running_configurable(uint64_t target_mask, uint64_t state_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
	boolean_t enabled;

	enabled = ml_set_interrupts_enabled(FALSE);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if (((1ULL << i) & target_mask) == 0) {
			continue;
		}
		assert(kpc_controls_counter(offset + i));

		if ((1ULL << i) & state_mask) {
			enable_counter(offset + i);
		} else {
			disable_counter(offset + i);
		}
	}

	ml_set_interrupts_enabled(enabled);
}

static uintptr_t
get_interrupted_pc(bool *kernel_out)
{
	struct arm_saved_state *state = getCpuDatap()->cpu_int_state;
	if (!state) {
		return 0;
	}

	bool kernel = !PSR_IS_USER(get_saved_state_cpsr(state));
	*kernel_out = kernel;
	uintptr_t pc = get_saved_state_pc(state);
	if (kernel) {
		pc = VM_KERNEL_UNSLIDE(pc);
	}
	return pc;
}

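/*
 * Overflow accounting: when a counter wraps, its 64-bit shadow value
 * advances by (max - reload + 1), the number of increments that carry
 * the counter from its reload value through the wrap to zero, plus
 * `extra`, whatever the counter re-accumulated before the handler
 * rearmed it.  Each serviced PMOVSR bit is cleared by writing 1 to it.
 */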
void kpc_pmi_handler(cpu_id_t source);
void
kpc_pmi_handler(cpu_id_t source)
{
	uint64_t extra;
	int ctr;
	int enabled;

	enabled = ml_set_interrupts_enabled(FALSE);

	/* The pmi must be delivered to the CPU that generated it */
	if (source != getCpuDatap()->interrupt_nub) {
		panic("pmi from IOCPU %p delivered to IOCPU %p", source, getCpuDatap()->interrupt_nub);
	}

	for (ctr = 0;
	    ctr < (KPC_ARM_FIXED_COUNT + KPC_ARM_CONFIGURABLE_COUNT);
	    ctr++) {
		uint32_t PMOVSR;
		uint32_t mask;

		/* check the counter for overflow */
		if (ctr == 0) {
			mask = 1U << 31;
		} else {
			mask = 1U << (ctr - 1);
		}

		/* read PMOVSR */
		__asm__ volatile ("mrc p15, 0, %0, c9, c12, 3;" : "=r" (PMOVSR));

		if (PMOVSR & mask) {
			extra = kpc_reload_counter(ctr);

			FIXED_SHADOW(ctr) +=
			    (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* wrap */) + extra;

			if (FIXED_ACTIONID(ctr)) {
				bool kernel = false;
				uintptr_t pc = get_interrupted_pc(&kernel);
				kpc_sample_kperf(FIXED_ACTIONID(ctr), ctr, get_counter_config(ctr),
				    FIXED_SHADOW(ctr), pc, kernel ? KPC_KERNEL_PC : 0);
			}

			/* clear the PMOVSR bit (write-1-to-clear) */
			__asm__ volatile ("mcr p15, 0, %0, c9, c12, 3;" : : "r" (mask));
		}
	}

	ml_set_interrupts_enabled(enabled);
}

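/*
 * Cross-call pattern used by the kpc_*_arch entry points: the initiating
 * CPU broadcasts a handler via cpu_broadcast_xcall and waits on a sync
 * counter; every CPU runs the handler and decrements the counter, and
 * whichever CPU drops it to zero issues the thread_wakeup that releases
 * the waiter.
 */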
static void
kpc_set_running_xcall(void *vstate)
{
	struct kpc_running_remote *mp_config = (struct kpc_running_remote*) vstate;
	assert(mp_config);

	if (kpc_controls_fixed_counters()) {
		set_running_fixed(mp_config->classes & KPC_CLASS_FIXED_MASK);
	}

	set_running_configurable(mp_config->cfg_target_mask,
	    mp_config->cfg_state_mask);

	if (os_atomic_dec(&kpc_xcall_sync, relaxed) == 0) {
		thread_wakeup((event_t) &kpc_xcall_sync);
	}
}

static uint64_t
get_counter_config(uint32_t counter)
{
	uint32_t config = 0;

	switch (counter) {
	case 0:
		/* Fixed counter is selected via PMSELR value 31 */
		counter = 31;
		/* Write PMSELR.SEL */
		__asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter));
		/* Read PMXEVTYPER */
		__asm__ volatile ("mrc p15, 0, %0, c9, c13, 1;" : "=r" (config));
		break;
	case 1:
	case 2:
	case 3:
	case 4:
		/* kpc index 1..4 -> hardware counter 0..3 */
		counter -= 1;
		/* Write PMSELR.SEL to select the configurable counter */
		__asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter));
		/* Read PMXEVTYPER to get the config */
		__asm__ volatile ("mrc p15, 0, %0, c9, c13, 1;" : "=r" (config));
		break;
	default:
		break;
	}

	return config;
}

static void
set_counter_config(uint32_t counter, uint64_t config)
{
	switch (counter) {
	case 0:
		/* Write PMSELR.SEL */
		__asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (31));
		/* Write PMXEVTYPER */
		__asm__ volatile ("mcr p15, 0, %0, c9, c13, 1;" : : "r" (config & 0xFFFFFFFF));
		break;
	case 1:
	case 2:
	case 3:
	case 4:
		/* Write PMSELR.SEL */
		__asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter - 1));
		/* Write PMXEVTYPER */
		__asm__ volatile ("mcr p15, 0, %0, c9, c13, 1;" : : "r" (config & 0xFFFFFFFF));
		break;
	default:
		break;
	}
}

/* Common */

void
kpc_arch_init(void)
{
	uint32_t PMCR;
	uint32_t event_counters;

	/* read PMCR and extract the event counter count from PMCR.N (bits [15:11]) */
	__asm__ volatile ("mrc p15, 0, %0, c9, c12, 0;" : "=r" (PMCR));
	event_counters = (PMCR >> 11) & 0x1F;

	assert(event_counters >= KPC_ARM_CONFIGURABLE_COUNT);
}

uint32_t
kpc_get_classes(void)
{
	return KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK;
}

uint32_t
kpc_fixed_count(void)
{
	return KPC_ARM_FIXED_COUNT;
}

uint32_t
kpc_configurable_count(void)
{
	return KPC_ARM_CONFIGURABLE_COUNT;
}

uint32_t
kpc_fixed_config_count(void)
{
	return KPC_ARM_FIXED_COUNT;
}

uint32_t
kpc_configurable_config_count(uint64_t pmc_mask)
{
	assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
	return kpc_popcount(pmc_mask);
}

int
kpc_get_fixed_config(kpc_config_t *configv)
{
	configv[0] = get_counter_config(0);
	return 0;
}

uint64_t
kpc_fixed_max(void)
{
	return (1ULL << KPC_ARM_COUNTER_WIDTH) - 1;
}

uint64_t
kpc_configurable_max(void)
{
	return (1ULL << KPC_ARM_COUNTER_WIDTH) - 1;
}

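/*
 * Counter reads outside the PMI path must cope with a wrap that has not
 * been serviced yet: if a counter's PMOVSR bit is set, the pending
 * overflow is folded into the reported value exactly as the PMI handler
 * eventually will; otherwise the result is simply the shadow value plus
 * the delta counted since the last reload.  This applies to both the
 * configurable and the fixed counter getters below.
 */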
int
kpc_get_configurable_counters(uint64_t *counterv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();

	assert(counterv);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		uint32_t PMOVSR;
		uint32_t mask;
		uint64_t ctr;

		if (((1ULL << i) & pmc_mask) == 0) {
			continue;
		}
		ctr = read_counter(i + offset);

		/* check the counter for overflow */
		mask = 1 << i;

		/* read PMOVSR */
		__asm__ volatile ("mrc p15, 0, %0, c9, c12, 3;" : "=r" (PMOVSR));

		if (PMOVSR & mask) {
			ctr = CONFIGURABLE_SHADOW(i) +
			    (kpc_configurable_max() - CONFIGURABLE_RELOAD(i) + 1 /* Wrap */) +
			    ctr;
		} else {
			ctr = CONFIGURABLE_SHADOW(i) +
			    (ctr - CONFIGURABLE_RELOAD(i));
		}

		*counterv++ = ctr;
	}

	return 0;
}

int
kpc_get_fixed_counters(uint64_t *counterv)
{
	uint32_t PMOVSR;
	uint32_t mask;
	uint64_t ctr;

	/* check the counter for overflow */
	mask = 1U << 31;

	/* read PMOVSR */
	__asm__ volatile ("mrc p15, 0, %0, c9, c12, 3;" : "=r" (PMOVSR));

	ctr = read_counter(0);

	if (PMOVSR & mask) {
		ctr = FIXED_SHADOW(0) +
		    (kpc_fixed_max() - FIXED_RELOAD(0) + 1 /* Wrap */) +
		    (ctr & 0xFFFFFFFF);
	} else {
		ctr = FIXED_SHADOW(0) +
		    (ctr - FIXED_RELOAD(0));
	}

	counterv[0] = ctr;

	return 0;
}

boolean_t
kpc_is_running_fixed(void)
{
	return (kpc_running_classes & KPC_CLASS_FIXED_MASK) == KPC_CLASS_FIXED_MASK;
}

boolean_t
kpc_is_running_configurable(uint64_t pmc_mask)
{
	assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
	return ((kpc_running_classes & KPC_CLASS_CONFIGURABLE_MASK) == KPC_CLASS_CONFIGURABLE_MASK) &&
	       ((kpc_running_cfg_pmc_mask & pmc_mask) == pmc_mask);
}

int
kpc_set_running_arch(struct kpc_running_remote *mp_config)
{
	unsigned int cpu;

	assert(mp_config);

	if (first_time) {
		kprintf("kpc: setting PMI handler\n");
		PE_cpu_perfmon_interrupt_install_handler(kpc_pmi_handler);
		for (cpu = 0; cpu < real_ncpus; cpu++) {
			PE_cpu_perfmon_interrupt_enable(cpu_datap(cpu)->cpu_id,
			    TRUE);
		}
		first_time = 0;
	}

	/* dispatch to all CPUs */
	cpu_broadcast_xcall(&kpc_xcall_sync, TRUE, kpc_set_running_xcall,
	    mp_config);

	kpc_running_cfg_pmc_mask = mp_config->cfg_state_mask;
	kpc_running_classes = mp_config->classes;
	kpc_configured = 1;

	return 0;
}

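/*
 * Idle save/restore: PMU state is assumed not to survive deep idle, so
 * kpc_idle snapshots this CPU's enable bits, overflow flags, event
 * selections and counts, and kpc_idle_exit replays them.  An overflow
 * that was pending at save time can no longer deliver its PMI, so
 * restore_regs performs the handler's work (reload, shadow update,
 * sample) itself.
 */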
static void
save_regs(void)
{
	int i;
	int cpuid = cpu_number();
	uint32_t PMCR = 0;

	__builtin_arm_dmb(DMB_ISH);

	/* Clear master enable */
	__asm__ volatile ("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR));

	/* Save individual enable state */
	__asm__ volatile ("mrc p15, 0, %0, c9, c12, 1;" : "=r" (saved_PMCNTENSET[cpuid]));

	/* Save PMOVSR */
	__asm__ volatile ("mrc p15, 0, %0, c9, c12, 3;" : "=r" (saved_PMOVSR[cpuid]));

	/* Select fixed counter with PMSELR.SEL */
	__asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (31));
	/* Read PMXEVTYPER */
	__asm__ volatile ("mrc p15, 0, %0, c9, c13, 1;" : "=r" (saved_PMXEVTYPER[cpuid][0]));

	/* Save configurable event selections */
	for (i = 0; i < 4; i++) {
		/* Select counter with PMSELR.SEL */
		__asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (i));
		/* Read PMXEVTYPER */
		__asm__ volatile ("mrc p15, 0, %0, c9, c13, 1;" : "=r" (saved_PMXEVTYPER[cpuid][i + 1]));
	}

	/* Finally, save count for each counter */
	for (i = 0; i < 5; i++) {
		saved_counter[cpuid][i] = read_counter(i);
	}
}

static void
restore_regs(void)
{
	int i;
	int cpuid = cpu_number();
	uint64_t extra;
	uint32_t PMCR = 1;

	/* Restore counter values */
	for (i = 0; i < 5; i++) {
		/* did we overflow? if so, handle it now since we won't get a pmi */
		uint32_t mask;

		/* check the counter for overflow */
		if (i == 0) {
			mask = 1U << 31;
		} else {
			mask = 1U << (i - 1);
		}

		if (saved_PMOVSR[cpuid] & mask) {
			extra = kpc_reload_counter(i);

			/*
			 * The CONFIGURABLE_* shadows directly follow the FIXED
			 * ones, so indexing them with the raw counter number
			 * works here, ugly as it is.
			 */
			FIXED_SHADOW(i) +=
			    (kpc_fixed_max() - FIXED_RELOAD(i) + 1 /* Wrap */) + extra;

			if (FIXED_ACTIONID(i)) {
				bool kernel = false;
				uintptr_t pc = get_interrupted_pc(&kernel);
				kpc_sample_kperf(FIXED_ACTIONID(i), i, get_counter_config(i),
				    FIXED_SHADOW(i), pc, kernel ? KPC_KERNEL_PC : 0);
			}
		} else {
			write_counter(i, saved_counter[cpuid][i]);
		}
	}

	/* Restore configuration - first, the fixed... */
	__asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (31));
	/* Write PMXEVTYPER */
	__asm__ volatile ("mcr p15, 0, %0, c9, c13, 1;" : : "r" (saved_PMXEVTYPER[cpuid][0]));

	/* ...then the configurable */
	for (i = 0; i < 4; i++) {
		/* Select counter with PMSELR.SEL */
		__asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (i));
		/* Write PMXEVTYPER */
		__asm__ volatile ("mcr p15, 0, %0, c9, c13, 1;" : : "r" (saved_PMXEVTYPER[cpuid][i + 1]));
	}

	/* Restore enable state */
	__asm__ volatile ("mcr p15, 0, %0, c9, c12, 1;" : : "r" (saved_PMCNTENSET[cpuid]));

	/* Counter master re-enable */
	__asm__ volatile ("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR));
}

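/*
 * Worked example of the period math (assuming KPC_ARM_COUNTER_WIDTH is
 * 32): a requested period of 10000 events gives a reload value of
 * 0xFFFFFFFF - 10000 = 0xFFFFD8EF.  The counter is preloaded with that
 * value here, and again by kpc_reload_counter after every overflow; a
 * period of 0 is treated as a request for the maximum period.
 */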
static void
kpc_set_reload_xcall(void *vmp_config)
{
	struct kpc_config_remote *mp_config = vmp_config;
	uint32_t classes = 0, count = 0, offset = kpc_fixed_count();
	uint64_t *new_period = NULL, max = kpc_configurable_max();
	boolean_t enabled;

	assert(mp_config);
	assert(mp_config->configv);
	classes = mp_config->classes;
	new_period = mp_config->configv;

	enabled = ml_set_interrupts_enabled(FALSE);

	if ((classes & KPC_CLASS_FIXED_MASK) && kpc_controls_fixed_counters()) {
		/* update shadow counters */
		kpc_get_fixed_counters(&FIXED_SHADOW(0));

		/* set the new period */
		count = kpc_fixed_count();
		for (uint32_t i = 0; i < count; ++i) {
			if (*new_period == 0) {
				*new_period = kpc_fixed_max();
			}
			FIXED_RELOAD(i) = max - *new_period;
			/* reload the counter if possible */
			kpc_reload_counter(i);
			/* next period value */
			new_period++;
		}
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		/*
		 * Update _all_ shadow counters; this cannot be done for only
		 * selected PMCs. Otherwise, we would corrupt the configurable
		 * shadow buffer since the PMCs are muxed according to the pmc
		 * mask.
		 */
		uint64_t all_cfg_mask = (1ULL << kpc_configurable_count()) - 1;
		kpc_get_configurable_counters(&CONFIGURABLE_SHADOW(0), all_cfg_mask);

		/* set the new period */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			/* skip counters not selected by the pmc mask */
			if (((1ULL << i) & mp_config->pmc_mask) == 0) {
				continue;
			}
			if (*new_period == 0) {
				*new_period = kpc_configurable_max();
			}
			CONFIGURABLE_RELOAD(i) = max - *new_period;
			/* reload the counter */
			kpc_reload_counter(offset + i);
			/* next period value */
			new_period++;
		}
	}

	ml_set_interrupts_enabled(enabled);

	if (os_atomic_dec(&kpc_reload_sync, relaxed) == 0) {
		thread_wakeup((event_t) &kpc_reload_sync);
	}
}

int
kpc_set_period_arch(struct kpc_config_remote *mp_config)
{
	/* dispatch to all CPUs */
	cpu_broadcast_xcall(&kpc_reload_sync, TRUE, kpc_set_reload_xcall, mp_config);

	kpc_configured = 1;

	return 0;
}

int
kpc_get_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();

	assert(configv);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if ((1ULL << i) & pmc_mask) {
			*configv++ = get_counter_config(i + offset);
		}
	}

	return 0;
}

static int
kpc_set_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
	boolean_t enabled;

	assert(configv);

	enabled = ml_set_interrupts_enabled(FALSE);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if (((1ULL << i) & pmc_mask) == 0) {
			continue;
		}
		assert(kpc_controls_counter(i + offset));

		set_counter_config(i + offset, *configv++);
	}

	ml_set_interrupts_enabled(enabled);

	return 0;
}

static uint32_t kpc_config_sync;
static void
kpc_set_config_xcall(void *vmp_config)
{
	struct kpc_config_remote *mp_config = vmp_config;
	kpc_config_t *new_config = NULL;
	uint32_t classes = 0;

	assert(mp_config);
	assert(mp_config->configv);
	classes = mp_config->classes;
	new_config = mp_config->configv;

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		kpc_set_configurable_config(new_config, mp_config->pmc_mask);
		new_config += kpc_popcount(mp_config->pmc_mask);
	}

	if (os_atomic_dec(&kpc_config_sync, relaxed) == 0) {
		thread_wakeup((event_t) &kpc_config_sync);
	}
}

int
kpc_set_config_arch(struct kpc_config_remote *mp_config)
{
	/* dispatch to all CPUs */
	cpu_broadcast_xcall(&kpc_config_sync, TRUE, kpc_set_config_xcall, mp_config);

	kpc_configured = 1;

	return 0;
}

void
kpc_idle(void)
{
	if (kpc_configured) {
		save_regs();
	}
}

void
kpc_idle_exit(void)
{
	if (kpc_configured) {
		restore_regs();
	}
}

static uint32_t kpc_xread_sync;
static void
kpc_get_curcpu_counters_xcall(void *args)
{
	struct kpc_get_counters_remote *handler = args;
	int offset = 0, r = 0;

	assert(handler);
	assert(handler->buf);

	offset = cpu_number() * handler->buf_stride;
	r = kpc_get_curcpu_counters(handler->classes, NULL, &handler->buf[offset]);

	/* number of counters added by this CPU; needs to be atomic */
	os_atomic_add(&(handler->nb_counters), r, relaxed);

	if (os_atomic_dec(&kpc_xread_sync, relaxed) == 0) {
		thread_wakeup((event_t) &kpc_xread_sync);
	}
}

int
kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
	int enabled = 0;

	struct kpc_get_counters_remote hdl = {
		.classes = classes, .nb_counters = 0,
		.buf_stride = kpc_get_counter_count(classes),
		.buf = buf
	};

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	if (curcpu) {
		*curcpu = cpu_number();
	}
	cpu_broadcast_xcall(&kpc_xread_sync, TRUE, kpc_get_curcpu_counters_xcall, &hdl);

	ml_set_interrupts_enabled(enabled);

	return hdl.nb_counters;
}

int
kpc_get_pmu_version(void)
{
	return KPC_PMU_ARM_V2;
}

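/*
 * Software increment: writing bit n of PMSWINC bumps configurable
 * counter n, but only when that counter's selected event is the software
 * increment event (type 0x0).  Illustrative use, assuming configurable
 * counter 0 has been configured for event 0x0: kpc_set_sw_inc(1U << 0)
 * adds one to it.
 */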
int
kpc_set_sw_inc(uint32_t mask)
{
	/* Only works with the configurable counters set to count the increment event (0x0) */

	/* Write to PMSWINC */
	__asm__ volatile ("mcr p15, 0, %0, c9, c12, 4;" : : "r" (mask));

	return 0;
}

#else /* !ARMA7 */

/* no kpc */

void
kpc_arch_init(void)
{
	/* No-op */
}

uint32_t
kpc_get_classes(void)
{
	return 0;
}

uint32_t
kpc_fixed_count(void)
{
	return 0;
}

uint32_t
kpc_configurable_count(void)
{
	return 0;
}

uint32_t
kpc_fixed_config_count(void)
{
	return 0;
}

uint32_t
kpc_configurable_config_count(uint64_t pmc_mask __unused)
{
	return 0;
}

int
kpc_get_fixed_config(kpc_config_t *configv __unused)
{
	return 0;
}

uint64_t
kpc_fixed_max(void)
{
	return 0;
}

uint64_t
kpc_configurable_max(void)
{
	return 0;
}

int
kpc_get_configurable_config(kpc_config_t *configv __unused, uint64_t pmc_mask __unused)
{
	return ENOTSUP;
}

int
kpc_get_configurable_counters(uint64_t *counterv __unused, uint64_t pmc_mask __unused)
{
	return ENOTSUP;
}

int
kpc_get_fixed_counters(uint64_t *counterv __unused)
{
	return 0;
}

boolean_t
kpc_is_running_fixed(void)
{
	return FALSE;
}

boolean_t
kpc_is_running_configurable(uint64_t pmc_mask __unused)
{
	return FALSE;
}

int
kpc_set_running_arch(struct kpc_running_remote *mp_config __unused)
{
	return ENOTSUP;
}

int
kpc_set_period_arch(struct kpc_config_remote *mp_config __unused)
{
	return ENOTSUP;
}

int
kpc_set_config_arch(struct kpc_config_remote *mp_config __unused)
{
	return ENOTSUP;
}

void
kpc_idle(void)
{
	// do nothing
}

void
kpc_idle_exit(void)
{
	// do nothing
}

int
kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
#pragma unused(classes)
#pragma unused(curcpu)
#pragma unused(buf)

	return 0;
}

int
kpc_set_sw_inc(uint32_t mask __unused)
{
	return ENOTSUP;
}

int
kpc_get_pmu_version(void)
{
	return KPC_PMU_ERROR;
}

#endif /* ARMA7 */

/*
 * RAWPMU isn't implemented for any of the 32-bit ARMs.
 */

uint32_t
kpc_rawpmu_config_count(void)
{
	return 0;
}

int
kpc_get_rawpmu_config(__unused kpc_config_t *configv)
{
	return 0;
}