/*
 * Copyright (c) 2012-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <kern/cpu_number.h>
#include <kern/kpc.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/monotonic.h>
#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/cpc.h>
#include <stdint.h>
#include <sys/errno.h>

#if HAS_CPMU_PC_CAPTURE
int kpc_pc_capture = 1;
#else /* HAS_CPMU_PC_CAPTURE */
int kpc_pc_capture = 0;
#endif /* !HAS_CPMU_PC_CAPTURE */

#if DEVELOPMENT || DEBUG
bool kpc_allows_counting_system = true;
#else // DEVELOPMENT || DEBUG
__security_const_late bool kpc_allows_counting_system = false;
#endif // !(DEVELOPMENT || DEBUG)

#if APPLE_ARM64_ARCH_FAMILY

void kpc_pmi_handler(unsigned int ctr);

/*
 * PMCs 8 and 9 were added to Hurricane and to maintain the existing bit
 * positions of the other PMCs, their configuration bits start at position 32.
 */
#define PMCR_PMC_8_9_OFFSET     (32)
#define PMCR_PMC_8_9_SHIFT(PMC) (((PMC) - 8) + PMCR_PMC_8_9_OFFSET)
#define PMCR_PMC_SHIFT(PMC)     (((PMC) <= 7) ? (PMC) : \
	                          PMCR_PMC_8_9_SHIFT(PMC))
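
/*
 * Worked example (for illustration, derived from the macros above): the
 * per-PMC shift is the counter index itself for PMCs 0-7 and moves to the
 * mirrored high bits for PMCs 8 and 9:
 *
 *   PMCR_PMC_SHIFT(5) == 5
 *   PMCR_PMC_SHIFT(8) == 32
 *   PMCR_PMC_SHIFT(9) == 33
 */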

/*
 * PMCR0 controls enabling, interrupts, and overflow of performance counters.
 */

/* PMC is enabled */
#define PMCR0_PMC_ENABLE_MASK(PMC)  (UINT64_C(0x1) << PMCR_PMC_SHIFT(PMC))
#define PMCR0_PMC_DISABLE_MASK(PMC) (~PMCR0_PMC_ENABLE_MASK(PMC))

/* overflow on a PMC generates an interrupt */
#define PMCR0_PMI_OFFSET            (12)
#define PMCR0_PMI_SHIFT(PMC)        (PMCR0_PMI_OFFSET + PMCR_PMC_SHIFT(PMC))
#define PMCR0_PMI_ENABLE_MASK(PMC)  (UINT64_C(1) << PMCR0_PMI_SHIFT(PMC))
#define PMCR0_PMI_DISABLE_MASK(PMC) (~PMCR0_PMI_ENABLE_MASK(PMC))

/* disable counting when a PMI is signaled (except for AIC interrupts) */
#define PMCR0_DISCNT_SHIFT        (20)
#define PMCR0_DISCNT_ENABLE_MASK  (UINT64_C(1) << PMCR0_DISCNT_SHIFT)
#define PMCR0_DISCNT_DISABLE_MASK (~PMCR0_DISCNT_ENABLE_MASK)

/* 21 unused */

/* block PMIs until ERET retires */
#define PMCR0_WFRFE_SHIFT        (22)
#define PMCR0_WFRFE_ENABLE_MASK  (UINT64_C(1) << PMCR0_WFRFE_SHIFT)
#define PMCR0_WFRFE_DISABLE_MASK (~PMCR0_WFRFE_ENABLE_MASK)

/* count global L2C events */
#define PMCR0_L2CGLOBAL_SHIFT        (23)
#define PMCR0_L2CGLOBAL_ENABLE_MASK  (UINT64_C(1) << PMCR0_L2CGLOBAL_SHIFT)
#define PMCR0_L2CGLOBAL_DISABLE_MASK (~PMCR0_L2CGLOBAL_ENABLE_MASK)

/* allow user mode access to configuration registers */
#define PMCR0_USEREN_SHIFT        (30)
#define PMCR0_USEREN_ENABLE_MASK  (UINT64_C(1) << PMCR0_USEREN_SHIFT)
#define PMCR0_USEREN_DISABLE_MASK (~PMCR0_USEREN_ENABLE_MASK)

/* force the CPMU clocks in case of a clocking bug */
#define PMCR0_CLKEN_SHIFT        (31)
#define PMCR0_CLKEN_ENABLE_MASK  (UINT64_C(1) << PMCR0_CLKEN_SHIFT)
#define PMCR0_CLKEN_DISABLE_MASK (~PMCR0_CLKEN_ENABLE_MASK)

/* 32 - 44 mirror the low bits for PMCs 8 and 9 */

/* PMCR1 enables counters in different processor modes */

#define PMCR1_EL0_A32_OFFSET (0)
#define PMCR1_EL0_A64_OFFSET (8)
#define PMCR1_EL1_A64_OFFSET (16)
#define PMCR1_EL3_A64_OFFSET (24)

#define PMCR1_EL0_A32_SHIFT(PMC) (PMCR1_EL0_A32_OFFSET + PMCR_PMC_SHIFT(PMC))
#define PMCR1_EL0_A64_SHIFT(PMC) (PMCR1_EL0_A64_OFFSET + PMCR_PMC_SHIFT(PMC))
#define PMCR1_EL1_A64_SHIFT(PMC) (PMCR1_EL1_A64_OFFSET + PMCR_PMC_SHIFT(PMC))
#define PMCR1_EL3_A64_SHIFT(PMC) (PMCR1_EL3_A64_OFFSET + PMCR_PMC_SHIFT(PMC))

#define PMCR1_EL0_A32_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL0_A32_SHIFT(PMC))
#define PMCR1_EL0_A64_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL0_A64_SHIFT(PMC))
#define PMCR1_EL1_A64_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL1_A64_SHIFT(PMC))
/* PMCR1_EL3_A64 is not supported on PMCs 8 and 9 */
#if NO_MONITOR
#define PMCR1_EL3_A64_ENABLE_MASK(PMC) UINT64_C(0)
#else
#define PMCR1_EL3_A64_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL3_A64_SHIFT(PMC))
#endif

#define PMCR1_EL_ALL_ENABLE_MASK(PMC) (PMCR1_EL0_A32_ENABLE_MASK(PMC) | \
	                               PMCR1_EL0_A64_ENABLE_MASK(PMC) | \
	                               PMCR1_EL1_A64_ENABLE_MASK(PMC) | \
	                               PMCR1_EL3_A64_ENABLE_MASK(PMC))
#define PMCR1_EL_ALL_DISABLE_MASK(PMC) (~PMCR1_EL_ALL_ENABLE_MASK(PMC))

#if KPC_MAX_COUNTERS > 8
#define PMCR1_EL0_MASK \
	(PMCR1_EL0_A64_ENABLE_MASK(0) | PMCR1_EL0_A64_ENABLE_MASK(1) | \
	PMCR1_EL0_A64_ENABLE_MASK(2) | PMCR1_EL0_A64_ENABLE_MASK(3) | \
	PMCR1_EL0_A64_ENABLE_MASK(4) | PMCR1_EL0_A64_ENABLE_MASK(5) | \
	PMCR1_EL0_A64_ENABLE_MASK(6) | PMCR1_EL0_A64_ENABLE_MASK(7) | \
	PMCR1_EL0_A64_ENABLE_MASK(8) | PMCR1_EL0_A64_ENABLE_MASK(9))
#else /* KPC_MAX_COUNTERS > 8 */
#define PMCR1_EL0_MASK \
	(PMCR1_EL0_A64_ENABLE_MASK(0) | PMCR1_EL0_A64_ENABLE_MASK(1) | \
	PMCR1_EL0_A64_ENABLE_MASK(2) | PMCR1_EL0_A64_ENABLE_MASK(3) | \
	PMCR1_EL0_A64_ENABLE_MASK(4) | PMCR1_EL0_A64_ENABLE_MASK(5) | \
	PMCR1_EL0_A64_ENABLE_MASK(6) | PMCR1_EL0_A64_ENABLE_MASK(7))
#endif /* KPC_MAX_COUNTERS > 8 */

/* PMESR0 and PMESR1 are event selection registers */

/* PMESR0 selects which event is counted on PMCs 2, 3, 4, and 5 */
/* PMESR1 selects which event is counted on PMCs 6, 7, 8, and 9 */

#if CPMU_16BIT_EVENTS
#define PMESR_PMC_WIDTH           UINT64_C(16)
#define PMESR_PMC_MASK            ((uint64_t)UINT16_MAX)
#else // CPMU_16BIT_EVENTS
#define PMESR_PMC_WIDTH           UINT64_C(8)
#define PMESR_PMC_MASK            ((uint64_t)UINT8_MAX)
#endif // !CPMU_16BIT_EVENTS

#define PMESR_SHIFT(PMC, OFF)     ((PMESR_PMC_WIDTH) * ((PMC) - (OFF)))
#define PMESR_EVT_MASK(PMC, OFF)  (PMESR_PMC_MASK << PMESR_SHIFT(PMC, OFF))
#define PMESR_EVT_CLEAR(PMC, OFF) (~PMESR_EVT_MASK(PMC, OFF))

#define PMESR_EVT_DECODE(PMESR, PMC, OFF) \
	(((PMESR) >> PMESR_SHIFT(PMC, OFF)) & PMESR_PMC_MASK)
#define PMESR_EVT_ENCODE(EVT, PMC, OFF) \
	(((EVT) & PMESR_PMC_MASK) << PMESR_SHIFT(PMC, OFF))
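
/*
 * Illustrative example (assuming 8-bit event fields, i.e. CPMU_16BIT_EVENTS
 * is not set): PMESR0 covers PMCs 2-5 with OFF == 2, so the event for PMC 4
 * occupies bits [23:16] of PMESR0:
 *
 *   PMESR_SHIFT(4, 2)            == 16
 *   PMESR_EVT_ENCODE(0xab, 4, 2) == 0xab0000
 */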

/*
 * The low bits of a configuration word (8 bits, or 16 with
 * CPMU_16BIT_EVENTS) select the event to program on PMESR{0,1}.
 * Bits 16-19 are mapped to PMCR1 bits.
 */
#define CFGWORD_EL0A32EN_MASK (0x10000)
#define CFGWORD_EL0A64EN_MASK (0x20000)
#define CFGWORD_EL1EN_MASK    (0x40000)
#define CFGWORD_EL3EN_MASK    (0x80000)
#define CFGWORD_ALLMODES_MASK (0xf0000)
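
/*
 * For example (illustrative only), a configuration word of
 * (0x02 | CFGWORD_EL0A64EN_MASK) selects event 0x02 and counts it only in
 * EL0 AArch64; a non-zero word with bits 16-19 all clear falls back to
 * counting in all modes (see set_modes() below).
 */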

/* ACC offsets for PIO */
#define ACC_CPMU_PMC0_OFFSET (0x200)
#define ACC_CPMU_PMC8_OFFSET (0x280)

/*
 * Macros for reading and writing system registers.
 *
 * SR must be the name of the system register, as a string literal.
 */
#define SREG_WRITE(SR, V) __asm__ volatile("msr " SR ", %0 ; isb" : : "r"(V))
#define SREG_READ(SR)     ({ uint64_t VAL; \
	                     __asm__ volatile("mrs %0, " SR : "=r"(VAL)); \
	                     VAL; })
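
/*
 * Usage sketch (this mirrors how the macros are used throughout this file):
 *
 *   uint64_t pmcr0 = SREG_READ("PMCR0_EL1");
 *   SREG_WRITE("PMCR0_EL1", pmcr0 | PMCR0_PMC_ENABLE_MASK(2));
 */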

/*
 * Configuration registers that can be controlled by RAWPMU:
 *
 * All: PMCR2-4, OPMAT0-1, OPMSK0-1.
 * Typhoon/Twister/Hurricane: PMMMAP, PMTRHLD2/4/6.
 */
#if HAS_EARLY_APPLE_CPMU
#define RAWPMU_CONFIG_COUNT 7
#else /* HAS_EARLY_APPLE_CPMU */
#define RAWPMU_CONFIG_COUNT 11
#endif /* !HAS_EARLY_APPLE_CPMU */

#if HAS_CPMU_PC_CAPTURE
#define PMC_SUPPORTS_PC_CAPTURE(CTR) (((CTR) >= 5) && ((CTR) <= 7))
#define PC_CAPTURE_PMC(PCC_VAL) (((PCC_VAL) >> 56) & 0x7)
#define PC_CAPTURE_PC(PCC_VAL) ((PCC_VAL) & ((UINT64_C(1) << 48) - 1))
#endif /* HAS_CPMU_PC_CAPTURE */
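
/*
 * Layout sketch for the PC capture value, as implied by the macros above:
 * bits [58:56] identify the PMC that triggered the capture and bits [47:0]
 * hold the captured PC; kpc_pmi_handler() consumes this value.
 */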

struct kpc_save_state {
	uint64_t pmcr[2];
	uint64_t pmesr[2];
	uint64_t rawpmu[RAWPMU_CONFIG_COUNT];
	uint64_t counter[MAX_CPUS][KPC_MAX_COUNTERS];
};

static __security_const_late struct kpc_save_state *kpc_state;

static uint64_t kpc_running_cfg_pmc_mask = 0;
static uint32_t kpc_running_classes = 0;
static uint32_t kpc_configured = 0;

#ifdef KPC_DEBUG
static void
dump_regs(void)
{
	uint64_t val;
	kprintf("PMCR0 = 0x%" PRIx64 "\n", SREG_READ("PMCR0_EL1"));
	kprintf("PMCR1 = 0x%" PRIx64 "\n", SREG_READ("PMCR1_EL1"));
	kprintf("PMCR2 = 0x%" PRIx64 "\n", SREG_READ("PMCR2_EL1"));
	kprintf("PMCR3 = 0x%" PRIx64 "\n", SREG_READ("PMCR3_EL1"));
	kprintf("PMCR4 = 0x%" PRIx64 "\n", SREG_READ("PMCR4_EL1"));
	kprintf("PMESR0 = 0x%" PRIx64 "\n", SREG_READ("PMESR0_EL1"));
	kprintf("PMESR1 = 0x%" PRIx64 "\n", SREG_READ("PMESR1_EL1"));

	kprintf("PMC0 = 0x%" PRIx64 "\n", SREG_READ("PMC0"));
	kprintf("PMC1 = 0x%" PRIx64 "\n", SREG_READ("PMC1"));
	kprintf("PMC2 = 0x%" PRIx64 "\n", SREG_READ("PMC2"));
	kprintf("PMC3 = 0x%" PRIx64 "\n", SREG_READ("PMC3"));
	kprintf("PMC4 = 0x%" PRIx64 "\n", SREG_READ("PMC4"));
	kprintf("PMC5 = 0x%" PRIx64 "\n", SREG_READ("PMC5"));
	kprintf("PMC6 = 0x%" PRIx64 "\n", SREG_READ("PMC6"));
	kprintf("PMC7 = 0x%" PRIx64 "\n", SREG_READ("PMC7"));

#if (KPC_ARM64_CONFIGURABLE_COUNT > 6)
	kprintf("PMC8 = 0x%" PRIx64 "\n", SREG_READ("PMC8"));
	kprintf("PMC9 = 0x%" PRIx64 "\n", SREG_READ("PMC9"));
#endif
}
#endif

static boolean_t
enable_counter(uint32_t counter)
{
	uint64_t pmcr0 = 0;
	boolean_t counter_running, pmi_enabled, enabled;

	pmcr0 = SREG_READ("PMCR0_EL1") | 0x3 /* leave the fixed counters enabled for monotonic */;

	counter_running = (pmcr0 & PMCR0_PMC_ENABLE_MASK(counter)) != 0;
	pmi_enabled = (pmcr0 & PMCR0_PMI_ENABLE_MASK(counter)) != 0;

	enabled = counter_running && pmi_enabled;

	if (!enabled) {
		pmcr0 |= PMCR0_PMC_ENABLE_MASK(counter);
		pmcr0 |= PMCR0_PMI_ENABLE_MASK(counter);
		SREG_WRITE("PMCR0_EL1", pmcr0);
	}

	return enabled;
}

static boolean_t
disable_counter(uint32_t counter)
{
	uint64_t pmcr0;
	boolean_t enabled;

	if (counter < 2) {
		return true;
	}

	pmcr0 = SREG_READ("PMCR0_EL1") | 0x3;
	enabled = (pmcr0 & PMCR0_PMC_ENABLE_MASK(counter)) != 0;

	if (enabled) {
		pmcr0 &= PMCR0_PMC_DISABLE_MASK(counter);
		SREG_WRITE("PMCR0_EL1", pmcr0);
	}

	return enabled;
}

/*
 * Enable counter in processor modes determined by configuration word.
 */
static void
set_modes(uint32_t counter, kpc_config_t cfgword, bool secure)
{
	bool const allow_kernel = !secure || kpc_allows_counting_system;
	uint64_t bits = 0;

	if (cfgword & CFGWORD_EL0A32EN_MASK) {
		bits |= PMCR1_EL0_A32_ENABLE_MASK(counter);
	}
	if (cfgword & CFGWORD_EL0A64EN_MASK) {
		bits |= PMCR1_EL0_A64_ENABLE_MASK(counter);
	}
	if (allow_kernel && (cfgword & CFGWORD_EL1EN_MASK)) {
		bits |= PMCR1_EL1_A64_ENABLE_MASK(counter);
	}

	/*
	 * Backwards compatibility: Writing a non-zero configuration word with
	 * all zeros in bits 16-19 is interpreted as enabling in all modes.
	 * This matches the behavior when the PMCR1 bits weren't exposed.
	 */
	if (bits == 0 && cfgword != 0) {
		bits = allow_kernel ?
		    PMCR1_EL_ALL_ENABLE_MASK(counter)
		    : PMCR1_EL0_A64_ENABLE_MASK(counter);
	}

	uint64_t pmcr1 = kpc_state->pmcr[1];
	pmcr1 &= PMCR1_EL_ALL_DISABLE_MASK(counter);
	pmcr1 |= bits;
	pmcr1 |= 0x30303; /* monotonic compatibility: keep PMC0/PMC1 counting in EL0 (A32/A64) and EL1 */
	kpc_state->pmcr[1] = pmcr1;
}

static uint64_t
read_counter(uint32_t counter)
{
	switch (counter) {
	// case 0: return SREG_READ("PMC0");
	// case 1: return SREG_READ("PMC1");
	case 2: return SREG_READ("PMC2");
	case 3: return SREG_READ("PMC3");
	case 4: return SREG_READ("PMC4");
	case 5: return SREG_READ("PMC5");
	case 6: return SREG_READ("PMC6");
	case 7: return SREG_READ("PMC7");
#if KPC_ARM64_CONFIGURABLE_COUNT > 6
	case 8: return SREG_READ("PMC8");
	case 9: return SREG_READ("PMC9");
#endif // KPC_ARM64_CONFIGURABLE_COUNT > 6
	default: return 0;
	}
}

static void
write_counter(uint32_t counter, uint64_t value)
{
	switch (counter) {
	case 2: SREG_WRITE("PMC2", value); break;
	case 3: SREG_WRITE("PMC3", value); break;
	case 4: SREG_WRITE("PMC4", value); break;
	case 5: SREG_WRITE("PMC5", value); break;
	case 6: SREG_WRITE("PMC6", value); break;
	case 7: SREG_WRITE("PMC7", value); break;
#if KPC_ARM64_CONFIGURABLE_COUNT > 6
	case 8: SREG_WRITE("PMC8", value); break;
	case 9: SREG_WRITE("PMC9", value); break;
#endif // KPC_ARM64_CONFIGURABLE_COUNT > 6
	default: break;
	}
}

uint32_t
kpc_rawpmu_config_count(void)
{
	return RAWPMU_CONFIG_COUNT;
}

int
kpc_get_rawpmu_config(kpc_config_t *configv)
{
	configv[0] = SREG_READ("PMCR2_EL1");
	configv[1] = SREG_READ("PMCR3_EL1");
	configv[2] = SREG_READ("PMCR4_EL1");
	configv[3] = SREG_READ("OPMAT0_EL1");
	configv[4] = SREG_READ("OPMAT1_EL1");
	configv[5] = SREG_READ("OPMSK0_EL1");
	configv[6] = SREG_READ("OPMSK1_EL1");
#if RAWPMU_CONFIG_COUNT > 7
	configv[7] = SREG_READ("PMMMAP_EL1");
	configv[8] = SREG_READ("PMTRHLD2_EL1");
	configv[9] = SREG_READ("PMTRHLD4_EL1");
	configv[10] = SREG_READ("PMTRHLD6_EL1");
#endif
	return 0;
}

static void
save_regs(void)
{
	int cpuid = cpu_number();
	__builtin_arm_dmb(DMB_ISH);
	assert(ml_get_interrupts_enabled() == FALSE);
	for (int i = 2; i < KPC_ARM64_PMC_COUNT; i++) {
		kpc_state->counter[cpuid][i] = read_counter(i);
	}
}

static void
restore_control_regs(uint32_t classes)
{
	const uint64_t pmcr1_mask = kpc_allows_counting_system ? ~0ULL : PMCR1_EL0_MASK;
	SREG_WRITE("PMCR1_EL1", (kpc_state->pmcr[1] & pmcr1_mask) | 0x30303);
#if CONFIG_EXCLAVES
	SREG_WRITE("PMCR1_EL12", (kpc_state->pmcr[1] & pmcr1_mask) | 0x30303);
#endif
	SREG_WRITE("PMESR0_EL1", kpc_state->pmesr[0]);
	SREG_WRITE("PMESR1_EL1", kpc_state->pmesr[1]);

	if (classes & KPC_CLASS_RAWPMU_MASK) {
		SREG_WRITE("PMCR2_EL1", kpc_state->rawpmu[0]);
		SREG_WRITE("PMCR3_EL1", kpc_state->rawpmu[1]);
		SREG_WRITE("PMCR4_EL1", kpc_state->rawpmu[2]);
		SREG_WRITE("OPMAT0_EL1", kpc_state->rawpmu[3]);
		SREG_WRITE("OPMAT1_EL1", kpc_state->rawpmu[4]);
		SREG_WRITE("OPMSK0_EL1", kpc_state->rawpmu[5]);
		SREG_WRITE("OPMSK1_EL1", kpc_state->rawpmu[6]);
#if RAWPMU_CONFIG_COUNT > 7
		SREG_WRITE("PMMMAP_EL1", kpc_state->rawpmu[7]);
		SREG_WRITE("PMTRHLD2_EL1", kpc_state->rawpmu[8]);
		SREG_WRITE("PMTRHLD4_EL1", kpc_state->rawpmu[9]);
		SREG_WRITE("PMTRHLD6_EL1", kpc_state->rawpmu[10]);
#endif // RAWPMU_CONFIG_COUNT > 7
	}
}

static void
restore_regs(void)
{
	int cpuid = cpu_number();
	for (int i = 2; i < KPC_ARM64_PMC_COUNT; i++) {
		write_counter(i, kpc_state->counter[cpuid][i]);
	}
	restore_control_regs(kpc_running_classes);
}

static uint64_t
get_counter_config(uint32_t counter)
{
	uint64_t pmesr;

	switch (counter) {
	case 2:
	case 3:
	case 4:
	case 5:
		pmesr = PMESR_EVT_DECODE(SREG_READ("PMESR0_EL1"), counter, 2);
		break;
	case 6:
	case 7:
#if KPC_ARM64_CONFIGURABLE_COUNT > 6
	case 8:
	case 9:
#endif // KPC_ARM64_CONFIGURABLE_COUNT > 6
		pmesr = PMESR_EVT_DECODE(SREG_READ("PMESR1_EL1"), counter, 6);
		break;
	default:
		pmesr = 0;
		break;
	}

	kpc_config_t config = pmesr;

	uint64_t pmcr1 = SREG_READ("PMCR1_EL1");

	if (pmcr1 & PMCR1_EL0_A32_ENABLE_MASK(counter)) {
		config |= CFGWORD_EL0A32EN_MASK;
	}
	if (pmcr1 & PMCR1_EL0_A64_ENABLE_MASK(counter)) {
		config |= CFGWORD_EL0A64EN_MASK;
	}
	if (pmcr1 & PMCR1_EL1_A64_ENABLE_MASK(counter)) {
		config |= CFGWORD_EL1EN_MASK;
#if NO_MONITOR
		config |= CFGWORD_EL3EN_MASK;
#endif
	}
#if !NO_MONITOR
	if (pmcr1 & PMCR1_EL3_A64_ENABLE_MASK(counter)) {
		config |= CFGWORD_EL3EN_MASK;
	}
#endif

	return config;
}

/* internal functions */

static bool
kpc_cpu_callback(void * __unused param, enum cpu_event event,
    unsigned int __unused cpu_or_cluster)
{
	if (!kpc_configured) {
		return true;
	}

	switch (event) {
	case CPU_BOOTED:
		restore_regs();
		break;

	case CPU_DOWN:
		save_regs();
		break;

	default:
		break;
	}
	return true;
}

void
kpc_arch_init(void)
{
	kpc_state = kalloc_type(struct kpc_save_state, Z_ZERO | Z_NOFAIL);
	cpu_event_register_callback(kpc_cpu_callback, NULL);
	kpc_allows_counting_system = PE_i_can_has_debugger(NULL);
}

boolean_t
kpc_is_running_fixed(void)
{
	return (kpc_running_classes & KPC_CLASS_FIXED_MASK) == KPC_CLASS_FIXED_MASK;
}

boolean_t
kpc_is_running_configurable(uint64_t pmc_mask)
{
	assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
	return ((kpc_running_classes & KPC_CLASS_CONFIGURABLE_MASK) == KPC_CLASS_CONFIGURABLE_MASK) &&
	       ((kpc_running_cfg_pmc_mask & pmc_mask) == pmc_mask);
}

uint32_t
kpc_fixed_count(void)
{
	return KPC_ARM64_FIXED_COUNT;
}

uint32_t
kpc_configurable_count(void)
{
	return KPC_ARM64_CONFIGURABLE_COUNT;
}

uint32_t
kpc_fixed_config_count(void)
{
	return 0;
}

uint32_t
kpc_configurable_config_count(uint64_t pmc_mask)
{
	assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
	return kpc_popcount(pmc_mask);
}

int
kpc_get_fixed_config(kpc_config_t *configv __unused)
{
	return 0;
}

uint64_t
kpc_fixed_max(void)
{
	return (1ULL << KPC_ARM64_COUNTER_WIDTH) - 1;
}

uint64_t
kpc_configurable_max(void)
{
	return (1ULL << KPC_ARM64_COUNTER_WIDTH) - 1;
}

static void
set_running_configurable(uint64_t target_mask, uint64_t state_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
	boolean_t enabled;

	enabled = ml_set_interrupts_enabled(FALSE);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if (((1ULL << i) & target_mask) == 0) {
			continue;
		}
		assert(kpc_controls_counter(offset + i));

		if ((1ULL << i) & state_mask) {
			enable_counter(offset + i);
		} else {
			disable_counter(offset + i);
		}
	}

	ml_set_interrupts_enabled(enabled);
}

static uint32_t kpc_xcall_sync;
static void
kpc_set_running_xcall( void *vstate )
{
	struct kpc_running_remote *mp_config = (struct kpc_running_remote*) vstate;
	assert(mp_config);

	set_running_configurable(mp_config->cfg_target_mask,
	    mp_config->cfg_state_mask);

	if (os_atomic_dec(&kpc_xcall_sync, relaxed) == 0) {
		thread_wakeup((event_t) &kpc_xcall_sync);
	}
}

static uint32_t kpc_xread_sync;
static void
kpc_get_curcpu_counters_xcall(void *args)
{
	struct kpc_get_counters_remote *handler = args;

	assert(handler != NULL);
	assert(handler->buf != NULL);

	int offset = cpu_number() * handler->buf_stride;
	int r = kpc_get_curcpu_counters(handler->classes, NULL, &handler->buf[offset]);

	/* number of counters added by this CPU, needs to be atomic  */
	os_atomic_add(&(handler->nb_counters), r, relaxed);

	if (os_atomic_dec(&kpc_xread_sync, relaxed) == 0) {
		thread_wakeup((event_t) &kpc_xread_sync);
	}
}

int
kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
	assert(buf != NULL);

	int enabled = ml_set_interrupts_enabled(FALSE);

	/* grab counters and CPU number as close as possible */
	if (curcpu) {
		*curcpu = cpu_number();
	}

	struct kpc_get_counters_remote hdl = {
		.classes = classes,
		.nb_counters = 0,
		.buf = buf,
		.buf_stride = kpc_get_counter_count(classes)
	};

	cpu_broadcast_xcall(&kpc_xread_sync, TRUE, kpc_get_curcpu_counters_xcall, &hdl);
	int offset = hdl.nb_counters;

	(void)ml_set_interrupts_enabled(enabled);

	return offset;
}

int
kpc_get_fixed_counters(uint64_t *counterv)
{
#if CONFIG_CPU_COUNTERS
	mt_fixed_counts(counterv);
	return 0;
#else /* CONFIG_CPU_COUNTERS */
#pragma unused(counterv)
	return ENOTSUP;
#endif /* !CONFIG_CPU_COUNTERS */
}

int
kpc_get_configurable_counters(uint64_t *counterv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
	uint64_t ctr = 0ULL;

	assert(counterv);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if (((1ULL << i) & pmc_mask) == 0) {
			continue;
		}
		ctr = read_counter(i + offset);

		if (ctr & KPC_ARM64_COUNTER_OVF_MASK) {
			ctr = CONFIGURABLE_SHADOW(i) +
			    (kpc_configurable_max() - CONFIGURABLE_RELOAD(i) + 1 /* Wrap */) +
			    (ctr & KPC_ARM64_COUNTER_MASK);
		} else {
			ctr = CONFIGURABLE_SHADOW(i) +
			    (ctr - CONFIGURABLE_RELOAD(i));
		}

		*counterv++ = ctr;
	}

	return 0;
}

int
kpc_get_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();

	assert(configv);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if ((1ULL << i) & pmc_mask) {
			*configv++ = get_counter_config(i + offset);
		}
	}
	return 0;
}

static uint32_t kpc_config_sync;
static void
kpc_set_config_xcall(void *vmp_config)
{
	struct kpc_config_remote *mp_config = vmp_config;
	uint32_t classes = 0ULL;

	assert(mp_config);
	classes = mp_config->classes;
	boolean_t enabled = ml_set_interrupts_enabled(FALSE);
	restore_control_regs(classes);
	ml_set_interrupts_enabled(enabled);

	if (os_atomic_dec(&kpc_config_sync, relaxed) == 0) {
		thread_wakeup((event_t) &kpc_config_sync);
	}
}

static uint64_t
kpc_reload_counter(uint32_t ctr)
{
	assert(ctr < (kpc_configurable_count() + kpc_fixed_count()));

	uint64_t old = read_counter(ctr);

	if (kpc_controls_counter(ctr)) {
		write_counter(ctr, FIXED_RELOAD(ctr));
		return old & KPC_ARM64_COUNTER_MASK;
	} else {
		/*
		 * Unset the overflow bit to clear the condition that drives
		 * PMIs.  The power manager is not interested in handling PMIs.
		 */
		write_counter(ctr, old & KPC_ARM64_COUNTER_MASK);
		return 0;
	}
}
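
/*
 * A note on the reload arithmetic (a sketch of the accounting used below):
 * kpc_set_reload_xcall() programs a configurable counter with (max - period),
 * so it overflows and raises a PMI after roughly `period` events; the PMI
 * handler then folds the elapsed count into the 64-bit shadow value.
 */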

static uint32_t kpc_reload_sync;
static void
kpc_set_reload_xcall(void *vmp_config)
{
	struct kpc_config_remote *mp_config = vmp_config;
	uint32_t classes = 0, count = 0, offset = kpc_fixed_count();
	uint64_t *new_period = NULL, max = kpc_configurable_max();
	boolean_t enabled;

	assert(mp_config);
	assert(mp_config->configv);
	classes = mp_config->classes;
	new_period = mp_config->configv;

	enabled = ml_set_interrupts_enabled(FALSE);

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		/*
		 * Update _all_ shadow counters, this cannot be done for only
		 * selected PMCs. Otherwise, we would corrupt the configurable
		 * shadow buffer since the PMCs are muxed according to the pmc
		 * mask.
		 */
		uint64_t all_cfg_mask = (1ULL << kpc_configurable_count()) - 1;
		kpc_get_configurable_counters(&CONFIGURABLE_SHADOW(0), all_cfg_mask);

		/* set the new period */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			/* ignore the counter */
			if (((1ULL << i) & mp_config->pmc_mask) == 0) {
				continue;
			}
			if (*new_period == 0) {
				*new_period = kpc_configurable_max();
			}
			CONFIGURABLE_RELOAD(i) = max - *new_period;
			/* reload the counter */
			kpc_reload_counter(offset + i);
			/* next period value */
			new_period++;
		}
	}

	ml_set_interrupts_enabled(enabled);

	if (os_atomic_dec(&kpc_reload_sync, relaxed) == 0) {
		thread_wakeup((event_t) &kpc_reload_sync);
	}
}

void
kpc_pmi_handler(unsigned int ctr)
{
	uintptr_t pc = 0;
	bool captured = false;

#if HAS_CPMU_PC_CAPTURE
	if (FIXED_ACTIONID(ctr) && PMC_SUPPORTS_PC_CAPTURE(ctr)) {
		uintptr_t pc_capture = SREG_READ("PM_PMI_PC");
		captured = PC_CAPTURE_PMC(pc_capture) == ctr;
		if (captured) {
			pc = PC_CAPTURE_PC(pc_capture);
		}
	}
#endif // HAS_CPMU_PC_CAPTURE

	uint64_t extra = kpc_reload_counter(ctr);

	FIXED_SHADOW(ctr) += (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* Wrap */) + extra;

	if (FIXED_ACTIONID(ctr)) {
		bool kernel = true;
		struct arm_saved_state *state;
		state = getCpuDatap()->cpu_int_state;
		if (state) {
			kernel = !PSR64_IS_USER(get_saved_state_cpsr(state));
			if (!captured) {
				pc = get_saved_state_pc(state);
			}
			if (kernel) {
				pc = VM_KERNEL_UNSLIDE(pc);
			}
		} else {
			/*
			 * Don't know where the PC came from and may be a kernel address, so
			 * clear it to prevent leaking the slide.
			 */
			pc = 0;
		}

		uint64_t config = get_counter_config(ctr);
		kperf_kpc_flags_t flags = kernel ? KPC_KERNEL_PC : 0;
		flags |= captured ? KPC_CAPTURED_PC : 0;
		bool custom_mode = false;
		if ((config & CFGWORD_EL0A32EN_MASK) || (config & CFGWORD_EL0A64EN_MASK)) {
			flags |= KPC_USER_COUNTING;
			custom_mode = true;
		}
		if ((config & CFGWORD_EL1EN_MASK)) {
			flags |= KPC_KERNEL_COUNTING;
			custom_mode = true;
		}
		/*
		 * For backwards-compatibility.
		 */
		if (!custom_mode) {
			flags |= KPC_USER_COUNTING | KPC_KERNEL_COUNTING;
		}
		kpc_sample_kperf(FIXED_ACTIONID(ctr), ctr, config & 0xffff, FIXED_SHADOW(ctr),
		    pc, flags);
	}
}

uint32_t
kpc_get_classes(void)
{
	return KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_RAWPMU_MASK;
}

int
kpc_set_running_arch(struct kpc_running_remote *mp_config)
{
	assert(mp_config != NULL);

	/* dispatch to all CPUs */
	cpu_broadcast_xcall(&kpc_xcall_sync, TRUE, kpc_set_running_xcall, mp_config);

	kpc_running_cfg_pmc_mask = mp_config->cfg_state_mask;
	kpc_running_classes = mp_config->classes;
	kpc_configured = 1;

	return 0;
}

int
kpc_set_period_arch(struct kpc_config_remote *mp_config)
{
	assert(mp_config);

	/* dispatch to all CPUs */
	cpu_broadcast_xcall(&kpc_reload_sync, TRUE, kpc_set_reload_xcall, mp_config);

	kpc_configured = 1;

	return 0;
}

int
kpc_set_config_arch(struct kpc_config_remote *mp_config)
{
	assert(mp_config);
	assert(mp_config->configv);

	uint64_t cfg_pmc_mask = mp_config->pmc_mask;
	unsigned int cfg_count = kpc_configurable_count();
	unsigned int offset = kpc_fixed_count();
	unsigned int config_index = 0;

	if (mp_config->secure) {
		/* Do a pass to find any disallowed events to avoid partial configuration. */
		for (uint32_t i = 0; i < cfg_count; ++i) {
			if (((1ULL << i) & cfg_pmc_mask) == 0) {
				continue;
			}
			uint64_t config_value = mp_config->configv[config_index];
			if (!cpc_event_allowed(CPC_HW_CPMU, config_value & PMESR_PMC_MASK)) {
				return EPERM;
			}
			config_index++;
		}
	}

	config_index = 0;
	for (uint32_t i = 0; i < cfg_count; ++i) {
		if (((1ULL << i) & cfg_pmc_mask) == 0) {
			continue;
		}
		unsigned int counter = i + offset;
		assert(kpc_controls_counter(counter));
		uint64_t config_value = mp_config->configv[config_index];

		const int pmesr_idx = counter < 6 ? 0 : 1;
		const int pmesr_off = counter < 6 ? 2 : 6;
		kpc_state->pmesr[pmesr_idx] &= PMESR_EVT_CLEAR(counter, pmesr_off);
		kpc_state->pmesr[pmesr_idx] |= PMESR_EVT_ENCODE(config_value, counter,
		    pmesr_off);
		set_modes(counter, config_value, mp_config->secure);
		config_index++;
	}

	if (mp_config->classes & KPC_CLASS_RAWPMU_MASK) {
		unsigned int rawpmu_start = kpc_popcount(mp_config->pmc_mask);
		memcpy(&kpc_state->rawpmu, &mp_config->configv[rawpmu_start],
		    sizeof(kpc_state->rawpmu));
	}

	cpu_broadcast_xcall(&kpc_config_sync, TRUE, kpc_set_config_xcall, mp_config);
	kpc_configured = 1;

	return 0;
}

void
kpc_idle(void)
{
	if (kpc_configured) {
		save_regs();
	}
}

void
kpc_idle_exit(void)
{
	if (kpc_configured) {
		restore_regs();
	}
}

int
kpc_set_sw_inc( uint32_t mask __unused )
{
	return ENOTSUP;
}

int
kpc_get_pmu_version(void)
{
	return KPC_PMU_ARM_APPLE;
}

#else /* APPLE_ARM64_ARCH_FAMILY */

/* We don't currently support non-Apple arm64 PMU configurations like PMUv3 */

void
kpc_arch_init(void)
{
	/* No-op */
}

uint32_t
kpc_get_classes(void)
{
	return 0;
}

uint32_t
kpc_fixed_count(void)
{
	return 0;
}

uint32_t
kpc_configurable_count(void)
{
	return 0;
}

uint32_t
kpc_fixed_config_count(void)
{
	return 0;
}

uint32_t
kpc_configurable_config_count(uint64_t pmc_mask __unused)
{
	return 0;
}

int
kpc_get_fixed_config(kpc_config_t *configv __unused)
{
	return 0;
}

uint64_t
kpc_fixed_max(void)
{
	return 0;
}

uint64_t
kpc_configurable_max(void)
{
	return 0;
}

int
kpc_get_configurable_config(kpc_config_t *configv __unused, uint64_t pmc_mask __unused)
{
	return ENOTSUP;
}

int
kpc_get_configurable_counters(uint64_t *counterv __unused, uint64_t pmc_mask __unused)
{
	return ENOTSUP;
}

int
kpc_get_fixed_counters(uint64_t *counterv __unused)
{
	return 0;
}

boolean_t
kpc_is_running_fixed(void)
{
	return FALSE;
}

boolean_t
kpc_is_running_configurable(uint64_t pmc_mask __unused)
{
	return FALSE;
}

int
kpc_set_running_arch(struct kpc_running_remote *mp_config __unused)
{
	return ENOTSUP;
}

int
kpc_set_period_arch(struct kpc_config_remote *mp_config __unused)
{
	return ENOTSUP;
}

int
kpc_set_config_arch(struct kpc_config_remote *mp_config __unused)
{
	return ENOTSUP;
}

void
kpc_idle(void)
{
	// do nothing
}

void
kpc_idle_exit(void)
{
	// do nothing
}

int
kpc_get_all_cpus_counters(uint32_t classes __unused, int *curcpu __unused, uint64_t *buf __unused)
{
	return 0;
}

int
kpc_set_sw_inc( uint32_t mask __unused )
{
	return ENOTSUP;
}

int
kpc_get_pmu_version(void)
{
	return KPC_PMU_ERROR;
}

uint32_t
kpc_rawpmu_config_count(void)
{
	return 0;
}

int
kpc_get_rawpmu_config(__unused kpc_config_t *configv)
{
	return 0;
}

#endif /* !APPLE_ARM64_ARCH_FAMILY */