xref: /xnu-11417.140.69/osfmk/arm64/kpc.c (revision 43a90889846e00bfb5cf1d255cdc0a701a1e05a4)
1 /*
2  * Copyright (c) 2012-2018 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <arm/cpu_data_internal.h>
30 #include <arm/cpu_internal.h>
31 #include <kern/cpu_number.h>
32 #include <kern/kpc.h>
33 #include <kern/thread.h>
34 #include <kern/processor.h>
35 #include <kern/monotonic.h>
36 #include <mach/mach_types.h>
37 #include <machine/machine_routines.h>
38 #include <kern/cpc.h>
39 #include <stdint.h>
40 #include <sys/errno.h>
41 
#if HAS_CPMU_PC_CAPTURE
/* non-zero when the CPMU can capture the PC at PMI time (see kpc_pmi_handler) */
int kpc_pc_capture = 1;
#else /* HAS_CPMU_PC_CAPTURE */
int kpc_pc_capture = 0;
#endif /* !HAS_CPMU_PC_CAPTURE */

#if DEVELOPMENT || DEBUG
/* whether counting kernel (system) execution is allowed; always on for DEV/DEBUG */
bool kpc_allows_counting_system = true;
#else // DEVELOPMENT || DEBUG
/* on RELEASE this is decided once during early boot (see kpc_arch_init) */
__security_const_late bool kpc_allows_counting_system = false;
#endif // !(DEVELOPMENT || DEBUG)
53 
54 #if APPLE_ARM64_ARCH_FAMILY
55 
56 void kpc_pmi_handler(unsigned int ctr);
57 
/*
 * PMCs 8 and 9 were added to Hurricane and to maintain the existing bit
 * positions of the other PMCs, their configuration bits start at position 32.
 */
#define PMCR_PMC_8_9_OFFSET     (32)
#define PMCR_PMC_8_9_SHIFT(PMC) (((PMC) - 8) + PMCR_PMC_8_9_OFFSET)
#define PMCR_PMC_SHIFT(PMC)     (((PMC) <= 7) ? (PMC) : \
	                          PMCR_PMC_8_9_SHIFT(PMC))

/*
 * PMCR0 controls enabling, interrupts, and overflow of performance counters.
 */

/* PMC is enabled */
#define PMCR0_PMC_ENABLE_MASK(PMC)  (UINT64_C(0x1) << PMCR_PMC_SHIFT(PMC))
#define PMCR0_PMC_DISABLE_MASK(PMC) (~PMCR0_PMC_ENABLE_MASK(PMC))

/* overflow on a PMC generates an interrupt */
#define PMCR0_PMI_OFFSET            (12)
#define PMCR0_PMI_SHIFT(PMC)        (PMCR0_PMI_OFFSET + PMCR_PMC_SHIFT(PMC))
#define PMCR0_PMI_ENABLE_MASK(PMC)  (UINT64_C(1) << PMCR0_PMI_SHIFT(PMC))
#define PMCR0_PMI_DISABLE_MASK(PMC) (~PMCR0_PMI_ENABLE_MASK(PMC))

/* disable counting when a PMI is signaled (except for AIC interrupts) */
#define PMCR0_DISCNT_SHIFT        (20)
#define PMCR0_DISCNT_ENABLE_MASK  (UINT64_C(1) << PMCR0_DISCNT_SHIFT)
#define PMCR0_DISCNT_DISABLE_MASK (~PMCR0_DISCNT_ENABLE_MASK)

/* 21 unused */

/* block PMIs until ERET retires */
#define PMCR0_WFRFE_SHIFT        (22)
/* fixed typo: referenced undefined PMCR0_WFRE_SHIFT */
#define PMCR0_WFRFE_ENABLE_MASK  (UINT64_C(1) << PMCR0_WFRFE_SHIFT)
#define PMCR0_WFRFE_DISABLE_MASK (~PMCR0_WFRFE_ENABLE_MASK)

/* count global L2C events */
#define PMCR0_L2CGLOBAL_SHIFT        (23)
#define PMCR0_L2CGLOBAL_ENABLE_MASK  (UINT64_C(1) << PMCR0_L2CGLOBAL_SHIFT)
#define PMCR0_L2CGLOBAL_DISABLE_MASK (~PMCR0_L2CGLOBAL_ENABLE_MASK)

/* allow user mode access to configuration registers */
#define PMCR0_USEREN_SHIFT        (30)
#define PMCR0_USEREN_ENABLE_MASK  (UINT64_C(1) << PMCR0_USEREN_SHIFT)
#define PMCR0_USEREN_DISABLE_MASK (~PMCR0_USEREN_ENABLE_MASK)

/* force the CPMU clocks in case of a clocking bug */
#define PMCR0_CLKEN_SHIFT        (31)
#define PMCR0_CLKEN_ENABLE_MASK  (UINT64_C(1) << PMCR0_CLKEN_SHIFT)
#define PMCR0_CLKEN_DISABLE_MASK (~PMCR0_CLKEN_ENABLE_MASK)

/* 32 - 44 mirror the low bits for PMCs 8 and 9 */

/* PMCR1 enables counters in different processor modes */

#define PMCR1_EL0_A32_OFFSET (0)
#define PMCR1_EL0_A64_OFFSET (8)
#define S3_1_C15_C1_0_A64_OFFSET (16)
#define PMCR1_EL3_A64_OFFSET (24)

#define PMCR1_EL0_A32_SHIFT(PMC) (PMCR1_EL0_A32_OFFSET + PMCR_PMC_SHIFT(PMC))
#define PMCR1_EL0_A64_SHIFT(PMC) (PMCR1_EL0_A64_OFFSET + PMCR_PMC_SHIFT(PMC))
#define S3_1_C15_C1_0_A64_SHIFT(PMC) (S3_1_C15_C1_0_A64_OFFSET + PMCR_PMC_SHIFT(PMC))
/* fixed: was built from PMCR1_EL0_A64_OFFSET, aliasing the EL0 bits */
#define PMCR1_EL3_A64_SHIFT(PMC) (PMCR1_EL3_A64_OFFSET + PMCR_PMC_SHIFT(PMC))

#define PMCR1_EL0_A32_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL0_A32_SHIFT(PMC))
#define PMCR1_EL0_A64_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL0_A64_SHIFT(PMC))
#define S3_1_C15_C1_0_A64_ENABLE_MASK(PMC) (UINT64_C(1) << S3_1_C15_C1_0_A64_SHIFT(PMC))
/* PMCR1_EL3_A64 is not supported on PMCs 8 and 9 */
#if NO_MONITOR
#define PMCR1_EL3_A64_ENABLE_MASK(PMC) UINT64_C(0)
#else
#define PMCR1_EL3_A64_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL3_A64_SHIFT(PMC))
#endif

#define PMCR1_EL_ALL_ENABLE_MASK(PMC) (PMCR1_EL0_A32_ENABLE_MASK(PMC) | \
	                               PMCR1_EL0_A64_ENABLE_MASK(PMC) | \
	                               S3_1_C15_C1_0_A64_ENABLE_MASK(PMC) | \
	                               PMCR1_EL3_A64_ENABLE_MASK(PMC))
#define PMCR1_EL_ALL_DISABLE_MASK(PMC) (~PMCR1_EL_ALL_ENABLE_MASK(PMC))
137 
138 #if KPC_MAX_COUNTERS > 8
139 #define PMCR1_EL0_MASK \
140 	(PMCR1_EL0_A64_ENABLE_MASK(0) | PMCR1_EL0_A64_ENABLE_MASK(1) | \
141 	PMCR1_EL0_A64_ENABLE_MASK(2) | PMCR1_EL0_A64_ENABLE_MASK(3) | \
142 	PMCR1_EL0_A64_ENABLE_MASK(4) | PMCR1_EL0_A64_ENABLE_MASK(5) | \
143 	PMCR1_EL0_A64_ENABLE_MASK(6) | PMCR1_EL0_A64_ENABLE_MASK(7) | \
144 	PMCR1_EL0_A64_ENABLE_MASK(8) | PMCR1_EL0_A64_ENABLE_MASK(9))
145 #else /* KPC_MAX_COUNTERS > 8 */
146 #define PMCR1_EL0_MASK \
147 	(PMCR1_EL0_A64_ENABLE_MASK(0) | PMCR1_EL0_A64_ENABLE_MASK(1) | \
148 	PMCR1_EL0_A64_ENABLE_MASK(2) | PMCR1_EL0_A64_ENABLE_MASK(3) | \
149 	PMCR1_EL0_A64_ENABLE_MASK(4) | PMCR1_EL0_A64_ENABLE_MASK(5) | \
150 	PMCR1_EL0_A64_ENABLE_MASK(6) | PMCR1_EL0_A64_ENABLE_MASK(7))
151 #endif /* KPC_MAX_COUNTERS > 8 */
152 
153 #define PMCR1_ALL_MASK (~0ULL)
154 
155 /* PMESR0 and PMESR1 are event selection registers */
156 
157 /* PMESR0 selects which event is counted on PMCs 2, 3, 4, and 5 */
158 /* PMESR1 selects which event is counted on PMCs 6, 7, 8, and 9 */
159 
160 #if CPMU_16BIT_EVENTS
161 #define PMESR_PMC_WIDTH           UINT64_C(16)
162 #define PMESR_PMC_MASK            ((uint64_t)UINT16_MAX)
163 #else // CPMU_16BIT_EVENTS
164 #define PMESR_PMC_WIDTH           UINT64_C(8)
165 #define PMESR_PMC_MASK            ((uint64_t)UINT8_MAX)
166 #endif // !CPMU_16BIT_EVENTS
167 
168 #define PMESR_SHIFT(PMC, OFF)     ((PMESR_PMC_WIDTH) * ((PMC) - (OFF)))
169 #define PMESR_EVT_MASK(PMC, OFF)  (PMESR_PMC_MASK << PMESR_SHIFT(PMC, OFF))
170 #define PMESR_EVT_CLEAR(PMC, OFF) (~PMESR_EVT_MASK(PMC, OFF))
171 
172 #define PMESR_EVT_DECODE(PMESR, PMC, OFF) \
173 	(((PMESR) >> PMESR_SHIFT(PMC, OFF)) & PMESR_PMC_MASK)
174 #define PMESR_EVT_ENCODE(EVT, PMC, OFF) \
175 	(((EVT) & PMESR_PMC_MASK) << PMESR_SHIFT(PMC, OFF))
176 
177 /*
178  * The low 8 bits of a configuration words select the event to program on
179  * PMESR{0,1}. Bits 16-19 are mapped to PMCR1 bits.
180  */
181 #define CFGWORD_EL0A32EN_MASK (0x10000)
182 #define CFGWORD_EL0A64EN_MASK (0x20000)
183 #define CFGWORD_EL1EN_MASK    (0x40000)
184 #define CFGWORD_EL3EN_MASK    (0x80000)
185 #define CFGWORD_ALLMODES_MASK (0xf0000)
186 
187 /* ACC offsets for PIO */
188 #define ACC_CPMU_S3_2_C15_C0_0_OFFSET (0x200)
189 #define ACC_CPMU_S3_2_C15_C9_0_OFFSET (0x280)
190 
191 /*
192  * Macros for reading and writing system registers.
193  *
194  * SR must be one of the SREG_* defines above.
195  */
196 #define SREG_WRITE(SR, V) __asm__ volatile("msr " SR ", %0 ; isb" : : "r"(V))
197 #define SREG_READ(SR)     ({ uint64_t VAL; \
198 	                     __asm__ volatile("mrs %0, " SR : "=r"(VAL)); \
199 	                     VAL; })
200 
201 /*
202  * Configuration registers that can be controlled by RAWPMU:
203  *
204  * All: PMCR2-4, OPMAT0-1, OPMSK0-1.
205  * Typhoon/Twister/Hurricane: PMMMAP, PMTRHLD2/4/6.
206  */
207 #if HAS_EARLY_APPLE_CPMU
208 #define RAWPMU_CONFIG_COUNT 7
209 #else /* HAS_EARLY_APPLE_CPMU */
210 #define RAWPMU_CONFIG_COUNT 11
211 #endif /* !HAS_EARLY_APPLE_CPMU */
212 
213 #if HAS_CPMU_PC_CAPTURE
214 #define PMC_SUPPORTS_PC_CAPTURE(CTR) (((CTR) >= 5) && ((CTR) <= 7))
215 #define PC_CAPTURE_PMC(PCC_VAL) (((PCC_VAL) >> 56) & 0x7)
216 #define PC_CAPTURE_PC(PCC_VAL) ((PCC_VAL) & ((UINT64_C(1) << 48) - 1))
217 #endif /* HAS_CPMU_PC_CAPTURE */
218 
/*
 * Shadow image of the CPMU state, used to reprogram the hardware after CPU
 * power transitions and to broadcast configuration to every CPU.
 */
struct kpc_save_state {
	uint64_t pmcr[2];   /* PMCR0/PMCR1 images; only index 1 is used here */
	uint64_t pmesr[2];  /* PMESR0/PMESR1 event selections */
	uint64_t rawpmu[RAWPMU_CONFIG_COUNT]; /* raw PMU configuration words */
	uint64_t counter[MAX_CPUS][KPC_MAX_COUNTERS]; /* per-CPU saved counts */
};

/* allocated once in kpc_arch_init() */
static __security_const_late struct kpc_save_state *kpc_state;

/* state most recently requested via kpc_set_running_arch() */
static uint64_t kpc_running_cfg_pmc_mask = 0;
static uint32_t kpc_running_classes = 0;
/* non-zero once kpc has programmed the hardware at least once */
static uint32_t kpc_configured = 0;
231 
232 #ifdef KPC_DEBUG
/* Debug helper: dump the CPMU control, event-select, and counter registers. */
static void
dump_regs(void)
{
	uint64_t val;
	kprintf("PMCR0 = 0x%" PRIx64 "\n", SREG_READ("S3_1_C15_C0_0"));
	kprintf("PMCR1 = 0x%" PRIx64 "\n", SREG_READ("S3_1_C15_C1_0"));
	kprintf("PMCR2 = 0x%" PRIx64 "\n", SREG_READ("S3_1_C15_C2_0"));
	kprintf("PMCR3 = 0x%" PRIx64 "\n", SREG_READ("S3_1_C15_C3_0"));
	kprintf("PMCR4 = 0x%" PRIx64 "\n", SREG_READ("S3_1_C15_C4_0"));
	kprintf("PMESR0 = 0x%" PRIx64 "\n", SREG_READ("S3_1_C15_C5_0"));
	kprintf("PMESR1 = 0x%" PRIx64 "\n", SREG_READ("S3_1_C15_C6_0"));

	/* S3_2_C15_C*_0 are the counter value registers */
	kprintf("S3_2_C15_C0_0 = 0x%" PRIx64 "\n", SREG_READ("S3_2_C15_C0_0"));
	kprintf("S3_2_C15_C1_0 = 0x%" PRIx64 "\n", SREG_READ("S3_2_C15_C1_0"));
	kprintf("S3_2_C15_C2_0 = 0x%" PRIx64 "\n", SREG_READ("S3_2_C15_C2_0"));
	kprintf("S3_2_C15_C3_0 = 0x%" PRIx64 "\n", SREG_READ("S3_2_C15_C3_0"));
	kprintf("S3_2_C15_C4_0 = 0x%" PRIx64 "\n", SREG_READ("S3_2_C15_C4_0"));
	kprintf("S3_2_C15_C5_0 = 0x%" PRIx64 "\n", SREG_READ("S3_2_C15_C5_0"));
	kprintf("S3_2_C15_C6_0 = 0x%" PRIx64 "\n", SREG_READ("S3_2_C15_C6_0"));
	kprintf("S3_2_C15_C7_0 = 0x%" PRIx64 "\n", SREG_READ("S3_2_C15_C7_0"));

#if (KPC_ARM64_CONFIGURABLE_COUNT > 6)
	kprintf("S3_2_C15_C9_0 = 0x%" PRIx64 "\n", SREG_READ("S3_2_C15_C9_0"));
	kprintf("S3_2_C15_C10_0 = 0x%" PRIx64 "\n", SREG_READ("S3_2_C15_C10_0"));
#endif
}
259 #endif
260 
/*
 * Enable a PMC and its PMI in PMCR0.  Returns whether the counter was
 * already fully enabled (counting with its interrupt armed).
 */
static boolean_t
enable_counter(uint32_t counter)
{
	uint64_t pmcr0 = 0;
	boolean_t counter_running, pmi_enabled, enabled;

	pmcr0 = SREG_READ("S3_1_C15_C0_0") | 0x3 /* leave the fixed counters enabled for monotonic */;

	counter_running = (pmcr0 & PMCR0_PMC_ENABLE_MASK(counter)) != 0;
	pmi_enabled = (pmcr0 & PMCR0_PMI_ENABLE_MASK(counter)) != 0;

	/* fully enabled only if it's counting and can raise a PMI */
	enabled = counter_running && pmi_enabled;

	if (!enabled) {
		pmcr0 |= PMCR0_PMC_ENABLE_MASK(counter);
		pmcr0 |= PMCR0_PMI_ENABLE_MASK(counter);
		SREG_WRITE("S3_1_C15_C0_0", pmcr0);
	}

	return enabled;
}
282 
/*
 * Disable a PMC in PMCR0.  Returns whether the counter was previously
 * enabled.  The fixed counters (0 and 1) are never disabled.
 */
static boolean_t
disable_counter(uint32_t counter)
{
	uint64_t pmcr0;
	boolean_t enabled;

	/* counters 0 and 1 are fixed and stay running for monotonic */
	if (counter < 2) {
		return true;
	}

	pmcr0 = SREG_READ("S3_1_C15_C0_0") | 0x3;
	enabled = (pmcr0 & PMCR0_PMC_ENABLE_MASK(counter)) != 0;

	if (enabled) {
		pmcr0 &= PMCR0_PMC_DISABLE_MASK(counter);
		SREG_WRITE("S3_1_C15_C0_0", pmcr0);
	}

	return enabled;
}
303 
304 /*
305  * Enable counter in processor modes determined by configuration word.
306  */
/*
 * Compute the PMCR1 mode-enable bits for `counter` from the configuration
 * word and merge them into the saved PMCR1 image (kpc_state->pmcr[1]).
 * The hardware register is only written later, by restore_control_regs().
 */
static void
set_modes(uint32_t counter, kpc_config_t cfgword, bool secure)
{
	/* kernel (EL1) counting is gated unless the device allows it */
	bool const allow_kernel = !secure || kpc_allows_counting_system;
	uint64_t bits = 0;

	if (cfgword & CFGWORD_EL0A32EN_MASK) {
		bits |= PMCR1_EL0_A32_ENABLE_MASK(counter);
	}
	if (cfgword & CFGWORD_EL0A64EN_MASK) {
		bits |= PMCR1_EL0_A64_ENABLE_MASK(counter);
	}
	if (allow_kernel && (cfgword & CFGWORD_EL1EN_MASK)) {
		bits |= S3_1_C15_C1_0_A64_ENABLE_MASK(counter);
	}

	/*
	 * Backwards compatibility: Writing a non-zero configuration word with
	 * all zeros in bits 16-19 is interpreted as enabling in all modes.
	 * This matches the behavior when the PMCR1 bits weren't exposed.
	 */
	if (bits == 0 && cfgword != 0) {
		bits = allow_kernel ?
		    PMCR1_EL_ALL_ENABLE_MASK(counter)
		    : PMCR1_EL0_A64_ENABLE_MASK(counter);
	}

	/* clear this counter's mode bits, then install the new ones */
	uint64_t pmcr1 = kpc_state->pmcr[1];
	pmcr1 &= PMCR1_EL_ALL_DISABLE_MASK(counter);
	pmcr1 |= bits;
	pmcr1 |= 0x30303; /* monotonic compatibility */
	kpc_state->pmcr[1] = pmcr1;
}
340 
/*
 * Read the current hardware value of a configurable PMC (2-9).
 * Unknown counters read as 0.
 */
static uint64_t
read_counter(uint32_t counter)
{
	switch (counter) {
	/* counters 0 and 1 are the fixed counters, owned by monotonic */
	// case 0: return SREG_READ("S3_2_C15_C0_0");
	// case 1: return SREG_READ("S3_2_C15_C1_0");
	case 2: return SREG_READ("S3_2_C15_C2_0");
	case 3: return SREG_READ("S3_2_C15_C3_0");
	case 4: return SREG_READ("S3_2_C15_C4_0");
	case 5: return SREG_READ("S3_2_C15_C5_0");
	case 6: return SREG_READ("S3_2_C15_C6_0");
	case 7: return SREG_READ("S3_2_C15_C7_0");
#if KPC_ARM64_CONFIGURABLE_COUNT > 6
	/* note: PMCs 8 and 9 live at C9/C10, not C8/C9 */
	case 8: return SREG_READ("S3_2_C15_C9_0");
	case 9: return SREG_READ("S3_2_C15_C10_0");
#endif // KPC_ARM64_CONFIGURABLE_COUNT > 6
	default: return 0;
	}
}
360 
/*
 * Write a configurable PMC (2-9).  Writes to unknown counters are ignored.
 */
static void
write_counter(uint32_t counter, uint64_t value)
{
	switch (counter) {
	case 2: SREG_WRITE("S3_2_C15_C2_0", value); break;
	case 3: SREG_WRITE("S3_2_C15_C3_0", value); break;
	case 4: SREG_WRITE("S3_2_C15_C4_0", value); break;
	case 5: SREG_WRITE("S3_2_C15_C5_0", value); break;
	case 6: SREG_WRITE("S3_2_C15_C6_0", value); break;
	case 7: SREG_WRITE("S3_2_C15_C7_0", value); break;
#if KPC_ARM64_CONFIGURABLE_COUNT > 6
	/* note: PMCs 8 and 9 live at C9/C10, not C8/C9 */
	case 8: SREG_WRITE("S3_2_C15_C9_0", value); break;
	case 9: SREG_WRITE("S3_2_C15_C10_0", value); break;
#endif // KPC_ARM64_CONFIGURABLE_COUNT > 6
	default: break;
	}
}
378 
/* Number of raw PMU configuration registers exposed on this CPU family. */
uint32_t
kpc_rawpmu_config_count(void)
{
	return RAWPMU_CONFIG_COUNT;
}
384 
/*
 * Snapshot the raw PMU configuration registers into `configv`, which must
 * hold at least RAWPMU_CONFIG_COUNT entries.  Always returns 0.
 */
int
kpc_get_rawpmu_config(kpc_config_t *configv)
{
	configv[0] = SREG_READ("S3_1_C15_C2_0");
	configv[1] = SREG_READ("S3_1_C15_C3_0");
	configv[2] = SREG_READ("S3_1_C15_C4_0");
	configv[3] = SREG_READ("S3_1_C15_C7_0");
	configv[4] = SREG_READ("S3_1_C15_C8_0");
	configv[5] = SREG_READ("S3_1_C15_C9_0");
	configv[6] = SREG_READ("S3_1_C15_C10_0");
#if RAWPMU_CONFIG_COUNT > 7
	configv[7] = SREG_READ("S3_2_C15_C15_0");
	configv[8] = SREG_READ("S3_2_C15_C14_0");
	configv[9] = SREG_READ("S3_2_C15_C13_0");
	configv[10] = SREG_READ("S3_2_C15_C12_0");
#endif
	return 0;
}
403 
/*
 * Save this CPU's configurable counter values into the per-CPU shadow area
 * (e.g. before the core idles or powers down).  Interrupts must be disabled.
 */
static void
save_regs(void)
{
	int cpuid = cpu_number();
	/* order prior counter activity before the reads */
	__builtin_arm_dmb(DMB_ISH);
	assert(ml_get_interrupts_enabled() == FALSE);
	/* counters 0 and 1 are fixed and managed by monotonic */
	for (int i = 2; i < KPC_ARM64_PMC_COUNT; i++) {
		kpc_state->counter[cpuid][i] = read_counter(i);
	}
}
414 
/*
 * Program the control registers (PMCR1, PMESR0/1, and optionally the raw
 * PMU registers) on the current CPU from the saved kpc_state image.
 */
static void
restore_control_regs(uint32_t classes)
{
	/* without system counting, restrict PMCR1 to the EL0 enable bits */
	const uint64_t pmcr1_mask = kpc_allows_counting_system ? PMCR1_ALL_MASK : PMCR1_EL0_MASK;
	/* 0x30303: monotonic-compatibility bits (see set_modes()) */
	SREG_WRITE("S3_1_C15_C1_0", (kpc_state->pmcr[1] & pmcr1_mask) | 0x30303);
#if CONFIG_EXCLAVES
	SREG_WRITE("S3_1_C15_C7_2", (kpc_state->pmcr[1] & pmcr1_mask) | 0x30303);
#endif
	SREG_WRITE("S3_1_C15_C5_0", kpc_state->pmesr[0]);
	SREG_WRITE("S3_1_C15_C6_0", kpc_state->pmesr[1]);

	if (classes & KPC_CLASS_RAWPMU_MASK) {
		SREG_WRITE("S3_1_C15_C2_0", kpc_state->rawpmu[0]);
		SREG_WRITE("S3_1_C15_C3_0", kpc_state->rawpmu[1]);
		SREG_WRITE("S3_1_C15_C4_0", kpc_state->rawpmu[2]);
		SREG_WRITE("S3_1_C15_C7_0", kpc_state->rawpmu[3]);
		SREG_WRITE("S3_1_C15_C8_0", kpc_state->rawpmu[4]);
		SREG_WRITE("S3_1_C15_C9_0", kpc_state->rawpmu[5]);
		SREG_WRITE("S3_1_C15_C10_0", kpc_state->rawpmu[6]);
#if RAWPMU_CONFIG_COUNT > 7
		SREG_WRITE("S3_2_C15_C15_0", kpc_state->rawpmu[7]);
		SREG_WRITE("S3_2_C15_C14_0", kpc_state->rawpmu[8]);
		SREG_WRITE("S3_2_C15_C13_0", kpc_state->rawpmu[9]);
		SREG_WRITE("S3_2_C15_C12_0", kpc_state->rawpmu[10]);
#endif // RAWPMU_CONFIG_COUNT > 7
	}
}
442 
/*
 * Restore this CPU's counter values and control registers from the shadow
 * state saved by save_regs() / staged by kpc_set_config_arch().
 */
static void
restore_regs(void)
{
	int cpuid = cpu_number();
	for (int i = 2; i < KPC_ARM64_PMC_COUNT; i++) {
		write_counter(i, kpc_state->counter[cpuid][i]);
	}
	restore_control_regs(kpc_running_classes);
}
452 
/*
 * Reconstruct the kpc configuration word for `counter` from the live PMESR
 * event selection and PMCR1 mode-enable register state.
 */
static uint64_t
get_counter_config(uint32_t counter)
{
	uint64_t pmesr;

	switch (counter) {
	case 2:
	case 3:
	case 4:
	case 5:
		/* PMESR0 holds event selections for PMCs 2-5 */
		pmesr = PMESR_EVT_DECODE(SREG_READ("S3_1_C15_C5_0"), counter, 2);
		break;
	case 6:
	case 7:
#if KPC_ARM64_CONFIGURABLE_COUNT > 6
	case 8:
	case 9:
#endif // KPC_ARM64_CONFIGURABLE_COUNT > 6
		/* PMESR1 holds event selections for PMCs 6-9 */
		pmesr = PMESR_EVT_DECODE(SREG_READ("S3_1_C15_C6_0"), counter, 6);
		break;
	default:
		pmesr = 0;
		break;
	}

	kpc_config_t config = pmesr;

	uint64_t pmcr1 = SREG_READ("S3_1_C15_C1_0");

	/* translate PMCR1 mode-enable bits back into CFGWORD flags */
	if (pmcr1 & PMCR1_EL0_A32_ENABLE_MASK(counter)) {
		config |= CFGWORD_EL0A32EN_MASK;
	}
	if (pmcr1 & PMCR1_EL0_A64_ENABLE_MASK(counter)) {
		config |= CFGWORD_EL0A64EN_MASK;
	}
	if (pmcr1 & S3_1_C15_C1_0_A64_ENABLE_MASK(counter)) {
		config |= CFGWORD_EL1EN_MASK;
#if NO_MONITOR
		/* without a monitor, EL1 enable implies EL3 as well */
		config |= CFGWORD_EL3EN_MASK;
#endif
	}
#if !NO_MONITOR
	if (pmcr1 & PMCR1_EL3_A64_ENABLE_MASK(counter)) {
		config |= CFGWORD_EL3EN_MASK;
	}
#endif

	return config;
}
502 
503 /* internal functions */
504 
/*
 * CPU power-event callback: once kpc has configured the hardware, save the
 * counters before a CPU goes down and restore them when it boots.
 */
static bool
kpc_cpu_callback(void * __unused param, enum cpu_event event,
    unsigned int __unused cpu_or_cluster)
{
	/* nothing to do until kpc programs the hardware at least once */
	if (!kpc_configured) {
		return true;
	}

	switch (event) {
	case CPU_BOOTED:
		restore_regs();
		break;

	case CPU_DOWN:
		save_regs();
		break;

	default:
		break;
	}
	return true;
}
527 
/*
 * One-time initialization: allocate the shadow state, register for CPU
 * power events, and decide whether system (kernel) counting is allowed.
 */
void
kpc_arch_init(void)
{
	kpc_state = kalloc_type(struct kpc_save_state, Z_ZERO | Z_NOFAIL);
	cpu_event_register_callback(kpc_cpu_callback, NULL);
	/* only permit counting EL1 on devices where a debugger is allowed */
	kpc_allows_counting_system = PE_i_can_has_debugger(NULL);
}
535 
536 boolean_t
kpc_is_running_fixed(void)537 kpc_is_running_fixed(void)
538 {
539 	return (kpc_running_classes & KPC_CLASS_FIXED_MASK) == KPC_CLASS_FIXED_MASK;
540 }
541 
542 boolean_t
kpc_is_running_configurable(uint64_t pmc_mask)543 kpc_is_running_configurable(uint64_t pmc_mask)
544 {
545 	assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
546 	return ((kpc_running_classes & KPC_CLASS_CONFIGURABLE_MASK) == KPC_CLASS_CONFIGURABLE_MASK) &&
547 	       ((kpc_running_cfg_pmc_mask & pmc_mask) == pmc_mask);
548 }
549 
/* Number of fixed counters (owned by monotonic). */
uint32_t
kpc_fixed_count(void)
{
	return KPC_ARM64_FIXED_COUNT;
}
555 
/* Number of configurable counters managed by kpc. */
uint32_t
kpc_configurable_count(void)
{
	return KPC_ARM64_CONFIGURABLE_COUNT;
}
561 
/* Fixed counters have no configuration words. */
uint32_t
kpc_fixed_config_count(void)
{
	return 0;
}
567 
/* One configuration word per PMC selected in `pmc_mask`. */
uint32_t
kpc_configurable_config_count(uint64_t pmc_mask)
{
	uint32_t selected = kpc_popcount(pmc_mask);
	assert(selected <= kpc_configurable_count());
	return selected;
}
574 
/* Fixed counters have no configuration to report; always succeeds. */
int
kpc_get_fixed_config(kpc_config_t *configv __unused)
{
	return 0;
}
580 
581 uint64_t
kpc_fixed_max(void)582 kpc_fixed_max(void)
583 {
584 	return (1ULL << KPC_ARM64_COUNTER_WIDTH) - 1;
585 }
586 
587 uint64_t
kpc_configurable_max(void)588 kpc_configurable_max(void)
589 {
590 	return (1ULL << KPC_ARM64_COUNTER_WIDTH) - 1;
591 }
592 
/*
 * Enable or disable configurable counters on the current CPU.
 * `target_mask` selects which PMCs to touch; the corresponding bit in
 * `state_mask` gives the desired on/off state.
 */
static void
set_running_configurable(uint64_t target_mask, uint64_t state_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
	boolean_t enabled;

	enabled = ml_set_interrupts_enabled(FALSE);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if (((1ULL << i) & target_mask) == 0) {
			continue;
		}
		assert(kpc_controls_counter(offset + i));

		if ((1ULL << i) & state_mask) {
			enable_counter(offset + i);
		} else {
			disable_counter(offset + i);
		}
	}

	ml_set_interrupts_enabled(enabled);
}
616 
/* count of CPUs that still need to run kpc_set_running_xcall() */
static uint32_t kpc_xcall_sync;
/*
 * Cross-call target: apply the requested running state on this CPU, then
 * wake the initiating thread once the last CPU has finished.
 */
static void
kpc_set_running_xcall( void *vstate )
{
	struct kpc_running_remote *mp_config = (struct kpc_running_remote*) vstate;
	assert(mp_config);

	set_running_configurable(mp_config->cfg_target_mask,
	    mp_config->cfg_state_mask);

	if (os_atomic_dec(&kpc_xcall_sync, relaxed) == 0) {
		thread_wakeup((event_t) &kpc_xcall_sync);
	}
}
631 
/* count of CPUs that still need to run kpc_get_curcpu_counters_xcall() */
static uint32_t kpc_xread_sync;
/*
 * Cross-call target: read this CPU's counters into its slot of the shared
 * buffer and accumulate the number of counters written.
 */
static void
kpc_get_curcpu_counters_xcall(void *args)
{
	struct kpc_get_counters_remote *handler = args;

	assert(handler != NULL);
	assert(handler->buf != NULL);

	/* each CPU writes to its own stride of the shared buffer */
	int offset = cpu_number() * handler->buf_stride;
	int r = kpc_get_curcpu_counters(handler->classes, NULL, &handler->buf[offset]);

	/* number of counters added by this CPU, needs to be atomic  */
	os_atomic_add(&(handler->nb_counters), r, relaxed);

	if (os_atomic_dec(&kpc_xread_sync, relaxed) == 0) {
		thread_wakeup((event_t) &kpc_xread_sync);
	}
}
651 
/*
 * Gather counters from every CPU into `buf` (one stride per CPU) and
 * optionally report the calling CPU's number.  Returns the total number of
 * counter values written.
 */
int
kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
	assert(buf != NULL);

	int enabled = ml_set_interrupts_enabled(FALSE);

	/* grab counters and CPU number as close as possible */
	if (curcpu) {
		*curcpu = cpu_number();
	}

	struct kpc_get_counters_remote hdl = {
		.classes = classes,
		.nb_counters = 0,
		.buf = buf,
		.buf_stride = kpc_get_counter_count(classes)
	};

	/* blocks until every CPU has contributed its counters */
	cpu_broadcast_xcall(&kpc_xread_sync, TRUE, kpc_get_curcpu_counters_xcall, &hdl);
	int offset = hdl.nb_counters;

	(void)ml_set_interrupts_enabled(enabled);

	return offset;
}
678 
/*
 * Read the fixed counters via monotonic.  Returns ENOTSUP when the kernel
 * is built without CPU counter support.
 */
int
kpc_get_fixed_counters(uint64_t *counterv)
{
#if CONFIG_CPU_COUNTERS
	mt_fixed_counts(counterv);
	return 0;
#else /* CONFIG_CPU_COUNTERS */
#pragma unused(counterv)
	return ENOTSUP;
#endif /* !CONFIG_CPU_COUNTERS */
}
690 
/*
 * Read the configurable counters selected by `pmc_mask` into `counterv`,
 * folding in the shadow/reload bookkeeping so callers see monotonically
 * accumulated values.  Always returns 0.
 */
int
kpc_get_configurable_counters(uint64_t *counterv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
	uint64_t ctr = 0ULL;

	assert(counterv);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if (((1ULL << i) & pmc_mask) == 0) {
			continue;
		}
		ctr = read_counter(i + offset);

		if (ctr & KPC_ARM64_COUNTER_OVF_MASK) {
			/* overflow pending (PMI not yet handled): add a full period */
			ctr = CONFIGURABLE_SHADOW(i) +
			    (kpc_configurable_max() - CONFIGURABLE_RELOAD(i) + 1 /* Wrap */) +
			    (ctr & KPC_ARM64_COUNTER_MASK);
		} else {
			ctr = CONFIGURABLE_SHADOW(i) +
			    (ctr - CONFIGURABLE_RELOAD(i));
		}

		*counterv++ = ctr;
	}

	return 0;
}
719 
720 int
kpc_get_configurable_config(kpc_config_t * configv,uint64_t pmc_mask)721 kpc_get_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
722 {
723 	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
724 
725 	assert(configv);
726 
727 	for (uint32_t i = 0; i < cfg_count; ++i) {
728 		if ((1ULL << i) & pmc_mask) {
729 			*configv++ = get_counter_config(i + offset);
730 		}
731 	}
732 	return 0;
733 }
734 
735 static uint32_t kpc_config_sync;
736 static void
kpc_set_config_xcall(void * vmp_config)737 kpc_set_config_xcall(void *vmp_config)
738 {
739 	struct kpc_config_remote *mp_config = vmp_config;
740 	uint32_t classes = 0ULL;
741 
742 	assert(mp_config);
743 	classes = mp_config->classes;
744 	boolean_t enabled = ml_set_interrupts_enabled(FALSE);
745 	restore_control_regs(classes);
746 	ml_set_interrupts_enabled(enabled);
747 
748 	if (os_atomic_dec(&kpc_config_sync, relaxed) == 0) {
749 		thread_wakeup((event_t) &kpc_config_sync);
750 	}
751 }
752 
/*
 * Reset a counter to its reload value and return the overshoot past the
 * overflow point.  Counters not controlled by kpc (e.g. owned by the power
 * manager) only have their overflow bit cleared.
 */
static uint64_t
kpc_reload_counter(uint32_t ctr)
{
	assert(ctr < (kpc_configurable_count() + kpc_fixed_count()));

	uint64_t old = read_counter(ctr);

	if (kpc_controls_counter(ctr)) {
		write_counter(ctr, FIXED_RELOAD(ctr));
		/* mask off the overflow bit to get the counted overshoot */
		return old & KPC_ARM64_COUNTER_MASK;
	} else {
		/*
		 * Unset the overflow bit to clear the condition that drives
		 * PMIs.  The power manager is not interested in handling PMIs.
		 */
		write_counter(ctr, old & KPC_ARM64_COUNTER_MASK);
		return 0;
	}
}
772 
/* count of CPUs that still need to run kpc_set_reload_xcall() */
static uint32_t kpc_reload_sync;
/*
 * Cross-call target: set new sampling periods for the selected configurable
 * counters on this CPU and reload them, then wake the initiator once the
 * last CPU has finished.
 */
static void
kpc_set_reload_xcall(void *vmp_config)
{
	struct kpc_config_remote *mp_config = vmp_config;
	uint32_t classes = 0, count = 0, offset = kpc_fixed_count();
	uint64_t *new_period = NULL, max = kpc_configurable_max();
	boolean_t enabled;

	assert(mp_config);
	assert(mp_config->configv);
	classes = mp_config->classes;
	new_period = mp_config->configv;

	enabled = ml_set_interrupts_enabled(FALSE);

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		/*
		 * Update _all_ shadow counters, this cannot be done for only
		 * selected PMCs. Otherwise, we would corrupt the configurable
		 * shadow buffer since the PMCs are muxed according to the pmc
		 * mask.
		 */
		uint64_t all_cfg_mask = (1ULL << kpc_configurable_count()) - 1;
		kpc_get_configurable_counters(&CONFIGURABLE_SHADOW(0), all_cfg_mask);

		/* set the new period */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			/* ignore the counter */
			if (((1ULL << i) & mp_config->pmc_mask) == 0) {
				continue;
			}
			/* a zero period means "count the full counter range" */
			if (*new_period == 0) {
				*new_period = kpc_configurable_max();
			}
			/* counters count up to max, so reload to (max - period) */
			CONFIGURABLE_RELOAD(i) = max - *new_period;
			/* reload the counter */
			kpc_reload_counter(offset + i);
			/* next period value */
			new_period++;
		}
	}

	ml_set_interrupts_enabled(enabled);

	if (os_atomic_dec(&kpc_reload_sync, relaxed) == 0) {
		thread_wakeup((event_t) &kpc_reload_sync);
	}
}
823 
/*
 * Handle a PMI for counter `ctr`: reload the counter, fold one period into
 * its shadow value, and deliver a kperf sample if an action is configured.
 * Runs in interrupt context.
 */
void
kpc_pmi_handler(unsigned int ctr)
{
	uintptr_t pc = 0;
	bool captured = false;

#if HAS_CPMU_PC_CAPTURE
	/* prefer the hardware-captured PC when this counter supports it */
	if (FIXED_ACTIONID(ctr) && PMC_SUPPORTS_PC_CAPTURE(ctr)) {
		uintptr_t pc_capture = SREG_READ("S3_1_C15_C14_1");
		/* the capture register tags which PMC it belongs to */
		captured = PC_CAPTURE_PMC(pc_capture) == ctr;
		if (captured) {
			pc = PC_CAPTURE_PC(pc_capture);
		}
	}
#endif // HAS_CPMU_PC_CAPTURE

	uint64_t extra = kpc_reload_counter(ctr);

	/* advance the shadow by one full period plus the overshoot */
	FIXED_SHADOW(ctr) += (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* Wrap */) + extra;

	if (FIXED_ACTIONID(ctr)) {
		bool kernel = true;
		struct arm_saved_state *state;
		state = getCpuDatap()->cpu_int_state;
		if (state) {
			kernel = !PSR64_IS_USER(get_saved_state_cpsr(state));
			if (!captured) {
				/* fall back to the interrupted PC */
				pc = get_saved_state_pc(state);
			}
			if (kernel) {
				pc = VM_KERNEL_UNSLIDE(pc);
			}
		} else {
			/*
			 * Don't know where the PC came from and may be a kernel address, so
			 * clear it to prevent leaking the slide.
			 */
			pc = 0;
		}

		uint64_t config = get_counter_config(ctr);
		kperf_kpc_flags_t flags = kernel ? KPC_KERNEL_PC : 0;
		flags |= captured ? KPC_CAPTURED_PC : 0;
		bool custom_mode = false;
		if ((config & CFGWORD_EL0A32EN_MASK) || (config & CFGWORD_EL0A64EN_MASK)) {
			flags |= KPC_USER_COUNTING;
			custom_mode = true;
		}
		if ((config & CFGWORD_EL1EN_MASK)) {
			flags |= KPC_KERNEL_COUNTING;
			custom_mode = true;
		}
		/*
		 * For backwards-compatibility.
		 */
		if (!custom_mode) {
			flags |= KPC_USER_COUNTING | KPC_KERNEL_COUNTING;
		}
		kpc_sample_kperf(FIXED_ACTIONID(ctr), ctr, config & 0xffff, FIXED_SHADOW(ctr),
		    pc, flags);
	}
}
886 
/* All three counter classes are supported on Apple CPMU hardware. */
uint32_t
kpc_get_classes(void)
{
	return KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_RAWPMU_MASK;
}
892 
/*
 * Start/stop counters on every CPU, then record the requested state so
 * newly-booted CPUs pick it up.  Always returns 0.
 */
int
kpc_set_running_arch(struct kpc_running_remote *mp_config)
{
	assert(mp_config != NULL);

	/* dispatch to all CPUs */
	cpu_broadcast_xcall(&kpc_xcall_sync, TRUE, kpc_set_running_xcall, mp_config);

	kpc_running_cfg_pmc_mask = mp_config->cfg_state_mask;
	kpc_running_classes = mp_config->classes;
	kpc_configured = 1;

	return 0;
}
907 
/*
 * Apply new sampling periods on every CPU via kpc_set_reload_xcall().
 * Always returns 0.
 */
int
kpc_set_period_arch(struct kpc_config_remote *mp_config)
{
	assert(mp_config);

	/* dispatch to all CPUs */
	cpu_broadcast_xcall(&kpc_reload_sync, TRUE, kpc_set_reload_xcall, mp_config);

	kpc_configured = 1;

	return 0;
}
920 
/*
 * Validate and stage a new configurable/rawpmu configuration in kpc_state,
 * then broadcast it to all CPUs via kpc_set_config_xcall().  Returns EPERM
 * if a secure request selects a disallowed event; 0 otherwise.
 */
int
kpc_set_config_arch(struct kpc_config_remote *mp_config)
{
	assert(mp_config);
	assert(mp_config->configv);

	uint64_t cfg_pmc_mask = mp_config->pmc_mask;
	unsigned int cfg_count = kpc_configurable_count();
	unsigned int offset = kpc_fixed_count();
	unsigned int config_index = 0;

	if (mp_config->secure) {
		/* Do a pass to find any disallowed events to avoid partial configuration. */
		for (uint32_t i = 0; i < cfg_count; ++i) {
			if (((1ULL << i) & cfg_pmc_mask) == 0) {
				continue;
			}
			uint64_t config_value = mp_config->configv[config_index];
			if (!cpc_event_allowed(CPC_HW_CPMU, config_value & PMESR_PMC_MASK)) {
				return EPERM;
			}
			config_index++;
		}
	}

	config_index = 0;
	for (uint32_t i = 0; i < cfg_count; ++i) {
		if (((1ULL << i) & cfg_pmc_mask) == 0) {
			continue;
		}
		unsigned int counter = i + offset;
		assert(kpc_controls_counter(counter));
		uint64_t config_value = mp_config->configv[config_index];

		/* PMESR0 selects events for PMCs 2-5; PMESR1 for PMCs 6-9 */
		const int pmesr_idx = counter < 6 ? 0 : 1;
		const int pmesr_off = counter < 6 ? 2 : 6;
		kpc_state->pmesr[pmesr_idx] &= PMESR_EVT_CLEAR(counter, pmesr_off);
		kpc_state->pmesr[pmesr_idx] |= PMESR_EVT_ENCODE(config_value, counter,
		    pmesr_off);
		set_modes(counter, config_value, mp_config->secure);
		config_index++;
	}

	if (mp_config->classes & KPC_CLASS_RAWPMU_MASK) {
		/* raw PMU config words follow the per-PMC config words in configv */
		unsigned int rawpmu_start = kpc_popcount(mp_config->pmc_mask);
		memcpy(&kpc_state->rawpmu, &mp_config->configv[rawpmu_start],
		    sizeof(kpc_state->rawpmu));
	}

	cpu_broadcast_xcall(&kpc_config_sync, TRUE, kpc_set_config_xcall, mp_config);
	kpc_configured = 1;

	return 0;
}
975 
976 void
kpc_idle(void)977 kpc_idle(void)
978 {
979 	if (kpc_configured) {
980 		save_regs();
981 	}
982 }
983 
984 void
kpc_idle_exit(void)985 kpc_idle_exit(void)
986 {
987 	if (kpc_configured) {
988 		restore_regs();
989 	}
990 }
991 
992 int
kpc_set_sw_inc(uint32_t mask __unused)993 kpc_set_sw_inc( uint32_t mask __unused )
994 {
995 	return ENOTSUP;
996 }
997 
998 int
kpc_get_pmu_version(void)999 kpc_get_pmu_version(void)
1000 {
1001 	return KPC_PMU_ARM_APPLE;
1002 }
1003 
1004 #else /* APPLE_ARM64_ARCH_FAMILY */
1005 
1006 /* We don't currently support non-Apple arm64 PMU configurations like PMUv3 */
1007 
/*
 * Nothing to initialize when no supported PMU is compiled in.
 */
void
kpc_arch_init(void)
{
}
1013 
1014 uint32_t
kpc_get_classes(void)1015 kpc_get_classes(void)
1016 {
1017 	return 0;
1018 }
1019 
1020 uint32_t
kpc_fixed_count(void)1021 kpc_fixed_count(void)
1022 {
1023 	return 0;
1024 }
1025 
1026 uint32_t
kpc_configurable_count(void)1027 kpc_configurable_count(void)
1028 {
1029 	return 0;
1030 }
1031 
1032 uint32_t
kpc_fixed_config_count(void)1033 kpc_fixed_config_count(void)
1034 {
1035 	return 0;
1036 }
1037 
1038 uint32_t
kpc_configurable_config_count(uint64_t pmc_mask __unused)1039 kpc_configurable_config_count(uint64_t pmc_mask __unused)
1040 {
1041 	return 0;
1042 }
1043 
1044 int
kpc_get_fixed_config(kpc_config_t * configv __unused)1045 kpc_get_fixed_config(kpc_config_t *configv __unused)
1046 {
1047 	return 0;
1048 }
1049 
1050 uint64_t
kpc_fixed_max(void)1051 kpc_fixed_max(void)
1052 {
1053 	return 0;
1054 }
1055 
1056 uint64_t
kpc_configurable_max(void)1057 kpc_configurable_max(void)
1058 {
1059 	return 0;
1060 }
1061 
1062 int
kpc_get_configurable_config(kpc_config_t * configv __unused,uint64_t pmc_mask __unused)1063 kpc_get_configurable_config(kpc_config_t *configv __unused, uint64_t pmc_mask __unused)
1064 {
1065 	return ENOTSUP;
1066 }
1067 
1068 int
kpc_get_configurable_counters(uint64_t * counterv __unused,uint64_t pmc_mask __unused)1069 kpc_get_configurable_counters(uint64_t *counterv __unused, uint64_t pmc_mask __unused)
1070 {
1071 	return ENOTSUP;
1072 }
1073 
1074 int
kpc_get_fixed_counters(uint64_t * counterv __unused)1075 kpc_get_fixed_counters(uint64_t *counterv __unused)
1076 {
1077 	return 0;
1078 }
1079 
1080 boolean_t
kpc_is_running_fixed(void)1081 kpc_is_running_fixed(void)
1082 {
1083 	return FALSE;
1084 }
1085 
1086 boolean_t
kpc_is_running_configurable(uint64_t pmc_mask __unused)1087 kpc_is_running_configurable(uint64_t pmc_mask __unused)
1088 {
1089 	return FALSE;
1090 }
1091 
1092 int
kpc_set_running_arch(struct kpc_running_remote * mp_config __unused)1093 kpc_set_running_arch(struct kpc_running_remote *mp_config __unused)
1094 {
1095 	return ENOTSUP;
1096 }
1097 
1098 int
kpc_set_period_arch(struct kpc_config_remote * mp_config __unused)1099 kpc_set_period_arch(struct kpc_config_remote *mp_config __unused)
1100 {
1101 	return ENOTSUP;
1102 }
1103 
1104 int
kpc_set_config_arch(struct kpc_config_remote * mp_config __unused)1105 kpc_set_config_arch(struct kpc_config_remote *mp_config __unused)
1106 {
1107 	return ENOTSUP;
1108 }
1109 
/*
 * No supported PMU: no register state to save on idle entry.
 */
void
kpc_idle(void)
{
}
1115 
/*
 * No supported PMU: no register state to restore on idle exit.
 */
void
kpc_idle_exit(void)
{
}
1121 
1122 int
kpc_get_all_cpus_counters(uint32_t classes __unused,int * curcpu __unused,uint64_t * buf __unused)1123 kpc_get_all_cpus_counters(uint32_t classes __unused, int *curcpu __unused, uint64_t *buf __unused)
1124 {
1125 	return 0;
1126 }
1127 
1128 int
kpc_set_sw_inc(uint32_t mask __unused)1129 kpc_set_sw_inc( uint32_t mask __unused )
1130 {
1131 	return ENOTSUP;
1132 }
1133 
1134 int
kpc_get_pmu_version(void)1135 kpc_get_pmu_version(void)
1136 {
1137 	return KPC_PMU_ERROR;
1138 }
1139 
1140 uint32_t
kpc_rawpmu_config_count(void)1141 kpc_rawpmu_config_count(void)
1142 {
1143 	return 0;
1144 }
1145 
1146 int
kpc_get_rawpmu_config(__unused kpc_config_t * configv)1147 kpc_get_rawpmu_config(__unused kpc_config_t *configv)
1148 {
1149 	return 0;
1150 }
1151 
1152 #endif /* !APPLE_ARM64_ARCH_FAMILY */
1153