xref: /xnu-8020.140.41/osfmk/arm64/monotonic_arm64.c (revision 27b03b360a988dfd3dfdf34262bb0042026747cc)
/*
 * Copyright (c) 2017-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/cpu_data_internal.h>
#include <arm/machine_routines.h>
#include <arm64/monotonic.h>
#include <kern/assert.h>
#include <kern/debug.h> /* panic */
#include <kern/kpc.h>
#include <kern/monotonic.h>
#include <machine/atomic.h>
#include <machine/limits.h> /* CHAR_BIT */
#include <os/overflow.h>
#include <pexpert/arm64/board_config.h>
#include <pexpert/device_tree.h> /* SecureDTFindEntry */
#include <pexpert/pexpert.h>
#include <stdatomic.h>
#include <stdint.h>
#include <string.h>
#include <sys/errno.h>
#include <sys/monotonic.h>

/*
 * Ensure that control registers read back what was written, on MACH_ASSERT
 * kernels.
 *
 * A static inline function cannot be used here, because the register name is
 * passed through the builtin -- it requires a constant string as its first
 * argument, since system registers are encoded as immediates in the MSR/MRS
 * instructions.
 */
#if MACH_ASSERT
#define CTRL_REG_SET(reg, val) do { \
	__builtin_arm_wsr64((reg), (val)); \
	uint64_t __check_reg = __builtin_arm_rsr64((reg)); \
	if (__check_reg != (val)) { \
	        panic("value written to %s was not read back (wrote %llx, read %llx)", \
	            #reg, (val), __check_reg); \
	} \
} while (0)
#else /* MACH_ASSERT */
#define CTRL_REG_SET(reg, val) __builtin_arm_wsr64((reg), (val))
#endif /* MACH_ASSERT */
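
/*
 * Usage sketch (illustrative; the uncore code below uses this macro for its
 * configuration registers): the register name must be a string literal, e.g.
 *
 *     CTRL_REG_SET("PMCR1_EL1", PMCR1_INIT);
 *
 * On MACH_ASSERT kernels, this panics if the value does not read back intact.
 */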

#pragma mark core counters

const bool mt_core_supported = true;

static const ml_topology_info_t *topology_info;

/*
 * PMC[0-1] are the 48/64-bit fixed counters -- PMC0 is cycles and PMC1 is
 * instructions (see arm64/monotonic.h).
 *
 * PMC2+ are currently handled by kpc.
 */
#define PMC_0_7(X, A) X(0, A); X(1, A); X(2, A); X(3, A); X(4, A); X(5, A); \
    X(6, A); X(7, A)

#if CORE_NCTRS > 8
#define PMC_8_9(X, A) X(8, A); X(9, A)
#else // CORE_NCTRS > 8
#define PMC_8_9(X, A)
#endif // CORE_NCTRS > 8

#define PMC_ALL(X, A) PMC_0_7(X, A); PMC_8_9(X, A)
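
/*
 * These X-macros expand a callback over each counter index.  For instance,
 * with the PMC_RD callback defined in mt_core_snap() below, PMC_ALL(PMC_RD, 0)
 * expands (in part) to:
 *
 *     case (0): return __builtin_arm_rsr64(__MSR_STR(PMC0));
 *     case (1): return __builtin_arm_rsr64(__MSR_STR(PMC1));
 *     ...
 *
 * generating one switch case per counter without repeating the register names.
 */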

#if CPMU_64BIT_PMCS
#define PMC_WIDTH (63)
#else // CPMU_64BIT_PMCS
#define PMC_WIDTH (47)
#endif // !CPMU_64BIT_PMCS

#define CTR_MAX ((UINT64_C(1) << PMC_WIDTH) - 1)
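
/*
 * For example, with 48-bit PMCs (PMC_WIDTH == 47, since the high bit is the
 * overflow bit), CTR_MAX == (1ULL << 47) - 1 == 0x7fffffffffff.
 */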

#define CYCLES 0
#define INSTRS 1

/*
 * PMC0's offset into a core's PIO range.
 *
 * This allows cores to remotely query another core's counters.
 */

#define PIO_PMC0_OFFSET (0x200)
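
/*
 * Illustrative only: a remote read of another core's PMC0 would dereference
 * that core's PIO-mapped register range at this offset, along the lines of
 * `*(uint64_t *)(core_pio_base + PIO_PMC0_OFFSET)`, where `core_pio_base` is
 * a hypothetical mapping of the target core's registers.  This file only
 * performs remote accesses for the uncore monitors, below.
 */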

/*
 * The offset of the counter in the configuration registers.  Post-Hurricane
 * devices have additional counters that need a larger shift than the original
 * counters.
 *
 * XXX For now, just support the lower-numbered counters.
 */
#define CTR_POS(CTR) (CTR)

/*
 * PMCR0 is the main control register for the performance monitor.  It
 * controls whether the counters are enabled, how they deliver interrupts, and
 * other features.
 */

#define PMCR0_CTR_EN(CTR) (UINT64_C(1) << CTR_POS(CTR))
#define PMCR0_FIXED_EN (PMCR0_CTR_EN(CYCLES) | PMCR0_CTR_EN(INSTRS))
/* how interrupts are delivered on a PMI */
enum {
	PMCR0_INTGEN_OFF = 0,
	PMCR0_INTGEN_PMI = 1,
	PMCR0_INTGEN_AIC = 2,
	PMCR0_INTGEN_HALT = 3,
	PMCR0_INTGEN_FIQ = 4,
};
#define PMCR0_INTGEN_SET(X) ((uint64_t)(X) << 8)

#if CPMU_AIC_PMI
#define PMCR0_INTGEN_INIT PMCR0_INTGEN_SET(PMCR0_INTGEN_AIC)
#else /* CPMU_AIC_PMI */
#define PMCR0_INTGEN_INIT PMCR0_INTGEN_SET(PMCR0_INTGEN_FIQ)
#endif /* !CPMU_AIC_PMI */

#define PMCR0_PMI_SHIFT (12)
#define PMCR0_CTR_GE8_PMI_SHIFT (44)
#define PMCR0_PMI_EN(CTR) (UINT64_C(1) << (PMCR0_PMI_SHIFT + CTR_POS(CTR)))
/* fixed counters are always counting */
#define PMCR0_PMI_INIT (PMCR0_PMI_EN(CYCLES) | PMCR0_PMI_EN(INSTRS))
/* disable counting on a PMI */
#define PMCR0_DISCNT_EN (UINT64_C(1) << 20)
/* block PMIs until ERET retires */
#define PMCR0_WFRFE_EN (UINT64_C(1) << 22)
/* count global (not just core-local) L2C events */
#define PMCR0_L2CGLOBAL_EN (UINT64_C(1) << 23)
/* user mode access to configuration registers */
#define PMCR0_USEREN_EN (UINT64_C(1) << 30)
#define PMCR0_CTR_GE8_EN_SHIFT (32)

#if HAS_CPMU_PC_CAPTURE
#define PMCR0_PCC_INIT (UINT64_C(0x7) << 24)
#else /* HAS_CPMU_PC_CAPTURE */
#define PMCR0_PCC_INIT (0)
#endif /* !HAS_CPMU_PC_CAPTURE */

#define PMCR0_INIT (PMCR0_INTGEN_INIT | PMCR0_PMI_INIT | PMCR0_PCC_INIT)
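
/*
 * Worked example: on a FIQ-delivery system with PC capture (CPMU_AIC_PMI
 * unset, HAS_CPMU_PC_CAPTURE set), PMCR0_INIT comes out to:
 *
 *     PMCR0_INTGEN_INIT = 4 << 8                = 0x0000400
 *     PMCR0_PMI_INIT    = (1 << 12) | (1 << 13) = 0x0003000
 *     PMCR0_PCC_INIT    = 0x7 << 24             = 0x7000000
 *     PMCR0_INIT                                = 0x7003400
 */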

/*
 * PMCR1 controls which execution modes count events.
 */
#define PMCR1_EL0A32_EN(CTR) (UINT64_C(1) << (0 + CTR_POS(CTR)))
#define PMCR1_EL0A64_EN(CTR) (UINT64_C(1) << (8 + CTR_POS(CTR)))
#define PMCR1_EL1A64_EN(CTR) (UINT64_C(1) << (16 + CTR_POS(CTR)))
/* PMCR1_EL3A64 is not supported on systems with no monitor */
#if defined(APPLEHURRICANE)
#define PMCR1_EL3A64_EN(CTR) UINT64_C(0)
#else
#define PMCR1_EL3A64_EN(CTR) (UINT64_C(1) << (24 + CTR_POS(CTR)))
#endif
#define PMCR1_ALL_EN(CTR) (PMCR1_EL0A32_EN(CTR) | PMCR1_EL0A64_EN(CTR) | \
	                   PMCR1_EL1A64_EN(CTR) | PMCR1_EL3A64_EN(CTR))

/* fixed counters always count in all modes */
#define PMCR1_INIT (PMCR1_ALL_EN(CYCLES) | PMCR1_ALL_EN(INSTRS))
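
/*
 * Since PMCR1_ALL_EN(CTR) sets bits CTR, CTR + 8, CTR + 16, and CTR + 24,
 * PMCR1_INIT enables PMC0 and PMC1 in every mode: bits {0, 1}, {8, 9},
 * {16, 17}, and {24, 25}, i.e. 0x3030303 (when EL3A64 is supported).
 */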

static inline void
core_init_execution_modes(void)
{
	uint64_t pmcr1;

	pmcr1 = __builtin_arm_rsr64("PMCR1_EL1");
	pmcr1 |= PMCR1_INIT;
	__builtin_arm_wsr64("PMCR1_EL1", pmcr1);
}

#define PMSR_OVF(CTR) (1ULL << (CTR))

static int
core_init(__unused mt_device_t dev)
{
	/* the dev node interface to the core counters is still unsupported */
	return ENOTSUP;
}

struct mt_cpu *
mt_cur_cpu(void)
{
	return &getCpuDatap()->cpu_monotonic;
}

uint64_t
mt_core_snap(unsigned int ctr)
{
	switch (ctr) {
#define PMC_RD(CTR, UNUSED) case (CTR): return __builtin_arm_rsr64(__MSR_STR(PMC ## CTR))
		PMC_ALL(PMC_RD, 0);
#undef PMC_RD
	default:
		panic("monotonic: invalid core counter read: %u", ctr);
		__builtin_unreachable();
	}
}

void
mt_core_set_snap(unsigned int ctr, uint64_t count)
{
	switch (ctr) {
	case 0:
		__builtin_arm_wsr64("PMC0", count);
		break;
	case 1:
		__builtin_arm_wsr64("PMC1", count);
		break;
	default:
		panic("monotonic: invalid core counter %u write %llu", ctr, count);
		__builtin_unreachable();
	}
}

static void
core_set_enabled(void)
{
	uint32_t kpc_mask = kpc_get_running() &
	    (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK);
	uint64_t pmcr0 = __builtin_arm_rsr64("PMCR0_EL1");
	pmcr0 |= PMCR0_INIT | PMCR0_FIXED_EN;
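
	/*
	 * Worked example: if kpc is running the first two configurable
	 * counters, kpc_get_configurable_pmc_mask() returns 0b11; shifting by
	 * MT_CORE_NFIXED skips past the fixed counters (cycles and
	 * instructions), so the mask enables PMC2 and PMC3, and the same bits
	 * shifted by PMCR0_PMI_SHIFT request PMIs for them.
	 */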
	if (kpc_mask != 0) {
		uint64_t kpc_ctrs = kpc_get_configurable_pmc_mask(kpc_mask) <<
		        MT_CORE_NFIXED;
#if KPC_ARM64_CONFIGURABLE_COUNT > 6
		uint64_t ctrs_ge8 = kpc_ctrs >> 8;
		pmcr0 |= ctrs_ge8 << PMCR0_CTR_GE8_EN_SHIFT;
		pmcr0 |= ctrs_ge8 << PMCR0_CTR_GE8_PMI_SHIFT;
		kpc_ctrs &= (1ULL << 8) - 1;
#endif /* KPC_ARM64_CONFIGURABLE_COUNT > 6 */
		kpc_ctrs |= kpc_ctrs << PMCR0_PMI_SHIFT;
		pmcr0 |= kpc_ctrs;
	}

	__builtin_arm_wsr64("PMCR0_EL1", pmcr0);
#if MACH_ASSERT
	/*
	 * Only check for the values that were ORed in.
	 */
	uint64_t pmcr0_check = __builtin_arm_rsr64("PMCR0_EL1");
	if ((pmcr0_check & (PMCR0_INIT | PMCR0_FIXED_EN)) != (PMCR0_INIT | PMCR0_FIXED_EN)) {
		panic("monotonic: hardware ignored enable (read %llx, wrote %llx)",
		    pmcr0_check, pmcr0);
	}
#endif /* MACH_ASSERT */
}

static void
core_idle(__unused cpu_data_t *cpu)
{
	assert(cpu != NULL);
	assert(ml_get_interrupts_enabled() == FALSE);

#if DEBUG
	uint64_t pmcr0 = __builtin_arm_rsr64("PMCR0_EL1");
	if ((pmcr0 & PMCR0_FIXED_EN) == 0) {
		panic("monotonic: counters disabled before idling, pmcr0 = 0x%llx", pmcr0);
	}
	uint64_t pmcr1 = __builtin_arm_rsr64("PMCR1_EL1");
	if ((pmcr1 & PMCR1_INIT) == 0) {
		panic("monotonic: counter modes disabled before idling, pmcr1 = 0x%llx", pmcr1);
	}
#endif /* DEBUG */

	/* disable counters before updating */
	__builtin_arm_wsr64("PMCR0_EL1", PMCR0_INIT);

	mt_update_fixed_counts();
}

#pragma mark uncore performance monitor

#if HAS_UNCORE_CTRS

static bool mt_uncore_initted = false;

/*
 * Uncore Performance Monitor
 *
 * Uncore performance monitors provide event-counting for the last-level caches
 * (LLCs).  Each LLC has its own uncore performance monitor, which can only be
 * accessed by cores that use that LLC.  Like the core performance monitoring
 * unit, uncore counters are configured globally.  If there is more than one
 * LLC on the system, PIO reads must be used to satisfy uncore requests (using
 * the `_r` remote variants of the access functions).  Otherwise, local MSRs
 * suffice (using the `_l` local variants of the access functions).
 */

#if UNCORE_PER_CLUSTER
#define MAX_NMONITORS MAX_CPU_CLUSTERS
static uintptr_t cpm_impl[MAX_NMONITORS] = {};
#else
#define MAX_NMONITORS (1)
#endif /* UNCORE_PER_CLUSTER */

#if UNCORE_VERSION >= 2
/*
 * V2 uncore monitors feature a CTI mechanism -- the second bit of UPMSR is
 * used to track if a CTI has been triggered due to an overflow.
 */
#define UPMSR_OVF_POS 2
#else /* UNCORE_VERSION >= 2 */
#define UPMSR_OVF_POS 1
#endif /* UNCORE_VERSION < 2 */
#define UPMSR_OVF(R, CTR) ((R) >> ((CTR) + UPMSR_OVF_POS) & 0x1)
#define UPMSR_OVF_MASK    (((UINT64_C(1) << UNCORE_NCTRS) - 1) << UPMSR_OVF_POS)
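
/*
 * For example, on a V2 monitor, counter 3's overflow flag is bit 3 + 2 == 5
 * of UPMSR, so UPMSR_OVF(upmsr, 3) == (upmsr >> 5) & 1; with 16 counters,
 * UPMSR_OVF_MASK == 0xffff << 2.
 */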

#define UPMPCM_CORE(ID) (UINT64_C(1) << (ID))

#if UPMU_64BIT_PMCS
#define UPMC_WIDTH (63)
#else // UPMU_64BIT_PMCS
#define UPMC_WIDTH (47)
#endif // !UPMU_64BIT_PMCS

/*
 * uncore_pmi_mask is a bitmask of CPUs that receive uncore PMIs.  It's
 * initialized by uncore_init and controllable by the uncore_pmi_mask boot-arg.
 */
static int32_t uncore_pmi_mask = 0;

/*
 * uncore_active_ctrs is a bitmask of the uncore counters that are currently
 * requested.
 */
static uint16_t uncore_active_ctrs = 0;
static_assert(sizeof(uncore_active_ctrs) * CHAR_BIT >= UNCORE_NCTRS,
    "counter mask should fit the full range of counters");

/*
 * mt_uncore_enabled is true when any uncore counters are active.
 */
bool mt_uncore_enabled = false;

/*
 * uncore_config holds the event configuration for each uncore counter --
 * stored as unions, to make it easy to program the hardware registers.
 */
static struct uncore_config {
	union {
		uint8_t uce_ctrs[UNCORE_NCTRS];
		uint64_t uce_regs[UNCORE_NCTRS / 8];
	} uc_events;
	union {
		uint16_t uccm_masks[UNCORE_NCTRS];
		uint64_t uccm_regs[UNCORE_NCTRS / 4];
	} uc_cpu_masks[MAX_NMONITORS];
} uncore_config;

static struct uncore_monitor {
	/*
	 * The last snapshot of each of the hardware counter values.
	 */
	uint64_t um_snaps[UNCORE_NCTRS];

	/*
	 * The accumulated counts for each counter.
	 */
	uint64_t um_counts[UNCORE_NCTRS];

	/*
	 * Protects accessing the hardware registers and fields in this structure.
	 */
	lck_spin_t um_lock;

	/*
	 * Whether this monitor needs its registers restored after wake.
	 */
	bool um_sleeping;
} uncore_monitors[MAX_NMONITORS];

/*
 * Each uncore unit has its own monitor, corresponding to the memory hierarchy
 * of the LLCs.
 */
static unsigned int
uncore_nmonitors(void)
{
#if UNCORE_PER_CLUSTER
	return topology_info->num_clusters;
#else /* UNCORE_PER_CLUSTER */
	return 1;
#endif /* !UNCORE_PER_CLUSTER */
}

static unsigned int
uncmon_get_curid(void)
{
#if UNCORE_PER_CLUSTER
	return cpu_cluster_id();
#else /* UNCORE_PER_CLUSTER */
	return 0;
#endif /* !UNCORE_PER_CLUSTER */
}

/*
 * Per-monitor locks are required to prevent races with the PMI handlers, not
 * with other CPUs that are configuring (those are serialized by monotonic's
 * per-device lock).
 */

static int
uncmon_lock(struct uncore_monitor *mon)
{
	int intrs_en = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(&mon->um_lock);
	return intrs_en;
}

static void
uncmon_unlock(struct uncore_monitor *mon, int intrs_en)
{
	lck_spin_unlock(&mon->um_lock);
	(void)ml_set_interrupts_enabled(intrs_en);
}

/*
 * Helper functions for accessing the hardware -- these require the monitor to
 * be locked, to prevent other CPUs' PMI handlers from making local
 * modifications or updating the counts.
 */

#if UNCORE_VERSION >= 2
#define UPMCR0_INTEN_POS 20
#define UPMCR0_INTGEN_POS 16
#else /* UNCORE_VERSION >= 2 */
#define UPMCR0_INTEN_POS 12
#define UPMCR0_INTGEN_POS 8
#endif /* UNCORE_VERSION < 2 */
enum {
	UPMCR0_INTGEN_OFF = 0,
	/* fast PMIs are only supported on core CPMU */
	UPMCR0_INTGEN_AIC = 2,
	UPMCR0_INTGEN_HALT = 3,
	UPMCR0_INTGEN_FIQ = 4,
};
/* always enable interrupts for all counters */
#define UPMCR0_INTEN (((1ULL << UNCORE_NCTRS) - 1) << UPMCR0_INTEN_POS)
/* route uncore PMIs through the FIQ path */
#define UPMCR0_INIT (UPMCR0_INTEN | (UPMCR0_INTGEN_FIQ << UPMCR0_INTGEN_POS))
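
/*
 * For instance, a V2 monitor with 16 counters is initialized with
 * UPMCR0_INIT == (0xffff << 20) | (4 << 16): interrupts enabled on every
 * counter, delivered as FIQs.
 */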

/*
 * Turn counting on for the counters set in `enctrmask`, and off for the rest.
 */
static inline void
uncmon_set_counting_locked_l(__unused unsigned int monid, uint64_t enctrmask)
{
	/*
	 * UPMCR0 controls which counters are enabled and how interrupts are
	 * generated for overflows.
	 */
	__builtin_arm_wsr64("UPMCR0_EL1", UPMCR0_INIT | enctrmask);
}

#if UNCORE_PER_CLUSTER

/*
 * Turn counting on for the counters set in `enctrmask`, and off for the rest.
 */
static inline void
uncmon_set_counting_locked_r(unsigned int monid, uint64_t enctrmask)
{
	const uintptr_t upmcr0_offset = 0x4180;
	*(uint64_t *)(cpm_impl[monid] + upmcr0_offset) = UPMCR0_INIT | enctrmask;
}

#endif /* UNCORE_PER_CLUSTER */

/*
 * The uncore performance monitoring counters (UPMCs) are 48/64 bits wide.  The
 * high bit is an overflow bit, triggering a PMI, providing 47/63 usable bits.
 */

#define UPMC_MAX ((UINT64_C(1) << UPMC_WIDTH) - 1)
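
/*
 * e.g. with 48-bit UPMCs, UPMC_MAX == (1ULL << 47) - 1 == 0x7fffffffffff;
 * the write helpers below assert that values stay under this limit so the
 * overflow bit is never set directly.
 */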

/*
 * The `__builtin_arm_{r,w}sr` functions require constant strings, since the
 * MSR/MRS instructions encode the registers as immediates.  Otherwise, this
 * would be indexing into an array of strings.
 */

#define UPMC_0_7(X, A) X(0, A); X(1, A); X(2, A); X(3, A); X(4, A); X(5, A); \
	        X(6, A); X(7, A)
#if UNCORE_NCTRS <= 8
#define UPMC_ALL(X, A) UPMC_0_7(X, A)
#else /* UNCORE_NCTRS <= 8 */
#define UPMC_8_15(X, A) X(8, A); X(9, A); X(10, A); X(11, A); X(12, A); \
	        X(13, A); X(14, A); X(15, A)
#define UPMC_ALL(X, A) UPMC_0_7(X, A); UPMC_8_15(X, A)
#endif /* UNCORE_NCTRS > 8 */

static inline uint64_t
uncmon_read_counter_locked_l(__unused unsigned int monid, unsigned int ctr)
{
	assert(ctr < UNCORE_NCTRS);
	switch (ctr) {
#define UPMC_RD(CTR, UNUSED) case (CTR): return __builtin_arm_rsr64(__MSR_STR(UPMC ## CTR))
		UPMC_ALL(UPMC_RD, 0);
#undef UPMC_RD
	default:
		panic("monotonic: invalid counter read %u", ctr);
		__builtin_unreachable();
	}
}

static inline void
uncmon_write_counter_locked_l(__unused unsigned int monid, unsigned int ctr,
    uint64_t count)
{
	assert(count < UPMC_MAX);
	assert(ctr < UNCORE_NCTRS);
	switch (ctr) {
#define UPMC_WR(CTR, COUNT) case (CTR): \
	        return __builtin_arm_wsr64(__MSR_STR(UPMC ## CTR), (COUNT))
		UPMC_ALL(UPMC_WR, count);
#undef UPMC_WR
	default:
		panic("monotonic: invalid counter write %u", ctr);
	}
}

#if UNCORE_PER_CLUSTER

uintptr_t upmc_offs[UNCORE_NCTRS] = {
	[0] = 0x4100, [1] = 0x4248, [2] = 0x4110, [3] = 0x4250, [4] = 0x4120,
	[5] = 0x4258, [6] = 0x4130, [7] = 0x4260, [8] = 0x4140, [9] = 0x4268,
	[10] = 0x4150, [11] = 0x4270, [12] = 0x4160, [13] = 0x4278,
	[14] = 0x4170, [15] = 0x4280,
};

static inline uint64_t
uncmon_read_counter_locked_r(unsigned int mon_id, unsigned int ctr)
{
	assert(mon_id < uncore_nmonitors());
	assert(ctr < UNCORE_NCTRS);
	return *(uint64_t *)(cpm_impl[mon_id] + upmc_offs[ctr]);
}

static inline void
uncmon_write_counter_locked_r(unsigned int mon_id, unsigned int ctr,
    uint64_t count)
{
	assert(count < UPMC_MAX);
	assert(ctr < UNCORE_NCTRS);
	assert(mon_id < uncore_nmonitors());
	*(uint64_t *)(cpm_impl[mon_id] + upmc_offs[ctr]) = count;
}

#endif /* UNCORE_PER_CLUSTER */

static inline void
uncmon_update_locked(unsigned int monid, unsigned int curid, unsigned int ctr)
{
	struct uncore_monitor *mon = &uncore_monitors[monid];
	uint64_t snap = 0;
	if (curid == monid) {
		snap = uncmon_read_counter_locked_l(monid, ctr);
	} else {
#if UNCORE_PER_CLUSTER
		snap = uncmon_read_counter_locked_r(monid, ctr);
#endif /* UNCORE_PER_CLUSTER */
	}
	/* counters should increase monotonically */
	assert(snap >= mon->um_snaps[ctr]);
	mon->um_counts[ctr] += snap - mon->um_snaps[ctr];
	mon->um_snaps[ctr] = snap;
}
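
/*
 * Example: if a hardware counter read 100 at the last snapshot and reads 250
 * now, uncmon_update_locked() adds 150 to um_counts and records 250 as the
 * new snapshot -- so the accumulated counts survive the counter being reset
 * to 0 on overflow or across sleep.
 */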

static inline void
uncmon_program_events_locked_l(unsigned int monid)
{
	/*
	 * UPMESR[01] is the event selection register that determines which event a
	 * counter will count.
	 */
	CTRL_REG_SET("UPMESR0_EL1", uncore_config.uc_events.uce_regs[0]);

#if UNCORE_NCTRS > 8
	CTRL_REG_SET("UPMESR1_EL1", uncore_config.uc_events.uce_regs[1]);
#endif /* UNCORE_NCTRS > 8 */

	/*
	 * UPMECM[0123] are the event core masks for each counter -- whether or not
	 * that counter counts events generated by an agent.  These are set to all
	 * ones so the uncore counters count events from all cores.
	 *
	 * The bits are based off the start of the cluster -- e.g. even if a core
	 * has a CPU ID of 4, it might be the first CPU in a cluster.  Shift the
	 * registers right by the ID of the first CPU in the cluster.
	 */
	CTRL_REG_SET("UPMECM0_EL1",
	    uncore_config.uc_cpu_masks[monid].uccm_regs[0]);
	CTRL_REG_SET("UPMECM1_EL1",
	    uncore_config.uc_cpu_masks[monid].uccm_regs[1]);

#if UNCORE_NCTRS > 8
	CTRL_REG_SET("UPMECM2_EL1",
	    uncore_config.uc_cpu_masks[monid].uccm_regs[2]);
	CTRL_REG_SET("UPMECM3_EL1",
	    uncore_config.uc_cpu_masks[monid].uccm_regs[3]);
#endif /* UNCORE_NCTRS > 8 */
}

#if UNCORE_PER_CLUSTER

static inline void
uncmon_program_events_locked_r(unsigned int monid)
{
	const uintptr_t upmesr_offs[2] = {[0] = 0x41b0, [1] = 0x41b8, };

	for (unsigned int i = 0; i < sizeof(upmesr_offs) / sizeof(upmesr_offs[0]);
	    i++) {
		*(uint64_t *)(cpm_impl[monid] + upmesr_offs[i]) =
		    uncore_config.uc_events.uce_regs[i];
	}

	const uintptr_t upmecm_offs[4] = {
		[0] = 0x4190, [1] = 0x4198, [2] = 0x41a0, [3] = 0x41a8,
	};

	for (unsigned int i = 0; i < sizeof(upmecm_offs) / sizeof(upmecm_offs[0]);
	    i++) {
		*(uint64_t *)(cpm_impl[monid] + upmecm_offs[i]) =
		    uncore_config.uc_cpu_masks[monid].uccm_regs[i];
	}
}

#endif /* UNCORE_PER_CLUSTER */

static void
uncmon_clear_int_locked_l(__unused unsigned int monid)
{
	__builtin_arm_wsr64("UPMSR_EL1", 0);
}

#if UNCORE_PER_CLUSTER

static void
uncmon_clear_int_locked_r(unsigned int monid)
{
	const uintptr_t upmsr_off = 0x41c0;
	*(uint64_t *)(cpm_impl[monid] + upmsr_off) = 0;
}

#endif /* UNCORE_PER_CLUSTER */

/*
 * Get the PMI mask for the provided `monid` -- that is, the bitmap of CPUs
 * that should be sent PMIs for a particular monitor.
 */
static uint64_t
uncmon_get_pmi_mask(unsigned int monid)
{
	uint64_t pmi_mask = uncore_pmi_mask;

#if UNCORE_PER_CLUSTER
	pmi_mask &= topology_info->clusters[monid].cpu_mask;
#else /* UNCORE_PER_CLUSTER */
#pragma unused(monid)
#endif /* !UNCORE_PER_CLUSTER */

	return pmi_mask;
}

/*
 * Initialization routines for the uncore counters.
 */

static void
uncmon_init_locked_l(unsigned int monid)
{
	/*
	 * UPMPCM defines the PMI core mask for the UPMCs -- which cores should
	 * receive interrupts on overflow.
	 */
	CTRL_REG_SET("UPMPCM_EL1", uncmon_get_pmi_mask(monid));
	uncmon_set_counting_locked_l(monid,
	    mt_uncore_enabled ? uncore_active_ctrs : 0);
}

#if UNCORE_PER_CLUSTER

static uintptr_t acc_impl[MAX_NMONITORS] = {};

static void
uncmon_init_locked_r(unsigned int monid)
{
	const uintptr_t upmpcm_off = 0x1010;

	*(uint64_t *)(acc_impl[monid] + upmpcm_off) = uncmon_get_pmi_mask(monid);
	uncmon_set_counting_locked_r(monid,
	    mt_uncore_enabled ? uncore_active_ctrs : 0);
}

#endif /* UNCORE_PER_CLUSTER */

/*
 * Initialize the uncore device for monotonic.
 */
static int
uncore_init(__unused mt_device_t dev)
{
#if HAS_UNCORE_CTRS
	assert(MT_NDEVS > 0);
	mt_devices[MT_NDEVS - 1].mtd_nmonitors = (uint8_t)uncore_nmonitors();
#endif

#if DEVELOPMENT || DEBUG
	/*
	 * Development and debug kernels observe the `uncore_pmi_mask` boot-arg,
	 * allowing PMIs to be routed to the CPUs present in the supplied bitmap.
	 * Do some sanity checks on the value provided.
	 */
	bool parsed_arg = PE_parse_boot_argn("uncore_pmi_mask", &uncore_pmi_mask,
	    sizeof(uncore_pmi_mask));
	if (parsed_arg) {
#if UNCORE_PER_CLUSTER
		if (__builtin_popcount(uncore_pmi_mask) != (int)uncore_nmonitors()) {
			panic("monotonic: invalid uncore PMI mask 0x%x", uncore_pmi_mask);
		}
		for (unsigned int i = 0; i < uncore_nmonitors(); i++) {
			if (__builtin_popcountll(uncmon_get_pmi_mask(i)) != 1) {
				panic("monotonic: invalid uncore PMI CPU for cluster %d in mask 0x%x",
				    i, uncore_pmi_mask);
			}
		}
#else /* UNCORE_PER_CLUSTER */
		if (__builtin_popcount(uncore_pmi_mask) != 1) {
			panic("monotonic: invalid uncore PMI mask 0x%x", uncore_pmi_mask);
		}
#endif /* !UNCORE_PER_CLUSTER */
	} else
#endif /* DEVELOPMENT || DEBUG */
	{
#if UNCORE_PER_CLUSTER
		for (unsigned int i = 0; i < topology_info->num_clusters; i++) {
			uncore_pmi_mask |= 1ULL << topology_info->clusters[i].first_cpu_id;
		}
#else /* UNCORE_PER_CLUSTER */
		/* arbitrarily route to core 0 */
		uncore_pmi_mask |= 1;
#endif /* !UNCORE_PER_CLUSTER */
	}
	assert(uncore_pmi_mask != 0);

	for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
#if UNCORE_PER_CLUSTER
		ml_topology_cluster_t *cluster = &topology_info->clusters[monid];
		cpm_impl[monid] = (uintptr_t)cluster->cpm_IMPL_regs;
		acc_impl[monid] = (uintptr_t)cluster->acc_IMPL_regs;
		assert(cpm_impl[monid] != 0 && acc_impl[monid] != 0);
#endif /* UNCORE_PER_CLUSTER */

		struct uncore_monitor *mon = &uncore_monitors[monid];
		lck_spin_init(&mon->um_lock, &mt_lock_grp, LCK_ATTR_NULL);
	}

	mt_uncore_initted = true;

	return 0;
}

/*
 * Support for monotonic's mtd_read function.
 */

static void
uncmon_read_all_counters(unsigned int monid, unsigned int curmonid,
    uint64_t ctr_mask, uint64_t *counts)
{
	struct uncore_monitor *mon = &uncore_monitors[monid];

	int intrs_en = uncmon_lock(mon);

	for (unsigned int ctr = 0; ctr < UNCORE_NCTRS; ctr++) {
		if (ctr_mask & (1ULL << ctr)) {
			uncmon_update_locked(monid, curmonid, ctr);
			counts[ctr] = mon->um_counts[ctr];
		}
	}

	uncmon_unlock(mon, intrs_en);
}

/*
 * Read all monitors' counters.
 */
static int
uncore_read(uint64_t ctr_mask, uint64_t *counts_out)
{
	assert(ctr_mask != 0);
	assert(counts_out != NULL);

	if (!uncore_active_ctrs) {
		return EPWROFF;
	}
	if (ctr_mask & ~uncore_active_ctrs) {
		return EINVAL;
	}

	unsigned int curmonid = uncmon_get_curid();
	for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
		/*
		 * Find this monitor's starting offset into the `counts_out` array.
		 */
		uint64_t *counts = counts_out + (UNCORE_NCTRS * monid);

		uncmon_read_all_counters(monid, curmonid, ctr_mask, counts);
	}

	return 0;
}

/*
 * Support for monotonic's mtd_add function.
 */

/*
 * Add an event to the current uncore configuration.  This doesn't take effect
 * until the counters are enabled again, so there's no need to involve the
 * monitors.
 */
static int
uncore_add(struct monotonic_config *config, uint32_t *ctr_out)
{
	if (mt_uncore_enabled) {
		return EBUSY;
	}

	uint32_t available = ~uncore_active_ctrs & config->allowed_ctr_mask;

	if (available == 0) {
		return ENOSPC;
	}

	uint32_t valid_ctrs = (UINT32_C(1) << UNCORE_NCTRS) - 1;
	if ((available & valid_ctrs) == 0) {
		return E2BIG;
	}
	/*
	 * Clear the UPMCs the first time an event is added.
	 */
	unsigned int curmonid = uncmon_get_curid();
	if (uncore_active_ctrs == 0) {
		for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
			struct uncore_monitor *mon = &uncore_monitors[monid];
			bool remote = monid != curmonid;

			int intrs_en = uncmon_lock(mon);
			for (unsigned int ctr = 0; ctr < UNCORE_NCTRS; ctr++) {
				if (remote) {
#if UNCORE_PER_CLUSTER
					uncmon_write_counter_locked_r(monid, ctr, 0);
#endif /* UNCORE_PER_CLUSTER */
				} else {
					uncmon_write_counter_locked_l(monid, ctr, 0);
				}
			}
			memset(&mon->um_snaps, 0, sizeof(mon->um_snaps));
			memset(&mon->um_counts, 0, sizeof(mon->um_counts));
			uncmon_unlock(mon, intrs_en);
		}
	}

	uint32_t ctr = __builtin_ffsll(available) - 1;

	uncore_active_ctrs |= UINT64_C(1) << ctr;
	uncore_config.uc_events.uce_ctrs[ctr] = (uint8_t)config->event;
	uint64_t cpu_mask = UINT64_MAX;
	if (config->cpu_mask != 0) {
		cpu_mask = config->cpu_mask;
	}
	for (unsigned int i = 0; i < uncore_nmonitors(); i++) {
#if UNCORE_PER_CLUSTER
		const unsigned int shift = topology_info->clusters[i].first_cpu_id;
#else /* UNCORE_PER_CLUSTER */
		const unsigned int shift = 0;
#endif /* !UNCORE_PER_CLUSTER */
		uncore_config.uc_cpu_masks[i].uccm_masks[ctr] = (uint16_t)(cpu_mask >> shift);
	}

	*ctr_out = ctr;
	return 0;
}

/*
 * Support for monotonic's mtd_reset function.
 */

/*
 * Reset all configuration and disable the counters if they're currently
 * counting.
 */
static void
uncore_reset(void)
{
	mt_uncore_enabled = false;

	unsigned int curmonid = uncmon_get_curid();

	if (mt_owns_counters()) {
		for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
			struct uncore_monitor *mon = &uncore_monitors[monid];
			bool remote = monid != curmonid;

			int intrs_en = uncmon_lock(mon);
			if (remote) {
#if UNCORE_PER_CLUSTER
				uncmon_set_counting_locked_r(monid, 0);
#endif /* UNCORE_PER_CLUSTER */
			} else {
				uncmon_set_counting_locked_l(monid, 0);
			}

			for (unsigned int ctr = 0; ctr < UNCORE_NCTRS; ctr++) {
				if (uncore_active_ctrs & (1U << ctr)) {
					if (remote) {
#if UNCORE_PER_CLUSTER
						uncmon_write_counter_locked_r(monid, ctr, 0);
#endif /* UNCORE_PER_CLUSTER */
					} else {
						uncmon_write_counter_locked_l(monid, ctr, 0);
					}
				}
			}

			memset(&mon->um_snaps, 0, sizeof(mon->um_snaps));
			memset(&mon->um_counts, 0, sizeof(mon->um_counts));
			if (remote) {
#if UNCORE_PER_CLUSTER
				uncmon_clear_int_locked_r(monid);
#endif /* UNCORE_PER_CLUSTER */
			} else {
				uncmon_clear_int_locked_l(monid);
			}

			uncmon_unlock(mon, intrs_en);
		}
	}

	uncore_active_ctrs = 0;
	memset(&uncore_config, 0, sizeof(uncore_config));

	if (mt_owns_counters()) {
		for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
			struct uncore_monitor *mon = &uncore_monitors[monid];
			bool remote = monid != curmonid;

			int intrs_en = uncmon_lock(mon);
			if (remote) {
#if UNCORE_PER_CLUSTER
				uncmon_program_events_locked_r(monid);
#endif /* UNCORE_PER_CLUSTER */
			} else {
				uncmon_program_events_locked_l(monid);
			}
			uncmon_unlock(mon, intrs_en);
		}
	}
}

/*
 * Support for monotonic's mtd_enable function.
 */

static void
uncmon_set_enabled_l(unsigned int monid, bool enable)
{
	struct uncore_monitor *mon = &uncore_monitors[monid];
	int intrs_en = uncmon_lock(mon);

	if (enable) {
		uncmon_init_locked_l(monid);
		uncmon_program_events_locked_l(monid);
		uncmon_set_counting_locked_l(monid, uncore_active_ctrs);
	} else {
		uncmon_set_counting_locked_l(monid, 0);
	}

	uncmon_unlock(mon, intrs_en);
}

#if UNCORE_PER_CLUSTER

static void
uncmon_set_enabled_r(unsigned int monid, bool enable)
{
	struct uncore_monitor *mon = &uncore_monitors[monid];
	int intrs_en = uncmon_lock(mon);

	if (enable) {
		uncmon_init_locked_r(monid);
		uncmon_program_events_locked_r(monid);
		uncmon_set_counting_locked_r(monid, uncore_active_ctrs);
	} else {
		uncmon_set_counting_locked_r(monid, 0);
	}

	uncmon_unlock(mon, intrs_en);
}

#endif /* UNCORE_PER_CLUSTER */

static void
uncore_set_enabled(bool enable)
{
	mt_uncore_enabled = enable;

	unsigned int curmonid = uncmon_get_curid();
	for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
		if (monid != curmonid) {
#if UNCORE_PER_CLUSTER
			uncmon_set_enabled_r(monid, enable);
#endif /* UNCORE_PER_CLUSTER */
		} else {
			uncmon_set_enabled_l(monid, enable);
		}
	}
}

/*
 * Hooks in the machine layer.
 */

static void
uncore_fiq(uint64_t upmsr)
{
	/*
	 * Determine which counters overflowed.
	 */
	uint64_t disable_ctr_mask = (upmsr & UPMSR_OVF_MASK) >> UPMSR_OVF_POS;
	/* should not receive interrupts from inactive counters */
	assert(!(disable_ctr_mask & ~uncore_active_ctrs));

	if (uncore_active_ctrs == 0) {
		return;
	}

	unsigned int monid = uncmon_get_curid();
	struct uncore_monitor *mon = &uncore_monitors[monid];

	int intrs_en = uncmon_lock(mon);

	/*
	 * Disable any counters that overflowed.
	 */
	uncmon_set_counting_locked_l(monid,
	    uncore_active_ctrs & ~disable_ctr_mask);

	/*
	 * With the overflowing counters disabled, capture their counts and reset
	 * the UPMCs and their snapshots to 0.
	 */
	for (unsigned int ctr = 0; ctr < UNCORE_NCTRS; ctr++) {
		if (UPMSR_OVF(upmsr, ctr)) {
			uncmon_update_locked(monid, monid, ctr);
			mon->um_snaps[ctr] = 0;
			uncmon_write_counter_locked_l(monid, ctr, 0);
		}
	}

	/*
	 * Acknowledge the interrupt, now that any overflowed PMCs have been reset.
	 */
	uncmon_clear_int_locked_l(monid);

	/*
	 * Re-enable all active counters.
	 */
	uncmon_set_counting_locked_l(monid, uncore_active_ctrs);

	uncmon_unlock(mon, intrs_en);
}

static void
uncore_save(void)
{
	if (!uncore_active_ctrs) {
		return;
	}

	unsigned int curmonid = uncmon_get_curid();

	for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
		struct uncore_monitor *mon = &uncore_monitors[monid];
		int intrs_en = uncmon_lock(mon);

		if (mt_uncore_enabled) {
			if (monid != curmonid) {
#if UNCORE_PER_CLUSTER
				uncmon_set_counting_locked_r(monid, 0);
#endif /* UNCORE_PER_CLUSTER */
			} else {
				uncmon_set_counting_locked_l(monid, 0);
			}
		}

		for (unsigned int ctr = 0; ctr < UNCORE_NCTRS; ctr++) {
			if (uncore_active_ctrs & (1U << ctr)) {
				uncmon_update_locked(monid, curmonid, ctr);
				mon->um_snaps[ctr] = 0;
				uncmon_write_counter_locked_l(monid, ctr, 0);
			}
		}

		mon->um_sleeping = true;
		uncmon_unlock(mon, intrs_en);
	}
}

static void
uncore_restore(void)
{
	if (!uncore_active_ctrs) {
		return;
	}
	unsigned int curmonid = uncmon_get_curid();

	struct uncore_monitor *mon = &uncore_monitors[curmonid];
	int intrs_en = uncmon_lock(mon);
	if (!mon->um_sleeping) {
		goto out;
	}

	for (unsigned int ctr = 0; ctr < UNCORE_NCTRS; ctr++) {
		if (uncore_active_ctrs & (1U << ctr)) {
			uncmon_write_counter_locked_l(curmonid, ctr, mon->um_snaps[ctr]);
		}
	}
	uncmon_program_events_locked_l(curmonid);
	uncmon_init_locked_l(curmonid);
	mon->um_sleeping = false;

out:
	uncmon_unlock(mon, intrs_en);
}

#endif /* HAS_UNCORE_CTRS */

#pragma mark common hooks

void
mt_early_init(void)
{
	topology_info = ml_get_topology_info();
}

void
mt_cpu_idle(cpu_data_t *cpu)
{
	core_idle(cpu);
}

void
mt_cpu_run(cpu_data_t *cpu)
{
	struct mt_cpu *mtc;

	assert(cpu != NULL);
	assert(ml_get_interrupts_enabled() == FALSE);

	mtc = &cpu->cpu_monotonic;

	for (int i = 0; i < MT_CORE_NFIXED; i++) {
		mt_core_set_snap(i, mtc->mtc_snaps[i]);
	}

	/* re-enable the counters */
	core_init_execution_modes();

	core_set_enabled();
}

void
mt_cpu_down(cpu_data_t *cpu)
{
	mt_cpu_idle(cpu);
}

void
mt_cpu_up(cpu_data_t *cpu)
{
	mt_cpu_run(cpu);
}

void
mt_sleep(void)
{
#if HAS_UNCORE_CTRS
	uncore_save();
#endif /* HAS_UNCORE_CTRS */
}

void
mt_wake_per_core(void)
{
#if HAS_UNCORE_CTRS
	if (mt_uncore_initted) {
		uncore_restore();
	}
#endif /* HAS_UNCORE_CTRS */
}

uint64_t
mt_count_pmis(void)
{
	uint64_t npmis = 0;
	for (unsigned int i = 0; i < topology_info->num_cpus; i++) {
		cpu_data_t *cpu = (cpu_data_t *)CpuDataEntries[topology_info->cpus[i].cpu_id].cpu_data_vaddr;
		npmis += cpu->cpu_monotonic.mtc_npmis;
	}
	return npmis;
}

static void
mt_cpu_pmi(cpu_data_t *cpu, uint64_t pmcr0)
{
	assert(cpu != NULL);
	assert(ml_get_interrupts_enabled() == FALSE);

	__builtin_arm_wsr64("PMCR0_EL1", PMCR0_INIT);
	/*
	 * Ensure the CPMU has flushed any increments at this point, so PMSR is up
	 * to date.
	 */
	__builtin_arm_isb(ISB_SY);

	cpu->cpu_monotonic.mtc_npmis += 1;
	cpu->cpu_stat.pmi_cnt_wake += 1;

#if MONOTONIC_DEBUG
	if (!PMCR0_PMI(pmcr0)) {
		kprintf("monotonic: mt_cpu_pmi but no PMI (PMCR0 = %#llx)\n",
		    pmcr0);
	}
#else /* MONOTONIC_DEBUG */
#pragma unused(pmcr0)
#endif /* !MONOTONIC_DEBUG */

	uint64_t pmsr = __builtin_arm_rsr64("PMSR_EL1");

#if MONOTONIC_DEBUG
	printf("monotonic: cpu = %d, PMSR = 0x%llx, PMCR0 = 0x%llx\n",
	    cpu_number(), pmsr, pmcr0);
#endif /* MONOTONIC_DEBUG */

#if MACH_ASSERT
	uint64_t handled = 0;
#endif /* MACH_ASSERT */

	/*
	 * monotonic handles any fixed counter PMIs.
	 */
	for (unsigned int i = 0; i < MT_CORE_NFIXED; i++) {
		if ((pmsr & PMSR_OVF(i)) == 0) {
			continue;
		}

#if MACH_ASSERT
		handled |= 1ULL << i;
#endif /* MACH_ASSERT */
		uint64_t count = mt_cpu_update_count(cpu, i);
		cpu->cpu_monotonic.mtc_counts[i] += count;
		mt_core_set_snap(i, mt_core_reset_values[i]);
		cpu->cpu_monotonic.mtc_snaps[i] = mt_core_reset_values[i];

		if (mt_microstackshots && mt_microstackshot_ctr == i) {
			bool user_mode = false;
			arm_saved_state_t *state = get_user_regs(current_thread());
			if (state) {
				user_mode = PSR64_IS_USER(get_saved_state_cpsr(state));
			}
			KDBG_RELEASE(KDBG_EVENTID(DBG_MONOTONIC, DBG_MT_DEBUG, 1),
			    mt_microstackshot_ctr, user_mode);
			mt_microstackshot_pmi_handler(user_mode, mt_microstackshot_ctx);
		} else if (mt_debug) {
			KDBG_RELEASE(KDBG_EVENTID(DBG_MONOTONIC, DBG_MT_DEBUG, 2),
			    i, count);
		}
	}

	/*
	 * KPC handles the configurable counter PMIs.
	 */
	for (unsigned int i = MT_CORE_NFIXED; i < CORE_NCTRS; i++) {
		if (pmsr & PMSR_OVF(i)) {
#if MACH_ASSERT
			handled |= 1ULL << i;
#endif /* MACH_ASSERT */
			extern void kpc_pmi_handler(unsigned int ctr);
			kpc_pmi_handler(i);
		}
	}

#if MACH_ASSERT
	uint64_t pmsr_after_handling = __builtin_arm_rsr64("PMSR_EL1");
	if (pmsr_after_handling != 0) {
		unsigned int first_ctr_ovf = __builtin_ffsll(pmsr_after_handling) - 1;
		uint64_t count = 0;
		const char *extra = "";
		if (first_ctr_ovf >= CORE_NCTRS) {
			extra = " (invalid counter)";
		} else {
			count = mt_core_snap(first_ctr_ovf);
		}

		panic("monotonic: PMI status not cleared on exit from handler, "
		    "PMSR = 0x%llx -> 0x%llx after handling, handled 0x%llx, "
		    "PMCR0 = 0x%llx, PMC%d = 0x%llx%s", pmsr, pmsr_after_handling,
		    handled, __builtin_arm_rsr64("PMCR0_EL1"), first_ctr_ovf, count, extra);
	}
#endif /* MACH_ASSERT */

	core_set_enabled();
}

#if CPMU_AIC_PMI
void
mt_cpmu_aic_pmi(cpu_id_t source)
{
	struct cpu_data *curcpu = getCpuDatap();
	if (source != curcpu->interrupt_nub) {
		panic("monotonic: PMI from IOCPU %p delivered to %p", source,
		    curcpu->interrupt_nub);
	}
	mt_cpu_pmi(curcpu, __builtin_arm_rsr64("PMCR0_EL1"));
}
#endif /* CPMU_AIC_PMI */

void
mt_fiq(void *cpu, uint64_t pmcr0, uint64_t upmsr)
{
#if CPMU_AIC_PMI
#pragma unused(cpu, pmcr0)
#else /* CPMU_AIC_PMI */
	mt_cpu_pmi(cpu, pmcr0);
#endif /* !CPMU_AIC_PMI */

#if HAS_UNCORE_CTRS
	if (upmsr != 0) {
		uncore_fiq(upmsr);
	}
#else /* HAS_UNCORE_CTRS */
#pragma unused(upmsr)
#endif /* !HAS_UNCORE_CTRS */
}

void
mt_ownership_change(bool available)
{
#if HAS_UNCORE_CTRS
	/*
	 * No need to take the lock here, since the UPMU state is only manipulated
	 * when the current task already owns the counters and is on its way out.
	 */
	if (!available && uncore_active_ctrs) {
		uncore_reset();
	}
#else
#pragma unused(available)
#endif /* HAS_UNCORE_CTRS */
}

static uint32_t mt_xc_sync;

static void
mt_microstackshot_start_remote(__unused void *arg)
{
	cpu_data_t *cpu = getCpuDatap();

	__builtin_arm_wsr64("PMCR0_EL1", PMCR0_INIT);

	for (int i = 0; i < MT_CORE_NFIXED; i++) {
		uint64_t count = mt_cpu_update_count(cpu, i);
		cpu->cpu_monotonic.mtc_counts[i] += count;
		mt_core_set_snap(i, mt_core_reset_values[i]);
		cpu->cpu_monotonic.mtc_snaps[i] = mt_core_reset_values[i];
	}

	core_set_enabled();

	if (os_atomic_dec(&mt_xc_sync, relaxed) == 0) {
		thread_wakeup((event_t)&mt_xc_sync);
	}
}

int
mt_microstackshot_start_arch(uint64_t period)
{
	uint64_t reset_value = 0;
	int ovf = os_sub_overflow(CTR_MAX, period, &reset_value);
	if (ovf) {
		return ERANGE;
	}
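
	/*
	 * Worked example: with a period of 1,000 events, each counter is reset
	 * to CTR_MAX - 1000 and overflows -- raising a PMI -- after roughly
	 * the next 1,000 increments.
	 */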
	mt_core_reset_values[mt_microstackshot_ctr] = reset_value;
	cpu_broadcast_xcall(&mt_xc_sync, TRUE, mt_microstackshot_start_remote,
	    mt_microstackshot_start_remote /* cannot pass NULL */);
	return 0;
}

#pragma mark dev nodes

struct mt_device mt_devices[] = {
	[0] = {
		.mtd_name = "core",
		.mtd_init = core_init,
	},
#if HAS_UNCORE_CTRS
	[1] = {
		.mtd_name = "uncore",
		.mtd_init = uncore_init,
		.mtd_add = uncore_add,
		.mtd_reset = uncore_reset,
		.mtd_enable = uncore_set_enabled,
		.mtd_read = uncore_read,

		.mtd_ncounters = UNCORE_NCTRS,
	}
#endif /* HAS_UNCORE_CTRS */
};

static_assert(
	(sizeof(mt_devices) / sizeof(mt_devices[0])) == MT_NDEVS,
	"MT_NDEVS macro should be the same as the length of mt_devices");
1451