xref: /xnu-11417.121.6/osfmk/arm64/monotonic_arm64.c (revision a1e26a70f38d1d7daa7b49b258e2f8538ad81650)
1 /*
2  * Copyright (c) 2017-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <arm/cpu_data_internal.h>
30 #include <arm/machine_routines.h>
31 #include <arm64/monotonic.h>
32 #include <kern/assert.h>
33 #include <kern/cpc.h>
34 #include <kern/debug.h> /* panic */
35 #include <kern/kpc.h>
36 #include <kern/monotonic.h>
37 #include <machine/atomic.h>
38 #include <machine/limits.h> /* CHAR_BIT */
39 #include <os/overflow.h>
40 #include <pexpert/arm64/board_config.h>
41 #include <pexpert/device_tree.h> /* SecureDTFindEntry */
42 #include <pexpert/pexpert.h>
43 #include <stdatomic.h>
44 #include <stdint.h>
45 #include <string.h>
46 #include <sys/errno.h>
47 #include <sys/monotonic.h>
48 
49 /*
50  * Ensure that control registers read back what was written under MACH_ASSERT
51  * kernels.
52  *
53  * A static inline function cannot be used due to passing the register through
54  * the builtin -- it requires a constant string as its first argument, since
55  * MSR registers are encoded as an immediate in the instruction.
56  */
57 #if MACH_ASSERT
58 #define CTRL_REG_SET(reg, val) do { \
59 	__builtin_arm_wsr64((reg), (val)); \
60 	uint64_t __check_reg = __builtin_arm_rsr64((reg)); \
61 	if (__check_reg != (val)) { \
62 	        panic("value written to %s was not read back (wrote %llx, read %llx)", \
63 	            #reg, (val), __check_reg); \
64 	} \
65 } while (0)
66 #else /* MACH_ASSERT */
67 #define CTRL_REG_SET(reg, val) __builtin_arm_wsr64((reg), (val))
68 #endif /* MACH_ASSERT */
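
/*
 * For example, programming an event selection register reads as:
 *
 *     CTRL_REG_SET("S3_7_C15_C1_4", uncore_config.uc_events.uce_regs[0]);
 *
 * (see uncmon_program_events_locked_l below).  The register name must be a
 * constant string, so the macro cannot be hidden behind a function that takes
 * the register name as a runtime parameter.
 */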
69 
70 #pragma mark core counters
71 
72 const bool mt_core_supported = true;
73 
74 static const ml_topology_info_t *topology_info;
75 
76 /*
77  * PMC[0-1] are the 48/64-bit fixed counters -- S3_2_C15_C0_0 is cycles and S3_2_C15_C1_0 is
78  * instructions (see arm64/monotonic.h).
79  *
80  * S3_2_C15_C2_0+ are currently handled by kpc.
81  */
82 
83 #if CPMU_64BIT_PMCS
84 #define PMC_WIDTH (63)
85 #else // CPMU_64BIT_PMCS
86 #define PMC_WIDTH (47)
87 #endif // !CPMU_64BIT_PMCS
88 
89 #define CTR_MAX ((UINT64_C(1) << PMC_WIDTH) - 1)
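
/*
 * With 47 usable bits, for instance, CTR_MAX is 0x7fff_ffff_ffff.  Writing a
 * snapshot of CTR_MAX - period to a counter makes it overflow -- and raise a
 * PMI -- after roughly `period` more events; see mt_microstackshot_start_arch
 * below.
 */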
90 
91 #define CYCLES 0
92 #define INSTRS 1
93 
94 /*
95  * S3_2_C15_C0_0's offset into a core's PIO range.
96  *
97  * This allows cores to remotely query another core's counters.
98  */
99 
100 #define PIO_S3_2_C15_C0_0_OFFSET (0x200)
101 
102 /*
103  * The offset of the counter in the configuration registers.  Post-Hurricane
104  * devices have additional counters that need a larger shift than the original
105  * counters.
106  *
107  * XXX For now, just support the lower-numbered counters.
108  */
109 #define CTR_POS(CTR) (CTR)
110 
111 /*
112  * PMCR0 is the main control register for the performance monitor.  It
113  * controls whether the counters are enabled, how they deliver interrupts, and
114  * other features.
115  */
116 
117 #define PMCR0_CTR_EN(CTR) (UINT64_C(1) << CTR_POS(CTR))
118 #define PMCR0_FIXED_EN (PMCR0_CTR_EN(CYCLES) | PMCR0_CTR_EN(INSTRS))
119 /* how interrupts are delivered on a PMI */
120 enum {
121 	PMCR0_INTGEN_OFF = 0,
122 	PMCR0_INTGEN_PMI = 1,
123 	PMCR0_INTGEN_AIC = 2,
124 	PMCR0_INTGEN_HALT = 3,
125 	PMCR0_INTGEN_FIQ = 4,
126 };
127 #define PMCR0_INTGEN_SET(X) ((uint64_t)(X) << 8)
128 
129 #if CPMU_AIC_PMI
130 #define PMCR0_INTGEN_INIT PMCR0_INTGEN_SET(PMCR0_INTGEN_AIC)
131 #else /* CPMU_AIC_PMI */
132 #define PMCR0_INTGEN_INIT PMCR0_INTGEN_SET(PMCR0_INTGEN_FIQ)
133 #endif /* !CPMU_AIC_PMI */
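
/*
 * PMCR0_INTGEN_SET places the interrupt-generation mode in the field starting
 * at bit 8 of PMCR0; for example, PMCR0_INTGEN_SET(PMCR0_INTGEN_FIQ) is
 * 4 << 8 = 0x400.
 */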
134 
135 #define PMCR0_PMI_SHIFT (12)
136 #define PMCR0_CTR_GE8_PMI_SHIFT (44)
137 #define PMCR0_PMI_EN(CTR) (UINT64_C(1) << (PMCR0_PMI_SHIFT + CTR_POS(CTR)))
138 /* fixed counters are always counting */
139 #define PMCR0_PMI_INIT (PMCR0_PMI_EN(CYCLES) | PMCR0_PMI_EN(INSTRS))
140 /* disable counting on a PMI */
141 #define PMCR0_DISCNT_EN (UINT64_C(1) << 20)
142 /* block PMIs until ERET retires */
143 #define PMCR0_WFRFE_EN (UINT64_C(1) << 22)
144 /* count global (not just core-local) L2C events */
145 #define PMCR0_L2CGLOBAL_EN (UINT64_C(1) << 23)
146 /* user mode access to configuration registers */
147 #define PMCR0_USEREN_EN (UINT64_C(1) << 30)
148 #define PMCR0_CTR_GE8_EN_SHIFT (32)
149 
150 #if HAS_CPMU_PC_CAPTURE
151 #define PMCR0_PCC_INIT (UINT64_C(0x7) << 24)
152 #else /* HAS_CPMU_PC_CAPTURE */
153 #define PMCR0_PCC_INIT (0)
154 #endif /* !HAS_CPMU_PC_CAPTURE */
155 
156 #define PMCR0_INIT (PMCR0_INTGEN_INIT | PMCR0_PMI_INIT | PMCR0_PCC_INIT)
157 
158 /*
159  * PMCR1 controls which execution modes count events.
160  */
161 #define PMCR1_EL0A32_EN(CTR) (UINT64_C(1) << (0 + CTR_POS(CTR)))
162 #define PMCR1_EL0A64_EN(CTR) (UINT64_C(1) << (8 + CTR_POS(CTR)))
163 #define PMCR1_EL1A64_EN(CTR) (UINT64_C(1) << (16 + CTR_POS(CTR)))
164 /* PMCR1_EL3A64 is not supported on systems with no monitor */
165 #if defined(APPLEHURRICANE)
166 #define PMCR1_EL3A64_EN(CTR) UINT64_C(0)
167 #else
168 #define PMCR1_EL3A64_EN(CTR) (UINT64_C(1) << (24 + CTR_POS(CTR)))
169 #endif
170 #define PMCR1_ALL_EN(CTR) (PMCR1_EL0A32_EN(CTR) | PMCR1_EL0A64_EN(CTR) | \
171 	                   PMCR1_EL1A64_EN(CTR) | PMCR1_EL3A64_EN(CTR))
172 
173 /* fixed counters always count in all modes */
174 #define PMCR1_INIT (PMCR1_ALL_EN(CYCLES) | PMCR1_ALL_EN(INSTRS))
175 
176 static inline void
177 core_init_execution_modes(void)
178 {
179 	uint64_t pmcr1;
180 
181 	pmcr1 = __builtin_arm_rsr64("S3_1_C15_C1_0");
182 	pmcr1 |= PMCR1_INIT;
183 	__builtin_arm_wsr64("S3_1_C15_C1_0", pmcr1);
184 #if CONFIG_EXCLAVES
185 	__builtin_arm_wsr64("S3_1_C15_C7_2", pmcr1);
186 #endif
187 }
188 
189 #define PMSR_OVF(CTR) (1ULL << (CTR))
190 
191 static int
192 core_init(__unused mt_device_t dev)
193 {
194 	/* the dev node interface to the core counters is still unsupported */
195 	return ENOTSUP;
196 }
197 
198 struct mt_cpu *
199 mt_cur_cpu(void)
200 {
201 	return &getCpuDatap()->cpu_monotonic;
202 }
203 
204 uint64_t
205 mt_core_snap(unsigned int ctr)
206 {
207 	switch (ctr) {
208 	case 0:
209 		return __builtin_arm_rsr64("S3_2_C15_C0_0");
210 	case 1:
211 		return __builtin_arm_rsr64("S3_2_C15_C1_0");
212 	case 2:
213 		return __builtin_arm_rsr64("S3_2_C15_C2_0");
214 	case 3:
215 		return __builtin_arm_rsr64("S3_2_C15_C3_0");
216 	case 4:
217 		return __builtin_arm_rsr64("S3_2_C15_C4_0");
218 	case 5:
219 		return __builtin_arm_rsr64("S3_2_C15_C5_0");
220 	case 6:
221 		return __builtin_arm_rsr64("S3_2_C15_C6_0");
222 	case 7:
223 		return __builtin_arm_rsr64("S3_2_C15_C7_0");
224 #if CORE_NCTRS > 8
225 	case 8:
226 		return __builtin_arm_rsr64("S3_2_C15_C9_0");
227 	case 9:
228 		return __builtin_arm_rsr64("S3_2_C15_C10_0");
229 #endif /* CORE_NCTRS > 8 */
230 	default:
231 		panic("monotonic: invalid core counter read: %u", ctr);
232 		__builtin_unreachable();
233 	}
234 }
235 
236 void
237 mt_core_set_snap(unsigned int ctr, uint64_t count)
238 {
239 	switch (ctr) {
240 	case 0:
241 		__builtin_arm_wsr64("S3_2_C15_C0_0", count);
242 		break;
243 	case 1:
244 		__builtin_arm_wsr64("S3_2_C15_C1_0", count);
245 		break;
246 	default:
247 		panic("monotonic: invalid core counter %u write %llu", ctr, count);
248 		__builtin_unreachable();
249 	}
250 }
251 
252 static void
253 core_set_enabled(void)
254 {
255 	uint32_t kpc_mask = kpc_get_running() &
256 	    (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK);
257 	uint64_t pmcr0 = __builtin_arm_rsr64("S3_1_C15_C0_0");
258 	pmcr0 |= PMCR0_INIT | PMCR0_FIXED_EN;
259 
260 	if (kpc_mask != 0) {
261 		uint64_t kpc_ctrs = kpc_get_configurable_pmc_mask(kpc_mask) <<
262 		        MT_CORE_NFIXED;
263 #if KPC_ARM64_CONFIGURABLE_COUNT > 6
264 		uint64_t ctrs_ge8 = kpc_ctrs >> 8;
265 		pmcr0 |= ctrs_ge8 << PMCR0_CTR_GE8_EN_SHIFT;
266 		pmcr0 |= ctrs_ge8 << PMCR0_CTR_GE8_PMI_SHIFT;
267 		kpc_ctrs &= (1ULL << 8) - 1;
268 #endif /* KPC_ARM64_CONFIGURABLE_COUNT > 6 */
269 		kpc_ctrs |= kpc_ctrs << PMCR0_PMI_SHIFT;
270 		pmcr0 |= kpc_ctrs;
271 	}
272 
273 	__builtin_arm_wsr64("S3_1_C15_C0_0", pmcr0);
274 #if MACH_ASSERT
275 	/*
276 	 * Only check for the values that were ORed in.
277 	 */
278 	uint64_t pmcr0_check = __builtin_arm_rsr64("S3_1_C15_C0_0");
279 	if ((pmcr0_check & (PMCR0_INIT | PMCR0_FIXED_EN)) != (PMCR0_INIT | PMCR0_FIXED_EN)) {
280 		panic("monotonic: hardware ignored enable (read %llx, wrote %llx)",
281 		    pmcr0_check, pmcr0);
282 	}
283 #endif /* MACH_ASSERT */
284 }
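
/*
 * kpc's configurable counters sit above the two fixed counters, so their
 * enable bits are shifted up by MT_CORE_NFIXED before being merged into
 * PMCR0.  The enable bits for counters 8 and above (when present) live in a
 * separate field starting at bit 32, with their PMI-enable bits at bit 44.
 */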
285 
286 static void
287 core_idle(__unused cpu_data_t *cpu)
288 {
289 	assert(cpu != NULL);
290 	assert(ml_get_interrupts_enabled() == FALSE);
291 
292 #if DEBUG
293 	uint64_t pmcr0 = __builtin_arm_rsr64("S3_1_C15_C0_0");
294 	if ((pmcr0 & PMCR0_FIXED_EN) == 0) {
295 		panic("monotonic: counters disabled before idling, pmcr0 = 0x%llx", pmcr0);
296 	}
297 	uint64_t pmcr1 = __builtin_arm_rsr64("S3_1_C15_C1_0");
298 	if ((pmcr1 & PMCR1_INIT) == 0) {
299 		panic("monotonic: counter modes disabled before idling, pmcr1 = 0x%llx", pmcr1);
300 	}
301 #endif /* DEBUG */
302 
303 	/* disable counters before updating */
304 	__builtin_arm_wsr64("S3_1_C15_C0_0", PMCR0_INIT);
305 
306 	mt_update_fixed_counts();
307 }
308 
309 #pragma mark uncore performance monitor
310 
311 #if HAS_UNCORE_CTRS
312 
313 static bool mt_uncore_initted = false;
314 
315 static bool mt_uncore_suspended_cpd = false;
316 
317 /*
318  * Uncore Performance Monitor
319  *
320  * Uncore performance monitors provide event-counting for the last-level caches
321  * (LLCs).  Each LLC has its own uncore performance monitor, which can only be
322  * accessed by cores that use that LLC.  Like the core performance monitoring
323  * unit, uncore counters are configured globally.  If there is more than one
324  * LLC on the system, PIO reads must be used to satisfy uncore requests (using
325  * the `_r` remote variants of the access functions).  Otherwise, local MSRs
326  * suffice (using the `_l` local variants of the access functions).
327  */
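
/*
 * Callers holding a monitor's lock typically branch on whether the monitor is
 * local, roughly:
 *
 *     if (uncmon_is_remote(monid)) {
 *     #if UNCORE_PER_CLUSTER
 *             uncmon_write_counter_locked_r(monid, ctr, 0);
 *     #endif
 *     } else {
 *             uncmon_write_counter_locked_l(monid, ctr, 0);
 *     }
 *
 * as done in uncore_add and uncore_reset below.
 */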
328 
329 #if UNCORE_PER_CLUSTER
330 #define MAX_NMONITORS MAX_CPU_CLUSTERS
331 static uintptr_t cpm_impl[MAX_NMONITORS] = {};
332 #else
333 #define MAX_NMONITORS (1)
334 #endif /* UNCORE_PER_CLUSTER */
335 
336 #if UNCORE_VERSION >= 2
337 /*
338  * V2 uncore monitors feature a CTI mechanism -- the second bit of UPMSR is
339  * used to track if a CTI has been triggered due to an overflow.
340  */
341 #define UPMSR_OVF_POS 2
342 #else /* UNCORE_VERSION >= 2 */
343 #define UPMSR_OVF_POS 1
344 #endif /* UNCORE_VERSION < 2 */
345 #define UPMSR_OVF(R, CTR) ((R) >> ((CTR) + UPMSR_OVF_POS) & 0x1)
346 #define UPMSR_OVF_MASK    (((UINT64_C(1) << UNCORE_NCTRS) - 1) << UPMSR_OVF_POS)
347 
348 #define UPMPCM_CORE(ID) (UINT64_C(1) << (ID))
349 
350 #if UPMU_64BIT_PMCS
351 #define UPMC_WIDTH (63)
352 #else // UPMU_64BIT_PMCS
353 #define UPMC_WIDTH (47)
354 #endif // !UPMU_64BIT_PMCS
355 
356 /*
357  * The uncore_pmi_mask is a bitmask of CPUs that receive uncore PMIs.  It's
358  * initialized by uncore_init and controllable by the uncore_pmi_mask boot-arg.
359  */
360 static int32_t uncore_pmi_mask = 0;
361 
362 /*
363  * The uncore_active_ctrs is a bitmask of uncore counters that are currently
364  * requested.
365  */
366 static uint16_t uncore_active_ctrs = 0;
367 static_assert(sizeof(uncore_active_ctrs) * CHAR_BIT >= UNCORE_NCTRS,
368     "counter mask should fit the full range of counters");
369 
370 /*
371  * mt_uncore_enabled is true when any uncore counters are active.
372  */
373 bool mt_uncore_enabled = false;
374 
375 /*
376  * The uncore_events are the event configurations for each uncore counter -- as
377  * a union to make it easy to program the hardware registers.
378  */
379 static struct uncore_config {
380 	union {
381 		uint8_t uce_ctrs[UNCORE_NCTRS];
382 		uint64_t uce_regs[UNCORE_NCTRS / 8];
383 	} uc_events;
384 	union {
385 		uint16_t uccm_masks[UNCORE_NCTRS];
386 		uint64_t uccm_regs[UNCORE_NCTRS / 4];
387 	} uc_cpu_masks[MAX_NMONITORS];
388 } uncore_config;
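
/*
 * The unions overlay per-counter views onto whole-register views: the 8-bit
 * event selectors in uce_ctrs[] pack eight per 64-bit UPMESR register in
 * uce_regs[], and the 16-bit CPU masks in uccm_masks[] pack four per 64-bit
 * UPMECM register in uccm_regs[].  Writing uce_ctrs[ctr] = selector therefore
 * lands in the correct field of the register later programmed with
 * CTRL_REG_SET.
 */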
389 
390 static struct uncore_monitor {
391 	/*
392 	 * The last snapshot of each of the hardware counter values.
393 	 */
394 	uint64_t um_snaps[UNCORE_NCTRS];
395 
396 	/*
397 	 * The accumulated counts for each counter.
398 	 */
399 	uint64_t um_counts[UNCORE_NCTRS];
400 
401 	/*
402 	 * Protects accessing the hardware registers and fields in this structure.
403 	 */
404 	lck_spin_t um_lock;
405 
406 	/*
407 	 * Whether this monitor needs its registers restored after wake.
408 	 */
409 	bool um_sleeping;
410 
411 #if MACH_ASSERT
412 	/*
413 	 * Save the last ID that read from this monitor.
414 	 */
415 	uint8_t um_last_read_id;
416 
417 	/*
418 	 * Save whether this monitor has been read since sleeping.
419 	 */
420 	bool um_read_since_sleep;
421 #endif /* MACH_ASSERT */
422 } uncore_monitors[MAX_NMONITORS];
423 
424 /*
425  * Each uncore unit has its own monitor, corresponding to the memory hierarchy
426  * of the LLCs.
427  */
428 static unsigned int
429 uncore_nmonitors(void)
430 {
431 #if UNCORE_PER_CLUSTER
432 	return topology_info->num_clusters;
433 #else /* UNCORE_PER_CLUSTER */
434 	return 1;
435 #endif /* !UNCORE_PER_CLUSTER */
436 }
437 
438 static unsigned int
439 uncmon_get_curid(void)
440 {
441 #if UNCORE_PER_CLUSTER
442 	return cpu_cluster_id();
443 #else /* UNCORE_PER_CLUSTER */
444 	return 0;
445 #endif /* !UNCORE_PER_CLUSTER */
446 }
447 
448 /*
449  * Per-monitor locks are required to prevent races with the PMI handlers, not
450  * from other CPUs that are configuring (those are serialized with monotonic's
451  * per-device lock).
452  */
453 
454 static int
455 uncmon_lock(struct uncore_monitor *mon)
456 {
457 	int intrs_en = ml_set_interrupts_enabled(FALSE);
458 	lck_spin_lock(&mon->um_lock);
459 	return intrs_en;
460 }
461 
462 static void
463 uncmon_unlock(struct uncore_monitor *mon, int intrs_en)
464 {
465 	lck_spin_unlock(&mon->um_lock);
466 	(void)ml_set_interrupts_enabled(intrs_en);
467 }
468 
469 static bool
470 uncmon_is_remote(unsigned int monid)
471 {
472 	if (monid >= MAX_NMONITORS) {
473 		panic("monotonic: %s: invalid monid %u (> %u)", __FUNCTION__, monid, MAX_NMONITORS);
474 	}
475 	struct uncore_monitor *mon = &uncore_monitors[monid];
476 #pragma unused(mon)
477 	LCK_SPIN_ASSERT(&mon->um_lock, LCK_ASSERT_OWNED);
478 	return monid == uncmon_get_curid();
479 }
480 
481 /*
482  * Helper functions for accessing the hardware -- these require the monitor be
483  * locked to prevent other CPUs' PMI handlers from making local modifications
484  * or updating the counts.
485  */
486 
487 #if UNCORE_VERSION >= 2
488 #define UPMCR0_INTEN_POS 20
489 #define UPMCR0_INTGEN_POS 16
490 #else /* UNCORE_VERSION >= 2 */
491 #define UPMCR0_INTEN_POS 12
492 #define UPMCR0_INTGEN_POS 8
493 #endif /* UNCORE_VERSION < 2 */
494 enum {
495 	UPMCR0_INTGEN_OFF = 0,
496 	/* fast PMIs are only supported on core CPMU */
497 	UPMCR0_INTGEN_AIC = 2,
498 	UPMCR0_INTGEN_HALT = 3,
499 	UPMCR0_INTGEN_FIQ = 4,
500 };
501 /* always enable interrupts for all counters */
502 #define UPMCR0_INTEN (((1ULL << UNCORE_NCTRS) - 1) << UPMCR0_INTEN_POS)
503 /* route uncore PMIs through the FIQ path */
504 #define UPMCR0_INIT (UPMCR0_INTEN | (UPMCR0_INTGEN_FIQ << UPMCR0_INTGEN_POS))
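
/*
 * For a version-2 UPMU with 16 counters, for example, UPMCR0_INTEN is
 * 0xffff << 20 and the FIQ interrupt-generation mode occupies the field
 * starting at bit 16.
 */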
505 
506 /*
507  * Turn counting on for counters set in the `enctrmask` and off, otherwise.
508  */
509 static inline void
510 uncmon_set_counting_locked_l(__unused unsigned int monid, uint64_t enctrmask)
511 {
512 	/*
513 	 * UPMCR0 controls which counters are enabled and how interrupts are generated
514 	 * for overflows.
515 	 */
516 	__builtin_arm_wsr64("S3_7_C15_C0_4", UPMCR0_INIT | enctrmask);
517 }
518 
519 #if UNCORE_PER_CLUSTER
520 
521 /*
522  * Turn counting on for counters set in the `enctrmask` and off, otherwise.
523  */
524 static inline void
525 uncmon_set_counting_locked_r(unsigned int monid, uint64_t enctrmask)
526 {
527 	const uintptr_t upmcr0_offset = 0x4180;
528 	*(uint64_t *)(cpm_impl[monid] + upmcr0_offset) = UPMCR0_INIT | enctrmask;
529 }
530 
531 #endif /* UNCORE_PER_CLUSTER */
532 
533 /*
534  * The uncore performance monitoring counters (UPMCs) are 48/64-bits wide.  The
535  * high bit is an overflow bit, triggering a PMI, providing 47/63 usable bits.
536  */
537 
538 #define UPMC_MAX ((UINT64_C(1) << UPMC_WIDTH) - 1)
539 
540 static void
541 _broadcast_block_trampoline(void *blk)
542 {
543 	void (^cb)(unsigned int) = blk;
544 	const ml_topology_info_t *topo = ml_get_topology_info();
545 	unsigned int cpu = cpu_number();
546 	unsigned int cluster = topo->cpus[cpu].cluster_id;
547 	if (topo->clusters[cluster].first_cpu_id == cpu) {
548 		cb(topo->cpus[cpu].cluster_id);
549 	}
550 }
551 
552 __unused
553 static void
554 _broadcast_each_cluster(void (^cb)(unsigned int cluster_id))
555 {
556 	cpu_broadcast_xcall_simple(TRUE, _broadcast_block_trampoline, cb);
557 }
558 
559 __unused
560 static inline uint64_t
561 uncmon_read_counter_locked_l(__unused unsigned int monid, unsigned int ctr)
562 {
563 	assert(ctr < UNCORE_NCTRS);
564 	switch (ctr) {
565 	case 0:
566 		return __builtin_arm_rsr64("S3_7_C15_C7_4");
567 	case 1:
568 		return __builtin_arm_rsr64("S3_7_C15_C8_4");
569 	case 2:
570 		return __builtin_arm_rsr64("S3_7_C15_C9_4");
571 	case 3:
572 		return __builtin_arm_rsr64("S3_7_C15_C10_4");
573 	case 4:
574 		return __builtin_arm_rsr64("S3_7_C15_C11_4");
575 	case 5:
576 		return __builtin_arm_rsr64("S3_7_C15_C12_4");
577 	case 6:
578 		return __builtin_arm_rsr64("S3_7_C15_C13_4");
579 	case 7:
580 		return __builtin_arm_rsr64("S3_7_C15_C14_4");
581 #if UNCORE_NCTRS > 8
582 	case 8:
583 		return __builtin_arm_rsr64("S3_7_C15_C0_5");
584 	case 9:
585 		return __builtin_arm_rsr64("S3_7_C15_C1_5");
586 	case 10:
587 		return __builtin_arm_rsr64("S3_7_C15_C2_5");
588 	case 11:
589 		return __builtin_arm_rsr64("S3_7_C15_C3_5");
590 	case 12:
591 		return __builtin_arm_rsr64("S3_7_C15_C4_5");
592 	case 13:
593 		return __builtin_arm_rsr64("S3_7_C15_C5_5");
594 	case 14:
595 		return __builtin_arm_rsr64("S3_7_C15_C6_5");
596 	case 15:
597 		return __builtin_arm_rsr64("S3_7_C15_C7_5");
598 #endif /* UNCORE_NCTRS > 8 */
599 	default:
600 		panic("monotonic: invalid counter read %u", ctr);
601 		__builtin_unreachable();
602 	}
603 }
604 
605 static inline void
606 uncmon_write_counter_locked_l(__unused unsigned int monid, unsigned int ctr,
607     uint64_t count)
608 {
609 	assert(count < UPMC_MAX);
610 	assert(ctr < UNCORE_NCTRS);
611 	switch (ctr) {
612 	case 0:
613 		return __builtin_arm_wsr64("S3_7_C15_C7_4", count);
614 	case 1:
615 		return __builtin_arm_wsr64("S3_7_C15_C8_4", count);
616 	case 2:
617 		return __builtin_arm_wsr64("S3_7_C15_C9_4", count);
618 	case 3:
619 		return __builtin_arm_wsr64("S3_7_C15_C10_4", count);
620 	case 4:
621 		return __builtin_arm_wsr64("S3_7_C15_C11_4", count);
622 	case 5:
623 		return __builtin_arm_wsr64("S3_7_C15_C12_4", count);
624 	case 6:
625 		return __builtin_arm_wsr64("S3_7_C15_C13_4", count);
626 	case 7:
627 		return __builtin_arm_wsr64("S3_7_C15_C14_4", count);
628 #if UNCORE_NCTRS > 8
629 	case 8:
630 		return __builtin_arm_wsr64("S3_7_C15_C0_5", count);
631 	case 9:
632 		return __builtin_arm_wsr64("S3_7_C15_C1_5", count);
633 	case 10:
634 		return __builtin_arm_wsr64("S3_7_C15_C2_5", count);
635 	case 11:
636 		return __builtin_arm_wsr64("S3_7_C15_C3_5", count);
637 	case 12:
638 		return __builtin_arm_wsr64("S3_7_C15_C4_5", count);
639 	case 13:
640 		return __builtin_arm_wsr64("S3_7_C15_C5_5", count);
641 	case 14:
642 		return __builtin_arm_wsr64("S3_7_C15_C6_5", count);
643 	case 15:
644 		return __builtin_arm_wsr64("S3_7_C15_C7_5", count);
645 #endif /* UNCORE_NCTRS > 8 */
646 	default:
647 		panic("monotonic: invalid counter write %u", ctr);
648 	}
649 }
650 
651 #if UNCORE_PER_CLUSTER
652 
653 uintptr_t upmc_offs[UNCORE_NCTRS] = {
654 	[0] = 0x4100, [1] = 0x4248, [2] = 0x4110, [3] = 0x4250, [4] = 0x4120,
655 	[5] = 0x4258, [6] = 0x4130, [7] = 0x4260, [8] = 0x4140, [9] = 0x4268,
656 	[10] = 0x4150, [11] = 0x4270, [12] = 0x4160, [13] = 0x4278,
657 	[14] = 0x4170, [15] = 0x4280,
658 };
659 
660 static inline uint64_t
661 uncmon_read_counter_locked_r(unsigned int mon_id, unsigned int ctr)
662 {
663 	assert(mon_id < uncore_nmonitors());
664 	assert(ctr < UNCORE_NCTRS);
665 	return *(uint64_t *)(cpm_impl[mon_id] + upmc_offs[ctr]);
666 }
667 
668 static inline void
669 uncmon_write_counter_locked_r(unsigned int mon_id, unsigned int ctr,
670     uint64_t count)
671 {
672 	assert(count < UPMC_MAX);
673 	assert(ctr < UNCORE_NCTRS);
674 	assert(mon_id < uncore_nmonitors());
675 	*(uint64_t *)(cpm_impl[mon_id] + upmc_offs[ctr]) = count;
676 }
677 
678 #endif /* UNCORE_PER_CLUSTER */
679 
680 static inline void
681 uncmon_update_locked(unsigned int monid, unsigned int ctr)
682 {
683 	struct uncore_monitor *mon = &uncore_monitors[monid];
684 	if (!mon->um_sleeping) {
685 		uint64_t snap = 0;
686 #if UNCORE_PER_CLUSTER
687 		snap = uncmon_read_counter_locked_r(monid, ctr);
688 #else /* UNCORE_PER_CLUSTER */
689 		snap = uncmon_read_counter_locked_l(monid, ctr);
690 #endif /* UNCORE_PER_CLUSTER */
691 		if (snap < mon->um_snaps[ctr]) {
692 #if MACH_ASSERT
693 #if UNCORE_PER_CLUSTER
694 			uint64_t remote_value = uncmon_read_counter_locked_r(monid, ctr);
695 #endif /* UNCORE_PER_CLUSTER */
696 			panic("monotonic: UPMC%d on UPMU %d went backwards from "
697 			    "%llx to %llx, read via %s, last was %s from UPMU %hhd%s"
698 #if UNCORE_PER_CLUSTER
699 			    ", re-read remote value is %llx"
700 #endif /* UNCORE_PER_CLUSTER */
701 			    , ctr,
702 			    monid, mon->um_snaps[ctr], snap,
703 			    uncmon_get_curid() == monid ? "local" : "remote",
704 			    mon->um_last_read_id == monid ? "local" : "remote",
705 			    mon->um_last_read_id,
706 			    mon->um_read_since_sleep ? "" : ", first read since sleep"
707 #if UNCORE_PER_CLUSTER
708 			    , remote_value
709 #endif /* UNCORE_PER_CLUSTER */
710 			    );
711 #else /* MACH_ASSERT */
712 			snap = mon->um_snaps[ctr];
713 #endif /* !MACH_ASSERT */
714 		}
715 		mon->um_counts[ctr] += snap - mon->um_snaps[ctr];
716 		mon->um_snaps[ctr] = snap;
717 	}
718 }
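
/*
 * uncmon_update_locked accumulates deltas rather than raw values: with a
 * previous snapshot of 0x100 and a new hardware read of 0x180, um_counts
 * gains 0x80 and um_snaps becomes 0x180.  A read that appears to go backwards
 * is either a hardware or programming bug (panicked on MACH_ASSERT kernels)
 * or is clamped to the previous snapshot otherwise.
 */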
719 
720 static inline void
721 uncmon_program_events_locked_l(unsigned int monid)
722 {
723 	/*
724 	 * UPMESR[01] is the event selection register that determines which event a
725 	 * counter will count.
726 	 */
727 	CTRL_REG_SET("S3_7_C15_C1_4", uncore_config.uc_events.uce_regs[0]);
728 
729 #if UNCORE_NCTRS > 8
730 	CTRL_REG_SET("S3_7_C15_C11_5", uncore_config.uc_events.uce_regs[1]);
731 #endif /* UNCORE_NCTRS > 8 */
732 
733 	/*
734 	 * UPMECM[0123] are the event core masks for each counter -- whether or not
735 	 * that counter counts events generated by an agent.  These are set to all
736 	 * ones so the uncore counters count events from all cores.
737 	 *
738 	 * The bits are based off the start of the cluster -- e.g. even if a core
739 	 * has a CPU ID of 4, it might be the first CPU in a cluster.  Shift the
740 	 * registers right by the ID of the first CPU in the cluster.
741 	 */
742 	CTRL_REG_SET("S3_7_C15_C3_4",
743 	    uncore_config.uc_cpu_masks[monid].uccm_regs[0]);
744 	CTRL_REG_SET("S3_7_C15_C4_4",
745 	    uncore_config.uc_cpu_masks[monid].uccm_regs[1]);
746 
747 #if UNCORE_NCTRS > 8
748 	CTRL_REG_SET("S3_7_C15_C8_5",
749 	    uncore_config.uc_cpu_masks[monid].uccm_regs[2]);
750 	CTRL_REG_SET("S3_7_C15_C9_5",
751 	    uncore_config.uc_cpu_masks[monid].uccm_regs[3]);
752 #endif /* UNCORE_NCTRS > 8 */
753 }
754 
755 #if UNCORE_PER_CLUSTER
756 
757 static inline void
758 uncmon_program_events_locked_r(unsigned int monid)
759 {
760 	const uintptr_t upmesr_offs[2] = {[0] = 0x41b0, [1] = 0x41b8, };
761 
762 	for (unsigned int i = 0; i < sizeof(upmesr_offs) / sizeof(upmesr_offs[0]);
763 	    i++) {
764 		*(uint64_t *)(cpm_impl[monid] + upmesr_offs[i]) =
765 		    uncore_config.uc_events.uce_regs[i];
766 	}
767 
768 	const uintptr_t upmecm_offs[4] = {
769 		[0] = 0x4190, [1] = 0x4198, [2] = 0x41a0, [3] = 0x41a8,
770 	};
771 
772 	for (unsigned int i = 0; i < sizeof(upmecm_offs) / sizeof(upmecm_offs[0]);
773 	    i++) {
774 		*(uint64_t *)(cpm_impl[monid] + upmecm_offs[i]) =
775 		    uncore_config.uc_cpu_masks[monid].uccm_regs[i];
776 	}
777 }
778 
779 #endif /* UNCORE_PER_CLUSTER */
780 
781 static void
782 uncmon_clear_int_locked_l(__unused unsigned int monid)
783 {
784 	__builtin_arm_wsr64("S3_7_C15_C6_4", 0);
785 }
786 
787 #if UNCORE_PER_CLUSTER
788 
789 static void
790 uncmon_clear_int_locked_r(unsigned int monid)
791 {
792 	const uintptr_t upmsr_off = 0x41c0;
793 	*(uint64_t *)(cpm_impl[monid] + upmsr_off) = 0;
794 }
795 
796 #endif /* UNCORE_PER_CLUSTER */
797 
798 /*
799  * Get the PMI mask for the provided `monid` -- that is, the bitmap of CPUs
800  * that should be sent PMIs for a particular monitor.
801  */
802 static uint64_t
803 uncmon_get_pmi_mask(unsigned int monid)
804 {
805 	uint64_t pmi_mask = uncore_pmi_mask;
806 
807 #if UNCORE_PER_CLUSTER
808 	pmi_mask &= topology_info->clusters[monid].cpu_mask;
809 #else /* UNCORE_PER_CLUSTER */
810 #pragma unused(monid)
811 #endif /* !UNCORE_PER_CLUSTER */
812 
813 	return pmi_mask;
814 }
815 
816 /*
817  * Initialization routines for the uncore counters.
818  */
819 
820 static void
821 uncmon_init_locked_l(unsigned int monid)
822 {
823 	/*
824 	 * UPMPCM defines the PMI core mask for the UPMCs -- which cores should
825 	 * receive interrupts on overflow.
826 	 */
827 	CTRL_REG_SET("S3_7_C15_C5_4", uncmon_get_pmi_mask(monid));
828 	uncmon_set_counting_locked_l(monid,
829 	    mt_uncore_enabled ? uncore_active_ctrs : 0);
830 }
831 
832 #if UNCORE_PER_CLUSTER
833 
834 static uintptr_t acc_impl[MAX_NMONITORS] = {};
835 
836 static void
837 uncmon_init_locked_r(unsigned int monid)
838 {
839 	const uintptr_t upmpcm_off = 0x1010;
840 
841 	*(uint64_t *)(acc_impl[monid] + upmpcm_off) = uncmon_get_pmi_mask(monid);
842 	uncmon_set_counting_locked_r(monid,
843 	    mt_uncore_enabled ? uncore_active_ctrs : 0);
844 }
845 
846 #endif /* UNCORE_PER_CLUSTER */
847 
848 /*
849  * Initialize the uncore device for monotonic.
850  */
851 static int
852 uncore_init(__unused mt_device_t dev)
853 {
854 #if HAS_UNCORE_CTRS
855 	assert(MT_NDEVS > 0);
856 	mt_devices[MT_NDEVS - 1].mtd_nmonitors = (uint8_t)uncore_nmonitors();
857 #endif
858 
859 #if DEVELOPMENT || DEBUG
860 	/*
861 	 * Development and debug kernels observe the `uncore_pmi_mask` boot-arg,
862 	 * allowing PMIs to be routed to the CPUs present in the supplied bitmap.
863 	 * Do some sanity checks on the value provided.
864 	 */
865 	bool parsed_arg = PE_parse_boot_argn("uncore_pmi_mask", &uncore_pmi_mask,
866 	    sizeof(uncore_pmi_mask));
867 	if (parsed_arg) {
868 #if UNCORE_PER_CLUSTER
869 		if (__builtin_popcount(uncore_pmi_mask) != (int)uncore_nmonitors()) {
870 			panic("monotonic: invalid uncore PMI mask 0x%x", uncore_pmi_mask);
871 		}
872 		for (unsigned int i = 0; i < uncore_nmonitors(); i++) {
873 			if (__builtin_popcountll(uncmon_get_pmi_mask(i)) != 1) {
874 				panic("monotonic: invalid uncore PMI CPU for cluster %d in mask 0x%x",
875 				    i, uncore_pmi_mask);
876 			}
877 		}
878 #else /* UNCORE_PER_CLUSTER */
879 		if (__builtin_popcount(uncore_pmi_mask) != 1) {
880 			panic("monotonic: invalid uncore PMI mask 0x%x", uncore_pmi_mask);
881 		}
882 #endif /* !UNCORE_PER_CLUSTER */
883 	} else
884 #endif /* DEVELOPMENT || DEBUG */
885 	{
886 		/* arbitrarily route to core 0 in each cluster */
887 		uncore_pmi_mask |= 1;
888 	}
889 	assert(uncore_pmi_mask != 0);
890 
891 	for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
892 #if UNCORE_PER_CLUSTER
893 		ml_topology_cluster_t *cluster = &topology_info->clusters[monid];
894 		cpm_impl[monid] = (uintptr_t)cluster->cpm_IMPL_regs;
895 		acc_impl[monid] = (uintptr_t)cluster->acc_IMPL_regs;
896 		assert(cpm_impl[monid] != 0 && acc_impl[monid] != 0);
897 #endif /* UNCORE_PER_CLUSTER */
898 
899 		struct uncore_monitor *mon = &uncore_monitors[monid];
900 		lck_spin_init(&mon->um_lock, &mt_lock_grp, LCK_ATTR_NULL);
901 	}
902 
903 	mt_uncore_initted = true;
904 
905 	return 0;
906 }
907 
908 /*
909  * Support for monotonic's mtd_read function.
910  */
911 
912 static void
913 uncmon_read_all_counters(unsigned int monid, uint64_t ctr_mask, uint64_t *counts)
914 {
915 	struct uncore_monitor *mon = &uncore_monitors[monid];
916 
917 	int intrs_en = uncmon_lock(mon);
918 
919 	for (unsigned int ctr = 0; ctr < UNCORE_NCTRS; ctr++) {
920 		if (ctr_mask & (1ULL << ctr)) {
921 			if (!mon->um_sleeping) {
922 				uncmon_update_locked(monid, ctr);
923 			}
924 			counts[ctr] = mon->um_counts[ctr];
925 		}
926 	}
927 #if MACH_ASSERT
928 	mon->um_read_since_sleep = true;
929 #endif /* MACH_ASSERT */
930 
931 	uncmon_unlock(mon, intrs_en);
932 }
933 
934 /*
935  * Read all monitor's counters.
936  */
937 static int
938 uncore_read(uint64_t ctr_mask, uint64_t *counts_out)
939 {
940 	assert(ctr_mask != 0);
941 	assert(counts_out != NULL);
942 
943 	if (!uncore_active_ctrs) {
944 		return EPWROFF;
945 	}
946 	if (ctr_mask & ~uncore_active_ctrs) {
947 		return EINVAL;
948 	}
949 
950 	for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
951 		/*
952 		 * Find this monitor's starting offset into the `counts_out` array.
953 		 */
954 		uint64_t *counts = counts_out + (UNCORE_NCTRS * monid);
955 		uncmon_read_all_counters(monid, ctr_mask, counts);
956 	}
957 
958 	return 0;
959 }
960 
961 /*
962  * Support for monotonic's mtd_add function.
963  */
964 
965 /*
966  * Add an event to the current uncore configuration.  This doesn't take effect
967  * until the counters are enabled again, so there's no need to involve the
968  * monitors.
969  */
970 static int
971 uncore_add(struct monotonic_config *config, uint32_t *ctr_out)
972 {
973 	if (mt_uncore_enabled) {
974 		return EBUSY;
975 	}
976 
977 	uint8_t selector = (uint8_t)config->event;
978 	uint32_t available = ~uncore_active_ctrs & config->allowed_ctr_mask;
979 
980 	if (available == 0) {
981 		return ENOSPC;
982 	}
983 
984 	if (!cpc_event_allowed(CPC_HW_UPMU, selector)) {
985 		return EPERM;
986 	}
987 
988 	uint32_t valid_ctrs = (UINT32_C(1) << UNCORE_NCTRS) - 1;
989 	if ((available & valid_ctrs) == 0) {
990 		return E2BIG;
991 	}
992 	/*
993 	 * Clear the UPMCs the first time an event is added.
994 	 */
995 	if (uncore_active_ctrs == 0) {
996 		/*
997 		 * Suspend powerdown until the next reset.
998 		 */
999 		assert(!mt_uncore_suspended_cpd);
1000 		suspend_cluster_powerdown();
1001 		mt_uncore_suspended_cpd = true;
1002 
1003 		for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
1004 			struct uncore_monitor *mon = &uncore_monitors[monid];
1005 
1006 			int intrs_en = uncmon_lock(mon);
1007 			bool remote = uncmon_is_remote(monid);
1008 
1009 			if (!mon->um_sleeping) {
1010 				for (unsigned int ctr = 0; ctr < UNCORE_NCTRS; ctr++) {
1011 					if (remote) {
1012 #if UNCORE_PER_CLUSTER
1013 						uncmon_write_counter_locked_r(monid, ctr, 0);
1014 #endif /* UNCORE_PER_CLUSTER */
1015 					} else {
1016 						uncmon_write_counter_locked_l(monid, ctr, 0);
1017 					}
1018 				}
1019 			}
1020 			memset(&mon->um_snaps, 0, sizeof(mon->um_snaps));
1021 			memset(&mon->um_counts, 0, sizeof(mon->um_counts));
1022 			uncmon_unlock(mon, intrs_en);
1023 		}
1024 	}
1025 
1026 	uint32_t ctr = __builtin_ffsll(available) - 1;
1027 
1028 	uncore_active_ctrs |= UINT64_C(1) << ctr;
1029 	uncore_config.uc_events.uce_ctrs[ctr] = selector;
1030 	uint64_t cpu_mask = UINT64_MAX;
1031 	if (config->cpu_mask != 0) {
1032 		cpu_mask = config->cpu_mask;
1033 	}
1034 	for (unsigned int i = 0; i < uncore_nmonitors(); i++) {
1035 #if UNCORE_PER_CLUSTER
1036 		const unsigned int shift = topology_info->clusters[i].first_cpu_id;
1037 #else /* UNCORE_PER_CLUSTER */
1038 		const unsigned int shift = 0;
1039 #endif /* !UNCORE_PER_CLUSTER */
1040 		uncore_config.uc_cpu_masks[i].uccm_masks[ctr] = (uint16_t)(cpu_mask >> shift);
1041 	}
1042 
1043 	*ctr_out = ctr;
1044 	return 0;
1045 }
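
/*
 * The per-monitor CPU masks stored above are relative to the first CPU in
 * each cluster.  For example, with UNCORE_PER_CLUSTER, a config->cpu_mask of
 * 0x30 on a cluster whose first_cpu_id is 4 is stored as 0x30 >> 4 = 0x3,
 * i.e. the first two cores of that cluster.
 */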
1046 
1047 /*
1048  * Support for monotonic's mtd_reset function.
1049  */
1050 
1051 /*
1052  * Reset all configuration and disable the counters if they're currently
1053  * counting.
1054  */
1055 static void
1056 uncore_reset(void)
1057 {
1058 	mt_uncore_enabled = false;
1059 
1060 	if (!mt_uncore_suspended_cpd) {
1061 		/* If we haven't already suspended CPD, we need to do so now to ensure we can issue remote reads
1062 		 * to every cluster. */
1063 		suspend_cluster_powerdown();
1064 		mt_uncore_suspended_cpd = true;
1065 	}
1066 
1067 	if (mt_owns_counters()) {
1068 		for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
1069 			struct uncore_monitor *mon = &uncore_monitors[monid];
1070 
1071 			int intrs_en = uncmon_lock(mon);
1072 			bool remote = uncmon_is_remote(monid);
1073 			if (!mon->um_sleeping) {
1074 				if (remote) {
1075 #if UNCORE_PER_CLUSTER
1076 					uncmon_set_counting_locked_r(monid, 0);
1077 #endif /* UNCORE_PER_CLUSTER */
1078 				} else {
1079 					uncmon_set_counting_locked_l(monid, 0);
1080 				}
1081 
1082 				for (int ctr = 0; ctr < UNCORE_NCTRS; ctr++) {
1083 					if (uncore_active_ctrs & (1U << ctr)) {
1084 						if (remote) {
1085 #if UNCORE_PER_CLUSTER
1086 							uncmon_write_counter_locked_r(monid, ctr, 0);
1087 #endif /* UNCORE_PER_CLUSTER */
1088 						} else {
1089 							uncmon_write_counter_locked_l(monid, ctr, 0);
1090 						}
1091 					}
1092 				}
1093 			}
1094 
1095 			memset(&mon->um_snaps, 0, sizeof(mon->um_snaps));
1096 			memset(&mon->um_counts, 0, sizeof(mon->um_counts));
1097 			if (!mon->um_sleeping) {
1098 				if (remote) {
1099 #if UNCORE_PER_CLUSTER
1100 					uncmon_clear_int_locked_r(monid);
1101 #endif /* UNCORE_PER_CLUSTER */
1102 				} else {
1103 					uncmon_clear_int_locked_l(monid);
1104 				}
1105 			}
1106 
1107 			uncmon_unlock(mon, intrs_en);
1108 		}
1109 	}
1110 
1111 	uncore_active_ctrs = 0;
1112 	memset(&uncore_config, 0, sizeof(uncore_config));
1113 
1114 	if (mt_owns_counters()) {
1115 		for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
1116 			struct uncore_monitor *mon = &uncore_monitors[monid];
1117 
1118 			int intrs_en = uncmon_lock(mon);
1119 			bool remote = uncmon_is_remote(monid);
1120 			if (!mon->um_sleeping) {
1121 				if (remote) {
1122 	#if UNCORE_PER_CLUSTER
1123 					uncmon_program_events_locked_r(monid);
1124 	#endif /* UNCORE_PER_CLUSTER */
1125 				} else {
1126 					uncmon_program_events_locked_l(monid);
1127 				}
1128 			}
1129 			uncmon_unlock(mon, intrs_en);
1130 		}
1131 	}
1132 
1133 	/* After reset, no counters should be active, so we can allow powerdown again */
1134 	if (mt_uncore_suspended_cpd) {
1135 		resume_cluster_powerdown();
1136 		mt_uncore_suspended_cpd = false;
1137 	}
1138 }
1139 
1140 /*
1141  * Support for monotonic's mtd_enable function.
1142  */
1143 
1144 static void
1145 uncmon_set_enabled_l_locked(unsigned int monid, bool enable)
1146 {
1147 	struct uncore_monitor *mon = &uncore_monitors[monid];
1148 #pragma unused(mon)
1149 	LCK_SPIN_ASSERT(&mon->um_lock, LCK_ASSERT_OWNED);
1150 
1151 	if (enable) {
1152 		uncmon_init_locked_l(monid);
1153 		uncmon_program_events_locked_l(monid);
1154 		uncmon_set_counting_locked_l(monid, uncore_active_ctrs);
1155 	} else {
1156 		uncmon_set_counting_locked_l(monid, 0);
1157 	}
1158 }
1159 
1160 #if UNCORE_PER_CLUSTER
1161 
1162 __unused
1163 static void
1164 uncmon_set_enabled_r_locked(unsigned int monid, bool enable)
1165 {
1166 	struct uncore_monitor *mon = &uncore_monitors[monid];
1167 #pragma unused(mon)
1168 	LCK_SPIN_ASSERT(&mon->um_lock, LCK_ASSERT_OWNED);
1169 
1170 	if (!mon->um_sleeping) {
1171 		if (enable) {
1172 			uncmon_init_locked_r(monid);
1173 			uncmon_program_events_locked_r(monid);
1174 			uncmon_set_counting_locked_r(monid, uncore_active_ctrs);
1175 		} else {
1176 			uncmon_set_counting_locked_r(monid, 0);
1177 		}
1178 	}
1179 }
1180 
1181 #endif /* UNCORE_PER_CLUSTER */
1182 
1183 static void
1184 uncore_set_enabled(bool enable)
1185 {
1186 	mt_uncore_enabled = enable;
1187 
1188 	_broadcast_each_cluster(^(unsigned int cluster_id) {
1189 		struct uncore_monitor *mon = &uncore_monitors[cluster_id];
1190 		int intrs_en = uncmon_lock(mon);
1191 		uncmon_set_enabled_l_locked(cluster_id, enable);
1192 		uncmon_unlock(mon, intrs_en);
1193 	});
1194 }
1195 
1196 /*
1197  * Hooks in the machine layer.
1198  */
1199 
1200 static void
1201 uncore_fiq(uint64_t upmsr)
1202 {
1203 	/*
1204 	 * Determine which counters overflowed.
1205 	 */
1206 	uint64_t disable_ctr_mask = (upmsr & UPMSR_OVF_MASK) >> UPMSR_OVF_POS;
1207 	/* should not receive interrupts from inactive counters */
1208 	assert(!(disable_ctr_mask & ~uncore_active_ctrs));
1209 
1210 	if (uncore_active_ctrs == 0) {
1211 		return;
1212 	}
1213 
1214 	unsigned int monid = uncmon_get_curid();
1215 	struct uncore_monitor *mon = &uncore_monitors[monid];
1216 
1217 	int intrs_en = uncmon_lock(mon);
1218 
1219 	/*
1220 	 * Disable any counters that overflowed.
1221 	 */
1222 	uncmon_set_counting_locked_l(monid,
1223 	    uncore_active_ctrs & ~disable_ctr_mask);
1224 
1225 	/*
1226 	 * With the overflowing counters disabled, capture their counts and reset
1227 	 * the UPMCs and their snapshots to 0.
1228 	 */
1229 	for (unsigned int ctr = 0; ctr < UNCORE_NCTRS; ctr++) {
1230 		if (UPMSR_OVF(upmsr, ctr)) {
1231 			uncmon_update_locked(monid, ctr);
1232 			mon->um_snaps[ctr] = 0;
1233 			uncmon_write_counter_locked_l(monid, ctr, 0);
1234 		}
1235 	}
1236 
1237 	/*
1238 	 * Acknowledge the interrupt, now that any overflowed PMCs have been reset.
1239 	 */
1240 	uncmon_clear_int_locked_l(monid);
1241 
1242 	/*
1243 	 * Re-enable all active counters.
1244 	 */
1245 	uncmon_set_counting_locked_l(monid, uncore_active_ctrs);
1246 
1247 	uncmon_unlock(mon, intrs_en);
1248 }
1249 
1250 static void
1251 uncore_save(void)
1252 {
1253 	if (!uncore_active_ctrs) {
1254 		return;
1255 	}
1256 
1257 	for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
1258 		struct uncore_monitor *mon = &uncore_monitors[monid];
1259 		int intrs_en = uncmon_lock(mon);
1260 
1261 		if (mt_uncore_enabled) {
1262 			if (uncmon_is_remote(monid)) {
1263 #if UNCORE_PER_CLUSTER
1264 				uncmon_set_counting_locked_r(monid, 0);
1265 #endif /* UNCORE_PER_CLUSTER */
1266 			} else {
1267 				uncmon_set_counting_locked_l(monid, 0);
1268 			}
1269 		}
1270 
1271 		for (unsigned int ctr = 0; ctr < UNCORE_NCTRS; ctr++) {
1272 			if (uncore_active_ctrs & (1U << ctr)) {
1273 				uncmon_update_locked(monid, ctr);
1274 				mon->um_snaps[ctr] = 0;
1275 				uncmon_write_counter_locked_l(monid, ctr, 0);
1276 			}
1277 		}
1278 
1279 		mon->um_sleeping = true;
1280 		uncmon_unlock(mon, intrs_en);
1281 	}
1282 }
1283 
1284 static void
1285 uncore_restore(void)
1286 {
1287 	if (!uncore_active_ctrs) {
1288 		return;
1289 	}
1290 	/* Ensure interrupts disabled before reading uncmon_get_curid */
1291 	bool intr = ml_set_interrupts_enabled(false);
1292 	unsigned int curmonid = uncmon_get_curid();
1293 
1294 	struct uncore_monitor *mon = &uncore_monitors[curmonid];
1295 	int intrs_en = uncmon_lock(mon);
1296 	if (!mon->um_sleeping) {
1297 		goto out;
1298 	}
1299 
1300 	for (unsigned int ctr = 0; ctr < UNCORE_NCTRS; ctr++) {
1301 		if (uncore_active_ctrs & (1U << ctr)) {
1302 			uncmon_write_counter_locked_l(curmonid, ctr, mon->um_snaps[ctr]);
1303 		}
1304 	}
1305 	uncmon_program_events_locked_l(curmonid);
1306 	uncmon_init_locked_l(curmonid);
1307 	mon->um_sleeping = false;
1308 #if MACH_ASSERT
1309 	mon->um_read_since_sleep = false;
1310 #endif /* MACH_ASSERT */
1311 
1312 out:
1313 	uncmon_unlock(mon, intrs_en);
1314 	ml_set_interrupts_enabled(intr);
1315 }
1316 
1317 #endif /* HAS_UNCORE_CTRS */
1318 
1319 #pragma mark common hooks
1320 
1321 void
1322 mt_early_init(void)
1323 {
1324 	topology_info = ml_get_topology_info();
1325 }
1326 
1327 void
1328 mt_cpu_idle(cpu_data_t *cpu)
1329 {
1330 	core_idle(cpu);
1331 }
1332 
1333 void
1334 mt_cpu_run(cpu_data_t *cpu)
1335 {
1336 	struct mt_cpu *mtc;
1337 
1338 	assert(cpu != NULL);
1339 	assert(ml_get_interrupts_enabled() == FALSE);
1340 
1341 	mtc = &cpu->cpu_monotonic;
1342 
1343 	for (int i = 0; i < MT_CORE_NFIXED; i++) {
1344 		mt_core_set_snap(i, mtc->mtc_snaps[i]);
1345 	}
1346 
1347 	/* re-enable the counters */
1348 	core_init_execution_modes();
1349 
1350 	core_set_enabled();
1351 }
1352 
1353 void
1354 mt_cpu_down(cpu_data_t *cpu)
1355 {
1356 	mt_cpu_idle(cpu);
1357 }
1358 
1359 void
1360 mt_cpu_up(cpu_data_t *cpu)
1361 {
1362 	mt_cpu_run(cpu);
1363 }
1364 
1365 void
1366 mt_sleep(void)
1367 {
1368 #if HAS_UNCORE_CTRS
1369 	uncore_save();
1370 #endif /* HAS_UNCORE_CTRS */
1371 }
1372 
1373 void
1374 mt_wake_per_core(void)
1375 {
1376 #if HAS_UNCORE_CTRS
1377 	if (mt_uncore_initted) {
1378 		uncore_restore();
1379 	}
1380 #endif /* HAS_UNCORE_CTRS */
1381 }
1382 
1383 uint64_t
1384 mt_count_pmis(void)
1385 {
1386 	uint64_t npmis = 0;
1387 	for (unsigned int i = 0; i < topology_info->num_cpus; i++) {
1388 		cpu_data_t *cpu = (cpu_data_t *)CpuDataEntries[topology_info->cpus[i].cpu_id].cpu_data_vaddr;
1389 		npmis += cpu->cpu_monotonic.mtc_npmis;
1390 	}
1391 	return npmis;
1392 }
1393 
1394 static void
1395 mt_cpu_pmi(cpu_data_t *cpu, uint64_t pmcr0)
1396 {
1397 	assert(cpu != NULL);
1398 	assert(ml_get_interrupts_enabled() == FALSE);
1399 
1400 	__builtin_arm_wsr64("S3_1_C15_C0_0", PMCR0_INIT);
1401 	/*
1402 	 * Ensure the CPMU has flushed any increments at this point, so PMSR is up
1403 	 * to date.
1404 	 */
1405 	__builtin_arm_isb(ISB_SY);
1406 
1407 	cpu->cpu_monotonic.mtc_npmis += 1;
1408 	cpu->cpu_stat.pmi_cnt_wake += 1;
1409 
1410 #if MONOTONIC_DEBUG
1411 	if (!PMCR0_PMI(pmcr0)) {
1412 		kprintf("monotonic: mt_cpu_pmi but no PMI (PMCR0 = %#llx)\n",
1413 		    pmcr0);
1414 	}
1415 #else /* MONOTONIC_DEBUG */
1416 #pragma unused(pmcr0)
1417 #endif /* !MONOTONIC_DEBUG */
1418 
1419 	uint64_t pmsr = __builtin_arm_rsr64("S3_1_C15_C13_0");
1420 
1421 #if MONOTONIC_DEBUG
1422 	printf("monotonic: cpu = %d, PMSR = 0x%llx, PMCR0 = 0x%llx\n",
1423 	    cpu_number(), pmsr, pmcr0);
1424 #endif /* MONOTONIC_DEBUG */
1425 
1426 #if MACH_ASSERT
1427 	uint64_t handled = 0;
1428 #endif /* MACH_ASSERT */
1429 
1430 	/*
1431 	 * monotonic handles any fixed counter PMIs.
1432 	 */
1433 	for (unsigned int i = 0; i < MT_CORE_NFIXED; i++) {
1434 		if ((pmsr & PMSR_OVF(i)) == 0) {
1435 			continue;
1436 		}
1437 
1438 #if MACH_ASSERT
1439 		handled |= 1ULL << i;
1440 #endif /* MACH_ASSERT */
1441 		uint64_t count = mt_cpu_update_count(cpu, i);
1442 		cpu->cpu_monotonic.mtc_counts[i] += count;
1443 		mt_core_set_snap(i, mt_core_reset_values[i]);
1444 		cpu->cpu_monotonic.mtc_snaps[i] = mt_core_reset_values[i];
1445 
1446 		if (mt_microstackshots && mt_microstackshot_ctr == i) {
1447 			bool user_mode = false;
1448 			arm_saved_state_t *state = get_user_regs(current_thread());
1449 			if (state) {
1450 				user_mode = PSR64_IS_USER(get_saved_state_cpsr(state));
1451 			}
1452 			KDBG_RELEASE(KDBG_EVENTID(DBG_MONOTONIC, DBG_MT_DEBUG, 1),
1453 			    mt_microstackshot_ctr, user_mode);
1454 			mt_microstackshot_pmi_handler(user_mode, mt_microstackshot_ctx);
1455 		} else if (mt_debug) {
1456 			KDBG_RELEASE(KDBG_EVENTID(DBG_MONOTONIC, DBG_MT_DEBUG, 2),
1457 			    i, count);
1458 		}
1459 	}
1460 
1461 	/*
1462 	 * KPC handles the configurable counter PMIs.
1463 	 */
1464 	for (unsigned int i = MT_CORE_NFIXED; i < CORE_NCTRS; i++) {
1465 		if (pmsr & PMSR_OVF(i)) {
1466 #if MACH_ASSERT
1467 			handled |= 1ULL << i;
1468 #endif /* MACH_ASSERT */
1469 			extern void kpc_pmi_handler(unsigned int ctr);
1470 			kpc_pmi_handler(i);
1471 		}
1472 	}
1473 
1474 #if MACH_ASSERT
1475 	uint64_t pmsr_after_handling = __builtin_arm_rsr64("S3_1_C15_C13_0");
1476 	if (pmsr_after_handling != 0) {
1477 		unsigned int first_ctr_ovf = __builtin_ffsll(pmsr_after_handling) - 1;
1478 		uint64_t count = 0;
1479 		const char *extra = "";
1480 		if (first_ctr_ovf >= CORE_NCTRS) {
1481 			extra = " (invalid counter)";
1482 		} else {
1483 			count = mt_core_snap(first_ctr_ovf);
1484 		}
1485 
1486 		panic("monotonic: PMI status not cleared on exit from handler, "
1487 	    "PMSR = 0x%llx HANDLER -> 0x%llx, handled 0x%llx, "
1488 		    "PMCR0 = 0x%llx, PMC%d = 0x%llx%s", pmsr, pmsr_after_handling,
1489 		    handled, __builtin_arm_rsr64("S3_1_C15_C0_0"), first_ctr_ovf, count, extra);
1490 	}
1491 #endif /* MACH_ASSERT */
1492 
1493 	core_set_enabled();
1494 }
1495 
1496 #if CPMU_AIC_PMI
1497 void
1498 mt_cpmu_aic_pmi(cpu_id_t source)
1499 {
1500 	struct cpu_data *curcpu = getCpuDatap();
1501 	if (source != curcpu->interrupt_nub) {
1502 		panic("monotonic: PMI from IOCPU %p delivered to %p", source,
1503 		    curcpu->interrupt_nub);
1504 	}
1505 	mt_cpu_pmi(curcpu, __builtin_arm_rsr64("S3_1_C15_C0_0"));
1506 }
1507 #endif /* CPMU_AIC_PMI */
1508 
1509 void
1510 mt_fiq(void *cpu, uint64_t pmcr0, uint64_t upmsr)
1511 {
1512 #if CPMU_AIC_PMI
1513 #pragma unused(cpu, pmcr0)
1514 #else /* CPMU_AIC_PMI */
1515 	mt_cpu_pmi(cpu, pmcr0);
1516 #endif /* !CPMU_AIC_PMI */
1517 
1518 #if HAS_UNCORE_CTRS
1519 	if (upmsr != 0) {
1520 		uncore_fiq(upmsr);
1521 	}
1522 #else /* HAS_UNCORE_CTRS */
1523 #pragma unused(upmsr)
1524 #endif /* !HAS_UNCORE_CTRS */
1525 }
1526 
1527 void
1528 mt_ownership_change(bool available)
1529 {
1530 #if HAS_UNCORE_CTRS
1531 	/*
1532 	 * No need to take the lock here, as this is only manipulated in the UPMU
1533 	 * when the current task already owns the counters and is on its way out.
1534 	 */
1535 	if (!available && uncore_active_ctrs) {
1536 		uncore_reset();
1537 	}
1538 #else
1539 #pragma unused(available)
1540 #endif /* HAS_UNCORE_CTRS */
1541 }
1542 
1543 static uint32_t mt_xc_sync;
1544 
1545 static void
1546 mt_microstackshot_start_remote(__unused void *arg)
1547 {
1548 	cpu_data_t *cpu = getCpuDatap();
1549 
1550 	__builtin_arm_wsr64("S3_1_C15_C0_0", PMCR0_INIT);
1551 
1552 	for (int i = 0; i < MT_CORE_NFIXED; i++) {
1553 		uint64_t count = mt_cpu_update_count(cpu, i);
1554 		cpu->cpu_monotonic.mtc_counts[i] += count;
1555 		mt_core_set_snap(i, mt_core_reset_values[i]);
1556 		cpu->cpu_monotonic.mtc_snaps[i] = mt_core_reset_values[i];
1557 	}
1558 
1559 	core_set_enabled();
1560 
1561 	if (os_atomic_dec(&mt_xc_sync, relaxed) == 0) {
1562 		thread_wakeup((event_t)&mt_xc_sync);
1563 	}
1564 }
1565 
1566 int
1567 mt_microstackshot_start_arch(uint64_t period)
1568 {
1569 	uint64_t reset_value = 0;
1570 	int ovf = os_sub_overflow(CTR_MAX, period, &reset_value);
1571 	if (ovf) {
1572 		return ERANGE;
1573 	}
1574 
1575 	mt_core_reset_values[mt_microstackshot_ctr] = reset_value;
1576 	cpu_broadcast_xcall(&mt_xc_sync, TRUE, mt_microstackshot_start_remote,
1577 	    mt_microstackshot_start_remote /* cannot pass NULL */);
1578 	return 0;
1579 }
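
/*
 * The reset value is CTR_MAX - period, so each counter overflows -- and
 * delivers a microstackshot PMI -- after counting `period` more events.
 * A period larger than CTR_MAX cannot be represented and is rejected with
 * ERANGE above.
 */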
1580 
1581 #pragma mark dev nodes
1582 
1583 struct mt_device mt_devices[] = {
1584 	[0] = {
1585 		.mtd_name = "core",
1586 		.mtd_init = core_init,
1587 	},
1588 #if HAS_UNCORE_CTRS
1589 	[1] = {
1590 		.mtd_name = "uncore",
1591 		.mtd_init = uncore_init,
1592 		.mtd_add = uncore_add,
1593 		.mtd_reset = uncore_reset,
1594 		.mtd_enable = uncore_set_enabled,
1595 		.mtd_read = uncore_read,
1596 
1597 		.mtd_ncounters = UNCORE_NCTRS,
1598 	}
1599 #endif /* HAS_UNCORE_CTRS */
1600 };
1601 
1602 static_assert(
1603 	(sizeof(mt_devices) / sizeof(mt_devices[0])) == MT_NDEVS,
1604 	"MT_NDEVS macro should be same as the length of mt_devices");
1605