/*
 * Copyright (c) 2017-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/cpu_data_internal.h>
#include <arm/machine_routines.h>
#include <arm64/monotonic.h>
#include <kern/assert.h>
#include <kern/cpc.h>
#include <kern/debug.h> /* panic */
#include <kern/kpc.h>
#include <kern/monotonic.h>
#include <machine/atomic.h>
#include <machine/limits.h> /* CHAR_BIT */
#include <os/overflow.h>
#include <pexpert/arm64/board_config.h>
#include <pexpert/device_tree.h> /* SecureDTFindEntry */
#include <pexpert/pexpert.h>
#include <stdatomic.h>
#include <stdint.h>
#include <string.h>
#include <sys/errno.h>
#include <sys/monotonic.h>

/*
 * Ensure that control registers read back what was written under MACH_ASSERT
 * kernels.
 *
 * A static inline function cannot be used due to passing the register through
 * the builtin -- it requires a constant string as its first argument, since
 * MSR register names are encoded as immediates in the instruction.
 */
#if MACH_ASSERT
#define CTRL_REG_SET(reg, val) do { \
	__builtin_arm_wsr64((reg), (val)); \
	uint64_t __check_reg = __builtin_arm_rsr64((reg)); \
	if (__check_reg != (val)) { \
		panic("value written to %s was not read back (wrote %llx, read %llx)", \
		    #reg, (val), __check_reg); \
	} \
} while (0)
#else /* MACH_ASSERT */
#define CTRL_REG_SET(reg, val) __builtin_arm_wsr64((reg), (val))
#endif /* MACH_ASSERT */
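/*
 * For instance (illustrative, not from the original source):
 * CTRL_REG_SET("UPMPCM_EL1", mask) writes the register and, on MACH_ASSERT
 * kernels, panics if any bits fail to read back -- e.g. if the hardware
 * treats them as reserved or write-ignored.
 */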

#pragma mark core counters

const bool mt_core_supported = true;

static const ml_topology_info_t *topology_info;

/*
 * PMC[0-1] are the 48/64-bit fixed counters -- PMC0 is cycles and PMC1 is
 * instructions (see arm64/monotonic.h).
 *
 * PMC2+ are currently handled by kpc.
 */
#define PMC_0_7(X, A) X(0, A); X(1, A); X(2, A); X(3, A); X(4, A); X(5, A); \
	X(6, A); X(7, A)

#if CORE_NCTRS > 8
#define PMC_8_9(X, A) X(8, A); X(9, A)
#else // CORE_NCTRS > 8
#define PMC_8_9(X, A)
#endif // CORE_NCTRS > 8

#define PMC_ALL(X, A) PMC_0_7(X, A); PMC_8_9(X, A)

#if CPMU_64BIT_PMCS
#define PMC_WIDTH (63)
#else // CPMU_64BIT_PMCS
#define PMC_WIDTH (47)
#endif // !CPMU_64BIT_PMCS

#define CTR_MAX ((UINT64_C(1) << PMC_WIDTH) - 1)
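/*
 * Worked example: with 48-bit PMCs, PMC_WIDTH is 47 and
 * CTR_MAX == (1 << 47) - 1 == 0x7fffffffffff. Like the UPMCs described
 * below, the top bit of the counter is reserved for signaling overflow.
 */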

#define CYCLES 0
#define INSTRS 1

/*
 * PMC0's offset into a core's PIO range.
 *
 * This allows cores to remotely query another core's counters.
 */

#define PIO_PMC0_OFFSET (0x200)

/*
 * The offset of the counter in the configuration registers. Post-Hurricane
 * devices have additional counters that need a larger shift than the original
 * counters.
 *
 * XXX For now, just support the lower-numbered counters.
 */
#define CTR_POS(CTR) (CTR)

/*
 * PMCR0 is the main control register for the performance monitor. It
 * controls whether the counters are enabled, how they deliver interrupts, and
 * other features.
 */

#define PMCR0_CTR_EN(CTR) (UINT64_C(1) << CTR_POS(CTR))
#define PMCR0_FIXED_EN (PMCR0_CTR_EN(CYCLES) | PMCR0_CTR_EN(INSTRS))
/* how interrupts are delivered on a PMI */
enum {
	PMCR0_INTGEN_OFF = 0,
	PMCR0_INTGEN_PMI = 1,
	PMCR0_INTGEN_AIC = 2,
	PMCR0_INTGEN_HALT = 3,
	PMCR0_INTGEN_FIQ = 4,
};
#define PMCR0_INTGEN_SET(X) ((uint64_t)(X) << 8)

#if CPMU_AIC_PMI
#define PMCR0_INTGEN_INIT PMCR0_INTGEN_SET(PMCR0_INTGEN_AIC)
#else /* CPMU_AIC_PMI */
#define PMCR0_INTGEN_INIT PMCR0_INTGEN_SET(PMCR0_INTGEN_FIQ)
#endif /* !CPMU_AIC_PMI */

#define PMCR0_PMI_SHIFT (12)
#define PMCR0_CTR_GE8_PMI_SHIFT (44)
#define PMCR0_PMI_EN(CTR) (UINT64_C(1) << (PMCR0_PMI_SHIFT + CTR_POS(CTR)))
/* fixed counters are always counting */
#define PMCR0_PMI_INIT (PMCR0_PMI_EN(CYCLES) | PMCR0_PMI_EN(INSTRS))
/* disable counting on a PMI */
#define PMCR0_DISCNT_EN (UINT64_C(1) << 20)
/* block PMIs until ERET retires */
#define PMCR0_WFRFE_EN (UINT64_C(1) << 22)
/* count global (not just core-local) L2C events */
#define PMCR0_L2CGLOBAL_EN (UINT64_C(1) << 23)
/* user mode access to configuration registers */
#define PMCR0_USEREN_EN (UINT64_C(1) << 30)
#define PMCR0_CTR_GE8_EN_SHIFT (32)

#if HAS_CPMU_PC_CAPTURE
#define PMCR0_PCC_INIT (UINT64_C(0x7) << 24)
#else /* HAS_CPMU_PC_CAPTURE */
#define PMCR0_PCC_INIT (0)
#endif /* !HAS_CPMU_PC_CAPTURE */

#define PMCR0_INIT (PMCR0_INTGEN_INIT | PMCR0_PMI_INIT | PMCR0_PCC_INIT)
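/*
 * Worked example (illustrative): on a configuration without CPMU_AIC_PMI or
 * PC capture, PMCR0_INIT is PMCR0_INTGEN_SET(PMCR0_INTGEN_FIQ) |
 * PMCR0_PMI_INIT == (4 << 8) | (1 << 12) | (1 << 13) == 0x3400 -- FIQ
 * delivery plus PMI enables for the two fixed counters.
 */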

/*
 * PMCR1 controls which execution modes count events.
 */
#define PMCR1_EL0A32_EN(CTR) (UINT64_C(1) << (0 + CTR_POS(CTR)))
#define PMCR1_EL0A64_EN(CTR) (UINT64_C(1) << (8 + CTR_POS(CTR)))
#define PMCR1_EL1A64_EN(CTR) (UINT64_C(1) << (16 + CTR_POS(CTR)))
/* PMCR1_EL3A64 is not supported on systems with no monitor */
#if defined(APPLEHURRICANE)
#define PMCR1_EL3A64_EN(CTR) UINT64_C(0)
#else
#define PMCR1_EL3A64_EN(CTR) (UINT64_C(1) << (24 + CTR_POS(CTR)))
#endif
#define PMCR1_ALL_EN(CTR) (PMCR1_EL0A32_EN(CTR) | PMCR1_EL0A64_EN(CTR) | \
	PMCR1_EL1A64_EN(CTR) | PMCR1_EL3A64_EN(CTR))

/* fixed counters always count in all modes */
#define PMCR1_INIT (PMCR1_ALL_EN(CYCLES) | PMCR1_ALL_EN(INSTRS))
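/*
 * For example, PMCR1_ALL_EN(CYCLES) sets bits 0, 8, 16, and 24 (the last
 * only where EL3 exists), enabling PMC0 in 32-bit EL0, 64-bit EL0, EL1, and
 * EL3.
 */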

static inline void
core_init_execution_modes(void)
{
	uint64_t pmcr1;

	pmcr1 = __builtin_arm_rsr64("PMCR1_EL1");
	pmcr1 |= PMCR1_INIT;
	__builtin_arm_wsr64("PMCR1_EL1", pmcr1);
#if CONFIG_EXCLAVES
	__builtin_arm_wsr64("PMCR1_EL12", pmcr1);
#endif
}

#define PMSR_OVF(CTR) (1ULL << (CTR))

static int
core_init(__unused mt_device_t dev)
{
	/* the dev node interface to the core counters is still unsupported */
	return ENOTSUP;
}

struct mt_cpu *
mt_cur_cpu(void)
{
	return &getCpuDatap()->cpu_monotonic;
}

uint64_t
mt_core_snap(unsigned int ctr)
{
	switch (ctr) {
#define PMC_RD(CTR, UNUSED) case (CTR): return __builtin_arm_rsr64(__MSR_STR(PMC ## CTR))
		PMC_ALL(PMC_RD, 0);
#undef PMC_RD
	default:
		panic("monotonic: invalid core counter read: %u", ctr);
		__builtin_unreachable();
	}
}
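/*
 * Assuming __MSR_STR stringizes its argument, PMC_ALL(PMC_RD, 0) above
 * expands into a direct read per counter register, e.g.:
 *
 *	case (0): return __builtin_arm_rsr64("PMC0");
 *	case (1): return __builtin_arm_rsr64("PMC1");
 *
 * satisfying the builtin's constant-string requirement noted for
 * CTRL_REG_SET above.
 */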

void
mt_core_set_snap(unsigned int ctr, uint64_t count)
{
	switch (ctr) {
	case 0:
		__builtin_arm_wsr64("PMC0", count);
		break;
	case 1:
		__builtin_arm_wsr64("PMC1", count);
		break;
	default:
		panic("monotonic: invalid core counter %u write %llu", ctr, count);
		__builtin_unreachable();
	}
}

static void
core_set_enabled(void)
{
	uint32_t kpc_mask = kpc_get_running() &
	    (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK);
	uint64_t pmcr0 = __builtin_arm_rsr64("PMCR0_EL1");
	pmcr0 |= PMCR0_INIT | PMCR0_FIXED_EN;

	if (kpc_mask != 0) {
		uint64_t kpc_ctrs = kpc_get_configurable_pmc_mask(kpc_mask) <<
		    MT_CORE_NFIXED;
#if KPC_ARM64_CONFIGURABLE_COUNT > 6
		uint64_t ctrs_ge8 = kpc_ctrs >> 8;
		pmcr0 |= ctrs_ge8 << PMCR0_CTR_GE8_EN_SHIFT;
		pmcr0 |= ctrs_ge8 << PMCR0_CTR_GE8_PMI_SHIFT;
		kpc_ctrs &= (1ULL << 8) - 1;
#endif /* KPC_ARM64_CONFIGURABLE_COUNT > 6 */
		kpc_ctrs |= kpc_ctrs << PMCR0_PMI_SHIFT;
		pmcr0 |= kpc_ctrs;
	}

	__builtin_arm_wsr64("PMCR0_EL1", pmcr0);
#if MACH_ASSERT
	/*
	 * Only check for the values that were ORed in.
	 */
	uint64_t pmcr0_check = __builtin_arm_rsr64("PMCR0_EL1");
	if ((pmcr0_check & (PMCR0_INIT | PMCR0_FIXED_EN)) != (PMCR0_INIT | PMCR0_FIXED_EN)) {
		panic("monotonic: hardware ignored enable (read %llx, wrote %llx)",
		    pmcr0_check, pmcr0);
	}
#endif /* MACH_ASSERT */
}

static void
core_idle(__unused cpu_data_t *cpu)
{
	assert(cpu != NULL);
	assert(ml_get_interrupts_enabled() == FALSE);

#if DEBUG
	uint64_t pmcr0 = __builtin_arm_rsr64("PMCR0_EL1");
	if ((pmcr0 & PMCR0_FIXED_EN) == 0) {
		panic("monotonic: counters disabled before idling, pmcr0 = 0x%llx", pmcr0);
	}
	uint64_t pmcr1 = __builtin_arm_rsr64("PMCR1_EL1");
	if ((pmcr1 & PMCR1_INIT) == 0) {
		panic("monotonic: counter modes disabled before idling, pmcr1 = 0x%llx", pmcr1);
	}
#endif /* DEBUG */

	/* disable counters before updating */
	__builtin_arm_wsr64("PMCR0_EL1", PMCR0_INIT);

	mt_update_fixed_counts();
}

#pragma mark uncore performance monitor

#if HAS_UNCORE_CTRS

static bool mt_uncore_initted = false;

/*
 * Uncore Performance Monitor
 *
 * Uncore performance monitors provide event-counting for the last-level caches
 * (LLCs). Each LLC has its own uncore performance monitor, which can only be
 * accessed by cores that use that LLC. Like the core performance monitoring
 * unit, uncore counters are configured globally. If there is more than one
 * LLC on the system, PIO reads must be used to satisfy uncore requests (using
 * the `_r` remote variants of the access functions). Otherwise, local MSRs
 * suffice (using the `_l` local variants of the access functions).
 */

#if UNCORE_PER_CLUSTER
#define MAX_NMONITORS MAX_CPU_CLUSTERS
static uintptr_t cpm_impl[MAX_NMONITORS] = {};
#else
#define MAX_NMONITORS (1)
#endif /* UNCORE_PER_CLUSTER */

#if UNCORE_VERSION >= 2
/*
 * V2 uncore monitors feature a CTI mechanism -- the second bit of UPMSR is
 * used to track if a CTI has been triggered due to an overflow.
 */
#define UPMSR_OVF_POS 2
#else /* UNCORE_VERSION >= 2 */
#define UPMSR_OVF_POS 1
#endif /* UNCORE_VERSION < 2 */
#define UPMSR_OVF(R, CTR) ((R) >> ((CTR) + UPMSR_OVF_POS) & 0x1)
#define UPMSR_OVF_MASK (((UINT64_C(1) << UNCORE_NCTRS) - 1) << UPMSR_OVF_POS)
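/*
 * For example, on a V1 UPMU (UPMSR_OVF_POS == 1), an UPMSR value of 0x6 has
 * bits 1 and 2 set, so UPMSR_OVF(0x6, 0) and UPMSR_OVF(0x6, 1) are both 1:
 * counters 0 and 1 overflowed.
 */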

#define UPMPCM_CORE(ID) (UINT64_C(1) << (ID))

#if UPMU_64BIT_PMCS
#define UPMC_WIDTH (63)
#else // UPMU_64BIT_PMCS
#define UPMC_WIDTH (47)
#endif // !UPMU_64BIT_PMCS

/*
 * The uncore_pmi_mask is a bitmask of CPUs that receive uncore PMIs. It's
 * initialized by uncore_init and controllable by the uncore_pmi_mask boot-arg.
 */
static int32_t uncore_pmi_mask = 0;

/*
 * The uncore_active_ctrs is a bitmask of uncore counters that are currently
 * requested.
 */
static uint16_t uncore_active_ctrs = 0;
static_assert(sizeof(uncore_active_ctrs) * CHAR_BIT >= UNCORE_NCTRS,
    "counter mask should fit the full range of counters");

/*
 * mt_uncore_enabled is true when any uncore counters are active.
 */
bool mt_uncore_enabled = false;

/*
 * The uncore_events are the event configurations for each uncore counter -- as
 * a union to make it easy to program the hardware registers.
 */
static struct uncore_config {
	union {
		uint8_t uce_ctrs[UNCORE_NCTRS];
		uint64_t uce_regs[UNCORE_NCTRS / 8];
	} uc_events;
	union {
		uint16_t uccm_masks[UNCORE_NCTRS];
		uint64_t uccm_regs[UNCORE_NCTRS / 4];
	} uc_cpu_masks[MAX_NMONITORS];
} uncore_config;

static struct uncore_monitor {
	/*
	 * The last snapshot of each of the hardware counter values.
	 */
	uint64_t um_snaps[UNCORE_NCTRS];

	/*
	 * The accumulated counts for each counter.
	 */
	uint64_t um_counts[UNCORE_NCTRS];

	/*
	 * Protects accessing the hardware registers and fields in this structure.
	 */
	lck_spin_t um_lock;

	/*
	 * Whether this monitor needs its registers restored after wake.
	 */
	bool um_sleeping;

#if MACH_ASSERT
	/*
	 * Save the last ID that read from this monitor.
	 */
	uint8_t um_last_read_id;

	/*
	 * Save whether this monitor has been read since sleeping.
	 */
	bool um_read_since_sleep;
#endif /* MACH_ASSERT */
} uncore_monitors[MAX_NMONITORS];

/*
 * Each uncore unit has its own monitor, corresponding to the memory hierarchy
 * of the LLCs.
 */
static unsigned int
uncore_nmonitors(void)
{
#if UNCORE_PER_CLUSTER
	return topology_info->num_clusters;
#else /* UNCORE_PER_CLUSTER */
	return 1;
#endif /* !UNCORE_PER_CLUSTER */
}

static unsigned int
uncmon_get_curid(void)
{
#if UNCORE_PER_CLUSTER
	return cpu_cluster_id();
#else /* UNCORE_PER_CLUSTER */
	return 0;
#endif /* !UNCORE_PER_CLUSTER */
}

/*
 * Per-monitor locks are required to prevent races with the PMI handlers, not
 * from other CPUs that are configuring (those are serialized with monotonic's
 * per-device lock).
 */

static int
uncmon_lock(struct uncore_monitor *mon)
{
	int intrs_en = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(&mon->um_lock);
	return intrs_en;
}

static void
uncmon_unlock(struct uncore_monitor *mon, int intrs_en)
{
	lck_spin_unlock(&mon->um_lock);
	(void)ml_set_interrupts_enabled(intrs_en);
}

/*
 * Helper functions for accessing the hardware -- these require the monitor be
 * locked to prevent other CPUs' PMI handlers from making local modifications
 * or updating the counts.
 */

#if UNCORE_VERSION >= 2
#define UPMCR0_INTEN_POS 20
#define UPMCR0_INTGEN_POS 16
#else /* UNCORE_VERSION >= 2 */
#define UPMCR0_INTEN_POS 12
#define UPMCR0_INTGEN_POS 8
#endif /* UNCORE_VERSION < 2 */
enum {
	UPMCR0_INTGEN_OFF = 0,
	/* fast PMIs are only supported on core CPMU */
	UPMCR0_INTGEN_AIC = 2,
	UPMCR0_INTGEN_HALT = 3,
	UPMCR0_INTGEN_FIQ = 4,
};
/* always enable interrupts for all counters */
#define UPMCR0_INTEN (((1ULL << UNCORE_NCTRS) - 1) << UPMCR0_INTEN_POS)
/* route uncore PMIs through the FIQ path */
#define UPMCR0_INIT (UPMCR0_INTEN | (UPMCR0_INTGEN_FIQ << UPMCR0_INTGEN_POS))
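/*
 * Worked example (illustrative): on a V1 UPMU with 8 counters,
 * UPMCR0_INTEN == 0xff << 12 == 0xff000 and the INTGEN field is
 * 4 << 8 == 0x400, so UPMCR0_INIT == 0xff400 -- all counters
 * interrupt-enabled, with PMIs delivered as FIQs.
 */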

/*
 * Turn counting on for the counters set in `enctrmask` and off otherwise.
 */
static inline void
uncmon_set_counting_locked_l(__unused unsigned int monid, uint64_t enctrmask)
{
	/*
	 * UPMCR0 controls which counters are enabled and how interrupts are generated
	 * for overflows.
	 */
	__builtin_arm_wsr64("UPMCR0_EL1", UPMCR0_INIT | enctrmask);
}

#if UNCORE_PER_CLUSTER

/*
 * Turn counting on for the counters set in `enctrmask` and off otherwise.
 */
static inline void
uncmon_set_counting_locked_r(unsigned int monid, uint64_t enctrmask)
{
	const uintptr_t upmcr0_offset = 0x4180;
	*(uint64_t *)(cpm_impl[monid] + upmcr0_offset) = UPMCR0_INIT | enctrmask;
}

#endif /* UNCORE_PER_CLUSTER */

/*
 * The uncore performance monitoring counters (UPMCs) are 48/64-bits wide. The
 * high bit is an overflow bit, triggering a PMI, providing 47/63 usable bits.
 */

#define UPMC_MAX ((UINT64_C(1) << UPMC_WIDTH) - 1)
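/*
 * E.g. with 48-bit UPMCs, UPMC_WIDTH is 47 and UPMC_MAX == 0x7fffffffffff.
 * The write helpers below assert that counts stay below this value, so
 * software never sets the overflow bit itself.
 */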

/*
 * The `__builtin_arm_{r,w}sr` functions require constant strings, since the
 * MSR/MRS instructions encode the registers as immediates. Otherwise, this
 * would be indexing into an array of strings.
 */

#define UPMC_0_7(X, A) X(0, A); X(1, A); X(2, A); X(3, A); X(4, A); X(5, A); \
	X(6, A); X(7, A)
#if UNCORE_NCTRS <= 8
#define UPMC_ALL(X, A) UPMC_0_7(X, A)
#else /* UNCORE_NCTRS <= 8 */
#define UPMC_8_15(X, A) X(8, A); X(9, A); X(10, A); X(11, A); X(12, A); \
	X(13, A); X(14, A); X(15, A)
#define UPMC_ALL(X, A) UPMC_0_7(X, A); UPMC_8_15(X, A)
#endif /* UNCORE_NCTRS > 8 */

__unused
static inline uint64_t
uncmon_read_counter_locked_l(__unused unsigned int monid, unsigned int ctr)
{
	assert(ctr < UNCORE_NCTRS);
	switch (ctr) {
#define UPMC_RD(CTR, UNUSED) case (CTR): return __builtin_arm_rsr64(__MSR_STR(UPMC ## CTR))
		UPMC_ALL(UPMC_RD, 0);
#undef UPMC_RD
	default:
		panic("monotonic: invalid counter read %u", ctr);
		__builtin_unreachable();
	}
}

static inline void
uncmon_write_counter_locked_l(__unused unsigned int monid, unsigned int ctr,
    uint64_t count)
{
	assert(count < UPMC_MAX);
	assert(ctr < UNCORE_NCTRS);
	switch (ctr) {
#define UPMC_WR(CTR, COUNT) case (CTR): \
	return __builtin_arm_wsr64(__MSR_STR(UPMC ## CTR), (COUNT))
		UPMC_ALL(UPMC_WR, count);
#undef UPMC_WR
	default:
		panic("monotonic: invalid counter write %u", ctr);
	}
}

#if UNCORE_PER_CLUSTER

uintptr_t upmc_offs[UNCORE_NCTRS] = {
	[0] = 0x4100, [1] = 0x4248, [2] = 0x4110, [3] = 0x4250, [4] = 0x4120,
	[5] = 0x4258, [6] = 0x4130, [7] = 0x4260, [8] = 0x4140, [9] = 0x4268,
	[10] = 0x4150, [11] = 0x4270, [12] = 0x4160, [13] = 0x4278,
	[14] = 0x4170, [15] = 0x4280,
};

static inline uint64_t
uncmon_read_counter_locked_r(unsigned int mon_id, unsigned int ctr)
{
	assert(mon_id < uncore_nmonitors());
	assert(ctr < UNCORE_NCTRS);
	return *(uint64_t *)(cpm_impl[mon_id] + upmc_offs[ctr]);
}

static inline void
uncmon_write_counter_locked_r(unsigned int mon_id, unsigned int ctr,
    uint64_t count)
{
	assert(count < UPMC_MAX);
	assert(ctr < UNCORE_NCTRS);
	assert(mon_id < uncore_nmonitors());
	*(uint64_t *)(cpm_impl[mon_id] + upmc_offs[ctr]) = count;
}

#endif /* UNCORE_PER_CLUSTER */

static inline void
uncmon_update_locked(unsigned int monid, unsigned int __unused curid,
    unsigned int ctr)
{
	struct uncore_monitor *mon = &uncore_monitors[monid];
	if (!mon->um_sleeping) {
		uint64_t snap = 0;
#if UNCORE_PER_CLUSTER
		snap = uncmon_read_counter_locked_r(monid, ctr);
#else /* UNCORE_PER_CLUSTER */
		snap = uncmon_read_counter_locked_l(monid, ctr);
#endif /* UNCORE_PER_CLUSTER */
		if (snap < mon->um_snaps[ctr]) {
#if MACH_ASSERT
#if UNCORE_PER_CLUSTER
			uint64_t remote_value = uncmon_read_counter_locked_r(monid, ctr);
#endif /* UNCORE_PER_CLUSTER */
			panic("monotonic: UPMC%d on UPMU %d went backwards from "
			    "%llx to %llx, read via %s, last was %s from UPMU %hhd%s"
#if UNCORE_PER_CLUSTER
			    ", re-read remote value is %llx"
#endif /* UNCORE_PER_CLUSTER */
			    , ctr,
			    monid, mon->um_snaps[ctr], snap,
			    curid == monid ? "local" : "remote",
			    mon->um_last_read_id == monid ? "local" : "remote",
			    mon->um_last_read_id,
			    mon->um_read_since_sleep ? "" : ", first read since sleep"
#if UNCORE_PER_CLUSTER
			    , remote_value
#endif /* UNCORE_PER_CLUSTER */
			    );
#else /* MACH_ASSERT */
			snap = mon->um_snaps[ctr];
#endif /* !MACH_ASSERT */
		}
		mon->um_counts[ctr] += snap - mon->um_snaps[ctr];
		mon->um_snaps[ctr] = snap;
	}
}

static inline void
uncmon_program_events_locked_l(unsigned int monid)
{
	/*
	 * UPMESR[01] is the event selection register that determines which event a
	 * counter will count.
	 */
	CTRL_REG_SET("UPMESR0_EL1", uncore_config.uc_events.uce_regs[0]);

#if UNCORE_NCTRS > 8
	CTRL_REG_SET("UPMESR1_EL1", uncore_config.uc_events.uce_regs[1]);
#endif /* UNCORE_NCTRS > 8 */

	/*
	 * UPMECM[0123] are the event core masks for each counter -- whether or not
	 * that counter counts events generated by an agent. These are set to all
	 * ones so the uncore counters count events from all cores.
	 *
	 * The bits are based off the start of the cluster -- e.g. even if a core
	 * has a CPU ID of 4, it might be the first CPU in a cluster. Shift the
	 * registers right by the ID of the first CPU in the cluster.
	 */
	CTRL_REG_SET("UPMECM0_EL1",
	    uncore_config.uc_cpu_masks[monid].uccm_regs[0]);
	CTRL_REG_SET("UPMECM1_EL1",
	    uncore_config.uc_cpu_masks[monid].uccm_regs[1]);

#if UNCORE_NCTRS > 8
	CTRL_REG_SET("UPMECM2_EL1",
	    uncore_config.uc_cpu_masks[monid].uccm_regs[2]);
	CTRL_REG_SET("UPMECM3_EL1",
	    uncore_config.uc_cpu_masks[monid].uccm_regs[3]);
#endif /* UNCORE_NCTRS > 8 */
}
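/*
 * For instance, if a cluster's first CPU ID is 4 and a caller asked for a
 * cpu_mask of 0x30, uncore_add (below) stores 0x30 >> 4 == 0x3 into
 * uccm_masks, so the UPMECM registers enable counting for the first two
 * cores of that cluster.
 */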

#if UNCORE_PER_CLUSTER

static inline void
uncmon_program_events_locked_r(unsigned int monid)
{
	const uintptr_t upmesr_offs[2] = {[0] = 0x41b0, [1] = 0x41b8, };

	for (unsigned int i = 0; i < sizeof(upmesr_offs) / sizeof(upmesr_offs[0]);
	    i++) {
		*(uint64_t *)(cpm_impl[monid] + upmesr_offs[i]) =
		    uncore_config.uc_events.uce_regs[i];
	}

	const uintptr_t upmecm_offs[4] = {
		[0] = 0x4190, [1] = 0x4198, [2] = 0x41a0, [3] = 0x41a8,
	};

	for (unsigned int i = 0; i < sizeof(upmecm_offs) / sizeof(upmecm_offs[0]);
	    i++) {
		*(uint64_t *)(cpm_impl[monid] + upmecm_offs[i]) =
		    uncore_config.uc_cpu_masks[monid].uccm_regs[i];
	}
}

#endif /* UNCORE_PER_CLUSTER */

static void
uncmon_clear_int_locked_l(__unused unsigned int monid)
{
	__builtin_arm_wsr64("UPMSR_EL1", 0);
}

#if UNCORE_PER_CLUSTER

static void
uncmon_clear_int_locked_r(unsigned int monid)
{
	const uintptr_t upmsr_off = 0x41c0;
	*(uint64_t *)(cpm_impl[monid] + upmsr_off) = 0;
}

#endif /* UNCORE_PER_CLUSTER */

/*
 * Get the PMI mask for the provided `monid` -- that is, the bitmap of CPUs
 * that should be sent PMIs for a particular monitor.
 */
static uint64_t
uncmon_get_pmi_mask(unsigned int monid)
{
	uint64_t pmi_mask = uncore_pmi_mask;

#if UNCORE_PER_CLUSTER
	pmi_mask &= topology_info->clusters[monid].cpu_mask;
#else /* UNCORE_PER_CLUSTER */
#pragma unused(monid)
#endif /* !UNCORE_PER_CLUSTER */

	return pmi_mask;
}
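/*
 * Illustrative example: with uncore_pmi_mask == 0x11 and a cluster whose
 * cpu_mask is 0xf0, the resulting PMI mask is 0x10, so only CPU 4 services
 * that cluster's uncore PMIs.
 */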

/*
 * Initialization routines for the uncore counters.
 */

static void
uncmon_init_locked_l(unsigned int monid)
{
	/*
	 * UPMPCM defines the PMI core mask for the UPMCs -- which cores should
	 * receive interrupts on overflow.
	 */
	CTRL_REG_SET("UPMPCM_EL1", uncmon_get_pmi_mask(monid));
	uncmon_set_counting_locked_l(monid,
	    mt_uncore_enabled ? uncore_active_ctrs : 0);
}

#if UNCORE_PER_CLUSTER

static uintptr_t acc_impl[MAX_NMONITORS] = {};

static void
uncmon_init_locked_r(unsigned int monid)
{
	const uintptr_t upmpcm_off = 0x1010;

	*(uint64_t *)(acc_impl[monid] + upmpcm_off) = uncmon_get_pmi_mask(monid);
	uncmon_set_counting_locked_r(monid,
	    mt_uncore_enabled ? uncore_active_ctrs : 0);
}

#endif /* UNCORE_PER_CLUSTER */

/*
 * Initialize the uncore device for monotonic.
 */
static int
uncore_init(__unused mt_device_t dev)
{
#if HAS_UNCORE_CTRS
	assert(MT_NDEVS > 0);
	mt_devices[MT_NDEVS - 1].mtd_nmonitors = (uint8_t)uncore_nmonitors();
#endif

#if DEVELOPMENT || DEBUG
	/*
	 * Development and debug kernels observe the `uncore_pmi_mask` boot-arg,
	 * allowing PMIs to be routed to the CPUs present in the supplied bitmap.
	 * Do some sanity checks on the value provided.
	 */
	bool parsed_arg = PE_parse_boot_argn("uncore_pmi_mask", &uncore_pmi_mask,
	    sizeof(uncore_pmi_mask));
	if (parsed_arg) {
#if UNCORE_PER_CLUSTER
		if (__builtin_popcount(uncore_pmi_mask) != (int)uncore_nmonitors()) {
			panic("monotonic: invalid uncore PMI mask 0x%x", uncore_pmi_mask);
		}
		for (unsigned int i = 0; i < uncore_nmonitors(); i++) {
			if (__builtin_popcountll(uncmon_get_pmi_mask(i)) != 1) {
				panic("monotonic: invalid uncore PMI CPU for cluster %d in mask 0x%x",
				    i, uncore_pmi_mask);
			}
		}
#else /* UNCORE_PER_CLUSTER */
		if (__builtin_popcount(uncore_pmi_mask) != 1) {
			panic("monotonic: invalid uncore PMI mask 0x%x", uncore_pmi_mask);
		}
#endif /* !UNCORE_PER_CLUSTER */
	} else
#endif /* DEVELOPMENT || DEBUG */
	{
		/* arbitrarily route to core 0 in each cluster */
		uncore_pmi_mask |= 1;
	}
	assert(uncore_pmi_mask != 0);

	for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
#if UNCORE_PER_CLUSTER
		ml_topology_cluster_t *cluster = &topology_info->clusters[monid];
		cpm_impl[monid] = (uintptr_t)cluster->cpm_IMPL_regs;
		acc_impl[monid] = (uintptr_t)cluster->acc_IMPL_regs;
		assert(cpm_impl[monid] != 0 && acc_impl[monid] != 0);
#endif /* UNCORE_PER_CLUSTER */

		struct uncore_monitor *mon = &uncore_monitors[monid];
		lck_spin_init(&mon->um_lock, &mt_lock_grp, LCK_ATTR_NULL);
	}

	mt_uncore_initted = true;

	return 0;
}

/*
 * Support for monotonic's mtd_read function.
 */

static void
uncmon_read_all_counters(unsigned int monid, unsigned int curmonid,
    uint64_t ctr_mask, uint64_t *counts)
{
	struct uncore_monitor *mon = &uncore_monitors[monid];

	int intrs_en = uncmon_lock(mon);

	for (unsigned int ctr = 0; ctr < UNCORE_NCTRS; ctr++) {
		if (ctr_mask & (1ULL << ctr)) {
			if (!mon->um_sleeping) {
				uncmon_update_locked(monid, curmonid, ctr);
			}
			counts[ctr] = mon->um_counts[ctr];
		}
	}
#if MACH_ASSERT
	mon->um_read_since_sleep = true;
#endif /* MACH_ASSERT */

	uncmon_unlock(mon, intrs_en);
}

/*
 * Read all monitors' counters.
 */
static int
uncore_read(uint64_t ctr_mask, uint64_t *counts_out)
{
	assert(ctr_mask != 0);
	assert(counts_out != NULL);

	if (!uncore_active_ctrs) {
		return EPWROFF;
	}
	if (ctr_mask & ~uncore_active_ctrs) {
		return EINVAL;
	}

	unsigned int curmonid = uncmon_get_curid();
	for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
		/*
		 * Find this monitor's starting offset into the `counts_out` array.
		 */
		uint64_t *counts = counts_out + (UNCORE_NCTRS * monid);
		uncmon_read_all_counters(monid, curmonid, ctr_mask, counts);
	}

	return 0;
}
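/*
 * The layout of `counts_out` is monitor-major: monitor 0's counters occupy
 * indices [0, UNCORE_NCTRS), monitor 1's occupy
 * [UNCORE_NCTRS, 2 * UNCORE_NCTRS), and so on, so callers must provide
 * uncore_nmonitors() * UNCORE_NCTRS elements.
 */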

/*
 * Support for monotonic's mtd_add function.
 */

/*
 * Add an event to the current uncore configuration. This doesn't take effect
 * until the counters are enabled again, so there's no need to involve the
 * monitors.
 */
static int
uncore_add(struct monotonic_config *config, uint32_t *ctr_out)
{
	if (mt_uncore_enabled) {
		return EBUSY;
	}

	uint8_t selector = (uint8_t)config->event;
	uint32_t available = ~uncore_active_ctrs & config->allowed_ctr_mask;

	if (available == 0) {
		return ENOSPC;
	}

	if (!cpc_event_allowed(CPC_HW_UPMU, selector)) {
		return EPERM;
	}

	uint32_t valid_ctrs = (UINT32_C(1) << UNCORE_NCTRS) - 1;
	if ((available & valid_ctrs) == 0) {
		return E2BIG;
	}
	/*
	 * Clear the UPMCs the first time an event is added.
	 */
	unsigned int curmonid = uncmon_get_curid();
	if (uncore_active_ctrs == 0) {
		/*
		 * Suspend powerdown until the next reset.
		 */
		suspend_cluster_powerdown();

		for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
			struct uncore_monitor *mon = &uncore_monitors[monid];
			bool remote = monid != curmonid;

			int intrs_en = uncmon_lock(mon);
			if (!mon->um_sleeping) {
				for (unsigned int ctr = 0; ctr < UNCORE_NCTRS; ctr++) {
					if (remote) {
#if UNCORE_PER_CLUSTER
						uncmon_write_counter_locked_r(monid, ctr, 0);
#endif /* UNCORE_PER_CLUSTER */
					} else {
						uncmon_write_counter_locked_l(monid, ctr, 0);
					}
				}
			}
			memset(&mon->um_snaps, 0, sizeof(mon->um_snaps));
			memset(&mon->um_counts, 0, sizeof(mon->um_counts));
			uncmon_unlock(mon, intrs_en);
		}
	}

	uint32_t ctr = __builtin_ffsll(available) - 1;

	uncore_active_ctrs |= UINT64_C(1) << ctr;
	uncore_config.uc_events.uce_ctrs[ctr] = selector;
	uint64_t cpu_mask = UINT64_MAX;
	if (config->cpu_mask != 0) {
		cpu_mask = config->cpu_mask;
	}
	for (unsigned int i = 0; i < uncore_nmonitors(); i++) {
#if UNCORE_PER_CLUSTER
		const unsigned int shift = topology_info->clusters[i].first_cpu_id;
#else /* UNCORE_PER_CLUSTER */
		const unsigned int shift = 0;
#endif /* !UNCORE_PER_CLUSTER */
		uncore_config.uc_cpu_masks[i].uccm_masks[ctr] = (uint16_t)(cpu_mask >> shift);
	}

	*ctr_out = ctr;
	return 0;
}
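/*
 * Counter allocation takes the lowest available slot: e.g. if counters 0
 * and 1 are already active and allowed_ctr_mask is 0xf, `available` is 0xc
 * and __builtin_ffsll assigns counter 2 to the new event.
 */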

/*
 * Support for monotonic's mtd_reset function.
 */

/*
 * Reset all configuration and disable the counters if they're currently
 * counting.
 */
static void
uncore_reset(void)
{
	mt_uncore_enabled = false;

	unsigned int curmonid = uncmon_get_curid();

	if (mt_owns_counters()) {
		if (uncore_active_ctrs != 0) {
			resume_cluster_powerdown();
		}

		for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
			struct uncore_monitor *mon = &uncore_monitors[monid];
			bool remote = monid != curmonid;

			int intrs_en = uncmon_lock(mon);
			if (!mon->um_sleeping) {
				if (remote) {
#if UNCORE_PER_CLUSTER
					uncmon_set_counting_locked_r(monid, 0);
#endif /* UNCORE_PER_CLUSTER */
				} else {
					uncmon_set_counting_locked_l(monid, 0);
				}

				for (int ctr = 0; ctr < UNCORE_NCTRS; ctr++) {
					if (uncore_active_ctrs & (1U << ctr)) {
						if (remote) {
#if UNCORE_PER_CLUSTER
							uncmon_write_counter_locked_r(monid, ctr, 0);
#endif /* UNCORE_PER_CLUSTER */
						} else {
							uncmon_write_counter_locked_l(monid, ctr, 0);
						}
					}
				}
			}

			memset(&mon->um_snaps, 0, sizeof(mon->um_snaps));
			memset(&mon->um_counts, 0, sizeof(mon->um_counts));
			if (!mon->um_sleeping) {
				if (remote) {
#if UNCORE_PER_CLUSTER
					uncmon_clear_int_locked_r(monid);
#endif /* UNCORE_PER_CLUSTER */
				} else {
					uncmon_clear_int_locked_l(monid);
				}
			}

			uncmon_unlock(mon, intrs_en);
		}
	}

	uncore_active_ctrs = 0;
	memset(&uncore_config, 0, sizeof(uncore_config));

	if (mt_owns_counters()) {
		for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
			struct uncore_monitor *mon = &uncore_monitors[monid];
			bool remote = monid != curmonid;

			int intrs_en = uncmon_lock(mon);
			if (!mon->um_sleeping) {
				if (remote) {
#if UNCORE_PER_CLUSTER
					uncmon_program_events_locked_r(monid);
#endif /* UNCORE_PER_CLUSTER */
				} else {
					uncmon_program_events_locked_l(monid);
				}
			}
			uncmon_unlock(mon, intrs_en);
		}
	}
}

/*
 * Support for monotonic's mtd_enable function.
 */

static void
uncmon_set_enabled_l(unsigned int monid, bool enable)
{
	struct uncore_monitor *mon = &uncore_monitors[monid];
	int intrs_en = uncmon_lock(mon);

	if (enable) {
		uncmon_init_locked_l(monid);
		uncmon_program_events_locked_l(monid);
		uncmon_set_counting_locked_l(monid, uncore_active_ctrs);
	} else {
		uncmon_set_counting_locked_l(monid, 0);
	}

	uncmon_unlock(mon, intrs_en);
}

#if UNCORE_PER_CLUSTER

static void
uncmon_set_enabled_r(unsigned int monid, bool enable)
{
	struct uncore_monitor *mon = &uncore_monitors[monid];
	int intrs_en = uncmon_lock(mon);

	if (!mon->um_sleeping) {
		if (enable) {
			uncmon_init_locked_r(monid);
			uncmon_program_events_locked_r(monid);
			uncmon_set_counting_locked_r(monid, uncore_active_ctrs);
		} else {
			uncmon_set_counting_locked_r(monid, 0);
		}
	}

	uncmon_unlock(mon, intrs_en);
}

#endif /* UNCORE_PER_CLUSTER */

static void
uncore_set_enabled(bool enable)
{
	mt_uncore_enabled = enable;

	unsigned int curmonid = uncmon_get_curid();
	for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
		if (monid != curmonid) {
#if UNCORE_PER_CLUSTER
			uncmon_set_enabled_r(monid, enable);
#endif /* UNCORE_PER_CLUSTER */
		} else {
			uncmon_set_enabled_l(monid, enable);
		}
	}
}

/*
 * Hooks in the machine layer.
 */

static void
uncore_fiq(uint64_t upmsr)
{
	/*
	 * Determine which counters overflowed.
	 */
	uint64_t disable_ctr_mask = (upmsr & UPMSR_OVF_MASK) >> UPMSR_OVF_POS;
	/* should not receive interrupts from inactive counters */
	assert(!(disable_ctr_mask & ~uncore_active_ctrs));

	if (uncore_active_ctrs == 0) {
		return;
	}

	unsigned int monid = uncmon_get_curid();
	struct uncore_monitor *mon = &uncore_monitors[monid];

	int intrs_en = uncmon_lock(mon);

	/*
	 * Disable any counters that overflowed.
	 */
	uncmon_set_counting_locked_l(monid,
	    uncore_active_ctrs & ~disable_ctr_mask);

	/*
	 * With the overflowing counters disabled, capture their counts and reset
	 * the UPMCs and their snapshots to 0.
	 */
	for (unsigned int ctr = 0; ctr < UNCORE_NCTRS; ctr++) {
		if (UPMSR_OVF(upmsr, ctr)) {
			uncmon_update_locked(monid, monid, ctr);
			mon->um_snaps[ctr] = 0;
			uncmon_write_counter_locked_l(monid, ctr, 0);
		}
	}

	/*
	 * Acknowledge the interrupt, now that any overflowed PMCs have been reset.
	 */
	uncmon_clear_int_locked_l(monid);

	/*
	 * Re-enable all active counters.
	 */
	uncmon_set_counting_locked_l(monid, uncore_active_ctrs);

	uncmon_unlock(mon, intrs_en);
}

static void
uncore_save(void)
{
	if (!uncore_active_ctrs) {
		return;
	}

	unsigned int curmonid = uncmon_get_curid();

	for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
		struct uncore_monitor *mon = &uncore_monitors[monid];
		int intrs_en = uncmon_lock(mon);

		if (mt_uncore_enabled) {
			if (monid != curmonid) {
#if UNCORE_PER_CLUSTER
				uncmon_set_counting_locked_r(monid, 0);
#endif /* UNCORE_PER_CLUSTER */
			} else {
				uncmon_set_counting_locked_l(monid, 0);
			}
		}

		for (unsigned int ctr = 0; ctr < UNCORE_NCTRS; ctr++) {
			if (uncore_active_ctrs & (1U << ctr)) {
				uncmon_update_locked(monid, curmonid, ctr);
				mon->um_snaps[ctr] = 0;
				uncmon_write_counter_locked_l(monid, ctr, 0);
			}
		}

		mon->um_sleeping = true;
		uncmon_unlock(mon, intrs_en);
	}
}

static void
uncore_restore(void)
{
	if (!uncore_active_ctrs) {
		return;
	}
	unsigned int curmonid = uncmon_get_curid();

	struct uncore_monitor *mon = &uncore_monitors[curmonid];
	int intrs_en = uncmon_lock(mon);
	if (!mon->um_sleeping) {
		goto out;
	}

	for (unsigned int ctr = 0; ctr < UNCORE_NCTRS; ctr++) {
		if (uncore_active_ctrs & (1U << ctr)) {
			uncmon_write_counter_locked_l(curmonid, ctr, mon->um_snaps[ctr]);
		}
	}
	uncmon_program_events_locked_l(curmonid);
	uncmon_init_locked_l(curmonid);
	mon->um_sleeping = false;
#if MACH_ASSERT
	mon->um_read_since_sleep = false;
#endif /* MACH_ASSERT */

out:
	uncmon_unlock(mon, intrs_en);
}

#endif /* HAS_UNCORE_CTRS */

#pragma mark common hooks

void
mt_early_init(void)
{
	topology_info = ml_get_topology_info();
}

void
mt_cpu_idle(cpu_data_t *cpu)
{
	core_idle(cpu);
}

void
mt_cpu_run(cpu_data_t *cpu)
{
	struct mt_cpu *mtc;

	assert(cpu != NULL);
	assert(ml_get_interrupts_enabled() == FALSE);

	mtc = &cpu->cpu_monotonic;

	for (int i = 0; i < MT_CORE_NFIXED; i++) {
		mt_core_set_snap(i, mtc->mtc_snaps[i]);
	}

	/* re-enable the counters */
	core_init_execution_modes();

	core_set_enabled();
}

void
mt_cpu_down(cpu_data_t *cpu)
{
	mt_cpu_idle(cpu);
}

void
mt_cpu_up(cpu_data_t *cpu)
{
	mt_cpu_run(cpu);
}

void
mt_sleep(void)
{
#if HAS_UNCORE_CTRS
	uncore_save();
#endif /* HAS_UNCORE_CTRS */
}

void
mt_wake_per_core(void)
{
#if HAS_UNCORE_CTRS
	if (mt_uncore_initted) {
		uncore_restore();
	}
#endif /* HAS_UNCORE_CTRS */
}

uint64_t
mt_count_pmis(void)
{
	uint64_t npmis = 0;
	for (unsigned int i = 0; i < topology_info->num_cpus; i++) {
		cpu_data_t *cpu = (cpu_data_t *)CpuDataEntries[topology_info->cpus[i].cpu_id].cpu_data_vaddr;
		npmis += cpu->cpu_monotonic.mtc_npmis;
	}
	return npmis;
}

static void
mt_cpu_pmi(cpu_data_t *cpu, uint64_t pmcr0)
{
	assert(cpu != NULL);
	assert(ml_get_interrupts_enabled() == FALSE);

	__builtin_arm_wsr64("PMCR0_EL1", PMCR0_INIT);
	/*
	 * Ensure the CPMU has flushed any increments at this point, so PMSR is up
	 * to date.
	 */
	__builtin_arm_isb(ISB_SY);

	cpu->cpu_monotonic.mtc_npmis += 1;
	cpu->cpu_stat.pmi_cnt_wake += 1;

#if MONOTONIC_DEBUG
	if (!PMCR0_PMI(pmcr0)) {
		kprintf("monotonic: mt_cpu_pmi but no PMI (PMCR0 = %#llx)\n",
		    pmcr0);
	}
#else /* MONOTONIC_DEBUG */
#pragma unused(pmcr0)
#endif /* !MONOTONIC_DEBUG */

	uint64_t pmsr = __builtin_arm_rsr64("PMSR_EL1");

#if MONOTONIC_DEBUG
	printf("monotonic: cpu = %d, PMSR = 0x%llx, PMCR0 = 0x%llx\n",
	    cpu_number(), pmsr, pmcr0);
#endif /* MONOTONIC_DEBUG */

#if MACH_ASSERT
	uint64_t handled = 0;
#endif /* MACH_ASSERT */

	/*
	 * monotonic handles any fixed counter PMIs.
	 */
	for (unsigned int i = 0; i < MT_CORE_NFIXED; i++) {
		if ((pmsr & PMSR_OVF(i)) == 0) {
			continue;
		}

#if MACH_ASSERT
		handled |= 1ULL << i;
#endif /* MACH_ASSERT */
		uint64_t count = mt_cpu_update_count(cpu, i);
		cpu->cpu_monotonic.mtc_counts[i] += count;
		mt_core_set_snap(i, mt_core_reset_values[i]);
		cpu->cpu_monotonic.mtc_snaps[i] = mt_core_reset_values[i];

		if (mt_microstackshots && mt_microstackshot_ctr == i) {
			bool user_mode = false;
			arm_saved_state_t *state = get_user_regs(current_thread());
			if (state) {
				user_mode = PSR64_IS_USER(get_saved_state_cpsr(state));
			}
			KDBG_RELEASE(KDBG_EVENTID(DBG_MONOTONIC, DBG_MT_DEBUG, 1),
			    mt_microstackshot_ctr, user_mode);
			mt_microstackshot_pmi_handler(user_mode, mt_microstackshot_ctx);
		} else if (mt_debug) {
			KDBG_RELEASE(KDBG_EVENTID(DBG_MONOTONIC, DBG_MT_DEBUG, 2),
			    i, count);
		}
	}

	/*
	 * KPC handles the configurable counter PMIs.
	 */
	for (unsigned int i = MT_CORE_NFIXED; i < CORE_NCTRS; i++) {
		if (pmsr & PMSR_OVF(i)) {
#if MACH_ASSERT
			handled |= 1ULL << i;
#endif /* MACH_ASSERT */
			extern void kpc_pmi_handler(unsigned int ctr);
			kpc_pmi_handler(i);
		}
	}

#if MACH_ASSERT
	uint64_t pmsr_after_handling = __builtin_arm_rsr64("PMSR_EL1");
	if (pmsr_after_handling != 0) {
		unsigned int first_ctr_ovf = __builtin_ffsll(pmsr_after_handling) - 1;
		uint64_t count = 0;
		const char *extra = "";
		if (first_ctr_ovf >= CORE_NCTRS) {
			extra = " (invalid counter)";
		} else {
			count = mt_core_snap(first_ctr_ovf);
		}

		panic("monotonic: PMI status not cleared on exit from handler, "
		    "PMSR = 0x%llx HANDLE -> -> 0x%llx, handled 0x%llx, "
		    "PMCR0 = 0x%llx, PMC%d = 0x%llx%s", pmsr, pmsr_after_handling,
		    handled, __builtin_arm_rsr64("PMCR0_EL1"), first_ctr_ovf, count, extra);
	}
#endif /* MACH_ASSERT */

	core_set_enabled();
}

#if CPMU_AIC_PMI
void
mt_cpmu_aic_pmi(cpu_id_t source)
{
	struct cpu_data *curcpu = getCpuDatap();
	if (source != curcpu->interrupt_nub) {
		panic("monotonic: PMI from IOCPU %p delivered to %p", source,
		    curcpu->interrupt_nub);
	}
	mt_cpu_pmi(curcpu, __builtin_arm_rsr64("PMCR0_EL1"));
}
#endif /* CPMU_AIC_PMI */

void
mt_fiq(void *cpu, uint64_t pmcr0, uint64_t upmsr)
{
#if CPMU_AIC_PMI
#pragma unused(cpu, pmcr0)
#else /* CPMU_AIC_PMI */
	mt_cpu_pmi(cpu, pmcr0);
#endif /* !CPMU_AIC_PMI */

#if HAS_UNCORE_CTRS
	if (upmsr != 0) {
		uncore_fiq(upmsr);
	}
#else /* HAS_UNCORE_CTRS */
#pragma unused(upmsr)
#endif /* !HAS_UNCORE_CTRS */
}

void
mt_ownership_change(bool available)
{
#if HAS_UNCORE_CTRS
	/*
	 * No need to take the lock here, as this is only manipulated in the UPMU
	 * when the current task already owns the counters and is on its way out.
	 */
	if (!available && uncore_active_ctrs) {
		uncore_reset();
	}
#else
#pragma unused(available)
#endif /* HAS_UNCORE_CTRS */
}

static uint32_t mt_xc_sync;

static void
mt_microstackshot_start_remote(__unused void *arg)
{
	cpu_data_t *cpu = getCpuDatap();

	__builtin_arm_wsr64("PMCR0_EL1", PMCR0_INIT);

	for (int i = 0; i < MT_CORE_NFIXED; i++) {
		uint64_t count = mt_cpu_update_count(cpu, i);
		cpu->cpu_monotonic.mtc_counts[i] += count;
		mt_core_set_snap(i, mt_core_reset_values[i]);
		cpu->cpu_monotonic.mtc_snaps[i] = mt_core_reset_values[i];
	}

	core_set_enabled();

	if (os_atomic_dec(&mt_xc_sync, relaxed) == 0) {
		thread_wakeup((event_t)&mt_xc_sync);
	}
}

int
mt_microstackshot_start_arch(uint64_t period)
{
	uint64_t reset_value = 0;
	int ovf = os_sub_overflow(CTR_MAX, period, &reset_value);
	if (ovf) {
		return ERANGE;
	}

	mt_core_reset_values[mt_microstackshot_ctr] = reset_value;
	cpu_broadcast_xcall(&mt_xc_sync, TRUE, mt_microstackshot_start_remote,
	    mt_microstackshot_start_remote /* cannot pass NULL */);
	return 0;
}
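/*
 * The counter is seeded with CTR_MAX - period, so it reaches the overflow
 * threshold -- and raises a PMI -- after roughly `period` more events.
 * E.g. with 48-bit PMCs and period == 1000, reset_value is
 * 0x7fffffffffff - 1000.
 */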

#pragma mark dev nodes

struct mt_device mt_devices[] = {
	[0] = {
		.mtd_name = "core",
		.mtd_init = core_init,
	},
#if HAS_UNCORE_CTRS
	[1] = {
		.mtd_name = "uncore",
		.mtd_init = uncore_init,
		.mtd_add = uncore_add,
		.mtd_reset = uncore_reset,
		.mtd_enable = uncore_set_enabled,
		.mtd_read = uncore_read,

		.mtd_ncounters = UNCORE_NCTRS,
	}
#endif /* HAS_UNCORE_CTRS */
};

static_assert(
	(sizeof(mt_devices) / sizeof(mt_devices[0])) == MT_NDEVS,
	"MT_NDEVS macro should be same as the length of mt_devices");