1 /*
2 * Copyright (c) 2017-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <arm/cpu_data_internal.h>
30 #include <arm/machine_routines.h>
31 #include <arm64/monotonic.h>
32 #include <kern/assert.h>
33 #include <kern/cpc.h>
34 #include <kern/debug.h> /* panic */
35 #include <kern/kpc.h>
36 #include <kern/monotonic.h>
37 #include <machine/atomic.h>
38 #include <machine/limits.h> /* CHAR_BIT */
39 #include <os/overflow.h>
40 #include <pexpert/arm64/board_config.h>
41 #include <pexpert/device_tree.h> /* SecureDTFindEntry */
42 #include <pexpert/pexpert.h>
43 #include <stdatomic.h>
44 #include <stdint.h>
45 #include <string.h>
46 #include <sys/errno.h>
47 #include <sys/monotonic.h>
48
49 /*
50 * Ensure that control registers read back what was written under MACH_ASSERT
51 * kernels.
52 *
53  * A static inline function cannot be used here because the register name is
54  * passed through the builtin, which requires a constant string as its first
55  * argument -- MSR register names are encoded as an immediate in the instruction.
56 */
57 #if MACH_ASSERT
58 #define CTRL_REG_SET(reg, val) do { \
59 __builtin_arm_wsr64((reg), (val)); \
60 uint64_t __check_reg = __builtin_arm_rsr64((reg)); \
61 if (__check_reg != (val)) { \
62 panic("value written to %s was not read back (wrote %llx, read %llx)", \
63 #reg, (val), __check_reg); \
64 } \
65 } while (0)
66 #else /* MACH_ASSERT */
67 #define CTRL_REG_SET(reg, val) __builtin_arm_wsr64((reg), (val))
68 #endif /* MACH_ASSERT */
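/*
 * Hedged usage sketch: CTRL_REG_SET must be handed a string literal, as in
 * the call from uncmon_program_events_locked_l below:
 *
 *     CTRL_REG_SET("UPMESR0_EL1", uncore_config.uc_events.uce_regs[0]);
 *
 * A hypothetical wrapper such as `static inline void ctrl_reg_set(const char
 * *reg, uint64_t val)` would not compile, since __builtin_arm_wsr64 rejects a
 * register name that is not a compile-time constant string.
 */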
69
70 #pragma mark core counters
71
72 const bool mt_core_supported = true;
73
74 static const ml_topology_info_t *topology_info;
75
76 /*
77 * PMC[0-1] are the 48/64-bit fixed counters -- PMC0 is cycles and PMC1 is
78 * instructions (see arm64/monotonic.h).
79 *
80 * PMC2+ are currently handled by kpc.
81 */
82 #define PMC_0_7(X, A) X(0, A); X(1, A); X(2, A); X(3, A); X(4, A); X(5, A); \
83 X(6, A); X(7, A)
84
85 #if CORE_NCTRS > 8
86 #define PMC_8_9(X, A) X(8, A); X(9, A)
87 #else // CORE_NCTRS > 8
88 #define PMC_8_9(X, A)
89 #endif // CORE_NCTRS > 8
90
91 #define PMC_ALL(X, A) PMC_0_7(X, A); PMC_8_9(X, A)
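/*
 * Illustrative expansion (assuming CORE_NCTRS <= 8): mt_core_snap below
 * invokes PMC_ALL(PMC_RD, 0), which roughly expands to
 *
 *     case (0): return __builtin_arm_rsr64("PMC0");
 *     case (1): return __builtin_arm_rsr64("PMC1");
 *     ...
 *     case (7): return __builtin_arm_rsr64("PMC7");
 *
 * giving each counter its own MSR read with a constant register name.
 */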
92
93 #if CPMU_64BIT_PMCS
94 #define PMC_WIDTH (63)
95 #else // CPMU_64BIT_PMCS
96 #define PMC_WIDTH (47)
97 #endif // !CPMU_64BIT_PMCS
98
99 #define CTR_MAX ((UINT64_C(1) << PMC_WIDTH) - 1)
100
101 #define CYCLES 0
102 #define INSTRS 1
103
104 /*
105 * PMC0's offset into a core's PIO range.
106 *
107 * This allows cores to remotely query another core's counters.
108 */
109
110 #define PIO_PMC0_OFFSET (0x200)
111
112 /*
113 * The offset of the counter in the configuration registers. Post-Hurricane
114 * devices have additional counters that need a larger shift than the original
115 * counters.
116 *
117 * XXX For now, just support the lower-numbered counters.
118 */
119 #define CTR_POS(CTR) (CTR)
120
121 /*
122 * PMCR0 is the main control register for the performance monitor. It
123 * controls whether the counters are enabled, how they deliver interrupts, and
124 * other features.
125 */
126
127 #define PMCR0_CTR_EN(CTR) (UINT64_C(1) << CTR_POS(CTR))
128 #define PMCR0_FIXED_EN (PMCR0_CTR_EN(CYCLES) | PMCR0_CTR_EN(INSTRS))
129 /* how interrupts are delivered on a PMI */
130 enum {
131 PMCR0_INTGEN_OFF = 0,
132 PMCR0_INTGEN_PMI = 1,
133 PMCR0_INTGEN_AIC = 2,
134 PMCR0_INTGEN_HALT = 3,
135 PMCR0_INTGEN_FIQ = 4,
136 };
137 #define PMCR0_INTGEN_SET(X) ((uint64_t)(X) << 8)
138
139 #if CPMU_AIC_PMI
140 #define PMCR0_INTGEN_INIT PMCR0_INTGEN_SET(PMCR0_INTGEN_AIC)
141 #else /* CPMU_AIC_PMI */
142 #define PMCR0_INTGEN_INIT PMCR0_INTGEN_SET(PMCR0_INTGEN_FIQ)
143 #endif /* !CPMU_AIC_PMI */
144
145 #define PMCR0_PMI_SHIFT (12)
146 #define PMCR0_CTR_GE8_PMI_SHIFT (44)
147 #define PMCR0_PMI_EN(CTR) (UINT64_C(1) << (PMCR0_PMI_SHIFT + CTR_POS(CTR)))
148 /* fixed counters are always counting */
149 #define PMCR0_PMI_INIT (PMCR0_PMI_EN(CYCLES) | PMCR0_PMI_EN(INSTRS))
150 /* disable counting on a PMI */
151 #define PMCR0_DISCNT_EN (UINT64_C(1) << 20)
152 /* block PMIs until ERET retires */
153 #define PMCR0_WFRFE_EN (UINT64_C(1) << 22)
154 /* count global (not just core-local) L2C events */
155 #define PMCR0_L2CGLOBAL_EN (UINT64_C(1) << 23)
156 /* user mode access to configuration registers */
157 #define PMCR0_USEREN_EN (UINT64_C(1) << 30)
158 #define PMCR0_CTR_GE8_EN_SHIFT (32)
159
160 #if HAS_CPMU_PC_CAPTURE
161 #define PMCR0_PCC_INIT (UINT64_C(0x7) << 24)
162 #else /* HAS_CPMU_PC_CAPTURE */
163 #define PMCR0_PCC_INIT (0)
164 #endif /* !HAS_CPMU_PC_CAPTURE */
165
166 #define PMCR0_INIT (PMCR0_INTGEN_INIT | PMCR0_PMI_INIT | PMCR0_PCC_INIT)
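/*
 * Worked example (assuming FIQ-delivered PMIs and PC capture): the value
 * PMCR0_INIT | PMCR0_FIXED_EN, as ORed in by core_set_enabled, contains
 *
 *     bits [1:0]   = 0x3    enable counters 0 and 1
 *     bits [10:8]  = 0x4    INTGEN = FIQ
 *     bits [13:12] = 0x3    PMIs for counters 0 and 1
 *     bits [26:24] = 0x7    PC capture
 *
 * for a value of 0x7003403.
 */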
167
168 /*
169 * PMCR1 controls which execution modes count events.
170 */
171 #define PMCR1_EL0A32_EN(CTR) (UINT64_C(1) << (0 + CTR_POS(CTR)))
172 #define PMCR1_EL0A64_EN(CTR) (UINT64_C(1) << (8 + CTR_POS(CTR)))
173 #define PMCR1_EL1A64_EN(CTR) (UINT64_C(1) << (16 + CTR_POS(CTR)))
174 /* PMCR1_EL3A64 is not supported on systems with no monitor */
175 #if defined(APPLEHURRICANE)
176 #define PMCR1_EL3A64_EN(CTR) UINT64_C(0)
177 #else
178 #define PMCR1_EL3A64_EN(CTR) (UINT64_C(1) << (24 + CTR_POS(CTR)))
179 #endif
180 #define PMCR1_ALL_EN(CTR) (PMCR1_EL0A32_EN(CTR) | PMCR1_EL0A64_EN(CTR) | \
181 PMCR1_EL1A64_EN(CTR) | PMCR1_EL3A64_EN(CTR))
182
183 /* fixed counters always count in all modes */
184 #define PMCR1_INIT (PMCR1_ALL_EN(CYCLES) | PMCR1_ALL_EN(INSTRS))
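/*
 * Worked example: PMCR1_INIT enables counters 0 and 1 in every mode, setting
 * bits {0,1}, {8,9}, {16,17}, and {24,25}, i.e. 0x03030303 (without the top
 * byte on APPLEHURRICANE, where EL3A64 counting is unsupported).
 */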
185
186 static inline void
187 core_init_execution_modes(void)
188 {
189 uint64_t pmcr1;
190
191 pmcr1 = __builtin_arm_rsr64("PMCR1_EL1");
192 pmcr1 |= PMCR1_INIT;
193 __builtin_arm_wsr64("PMCR1_EL1", pmcr1);
194 }
195
196 #define PMSR_OVF(CTR) (1ULL << (CTR))
197
198 static int
199 core_init(__unused mt_device_t dev)
200 {
201 /* the dev node interface to the core counters is still unsupported */
202 return ENOTSUP;
203 }
204
205 struct mt_cpu *
206 mt_cur_cpu(void)
207 {
208 return &getCpuDatap()->cpu_monotonic;
209 }
210
211 uint64_t
212 mt_core_snap(unsigned int ctr)
213 {
214 switch (ctr) {
215 #define PMC_RD(CTR, UNUSED) case (CTR): return __builtin_arm_rsr64(__MSR_STR(PMC ## CTR))
216 PMC_ALL(PMC_RD, 0);
217 #undef PMC_RD
218 default:
219 panic("monotonic: invalid core counter read: %u", ctr);
220 __builtin_unreachable();
221 }
222 }
223
224 void
225 mt_core_set_snap(unsigned int ctr, uint64_t count)
226 {
227 switch (ctr) {
228 case 0:
229 __builtin_arm_wsr64("PMC0", count);
230 break;
231 case 1:
232 __builtin_arm_wsr64("PMC1", count);
233 break;
234 default:
235 panic("monotonic: invalid core counter %u write %llu", ctr, count);
236 __builtin_unreachable();
237 }
238 }
239
240 static void
241 core_set_enabled(void)
242 {
243 uint32_t kpc_mask = kpc_get_running() &
244 (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK);
245 uint64_t pmcr0 = __builtin_arm_rsr64("PMCR0_EL1");
246 pmcr0 |= PMCR0_INIT | PMCR0_FIXED_EN;
247
248 if (kpc_mask != 0) {
249 uint64_t kpc_ctrs = kpc_get_configurable_pmc_mask(kpc_mask) <<
250 MT_CORE_NFIXED;
251 #if KPC_ARM64_CONFIGURABLE_COUNT > 6
252 uint64_t ctrs_ge8 = kpc_ctrs >> 8;
253 pmcr0 |= ctrs_ge8 << PMCR0_CTR_GE8_EN_SHIFT;
254 pmcr0 |= ctrs_ge8 << PMCR0_CTR_GE8_PMI_SHIFT;
255 kpc_ctrs &= (1ULL << 8) - 1;
256 #endif /* KPC_ARM64_CONFIGURABLE_COUNT > 6 */
257 kpc_ctrs |= kpc_ctrs << PMCR0_PMI_SHIFT;
258 pmcr0 |= kpc_ctrs;
259 }
260
261 __builtin_arm_wsr64("PMCR0_EL1", pmcr0);
262 #if MACH_ASSERT
263 /*
264 * Only check for the values that were ORed in.
265 */
266 uint64_t pmcr0_check = __builtin_arm_rsr64("PMCR0_EL1");
267 if ((pmcr0_check & (PMCR0_INIT | PMCR0_FIXED_EN)) != (PMCR0_INIT | PMCR0_FIXED_EN)) {
268 panic("monotonic: hardware ignored enable (read %llx, wrote %llx)",
269 pmcr0_check, pmcr0);
270 }
271 #endif /* MACH_ASSERT */
272 }
273
274 static void
275 core_idle(__unused cpu_data_t *cpu)
276 {
277 assert(cpu != NULL);
278 assert(ml_get_interrupts_enabled() == FALSE);
279
280 #if DEBUG
281 uint64_t pmcr0 = __builtin_arm_rsr64("PMCR0_EL1");
282 if ((pmcr0 & PMCR0_FIXED_EN) == 0) {
283 panic("monotonic: counters disabled before idling, pmcr0 = 0x%llx", pmcr0);
284 }
285 uint64_t pmcr1 = __builtin_arm_rsr64("PMCR1_EL1");
286 if ((pmcr1 & PMCR1_INIT) == 0) {
287 panic("monotonic: counter modes disabled before idling, pmcr1 = 0x%llx", pmcr1);
288 }
289 #endif /* DEBUG */
290
291 /* disable counters before updating */
292 __builtin_arm_wsr64("PMCR0_EL1", PMCR0_INIT);
293
294 mt_update_fixed_counts();
295 }
296
297 #pragma mark uncore performance monitor
298
299 #if HAS_UNCORE_CTRS
300
301 static bool mt_uncore_initted = false;
302
303 /*
304 * Uncore Performance Monitor
305 *
306 * Uncore performance monitors provide event-counting for the last-level caches
307 * (LLCs). Each LLC has its own uncore performance monitor, which can only be
308 * accessed by cores that use that LLC. Like the core performance monitoring
309 * unit, uncore counters are configured globally. If there is more than one
310 * LLC on the system, PIO reads must be used to satisfy uncore requests (using
311 * the `_r` remote variants of the access functions). Otherwise, local MSRs
312 * suffice (using the `_l` local variants of the access functions).
313 */
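/*
 * A minimal sketch of the access pattern used below, assuming
 * UNCORE_PER_CLUSTER (this helper is illustrative only and is not part of
 * the build):
 *
 *     static uint64_t
 *     uncmon_read_counter_locked(unsigned int monid, unsigned int curmonid,
 *         unsigned int ctr)
 *     {
 *         if (monid == curmonid) {
 *             return uncmon_read_counter_locked_l(monid, ctr);
 *         }
 *         return uncmon_read_counter_locked_r(monid, ctr);
 *     }
 *
 * The real code open-codes this local-vs-remote decision at each call site
 * (see uncore_reset and uncore_set_enabled).
 */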
314
315 #if UNCORE_PER_CLUSTER
316 #define MAX_NMONITORS MAX_CPU_CLUSTERS
317 static uintptr_t cpm_impl[MAX_NMONITORS] = {};
318 #else
319 #define MAX_NMONITORS (1)
320 #endif /* UNCORE_PER_CLUSTER */
321
322 #if UNCORE_VERSION >= 2
323 /*
324 * V2 uncore monitors feature a CTI mechanism -- the second bit of UPMSR is
325 * used to track if a CTI has been triggered due to an overflow.
326 */
327 #define UPMSR_OVF_POS 2
328 #else /* UNCORE_VERSION >= 2 */
329 #define UPMSR_OVF_POS 1
330 #endif /* UNCORE_VERSION < 2 */
331 #define UPMSR_OVF(R, CTR) ((R) >> ((CTR) + UPMSR_OVF_POS) & 0x1)
332 #define UPMSR_OVF_MASK (((UINT64_C(1) << UNCORE_NCTRS) - 1) << UPMSR_OVF_POS)
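/*
 * Worked example: on a V2 uncore monitor (UPMSR_OVF_POS == 2), the overflow
 * flag for UPMC3 is bit 5 of UPMSR, so UPMSR_OVF(upmsr, 3) evaluates to
 * (upmsr >> 5) & 0x1; with UNCORE_NCTRS == 16, UPMSR_OVF_MASK covers bits
 * [17:2].
 */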
333
334 #define UPMPCM_CORE(ID) (UINT64_C(1) << (ID))
335
336 #if UPMU_64BIT_PMCS
337 #define UPMC_WIDTH (63)
338 #else // UPMU_64BIT_PMCS
339 #define UPMC_WIDTH (47)
340 #endif // !UPMU_64BIT_PMCS
341
342 /*
343 * The uncore_pmi_mask is a bitmask of CPUs that receive uncore PMIs. It's
344 * initialized by uncore_init and controllable by the uncore_pmi_mask boot-arg.
345 */
346 static int32_t uncore_pmi_mask = 0;
347
348 /*
349 * The uncore_active_ctrs is a bitmask of uncore counters that are currently
350 * requested.
351 */
352 static uint16_t uncore_active_ctrs = 0;
353 static_assert(sizeof(uncore_active_ctrs) * CHAR_BIT >= UNCORE_NCTRS,
354 "counter mask should fit the full range of counters");
355
356 /*
357 * mt_uncore_enabled is true when any uncore counters are active.
358 */
359 bool mt_uncore_enabled = false;
360
361 /*
362 * The uncore_events are the event configurations for each uncore counter -- as
363 * a union to make it easy to program the hardware registers.
364 */
365 static struct uncore_config {
366 union {
367 uint8_t uce_ctrs[UNCORE_NCTRS];
368 uint64_t uce_regs[UNCORE_NCTRS / 8];
369 } uc_events;
370 union {
371 uint16_t uccm_masks[UNCORE_NCTRS];
372 uint64_t uccm_regs[UNCORE_NCTRS / 4];
373 } uc_cpu_masks[MAX_NMONITORS];
374 } uncore_config;
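/*
 * Sketch of how the union is used (assuming UNCORE_NCTRS == 16 and the
 * little-endian byte layout of arm64): uncore_add stores a selector with
 *
 *     uncore_config.uc_events.uce_ctrs[3] = selector;
 *
 * which lands in byte 3 of uc_events.uce_regs[0], the 64-bit value that
 * uncmon_program_events_locked_l writes to UPMESR0_EL1 in one shot; counters
 * 8-15 are covered by uce_regs[1] and UPMESR1_EL1.
 */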
375
376 static struct uncore_monitor {
377 /*
378 * The last snapshot of each of the hardware counter values.
379 */
380 uint64_t um_snaps[UNCORE_NCTRS];
381
382 /*
383 * The accumulated counts for each counter.
384 */
385 uint64_t um_counts[UNCORE_NCTRS];
386
387 /*
388 * Protects accessing the hardware registers and fields in this structure.
389 */
390 lck_spin_t um_lock;
391
392 /*
393 * Whether this monitor needs its registers restored after wake.
394 */
395 bool um_sleeping;
396
397 #if MACH_ASSERT
398 /*
399 * Save the last ID that read from this monitor.
400 */
401 uint8_t um_last_read_id;
402
403 /*
404 * Save whether this monitor has been read since sleeping.
405 */
406 bool um_read_since_sleep;
407 #endif /* MACH_ASSERT */
408 } uncore_monitors[MAX_NMONITORS];
409
410 /*
411 * Each uncore unit has its own monitor, corresponding to the memory hierarchy
412 * of the LLCs.
413 */
414 static unsigned int
415 uncore_nmonitors(void)
416 {
417 #if UNCORE_PER_CLUSTER
418 return topology_info->num_clusters;
419 #else /* UNCORE_PER_CLUSTER */
420 return 1;
421 #endif /* !UNCORE_PER_CLUSTER */
422 }
423
424 static unsigned int
425 uncmon_get_curid(void)
426 {
427 #if UNCORE_PER_CLUSTER
428 return cpu_cluster_id();
429 #else /* UNCORE_PER_CLUSTER */
430 return 0;
431 #endif /* !UNCORE_PER_CLUSTER */
432 }
433
434 /*
435  * Per-monitor locks are required to prevent races with the PMI handlers, not
436  * with other CPUs that are configuring (those are serialized by monotonic's
437  * per-device lock).
438 */
439
440 static int
441 uncmon_lock(struct uncore_monitor *mon)
442 {
443 int intrs_en = ml_set_interrupts_enabled(FALSE);
444 lck_spin_lock(&mon->um_lock);
445 return intrs_en;
446 }
447
448 static void
449 uncmon_unlock(struct uncore_monitor *mon, int intrs_en)
450 {
451 lck_spin_unlock(&mon->um_lock);
452 (void)ml_set_interrupts_enabled(intrs_en);
453 }
454
455 /*
456 * Helper functions for accessing the hardware -- these require the monitor be
457 * locked to prevent other CPUs' PMI handlers from making local modifications
458 * or updating the counts.
459 */
460
461 #if UNCORE_VERSION >= 2
462 #define UPMCR0_INTEN_POS 20
463 #define UPMCR0_INTGEN_POS 16
464 #else /* UNCORE_VERSION >= 2 */
465 #define UPMCR0_INTEN_POS 12
466 #define UPMCR0_INTGEN_POS 8
467 #endif /* UNCORE_VERSION < 2 */
468 enum {
469 UPMCR0_INTGEN_OFF = 0,
470 /* fast PMIs are only supported on core CPMU */
471 UPMCR0_INTGEN_AIC = 2,
472 UPMCR0_INTGEN_HALT = 3,
473 UPMCR0_INTGEN_FIQ = 4,
474 };
475 /* always enable interrupts for all counters */
476 #define UPMCR0_INTEN (((1ULL << UNCORE_NCTRS) - 1) << UPMCR0_INTEN_POS)
477 /* route uncore PMIs through the FIQ path */
478 #define UPMCR0_INIT (UPMCR0_INTEN | (UPMCR0_INTGEN_FIQ << UPMCR0_INTGEN_POS))
479
480 /*
481  * Turn counting on for the counters set in `enctrmask`, and off otherwise.
482 */
483 static inline void
484 uncmon_set_counting_locked_l(__unused unsigned int monid, uint64_t enctrmask)
485 {
486 /*
487 * UPMCR0 controls which counters are enabled and how interrupts are generated
488 * for overflows.
489 */
490 __builtin_arm_wsr64("UPMCR0_EL1", UPMCR0_INIT | enctrmask);
491 }
492
493 #if UNCORE_PER_CLUSTER
494
495 /*
496  * Turn counting on for the counters set in `enctrmask`, and off otherwise.
497 */
498 static inline void
499 uncmon_set_counting_locked_r(unsigned int monid, uint64_t enctrmask)
500 {
501 const uintptr_t upmcr0_offset = 0x4180;
502 *(uint64_t *)(cpm_impl[monid] + upmcr0_offset) = UPMCR0_INIT | enctrmask;
503 }
504
505 #endif /* UNCORE_PER_CLUSTER */
506
507 /*
508 * The uncore performance monitoring counters (UPMCs) are 48/64-bits wide. The
509 * high bit is an overflow bit, triggering a PMI, providing 47/63 usable bits.
510 */
511
512 #define UPMC_MAX ((UINT64_C(1) << UPMC_WIDTH) - 1)
513
514 /*
515 * The `__builtin_arm_{r,w}sr` functions require constant strings, since the
516 * MSR/MRS instructions encode the registers as immediates. Otherwise, this
517 * would be indexing into an array of strings.
518 */
519
520 #define UPMC_0_7(X, A) X(0, A); X(1, A); X(2, A); X(3, A); X(4, A); X(5, A); \
521 X(6, A); X(7, A)
522 #if UNCORE_NCTRS <= 8
523 #define UPMC_ALL(X, A) UPMC_0_7(X, A)
524 #else /* UNCORE_NCTRS <= 8 */
525 #define UPMC_8_15(X, A) X(8, A); X(9, A); X(10, A); X(11, A); X(12, A); \
526 X(13, A); X(14, A); X(15, A)
527 #define UPMC_ALL(X, A) UPMC_0_7(X, A); UPMC_8_15(X, A)
528 #endif /* UNCORE_NCTRS > 8 */
529
530 __unused
531 static inline uint64_t
532 uncmon_read_counter_locked_l(__unused unsigned int monid, unsigned int ctr)
533 {
534 assert(ctr < UNCORE_NCTRS);
535 switch (ctr) {
536 #define UPMC_RD(CTR, UNUSED) case (CTR): return __builtin_arm_rsr64(__MSR_STR(UPMC ## CTR))
537 UPMC_ALL(UPMC_RD, 0);
538 #undef UPMC_RD
539 default:
540 panic("monotonic: invalid counter read %u", ctr);
541 __builtin_unreachable();
542 }
543 }
544
545 static inline void
546 uncmon_write_counter_locked_l(__unused unsigned int monid, unsigned int ctr,
547 uint64_t count)
548 {
549 assert(count < UPMC_MAX);
550 assert(ctr < UNCORE_NCTRS);
551 switch (ctr) {
552 #define UPMC_WR(CTR, COUNT) case (CTR): \
553 return __builtin_arm_wsr64(__MSR_STR(UPMC ## CTR), (COUNT))
554 UPMC_ALL(UPMC_WR, count);
555 #undef UPMC_WR
556 default:
557 panic("monotonic: invalid counter write %u", ctr);
558 }
559 }
560
561 #if UNCORE_PER_CLUSTER
562
563 uintptr_t upmc_offs[UNCORE_NCTRS] = {
564 [0] = 0x4100, [1] = 0x4248, [2] = 0x4110, [3] = 0x4250, [4] = 0x4120,
565 [5] = 0x4258, [6] = 0x4130, [7] = 0x4260, [8] = 0x4140, [9] = 0x4268,
566 [10] = 0x4150, [11] = 0x4270, [12] = 0x4160, [13] = 0x4278,
567 [14] = 0x4170, [15] = 0x4280,
568 };
569
570 static inline uint64_t
571 uncmon_read_counter_locked_r(unsigned int mon_id, unsigned int ctr)
572 {
573 assert(mon_id < uncore_nmonitors());
574 assert(ctr < UNCORE_NCTRS);
575 return *(uint64_t *)(cpm_impl[mon_id] + upmc_offs[ctr]);
576 }
577
578 static inline void
579 uncmon_write_counter_locked_r(unsigned int mon_id, unsigned int ctr,
580 uint64_t count)
581 {
582 assert(count < UPMC_MAX);
583 assert(ctr < UNCORE_NCTRS);
584 assert(mon_id < uncore_nmonitors());
585 *(uint64_t *)(cpm_impl[mon_id] + upmc_offs[ctr]) = count;
586 }
587
588 #endif /* UNCORE_PER_CLUSTER */
589
590 static inline void
591 uncmon_update_locked(unsigned int monid, unsigned int __unused curid,
592 unsigned int ctr)
593 {
594 struct uncore_monitor *mon = &uncore_monitors[monid];
595 if (!mon->um_sleeping) {
596 uint64_t snap = 0;
597 #if UNCORE_PER_CLUSTER
598 snap = uncmon_read_counter_locked_r(monid, ctr);
599 #else /* UNCORE_PER_CLUSTER */
600 snap = uncmon_read_counter_locked_l(monid, ctr);
601 #endif /* UNCORE_PER_CLUSTER */
602 if (snap < mon->um_snaps[ctr]) {
603 #if MACH_ASSERT
604 #if UNCORE_PER_CLUSTER
605 uint64_t remote_value = uncmon_read_counter_locked_r(monid, ctr);
606 #endif /* UNCORE_PER_CLUSTER */
607 panic("monotonic: UPMC%d on UPMU %d went backwards from "
608 "%llx to %llx, read via %s, last was %s from UPMU %hhd%s"
609 #if UNCORE_PER_CLUSTER
610 ", re-read remote value is %llx"
611 #endif /* UNCORE_PER_CLUSTER */
612 , ctr,
613 monid, mon->um_snaps[ctr], snap,
614 curid == monid ? "local" : "remote",
615 mon->um_last_read_id == monid ? "local" : "remote",
616 mon->um_last_read_id,
617 mon->um_read_since_sleep ? "" : ", first read since sleep"
618 #if UNCORE_PER_CLUSTER
619 , remote_value
620 #endif /* UNCORE_PER_CLUSTER */
621 );
622 #else /* MACH_ASSERT */
623 snap = mon->um_snaps[ctr];
624 #endif /* !MACH_ASSERT */
625 }
626 mon->um_counts[ctr] += snap - mon->um_snaps[ctr];
627 mon->um_snaps[ctr] = snap;
628 }
629 }
630
631 static inline void
632 uncmon_program_events_locked_l(unsigned int monid)
633 {
634 /*
635 * UPMESR[01] is the event selection register that determines which event a
636 * counter will count.
637 */
638 CTRL_REG_SET("UPMESR0_EL1", uncore_config.uc_events.uce_regs[0]);
639
640 #if UNCORE_NCTRS > 8
641 CTRL_REG_SET("UPMESR1_EL1", uncore_config.uc_events.uce_regs[1]);
642 #endif /* UNCORE_NCTRS > 8 */
643
644 /*
645 * UPMECM[0123] are the event core masks for each counter -- whether or not
646 * that counter counts events generated by an agent. These are set to all
647 * ones so the uncore counters count events from all cores.
648 *
649 * The bits are based off the start of the cluster -- e.g. even if a core
650 * has a CPU ID of 4, it might be the first CPU in a cluster. Shift the
651 * registers right by the ID of the first CPU in the cluster.
652 */
653 CTRL_REG_SET("UPMECM0_EL1",
654 uncore_config.uc_cpu_masks[monid].uccm_regs[0]);
655 CTRL_REG_SET("UPMECM1_EL1",
656 uncore_config.uc_cpu_masks[monid].uccm_regs[1]);
657
658 #if UNCORE_NCTRS > 8
659 CTRL_REG_SET("UPMECM2_EL1",
660 uncore_config.uc_cpu_masks[monid].uccm_regs[2]);
661 CTRL_REG_SET("UPMECM3_EL1",
662 uncore_config.uc_cpu_masks[monid].uccm_regs[3]);
663 #endif /* UNCORE_NCTRS > 8 */
664 }
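/*
 * Worked example of the shift described above, assuming a cluster whose
 * first CPU ID is 4: a caller-supplied cpu_mask of 0x30 (CPUs 4 and 5)
 * becomes a per-cluster mask of 0x3 after the `cpu_mask >> first_cpu_id`
 * shift done in uncore_add, so UPMECM0 sees cores numbered relative to the
 * start of its own cluster.
 */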
665
666 #if UNCORE_PER_CLUSTER
667
668 static inline void
669 uncmon_program_events_locked_r(unsigned int monid)
670 {
671 const uintptr_t upmesr_offs[2] = {[0] = 0x41b0, [1] = 0x41b8, };
672
673 for (unsigned int i = 0; i < sizeof(upmesr_offs) / sizeof(upmesr_offs[0]);
674 i++) {
675 *(uint64_t *)(cpm_impl[monid] + upmesr_offs[i]) =
676 uncore_config.uc_events.uce_regs[i];
677 }
678
679 const uintptr_t upmecm_offs[4] = {
680 [0] = 0x4190, [1] = 0x4198, [2] = 0x41a0, [3] = 0x41a8,
681 };
682
683 for (unsigned int i = 0; i < sizeof(upmecm_offs) / sizeof(upmecm_offs[0]);
684 i++) {
685 *(uint64_t *)(cpm_impl[monid] + upmecm_offs[i]) =
686 uncore_config.uc_cpu_masks[monid].uccm_regs[i];
687 }
688 }
689
690 #endif /* UNCORE_PER_CLUSTER */
691
692 static void
693 uncmon_clear_int_locked_l(__unused unsigned int monid)
694 {
695 __builtin_arm_wsr64("UPMSR_EL1", 0);
696 }
697
698 #if UNCORE_PER_CLUSTER
699
700 static void
701 uncmon_clear_int_locked_r(unsigned int monid)
702 {
703 const uintptr_t upmsr_off = 0x41c0;
704 *(uint64_t *)(cpm_impl[monid] + upmsr_off) = 0;
705 }
706
707 #endif /* UNCORE_PER_CLUSTER */
708
709 /*
710 * Get the PMI mask for the provided `monid` -- that is, the bitmap of CPUs
711 * that should be sent PMIs for a particular monitor.
712 */
713 static uint64_t
714 uncmon_get_pmi_mask(unsigned int monid)
715 {
716 uint64_t pmi_mask = uncore_pmi_mask;
717
718 #if UNCORE_PER_CLUSTER
719 pmi_mask &= topology_info->clusters[monid].cpu_mask;
720 #else /* UNCORE_PER_CLUSTER */
721 #pragma unused(monid)
722 #endif /* !UNCORE_PER_CLUSTER */
723
724 return pmi_mask;
725 }
726
727 /*
728 * Initialization routines for the uncore counters.
729 */
730
731 static void
732 uncmon_init_locked_l(unsigned int monid)
733 {
734 /*
735 * UPMPCM defines the PMI core mask for the UPMCs -- which cores should
736 * receive interrupts on overflow.
737 */
738 CTRL_REG_SET("UPMPCM_EL1", uncmon_get_pmi_mask(monid));
739 uncmon_set_counting_locked_l(monid,
740 mt_uncore_enabled ? uncore_active_ctrs : 0);
741 }
742
743 #if UNCORE_PER_CLUSTER
744
745 static uintptr_t acc_impl[MAX_NMONITORS] = {};
746
747 static void
748 uncmon_init_locked_r(unsigned int monid)
749 {
750 const uintptr_t upmpcm_off = 0x1010;
751
752 *(uint64_t *)(acc_impl[monid] + upmpcm_off) = uncmon_get_pmi_mask(monid);
753 uncmon_set_counting_locked_r(monid,
754 mt_uncore_enabled ? uncore_active_ctrs : 0);
755 }
756
757 #endif /* UNCORE_PER_CLUSTER */
758
759 /*
760 * Initialize the uncore device for monotonic.
761 */
762 static int
763 uncore_init(__unused mt_device_t dev)
764 {
765 #if HAS_UNCORE_CTRS
766 assert(MT_NDEVS > 0);
767 mt_devices[MT_NDEVS - 1].mtd_nmonitors = (uint8_t)uncore_nmonitors();
768 #endif
769
770 #if DEVELOPMENT || DEBUG
771 /*
772 * Development and debug kernels observe the `uncore_pmi_mask` boot-arg,
773 * allowing PMIs to be routed to the CPUs present in the supplied bitmap.
774 * Do some sanity checks on the value provided.
775 */
776 bool parsed_arg = PE_parse_boot_argn("uncore_pmi_mask", &uncore_pmi_mask,
777 sizeof(uncore_pmi_mask));
778 if (parsed_arg) {
779 #if UNCORE_PER_CLUSTER
780 if (__builtin_popcount(uncore_pmi_mask) != (int)uncore_nmonitors()) {
781 panic("monotonic: invalid uncore PMI mask 0x%x", uncore_pmi_mask);
782 }
783 for (unsigned int i = 0; i < uncore_nmonitors(); i++) {
784 if (__builtin_popcountll(uncmon_get_pmi_mask(i)) != 1) {
785 panic("monotonic: invalid uncore PMI CPU for cluster %d in mask 0x%x",
786 i, uncore_pmi_mask);
787 }
788 }
789 #else /* UNCORE_PER_CLUSTER */
790 if (__builtin_popcount(uncore_pmi_mask) != 1) {
791 panic("monotonic: invalid uncore PMI mask 0x%x", uncore_pmi_mask);
792 }
793 #endif /* !UNCORE_PER_CLUSTER */
794 } else
795 #endif /* DEVELOPMENT || DEBUG */
796 {
797 /* arbitrarily route to core 0 in each cluster */
798 uncore_pmi_mask |= 1;
799 }
800 assert(uncore_pmi_mask != 0);
801
802 for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
803 #if UNCORE_PER_CLUSTER
804 ml_topology_cluster_t *cluster = &topology_info->clusters[monid];
805 cpm_impl[monid] = (uintptr_t)cluster->cpm_IMPL_regs;
806 acc_impl[monid] = (uintptr_t)cluster->acc_IMPL_regs;
807 assert(cpm_impl[monid] != 0 && acc_impl[monid] != 0);
808 #endif /* UNCORE_PER_CLUSTER */
809
810 struct uncore_monitor *mon = &uncore_monitors[monid];
811 lck_spin_init(&mon->um_lock, &mt_lock_grp, LCK_ATTR_NULL);
812 }
813
814 mt_uncore_initted = true;
815
816 return 0;
817 }
818
819 /*
820 * Support for monotonic's mtd_read function.
821 */
822
823 static void
824 uncmon_read_all_counters(unsigned int monid, unsigned int curmonid,
825 uint64_t ctr_mask, uint64_t *counts)
826 {
827 struct uncore_monitor *mon = &uncore_monitors[monid];
828
829 int intrs_en = uncmon_lock(mon);
830
831 for (unsigned int ctr = 0; ctr < UNCORE_NCTRS; ctr++) {
832 if (ctr_mask & (1ULL << ctr)) {
833 if (!mon->um_sleeping) {
834 uncmon_update_locked(monid, curmonid, ctr);
835 }
836 counts[ctr] = mon->um_counts[ctr];
837 }
838 }
839 #if MACH_ASSERT
840 mon->um_read_since_sleep = true;
841 #endif /* MACH_ASSERT */
842
843 uncmon_unlock(mon, intrs_en);
844 }
845
846 /*
847 * Read all monitor's counters.
848 */
849 static int
850 uncore_read(uint64_t ctr_mask, uint64_t *counts_out)
851 {
852 assert(ctr_mask != 0);
853 assert(counts_out != NULL);
854
855 if (!uncore_active_ctrs) {
856 return EPWROFF;
857 }
858 if (ctr_mask & ~uncore_active_ctrs) {
859 return EINVAL;
860 }
861
862 unsigned int curmonid = uncmon_get_curid();
863 for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
864 /*
865 * Find this monitor's starting offset into the `counts_out` array.
866 */
867 uint64_t *counts = counts_out + (UNCORE_NCTRS * monid);
868 uncmon_read_all_counters(monid, curmonid, ctr_mask, counts);
869 }
870
871 return 0;
872 }
873
874 /*
875 * Support for monotonic's mtd_add function.
876 */
877
878 /*
879 * Add an event to the current uncore configuration. This doesn't take effect
880 * until the counters are enabled again, so there's no need to involve the
881 * monitors.
882 */
883 static int
884 uncore_add(struct monotonic_config *config, uint32_t *ctr_out)
885 {
886 if (mt_uncore_enabled) {
887 return EBUSY;
888 }
889
890 uint8_t selector = (uint8_t)config->event;
891 uint32_t available = ~uncore_active_ctrs & config->allowed_ctr_mask;
892
893 if (available == 0) {
894 return ENOSPC;
895 }
896
897 if (!cpc_event_allowed(CPC_HW_UPMU, selector)) {
898 return EPERM;
899 }
900
901 uint32_t valid_ctrs = (UINT32_C(1) << UNCORE_NCTRS) - 1;
902 if ((available & valid_ctrs) == 0) {
903 return E2BIG;
904 }
905 /*
906 * Clear the UPMCs the first time an event is added.
907 */
908 unsigned int curmonid = uncmon_get_curid();
909 if (uncore_active_ctrs == 0) {
910 /*
911 * Suspend powerdown until the next reset.
912 */
913 suspend_cluster_powerdown();
914
915 for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
916 struct uncore_monitor *mon = &uncore_monitors[monid];
917 bool remote = monid != curmonid;
918
919 int intrs_en = uncmon_lock(mon);
920 if (!mon->um_sleeping) {
921 for (unsigned int ctr = 0; ctr < UNCORE_NCTRS; ctr++) {
922 if (remote) {
923 #if UNCORE_PER_CLUSTER
924 uncmon_write_counter_locked_r(monid, ctr, 0);
925 #endif /* UNCORE_PER_CLUSTER */
926 } else {
927 uncmon_write_counter_locked_l(monid, ctr, 0);
928 }
929 }
930 }
931 memset(&mon->um_snaps, 0, sizeof(mon->um_snaps));
932 memset(&mon->um_counts, 0, sizeof(mon->um_counts));
933 uncmon_unlock(mon, intrs_en);
934 }
935 }
936
937 uint32_t ctr = __builtin_ffsll(available) - 1;
938
939 uncore_active_ctrs |= UINT64_C(1) << ctr;
940 uncore_config.uc_events.uce_ctrs[ctr] = selector;
941 uint64_t cpu_mask = UINT64_MAX;
942 if (config->cpu_mask != 0) {
943 cpu_mask = config->cpu_mask;
944 }
945 for (unsigned int i = 0; i < uncore_nmonitors(); i++) {
946 #if UNCORE_PER_CLUSTER
947 const unsigned int shift = topology_info->clusters[i].first_cpu_id;
948 #else /* UNCORE_PER_CLUSTER */
949 const unsigned int shift = 0;
950 #endif /* !UNCORE_PER_CLUSTER */
951 uncore_config.uc_cpu_masks[i].uccm_masks[ctr] = (uint16_t)(cpu_mask >> shift);
952 }
953
954 *ctr_out = ctr;
955 return 0;
956 }
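/*
 * Hedged usage sketch (field names taken from the accesses above; the event
 * selector 0x2 is a placeholder, not a documented event):
 *
 *     struct monotonic_config config = {
 *         .event = 0x2,
 *         .allowed_ctr_mask = UINT64_MAX,
 *         .cpu_mask = 0,
 *     };
 *     uint32_t ctr;
 *     int error = uncore_add(&config, &ctr);
 *
 * On success, `ctr` names the lowest free UPMC, the selector and CPU masks
 * are recorded in uncore_config, and counting starts once
 * uncore_set_enabled(true) runs.
 */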
957
958 /*
959 * Support for monotonic's mtd_reset function.
960 */
961
962 /*
963 * Reset all configuration and disable the counters if they're currently
964 * counting.
965 */
966 static void
967 uncore_reset(void)
968 {
969 mt_uncore_enabled = false;
970
971 unsigned int curmonid = uncmon_get_curid();
972
973 if (mt_owns_counters()) {
974 if (uncore_active_ctrs != 0) {
975 resume_cluster_powerdown();
976 }
977
978 for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
979 struct uncore_monitor *mon = &uncore_monitors[monid];
980 bool remote = monid != curmonid;
981
982 int intrs_en = uncmon_lock(mon);
983 if (!mon->um_sleeping) {
984 if (remote) {
985 #if UNCORE_PER_CLUSTER
986 uncmon_set_counting_locked_r(monid, 0);
987 #endif /* UNCORE_PER_CLUSTER */
988 } else {
989 uncmon_set_counting_locked_l(monid, 0);
990 }
991
992 for (int ctr = 0; ctr < UNCORE_NCTRS; ctr++) {
993 if (uncore_active_ctrs & (1U << ctr)) {
994 if (remote) {
995 #if UNCORE_PER_CLUSTER
996 uncmon_write_counter_locked_r(monid, ctr, 0);
997 #endif /* UNCORE_PER_CLUSTER */
998 } else {
999 uncmon_write_counter_locked_l(monid, ctr, 0);
1000 }
1001 }
1002 }
1003 }
1004
1005 memset(&mon->um_snaps, 0, sizeof(mon->um_snaps));
1006 memset(&mon->um_counts, 0, sizeof(mon->um_counts));
1007 if (!mon->um_sleeping) {
1008 if (remote) {
1009 #if UNCORE_PER_CLUSTER
1010 uncmon_clear_int_locked_r(monid);
1011 #endif /* UNCORE_PER_CLUSTER */
1012 } else {
1013 uncmon_clear_int_locked_l(monid);
1014 }
1015 }
1016
1017 uncmon_unlock(mon, intrs_en);
1018 }
1019 }
1020
1021 uncore_active_ctrs = 0;
1022 memset(&uncore_config, 0, sizeof(uncore_config));
1023
1024 if (mt_owns_counters()) {
1025 for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
1026 struct uncore_monitor *mon = &uncore_monitors[monid];
1027 bool remote = monid != curmonid;
1028
1029 int intrs_en = uncmon_lock(mon);
1030 if (!mon->um_sleeping) {
1031 if (remote) {
1032 #if UNCORE_PER_CLUSTER
1033 uncmon_program_events_locked_r(monid);
1034 #endif /* UNCORE_PER_CLUSTER */
1035 } else {
1036 uncmon_program_events_locked_l(monid);
1037 }
1038 }
1039 uncmon_unlock(mon, intrs_en);
1040 }
1041 }
1042 }
1043
1044 /*
1045 * Support for monotonic's mtd_enable function.
1046 */
1047
1048 static void
1049 uncmon_set_enabled_l(unsigned int monid, bool enable)
1050 {
1051 struct uncore_monitor *mon = &uncore_monitors[monid];
1052 int intrs_en = uncmon_lock(mon);
1053
1054 if (enable) {
1055 uncmon_init_locked_l(monid);
1056 uncmon_program_events_locked_l(monid);
1057 uncmon_set_counting_locked_l(monid, uncore_active_ctrs);
1058 } else {
1059 uncmon_set_counting_locked_l(monid, 0);
1060 }
1061
1062 uncmon_unlock(mon, intrs_en);
1063 }
1064
1065 #if UNCORE_PER_CLUSTER
1066
1067 static void
1068 uncmon_set_enabled_r(unsigned int monid, bool enable)
1069 {
1070 struct uncore_monitor *mon = &uncore_monitors[monid];
1071 int intrs_en = uncmon_lock(mon);
1072
1073 if (!mon->um_sleeping) {
1074 if (enable) {
1075 uncmon_init_locked_r(monid);
1076 uncmon_program_events_locked_r(monid);
1077 uncmon_set_counting_locked_r(monid, uncore_active_ctrs);
1078 } else {
1079 uncmon_set_counting_locked_r(monid, 0);
1080 }
1081 }
1082
1083 uncmon_unlock(mon, intrs_en);
1084 }
1085
1086 #endif /* UNCORE_PER_CLUSTER */
1087
1088 static void
1089 uncore_set_enabled(bool enable)
1090 {
1091 mt_uncore_enabled = enable;
1092
1093 unsigned int curmonid = uncmon_get_curid();
1094 for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
1095 if (monid != curmonid) {
1096 #if UNCORE_PER_CLUSTER
1097 uncmon_set_enabled_r(monid, enable);
1098 #endif /* UNCORE_PER_CLUSTER */
1099 } else {
1100 uncmon_set_enabled_l(monid, enable);
1101 }
1102 }
1103 }
1104
1105 /*
1106 * Hooks in the machine layer.
1107 */
1108
1109 static void
1110 uncore_fiq(uint64_t upmsr)
1111 {
1112 /*
1113 * Determine which counters overflowed.
1114 */
1115 uint64_t disable_ctr_mask = (upmsr & UPMSR_OVF_MASK) >> UPMSR_OVF_POS;
1116 /* should not receive interrupts from inactive counters */
1117 assert(!(disable_ctr_mask & ~uncore_active_ctrs));
1118
1119 if (uncore_active_ctrs == 0) {
1120 return;
1121 }
1122
1123 unsigned int monid = uncmon_get_curid();
1124 struct uncore_monitor *mon = &uncore_monitors[monid];
1125
1126 int intrs_en = uncmon_lock(mon);
1127
1128 /*
1129 * Disable any counters that overflowed.
1130 */
1131 uncmon_set_counting_locked_l(monid,
1132 uncore_active_ctrs & ~disable_ctr_mask);
1133
1134 /*
1135 * With the overflowing counters disabled, capture their counts and reset
1136 * the UPMCs and their snapshots to 0.
1137 */
1138 for (unsigned int ctr = 0; ctr < UNCORE_NCTRS; ctr++) {
1139 if (UPMSR_OVF(upmsr, ctr)) {
1140 uncmon_update_locked(monid, monid, ctr);
1141 mon->um_snaps[ctr] = 0;
1142 uncmon_write_counter_locked_l(monid, ctr, 0);
1143 }
1144 }
1145
1146 /*
1147 * Acknowledge the interrupt, now that any overflowed PMCs have been reset.
1148 */
1149 uncmon_clear_int_locked_l(monid);
1150
1151 /*
1152 * Re-enable all active counters.
1153 */
1154 uncmon_set_counting_locked_l(monid, uncore_active_ctrs);
1155
1156 uncmon_unlock(mon, intrs_en);
1157 }
1158
1159 static void
1160 uncore_save(void)
1161 {
1162 if (!uncore_active_ctrs) {
1163 return;
1164 }
1165
1166 unsigned int curmonid = uncmon_get_curid();
1167
1168 for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
1169 struct uncore_monitor *mon = &uncore_monitors[monid];
1170 int intrs_en = uncmon_lock(mon);
1171
1172 if (mt_uncore_enabled) {
1173 if (monid != curmonid) {
1174 #if UNCORE_PER_CLUSTER
1175 uncmon_set_counting_locked_r(monid, 0);
1176 #endif /* UNCORE_PER_CLUSTER */
1177 } else {
1178 uncmon_set_counting_locked_l(monid, 0);
1179 }
1180 }
1181
1182 for (unsigned int ctr = 0; ctr < UNCORE_NCTRS; ctr++) {
1183 if (uncore_active_ctrs & (1U << ctr)) {
1184 uncmon_update_locked(monid, curmonid, ctr);
1185 mon->um_snaps[ctr] = 0;
1186 uncmon_write_counter_locked_l(monid, ctr, 0);
1187 }
1188 }
1189
1190 mon->um_sleeping = true;
1191 uncmon_unlock(mon, intrs_en);
1192 }
1193 }
1194
1195 static void
1196 uncore_restore(void)
1197 {
1198 if (!uncore_active_ctrs) {
1199 return;
1200 }
1201 unsigned int curmonid = uncmon_get_curid();
1202
1203 struct uncore_monitor *mon = &uncore_monitors[curmonid];
1204 int intrs_en = uncmon_lock(mon);
1205 if (!mon->um_sleeping) {
1206 goto out;
1207 }
1208
1209 for (unsigned int ctr = 0; ctr < UNCORE_NCTRS; ctr++) {
1210 if (uncore_active_ctrs & (1U << ctr)) {
1211 uncmon_write_counter_locked_l(curmonid, ctr, mon->um_snaps[ctr]);
1212 }
1213 }
1214 uncmon_program_events_locked_l(curmonid);
1215 uncmon_init_locked_l(curmonid);
1216 mon->um_sleeping = false;
1217 #if MACH_ASSERT
1218 mon->um_read_since_sleep = false;
1219 #endif /* MACH_ASSERT */
1220
1221 out:
1222 uncmon_unlock(mon, intrs_en);
1223 }
1224
1225 #endif /* HAS_UNCORE_CTRS */
1226
1227 #pragma mark common hooks
1228
1229 void
1230 mt_early_init(void)
1231 {
1232 topology_info = ml_get_topology_info();
1233 }
1234
1235 void
1236 mt_cpu_idle(cpu_data_t *cpu)
1237 {
1238 core_idle(cpu);
1239 }
1240
1241 void
1242 mt_cpu_run(cpu_data_t *cpu)
1243 {
1244 struct mt_cpu *mtc;
1245
1246 assert(cpu != NULL);
1247 assert(ml_get_interrupts_enabled() == FALSE);
1248
1249 mtc = &cpu->cpu_monotonic;
1250
1251 for (int i = 0; i < MT_CORE_NFIXED; i++) {
1252 mt_core_set_snap(i, mtc->mtc_snaps[i]);
1253 }
1254
1255 /* re-enable the counters */
1256 core_init_execution_modes();
1257
1258 core_set_enabled();
1259 }
1260
1261 void
1262 mt_cpu_down(cpu_data_t *cpu)
1263 {
1264 mt_cpu_idle(cpu);
1265 }
1266
1267 void
1268 mt_cpu_up(cpu_data_t *cpu)
1269 {
1270 mt_cpu_run(cpu);
1271 }
1272
1273 void
1274 mt_sleep(void)
1275 {
1276 #if HAS_UNCORE_CTRS
1277 uncore_save();
1278 #endif /* HAS_UNCORE_CTRS */
1279 }
1280
1281 void
1282 mt_wake_per_core(void)
1283 {
1284 #if HAS_UNCORE_CTRS
1285 if (mt_uncore_initted) {
1286 uncore_restore();
1287 }
1288 #endif /* HAS_UNCORE_CTRS */
1289 }
1290
1291 uint64_t
1292 mt_count_pmis(void)
1293 {
1294 uint64_t npmis = 0;
1295 for (unsigned int i = 0; i < topology_info->num_cpus; i++) {
1296 cpu_data_t *cpu = (cpu_data_t *)CpuDataEntries[topology_info->cpus[i].cpu_id].cpu_data_vaddr;
1297 npmis += cpu->cpu_monotonic.mtc_npmis;
1298 }
1299 return npmis;
1300 }
1301
1302 static void
1303 mt_cpu_pmi(cpu_data_t *cpu, uint64_t pmcr0)
1304 {
1305 assert(cpu != NULL);
1306 assert(ml_get_interrupts_enabled() == FALSE);
1307
1308 __builtin_arm_wsr64("PMCR0_EL1", PMCR0_INIT);
1309 /*
1310 * Ensure the CPMU has flushed any increments at this point, so PMSR is up
1311 * to date.
1312 */
1313 __builtin_arm_isb(ISB_SY);
1314
1315 cpu->cpu_monotonic.mtc_npmis += 1;
1316 cpu->cpu_stat.pmi_cnt_wake += 1;
1317
1318 #if MONOTONIC_DEBUG
1319 if (!PMCR0_PMI(pmcr0)) {
1320 kprintf("monotonic: mt_cpu_pmi but no PMI (PMCR0 = %#llx)\n",
1321 pmcr0);
1322 }
1323 #else /* MONOTONIC_DEBUG */
1324 #pragma unused(pmcr0)
1325 #endif /* !MONOTONIC_DEBUG */
1326
1327 uint64_t pmsr = __builtin_arm_rsr64("PMSR_EL1");
1328
1329 #if MONOTONIC_DEBUG
1330 printf("monotonic: cpu = %d, PMSR = 0x%llx, PMCR0 = 0x%llx\n",
1331 cpu_number(), pmsr, pmcr0);
1332 #endif /* MONOTONIC_DEBUG */
1333
1334 #if MACH_ASSERT
1335 uint64_t handled = 0;
1336 #endif /* MACH_ASSERT */
1337
1338 /*
1339 * monotonic handles any fixed counter PMIs.
1340 */
1341 for (unsigned int i = 0; i < MT_CORE_NFIXED; i++) {
1342 if ((pmsr & PMSR_OVF(i)) == 0) {
1343 continue;
1344 }
1345
1346 #if MACH_ASSERT
1347 handled |= 1ULL << i;
1348 #endif /* MACH_ASSERT */
1349 uint64_t count = mt_cpu_update_count(cpu, i);
1350 cpu->cpu_monotonic.mtc_counts[i] += count;
1351 mt_core_set_snap(i, mt_core_reset_values[i]);
1352 cpu->cpu_monotonic.mtc_snaps[i] = mt_core_reset_values[i];
1353
1354 if (mt_microstackshots && mt_microstackshot_ctr == i) {
1355 bool user_mode = false;
1356 arm_saved_state_t *state = get_user_regs(current_thread());
1357 if (state) {
1358 user_mode = PSR64_IS_USER(get_saved_state_cpsr(state));
1359 }
1360 KDBG_RELEASE(KDBG_EVENTID(DBG_MONOTONIC, DBG_MT_DEBUG, 1),
1361 mt_microstackshot_ctr, user_mode);
1362 mt_microstackshot_pmi_handler(user_mode, mt_microstackshot_ctx);
1363 } else if (mt_debug) {
1364 KDBG_RELEASE(KDBG_EVENTID(DBG_MONOTONIC, DBG_MT_DEBUG, 2),
1365 i, count);
1366 }
1367 }
1368
1369 /*
1370 * KPC handles the configurable counter PMIs.
1371 */
1372 for (unsigned int i = MT_CORE_NFIXED; i < CORE_NCTRS; i++) {
1373 if (pmsr & PMSR_OVF(i)) {
1374 #if MACH_ASSERT
1375 handled |= 1ULL << i;
1376 #endif /* MACH_ASSERT */
1377 extern void kpc_pmi_handler(unsigned int ctr);
1378 kpc_pmi_handler(i);
1379 }
1380 }
1381
1382 #if MACH_ASSERT
1383 uint64_t pmsr_after_handling = __builtin_arm_rsr64("PMSR_EL1");
1384 if (pmsr_after_handling != 0) {
1385 unsigned int first_ctr_ovf = __builtin_ffsll(pmsr_after_handling) - 1;
1386 uint64_t count = 0;
1387 const char *extra = "";
1388 if (first_ctr_ovf >= CORE_NCTRS) {
1389 extra = " (invalid counter)";
1390 } else {
1391 count = mt_core_snap(first_ctr_ovf);
1392 }
1393
1394 panic("monotonic: PMI status not cleared on exit from handler, "
1395 "PMSR = 0x%llx HANDLE -> -> 0x%llx, handled 0x%llx, "
1396 "PMCR0 = 0x%llx, PMC%d = 0x%llx%s", pmsr, pmsr_after_handling,
1397 handled, __builtin_arm_rsr64("PMCR0_EL1"), first_ctr_ovf, count, extra);
1398 }
1399 #endif /* MACH_ASSERT */
1400
1401 core_set_enabled();
1402 }
1403
1404 #if CPMU_AIC_PMI
1405 void
1406 mt_cpmu_aic_pmi(cpu_id_t source)
1407 {
1408 struct cpu_data *curcpu = getCpuDatap();
1409 if (source != curcpu->interrupt_nub) {
1410 panic("monotonic: PMI from IOCPU %p delivered to %p", source,
1411 curcpu->interrupt_nub);
1412 }
1413 mt_cpu_pmi(curcpu, __builtin_arm_rsr64("PMCR0_EL1"));
1414 }
1415 #endif /* CPMU_AIC_PMI */
1416
1417 void
1418 mt_fiq(void *cpu, uint64_t pmcr0, uint64_t upmsr)
1419 {
1420 #if CPMU_AIC_PMI
1421 #pragma unused(cpu, pmcr0)
1422 #else /* CPMU_AIC_PMI */
1423 mt_cpu_pmi(cpu, pmcr0);
1424 #endif /* !CPMU_AIC_PMI */
1425
1426 #if HAS_UNCORE_CTRS
1427 if (upmsr != 0) {
1428 uncore_fiq(upmsr);
1429 }
1430 #else /* HAS_UNCORE_CTRS */
1431 #pragma unused(upmsr)
1432 #endif /* !HAS_UNCORE_CTRS */
1433 }
1434
1435 void
1436 mt_ownership_change(bool available)
1437 {
1438 #if HAS_UNCORE_CTRS
1439 /*
1440 * No need to take the lock here, as this is only manipulated in the UPMU
1441 * when the current task already owns the counters and is on its way out.
1442 */
1443 if (!available && uncore_active_ctrs) {
1444 uncore_reset();
1445 }
1446 #else
1447 #pragma unused(available)
1448 #endif /* HAS_UNCORE_CTRS */
1449 }
1450
1451 static uint32_t mt_xc_sync;
1452
1453 static void
1454 mt_microstackshot_start_remote(__unused void *arg)
1455 {
1456 cpu_data_t *cpu = getCpuDatap();
1457
1458 __builtin_arm_wsr64("PMCR0_EL1", PMCR0_INIT);
1459
1460 for (int i = 0; i < MT_CORE_NFIXED; i++) {
1461 uint64_t count = mt_cpu_update_count(cpu, i);
1462 cpu->cpu_monotonic.mtc_counts[i] += count;
1463 mt_core_set_snap(i, mt_core_reset_values[i]);
1464 cpu->cpu_monotonic.mtc_snaps[i] = mt_core_reset_values[i];
1465 }
1466
1467 core_set_enabled();
1468
1469 if (os_atomic_dec(&mt_xc_sync, relaxed) == 0) {
1470 thread_wakeup((event_t)&mt_xc_sync);
1471 }
1472 }
1473
1474 int
1475 mt_microstackshot_start_arch(uint64_t period)
1476 {
1477 uint64_t reset_value = 0;
1478 int ovf = os_sub_overflow(CTR_MAX, period, &reset_value);
1479 if (ovf) {
1480 return ERANGE;
1481 }
1482
1483 mt_core_reset_values[mt_microstackshot_ctr] = reset_value;
1484 cpu_broadcast_xcall(&mt_xc_sync, TRUE, mt_microstackshot_start_remote,
1485 mt_microstackshot_start_remote /* cannot pass NULL */);
1486 return 0;
1487 }
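/*
 * Worked example: the fixed counters count up and deliver a PMI on overflow,
 * so a sampling period of P is programmed as a reset value of CTR_MAX - P.
 * With 48-bit PMCs (PMC_WIDTH == 47), a period of 1,000,000 events gives
 * 0x7fffffffffff - 0xf4240 = 0x7ffffff0bdbf; os_sub_overflow rejects any
 * period larger than CTR_MAX with ERANGE.
 */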
1488
1489 #pragma mark dev nodes
1490
1491 struct mt_device mt_devices[] = {
1492 [0] = {
1493 .mtd_name = "core",
1494 .mtd_init = core_init,
1495 },
1496 #if HAS_UNCORE_CTRS
1497 [1] = {
1498 .mtd_name = "uncore",
1499 .mtd_init = uncore_init,
1500 .mtd_add = uncore_add,
1501 .mtd_reset = uncore_reset,
1502 .mtd_enable = uncore_set_enabled,
1503 .mtd_read = uncore_read,
1504
1505 .mtd_ncounters = UNCORE_NCTRS,
1506 }
1507 #endif /* HAS_UNCORE_CTRS */
1508 };
1509
1510 static_assert(
1511 (sizeof(mt_devices) / sizeof(mt_devices[0])) == MT_NDEVS,
1512 "MT_NDEVS macro should be same as the length of mt_devices");
1513