1 /*
2 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31
32 #include <pexpert/pexpert.h>
33 #include <arm/cpuid.h>
34 #include <arm/cpuid_internal.h>
35 #include <arm/cpu_data_internal.h>
36 #include <arm64/proc_reg.h>
37 #include <kern/lock_rw.h>
38 #include <vm/vm_page.h>
39
40 #include <libkern/section_keywords.h>
41
/* Temporary types to aid decoding,
 * Everything in Little Endian */

/*
 * Decoded view of the CLIDR (Cache Level ID) register as read by
 * machine_read_clidr().  Fields are declared LSB-first.
 */
typedef struct {
	uint32_t
	    Ctype1:3,  /* 2:0 - Cache type, level 1 */
	    Ctype2:3,  /* 5:3 - Cache type, level 2 */
	    Ctype3:3,  /* 8:6 - Cache type, level 3 */
	    Ctypes:15, /* 23:9 - Cache types, levels 4-7 - Don't Care */
	    LoC:3,     /* 26:24 - Level of Coherency */
	    LoU:3,     /* 29:27 - Level of Unification */
	    RAZ:2;     /* 31:30 - Read-As-Zero */
} arm_cache_clidr_t;
55
/* Overlay giving both the raw and the decoded view of a CLIDR value. */
typedef union {
	arm_cache_clidr_t bits;  /* decoded bitfield view */
	uint32_t          value; /* raw register value */
} arm_cache_clidr_info_t;
60
61
/*
 * Decoded view of the CCSIDR (Cache Size ID) register for the cache
 * currently selected via CSSELR.  Size fields use minus-one / log2
 * encodings; see the decode expressions in do_cacheid().
 */
typedef struct {
	uint32_t
	    LineSize:3, /* 2:0 - log2(words per line) minus 2 (decoded as 4 * (1 << (LineSize + 2)) bytes) */
	    Assoc:10,   /* 12:3 - Associativity of cache, minus 1 */
	    NumSets:15, /* 27:13 - Number of sets in cache, minus 1 */
	    c_type:4;   /* 31:28 - Cache type */
} arm_cache_ccsidr_t;
69
70
/* Overlay giving both the raw and the decoded view of a CCSIDR value. */
typedef union {
	arm_cache_ccsidr_t bits;  /* decoded bitfield view */
	uint32_t           value; /* raw register value */
} arm_cache_ccsidr_info_t;
75
/* Statics */

/* MIDR-derived CPU identity; populated once by do_cpuid(). */
static SECURITY_READ_ONLY_LATE(arm_cpu_info_t) cpuid_cpu_info;
/* Boot CPU's cluster cache geometry; returned by cache_info(). */
static SECURITY_READ_ONLY_LATE(cache_info_t *) cpuid_cache_info_boot_cpu;
/* Per-cluster-type cache geometry, filled lazily by do_cacheid(). */
static cache_info_t cpuid_cache_info[MAX_CPU_TYPES] = { 0 };
/* Bit i set once a CPU of cluster type i has claimed cache-geometry parsing. */
static _Atomic uint8_t cpuid_cache_info_bitmap = 0;
82
83 /* Code */
84
/*
 * do_cpuid:
 * Populate cpuid_cpu_info from the MIDR and record the architecture
 * revision for this build (ARMv8E when Apple pointer authentication is
 * configured, plain ARMv8 otherwise).
 */
__private_extern__
void
do_cpuid(void)
{
	/* Raw main ID register value (implementor/part/revision fields). */
	cpuid_cpu_info.value = machine_read_midr();
#if (__ARM_ARCH__ == 8)

#if defined(HAS_APPLE_PAC)
	/* Advertise the PAC-enabled architecture variant. */
	cpuid_cpu_info.arm_info.arm_arch = CPU_ARCH_ARMv8E;
#else /* defined(HAS_APPLE_PAC) */
	cpuid_cpu_info.arm_info.arm_arch = CPU_ARCH_ARMv8;
#endif /* defined(HAS_APPLE_PAC) */

#else /* (__ARM_ARCH__ != 8) */
#error Unsupported arch
#endif /* (__ARM_ARCH__ != 8) */
}
102
103 arm_cpu_info_t *
cpuid_info(void)104 cpuid_info(void)
105 {
106 return &cpuid_cpu_info;
107 }
108
109 int
cpuid_get_cpufamily(void)110 cpuid_get_cpufamily(void)
111 {
112 int cpufamily = 0;
113
114 switch (cpuid_info()->arm_info.arm_implementor) {
115 case CPU_VID_ARM:
116 switch (cpuid_info()->arm_info.arm_part) {
117 case CPU_PART_CORTEXA9:
118 cpufamily = CPUFAMILY_ARM_14;
119 break;
120 case CPU_PART_CORTEXA8:
121 cpufamily = CPUFAMILY_ARM_13;
122 break;
123 case CPU_PART_CORTEXA7:
124 cpufamily = CPUFAMILY_ARM_15;
125 break;
126 case CPU_PART_1136JFS:
127 case CPU_PART_1176JZFS:
128 cpufamily = CPUFAMILY_ARM_11;
129 break;
130 case CPU_PART_926EJS:
131 case CPU_PART_920T:
132 cpufamily = CPUFAMILY_ARM_9;
133 break;
134 default:
135 cpufamily = CPUFAMILY_UNKNOWN;
136 break;
137 }
138 break;
139
140 case CPU_VID_INTEL:
141 cpufamily = CPUFAMILY_ARM_XSCALE;
142 break;
143
144 case CPU_VID_APPLE:
145 switch (cpuid_info()->arm_info.arm_part) {
146 case CPU_PART_TYPHOON:
147 case CPU_PART_TYPHOON_CAPRI:
148 cpufamily = CPUFAMILY_ARM_TYPHOON;
149 break;
150 case CPU_PART_TWISTER:
151 case CPU_PART_TWISTER_ELBA_MALTA:
152 cpufamily = CPUFAMILY_ARM_TWISTER;
153 break;
154 case CPU_PART_HURRICANE:
155 case CPU_PART_HURRICANE_MYST:
156 cpufamily = CPUFAMILY_ARM_HURRICANE;
157 break;
158 case CPU_PART_MONSOON:
159 case CPU_PART_MISTRAL:
160 cpufamily = CPUFAMILY_ARM_MONSOON_MISTRAL;
161 break;
162 case CPU_PART_VORTEX:
163 case CPU_PART_TEMPEST:
164 case CPU_PART_TEMPEST_M9:
165 case CPU_PART_VORTEX_ARUBA:
166 case CPU_PART_TEMPEST_ARUBA:
167 cpufamily = CPUFAMILY_ARM_VORTEX_TEMPEST;
168 break;
169 case CPU_PART_LIGHTNING:
170 case CPU_PART_THUNDER:
171 case CPU_PART_THUNDER_M10:
172 cpufamily = CPUFAMILY_ARM_LIGHTNING_THUNDER;
173 break;
174 case CPU_PART_FIRESTORM_JADE_CHOP:
175 case CPU_PART_FIRESTORM_JADE_DIE:
176 case CPU_PART_ICESTORM_JADE_CHOP:
177 case CPU_PART_ICESTORM_JADE_DIE:
178 case CPU_PART_FIRESTORM:
179 case CPU_PART_ICESTORM:
180 case CPU_PART_FIRESTORM_TONGA:
181 case CPU_PART_ICESTORM_TONGA:
182 cpufamily = CPUFAMILY_ARM_FIRESTORM_ICESTORM;
183 break;
184 case CPU_PART_BLIZZARD_STATEN:
185 case CPU_PART_AVALANCHE_STATEN:
186 case CPU_PART_BLIZZARD_RHODES_CHOP:
187 case CPU_PART_AVALANCHE_RHODES_CHOP:
188 case CPU_PART_BLIZZARD_RHODES_DIE:
189 case CPU_PART_AVALANCHE_RHODES_DIE:
190 case CPU_PART_BLIZZARD:
191 case CPU_PART_AVALANCHE:
192 cpufamily = CPUFAMILY_ARM_BLIZZARD_AVALANCHE;
193 break;
194 case CPU_PART_EVEREST:
195 case CPU_PART_SAWTOOTH:
196 case CPU_PART_SAWTOOTH_M11:
197 cpufamily = CPUFAMILY_ARM_EVEREST_SAWTOOTH;
198 break;
199 case CPU_PART_ECORE_IBIZA:
200 case CPU_PART_PCORE_IBIZA:
201 cpufamily = CPUFAMILY_ARM_IBIZA;
202 break;
203 case CPU_PART_ECORE_PALMA:
204 case CPU_PART_PCORE_PALMA:
205 cpufamily = CPUFAMILY_ARM_PALMA;
206 break;
207 case CPU_PART_ECORE_COLL:
208 case CPU_PART_PCORE_COLL:
209 cpufamily = CPUFAMILY_ARM_COLL;
210 break;
211 case CPU_PART_ECORE_LOBOS:
212 case CPU_PART_PCORE_LOBOS:
213 cpufamily = CPUFAMILY_ARM_LOBOS;
214 break;
215 case CPU_PART_ECORE_DONAN:
216 case CPU_PART_PCORE_DONAN:
217 cpufamily = CPUFAMILY_ARM_DONAN;
218 break;
219 case CPU_PART_ECORE_BRAVA_S:
220 case CPU_PART_PCORE_BRAVA_S:
221 case CPU_PART_ECORE_BRAVA_C:
222 case CPU_PART_PCORE_BRAVA_C:
223 cpufamily = CPUFAMILY_ARM_BRAVA;
224 break;
225 default:
226 cpufamily = CPUFAMILY_UNKNOWN;
227 break;
228 }
229 break;
230
231 default:
232 cpufamily = CPUFAMILY_UNKNOWN;
233 break;
234 }
235
236 return cpufamily;
237 }
238
239 int
cpuid_get_cpusubfamily(void)240 cpuid_get_cpusubfamily(void)
241 {
242 int cpusubfamily = CPUSUBFAMILY_UNKNOWN;
243
244 if (cpuid_info()->arm_info.arm_implementor != CPU_VID_APPLE) {
245 return cpusubfamily;
246 }
247
248 switch (cpuid_info()->arm_info.arm_part) {
249 case CPU_PART_TYPHOON:
250 case CPU_PART_TWISTER:
251 case CPU_PART_HURRICANE:
252 case CPU_PART_MONSOON:
253 case CPU_PART_MISTRAL:
254 case CPU_PART_VORTEX:
255 case CPU_PART_TEMPEST:
256 case CPU_PART_LIGHTNING:
257 case CPU_PART_THUNDER:
258 case CPU_PART_FIRESTORM:
259 case CPU_PART_ICESTORM:
260 case CPU_PART_BLIZZARD:
261 case CPU_PART_AVALANCHE:
262 case CPU_PART_SAWTOOTH:
263 case CPU_PART_EVEREST:
264 cpusubfamily = CPUSUBFAMILY_ARM_HP;
265 break;
266 case CPU_PART_TYPHOON_CAPRI:
267 case CPU_PART_TWISTER_ELBA_MALTA:
268 case CPU_PART_HURRICANE_MYST:
269 case CPU_PART_VORTEX_ARUBA:
270 case CPU_PART_TEMPEST_ARUBA:
271 case CPU_PART_FIRESTORM_TONGA:
272 case CPU_PART_ICESTORM_TONGA:
273 case CPU_PART_BLIZZARD_STATEN:
274 case CPU_PART_AVALANCHE_STATEN:
275 cpusubfamily = CPUSUBFAMILY_ARM_HG;
276 break;
277 case CPU_PART_TEMPEST_M9:
278 case CPU_PART_THUNDER_M10:
279 case CPU_PART_SAWTOOTH_M11:
280 cpusubfamily = CPUSUBFAMILY_ARM_M;
281 break;
282 case CPU_PART_ECORE_IBIZA:
283 case CPU_PART_PCORE_IBIZA:
284 cpusubfamily = CPUSUBFAMILY_ARM_HG;
285 break;
286 case CPU_PART_ECORE_COLL:
287 case CPU_PART_PCORE_COLL:
288 cpusubfamily = CPUSUBFAMILY_ARM_HP;
289 break;
290 case CPU_PART_ECORE_PALMA:
291 case CPU_PART_PCORE_PALMA:
292 cpusubfamily = CPUSUBFAMILY_ARM_HC_HD;
293 break;
294 case CPU_PART_ECORE_LOBOS:
295 case CPU_PART_PCORE_LOBOS:
296 cpusubfamily = CPUSUBFAMILY_ARM_HS;
297 break;
298 case CPU_PART_FIRESTORM_JADE_CHOP:
299 case CPU_PART_ICESTORM_JADE_CHOP:
300 cpusubfamily = CPUSUBFAMILY_ARM_HS;
301 break;
302 case CPU_PART_FIRESTORM_JADE_DIE:
303 case CPU_PART_ICESTORM_JADE_DIE:
304 cpusubfamily = CPUSUBFAMILY_ARM_HC_HD;
305 break;
306 case CPU_PART_BLIZZARD_RHODES_CHOP:
307 case CPU_PART_AVALANCHE_RHODES_CHOP:
308 cpusubfamily = CPUSUBFAMILY_ARM_HS;
309 break;
310 case CPU_PART_BLIZZARD_RHODES_DIE:
311 case CPU_PART_AVALANCHE_RHODES_DIE:
312 cpusubfamily = CPUSUBFAMILY_ARM_HC_HD;
313 break;
314 case CPU_PART_ECORE_DONAN:
315 case CPU_PART_PCORE_DONAN:
316 cpusubfamily = CPUSUBFAMILY_ARM_HG;
317 break;
318 case CPU_PART_ECORE_BRAVA_S:
319 case CPU_PART_PCORE_BRAVA_S:
320 cpusubfamily = CPUSUBFAMILY_ARM_HS;
321 break;
322 case CPU_PART_ECORE_BRAVA_C:
323 case CPU_PART_PCORE_BRAVA_C:
324 cpusubfamily = CPUSUBFAMILY_ARM_HC_HD;
325 break;
326 default:
327 cpusubfamily = CPUSUBFAMILY_UNKNOWN;
328 break;
329 }
330
331 return cpusubfamily;
332 }
333
/*
 * do_debugid:
 * Ask the machine layer to populate its debug ID state; retrieve the
 * result via arm_debug_info().
 */
void
do_debugid(void)
{
	machine_do_debugid();
}
339
340 arm_debug_info_t *
arm_debug_info(void)341 arm_debug_info(void)
342 {
343 return machine_arm_debug_info();
344 }
345
/*
 * do_mvfpid:
 * Ask the machine layer to populate its MVFP ID state; retrieve the
 * result via arm_mvfp_info().
 *
 * Fix: the previous body was `return machine_do_mvfpid();`.  Returning a
 * void expression from a void function is a C constraint violation
 * (C11 6.8.6.4p1), accepted only as a compiler extension.
 */
void
do_mvfpid(void)
{
	machine_do_mvfpid();
}
351
352 arm_mvfp_info_t
353 *
arm_mvfp_info(void)354 arm_mvfp_info(void)
355 {
356 return machine_arm_mvfp_info();
357 }
358
359
/*
 * do_cacheid:
 * Decode cache geometry (line size, associativity, L1 I/D sizes, and the
 * second-level/LLC size) from the CLIDR/CCSIDR registers and publish it in
 * cpuid_cache_info[] for the calling CPU's cluster type.  Only the first
 * CPU of each cluster type does the parsing; later CPUs of the same type
 * return immediately.  The first caller overall (the boot CPU) also
 * becomes the record returned by cache_info().
 */
void
do_cacheid(void)
{
	arm_cache_clidr_info_t arm_cache_clidr_info;
	arm_cache_ccsidr_info_t arm_cache_ccsidr_info;

	/*
	 * We only need to parse cache geometry parameters once per cluster type.
	 * Skip this if some other core of the same type has already parsed them.
	 * The atomic or-orig makes exactly one CPU per type win the race.
	 */
	cluster_type_t cluster_type = ml_get_topology_info()->cpus[ml_get_cpu_number_local()].cluster_type;
	uint8_t prev_cpuid_cache_info_bitmap = os_atomic_or_orig(&cpuid_cache_info_bitmap,
	    (uint8_t)(1 << cluster_type), acq_rel);
	if (prev_cpuid_cache_info_bitmap & (1 << cluster_type)) {
		return;
	}

	cache_info_t *cpuid_cache_info_p = &cpuid_cache_info[cluster_type];

	arm_cache_clidr_info.value = machine_read_clidr();


	/*
	 * For compatibility purposes with existing callers, let's cache the boot CPU
	 * cache parameters and return those upon any call to cache_info();
	 * (bitmap previously empty => this is the first CPU of any type).
	 */
	if (prev_cpuid_cache_info_bitmap == 0) {
		cpuid_cache_info_boot_cpu = cpuid_cache_info_p;
	}

	/* Select L1 data/unified cache */

	machine_write_csselr(CSSELR_L1, CSSELR_DATA_UNIFIED);
	arm_cache_ccsidr_info.value = machine_read_ccsidr();

	/* CLIDR Ctype1 == 0x4 indicates a unified L1 (no separate I-cache). */
	cpuid_cache_info_p->c_unified = (arm_cache_clidr_info.bits.Ctype1 == 0x4) ? 1 : 0;

	switch (arm_cache_ccsidr_info.bits.c_type) {
	case 0x1:
		cpuid_cache_info_p->c_type = CACHE_WRITE_ALLOCATION;
		break;
	case 0x2:
		cpuid_cache_info_p->c_type = CACHE_READ_ALLOCATION;
		break;
	case 0x4:
		cpuid_cache_info_p->c_type = CACHE_WRITE_BACK;
		break;
	case 0x8:
		cpuid_cache_info_p->c_type = CACHE_WRITE_THROUGH;
		break;
	default:
		cpuid_cache_info_p->c_type = CACHE_UNKNOWN;
	}

	/* LineSize is encoded as log2(words per line) - 2; convert to bytes. */
	cpuid_cache_info_p->c_linesz = 4 * (1 << (arm_cache_ccsidr_info.bits.LineSize + 2));
	cpuid_cache_info_p->c_assoc = (arm_cache_ccsidr_info.bits.Assoc + 1);

	/* I cache size (provisional: recomputed below when the I-cache is separate) */
	cpuid_cache_info_p->c_isize = (arm_cache_ccsidr_info.bits.NumSets + 1) * cpuid_cache_info_p->c_linesz * cpuid_cache_info_p->c_assoc;

	/* D cache size */
	cpuid_cache_info_p->c_dsize = (arm_cache_ccsidr_info.bits.NumSets + 1) * cpuid_cache_info_p->c_linesz * cpuid_cache_info_p->c_assoc;


	/* Probe the second-level/last-level cache if CLIDR reports one. */
	if ((arm_cache_clidr_info.bits.Ctype3 == 0x4) ||
	    (arm_cache_clidr_info.bits.Ctype2 == 0x4) || (arm_cache_clidr_info.bits.Ctype2 == 0x2)) {
		if (arm_cache_clidr_info.bits.Ctype3 == 0x4) {
			/* Select L3 (LLC) if the SoC is new enough to have that.
			 * This will be the second-level cache for the highest-performing ACC. */
			machine_write_csselr(CSSELR_L3, CSSELR_DATA_UNIFIED);
		} else {
			/* Select L2 data cache */
			machine_write_csselr(CSSELR_L2, CSSELR_DATA_UNIFIED);
		}
		arm_cache_ccsidr_info.value = machine_read_ccsidr();

		/* NOTE: c_linesz/c_assoc now reflect the second-level cache. */
		cpuid_cache_info_p->c_linesz = 4 * (1 << (arm_cache_ccsidr_info.bits.LineSize + 2));
		cpuid_cache_info_p->c_assoc = (arm_cache_ccsidr_info.bits.Assoc + 1);
		cpuid_cache_info_p->c_l2size = (arm_cache_ccsidr_info.bits.NumSets + 1) * cpuid_cache_info_p->c_linesz * cpuid_cache_info_p->c_assoc;
		cpuid_cache_info_p->c_inner_cache_size = cpuid_cache_info_p->c_dsize;
		cpuid_cache_info_p->c_bulksize_op = cpuid_cache_info_p->c_l2size;

		/* capri has a 2MB L2 cache unlike every other SoC up to this
		 * point with a 1MB L2 cache, so to get the same performance
		 * gain from coloring, we have to double the number of colors.
		 * Note that in general (and in fact as it's implemented in
		 * i386/cpuid.c), the number of colors is calculated as the
		 * cache line size * the number of sets divided by the page
		 * size. Also note that for H8 devices and up, the page size
		 * will be 16k instead of 4, which will reduce the number of
		 * colors required. Thus, this is really a temporary solution
		 * for capri specifically that we may want to generalize later:
		 *
		 * TODO: Are there any special considerations for our unusual
		 * cache geometries (3MB)?
		 */
		vm_cache_geometry_colors = ((arm_cache_ccsidr_info.bits.NumSets + 1) * cpuid_cache_info_p->c_linesz) / PAGE_SIZE;
		kprintf(" vm_cache_geometry_colors: %d\n", vm_cache_geometry_colors);
	} else {
		/* No second-level cache: bulk operations are sized by the D-cache. */
		cpuid_cache_info_p->c_l2size = 0;

		cpuid_cache_info_p->c_inner_cache_size = cpuid_cache_info_p->c_dsize;
		cpuid_cache_info_p->c_bulksize_op = cpuid_cache_info_p->c_dsize;
	}

	/* Separate I-cache: re-read its geometry via the instruction selector. */
	if (cpuid_cache_info_p->c_unified == 0) {
		machine_write_csselr(CSSELR_L1, CSSELR_INSTR);
		arm_cache_ccsidr_info.value = machine_read_ccsidr();
		uint32_t c_linesz = 4 * (1 << (arm_cache_ccsidr_info.bits.LineSize + 2));
		uint32_t c_assoc = (arm_cache_ccsidr_info.bits.Assoc + 1);
		/* I cache size */
		cpuid_cache_info_p->c_isize = (arm_cache_ccsidr_info.bits.NumSets + 1) * c_linesz * c_assoc;
	}

	/*
	 * Publish the record.  The boot CPU path needs no ordering or wakeup
	 * (cache_info_type() short-circuits for it); other cluster types use a
	 * release store plus thread_wakeup so waiters in cache_info_type()
	 * observe a fully-initialized record.
	 */
	if (cpuid_cache_info_p == cpuid_cache_info_boot_cpu) {
		cpuid_cache_info_p->c_valid = true;
	} else {
		os_atomic_store(&cpuid_cache_info_p->c_valid, true, release);
		thread_wakeup((event_t)&cpuid_cache_info_p->c_valid);
	}

	kprintf("%s() - %u bytes %s cache (I:%u D:%u (%s)), %u-way assoc, %u bytes/line\n",
	    __FUNCTION__,
	    cpuid_cache_info_p->c_dsize + cpuid_cache_info_p->c_isize,
	    ((cpuid_cache_info_p->c_type == CACHE_WRITE_BACK) ? "WB" :
	    (cpuid_cache_info_p->c_type == CACHE_WRITE_THROUGH ? "WT" : "Unknown")),
	    cpuid_cache_info_p->c_isize,
	    cpuid_cache_info_p->c_dsize,
	    (cpuid_cache_info_p->c_unified) ? "unified" : "separate",
	    cpuid_cache_info_p->c_assoc,
	    cpuid_cache_info_p->c_linesz);
}
492
/*
 * cache_info:
 * Return the boot CPU's cache geometry record (set by the first
 * do_cacheid() caller; see the compatibility note there).
 */
cache_info_t *
cache_info(void)
{
	return cpuid_cache_info_boot_cpu;
}
498
/*
 * cache_info_type:
 * Return the cache geometry record for the given cluster type, blocking
 * (uninterruptibly) until a CPU of that type has completed do_cacheid().
 */
cache_info_t *
cache_info_type(cluster_type_t cluster_type)
{
	assert((cluster_type >= 0) && (cluster_type < MAX_CPU_TYPES));
	cache_info_t *ret = &cpuid_cache_info[cluster_type];

	/*
	 * cpuid_cache_info_boot_cpu is always populated by the time
	 * cache_info_type() is callable. Other clusters may not have completed
	 * do_cacheid() yet.
	 */
	if (ret == cpuid_cache_info_boot_cpu) {
		return ret;
	}

	/*
	 * Arm the wait before re-checking c_valid so a wakeup issued by
	 * do_cacheid() between the check and the block is not lost.
	 */
	while (!os_atomic_load(&ret->c_valid, acquire)) {
		assert_wait((event_t)&ret->c_valid, THREAD_UNINT);
		if (os_atomic_load(&ret->c_valid, relaxed)) {
			/* Became valid after arming the wait: cancel instead of blocking. */
			clear_wait(current_thread(), THREAD_AWAKENED);
		} else {
			thread_block(THREAD_CONTINUE_NULL);
		}
	}

	return ret;
}
525