/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#include <pexpert/pexpert.h>
#include <arm/cpuid.h>
#include <arm/cpuid_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm64/proc_reg.h>
#include <kern/lock_rw.h>
#include <vm/vm_page.h>

#include <libkern/section_keywords.h>
/* Temporary types to aid decoding;
 * everything is little endian. */

typedef struct {
	uint32_t
	    Ctype1:3,  /* 2:0 */
	    Ctype2:3,  /* 5:3 */
	    Ctype3:3,  /* 8:6 */
	    Ctypes:15, /* 23:9 - Don't Care */
	    LoC:3,     /* 26:24 - Level of Coherency */
	    LoU:3,     /* 29:27 - Level of Unification */
	    RAZ:2;     /* 31:30 - Read-As-Zero */
} arm_cache_clidr_t;

typedef union {
	arm_cache_clidr_t bits;
	uint32_t          value;
} arm_cache_clidr_info_t;
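
/*
 * For reference (ARMv8 CLIDR_EL1 CtypeN encoding, not defined in this file):
 * 0b000 = no cache, 0b001 = instruction cache only, 0b010 = data cache only,
 * 0b011 = separate I- and D-caches, 0b100 = unified cache. do_cacheid() below
 * keys off the 0x2 and 0x4 values.
 */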


typedef struct {
	uint32_t
	    LineSize:3, /* 2:0   - Number of words in cache line */
	    Assoc:10,   /* 12:3  - Associativity of cache */
	    NumSets:15, /* 27:13 - Number of sets in cache */
	    c_type:4;   /* 31:28 - Cache type */
} arm_cache_ccsidr_t;


typedef union {
	arm_cache_ccsidr_t bits;
	uint32_t           value;
} arm_cache_ccsidr_info_t;
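
/*
 * Note on CCSIDR encodings (ARMv8, pre-FEAT_CCIDX layout): LineSize holds
 * log2(words per line) - 2, so the decode 4 * (1 << (LineSize + 2)) in
 * do_cacheid() yields bytes per line (e.g. LineSize == 2 -> 64-byte lines);
 * Assoc and NumSets are stored as (ways - 1) and (sets - 1) respectively.
 */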

/* Statics */

static SECURITY_READ_ONLY_LATE(arm_cpu_info_t) cpuid_cpu_info;
static SECURITY_READ_ONLY_LATE(cache_info_t *) cpuid_cache_info_boot_cpu;
static cache_info_t cpuid_cache_info[MAX_CPU_TYPES] = { 0 };
static _Atomic uint8_t cpuid_cache_info_bitmap = 0;

/* Code */

__private_extern__
void
do_cpuid(void)
{
	cpuid_cpu_info.value = machine_read_midr();
#if (__ARM_ARCH__ == 8)

#if defined(HAS_APPLE_PAC)
	cpuid_cpu_info.arm_info.arm_arch = CPU_ARCH_ARMv8E;
#else /* defined(HAS_APPLE_PAC) */
	cpuid_cpu_info.arm_info.arm_arch = CPU_ARCH_ARMv8;
#endif /* defined(HAS_APPLE_PAC) */

#else /* (__ARM_ARCH__ != 8) */
#error Unsupported arch
#endif /* (__ARM_ARCH__ != 8) */
}

arm_cpu_info_t *
cpuid_info(void)
{
	return &cpuid_cpu_info;
}

int
cpuid_get_cpufamily(void)
{
	int cpufamily = 0;

	switch (cpuid_info()->arm_info.arm_implementor) {
	case CPU_VID_ARM:
		switch (cpuid_info()->arm_info.arm_part) {
		case CPU_PART_CORTEXA9:
			cpufamily = CPUFAMILY_ARM_14;
			break;
		case CPU_PART_CORTEXA8:
			cpufamily = CPUFAMILY_ARM_13;
			break;
		case CPU_PART_CORTEXA7:
			cpufamily = CPUFAMILY_ARM_15;
			break;
		case CPU_PART_1136JFS:
		case CPU_PART_1176JZFS:
			cpufamily = CPUFAMILY_ARM_11;
			break;
		case CPU_PART_926EJS:
		case CPU_PART_920T:
			cpufamily = CPUFAMILY_ARM_9;
			break;
		default:
			cpufamily = CPUFAMILY_UNKNOWN;
			break;
		}
		break;

	case CPU_VID_INTEL:
		cpufamily = CPUFAMILY_ARM_XSCALE;
		break;

	case CPU_VID_APPLE:
		switch (cpuid_info()->arm_info.arm_part) {
		case CPU_PART_TYPHOON:
		case CPU_PART_TYPHOON_CAPRI:
			cpufamily = CPUFAMILY_ARM_TYPHOON;
			break;
		case CPU_PART_TWISTER:
		case CPU_PART_TWISTER_ELBA_MALTA:
			cpufamily = CPUFAMILY_ARM_TWISTER;
			break;
		case CPU_PART_HURRICANE:
		case CPU_PART_HURRICANE_MYST:
			cpufamily = CPUFAMILY_ARM_HURRICANE;
			break;
		case CPU_PART_MONSOON:
		case CPU_PART_MISTRAL:
			cpufamily = CPUFAMILY_ARM_MONSOON_MISTRAL;
			break;
		case CPU_PART_VORTEX:
		case CPU_PART_TEMPEST:
		case CPU_PART_TEMPEST_M9:
		case CPU_PART_VORTEX_ARUBA:
		case CPU_PART_TEMPEST_ARUBA:
			cpufamily = CPUFAMILY_ARM_VORTEX_TEMPEST;
			break;
		case CPU_PART_LIGHTNING:
		case CPU_PART_THUNDER:
		case CPU_PART_THUNDER_M10:
			cpufamily = CPUFAMILY_ARM_LIGHTNING_THUNDER;
			break;
		case CPU_PART_FIRESTORM_JADE_CHOP:
		case CPU_PART_FIRESTORM_JADE_DIE:
		case CPU_PART_ICESTORM_JADE_CHOP:
		case CPU_PART_ICESTORM_JADE_DIE:
		case CPU_PART_FIRESTORM:
		case CPU_PART_ICESTORM:
		case CPU_PART_FIRESTORM_TONGA:
		case CPU_PART_ICESTORM_TONGA:
			cpufamily = CPUFAMILY_ARM_FIRESTORM_ICESTORM;
			break;
		case CPU_PART_BLIZZARD_STATEN:
		case CPU_PART_AVALANCHE_STATEN:
		case CPU_PART_BLIZZARD_RHODES_CHOP:
		case CPU_PART_AVALANCHE_RHODES_CHOP:
		case CPU_PART_BLIZZARD_RHODES_DIE:
		case CPU_PART_AVALANCHE_RHODES_DIE:
		case CPU_PART_BLIZZARD:
		case CPU_PART_AVALANCHE:
			cpufamily = CPUFAMILY_ARM_BLIZZARD_AVALANCHE;
			break;
		case CPU_PART_EVEREST:
		case CPU_PART_SAWTOOTH:
		case CPU_PART_SAWTOOTH_M11:
			cpufamily = CPUFAMILY_ARM_EVEREST_SAWTOOTH;
			break;
		case CPU_PART_ECORE_IBIZA:
		case CPU_PART_PCORE_IBIZA:
			cpufamily = CPUFAMILY_ARM_IBIZA;
			break;
		case CPU_PART_ECORE_PALMA:
		case CPU_PART_PCORE_PALMA:
			cpufamily = CPUFAMILY_ARM_PALMA;
			break;
		case CPU_PART_ECORE_COLL:
		case CPU_PART_PCORE_COLL:
			cpufamily = CPUFAMILY_ARM_COLL;
			break;
		case CPU_PART_ECORE_LOBOS:
		case CPU_PART_PCORE_LOBOS:
			cpufamily = CPUFAMILY_ARM_LOBOS;
			break;
		case CPU_PART_ECORE_DONAN:
		case CPU_PART_PCORE_DONAN:
			cpufamily = CPUFAMILY_ARM_DONAN;
			break;
		default:
			cpufamily = CPUFAMILY_UNKNOWN;
			break;
		}
		break;

	default:
		cpufamily = CPUFAMILY_UNKNOWN;
		break;
	}

	return cpufamily;
}

int
cpuid_get_cpusubfamily(void)
{
	int cpusubfamily = CPUSUBFAMILY_UNKNOWN;

	if (cpuid_info()->arm_info.arm_implementor != CPU_VID_APPLE) {
		return cpusubfamily;
	}

	switch (cpuid_info()->arm_info.arm_part) {
	case CPU_PART_TYPHOON:
	case CPU_PART_TWISTER:
	case CPU_PART_HURRICANE:
	case CPU_PART_MONSOON:
	case CPU_PART_MISTRAL:
	case CPU_PART_VORTEX:
	case CPU_PART_TEMPEST:
	case CPU_PART_LIGHTNING:
	case CPU_PART_THUNDER:
	case CPU_PART_FIRESTORM:
	case CPU_PART_ICESTORM:
	case CPU_PART_BLIZZARD:
	case CPU_PART_AVALANCHE:
	case CPU_PART_SAWTOOTH:
	case CPU_PART_EVEREST:
		cpusubfamily = CPUSUBFAMILY_ARM_HP;
		break;
	case CPU_PART_TYPHOON_CAPRI:
	case CPU_PART_TWISTER_ELBA_MALTA:
	case CPU_PART_HURRICANE_MYST:
	case CPU_PART_VORTEX_ARUBA:
	case CPU_PART_TEMPEST_ARUBA:
	case CPU_PART_FIRESTORM_TONGA:
	case CPU_PART_ICESTORM_TONGA:
	case CPU_PART_BLIZZARD_STATEN:
	case CPU_PART_AVALANCHE_STATEN:
		cpusubfamily = CPUSUBFAMILY_ARM_HG;
		break;
	case CPU_PART_TEMPEST_M9:
	case CPU_PART_THUNDER_M10:
	case CPU_PART_SAWTOOTH_M11:
		cpusubfamily = CPUSUBFAMILY_ARM_M;
		break;
	case CPU_PART_ECORE_IBIZA:
	case CPU_PART_PCORE_IBIZA:
		cpusubfamily = CPUSUBFAMILY_ARM_HG;
		break;
	case CPU_PART_ECORE_COLL:
	case CPU_PART_PCORE_COLL:
		cpusubfamily = CPUSUBFAMILY_ARM_HP;
		break;
	case CPU_PART_ECORE_PALMA:
	case CPU_PART_PCORE_PALMA:
		cpusubfamily = CPUSUBFAMILY_ARM_HC_HD;
		break;
	case CPU_PART_ECORE_LOBOS:
	case CPU_PART_PCORE_LOBOS:
		cpusubfamily = CPUSUBFAMILY_ARM_HS;
		break;
	case CPU_PART_FIRESTORM_JADE_CHOP:
	case CPU_PART_ICESTORM_JADE_CHOP:
		cpusubfamily = CPUSUBFAMILY_ARM_HS;
		break;
	case CPU_PART_FIRESTORM_JADE_DIE:
	case CPU_PART_ICESTORM_JADE_DIE:
		cpusubfamily = CPUSUBFAMILY_ARM_HC_HD;
		break;
	case CPU_PART_BLIZZARD_RHODES_CHOP:
	case CPU_PART_AVALANCHE_RHODES_CHOP:
		cpusubfamily = CPUSUBFAMILY_ARM_HS;
		break;
	case CPU_PART_BLIZZARD_RHODES_DIE:
	case CPU_PART_AVALANCHE_RHODES_DIE:
		cpusubfamily = CPUSUBFAMILY_ARM_HC_HD;
		break;
	case CPU_PART_ECORE_DONAN:
	case CPU_PART_PCORE_DONAN:
		cpusubfamily = CPUSUBFAMILY_ARM_HG;
		break;
	default:
		cpusubfamily = CPUSUBFAMILY_UNKNOWN;
		break;
	}

	return cpusubfamily;
}

void
do_debugid(void)
{
	machine_do_debugid();
}

arm_debug_info_t *
arm_debug_info(void)
{
	return machine_arm_debug_info();
}

void
do_mvfpid(void)
{
	return machine_do_mvfpid();
}

arm_mvfp_info_t *
arm_mvfp_info(void)
{
	return machine_arm_mvfp_info();
}

void
do_cacheid(void)
{
	arm_cache_clidr_info_t arm_cache_clidr_info;
	arm_cache_ccsidr_info_t arm_cache_ccsidr_info;

	/*
	 * We only need to parse cache geometry parameters once per cluster type.
	 * Skip this if some other core of the same type has already parsed them.
	 */
	cluster_type_t cluster_type = ml_get_topology_info()->cpus[ml_get_cpu_number_local()].cluster_type;
	uint8_t prev_cpuid_cache_info_bitmap = os_atomic_or_orig(&cpuid_cache_info_bitmap,
	    (uint8_t)(1 << cluster_type), acq_rel);
	if (prev_cpuid_cache_info_bitmap & (1 << cluster_type)) {
		return;
	}
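	/*
	 * Note: os_atomic_or_orig() returns the bitmap's prior value, so if
	 * two cores of the same cluster type race here, both set the bit but
	 * only the first (which observed its bit clear) continues; geometry
	 * is parsed exactly once per cluster type.
	 */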

	cache_info_t *cpuid_cache_info_p = &cpuid_cache_info[cluster_type];

	arm_cache_clidr_info.value = machine_read_clidr();

	/*
	 * For compatibility with existing callers, cache the boot CPU's
	 * cache parameters and return those from any call to cache_info().
	 */
	if (prev_cpuid_cache_info_bitmap == 0) {
		cpuid_cache_info_boot_cpu = cpuid_cache_info_p;
	}

	/* Select L1 data/unified cache */

	machine_write_csselr(CSSELR_L1, CSSELR_DATA_UNIFIED);
	arm_cache_ccsidr_info.value = machine_read_ccsidr();

	cpuid_cache_info_p->c_unified = (arm_cache_clidr_info.bits.Ctype1 == 0x4) ? 1 : 0;

	switch (arm_cache_ccsidr_info.bits.c_type) {
	case 0x1:
		cpuid_cache_info_p->c_type = CACHE_WRITE_ALLOCATION;
		break;
	case 0x2:
		cpuid_cache_info_p->c_type = CACHE_READ_ALLOCATION;
		break;
	case 0x4:
		cpuid_cache_info_p->c_type = CACHE_WRITE_BACK;
		break;
	case 0x8:
		cpuid_cache_info_p->c_type = CACHE_WRITE_THROUGH;
		break;
	default:
		cpuid_cache_info_p->c_type = CACHE_UNKNOWN;
	}

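	/*
	 * Decode sketch: cache size is (NumSets + 1) * line size * (Assoc + 1).
	 * With illustrative values (256 sets, 64-byte lines, 8 ways) this
	 * yields 256 * 64 * 8 = 128KB for each of c_isize/c_dsize below.
	 */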
	cpuid_cache_info_p->c_linesz = 4 * (1 << (arm_cache_ccsidr_info.bits.LineSize + 2));
	cpuid_cache_info_p->c_assoc = (arm_cache_ccsidr_info.bits.Assoc + 1);

	/* I cache size */
	cpuid_cache_info_p->c_isize = (arm_cache_ccsidr_info.bits.NumSets + 1) * cpuid_cache_info_p->c_linesz * cpuid_cache_info_p->c_assoc;

	/* D cache size */
	cpuid_cache_info_p->c_dsize = (arm_cache_ccsidr_info.bits.NumSets + 1) * cpuid_cache_info_p->c_linesz * cpuid_cache_info_p->c_assoc;


	if ((arm_cache_clidr_info.bits.Ctype3 == 0x4) ||
	    (arm_cache_clidr_info.bits.Ctype2 == 0x4) || (arm_cache_clidr_info.bits.Ctype2 == 0x2)) {
		if (arm_cache_clidr_info.bits.Ctype3 == 0x4) {
			/* Select L3 (LLC) if the SoC is new enough to have that.
			 * This will be the second-level cache for the highest-performing ACC. */
			machine_write_csselr(CSSELR_L3, CSSELR_DATA_UNIFIED);
		} else {
			/* Select L2 data cache */
			machine_write_csselr(CSSELR_L2, CSSELR_DATA_UNIFIED);
		}
		arm_cache_ccsidr_info.value = machine_read_ccsidr();

		cpuid_cache_info_p->c_linesz = 4 * (1 << (arm_cache_ccsidr_info.bits.LineSize + 2));
		cpuid_cache_info_p->c_assoc = (arm_cache_ccsidr_info.bits.Assoc + 1);
		cpuid_cache_info_p->c_l2size = (arm_cache_ccsidr_info.bits.NumSets + 1) * cpuid_cache_info_p->c_linesz * cpuid_cache_info_p->c_assoc;
		cpuid_cache_info_p->c_inner_cache_size = cpuid_cache_info_p->c_dsize;
		cpuid_cache_info_p->c_bulksize_op = cpuid_cache_info_p->c_l2size;

		/* Capri has a 2MB L2 cache, unlike every other SoC up to this
		 * point, which had a 1MB L2 cache; to get the same performance
		 * gain from coloring, we have to double the number of colors.
		 * Note that in general (and in fact as it's implemented in
		 * i386/cpuid.c), the number of colors is calculated as the
		 * cache line size * the number of sets divided by the page
		 * size. Also note that for H8 devices and up, the page size
		 * will be 16k instead of 4k, which will reduce the number of
		 * colors required. Thus, this is really a temporary solution
		 * for capri specifically that we may want to generalize later:
		 *
		 * TODO: Are there any special considerations for our unusual
		 * cache geometries (3MB)?
		 */
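		/*
		 * Worked example with illustrative numbers (not an actual SoC
		 * spec): a 2MB, 8-way L2 with 64-byte lines has 4096 sets, so
		 * with 4k pages this gives 4096 * 64 / 4096 = 64 colors, twice
		 * the 32 colors of a 1MB L2 of the same shape.
		 */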
		vm_cache_geometry_colors = ((arm_cache_ccsidr_info.bits.NumSets + 1) * cpuid_cache_info_p->c_linesz) / PAGE_SIZE;
		kprintf(" vm_cache_geometry_colors: %d\n", vm_cache_geometry_colors);
	} else {
		cpuid_cache_info_p->c_l2size = 0;

		cpuid_cache_info_p->c_inner_cache_size = cpuid_cache_info_p->c_dsize;
		cpuid_cache_info_p->c_bulksize_op = cpuid_cache_info_p->c_dsize;
	}

	if (cpuid_cache_info_p->c_unified == 0) {
		machine_write_csselr(CSSELR_L1, CSSELR_INSTR);
		arm_cache_ccsidr_info.value = machine_read_ccsidr();
		uint32_t c_linesz = 4 * (1 << (arm_cache_ccsidr_info.bits.LineSize + 2));
		uint32_t c_assoc = (arm_cache_ccsidr_info.bits.Assoc + 1);
		/* I cache size */
		cpuid_cache_info_p->c_isize = (arm_cache_ccsidr_info.bits.NumSets + 1) * c_linesz * c_assoc;
	}

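	/*
	 * Publish the parsed geometry. The release store pairs with the
	 * acquire load in cache_info_type(), so a waiter that sees c_valid
	 * set also sees fully initialized parameters; the boot CPU's entry
	 * is populated before cache_info_type() is callable, so a plain
	 * store suffices there.
	 */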
	if (cpuid_cache_info_p == cpuid_cache_info_boot_cpu) {
		cpuid_cache_info_p->c_valid = true;
	} else {
		os_atomic_store(&cpuid_cache_info_p->c_valid, true, release);
		thread_wakeup((event_t)&cpuid_cache_info_p->c_valid);
	}

	kprintf("%s() - %u bytes %s cache (I:%u D:%u (%s)), %u-way assoc, %u bytes/line\n",
	    __FUNCTION__,
	    cpuid_cache_info_p->c_dsize + cpuid_cache_info_p->c_isize,
	    ((cpuid_cache_info_p->c_type == CACHE_WRITE_BACK) ? "WB" :
	    (cpuid_cache_info_p->c_type == CACHE_WRITE_THROUGH ? "WT" : "Unknown")),
	    cpuid_cache_info_p->c_isize,
	    cpuid_cache_info_p->c_dsize,
	    (cpuid_cache_info_p->c_unified) ? "unified" : "separate",
	    cpuid_cache_info_p->c_assoc,
	    cpuid_cache_info_p->c_linesz);
}

cache_info_t *
cache_info(void)
{
	return cpuid_cache_info_boot_cpu;
}

cache_info_t *
cache_info_type(cluster_type_t cluster_type)
{
	assert((cluster_type >= 0) && (cluster_type < MAX_CPU_TYPES));
	cache_info_t *ret = &cpuid_cache_info[cluster_type];

	/*
	 * cpuid_cache_info_boot_cpu is always populated by the time
	 * cache_info_type() is callable. Other clusters may not have completed
	 * do_cacheid() yet.
	 */
	if (ret == cpuid_cache_info_boot_cpu) {
		return ret;
	}

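	/*
	 * Standard assert_wait()/thread_block() pattern: register for the
	 * wakeup event first, then re-check c_valid to close the race with
	 * thread_wakeup() in do_cacheid(); if the flag was set in between,
	 * cancel the wait instead of blocking.
	 */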
	while (!os_atomic_load(&ret->c_valid, acquire)) {
		assert_wait((event_t)&ret->c_valid, THREAD_UNINT);
		if (os_atomic_load(&ret->c_valid, relaxed)) {
			clear_wait(current_thread(), THREAD_AWAKENED);
		} else {
			thread_block(THREAD_CONTINUE_NULL);
		}
	}

	return ret;
}
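
/*
 * Usage sketch (hypothetical caller, not part of this file): fetch the
 * line size for the current CPU's cluster, e.g. as the stride of a cache
 * maintenance loop.
 *
 *	cache_info_t *ci = cache_info_type(
 *	    ml_get_topology_info()->cpus[ml_get_cpu_number_local()].cluster_type);
 *	uint32_t stride = ci->c_linesz;
 */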