/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#include <pexpert/pexpert.h>
#include <arm/cpuid.h>
#include <arm/cpuid_internal.h>
#include <arm/cpu_data_internal.h>
#include <vm/vm_page.h>
#include "proc_reg.h"

#include <libkern/section_keywords.h>

/* Temporary types to aid decoding;
 * everything is in little-endian bit order */

typedef struct {
	uint32_t
	    Ctype1:3,  /* 2:0 */
	    Ctype2:3,  /* 5:3 */
	    Ctype3:3,  /* 8:6 */
	    Ctypes:15, /* 23:9 - Don't Care */
	    LoC:3,     /* 26:24 - Level of Coherency */
	    LoU:3,     /* 29:27 - Level of Unification */
	    RAZ:2;     /* 31:30 - Read-As-Zero */
} arm_cache_clidr_t;

typedef union {
	arm_cache_clidr_t bits;
	uint32_t          value;
} arm_cache_clidr_info_t;
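
/*
 * Worked example of the CLIDR decoding above (illustrative value, not taken
 * from any particular SoC): a core with separate L1 I/D caches and a unified
 * L2 might report CLIDR = 0x02000023, which decodes as:
 *   Ctype1 = 0b011 (separate instruction and data caches at L1)
 *   Ctype2 = 0b100 (unified L2)
 *   Ctype3 = 0b000 (no L3)
 *   LoC    = 0b010 (coherency maintained to L2)
 */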


typedef struct {
	uint32_t
	    LineSize:3, /* 2:0 - (Log2 of the number of words per line) - 2 */
	    Assoc:10,   /* 12:3 - Associativity of cache - 1 */
	    NumSets:15, /* 27:13 - Number of sets in cache - 1 */
	    c_type:4;   /* 31:28 - Cache type */
} arm_cache_ccsidr_t;


typedef union {
	arm_cache_ccsidr_t bits;
	uint32_t           value;
} arm_cache_ccsidr_info_t;
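
/*
 * Worked example of the CCSIDR decoding above (illustrative value): a 64KB,
 * 8-way, write-back data cache with 64-byte lines might report
 * CCSIDR = 0x400FE03A, which decodes as:
 *   LineSize = 2   -> 4 * (1 << (2 + 2)) = 64 bytes/line
 *   Assoc    = 7   -> 8-way
 *   NumSets  = 127 -> 128 sets; 128 * 64 * 8 = 64KB
 *   c_type   = 0x4 -> write-back (the WT/WB/RA/WA flags live in bits 31:28)
 */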

/* Statics */

static SECURITY_READ_ONLY_LATE(arm_cpu_info_t) cpuid_cpu_info;
static SECURITY_READ_ONLY_LATE(cache_info_t *) cpuid_cache_info_boot_cpu;
static cache_info_t cpuid_cache_info[MAX_CPU_TYPES] = { 0 };
static _Atomic uint8_t cpuid_cache_info_bitmap = 0;

/* Code */

__private_extern__
void
do_cpuid(void)
{
	cpuid_cpu_info.value = machine_read_midr();
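	/*
	 * Architectural MIDR layout for reference: Implementer 31:24,
	 * Variant 23:20, Architecture 19:16, PartNum 15:4, Revision 3:0.
	 * The arm_implementor and arm_part fields consulted elsewhere in
	 * this file are decoded views of this value.
	 */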
#if (__ARM_ARCH__ == 8)

#if defined(HAS_APPLE_PAC)
	cpuid_cpu_info.arm_info.arm_arch = CPU_ARCH_ARMv8E;
#else /* defined(HAS_APPLE_PAC) */
	cpuid_cpu_info.arm_info.arm_arch = CPU_ARCH_ARMv8;
#endif /* defined(HAS_APPLE_PAC) */

#elif (__ARM_ARCH__ == 7)
#ifdef __ARM_SUB_ARCH__
	cpuid_cpu_info.arm_info.arm_arch = __ARM_SUB_ARCH__;
#else /* __ARM_SUB_ARCH__ */
	cpuid_cpu_info.arm_info.arm_arch = CPU_ARCH_ARMv7;
#endif /* __ARM_SUB_ARCH__ */
#else /* (__ARM_ARCH__ != 7) && (__ARM_ARCH__ != 8) */
	/* The 1176 architecture lives in the extended feature register */
	if (cpuid_cpu_info.arm_info.arm_arch == CPU_ARCH_EXTENDED) {
		arm_isa_feat1_reg isa = machine_read_isa_feat1();

		/*
		 * If ISA feature register 1 [15:12] == 0x2, this chip
		 * supports sign extension instructions, which indicates ARMv6.
		 */
		if (isa.field.sign_zero_ext_support == 0x2) {
			cpuid_cpu_info.arm_info.arm_arch = CPU_ARCH_ARMv6;
		}
	}
#endif /* (__ARM_ARCH__ != 7) && (__ARM_ARCH__ != 8) */
}

arm_cpu_info_t *
cpuid_info(void)
{
	return &cpuid_cpu_info;
}

int
cpuid_get_cpufamily(void)
{
	int cpufamily = 0;

	switch (cpuid_info()->arm_info.arm_implementor) {
	case CPU_VID_ARM:
		switch (cpuid_info()->arm_info.arm_part) {
		case CPU_PART_CORTEXA9:
			cpufamily = CPUFAMILY_ARM_14;
			break;
		case CPU_PART_CORTEXA8:
			cpufamily = CPUFAMILY_ARM_13;
			break;
		case CPU_PART_CORTEXA7:
			cpufamily = CPUFAMILY_ARM_15;
			break;
		case CPU_PART_1136JFS:
		case CPU_PART_1176JZFS:
			cpufamily = CPUFAMILY_ARM_11;
			break;
		case CPU_PART_926EJS:
		case CPU_PART_920T:
			cpufamily = CPUFAMILY_ARM_9;
			break;
		default:
			cpufamily = CPUFAMILY_UNKNOWN;
			break;
		}
		break;

	case CPU_VID_INTEL:
		cpufamily = CPUFAMILY_ARM_XSCALE;
		break;

	case CPU_VID_APPLE:
		switch (cpuid_info()->arm_info.arm_part) {
		case CPU_PART_TYPHOON:
		case CPU_PART_TYPHOON_CAPRI:
			cpufamily = CPUFAMILY_ARM_TYPHOON;
			break;
		case CPU_PART_TWISTER:
		case CPU_PART_TWISTER_ELBA_MALTA:
			cpufamily = CPUFAMILY_ARM_TWISTER;
			break;
		case CPU_PART_HURRICANE:
		case CPU_PART_HURRICANE_MYST:
			cpufamily = CPUFAMILY_ARM_HURRICANE;
			break;
		case CPU_PART_MONSOON:
		case CPU_PART_MISTRAL:
			cpufamily = CPUFAMILY_ARM_MONSOON_MISTRAL;
			break;
		case CPU_PART_VORTEX:
		case CPU_PART_TEMPEST:
		case CPU_PART_TEMPEST_M9:
		case CPU_PART_VORTEX_ARUBA:
		case CPU_PART_TEMPEST_ARUBA:
			cpufamily = CPUFAMILY_ARM_VORTEX_TEMPEST;
			break;
		case CPU_PART_LIGHTNING:
		case CPU_PART_THUNDER:
			cpufamily = CPUFAMILY_ARM_LIGHTNING_THUNDER;
			break;
		case CPU_PART_FIRESTORM_JADE_CHOP:
		case CPU_PART_FIRESTORM_JADE_DIE:
		case CPU_PART_ICESTORM_JADE_CHOP:
		case CPU_PART_ICESTORM_JADE_DIE:
		default:
			cpufamily = CPUFAMILY_UNKNOWN;
			break;
		}
		break;

	default:
		cpufamily = CPUFAMILY_UNKNOWN;
		break;
	}

	return cpufamily;
}

int
cpuid_get_cpusubfamily(void)
{
	int cpusubfamily = CPUSUBFAMILY_UNKNOWN;

	if (cpuid_info()->arm_info.arm_implementor != CPU_VID_APPLE) {
		return cpusubfamily;
	}

	switch (cpuid_info()->arm_info.arm_part) {
	case CPU_PART_TYPHOON:
	case CPU_PART_TWISTER:
	case CPU_PART_HURRICANE:
	case CPU_PART_MONSOON:
	case CPU_PART_MISTRAL:
	case CPU_PART_VORTEX:
	case CPU_PART_TEMPEST:
	case CPU_PART_LIGHTNING:
	case CPU_PART_THUNDER:
		cpusubfamily = CPUSUBFAMILY_ARM_HP;
		break;
	case CPU_PART_TYPHOON_CAPRI:
	case CPU_PART_TWISTER_ELBA_MALTA:
	case CPU_PART_HURRICANE_MYST:
	case CPU_PART_VORTEX_ARUBA:
	case CPU_PART_TEMPEST_ARUBA:
		cpusubfamily = CPUSUBFAMILY_ARM_HG;
		break;
	case CPU_PART_TEMPEST_M9:
		cpusubfamily = CPUSUBFAMILY_ARM_M;
		break;
	case CPU_PART_FIRESTORM_JADE_CHOP:
	case CPU_PART_ICESTORM_JADE_CHOP:
		cpusubfamily = CPUSUBFAMILY_ARM_HS;
		break;
	case CPU_PART_FIRESTORM_JADE_DIE:
	case CPU_PART_ICESTORM_JADE_DIE:
		cpusubfamily = CPUSUBFAMILY_ARM_HC_HD;
		break;
	default:
		cpusubfamily = CPUSUBFAMILY_UNKNOWN;
		break;
	}

	return cpusubfamily;
}

void
do_debugid(void)
{
	machine_do_debugid();
}

arm_debug_info_t *
arm_debug_info(void)
{
	return machine_arm_debug_info();
}

void
do_mvfpid(void)
{
	return machine_do_mvfpid();
}

arm_mvfp_info_t *
arm_mvfp_info(void)
{
	return machine_arm_mvfp_info();
}

void
do_cacheid(void)
{
	arm_cache_clidr_info_t arm_cache_clidr_info;
	arm_cache_ccsidr_info_t arm_cache_ccsidr_info;

	/*
	 * We only need to parse cache geometry parameters once per cluster type.
	 * Skip this if some other core of the same type has already parsed them.
	 */
	cluster_type_t cluster_type = ml_get_topology_info()->cpus[ml_get_cpu_number_local()].cluster_type;
	uint8_t prev_cpuid_cache_info_bitmap = os_atomic_or_orig(&cpuid_cache_info_bitmap,
	    (uint8_t)(1 << cluster_type), acq_rel);
	if (prev_cpuid_cache_info_bitmap & (1 << cluster_type)) {
		return;
	}
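
	/*
	 * Illustrative example of the gate above: if the E- and P-cluster types
	 * map to bit positions 0 and 1 (the actual values come from
	 * cluster_type_t), the first E-core to arrive atomically sets bit 0 and
	 * continues below; any later E-core sees bit 0 already set in the prior
	 * value returned by os_atomic_or_orig() and takes the early return.
	 */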

	cache_info_t *cpuid_cache_info_p = &cpuid_cache_info[cluster_type];

	arm_cache_clidr_info.value = machine_read_clidr();

	/*
	 * For compatibility with existing callers, cache the boot CPU's
	 * cache parameters and return those upon any call to cache_info().
	 */
	if (prev_cpuid_cache_info_bitmap == 0) {
		cpuid_cache_info_boot_cpu = cpuid_cache_info_p;
	}

	/* Select the L1 data/unified cache */

	machine_write_csselr(CSSELR_L1, CSSELR_DATA_UNIFIED);
	arm_cache_ccsidr_info.value = machine_read_ccsidr();

	cpuid_cache_info_p->c_unified = (arm_cache_clidr_info.bits.Ctype1 == 0x4) ? 1 : 0;

	switch (arm_cache_ccsidr_info.bits.c_type) {
	case 0x1:
		cpuid_cache_info_p->c_type = CACHE_WRITE_ALLOCATION;
		break;
	case 0x2:
		cpuid_cache_info_p->c_type = CACHE_READ_ALLOCATION;
		break;
	case 0x4:
		cpuid_cache_info_p->c_type = CACHE_WRITE_BACK;
		break;
	case 0x8:
		cpuid_cache_info_p->c_type = CACHE_WRITE_THROUGH;
		break;
	default:
		cpuid_cache_info_p->c_type = CACHE_UNKNOWN;
	}

	cpuid_cache_info_p->c_linesz = 4 * (1 << (arm_cache_ccsidr_info.bits.LineSize + 2));
	cpuid_cache_info_p->c_assoc = (arm_cache_ccsidr_info.bits.Assoc + 1);

	/* I-cache size */
	cpuid_cache_info_p->c_isize = (arm_cache_ccsidr_info.bits.NumSets + 1) * cpuid_cache_info_p->c_linesz * cpuid_cache_info_p->c_assoc;

	/* D-cache size */
	cpuid_cache_info_p->c_dsize = (arm_cache_ccsidr_info.bits.NumSets + 1) * cpuid_cache_info_p->c_linesz * cpuid_cache_info_p->c_assoc;
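
	/*
	 * Worked example of the arithmetic above (illustrative values):
	 * LineSize = 2 gives c_linesz = 4 * (1 << 4) = 64 bytes/line, Assoc = 7
	 * gives 8 ways, and NumSets = 127 gives 128 sets, for a total of
	 * 128 * 64 * 8 = 64KB. Because L1 is indexed here via the data/unified
	 * selector, c_isize and c_dsize start out identical; c_isize is
	 * recomputed further below when the I-cache is separate.
	 */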


	if ((arm_cache_clidr_info.bits.Ctype3 == 0x4) ||
	    (arm_cache_clidr_info.bits.Ctype2 == 0x4) || (arm_cache_clidr_info.bits.Ctype2 == 0x2)) {
		if (arm_cache_clidr_info.bits.Ctype3 == 0x4) {
			/* Select L3 (LLC) if the SoC is new enough to have one.
			 * This will be the second-level cache for the highest-performing ACC. */
			machine_write_csselr(CSSELR_L3, CSSELR_DATA_UNIFIED);
		} else {
			/* Select the L2 data cache */
			machine_write_csselr(CSSELR_L2, CSSELR_DATA_UNIFIED);
		}
		arm_cache_ccsidr_info.value = machine_read_ccsidr();

		cpuid_cache_info_p->c_linesz = 4 * (1 << (arm_cache_ccsidr_info.bits.LineSize + 2));
		cpuid_cache_info_p->c_assoc = (arm_cache_ccsidr_info.bits.Assoc + 1);
		cpuid_cache_info_p->c_l2size = (arm_cache_ccsidr_info.bits.NumSets + 1) * cpuid_cache_info_p->c_linesz * cpuid_cache_info_p->c_assoc;
		cpuid_cache_info_p->c_inner_cache_size = cpuid_cache_info_p->c_dsize;
		cpuid_cache_info_p->c_bulksize_op = cpuid_cache_info_p->c_l2size;

		/* Capri has a 2MB L2 cache, unlike every other SoC up to this
		 * point, which had a 1MB L2 cache; to get the same performance
		 * gain from coloring, we have to double the number of colors.
		 * Note that in general (and in fact as implemented in
		 * i386/cpuid.c), the number of colors is calculated as the
		 * cache line size times the number of sets, divided by the
		 * page size. Also note that for H8 devices and up, the page
		 * size will be 16KB instead of 4KB, which reduces the number
		 * of colors required. Thus, this is really a temporary
		 * solution for Capri specifically that we may want to
		 * generalize later:
		 *
		 * TODO: Are there any special considerations for our unusual
		 * cache geometries (3MB)?
		 */
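		/*
		 * Worked example with illustrative numbers: a 2MB, 8-way L2
		 * with 64-byte lines has 2MB / (8 * 64) = 4096 sets, so with
		 * 4KB pages the line below computes (4096 * 64) / 4096 = 64
		 * colors; with 16KB pages the same geometry yields 16 colors.
		 */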
		vm_cache_geometry_colors = ((arm_cache_ccsidr_info.bits.NumSets + 1) * cpuid_cache_info_p->c_linesz) / PAGE_SIZE;
		kprintf(" vm_cache_geometry_colors: %d\n", vm_cache_geometry_colors);
	} else {
		cpuid_cache_info_p->c_l2size = 0;

		cpuid_cache_info_p->c_inner_cache_size = cpuid_cache_info_p->c_dsize;
		cpuid_cache_info_p->c_bulksize_op = cpuid_cache_info_p->c_dsize;
	}

	if (cpuid_cache_info_p->c_unified == 0) {
		machine_write_csselr(CSSELR_L1, CSSELR_INSTR);
		arm_cache_ccsidr_info.value = machine_read_ccsidr();
		uint32_t c_linesz = 4 * (1 << (arm_cache_ccsidr_info.bits.LineSize + 2));
		uint32_t c_assoc = (arm_cache_ccsidr_info.bits.Assoc + 1);
		/* I-cache size */
		cpuid_cache_info_p->c_isize = (arm_cache_ccsidr_info.bits.NumSets + 1) * c_linesz * c_assoc;
	}

	kprintf("%s() - %u bytes %s cache (I:%u D:%u (%s)), %u-way assoc, %u bytes/line\n",
	    __FUNCTION__,
	    cpuid_cache_info_p->c_dsize + cpuid_cache_info_p->c_isize,
	    ((cpuid_cache_info_p->c_type == CACHE_WRITE_BACK) ? "WB" :
	    (cpuid_cache_info_p->c_type == CACHE_WRITE_THROUGH ? "WT" : "Unknown")),
	    cpuid_cache_info_p->c_isize,
	    cpuid_cache_info_p->c_dsize,
	    (cpuid_cache_info_p->c_unified) ? "unified" : "separate",
	    cpuid_cache_info_p->c_assoc,
	    cpuid_cache_info_p->c_linesz);
}

cache_info_t *
cache_info(void)
{
	return cpuid_cache_info_boot_cpu;
}

cache_info_t *
cache_info_type(cluster_type_t cluster_type)
{
	assert((cluster_type >= 0) && (cluster_type < MAX_CPU_TYPES));
	return &cpuid_cache_info[cluster_type];
}