1 /*
2 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
3 */
4 #include <sys/param.h>
5 #include <sys/kernel.h>
6 #include <sys/sysctl.h>
7
8 #include <machine/machine_routines.h>
9
10 #include <mach/host_info.h>
11 #include <mach/mach_host.h>
12 #include <arm/cpuid.h>
13 #include <kern/hvg_hypercall.h>
14 #include <kern/zalloc.h>
15 #include <libkern/libkern.h>
16 #include <pexpert/device_tree.h>
17
18 #if HYPERVISOR
19 #include <kern/hv_support.h>
20 #include <kern/bits.h>
21 #endif
22
23 extern uint64_t wake_abstime;
24
#if DEVELOPMENT || DEBUG
/* Various tuneables to modulate selection of WFE in the idle path */
extern int wfe_rec_max;
extern int wfe_allowed;

extern int wfe_rec_none;
extern uint32_t idle_proximate_timer_wfe;
extern uint32_t idle_proximate_io_wfe_masked;
extern uint32_t idle_proximate_io_wfe_unmasked;

/* machdep.wfe_rec_max (RW): exposes wfe_rec_max for runtime tuning. */
static
SYSCTL_INT(_machdep, OID_AUTO, wfe_rec_max,
    CTLFLAG_RW, &wfe_rec_max, 0,
    "");

/* machdep.wfe_allowed (RW): exposes wfe_allowed for runtime tuning. */
static
SYSCTL_INT(_machdep, OID_AUTO, wfe_allowed,
    CTLFLAG_RW, &wfe_allowed, 0,
    "");

/* machdep.idle_timer_wfe (RW): backs idle_proximate_timer_wfe. */
static
SYSCTL_INT(_machdep, OID_AUTO, idle_timer_wfe,
    CTLFLAG_RW, &idle_proximate_timer_wfe, 0,
    "");

/* machdep.idle_io_wfe_masked (RW): backs idle_proximate_io_wfe_masked. */
static
SYSCTL_INT(_machdep, OID_AUTO, idle_io_wfe_masked,
    CTLFLAG_RW, &idle_proximate_io_wfe_masked, 0,
    "");
/* machdep.idle_io_wfe_unmasked (RW): backs idle_proximate_io_wfe_unmasked. */
static
SYSCTL_INT(_machdep, OID_AUTO, idle_io_wfe_unmasked,
    CTLFLAG_RW, &idle_proximate_io_wfe_unmasked, 0,
    "");

/* machdep.wfe_rec_none (RW): exposes wfe_rec_none for runtime tuning. */
static
SYSCTL_INT(_machdep, OID_AUTO, wfe_rec_none,
    CTLFLAG_RW, &wfe_rec_none, 0,
    "");

/* machdep.wfe_rec_override_mat (RW): 64-bit override, units presumably MATUs — TODO confirm. */
extern uint64_t wfe_rec_override_mat;
SYSCTL_QUAD(_machdep, OID_AUTO, wfe_rec_override_mat,
    CTLFLAG_RW, &wfe_rec_override_mat,
    "");

/* machdep.wfe_rec_clamp (RW): 64-bit clamp applied to WFE recommendations — TODO confirm units. */
extern uint64_t wfe_rec_clamp;
SYSCTL_QUAD(_machdep, OID_AUTO, wfe_rec_clamp,
    CTLFLAG_RW, &wfe_rec_clamp,
    "");

#endif
75
/* machdep.wake_abstime (RD): absolute time recorded at the last wakeup (see wake_abstime). */
static
SYSCTL_QUAD(_machdep, OID_AUTO, wake_abstime,
    CTLFLAG_RD, &wake_abstime,
    "Absolute Time at the last wakeup");
80
81 static int
82 sysctl_time_since_reset SYSCTL_HANDLER_ARGS
83 {
84 #pragma unused(arg1, arg2, oidp)
85 uint64_t return_value = ml_get_time_since_reset();
86 return SYSCTL_OUT(req, &return_value, sizeof(return_value));
87 }
88
89 SYSCTL_PROC(_machdep, OID_AUTO, time_since_reset,
90 CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED,
91 0, 0, sysctl_time_since_reset, "I",
92 "Continuous time since last SOC boot/wake started");
93
94 static int
95 sysctl_wake_conttime SYSCTL_HANDLER_ARGS
96 {
97 #pragma unused(arg1, arg2, oidp)
98 uint64_t return_value = ml_get_conttime_wake_time();
99 return SYSCTL_OUT(req, &return_value, sizeof(return_value));
100 }
101
102 SYSCTL_PROC(_machdep, OID_AUTO, wake_conttime,
103 CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED,
104 0, 0, sysctl_wake_conttime, "I",
105 "Continuous Time at the last wakeup");
106
107 #if defined(HAS_IPI)
108 static int
cpu_signal_deferred_timer(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)109 cpu_signal_deferred_timer(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
110 {
111 int new_value = 0;
112 int changed = 0;
113
114 int old_value = (int)ml_cpu_signal_deferred_get_timer();
115
116 int error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
117
118 if (error == 0 && changed) {
119 ml_cpu_signal_deferred_adjust_timer((uint64_t)new_value);
120 }
121
122 return error;
123 }
124
/*
 * machdep.deferred_ipi_timeout (RW int): backed by cpu_signal_deferred_timer(),
 * which round-trips the value through ml_cpu_signal_deferred_{get,adjust}_timer().
 */
SYSCTL_PROC(_machdep, OID_AUTO, deferred_ipi_timeout,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0,
    cpu_signal_deferred_timer, "I", "Deferred IPI timeout (nanoseconds)");
129
130 #endif /* defined(HAS_IPI) */
131
/*
 * For source compatibility, here's some machdep.cpu mibs that
 * use host_info() to simulate reasonable answers.
 */

/* Parent node for the machdep.cpu.* hierarchy registered below. */
SYSCTL_NODE(_machdep, OID_AUTO, cpu, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "CPU info");
139
140 static int
141 arm_host_info SYSCTL_HANDLER_ARGS
142 {
143 __unused struct sysctl_oid *unused_oidp = oidp;
144
145 host_basic_info_data_t hinfo;
146 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
147 #define BSD_HOST 1
148 kern_return_t kret = host_info((host_t)BSD_HOST,
149 HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
150 if (KERN_SUCCESS != kret) {
151 return EINVAL;
152 }
153
154 if (sizeof(uint32_t) != arg2) {
155 panic("size mismatch");
156 }
157
158 uintptr_t woffset = (uintptr_t)arg1 / sizeof(uint32_t);
159 uint32_t datum = *(uint32_t *)(((uint32_t *)&hinfo) + woffset);
160 return SYSCTL_OUT(req, &datum, sizeof(datum));
161 }
162
/*
 * Each of the following entries passes the field's byte offset within
 * host_basic_info_data_t as arg1 and its size as arg2; arm_host_info()
 * uses them to pick the value out of a fresh host_info() snapshot.
 */

/*
 * machdep.cpu.cores_per_package
 *
 * x86: derived from CPUID data.
 * ARM: how many physical cores we have in the AP; aka hw.physicalcpu_max
 */
static
SYSCTL_PROC(_machdep_cpu, OID_AUTO, cores_per_package,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)offsetof(host_basic_info_data_t, physical_cpu_max),
    sizeof(integer_t),
    arm_host_info, "I", "CPU cores per package");

/*
 * machdep.cpu.core_count
 *
 * x86: derived from CPUID data.
 * ARM: # active physical cores in the AP; aka hw.physicalcpu
 */
static
SYSCTL_PROC(_machdep_cpu, OID_AUTO, core_count,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)offsetof(host_basic_info_data_t, physical_cpu),
    sizeof(integer_t),
    arm_host_info, "I", "Number of enabled cores per package");

/*
 * machdep.cpu.logical_per_package
 *
 * x86: derived from CPUID data. Returns ENOENT if HTT bit not set, but
 * most x64 CPUs have that, so assume it's available.
 * ARM: total # logical cores in the AP; aka hw.logicalcpu_max
 */
static
SYSCTL_PROC(_machdep_cpu, OID_AUTO, logical_per_package,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)offsetof(host_basic_info_data_t, logical_cpu_max),
    sizeof(integer_t),
    arm_host_info, "I", "CPU logical cpus per package");

/*
 * machdep.cpu.thread_count
 *
 * x86: derived from CPUID data.
 * ARM: # active logical cores in the AP; aka hw.logicalcpu
 */
static
SYSCTL_PROC(_machdep_cpu, OID_AUTO, thread_count,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)offsetof(host_basic_info_data_t, logical_cpu),
    sizeof(integer_t),
    arm_host_info, "I", "Number of enabled threads per package");
215
/*
 * Product/SoC name copied out of the device tree by
 * sysctl_load_brand_string(); NULL if the device tree has no such
 * property.  Written once during startup, read-only thereafter.
 */
static SECURITY_READ_ONLY_LATE(char*) brand_string = NULL;
static SECURITY_READ_ONLY_LATE(size_t) brand_string_len = 0;

/*
 * SecureDTLookupEntry() is only guaranteed to work before PE_init_iokit(),
 * so we load the brand string (if available) in a startup handler.
 */
223 __startup_func
224 static void
sysctl_load_brand_string(void)225 sysctl_load_brand_string(void)
226 {
227 DTEntry node;
228 void const *value = NULL;
229 unsigned int size = 0;
230
231 if (kSuccess != SecureDTLookupEntry(0, "/product", &node)) {
232 return;
233 }
234
235 if (kSuccess != SecureDTGetProperty(node, "product-soc-name", (void const **) &value, &size)) {
236 return;
237 }
238
239 if (size == 0) {
240 return;
241 }
242
243 brand_string = zalloc_permanent(size, ZALIGN_NONE);
244 if (brand_string == NULL) {
245 return;
246 }
247
248 memcpy(brand_string, value, size);
249 brand_string_len = size;
250 }
251 STARTUP(SYSCTL, STARTUP_RANK_MIDDLE, sysctl_load_brand_string);
252
253 /*
254 * machdep.cpu.brand_string
255 *
256 * x86: derived from CPUID data.
257 * ARM: Grab the product string from the device tree, if it exists.
258 * Otherwise, cons something up from the CPUID register.
259 * the value is already exported via the commpage. So keep it simple.
260 */
261 static int
262 make_brand_string SYSCTL_HANDLER_ARGS
263 {
264 __unused struct sysctl_oid *unused_oidp = oidp;
265 __unused void *unused_arg1 = arg1;
266 __unused int unused_arg2 = arg2;
267
268 if (brand_string != NULL) {
269 return SYSCTL_OUT(req, brand_string, brand_string_len);
270 }
271
272 const char *impl;
273
274 switch (cpuid_info()->arm_info.arm_implementor) {
275 case CPU_VID_APPLE:
276 impl = "Apple";
277 break;
278 case CPU_VID_ARM:
279 impl = "ARM";
280 break;
281 default:
282 impl = "ARM architecture";
283 break;
284 }
285
286
287 char buf[80];
288 snprintf(buf, sizeof(buf), "%s processor", impl);
289 return SYSCTL_OUT(req, buf, strlen(buf) + 1);
290 }
291
/* machdep.cpu.brand_string (RD string): served by make_brand_string(). */
SYSCTL_PROC(_machdep_cpu, OID_AUTO, brand_string,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, make_brand_string, "A", "CPU brand string");
295
296
297 static int
298 virtual_address_size SYSCTL_HANDLER_ARGS
299 {
300 #pragma unused(arg1, arg2, oidp)
301 int return_value = 64 - T0SZ_BOOT;
302 return SYSCTL_OUT(req, &return_value, sizeof(return_value));
303 }
304
/* machdep.virtual_address_size (RD int): computed by virtual_address_size(). */
static
SYSCTL_PROC(_machdep, OID_AUTO, virtual_address_size,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, virtual_address_size, "I",
    "Number of addressable bits in userspace virtual addresses");
310
311
#if DEVELOPMENT || DEBUG
/* machdep.tlto (RW quad): exposes the ticket spinlock timeout. */
extern uint64_t TLockTimeOut;
SYSCTL_QUAD(_machdep, OID_AUTO, tlto,
    CTLFLAG_RW | CTLFLAG_LOCKED, &TLockTimeOut,
    "Ticket spinlock timeout (MATUs): use with care");

/* machdep.timebase_validation (RW uint): toggles mach_absolute_time() monotonicity checks. */
extern uint32_t timebase_validation;
SYSCTL_UINT(_machdep, OID_AUTO, timebase_validation,
    CTLFLAG_RW | CTLFLAG_LOCKED, &timebase_validation, 0,
    "Monotonicity validation of kernel mach_absolute_time()");

#if __WKDM_ISA_2P_WORKAROUND__
/* Counters and controls for the WKDM ISA 2P errata workaround. */
extern uint64_t wkdmdretries, wkdmdretriespb;
extern uint32_t simulate_wkdm2p_error, wkdm_isa_2p_war_required;
SYSCTL_QUAD(_machdep, OID_AUTO, wkdmdretries,
    CTLFLAG_RW | CTLFLAG_LOCKED, &wkdmdretries,
    "Number of WKDM errata retries");
SYSCTL_QUAD(_machdep, OID_AUTO, wkdmdretriespb,
    CTLFLAG_RW | CTLFLAG_LOCKED, &wkdmdretriespb,
    "Number of retries where payload was on page boundary");
/* machdep.simulate_wkdm2p_error (RW uint): test hook to inject the errata condition. */
SYSCTL_UINT(_machdep, OID_AUTO, simulate_wkdm2p_error,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &simulate_wkdm2p_error, 0, "");
/* machdep.wkdm_isa_2p_war_required (RW uint): whether the workaround is in effect. */
SYSCTL_UINT(_machdep, OID_AUTO, wkdm_isa_2p_war_required,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &wkdm_isa_2p_war_required, 0, "");
#endif /* __WKDM_ISA_2P_WORKAROUND__ */
339
340
/*
 * macro to generate a sysctl machdep.cpu.sysreg_* for a given system register
 * using __builtin_arm_rsr64.
 *
 * Expands to a read handler (sysctl_sysreg_<name>) that reads the named
 * register on whichever CPU services the request, plus its SYSCTL_PROC
 * registration as a read-only quad.
 */
#define SYSCTL_PROC_MACHDEP_CPU_SYSREG(name) \
static int \
sysctl_sysreg_##name SYSCTL_HANDLER_ARGS \
{ \
	_Pragma("unused(arg1, arg2, oidp)") \
	uint64_t return_value = __builtin_arm_rsr64(#name); \
	return SYSCTL_OUT(req, &return_value, sizeof(return_value)); \
} \
SYSCTL_PROC(_machdep_cpu, OID_AUTO, sysreg_##name, \
	CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED, \
	0, 0, sysctl_sysreg_##name, "Q", \
	#name " register on the current CPU");


// CPU system registers
// ARM64: AArch64 Vector Base Address Register
SYSCTL_PROC_MACHDEP_CPU_SYSREG(VBAR_EL1);
// ARM64: AArch64 Memory Attribute Indirection Register
SYSCTL_PROC_MACHDEP_CPU_SYSREG(MAIR_EL1);
// ARM64: AArch64 Translation table base register 1
SYSCTL_PROC_MACHDEP_CPU_SYSREG(TTBR1_EL1);
// ARM64: AArch64 System Control Register
SYSCTL_PROC_MACHDEP_CPU_SYSREG(SCTLR_EL1);
// ARM64: AArch64 Translation Control Register
SYSCTL_PROC_MACHDEP_CPU_SYSREG(TCR_EL1);
// ARM64: AArch64 Memory Model Feature Register 0
SYSCTL_PROC_MACHDEP_CPU_SYSREG(ID_AA64MMFR0_EL1);
// ARM64: AArch64 Instruction Set Attribute Register 1
SYSCTL_PROC_MACHDEP_CPU_SYSREG(ID_AA64ISAR1_EL1);
#if APPLE_ARM64_ARCH_FAMILY
// Apple ID Register
SYSCTL_PROC_MACHDEP_CPU_SYSREG(AIDR_EL1);
#endif /* APPLE_ARM64_ARCH_FAMILY */

#endif /* DEVELOPMENT || DEBUG */
380
381
#ifdef ML_IO_TIMEOUTS_ENABLED
/*
 * Timeouts for ml_{io|phys}_{read|write}...
 * RW on DEVELOPMENT/DEBUG kernels, RO otherwise (see MMIO_TIMEOUT_FLAGS
 * below; the previous comment had this backwards).
 */

#if DEVELOPMENT || DEBUG
#define MMIO_TIMEOUT_FLAGS (CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED)
#else
#define MMIO_TIMEOUT_FLAGS (CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED)
#endif

SYSCTL_QUAD(_machdep, OID_AUTO, report_phy_read_delay, MMIO_TIMEOUT_FLAGS,
    &report_phy_read_delay_to, "Maximum time before io/phys read gets reported or panics");
SYSCTL_QUAD(_machdep, OID_AUTO, report_phy_write_delay, MMIO_TIMEOUT_FLAGS,
    &report_phy_write_delay_to, "Maximum time before io/phys write gets reported or panics");
SYSCTL_QUAD(_machdep, OID_AUTO, trace_phy_read_delay, MMIO_TIMEOUT_FLAGS,
    &trace_phy_read_delay_to, "Maximum time before io/phys read gets ktraced");
SYSCTL_QUAD(_machdep, OID_AUTO, trace_phy_write_delay, MMIO_TIMEOUT_FLAGS,
    &trace_phy_write_delay_to, "Maximum time before io/phys write gets ktraced");
SYSCTL_UINT(_machdep, OID_AUTO, report_phy_read_osbt, CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &report_phy_read_osbt, 0, "Whether to report exceeding io/phys read duration via OSReportWithBacktrace");
SYSCTL_UINT(_machdep, OID_AUTO, report_phy_write_osbt, CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &report_phy_write_osbt, 0, "Whether to report exceeding io/phys write duration via OSReportWithBacktrace");

SYSCTL_INT(_machdep, OID_AUTO, phy_read_delay_panic, CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &phy_read_panic, 0, "if set, report-phy-read-delay timeout panics");
SYSCTL_INT(_machdep, OID_AUTO, phy_write_delay_panic, CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &phy_write_panic, 0, "if set, report-phy-write-delay timeout panics");

#if ML_IO_SIMULATE_STRETCHED_ENABLED
/* Test hook: artificially stretch io accesses to exercise the timeout paths. */
SYSCTL_QUAD(_machdep, OID_AUTO, sim_stretched_io_ns, CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &simulate_stretched_io, "simulate stretched io in ml_read_io, ml_write_io");
#endif /* ML_IO_SIMULATE_STRETCHED_ENABLED */

#endif /* ML_IO_TIMEOUTS_ENABLED */
418