1 /*
2 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
3 */
4 #include <sys/param.h>
5 #include <sys/kernel.h>
6 #include <sys/sysctl.h>
7
8 #include <machine/machine_routines.h>
9
10 #include <mach/host_info.h>
11 #include <mach/mach_host.h>
12 #include <arm/cpuid.h>
13 #include <kern/zalloc.h>
14 #include <libkern/libkern.h>
15 #include <pexpert/device_tree.h>
16
17 #if HYPERVISOR
18 #include <kern/hv_support.h>
19 #endif
20
21 extern uint64_t wake_abstime;
22 extern int lck_mtx_adaptive_spin_mode;
23
24 #if DEVELOPMENT || DEBUG
25 /* Various tuneables to modulate selection of WFE in the idle path */
26 extern int wfe_rec_max;
27 extern int wfe_allowed;
28
29 extern int wfe_rec_none;
30 extern uint32_t idle_proximate_timer_wfe;
31 extern uint32_t idle_proximate_io_wfe_masked;
32 extern uint32_t idle_proximate_io_wfe_unmasked;
33
/*
 * The sysctls below expose the like-named WFE idle-path tuneables
 * (declared extern above) read/write.  Descriptions are intentionally
 * empty; these exist only on DEVELOPMENT || DEBUG kernels.
 *
 * NOTE(review): the idle_proximate_* variables are uint32_t but are
 * exported via SYSCTL_INT — same width, differing signedness; confirm
 * this is intentional.
 */
static
SYSCTL_INT(_machdep, OID_AUTO, wfe_rec_max,
    CTLFLAG_RW, &wfe_rec_max, 0,
    "");

static
SYSCTL_INT(_machdep, OID_AUTO, wfe_allowed,
    CTLFLAG_RW, &wfe_allowed, 0,
    "");

static
SYSCTL_INT(_machdep, OID_AUTO, idle_timer_wfe,
    CTLFLAG_RW, &idle_proximate_timer_wfe, 0,
    "");

static
SYSCTL_INT(_machdep, OID_AUTO, idle_io_wfe_masked,
    CTLFLAG_RW, &idle_proximate_io_wfe_masked, 0,
    "");
static
SYSCTL_INT(_machdep, OID_AUTO, idle_io_wfe_unmasked,
    CTLFLAG_RW, &idle_proximate_io_wfe_unmasked, 0,
    "");

static
SYSCTL_INT(_machdep, OID_AUTO, wfe_rec_none,
    CTLFLAG_RW, &wfe_rec_none, 0,
    "");

extern uint64_t wfe_rec_override_mat;
SYSCTL_QUAD(_machdep, OID_AUTO, wfe_rec_override_mat,
    CTLFLAG_RW, &wfe_rec_override_mat,
    "");

extern uint64_t wfe_rec_clamp;
SYSCTL_QUAD(_machdep, OID_AUTO, wfe_rec_clamp,
    CTLFLAG_RW, &wfe_rec_clamp,
    "");
72
73 #endif
74
/* Read-only export of the mach absolute time recorded at the last wakeup. */
static
SYSCTL_QUAD(_machdep, OID_AUTO, wake_abstime,
    CTLFLAG_RD, &wake_abstime,
    "Absolute Time at the last wakeup");
79
80 static int
81 sysctl_time_since_reset SYSCTL_HANDLER_ARGS
82 {
83 #pragma unused(arg1, arg2, oidp)
84 uint64_t return_value = ml_get_time_since_reset();
85 return SYSCTL_OUT(req, &return_value, sizeof(return_value));
86 }
87
88 SYSCTL_PROC(_machdep, OID_AUTO, time_since_reset,
89 CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED,
90 0, 0, sysctl_time_since_reset, "I",
91 "Continuous time since last SOC boot/wake started");
92
93 static int
94 sysctl_wake_conttime SYSCTL_HANDLER_ARGS
95 {
96 #pragma unused(arg1, arg2, oidp)
97 uint64_t return_value = ml_get_conttime_wake_time();
98 return SYSCTL_OUT(req, &return_value, sizeof(return_value));
99 }
100
101 SYSCTL_PROC(_machdep, OID_AUTO, wake_conttime,
102 CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED,
103 0, 0, sysctl_wake_conttime, "I",
104 "Continuous Time at the last wakeup");
105
106 #if defined(HAS_IPI)
107 static int
cpu_signal_deferred_timer(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)108 cpu_signal_deferred_timer(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
109 {
110 int new_value = 0;
111 int changed = 0;
112
113 int old_value = (int)ml_cpu_signal_deferred_get_timer();
114
115 int error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
116
117 if (error == 0 && changed) {
118 ml_cpu_signal_deferred_adjust_timer((uint64_t)new_value);
119 }
120
121 return error;
122 }
123
/* machdep.deferred_ipi_timeout: RW int, served by cpu_signal_deferred_timer(). */
SYSCTL_PROC(_machdep, OID_AUTO, deferred_ipi_timeout,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0,
    cpu_signal_deferred_timer, "I", "Deferred IPI timeout (nanoseconds)");
128
129 #endif /* defined(HAS_IPI) */
130
131 /*
132 * For source compatibility, here's some machdep.cpu mibs that
133 * use host_info() to simulate reasonable answers.
134 */
135
/* Parent node for the x86-compatibility machdep.cpu.* mibs defined below. */
SYSCTL_NODE(_machdep, OID_AUTO, cpu, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "CPU info");
138
139 static int
140 arm_host_info SYSCTL_HANDLER_ARGS
141 {
142 __unused struct sysctl_oid *unused_oidp = oidp;
143
144 host_basic_info_data_t hinfo;
145 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
146 #define BSD_HOST 1
147 kern_return_t kret = host_info((host_t)BSD_HOST,
148 HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
149 if (KERN_SUCCESS != kret) {
150 return EINVAL;
151 }
152
153 if (sizeof(uint32_t) != arg2) {
154 panic("size mismatch");
155 }
156
157 uintptr_t woffset = (uintptr_t)arg1 / sizeof(uint32_t);
158 uint32_t datum = *(uint32_t *)(((uint32_t *)&hinfo) + woffset);
159 return SYSCTL_OUT(req, &datum, sizeof(datum));
160 }
161
/*
 * The four mibs below share arm_host_info(): arg1 is the field's byte
 * offset within host_basic_info_data_t, arg2 its size (the handler
 * panics unless it is 4 bytes).
 */

/*
 * machdep.cpu.cores_per_package
 *
 * x86: derived from CPUID data.
 * ARM: how many physical cores we have in the AP; aka hw.physicalcpu_max
 */
static
SYSCTL_PROC(_machdep_cpu, OID_AUTO, cores_per_package,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)offsetof(host_basic_info_data_t, physical_cpu_max),
    sizeof(integer_t),
    arm_host_info, "I", "CPU cores per package");

/*
 * machdep.cpu.core_count
 *
 * x86: derived from CPUID data.
 * ARM: # active physical cores in the AP; aka hw.physicalcpu
 */
static
SYSCTL_PROC(_machdep_cpu, OID_AUTO, core_count,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)offsetof(host_basic_info_data_t, physical_cpu),
    sizeof(integer_t),
    arm_host_info, "I", "Number of enabled cores per package");

/*
 * machdep.cpu.logical_per_package
 *
 * x86: derived from CPUID data. Returns ENOENT if HTT bit not set, but
 * most x64 CPUs have that, so assume it's available.
 * ARM: total # logical cores in the AP; aka hw.logicalcpu_max
 */
static
SYSCTL_PROC(_machdep_cpu, OID_AUTO, logical_per_package,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)offsetof(host_basic_info_data_t, logical_cpu_max),
    sizeof(integer_t),
    arm_host_info, "I", "CPU logical cpus per package");

/*
 * machdep.cpu.thread_count
 *
 * x86: derived from CPUID data.
 * ARM: # active logical cores in the AP; aka hw.logicalcpu
 */
static
SYSCTL_PROC(_machdep_cpu, OID_AUTO, thread_count,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)offsetof(host_basic_info_data_t, logical_cpu),
    sizeof(integer_t),
    arm_host_info, "I", "Number of enabled threads per package");
214
/* Product string copied from the device tree at startup; NULL if absent. */
static SECURITY_READ_ONLY_LATE(char*) brand_string = NULL;
/* Size in bytes of brand_string, as reported by the device-tree property. */
static SECURITY_READ_ONLY_LATE(size_t) brand_string_len = 0;
217
218 /*
219 * SecureDTLookupEntry() is only guaranteed to work before PE_init_iokit(),
220 * so we load the brand string (if available) in a startup handler.
221 */
222 __startup_func
223 static void
sysctl_load_brand_string(void)224 sysctl_load_brand_string(void)
225 {
226 DTEntry node;
227 void const *value = NULL;
228 unsigned int size = 0;
229
230 if (kSuccess != SecureDTLookupEntry(0, "/product", &node)) {
231 return;
232 }
233
234 if (kSuccess != SecureDTGetProperty(node, "product-soc-name", (void const **) &value, &size)) {
235 return;
236 }
237
238 if (size == 0) {
239 return;
240 }
241
242 brand_string = zalloc_permanent(size, ZALIGN_NONE);
243 if (brand_string == NULL) {
244 return;
245 }
246
247 memcpy(brand_string, value, size);
248 brand_string_len = size;
249 }
250 STARTUP(SYSCTL, STARTUP_RANK_MIDDLE, sysctl_load_brand_string);
251
252 /*
253 * machdep.cpu.brand_string
254 *
255 * x86: derived from CPUID data.
256 * ARM: Grab the product string from the device tree, if it exists.
257 * Otherwise, cons something up from the CPUID register.
258 * the value is already exported via the commpage. So keep it simple.
259 */
260 static int
261 make_brand_string SYSCTL_HANDLER_ARGS
262 {
263 __unused struct sysctl_oid *unused_oidp = oidp;
264 __unused void *unused_arg1 = arg1;
265 __unused int unused_arg2 = arg2;
266
267 if (brand_string != NULL) {
268 return SYSCTL_OUT(req, brand_string, brand_string_len);
269 }
270
271 const char *impl;
272
273 switch (cpuid_info()->arm_info.arm_implementor) {
274 case CPU_VID_APPLE:
275 impl = "Apple";
276 break;
277 case CPU_VID_ARM:
278 impl = "ARM";
279 break;
280 default:
281 impl = "ARM architecture";
282 break;
283 }
284
285
286 char buf[80];
287 snprintf(buf, sizeof(buf), "%s processor", impl);
288 return SYSCTL_OUT(req, buf, strlen(buf) + 1);
289 }
290
/* machdep.cpu.brand_string: read-only string served by make_brand_string(). */
SYSCTL_PROC(_machdep_cpu, OID_AUTO, brand_string,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, make_brand_string, "A", "CPU brand string");
294
295
/* RW toggle for adaptive-spin behavior on kernel mutexes (extern above). */
static
SYSCTL_INT(_machdep, OID_AUTO, lck_mtx_adaptive_spin_mode,
    CTLFLAG_RW, &lck_mtx_adaptive_spin_mode, 0,
    "Enable adaptive spin behavior for kernel mutexes");
300
301 static int
302 virtual_address_size SYSCTL_HANDLER_ARGS
303 {
304 #pragma unused(arg1, arg2, oidp)
305 int return_value = 64 - T0SZ_BOOT;
306 return SYSCTL_OUT(req, &return_value, sizeof(return_value));
307 }
308
/* machdep.virtual_address_size: read-only int served by the handler above-named virtual_address_size(). */
static
SYSCTL_PROC(_machdep, OID_AUTO, virtual_address_size,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, virtual_address_size, "I",
    "Number of addressable bits in userspace virtual addresses");
314
315
316 #if DEVELOPMENT || DEBUG
extern uint64_t TLockTimeOut;
/* Ticket spinlock timeout, in mach absolute time units (per description). */
SYSCTL_QUAD(_machdep, OID_AUTO, tlto,
    CTLFLAG_RW | CTLFLAG_LOCKED, &TLockTimeOut,
    "Ticket spinlock timeout (MATUs): use with care");

extern uint32_t timebase_validation;
/* Controls monotonicity validation of kernel mach_absolute_time(). */
SYSCTL_UINT(_machdep, OID_AUTO, timebase_validation,
    CTLFLAG_RW | CTLFLAG_LOCKED, &timebase_validation, 0,
    "Monotonicity validation of kernel mach_absolute_time()");

#if __WKDM_ISA_2P_WORKAROUND__
/* Counters and controls for the WKDM errata workaround (see descriptions). */
extern uint64_t wkdmdretries, wkdmdretriespb;
extern uint32_t simulate_wkdm2p_error, wkdm_isa_2p_war_required;
SYSCTL_QUAD(_machdep, OID_AUTO, wkdmdretries,
    CTLFLAG_RW | CTLFLAG_LOCKED, &wkdmdretries,
    "Number of WKDM errata retries");
SYSCTL_QUAD(_machdep, OID_AUTO, wkdmdretriespb,
    CTLFLAG_RW | CTLFLAG_LOCKED, &wkdmdretriespb,
    "Number of retries where payload was on page boundary");
SYSCTL_UINT(_machdep, OID_AUTO, simulate_wkdm2p_error,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &simulate_wkdm2p_error, 0, "");
SYSCTL_UINT(_machdep, OID_AUTO, wkdm_isa_2p_war_required,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &wkdm_isa_2p_war_required, 0, "");
#endif /* __WKDM_ISA_2P_WORKAROUND__ */
343
344
/*
 * Macro to generate a read-only machdep.cpu.sysreg_<name> sysctl for a
 * given system register, read via __builtin_arm_rsr64.  The register is
 * sampled on whichever CPU happens to service the request ("the current
 * CPU", per the generated description string).
 */
#define SYSCTL_PROC_MACHDEP_CPU_SYSREG(name) \
static int \
sysctl_sysreg_##name SYSCTL_HANDLER_ARGS \
{ \
	_Pragma("unused(arg1, arg2, oidp)") \
	uint64_t return_value = __builtin_arm_rsr64(#name); \
	return SYSCTL_OUT(req, &return_value, sizeof(return_value)); \
} \
SYSCTL_PROC(_machdep_cpu, OID_AUTO, sysreg_##name, \
    CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED, \
    0, 0, sysctl_sysreg_##name, "Q", \
    #name " register on the current CPU");
361
362
// CPU system registers
// Exposed only on DEVELOPMENT || DEBUG kernels (enclosing #if above).
// ARM64: AArch64 Vector Base Address Register
SYSCTL_PROC_MACHDEP_CPU_SYSREG(VBAR_EL1);
// ARM64: AArch64 Memory Attribute Indirection Register
SYSCTL_PROC_MACHDEP_CPU_SYSREG(MAIR_EL1);
// ARM64: AArch64 Translation table base register 1
SYSCTL_PROC_MACHDEP_CPU_SYSREG(TTBR1_EL1);
// ARM64: AArch64 System Control Register
SYSCTL_PROC_MACHDEP_CPU_SYSREG(SCTLR_EL1);
// ARM64: AArch64 Translation Control Register
SYSCTL_PROC_MACHDEP_CPU_SYSREG(TCR_EL1);
// ARM64: AArch64 Memory Model Feature Register 0
SYSCTL_PROC_MACHDEP_CPU_SYSREG(ID_AA64MMFR0_EL1);
// ARM64: AArch64 Instruction Set Attribute Register 1
SYSCTL_PROC_MACHDEP_CPU_SYSREG(ID_AA64ISAR1_EL1);
#if APPLE_ARM64_ARCH_FAMILY
// Apple ID Register
SYSCTL_PROC_MACHDEP_CPU_SYSREG(AIDR_EL1);
#endif /* APPLE_ARM64_ARCH_FAMILY */
382
383 #endif /* DEVELOPMENT || DEBUG */
384
385
#ifdef ML_IO_TIMEOUTS_ENABLED
/* Timeouts for ml_{io|phys}_{read|write}... */
/*
 * NOTE(review): the *_to, *_osbt, *_panic and simulate_stretched_io
 * variables referenced below are declared in a header included above;
 * their definitions are not visible in this file.
 */

SYSCTL_UINT(_machdep, OID_AUTO, report_phy_read_delay, CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &report_phy_read_delay_to, 0, "Maximum time before io/phys read gets reported or panics");
SYSCTL_UINT(_machdep, OID_AUTO, report_phy_write_delay, CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &report_phy_write_delay_to, 0, "Maximum time before io/phys write gets reported or panics");
SYSCTL_UINT(_machdep, OID_AUTO, trace_phy_read_delay, CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &trace_phy_read_delay_to, 0, "Maximum time before io/phys read gets ktraced");
SYSCTL_UINT(_machdep, OID_AUTO, trace_phy_write_delay, CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &trace_phy_write_delay_to, 0, "Maximum time before io/phys write gets ktraced");
SYSCTL_UINT(_machdep, OID_AUTO, report_phy_read_osbt, CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &report_phy_read_osbt, 0, "Whether to report exceeding io/phys read duration via OSReportWithBacktrace");
SYSCTL_UINT(_machdep, OID_AUTO, report_phy_write_osbt, CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &report_phy_write_osbt, 0, "Whether to report exceeding io/phys write duration via OSReportWithBacktrace");

SYSCTL_INT(_machdep, OID_AUTO, phy_read_delay_panic, CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &phy_read_panic, 0, "if set, report-phy-read-delay timeout panics");
SYSCTL_INT(_machdep, OID_AUTO, phy_writedelay_panic, CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &phy_write_panic, 0, "if set, report-phy-write-delay timeout panics");

#if ML_IO_SIMULATE_STRETCHED_ENABLED
SYSCTL_QUAD(_machdep, OID_AUTO, sim_stretched_io_ns, CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &simulate_stretched_io, "simulate stretched io in ml_read_io, ml_write_io");
#endif /* ML_IO_SIMULATE_STRETCHED_ENABLED */

#endif /* ML_IO_TIMEOUTS_ENABLED */
413