1 /*
2 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
3 */
4 #include <sys/param.h>
5 #include <sys/kernel.h>
6 #include <sys/sysctl.h>
7
8 #include <machine/machine_routines.h>
9
10 #include <mach/host_info.h>
11 #include <mach/mach_host.h>
12 #include <arm/cpuid.h>
13 #include <kern/hvg_hypercall.h>
14 #include <vm/pmap.h>
15 #include <kern/zalloc.h>
16 #include <libkern/libkern.h>
17 #include <pexpert/device_tree.h>
18 #include <kern/task.h>
19 #include <vm/vm_protos.h>
20
21 #if HYPERVISOR
22 #include <kern/hv_support.h>
23 #include <kern/bits.h>
24 #endif
25
26 extern uint64_t wake_abstime;
27
#if DEVELOPMENT || DEBUG
/* Various tuneables to modulate selection of WFE in the idle path */
extern int wfe_rec_max;
extern int wfe_allowed;

extern int wfe_rec_none;
extern uint32_t idle_proximate_timer_wfe;
extern uint32_t idle_proximate_io_wfe_masked;
extern uint32_t idle_proximate_io_wfe_unmasked;

/* machdep.wfe_rec_max: upper bound on WFE recommendations -- semantics live in the idle loop; confirm there */
static
SYSCTL_INT(_machdep, OID_AUTO, wfe_rec_max,
    CTLFLAG_RW, &wfe_rec_max, 0,
    "");

/* machdep.wfe_allowed: presumably a master enable for WFE in the idle path -- TODO confirm */
static
SYSCTL_INT(_machdep, OID_AUTO, wfe_allowed,
    CTLFLAG_RW, &wfe_allowed, 0,
    "");

/* machdep.idle_timer_wfe: WFE selection when a timer deadline is proximate */
static
SYSCTL_INT(_machdep, OID_AUTO, idle_timer_wfe,
    CTLFLAG_RW, &idle_proximate_timer_wfe, 0,
    "");

/* machdep.idle_io_wfe_masked / _unmasked: WFE selection for proximate I/O, interrupts masked vs. unmasked */
static
SYSCTL_INT(_machdep, OID_AUTO, idle_io_wfe_masked,
    CTLFLAG_RW, &idle_proximate_io_wfe_masked, 0,
    "");
static
SYSCTL_INT(_machdep, OID_AUTO, idle_io_wfe_unmasked,
    CTLFLAG_RW, &idle_proximate_io_wfe_unmasked, 0,
    "");

/* machdep.wfe_rec_none: meaning not visible in this file -- TODO confirm against the idle path */
static
SYSCTL_INT(_machdep, OID_AUTO, wfe_rec_none,
    CTLFLAG_RW, &wfe_rec_none, 0,
    "");

/* machdep.wfe_rec_override_mat: override value; "_mat" suggests mach absolute time units -- confirm */
extern uint64_t wfe_rec_override_mat;
SYSCTL_QUAD(_machdep, OID_AUTO, wfe_rec_override_mat,
    CTLFLAG_RW, &wfe_rec_override_mat,
    "");

/* machdep.wfe_rec_clamp: clamp applied to WFE recommendations */
extern uint64_t wfe_rec_clamp;
SYSCTL_QUAD(_machdep, OID_AUTO, wfe_rec_clamp,
    CTLFLAG_RW, &wfe_rec_clamp,
    "");

#endif
78
/* machdep.wake_abstime: mach absolute time captured at the most recent wakeup (value maintained elsewhere). */
static
SYSCTL_QUAD(_machdep, OID_AUTO, wake_abstime,
    CTLFLAG_RD, &wake_abstime,
    "Absolute Time at the last wakeup");
83
84 static int
85 sysctl_time_since_reset SYSCTL_HANDLER_ARGS
86 {
87 #pragma unused(arg1, arg2, oidp)
88 uint64_t return_value = ml_get_time_since_reset();
89 return SYSCTL_OUT(req, &return_value, sizeof(return_value));
90 }
91
92 SYSCTL_PROC(_machdep, OID_AUTO, time_since_reset,
93 CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED,
94 0, 0, sysctl_time_since_reset, "I",
95 "Continuous time since last SOC boot/wake started");
96
97 static int
98 sysctl_wake_conttime SYSCTL_HANDLER_ARGS
99 {
100 #pragma unused(arg1, arg2, oidp)
101 uint64_t return_value = ml_get_conttime_wake_time();
102 return SYSCTL_OUT(req, &return_value, sizeof(return_value));
103 }
104
105 SYSCTL_PROC(_machdep, OID_AUTO, wake_conttime,
106 CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED,
107 0, 0, sysctl_wake_conttime, "I",
108 "Continuous Time at the last wakeup");
109
110 #if defined(HAS_IPI)
111 static int
cpu_signal_deferred_timer(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)112 cpu_signal_deferred_timer(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
113 {
114 int new_value = 0;
115 int changed = 0;
116
117 int old_value = (int)ml_cpu_signal_deferred_get_timer();
118
119 int error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
120
121 if (error == 0 && changed) {
122 ml_cpu_signal_deferred_adjust_timer((uint64_t)new_value);
123 }
124
125 return error;
126 }
127
/* machdep.deferred_ipi_timeout: RW int routed through cpu_signal_deferred_timer() above. */
SYSCTL_PROC(_machdep, OID_AUTO, deferred_ipi_timeout,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0,
    cpu_signal_deferred_timer, "I", "Deferred IPI timeout (nanoseconds)");

#endif /* defined(HAS_IPI) */
134
/*
 * For source compatibility, here's some machdep.cpu mibs that
 * use host_info() to simulate reasonable answers.
 */

/* Parent node for the machdep.cpu.* mibs defined below. */
SYSCTL_NODE(_machdep, OID_AUTO, cpu, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "CPU info");
142
/*
 * Generic handler backing the machdep.cpu.* count mibs: fetches
 * host_basic_info via host_info() and returns one 32-bit field of it.
 *
 * Contract with the SYSCTL_PROC registrations below:
 *   arg1 = byte offset of the field within host_basic_info_data_t
 *          (offsetof(...) cast to void *)
 *   arg2 = sizeof the field; must be sizeof(uint32_t) or we panic,
 *          since the extraction below only handles 32-bit fields.
 */
static int
arm_host_info SYSCTL_HANDLER_ARGS
{
	__unused struct sysctl_oid *unused_oidp = oidp;

	host_basic_info_data_t hinfo;
	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
#define BSD_HOST 1
	kern_return_t kret = host_info((host_t)BSD_HOST,
	    HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
	if (KERN_SUCCESS != kret) {
		return EINVAL;
	}

	/* Registration bug if a non-32-bit field was wired up. */
	if (sizeof(uint32_t) != arg2) {
		panic("size mismatch");
	}

	/* Convert the byte offset into a uint32_t word index and read that word. */
	uintptr_t woffset = (uintptr_t)arg1 / sizeof(uint32_t);
	uint32_t datum = *(uint32_t *)(((uint32_t *)&hinfo) + woffset);
	return SYSCTL_OUT(req, &datum, sizeof(datum));
}
165
/*
 * The next four mibs all route through arm_host_info(); arg1 carries the
 * byte offset of the host_basic_info_data_t field to report and arg2 its
 * size (must be 32 bits -- see the panic in arm_host_info).
 */

/*
 * machdep.cpu.cores_per_package
 *
 * x86: derived from CPUID data.
 * ARM: how many physical cores we have in the AP; aka hw.physicalcpu_max
 */
static
SYSCTL_PROC(_machdep_cpu, OID_AUTO, cores_per_package,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)offsetof(host_basic_info_data_t, physical_cpu_max),
    sizeof(integer_t),
    arm_host_info, "I", "CPU cores per package");

/*
 * machdep.cpu.core_count
 *
 * x86: derived from CPUID data.
 * ARM: # active physical cores in the AP; aka hw.physicalcpu
 */
static
SYSCTL_PROC(_machdep_cpu, OID_AUTO, core_count,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)offsetof(host_basic_info_data_t, physical_cpu),
    sizeof(integer_t),
    arm_host_info, "I", "Number of enabled cores per package");

/*
 * machdep.cpu.logical_per_package
 *
 * x86: derived from CPUID data. Returns ENOENT if HTT bit not set, but
 * most x64 CPUs have that, so assume it's available.
 * ARM: total # logical cores in the AP; aka hw.logicalcpu_max
 */
static
SYSCTL_PROC(_machdep_cpu, OID_AUTO, logical_per_package,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)offsetof(host_basic_info_data_t, logical_cpu_max),
    sizeof(integer_t),
    arm_host_info, "I", "CPU logical cpus per package");

/*
 * machdep.cpu.thread_count
 *
 * x86: derived from CPUID data.
 * ARM: # active logical cores in the AP; aka hw.logicalcpu
 */
static
SYSCTL_PROC(_machdep_cpu, OID_AUTO, thread_count,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)offsetof(host_basic_info_data_t, logical_cpu),
    sizeof(integer_t),
    arm_host_info, "I", "Number of enabled threads per package");
218
/* Device-tree product string captured once at startup (see sysctl_load_brand_string); NULL/0 if absent. */
static SECURITY_READ_ONLY_LATE(char*) brand_string = NULL;
static SECURITY_READ_ONLY_LATE(size_t) brand_string_len = 0;
221
222 /*
223 * SecureDTLookupEntry() is only guaranteed to work before PE_init_iokit(),
224 * so we load the brand string (if available) in a startup handler.
225 */
226 __startup_func
227 static void
sysctl_load_brand_string(void)228 sysctl_load_brand_string(void)
229 {
230 DTEntry node;
231 void const *value = NULL;
232 unsigned int size = 0;
233
234 if (kSuccess != SecureDTLookupEntry(0, "/product", &node)) {
235 return;
236 }
237
238 if (kSuccess != SecureDTGetProperty(node, "product-soc-name", (void const **) &value, &size)) {
239 return;
240 }
241
242 if (size == 0) {
243 return;
244 }
245
246 brand_string = zalloc_permanent(size, ZALIGN_NONE);
247 if (brand_string == NULL) {
248 return;
249 }
250
251 memcpy(brand_string, value, size);
252 brand_string_len = size;
253 }
254 STARTUP(SYSCTL, STARTUP_RANK_MIDDLE, sysctl_load_brand_string);
255
256 /*
257 * machdep.cpu.brand_string
258 *
259 * x86: derived from CPUID data.
260 * ARM: Grab the product string from the device tree, if it exists.
261 * Otherwise, cons something up from the CPUID register.
262 * the value is already exported via the commpage. So keep it simple.
263 */
264 static int
265 make_brand_string SYSCTL_HANDLER_ARGS
266 {
267 __unused struct sysctl_oid *unused_oidp = oidp;
268 __unused void *unused_arg1 = arg1;
269 __unused int unused_arg2 = arg2;
270
271 if (brand_string != NULL) {
272 return SYSCTL_OUT(req, brand_string, brand_string_len);
273 }
274
275 const char *impl;
276
277 switch (cpuid_info()->arm_info.arm_implementor) {
278 case CPU_VID_APPLE:
279 impl = "Apple";
280 break;
281 case CPU_VID_ARM:
282 impl = "ARM";
283 break;
284 default:
285 impl = "ARM architecture";
286 break;
287 }
288
289 char buf[80];
290 snprintf(buf, sizeof(buf), "%s processor", impl);
291 return SYSCTL_OUT(req, buf, strlen(buf) + 1);
292 }
293
294 SYSCTL_PROC(_machdep_cpu, OID_AUTO, brand_string,
295 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
296 0, 0, make_brand_string, "A", "CPU brand string");
297
298
299 static int
300 virtual_address_size SYSCTL_HANDLER_ARGS
301 {
302 #pragma unused(arg1, arg2, oidp)
303 int return_value = 64 - T0SZ_BOOT;
304 return SYSCTL_OUT(req, &return_value, sizeof(return_value));
305 }
306
307 static
308 SYSCTL_PROC(_machdep, OID_AUTO, virtual_address_size,
309 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
310 0, 0, virtual_address_size, "I",
311 "Number of addressable bits in userspace virtual addresses");
312
313
#if DEVELOPMENT || DEBUG
/* machdep.tlto: ticket spinlock timeout, in mach absolute time units per the description. */
extern uint64_t TLockTimeOut;
SYSCTL_QUAD(_machdep, OID_AUTO, tlto,
    CTLFLAG_RW | CTLFLAG_LOCKED, &TLockTimeOut,
    "Ticket spinlock timeout (MATUs): use with care");

/* machdep.timebase_validation: toggles monotonicity checking of mach_absolute_time(). */
extern uint32_t timebase_validation;
SYSCTL_UINT(_machdep, OID_AUTO, timebase_validation,
    CTLFLAG_RW | CTLFLAG_LOCKED, &timebase_validation, 0,
    "Monotonicity validation of kernel mach_absolute_time()");

#if __WKDM_ISA_2P_WORKAROUND__
/*
 * Counters and knobs for the WKDM ISA-2P erratum workaround; the backing
 * variables are defined elsewhere (presumably the compressor/WKDM path --
 * TODO confirm).
 */
extern uint64_t wkdmdretries, wkdmdretriespb;
extern uint32_t simulate_wkdm2p_error, wkdm_isa_2p_war_required;
SYSCTL_QUAD(_machdep, OID_AUTO, wkdmdretries,
    CTLFLAG_RW | CTLFLAG_LOCKED, &wkdmdretries,
    "Number of WKDM errata retries");
SYSCTL_QUAD(_machdep, OID_AUTO, wkdmdretriespb,
    CTLFLAG_RW | CTLFLAG_LOCKED, &wkdmdretriespb,
    "Number of retries where payload was on page boundary");
SYSCTL_UINT(_machdep, OID_AUTO, simulate_wkdm2p_error,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &simulate_wkdm2p_error, 0, "");
SYSCTL_UINT(_machdep, OID_AUTO, wkdm_isa_2p_war_required,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &wkdm_isa_2p_war_required, 0, "");
#endif /* __WKDM_ISA_2P_WORKAROUND__ */
342
/*
 * macro to generate a sysctl machdep.cpu.sysreg_* for a given system register
 * using __builtin_arm_rsr64.
 *
 * Expands to a read-only CTLTYPE_QUAD handler plus its SYSCTL_PROC
 * registration; the register is read on whichever CPU happens to run the
 * handler ("on the current CPU", per the description string).
 */
#define SYSCTL_PROC_MACHDEP_CPU_SYSREG(name) \
static int \
sysctl_sysreg_##name SYSCTL_HANDLER_ARGS \
{ \
	_Pragma("unused(arg1, arg2, oidp)") \
	uint64_t return_value = __builtin_arm_rsr64(#name); \
	return SYSCTL_OUT(req, &return_value, sizeof(return_value)); \
} \
SYSCTL_PROC(_machdep_cpu, OID_AUTO, sysreg_##name, \
	CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED, \
	0, 0, sysctl_sysreg_##name, "Q", \
	#name " register on the current CPU");


// CPU system registers
// ARM64: AArch64 Vector Base Address Register
SYSCTL_PROC_MACHDEP_CPU_SYSREG(VBAR_EL1);
// ARM64: AArch64 Memory Attribute Indirection Register
SYSCTL_PROC_MACHDEP_CPU_SYSREG(MAIR_EL1);
// ARM64: AArch64 Translation table base register 1
SYSCTL_PROC_MACHDEP_CPU_SYSREG(TTBR1_EL1);
// ARM64: AArch64 System Control Register
SYSCTL_PROC_MACHDEP_CPU_SYSREG(SCTLR_EL1);
// ARM64: AArch64 Translation Control Register
SYSCTL_PROC_MACHDEP_CPU_SYSREG(TCR_EL1);
// ARM64: AArch64 Memory Model Feature Register 0
SYSCTL_PROC_MACHDEP_CPU_SYSREG(ID_AA64MMFR0_EL1);
// ARM64: AArch64 Instruction Set Attribute Register 1
SYSCTL_PROC_MACHDEP_CPU_SYSREG(ID_AA64ISAR1_EL1);
#if APPLE_ARM64_ARCH_FAMILY
// Apple ID Register
SYSCTL_PROC_MACHDEP_CPU_SYSREG(AIDR_EL1);
#endif /* APPLE_ARM64_ARCH_FAMILY */

#endif /* DEVELOPMENT || DEBUG */
382
383
#ifdef ML_IO_TIMEOUTS_ENABLED
/*
 * Timeouts for ml_{io|phys}_{read|write}...
 * RO on DEVELOPMENT/DEBUG kernels.
 *
 * NOTE(review): comment and flags disagree -- the flags below make these
 * RW on DEVELOPMENT/DEBUG and RO otherwise. The backing *_to variables and
 * *_panic ints are not declared in this file; they come from an included
 * header (presumably machine_routines.h -- confirm).
 */

#if DEVELOPMENT || DEBUG
#define MMIO_TIMEOUT_FLAGS (CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED)
#else
#define MMIO_TIMEOUT_FLAGS (CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED)
#endif

SYSCTL_QUAD(_machdep, OID_AUTO, report_phy_read_delay, MMIO_TIMEOUT_FLAGS,
    &report_phy_read_delay_to, "Maximum time before io/phys read gets reported or panics");
SYSCTL_QUAD(_machdep, OID_AUTO, report_phy_write_delay, MMIO_TIMEOUT_FLAGS,
    &report_phy_write_delay_to, "Maximum time before io/phys write gets reported or panics");
SYSCTL_QUAD(_machdep, OID_AUTO, trace_phy_read_delay, MMIO_TIMEOUT_FLAGS,
    &trace_phy_read_delay_to, "Maximum time before io/phys read gets ktraced");
SYSCTL_QUAD(_machdep, OID_AUTO, trace_phy_write_delay, MMIO_TIMEOUT_FLAGS,
    &trace_phy_write_delay_to, "Maximum time before io/phys write gets ktraced");

SYSCTL_INT(_machdep, OID_AUTO, phy_read_delay_panic, CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &phy_read_panic, 0, "if set, report-phy-read-delay timeout panics");
SYSCTL_INT(_machdep, OID_AUTO, phy_write_delay_panic, CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &phy_write_panic, 0, "if set, report-phy-write-delay timeout panics");

#if ML_IO_SIMULATE_STRETCHED_ENABLED
SYSCTL_QUAD(_machdep, OID_AUTO, sim_stretched_io_ns, CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &simulate_stretched_io, "simulate stretched io in ml_read_io, ml_write_io");
#endif /* ML_IO_SIMULATE_STRETCHED_ENABLED */

#endif /* ML_IO_TIMEOUTS_ENABLED */
416
/* kern.opensource_kernel: read-only; defaults to 1 here (presumably overridden in non-opensource builds -- confirm). */
int opensource_kernel = 1;
SYSCTL_INT(_kern, OID_AUTO, opensource_kernel, CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
    &opensource_kernel, 0, "Opensource Kernel");
420
421 static int
422 machdep_ptrauth_enabled SYSCTL_HANDLER_ARGS
423 {
424 #pragma unused(arg1, arg2, oidp)
425
426 #if __has_feature(ptrauth_calls)
427 task_t task = current_task();
428 int ret = !ml_task_get_disable_user_jop(task);
429 #else
430 const int ret = 0;
431 #endif
432
433 return SYSCTL_OUT(req, &ret, sizeof(ret));
434 }
435
436 SYSCTL_PROC(_machdep, OID_AUTO, ptrauth_enabled,
437 CTLTYPE_INT | CTLFLAG_KERN | CTLFLAG_RD,
438 0, 0,
439 machdep_ptrauth_enabled, "I", "");
440
#if CONFIG_TELEMETRY && (DEBUG || DEVELOPMENT)
/* debug.trap_telemetry_reported_events: monotone counter maintained by the trap telemetry subsystem. */
extern unsigned long trap_telemetry_reported_events;
SYSCTL_ULONG(_debug, OID_AUTO, trap_telemetry_reported_events,
    CTLFLAG_RD | CTLFLAG_LOCKED, &trap_telemetry_reported_events,
    "Number of trap telemetry events successfully reported");

/* debug.trap_telemetry_capacity_dropped_events: drops due to a full RSB (ring/staging buffer -- TODO confirm expansion). */
extern unsigned long trap_telemetry_capacity_dropped_events;
SYSCTL_ULONG(_debug, OID_AUTO, trap_telemetry_capacity_dropped_events,
    CTLFLAG_RD | CTLFLAG_LOCKED, &trap_telemetry_capacity_dropped_events,
    "Number of trap telemetry events which were dropped due to a full RSB");
#endif /* CONFIG_TELEMETRY && (DEBUG || DEVELOPMENT) */
452
453
454 #if DEBUG || DEVELOPMENT
455 /* A sysctl that can be used to check if the platform supports DRAM ECC and error injection. */
456 static int
457 dram_ecc_error_injection_capable SYSCTL_HANDLER_ARGS
458 {
459 #pragma unused(arg1, arg2, req)
460 int capable = 0;
461
462 /* T6041 does not support error injection. */
463
464 return SYSCTL_OUT(req, &capable, sizeof(capable));
465 }
466 SYSCTL_PROC(_vm, OID_AUTO, dram_ecc_error_injection_capable, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
467 0, 0, &dram_ecc_error_injection_capable, "I", "");
468 #endif /* DEBUG || DEVELOPMENT */
469
470