1 /*
2 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
3 */
4 #include <sys/param.h>
5 #include <sys/kernel.h>
6 #include <sys/sysctl.h>
7
8 #include <machine/machine_routines.h>
9
10 #include <mach/host_info.h>
11 #include <mach/mach_host.h>
12 #include <arm/cpuid.h>
13 #include <kern/hvg_hypercall.h>
14 #include <vm/pmap.h>
15 #include <kern/zalloc.h>
16 #include <libkern/libkern.h>
17 #include <pexpert/device_tree.h>
18 #include <kern/task.h>
19 #include <vm/vm_protos.h>
20
21 #if HYPERVISOR
22 #include <kern/hv_support.h>
23 #include <kern/bits.h>
24 #endif
25
26 #define __STR(x) #x
27 #define STRINGIFY(x) __STR(x)
28
29 extern uint64_t wake_abstime;
30
31 #if DEVELOPMENT || DEBUG
32 /* Various tuneables to modulate selection of WFE in the idle path */
33 extern int wfe_rec_max;
34 extern int wfe_allowed;
35
36 extern int wfe_rec_none;
37 extern uint32_t idle_proximate_timer_wfe;
38 extern uint32_t idle_proximate_io_wfe_masked;
39 extern uint32_t idle_proximate_io_wfe_unmasked;
40
/* machdep.wfe_rec_max: RW tuneable backing wfe_rec_max (idle-path WFE selection). */
static
SYSCTL_INT(_machdep, OID_AUTO, wfe_rec_max,
    CTLFLAG_RW, &wfe_rec_max, 0,
    "");

/* machdep.wfe_allowed: RW tuneable backing wfe_allowed. */
static
SYSCTL_INT(_machdep, OID_AUTO, wfe_allowed,
    CTLFLAG_RW, &wfe_allowed, 0,
    "");

/* machdep.idle_timer_wfe: backs idle_proximate_timer_wfe. */
static
SYSCTL_INT(_machdep, OID_AUTO, idle_timer_wfe,
    CTLFLAG_RW, &idle_proximate_timer_wfe, 0,
    "");

/* machdep.idle_io_wfe_masked: backs idle_proximate_io_wfe_masked. */
static
SYSCTL_INT(_machdep, OID_AUTO, idle_io_wfe_masked,
    CTLFLAG_RW, &idle_proximate_io_wfe_masked, 0,
    "");
/* machdep.idle_io_wfe_unmasked: backs idle_proximate_io_wfe_unmasked. */
static
SYSCTL_INT(_machdep, OID_AUTO, idle_io_wfe_unmasked,
    CTLFLAG_RW, &idle_proximate_io_wfe_unmasked, 0,
    "");

/* machdep.wfe_rec_none: backs wfe_rec_none. */
static
SYSCTL_INT(_machdep, OID_AUTO, wfe_rec_none,
    CTLFLAG_RW, &wfe_rec_none, 0,
    "");

/* machdep.wfe_rec_override_mat: 64-bit override (units: MATUs, per the lock timeout convention below — TODO confirm). */
extern uint64_t wfe_rec_override_mat;
SYSCTL_QUAD(_machdep, OID_AUTO, wfe_rec_override_mat,
    CTLFLAG_RW, &wfe_rec_override_mat,
    "");

/* machdep.wfe_rec_clamp: 64-bit clamp applied to WFE recommendations. */
extern uint64_t wfe_rec_clamp;
SYSCTL_QUAD(_machdep, OID_AUTO, wfe_rec_clamp,
    CTLFLAG_RW, &wfe_rec_clamp,
    "");
79
80 #endif
81
/* machdep.wake_abstime: absolute (mach) time recorded at the last wakeup. */
static
SYSCTL_QUAD(_machdep, OID_AUTO, wake_abstime,
    CTLFLAG_RD, &wake_abstime,
    "Absolute Time at the last wakeup");
86
87 static int
88 sysctl_time_since_reset SYSCTL_HANDLER_ARGS
89 {
90 #pragma unused(arg1, arg2, oidp)
91 uint64_t return_value = ml_get_time_since_reset();
92 return SYSCTL_OUT(req, &return_value, sizeof(return_value));
93 }
94
95 SYSCTL_PROC(_machdep, OID_AUTO, time_since_reset,
96 CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED,
97 0, 0, sysctl_time_since_reset, "I",
98 "Continuous time since last SOC boot/wake started");
99
100 static int
101 sysctl_wake_conttime SYSCTL_HANDLER_ARGS
102 {
103 #pragma unused(arg1, arg2, oidp)
104 uint64_t return_value = ml_get_conttime_wake_time();
105 return SYSCTL_OUT(req, &return_value, sizeof(return_value));
106 }
107
108 SYSCTL_PROC(_machdep, OID_AUTO, wake_conttime,
109 CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED,
110 0, 0, sysctl_wake_conttime, "I",
111 "Continuous Time at the last wakeup");
112
113 #if defined(HAS_IPI)
/*
 * RW handler for machdep.deferred_ipi_timeout.
 *
 * Reads report the current deferred-IPI timeout (nanoseconds, per the
 * oid description); writes push the new value down to the platform
 * layer via ml_cpu_signal_deferred_adjust_timer().
 */
static int
cpu_signal_deferred_timer(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value = 0;
	int changed = 0;

	/* Narrowing cast: the mib is CTLTYPE_INT, so expose an int view. */
	int old_value = (int)ml_cpu_signal_deferred_get_timer();

	/* Copies old_value out; on a write, captures the value and sets 'changed'. */
	int error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);

	if (error == 0 && changed) {
		ml_cpu_signal_deferred_adjust_timer((uint64_t)new_value);
	}

	return error;
}
130
/* machdep.deferred_ipi_timeout: handled by cpu_signal_deferred_timer(). */
SYSCTL_PROC(_machdep, OID_AUTO, deferred_ipi_timeout,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0,
    cpu_signal_deferred_timer, "I", "Deferred IPI timeout (nanoseconds)");
135
136 #endif /* defined(HAS_IPI) */
137
138 /*
139 * For source compatibility, here's some machdep.cpu mibs that
140 * use host_info() to simulate reasonable answers.
141 */
142
/* Parent node for the machdep.cpu.* source-compatibility mibs below. */
SYSCTL_NODE(_machdep, OID_AUTO, cpu, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "CPU info");
145
146 static int
147 arm_host_info SYSCTL_HANDLER_ARGS
148 {
149 __unused struct sysctl_oid *unused_oidp = oidp;
150
151 host_basic_info_data_t hinfo;
152 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
153 #define BSD_HOST 1
154 kern_return_t kret = host_info((host_t)BSD_HOST,
155 HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
156 if (KERN_SUCCESS != kret) {
157 return EINVAL;
158 }
159
160 if (sizeof(uint32_t) != arg2) {
161 panic("size mismatch");
162 }
163
164 uintptr_t woffset = (uintptr_t)arg1 / sizeof(uint32_t);
165 uint32_t datum = *(uint32_t *)(((uint32_t *)&hinfo) + woffset);
166 return SYSCTL_OUT(req, &datum, sizeof(datum));
167 }
168
169 /*
170 * machdep.cpu.cores_per_package
171 *
172 * x86: derived from CPUID data.
173 * ARM: how many physical cores we have in the AP; aka hw.physicalcpu_max
174 */
175 static
176 SYSCTL_PROC(_machdep_cpu, OID_AUTO, cores_per_package,
177 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
178 (void *)offsetof(host_basic_info_data_t, physical_cpu_max),
179 sizeof(integer_t),
180 arm_host_info, "I", "CPU cores per package");
181
182 /*
183 * machdep.cpu.core_count
184 *
185 * x86: derived from CPUID data.
186 * ARM: # active physical cores in the AP; aka hw.physicalcpu
187 */
188 static
189 SYSCTL_PROC(_machdep_cpu, OID_AUTO, core_count,
190 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
191 (void *)offsetof(host_basic_info_data_t, physical_cpu),
192 sizeof(integer_t),
193 arm_host_info, "I", "Number of enabled cores per package");
194
195 /*
196 * machdep.cpu.logical_per_package
197 *
198 * x86: derived from CPUID data. Returns ENOENT if HTT bit not set, but
199 * most x64 CPUs have that, so assume it's available.
200 * ARM: total # logical cores in the AP; aka hw.logicalcpu_max
201 */
202 static
203 SYSCTL_PROC(_machdep_cpu, OID_AUTO, logical_per_package,
204 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
205 (void *)offsetof(host_basic_info_data_t, logical_cpu_max),
206 sizeof(integer_t),
207 arm_host_info, "I", "CPU logical cpus per package");
208
209 /*
210 * machdep.cpu.thread_count
211 *
212 * x86: derived from CPUID data.
213 * ARM: # active logical cores in the AP; aka hw.logicalcpu
214 */
215 static
216 SYSCTL_PROC(_machdep_cpu, OID_AUTO, thread_count,
217 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
218 (void *)offsetof(host_basic_info_data_t, logical_cpu),
219 sizeof(integer_t),
220 arm_host_info, "I", "Number of enabled threads per package");
221
/* Product SoC name copied from the device tree at startup; NULL if absent. */
static SECURITY_READ_ONLY_LATE(char*) brand_string = NULL;
/* Byte length of brand_string (includes whatever the DT property held). */
static SECURITY_READ_ONLY_LATE(size_t) brand_string_len = 0;
224
225 /*
226 * SecureDTLookupEntry() is only guaranteed to work before PE_init_iokit(),
227 * so we load the brand string (if available) in a startup handler.
228 */
__startup_func
static void
sysctl_load_brand_string(void)
{
	DTEntry node;
	void const *value = NULL;
	unsigned int size = 0;

	/* "/product" may be absent; make_brand_string() then falls back to CPUID. */
	if (kSuccess != SecureDTLookupEntry(0, "/product", &node)) {
		return;
	}

	if (kSuccess != SecureDTGetProperty(node, "product-soc-name", (void const **) &value, &size)) {
		return;
	}

	if (size == 0) {
		return;
	}

	/* Permanent allocation: the brand string lives for the kernel's lifetime. */
	brand_string = zalloc_permanent(size, ZALIGN_NONE);
	if (brand_string == NULL) {
		return;
	}

	/* Copy out of the DT blob; brand_string_len is only set on full success. */
	memcpy(brand_string, value, size);
	brand_string_len = size;
}
STARTUP(SYSCTL, STARTUP_RANK_MIDDLE, sysctl_load_brand_string);
258
259 /*
260 * machdep.cpu.brand_string
261 *
262 * x86: derived from CPUID data.
263 * ARM: Grab the product string from the device tree, if it exists.
264 * Otherwise, cons something up from the CPUID register.
265 * the value is already exported via the commpage. So keep it simple.
266 */
267 static int
268 make_brand_string SYSCTL_HANDLER_ARGS
269 {
270 __unused struct sysctl_oid *unused_oidp = oidp;
271 __unused void *unused_arg1 = arg1;
272 __unused int unused_arg2 = arg2;
273
274 if (brand_string != NULL) {
275 return SYSCTL_OUT(req, brand_string, brand_string_len);
276 }
277
278 const char *impl;
279
280 switch (cpuid_info()->arm_info.arm_implementor) {
281 case CPU_VID_APPLE:
282 impl = "Apple";
283 break;
284 case CPU_VID_ARM:
285 impl = "ARM";
286 break;
287 default:
288 impl = "ARM architecture";
289 break;
290 }
291
292 char buf[80];
293 snprintf(buf, sizeof(buf), "%s processor", impl);
294 return SYSCTL_OUT(req, buf, strlen(buf) + 1);
295 }
296
/* machdep.cpu.brand_string: string mib served by make_brand_string(). */
SYSCTL_PROC(_machdep_cpu, OID_AUTO, brand_string,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, make_brand_string, "A", "CPU brand string");
300
301
302 static int
303 virtual_address_size SYSCTL_HANDLER_ARGS
304 {
305 #pragma unused(arg1, arg2, oidp)
306 int return_value = 64 - T0SZ_BOOT;
307 return SYSCTL_OUT(req, &return_value, sizeof(return_value));
308 }
309
/* machdep.virtual_address_size: served by virtual_address_size() above. */
static
SYSCTL_PROC(_machdep, OID_AUTO, virtual_address_size,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, virtual_address_size, "I",
    "Number of addressable bits in userspace virtual addresses");
315
316
317 #if DEVELOPMENT || DEBUG
/* machdep.tlto: ticket spinlock timeout, in mach absolute time units. */
extern uint64_t TLockTimeOut;
SYSCTL_QUAD(_machdep, OID_AUTO, tlto,
    CTLFLAG_RW | CTLFLAG_LOCKED, &TLockTimeOut,
    "Ticket spinlock timeout (MATUs): use with care");

/* machdep.timebase_validation: toggles mach_absolute_time() monotonicity checks. */
extern uint32_t timebase_validation;
SYSCTL_UINT(_machdep, OID_AUTO, timebase_validation,
    CTLFLAG_RW | CTLFLAG_LOCKED, &timebase_validation, 0,
    "Monotonicity validation of kernel mach_absolute_time()");

#if __WKDM_ISA_2P_WORKAROUND__
/* Counters and controls for the WKDM 2P errata workaround. */
extern uint64_t wkdmdretries, wkdmdretriespb;
extern uint32_t simulate_wkdm2p_error, wkdm_isa_2p_war_required;
SYSCTL_QUAD(_machdep, OID_AUTO, wkdmdretries,
    CTLFLAG_RW | CTLFLAG_LOCKED, &wkdmdretries,
    "Number of WKDM errata retries");
SYSCTL_QUAD(_machdep, OID_AUTO, wkdmdretriespb,
    CTLFLAG_RW | CTLFLAG_LOCKED, &wkdmdretriespb,
    "Number of retries where payload was on page boundary");
/* RW so tests can force an error; nonzero simulates a WKDM 2P failure. */
SYSCTL_UINT(_machdep, OID_AUTO, simulate_wkdm2p_error,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &simulate_wkdm2p_error, 0, "");
SYSCTL_UINT(_machdep, OID_AUTO, wkdm_isa_2p_war_required,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &wkdm_isa_2p_war_required, 0, "");
#endif /* __WKDM_ISA_2P_WORKAROUND__ */
344
345
346 /*
347 * macro to generate a sysctl machdep.cpu.sysreg_* for a given system register
348 * using __builtin_arm_rsr64.
349 */
/*
 * Expands to a read handler plus a read-only machdep.cpu.sysreg_<name>
 * quad mib that returns the named system register of the CPU the
 * handler happens to run on (no core pinning is done here).
 */
#define SYSCTL_PROC_MACHDEP_CPU_SYSREG(name) \
static int \
sysctl_sysreg_##name SYSCTL_HANDLER_ARGS \
{ \
	_Pragma("unused(arg1, arg2, oidp)") \
	uint64_t return_value = __builtin_arm_rsr64(#name); \
	return SYSCTL_OUT(req, &return_value, sizeof(return_value)); \
} \
SYSCTL_PROC(_machdep_cpu, OID_AUTO, sysreg_##name, \
    CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED, \
    0, 0, sysctl_sysreg_##name, "Q", \
    #name " register on the current CPU");
362
363
// CPU system registers exported on DEVELOPMENT/DEBUG kernels only.
// ARM64: AArch64 Vector Base Address Register
SYSCTL_PROC_MACHDEP_CPU_SYSREG(VBAR_EL1);
// ARM64: AArch64 Memory Attribute Indirection Register
SYSCTL_PROC_MACHDEP_CPU_SYSREG(MAIR_EL1);
// ARM64: AArch64 Translation table base register 1
SYSCTL_PROC_MACHDEP_CPU_SYSREG(TTBR1_EL1);
// ARM64: AArch64 System Control Register
SYSCTL_PROC_MACHDEP_CPU_SYSREG(SCTLR_EL1);
// ARM64: AArch64 Translation Control Register
SYSCTL_PROC_MACHDEP_CPU_SYSREG(TCR_EL1);
// ARM64: AArch64 Memory Model Feature Register 0
SYSCTL_PROC_MACHDEP_CPU_SYSREG(ID_AA64MMFR0_EL1);
// ARM64: AArch64 Instruction Set Attribute Register 1
SYSCTL_PROC_MACHDEP_CPU_SYSREG(ID_AA64ISAR1_EL1);
#if APPLE_ARM64_ARCH_FAMILY
// Apple ID Register
SYSCTL_PROC_MACHDEP_CPU_SYSREG(AIDR_EL1);
#endif /* APPLE_ARM64_ARCH_FAMILY */
383
384 #endif /* DEVELOPMENT || DEBUG */
385
386
#ifdef ML_IO_TIMEOUTS_ENABLED
/*
 * Timeouts for ml_{io|phys}_{read|write}...
 * RO on DEVELOPMENT/DEBUG kernels.
 *
 * The *_delay_to / *_panic variables are declared in headers included
 * above (assumed: machine_routines.h) — TODO confirm the defining TU.
 */

#if DEVELOPMENT || DEBUG
#define MMIO_TIMEOUT_FLAGS (CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED)
#else
#define MMIO_TIMEOUT_FLAGS (CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED)
#endif

SYSCTL_QUAD(_machdep, OID_AUTO, report_phy_read_delay, MMIO_TIMEOUT_FLAGS,
    &report_phy_read_delay_to, "Maximum time before io/phys read gets reported or panics");
SYSCTL_QUAD(_machdep, OID_AUTO, report_phy_write_delay, MMIO_TIMEOUT_FLAGS,
    &report_phy_write_delay_to, "Maximum time before io/phys write gets reported or panics");
SYSCTL_QUAD(_machdep, OID_AUTO, trace_phy_read_delay, MMIO_TIMEOUT_FLAGS,
    &trace_phy_read_delay_to, "Maximum time before io/phys read gets ktraced");
SYSCTL_QUAD(_machdep, OID_AUTO, trace_phy_write_delay, MMIO_TIMEOUT_FLAGS,
    &trace_phy_write_delay_to, "Maximum time before io/phys write gets ktraced");

/* Panic knobs stay RW on all build flavors, unlike the timeouts above. */
SYSCTL_INT(_machdep, OID_AUTO, phy_read_delay_panic, CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &phy_read_panic, 0, "if set, report-phy-read-delay timeout panics");
SYSCTL_INT(_machdep, OID_AUTO, phy_write_delay_panic, CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &phy_write_panic, 0, "if set, report-phy-write-delay timeout panics");

#if ML_IO_SIMULATE_STRETCHED_ENABLED
SYSCTL_QUAD(_machdep, OID_AUTO, sim_stretched_io_ns, CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &simulate_stretched_io, "simulate stretched io in ml_read_io, ml_write_io");
#endif /* ML_IO_SIMULATE_STRETCHED_ENABLED */

#endif /* ML_IO_TIMEOUTS_ENABLED */
419
/* kern.opensource_kernel: 1 in this build (open-source kernel sources). */
int opensource_kernel = 1;
SYSCTL_INT(_kern, OID_AUTO, opensource_kernel, CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
    &opensource_kernel, 0, "Opensource Kernel");
423
424 static int
425 machdep_ptrauth_enabled SYSCTL_HANDLER_ARGS
426 {
427 #pragma unused(arg1, arg2, oidp)
428
429 #if __has_feature(ptrauth_calls)
430 task_t task = current_task();
431 int ret = !ml_task_get_disable_user_jop(task);
432 #else
433 const int ret = 0;
434 #endif
435
436 return SYSCTL_OUT(req, &ret, sizeof(ret));
437 }
438
/*
 * machdep.ptrauth_enabled: served by machdep_ptrauth_enabled().
 * NOTE(review): unlike most mibs in this file, CTLFLAG_LOCKED is not
 * set here — confirm whether that is intentional.
 */
SYSCTL_PROC(_machdep, OID_AUTO, ptrauth_enabled,
    CTLTYPE_INT | CTLFLAG_KERN | CTLFLAG_RD,
    0, 0,
    machdep_ptrauth_enabled, "I", "");
443
/* CTRR flavor compiled into this kernel, chosen at build time. */
static const char _ctrr_type[] =
#if defined(KERNEL_CTRR_VERSION)
    "ctrrv" STRINGIFY(KERNEL_CTRR_VERSION);
#elif defined(KERNEL_INTEGRITY_KTRR)
    "ktrr";
#elif defined(KERNEL_INTEGRITY_PV_CTRR)
    "pv";
#else
    "none";
#endif

/* machdep.ctrr_type: read-only; __DECONST because SYSCTL_STRING wants char *. */
SYSCTL_STRING(_machdep, OID_AUTO, ctrr_type,
    CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
    __DECONST(char *, _ctrr_type), 0,
    "CTRR type supported by hardware/kernel");
459
460 #if CONFIG_TELEMETRY && (DEBUG || DEVELOPMENT)
/* debug.trap_telemetry_reported_events: read-only event counter. */
extern unsigned long trap_telemetry_reported_events;
SYSCTL_ULONG(_debug, OID_AUTO, trap_telemetry_reported_events,
    CTLFLAG_RD | CTLFLAG_LOCKED, &trap_telemetry_reported_events,
    "Number of trap telemetry events successfully reported");

/* debug.trap_telemetry_capacity_dropped_events: drops due to a full RSB. */
extern unsigned long trap_telemetry_capacity_dropped_events;
SYSCTL_ULONG(_debug, OID_AUTO, trap_telemetry_capacity_dropped_events,
    CTLFLAG_RD | CTLFLAG_LOCKED, &trap_telemetry_capacity_dropped_events,
    "Number of trap telemetry events which were dropped due to a full RSB");
470 #endif /* CONFIG_TELEMETRY && (DEBUG || DEVELOPMENT) */
471
472
473 #if DEBUG || DEVELOPMENT
474 /* A sysctl that can be used to check if the platform supports DRAM ECC and error injection. */
475 static int
476 dram_ecc_error_injection_capable SYSCTL_HANDLER_ARGS
477 {
478 #pragma unused(arg1, arg2, req)
479 int capable = 0;
480
481 /* T6041 does not support error injection. */
482
483 return SYSCTL_OUT(req, &capable, sizeof(capable));
484 }
485 SYSCTL_PROC(_vm, OID_AUTO, dram_ecc_error_injection_capable, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
486 0, 0, &dram_ecc_error_injection_capable, "I", "");
487 #endif /* DEBUG || DEVELOPMENT */
488
489
490 #if DEBUG || DEVELOPMENT
/* debug.ipcpv_telemetry_count: read-only IPC policy-violation counter. */
extern _Atomic unsigned int ipcpv_telemetry_count;
SYSCTL_UINT(_debug, OID_AUTO, ipcpv_telemetry_count,
    CTLFLAG_RD | CTLFLAG_LOCKED, &ipcpv_telemetry_count,
    0, "Number of ipc policy violation telemetry emitted");
495 #endif /* DEBUG || DEVELOPMENT */
496