1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*-
29 * Copyright (c) 1982, 1986, 1989, 1993
30 * The Regents of the University of California. All rights reserved.
31 *
32 * This code is derived from software contributed to Berkeley by
33 * Mike Karels at Berkeley Software Design, Inc.
34 *
35 * Quite extensively rewritten by Poul-Henning Kamp of the FreeBSD
36 * project, to make these variables more userfriendly.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
67 */
68
69 #include <sys/param.h>
70 #include <sys/kernel.h>
71 #include <sys/systm.h>
72 #include <sys/sysctl.h>
73 #include <sys/proc_internal.h>
74 #include <sys/unistd.h>
75
76 #if defined(SMP)
77 #include <machine/smp.h>
78 #endif
79
80 #include <sys/param.h> /* XXX prune includes */
81 #include <sys/systm.h>
82 #include <sys/kernel.h>
83 #include <sys/malloc.h>
84 #include <sys/proc.h>
85 #include <sys/file_internal.h>
86 #include <sys/vnode.h>
87 #include <sys/unistd.h>
88 #include <sys/ioctl.h>
89 #include <sys/namei.h>
90 #include <sys/tty.h>
91 #include <sys/disklabel.h>
92 #include <sys/vm.h>
93 #include <sys/sysctl.h>
94 #include <sys/user.h>
95 #include <mach/machine.h>
96 #include <mach/mach_types.h>
97 #include <mach/vm_param.h>
98 #include <kern/task.h>
99 #include <vm/vm_kern.h>
100 #include <vm/vm_map.h>
101 #include <vm/vm_protos.h>
102 #include <mach/host_info.h>
103 #include <kern/pms.h>
104 #include <pexpert/device_tree.h>
105 #include <pexpert/pexpert.h>
106 #include <kern/sched_prim.h>
107 #include <console/serial_protos.h>
108
109 extern vm_map_t bsd_pageable_map;
110
111 #include <sys/mount_internal.h>
112 #include <sys/kdebug.h>
113
114 #include <IOKit/IOPlatformExpert.h>
115 #include <pexpert/pexpert.h>
116
117 #include <machine/config.h>
118 #include <machine/machine_routines.h>
119 #include <machine/cpu_capabilities.h>
120
121 #include <mach/mach_host.h> /* for host_info() */
122
123 #if defined(__i386__) || defined(__x86_64__)
124 #include <i386/cpuid.h> /* for cpuid_info() */
125 #endif
126
127 #if defined(__arm64__)
128 #include <arm/cpuid.h> /* for cpuid_info() & cache_info() */
129 #include <arm/cpu_capabilities_public.h>
130 #endif
131
132 #if defined(CONFIG_XNUPOST)
133 #include <tests/ktest.h>
134 #endif
135
136 /**
137 * Prevents an issue with creating the sysctl node hw.optional.arm on some
138 * platforms. If the 'arm' macro is defined, then the word "arm" is preprocessed
139 * to 1. As the 'arm' macro is not used in this file, we do not need to redefine
140 * after we are done.
141 */
142 #if defined(arm)
143 #undef arm
144 #endif /* defined(arm) */
145
#ifndef MAX
/*
 * Return the larger of two values.
 * NOTE: operands are fully parenthesized so that low-precedence argument
 * expressions (e.g. MAX(x & 1, y)) compare correctly; operands may still
 * be evaluated twice, so avoid side effects in the arguments.
 */
#define MAX(a, b) (((a) >= (b)) ? (a) : (b))
#endif
149
150 #if defined(__arm64__) && defined(CONFIG_XNUPOST)
151 kern_return_t arm_cpu_capabilities_legacy_test(void);
152 #endif /* defined(__arm64__) && defined(CONFIG_XNUPOST) */
153
/* XXX This should be in a BSD accessible Mach header, but isn't. */
extern unsigned int vm_page_wire_count;

/* Backing store for simple hw.* variables published below via SYSCTL_INT/SYSCTL_OPAQUE. */
static int cputhreadtype, cpu64bit;
static uint64_t cacheconfig[10];
static int packages;

/* hw.osenvironment payload; captured at most once (device tree or IOKit on Intel). */
static char * osenvironment = NULL;
static uint32_t osenvironment_size = 0;
static int osenvironment_initialized = 0;     /* nonzero once the value is final; waited on via thread_wakeup event */

/* Values of the optional /chosen device-tree properties, valid only when the
 * matching property_existence bit is set. */
static uint32_t ephemeral_storage = 0;
static uint32_t use_recovery_securityd = 0;

/* Records whether each optional device-tree property was actually present,
 * so absent properties can be reported as EINVAL rather than 0. */
static struct {
	uint32_t ephemeral_storage:1;
	uint32_t use_recovery_securityd:1;
} property_existence = {0, 0};
172
/* Top-level sysctl namespaces (CTL_* ids come from sys/sysctl.h). */
SYSCTL_EXTENSIBLE_NODE(, 0, sysctl, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "Sysctl internal magic");
SYSCTL_EXTENSIBLE_NODE(, CTL_KERN, kern, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "High kernel, proc, limits &c");
SYSCTL_EXTENSIBLE_NODE(, CTL_VM, vm, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "Virtual memory");
SYSCTL_EXTENSIBLE_NODE(, CTL_VFS, vfs, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "File system");
SYSCTL_EXTENSIBLE_NODE(, CTL_NET, net, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "Network, (see socket.h)");
SYSCTL_EXTENSIBLE_NODE(, CTL_DEBUG, debug, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "Debugging");
#if DEBUG || DEVELOPMENT
/* debug.test: hidden (CTLFLAG_MASKED) parent for kernel test sysctls. */
SYSCTL_NODE(_debug, OID_AUTO, test,
    CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_MASKED, 0, "tests");
#endif /* DEBUG || DEVELOPMENT */
SYSCTL_NODE(, CTL_HW, hw, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "hardware");
SYSCTL_EXTENSIBLE_NODE(, CTL_MACHDEP, machdep, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "machine dependent");
SYSCTL_NODE(, CTL_USER, user, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "user-level");

SYSCTL_NODE(_kern, OID_AUTO, bridge, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "bridge");

/* Copy a fixed-size value out to the requesting process. */
#define SYSCTL_RETURN(r, x) SYSCTL_OUT(r, &x, sizeof(x))
200
/******************************************************************************
 * hw.* MIB
 */

/*
 * Flag bits OR'ed into the arg2 selector handed to sysctl_hw_generic():
 *   CTLHW_RETQUAD   - caller wants a 64-bit (quad) result
 *   CTLHW_LOCAL     - selector is private to this file, not a public HW_* id
 *   CTLHW_PERFLEVEL - selector is per-perflevel; arg1 carries the level index
 */
#define CTLHW_RETQUAD   (1U << 31)
#define CTLHW_LOCAL     (1U << 30)
#define CTLHW_PERFLEVEL (1U << 29)

/* File-local selectors for whole-system values. */
#define HW_LOCAL_CPUTHREADTYPE  (1 | CTLHW_LOCAL)
#define HW_LOCAL_PHYSICALCPU    (2 | CTLHW_LOCAL)
#define HW_LOCAL_PHYSICALCPUMAX (3 | CTLHW_LOCAL)
#define HW_LOCAL_LOGICALCPU     (4 | CTLHW_LOCAL)
#define HW_LOCAL_LOGICALCPUMAX  (5 | CTLHW_LOCAL)
#define HW_LOCAL_CPUTYPE        (6 | CTLHW_LOCAL)
#define HW_LOCAL_CPUSUBTYPE     (7 | CTLHW_LOCAL)
#define HW_LOCAL_CPUFAMILY      (8 | CTLHW_LOCAL)
#define HW_LOCAL_CPUSUBFAMILY   (9 | CTLHW_LOCAL)
#define HW_NPERFLEVELS          (10 | CTLHW_LOCAL)
/* File-local selectors for per-perflevel (hw.perflevelN.*) values. */
#define HW_PERFLEVEL_PHYSICALCPU    (11 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_PHYSICALCPUMAX (12 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_LOGICALCPU     (13 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_LOGICALCPUMAX  (14 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_L1ICACHESIZE   (15 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_L1DCACHESIZE   (16 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_L2CACHESIZE    (17 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_CPUSPERL2      (18 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_L3CACHESIZE    (19 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_CPUSPERL3      (20 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_NAME           (21 | CTLHW_PERFLEVEL)
231
232 /*
233 * For a given perflevel, return the corresponding CPU type.
234 */
cluster_type_t cpu_type_for_perflevel(int perflevel);
cluster_type_t
cpu_type_for_perflevel(int perflevel)
{
	unsigned int cpu_types = ml_get_cpu_types();
	/* Each set bit in cpu_types is one cluster type present on this system. */
	__assert_only unsigned int n_perflevels = __builtin_popcount(cpu_types);

	assert((perflevel >= 0) && (perflevel < n_perflevels));

	/* Check CPU types mask for each cluster type in descending order of performance */
	cluster_type_t cluster_types_in_order[MAX_CPU_TYPES];
	cluster_types_in_order[0] = CLUSTER_TYPE_SMP;
#if defined(__arm__) || defined(__arm64__)
	cluster_types_in_order[1] = CLUSTER_TYPE_P;
	cluster_types_in_order[2] = CLUSTER_TYPE_E;
#endif /* defined(__arm__) || defined(__arm64__) */

	/* perflevel 0 maps to the fastest present cluster type, 1 to the next, ... */
	int perflevel_ind = 0;
	for (int i = 0; i < MAX_CPU_TYPES; i++) {
		unsigned int type_mask = 1 << cluster_types_in_order[i];
		if (type_mask & cpu_types) {
			if (perflevel_ind == perflevel) {
				return cluster_types_in_order[i];
			}
			perflevel_ind++;
		}
	}
	/* Unreachable while the assert above holds; placates the compiler. */
	return 0;
}
264
265 static ml_cpu_info_t
sysctl_hw_generic_cpu_info(int perflevel,int arg2 __unused)266 sysctl_hw_generic_cpu_info(int perflevel, int arg2 __unused)
267 {
268 bool ignore_perflevel = false;
269 #if APPLE_ARM64_ARCH_FAMILY
270 if (arg2 == HW_CACHELINE) {
271 /* Apple SoCs have a uniform cacheline size across all clusters */
272 ignore_perflevel = true;
273 }
274 #endif
275
276 ml_cpu_info_t cpu_info;
277 if (ignore_perflevel) {
278 ml_cpu_get_info(&cpu_info);
279 } else {
280 ml_cpu_get_info_type(&cpu_info, cpu_type_for_perflevel(perflevel));
281 }
282 return cpu_info;
283 }
284
285 /*
286 * Supporting some variables requires us to do "real" work. We
287 * gather some of that here.
288 */
static int
sysctl_hw_generic(__unused struct sysctl_oid *oidp, void *arg1,
    int arg2, struct sysctl_req *req)
{
	char dummy[65];                 /* scratch buffer for string-valued OIDs */
	int epochTemp;
	int val, doquad;
	long long qval;
	unsigned int cpu_count;
	host_basic_info_data_t hinfo;
	kern_return_t kret;
	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

	/*
	 * If we are using one of the perflevel sysctls, return early if the perflevel
	 * does not exist in this system.
	 */
	/* NOTE(review): the perflevel index is smuggled through arg1 as an
	 * integer value, not a pointer — confirm against the registration sites. */
	int perflevel = (int)arg1;
	int n_perflevels = __builtin_popcount(ml_get_cpu_types());

	if (arg2 & CTLHW_PERFLEVEL) {
		if ((perflevel < 0) || (perflevel >= n_perflevels)) {
			return ENOENT;
		}
	} else {
		/* Non-perflevel OIDs implicitly use the lowest-performance level. */
		perflevel = n_perflevels - 1;
	}

	/*
	 * Test and mask off the 'return quad' flag.
	 * Note that only some things here support it.
	 */
	doquad = arg2 & CTLHW_RETQUAD;
	arg2 &= ~CTLHW_RETQUAD;

#define BSD_HOST 1
	/* Fetched unconditionally; individual cases check kret before using hinfo. */
	kret = host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);

	/*
	 * Handle various OIDs.
	 *
	 * OIDs that can return int or quad set val and qval and then break.
	 * Errors and int-only values return inline.
	 */
	switch (arg2) {
	/* CPU counts from host_info(). */
	case HW_NCPU:
		if (kret == KERN_SUCCESS) {
			return SYSCTL_RETURN(req, hinfo.max_cpus);
		} else {
			return EINVAL;
		}
	case HW_AVAILCPU:
		if (kret == KERN_SUCCESS) {
			return SYSCTL_RETURN(req, hinfo.avail_cpus);
		} else {
			return EINVAL;
		}
	case HW_LOCAL_PHYSICALCPU:
		if (kret == KERN_SUCCESS) {
			return SYSCTL_RETURN(req, hinfo.physical_cpu);
		} else {
			return EINVAL;
		}
	case HW_LOCAL_PHYSICALCPUMAX:
		if (kret == KERN_SUCCESS) {
			return SYSCTL_RETURN(req, hinfo.physical_cpu_max);
		} else {
			return EINVAL;
		}
	case HW_LOCAL_LOGICALCPU:
		if (kret == KERN_SUCCESS) {
			return SYSCTL_RETURN(req, hinfo.logical_cpu);
		} else {
			return EINVAL;
		}
	case HW_LOCAL_LOGICALCPUMAX:
		if (kret == KERN_SUCCESS) {
			return SYSCTL_RETURN(req, hinfo.logical_cpu_max);
		} else {
			return EINVAL;
		}
	/* Per-perflevel CPU counts.
	 * NOTE(review): the two bool args presumably select logical-vs-physical
	 * and available-vs-max — confirm against ml_get_cpu_number_type(). */
	case HW_NPERFLEVELS:
		return SYSCTL_RETURN(req, n_perflevels);
	case HW_PERFLEVEL_PHYSICALCPU:
		cpu_count = ml_get_cpu_number_type(cpu_type_for_perflevel(perflevel), false, true);
		return SYSCTL_RETURN(req, cpu_count);
	case HW_PERFLEVEL_PHYSICALCPUMAX:
		cpu_count = ml_get_cpu_number_type(cpu_type_for_perflevel(perflevel), false, false);
		return SYSCTL_RETURN(req, cpu_count);
	case HW_PERFLEVEL_LOGICALCPU:
		cpu_count = ml_get_cpu_number_type(cpu_type_for_perflevel(perflevel), true, true);
		return SYSCTL_RETURN(req, cpu_count);
	case HW_PERFLEVEL_LOGICALCPUMAX:
		cpu_count = ml_get_cpu_number_type(cpu_type_for_perflevel(perflevel), true, false);
		return SYSCTL_RETURN(req, cpu_count);
	/* Per-perflevel cache geometry; int/quad dual-format via val/qval. */
	case HW_PERFLEVEL_L1ICACHESIZE: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		val = (int)cpu_info.l1_icache_size;
		qval = (long long)cpu_info.l1_icache_size;
		break;
	}
	case HW_PERFLEVEL_L1DCACHESIZE: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		val = (int)cpu_info.l1_dcache_size;
		qval = (long long)cpu_info.l1_dcache_size;
		break;
	}
	case HW_PERFLEVEL_L2CACHESIZE: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		val = (int)cpu_info.l2_cache_size;
		qval = (long long)cpu_info.l2_cache_size;
		break;
	}
	case HW_PERFLEVEL_CPUSPERL2:
		cpu_count = ml_cpu_cache_sharing(2, cpu_type_for_perflevel(perflevel), false);
		return SYSCTL_RETURN(req, cpu_count);
	case HW_PERFLEVEL_L3CACHESIZE: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		/* UINT32_MAX marks "no L3 cache" for this cluster type. */
		if (cpu_info.l3_cache_size == UINT32_MAX) {
			return EINVAL;
		}
		val = (int)cpu_info.l3_cache_size;
		qval = (long long)cpu_info.l3_cache_size;
		break;
	}
	case HW_PERFLEVEL_CPUSPERL3: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		if (cpu_info.l3_cache_size == UINT32_MAX) {
			return EINVAL;
		}
		cpu_count = ml_cpu_cache_sharing(3, cpu_type_for_perflevel(perflevel), false);
		return SYSCTL_RETURN(req, cpu_count);
	}
	case HW_PERFLEVEL_NAME:
		bzero(dummy, sizeof(dummy));
		ml_get_cluster_type_name(cpu_type_for_perflevel(perflevel), dummy, sizeof(dummy));
		return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
	/* CPU identification. */
	case HW_LOCAL_CPUTYPE:
		if (kret == KERN_SUCCESS) {
			return SYSCTL_RETURN(req, hinfo.cpu_type);
		} else {
			return EINVAL;
		}
	case HW_LOCAL_CPUSUBTYPE:
		if (kret == KERN_SUCCESS) {
			return SYSCTL_RETURN(req, hinfo.cpu_subtype);
		} else {
			return EINVAL;
		}
	case HW_LOCAL_CPUFAMILY:
	{
		int cpufamily = 0;
#if defined (__i386__) || defined (__x86_64__)
		cpufamily = cpuid_cpufamily();
#elif defined(__arm64__)
		{
			cpufamily = cpuid_get_cpufamily();
		}
#else
#error unknown architecture
#endif
		return SYSCTL_RETURN(req, cpufamily);
	}
	case HW_LOCAL_CPUSUBFAMILY:
	{
		int cpusubfamily = 0;
#if defined (__i386__) || defined (__x86_64__)
		cpusubfamily = CPUSUBFAMILY_UNKNOWN;
#elif defined(__arm64__)
		{
			cpusubfamily = cpuid_get_cpusubfamily();
		}
#else
#error unknown architecture
#endif
		return SYSCTL_RETURN(req, cpusubfamily);
	}
	/* Page size of the calling task's own VM map (int or quad). */
	case HW_PAGESIZE:
	{
		vm_map_t map = get_task_map(current_task());
		val = vm_map_page_size(map);
		qval = (long long)val;
		break;
	}
	/* Whole-system cache geometry (resolved via the default perflevel). */
	case HW_CACHELINE: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		val = (int)cpu_info.cache_line_size;
		qval = (long long)val;
		break;
	}
	case HW_L1ICACHESIZE: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		val = (int)cpu_info.l1_icache_size;
		qval = (long long)cpu_info.l1_icache_size;
		break;
	}
	case HW_L1DCACHESIZE: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		val = (int)cpu_info.l1_dcache_size;
		qval = (long long)cpu_info.l1_dcache_size;
		break;
	}
	case HW_L2CACHESIZE: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		/* UINT32_MAX marks "cache level not present". */
		if (cpu_info.l2_cache_size == UINT32_MAX) {
			return EINVAL;
		}
		val = (int)cpu_info.l2_cache_size;
		qval = (long long)cpu_info.l2_cache_size;
		break;
	}
	case HW_L3CACHESIZE: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		if (cpu_info.l3_cache_size == UINT32_MAX) {
			return EINVAL;
		}
		val = (int)cpu_info.l3_cache_size;
		qval = (long long)cpu_info.l3_cache_size;
		break;
	}
	/* Platform identification strings from pexpert. */
	case HW_TARGET:
		bzero(dummy, sizeof(dummy));
		if (!PEGetTargetName(dummy, 64)) {
			return EINVAL;
		}
		dummy[64] = 0;
		return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
	case HW_PRODUCT:
		bzero(dummy, sizeof(dummy));
		if (!PEGetProductName(dummy, 64)) {
			return EINVAL;
		}
		dummy[64] = 0;
		return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);

	/*
	 * Deprecated variables. We still support these for
	 * backwards compatibility purposes only.
	 */
#if XNU_TARGET_OS_OSX && defined(__arm64__)
	/* The following two are kludged for backward
	 * compatibility. Use hw.product/hw.target for something
	 * consistent instead. */

	case HW_MACHINE:
		bzero(dummy, sizeof(dummy));
		if (proc_platform(req->p) == PLATFORM_IOS) {
			/* iOS-on-Mac processes don't expect the macOS kind of
			 * hw.machine, e.g. "arm64", but are used to seeing
			 * a product string on iOS, which we here hardcode
			 * to return as "iPad8,6" for compatibility.
			 *
			 * Another reason why hw.machine and hw.model are
			 * trouble and hw.target+hw.product should be used
			 * instead.
			 */

			strlcpy(dummy, "iPad8,6", sizeof(dummy));
		} else {
			strlcpy(dummy, "arm64", sizeof(dummy));
		}
		dummy[64] = 0;
		return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
	case HW_MODEL:
		bzero(dummy, sizeof(dummy));
		if (!PEGetProductName(dummy, 64)) {
			return EINVAL;
		}
		dummy[64] = 0;
		return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
#else
	case HW_MACHINE:
		bzero(dummy, sizeof(dummy));
		if (!PEGetMachineName(dummy, 64)) {
			return EINVAL;
		}
		dummy[64] = 0;
		return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
	case HW_MODEL:
		bzero(dummy, sizeof(dummy));
		if (!PEGetModelName(dummy, 64)) {
			return EINVAL;
		}
		dummy[64] = 0;
		return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
#endif
	case HW_USERMEM:
	{
		/* Memory not wired down by the kernel; truncates to int by design. */
		int usermem = (int)(mem_size - vm_page_wire_count * page_size);

		return SYSCTL_RETURN(req, usermem);
	}
	case HW_EPOCH:
		/* -1 from pexpert means "no epoch available". */
		epochTemp = PEGetPlatformEpoch();
		if (epochTemp == -1) {
			return EINVAL;
		}
		return SYSCTL_RETURN(req, epochTemp);
	case HW_VECTORUNIT: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		/* Collapse to a boolean 0/1 regardless of the reported unit count. */
		int vector = cpu_info.vector_unit == 0? 0 : 1;
		return SYSCTL_RETURN(req, vector);
	}
	case HW_L2SETTINGS: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		if (cpu_info.l2_cache_size == UINT32_MAX) {
			return EINVAL;
		}
		return SYSCTL_RETURN(req, cpu_info.l2_settings);
	}
	case HW_L3SETTINGS: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		if (cpu_info.l3_cache_size == UINT32_MAX) {
			return EINVAL;
		}
		return SYSCTL_RETURN(req, cpu_info.l3_settings);
	}
	default:
		return ENOTSUP;
	}
	/*
	 * Callers may come to us with either int or quad buffers.
	 */
	if (doquad) {
		return SYSCTL_RETURN(req, qval);
	}
	return SYSCTL_RETURN(req, val);
}
618
static int
sysctl_hw_cachesize(struct sysctl_oid *oidp __unused, void *arg1 __unused,
    int arg2 __unused, struct sysctl_req *req)
{
	/* hw.cachesize: array of cache sizes; index 0 is main memory. */
	uint64_t cachesize[10] = {};

#if __x86_64__
	cachesize[0] = ml_cpu_cache_size(0);
	cachesize[1] = ml_cpu_cache_size(1);
	cachesize[2] = ml_cpu_cache_size(2);
	cachesize[3] = ml_cpu_cache_size(3);
#elif __arm64__
	/* Report for the lowest-performance perflevel (the last index). */
	cluster_type_t min_perflevel_cluster_type = cpu_type_for_perflevel(__builtin_popcount(ml_get_cpu_types()) - 1);

	cachesize[0] = ml_get_machine_mem();
	cachesize[1] = cache_info_type(min_perflevel_cluster_type)->c_dsize; /* Using the DCache */
	cachesize[2] = cache_info_type(min_perflevel_cluster_type)->c_l2size;
#else
#error unknown architecture
#endif

	return SYSCTL_RETURN(req, cachesize);
}
642
643 /* hw.pagesize and hw.tbfrequency are expected as 64 bit values */
644 static int
sysctl_pagesize(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)645 sysctl_pagesize
646 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
647 {
648 vm_map_t map = get_task_map(current_task());
649 long long l = vm_map_page_size(map);
650 return sysctl_io_number(req, l, sizeof(l), NULL, NULL);
651 }
652
653 static int
sysctl_pagesize32(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)654 sysctl_pagesize32
655 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
656 {
657 long long l;
658 #if __arm64__
659 l = (long long) (1 << page_shift_user32);
660 #else /* __arm64__ */
661 l = (long long) PAGE_SIZE;
662 #endif /* __arm64__ */
663 return sysctl_io_number(req, l, sizeof(l), NULL, NULL);
664 }
665
666 static int
sysctl_tbfrequency(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)667 sysctl_tbfrequency
668 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
669 {
670 long long l = gPEClockFrequencyInfo.timebase_frequency_hz;
671 return sysctl_io_number(req, l, sizeof(l), NULL, NULL);
672 }
673
674 /*
675 * Called by IOKit on Intel, or by sysctl_load_devicetree_entries()
676 */
677 void
sysctl_set_osenvironment(unsigned int size,const void * value)678 sysctl_set_osenvironment(unsigned int size, const void* value)
679 {
680 if (osenvironment_size == 0 && size > 0) {
681 osenvironment = zalloc_permanent(size, ZALIGN_NONE);
682 if (osenvironment) {
683 memcpy(osenvironment, value, size);
684 osenvironment_size = size;
685 }
686 }
687 }
688
void
sysctl_unblock_osenvironment(void)
{
	/*
	 * Mark the osenvironment value as initialized, then wake any thread
	 * blocked in sysctl_osenvironment() waiting for it. The increment
	 * precedes the wakeup so a waiter that re-checks the flag after
	 * assert_wait() observes it set.
	 */
	os_atomic_inc(&osenvironment_initialized, relaxed);
	thread_wakeup((event_t) &osenvironment_initialized);
}
695
696 /*
697 * Create sysctl entries coming from device tree.
698 *
699 * Entries from device tree are loaded here because SecureDTLookupEntry() only works before
700 * PE_init_iokit(). Doing this also avoids the extern-C hackery to access these entries
701 * from IORegistry (which requires C++).
702 */
703 __startup_func
704 static void
sysctl_load_devicetree_entries(void)705 sysctl_load_devicetree_entries(void)
706 {
707 DTEntry chosen;
708 void const *value;
709 unsigned int size;
710
711 if (kSuccess != SecureDTLookupEntry(0, "/chosen", &chosen)) {
712 return;
713 }
714
715 /* load osenvironment */
716 if (kSuccess == SecureDTGetProperty(chosen, "osenvironment", (void const **) &value, &size)) {
717 sysctl_set_osenvironment(size, value);
718 }
719
720 /* load ephemeral_storage */
721 if (kSuccess == SecureDTGetProperty(chosen, "ephemeral-storage", (void const **) &value, &size)) {
722 if (size == sizeof(uint32_t)) {
723 ephemeral_storage = *(uint32_t const *)value;
724 property_existence.ephemeral_storage = 1;
725 }
726 }
727
728 /* load use_recovery_securityd */
729 if (kSuccess == SecureDTGetProperty(chosen, "use-recovery-securityd", (void const **) &value, &size)) {
730 if (size == sizeof(uint32_t)) {
731 use_recovery_securityd = *(uint32_t const *)value;
732 property_existence.use_recovery_securityd = 1;
733 }
734 }
735 }
736 STARTUP(SYSCTL, STARTUP_RANK_MIDDLE, sysctl_load_devicetree_entries);
737
static int
sysctl_osenvironment
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#if defined(__x86_64__)
#if (DEVELOPMENT || DEBUG)
	/*
	 * On Intel the osenvironment value is delivered late (by IOKit, per
	 * sysctl_set_osenvironment above), so on dev/debug kernels block the
	 * caller until sysctl_unblock_osenvironment() fires. The flag is
	 * re-checked after assert_wait() to close the race with a wakeup
	 * landing between the first check and the wait registration.
	 */
	if (os_atomic_load(&osenvironment_initialized, relaxed) == 0) {
		assert_wait((event_t) &osenvironment_initialized, THREAD_UNINT);
		if (os_atomic_load(&osenvironment_initialized, relaxed) != 0) {
			clear_wait(current_thread(), THREAD_AWAKENED);
		} else {
			(void) thread_block(THREAD_CONTINUE_NULL);
		}
	}
#endif
#endif
	if (osenvironment_size > 0) {
		return SYSCTL_OUT(req, osenvironment, osenvironment_size);
	} else {
		return EINVAL;
	}
}
760
761 static int
sysctl_ephemeral_storage(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)762 sysctl_ephemeral_storage
763 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
764 {
765 if (property_existence.ephemeral_storage) {
766 return SYSCTL_OUT(req, &ephemeral_storage, sizeof(ephemeral_storage));
767 } else {
768 return EINVAL;
769 }
770 }
771
772 static int
sysctl_use_recovery_securityd(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)773 sysctl_use_recovery_securityd
774 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
775 {
776 if (property_existence.use_recovery_securityd) {
777 return SYSCTL_OUT(req, &use_recovery_securityd, sizeof(use_recovery_securityd));
778 } else {
779 return EINVAL;
780 }
781 }
782
783 static int
sysctl_use_kernelmanagerd(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)784 sysctl_use_kernelmanagerd
785 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
786 {
787 #if XNU_TARGET_OS_OSX
788 static int use_kernelmanagerd = 1;
789 static bool once = false;
790
791 if (!once) {
792 kc_format_t kc_format;
793 PE_get_primary_kc_format(&kc_format);
794 if (kc_format == KCFormatFileset) {
795 use_kernelmanagerd = 1;
796 } else {
797 PE_parse_boot_argn("kernelmanagerd", &use_kernelmanagerd, sizeof(use_kernelmanagerd));
798 }
799 once = true;
800 }
801 #else
802 static int use_kernelmanagerd = 0;
803 #endif
804 return SYSCTL_OUT(req, &use_kernelmanagerd, sizeof(use_kernelmanagerd));
805 }
806
807 #define HW_LOCAL_FREQUENCY 1
808 #define HW_LOCAL_FREQUENCY_MIN 2
809 #define HW_LOCAL_FREQUENCY_MAX 3
810 #define HW_LOCAL_FREQUENCY_CLOCK_RATE 4
811
812 static int
sysctl_bus_frequency(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,__unused struct sysctl_req * req)813 sysctl_bus_frequency
814 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
815 {
816
817 #if DEBUG || DEVELOPMENT || !defined(__arm64__)
818 switch (arg2) {
819 case HW_LOCAL_FREQUENCY:
820 return SYSCTL_RETURN(req, gPEClockFrequencyInfo.bus_frequency_hz);
821 case HW_LOCAL_FREQUENCY_MIN:
822 return SYSCTL_RETURN(req, gPEClockFrequencyInfo.bus_frequency_min_hz);
823 case HW_LOCAL_FREQUENCY_MAX:
824 return SYSCTL_RETURN(req, gPEClockFrequencyInfo.bus_frequency_max_hz);
825 case HW_LOCAL_FREQUENCY_CLOCK_RATE:
826 return SYSCTL_OUT(req, &gPEClockFrequencyInfo.bus_clock_rate_hz, sizeof(int));
827 default:
828 return EINVAL;
829 }
830 #else
831 return ENOENT;
832 #endif
833 }
834
835 static int
sysctl_cpu_frequency(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,__unused struct sysctl_req * req)836 sysctl_cpu_frequency
837 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
838 {
839
840 #if DEBUG || DEVELOPMENT || !defined(__arm64__)
841 switch (arg2) {
842 case HW_LOCAL_FREQUENCY:
843 return SYSCTL_RETURN(req, gPEClockFrequencyInfo.cpu_frequency_hz);
844 case HW_LOCAL_FREQUENCY_MIN:
845 return SYSCTL_RETURN(req, gPEClockFrequencyInfo.cpu_frequency_min_hz);
846 case HW_LOCAL_FREQUENCY_MAX:
847 return SYSCTL_RETURN(req, gPEClockFrequencyInfo.cpu_frequency_max_hz);
848 case HW_LOCAL_FREQUENCY_CLOCK_RATE:
849 return SYSCTL_OUT(req, &gPEClockFrequencyInfo.cpu_clock_rate_hz, sizeof(int));
850 default:
851 return EINVAL;
852 }
853 #else
854 return ENOENT;
855 #endif
856 }
857
858 /*
859 * This sysctl will signal to userspace that a serial console is desired:
860 *
861 * hw.serialdebugmode = 1 will load the serial console job in the multi-user session;
862 * hw.serialdebugmode = 2 will load the serial console job in the base system as well
863 */
864 static int
sysctl_serialdebugmode(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)865 sysctl_serialdebugmode
866 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
867 {
868 uint32_t serial_boot_arg;
869 int serialdebugmode = 0;
870
871 if (PE_parse_boot_argn("serial", &serial_boot_arg, sizeof(serial_boot_arg)) &&
872 (serial_boot_arg & SERIALMODE_OUTPUT) && (serial_boot_arg & SERIALMODE_INPUT)) {
873 serialdebugmode = (serial_boot_arg & SERIALMODE_BASE_TTY) ? 2 : 1;
874 }
875
876 return sysctl_io_number(req, serialdebugmode, sizeof(serialdebugmode), NULL, NULL);
877 }
878
879 /*
880 * hw.* MIB variables.
881 */
882 SYSCTL_PROC(_hw, HW_NCPU, ncpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_NCPU, sysctl_hw_generic, "I", "");
883 SYSCTL_PROC(_hw, HW_AVAILCPU, activecpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_AVAILCPU, sysctl_hw_generic, "I", "");
884 SYSCTL_PROC(_hw, OID_AUTO, physicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_PHYSICALCPU, sysctl_hw_generic, "I", "");
885 SYSCTL_PROC(_hw, OID_AUTO, physicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_PHYSICALCPUMAX, sysctl_hw_generic, "I", "");
886 SYSCTL_PROC(_hw, OID_AUTO, logicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_LOGICALCPU, sysctl_hw_generic, "I", "");
887 SYSCTL_PROC(_hw, OID_AUTO, logicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_LOGICALCPUMAX, sysctl_hw_generic, "I", "");
888 SYSCTL_INT(_hw, HW_BYTEORDER, byteorder, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (int *)NULL, BYTE_ORDER, "");
889 SYSCTL_PROC(_hw, OID_AUTO, cputype, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_CPUTYPE, sysctl_hw_generic, "I", "");
890 SYSCTL_PROC(_hw, OID_AUTO, cpusubtype, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_CPUSUBTYPE, sysctl_hw_generic, "I", "");
891 SYSCTL_INT(_hw, OID_AUTO, cpu64bit_capable, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &cpu64bit, 0, "");
892 SYSCTL_PROC(_hw, OID_AUTO, cpufamily, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_CPUFAMILY, sysctl_hw_generic, "I", "");
893 SYSCTL_PROC(_hw, OID_AUTO, cpusubfamily, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_CPUSUBFAMILY, sysctl_hw_generic, "I", "");
894 SYSCTL_OPAQUE(_hw, OID_AUTO, cacheconfig, CTLFLAG_RD | CTLFLAG_LOCKED, &cacheconfig, sizeof(cacheconfig), "Q", "");
895 SYSCTL_PROC(_hw, OID_AUTO, cachesize, CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_hw_cachesize, "Q", "");
896 SYSCTL_PROC(_hw, OID_AUTO, pagesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_pagesize, "Q", "");
897 SYSCTL_PROC(_hw, OID_AUTO, pagesize32, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_pagesize32, "Q", "");
898 SYSCTL_PROC(_hw, OID_AUTO, busfrequency, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY, sysctl_bus_frequency, "Q", "");
899 SYSCTL_PROC(_hw, OID_AUTO, busfrequency_min, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_MIN, sysctl_bus_frequency, "Q", "");
900 SYSCTL_PROC(_hw, OID_AUTO, busfrequency_max, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_MAX, sysctl_bus_frequency, "Q", "");
901 SYSCTL_PROC(_hw, OID_AUTO, cpufrequency, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY, sysctl_cpu_frequency, "Q", "");
902 SYSCTL_PROC(_hw, OID_AUTO, cpufrequency_min, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_MIN, sysctl_cpu_frequency, "Q", "");
903 SYSCTL_PROC(_hw, OID_AUTO, cpufrequency_max, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_MAX, sysctl_cpu_frequency, "Q", "");
904 SYSCTL_PROC(_hw, OID_AUTO, cachelinesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_CACHELINE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", "");
905 SYSCTL_PROC(_hw, OID_AUTO, l1icachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L1ICACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", "");
906 SYSCTL_PROC(_hw, OID_AUTO, l1dcachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L1DCACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", "");
907 SYSCTL_PROC(_hw, OID_AUTO, l2cachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L2CACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", "");
908 SYSCTL_PROC(_hw, OID_AUTO, l3cachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L3CACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", "");
909 #if defined(__arm64__) && (DEBUG || DEVELOPMENT)
910 SYSCTL_QUAD(_hw, OID_AUTO, memfrequency, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.mem_frequency_hz, "");
911 SYSCTL_QUAD(_hw, OID_AUTO, memfrequency_min, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.mem_frequency_min_hz, "");
912 SYSCTL_QUAD(_hw, OID_AUTO, memfrequency_max, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.mem_frequency_max_hz, "");
913 SYSCTL_QUAD(_hw, OID_AUTO, prffrequency, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.prf_frequency_hz, "");
914 SYSCTL_QUAD(_hw, OID_AUTO, prffrequency_min, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.prf_frequency_min_hz, "");
915 SYSCTL_QUAD(_hw, OID_AUTO, prffrequency_max, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.prf_frequency_max_hz, "");
916 SYSCTL_QUAD(_hw, OID_AUTO, fixfrequency, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.fix_frequency_hz, "");
917 #endif /* __arm64__ */
918 SYSCTL_PROC(_hw, OID_AUTO, tbfrequency, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_tbfrequency, "Q", "");
919 /**
920 * The naming around the sysctls for max_mem and max_mem_actual are different between macOS and
921 * non-macOS platforms because historically macOS's hw.memsize provided the value of the actual
922 * physical memory size, whereas on non-macOS it is the memory size minus any carveouts.
923 */
924 #if XNU_TARGET_OS_OSX
925 SYSCTL_QUAD(_hw, HW_MEMSIZE, memsize, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &max_mem_actual, "");
926 SYSCTL_QUAD(_hw, OID_AUTO, memsize_usable, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &max_mem, "");
927 #else
928 SYSCTL_QUAD(_hw, HW_MEMSIZE, memsize, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &max_mem, "");
929 SYSCTL_QUAD(_hw, OID_AUTO, memsize_physical, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &max_mem_actual, "");
930 #endif /* XNU_TARGET_OS_OSX */
931 SYSCTL_INT(_hw, OID_AUTO, packages, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &packages, 0, "");
932 #if defined(XNU_TARGET_OS_XR)
933 SYSCTL_UINT(_hw, OID_AUTO, chiprole, CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPlatformChipRole, 1, "");
934 #endif /* not XNU_TARGET_OS_XR */
935 SYSCTL_PROC(_hw, OID_AUTO, osenvironment, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_osenvironment, "A", "");
936 SYSCTL_PROC(_hw, OID_AUTO, ephemeral_storage, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_ephemeral_storage, "I", "");
937 SYSCTL_PROC(_hw, OID_AUTO, use_recovery_securityd, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_use_recovery_securityd, "I", "");
938 SYSCTL_PROC(_hw, OID_AUTO, use_kernelmanagerd, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_use_kernelmanagerd, "I", "");
939 SYSCTL_PROC(_hw, OID_AUTO, serialdebugmode, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_serialdebugmode, "I", "");
940
941 /*
942 * hw.perflevelN.* variables.
943 * Users may check these to determine properties that vary across different CPU types, such as number of CPUs,
944 * or cache sizes. Perflevel 0 corresponds to the highest performance one.
945 */
946 SYSCTL_NODE(_hw, OID_AUTO, perflevel0, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, NULL, "Perf level 0 topology and cache geometry paramaters");
947 SYSCTL_NODE(_hw, OID_AUTO, perflevel1, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, NULL, "Perf level 1 topology and cache geometry paramaters");
948 SYSCTL_PROC(_hw, OID_AUTO, nperflevels, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_NPERFLEVELS, sysctl_hw_generic, "I", "Number of performance levels supported by this system");
949
950 SYSCTL_PROC(_hw_perflevel0, OID_AUTO, physicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_PHYSICALCPU, sysctl_hw_generic, "I", "");
951 SYSCTL_PROC(_hw_perflevel0, OID_AUTO, physicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_PHYSICALCPUMAX, sysctl_hw_generic, "I", "");
952 SYSCTL_PROC(_hw_perflevel0, OID_AUTO, logicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_LOGICALCPU, sysctl_hw_generic, "I", "");
953 SYSCTL_PROC(_hw_perflevel0, OID_AUTO, logicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_LOGICALCPUMAX, sysctl_hw_generic, "I", "");
954 SYSCTL_PROC(_hw_perflevel0, OID_AUTO, l1icachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_L1ICACHESIZE, sysctl_hw_generic, "I", "");
955 SYSCTL_PROC(_hw_perflevel0, OID_AUTO, l1dcachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_L1DCACHESIZE, sysctl_hw_generic, "I", "");
956 SYSCTL_PROC(_hw_perflevel0, OID_AUTO, l2cachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_L2CACHESIZE, sysctl_hw_generic, "I", "");
957 SYSCTL_PROC(_hw_perflevel0, OID_AUTO, cpusperl2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_CPUSPERL2, sysctl_hw_generic, "I", "");
958 SYSCTL_PROC(_hw_perflevel0, OID_AUTO, l3cachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_L3CACHESIZE, sysctl_hw_generic, "I", "");
959 SYSCTL_PROC(_hw_perflevel0, OID_AUTO, cpusperl3, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_CPUSPERL3, sysctl_hw_generic, "I", "");
960 SYSCTL_PROC(_hw_perflevel0, OID_AUTO, name, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_NAME, sysctl_hw_generic, "A", "");
961
962 SYSCTL_PROC(_hw_perflevel1, OID_AUTO, physicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_PHYSICALCPU, sysctl_hw_generic, "I", "");
963 SYSCTL_PROC(_hw_perflevel1, OID_AUTO, physicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_PHYSICALCPUMAX, sysctl_hw_generic, "I", "");
964 SYSCTL_PROC(_hw_perflevel1, OID_AUTO, logicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_LOGICALCPU, sysctl_hw_generic, "I", "");
965 SYSCTL_PROC(_hw_perflevel1, OID_AUTO, logicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_LOGICALCPUMAX, sysctl_hw_generic, "I", "");
966 SYSCTL_PROC(_hw_perflevel1, OID_AUTO, l1icachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_L1ICACHESIZE, sysctl_hw_generic, "I", "");
967 SYSCTL_PROC(_hw_perflevel1, OID_AUTO, l1dcachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_L1DCACHESIZE, sysctl_hw_generic, "I", "");
968 SYSCTL_PROC(_hw_perflevel1, OID_AUTO, l2cachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_L2CACHESIZE, sysctl_hw_generic, "I", "");
969 SYSCTL_PROC(_hw_perflevel1, OID_AUTO, cpusperl2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_CPUSPERL2, sysctl_hw_generic, "I", "");
970 SYSCTL_PROC(_hw_perflevel1, OID_AUTO, l3cachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_L3CACHESIZE, sysctl_hw_generic, "I", "");
971 SYSCTL_PROC(_hw_perflevel1, OID_AUTO, cpusperl3, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_CPUSPERL3, sysctl_hw_generic, "I", "");
972 SYSCTL_PROC(_hw_perflevel1, OID_AUTO, name, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_NAME, sysctl_hw_generic, "A", "");
973
974 /*
975 * Optional CPU features can register nodes below hw.optional.
976 *
977 * If the feature is not present, the node should either not be registered,
978 * or it should return 0. If the feature is present, the node should return
979 * 1.
980 */
981 SYSCTL_NODE(_hw, OID_AUTO, optional, CTLFLAG_RW | CTLFLAG_LOCKED, NULL, "optional features");
982 SYSCTL_NODE(_hw_optional, OID_AUTO, arm, CTLFLAG_RW | CTLFLAG_LOCKED, NULL, "optional features for ARM processors");
983
984 SYSCTL_INT(_hw_optional, OID_AUTO, floatingpoint, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (int *)NULL, 1, ""); /* always set */
985
986 /*
987 * Optional device hardware features can be registered by drivers below hw.features
988 */
989 SYSCTL_EXTENSIBLE_NODE(_hw, OID_AUTO, features, CTLFLAG_RD | CTLFLAG_LOCKED, NULL, "hardware features");
990
991 /*
992 * Deprecated variables. These are supported for backwards compatibility
993 * purposes only. The MASKED flag requests that the variables not be
994 * printed by sysctl(8) and similar utilities.
995 *
996 * The variables named *_compat here are int-sized versions of variables
997 * that are now exported as quads. The int-sized versions are normally
998 * looked up only by number, wheras the quad-sized versions should be
999 * looked up by name.
1000 *
1001 * The *_compat nodes are *NOT* visible within the kernel.
1002 */
1003
1004 SYSCTL_PROC(_hw, HW_PAGESIZE, pagesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_PAGESIZE, sysctl_hw_generic, "I", "");
1005 SYSCTL_PROC(_hw, HW_BUS_FREQ, busfrequency_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_CLOCK_RATE, sysctl_bus_frequency, "I", "");
1006 SYSCTL_PROC(_hw, HW_CPU_FREQ, cpufrequency_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_CLOCK_RATE, sysctl_cpu_frequency, "I", "");
1007 SYSCTL_PROC(_hw, HW_CACHELINE, cachelinesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_CACHELINE, sysctl_hw_generic, "I", "");
1008 SYSCTL_PROC(_hw, HW_L1ICACHESIZE, l1icachesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L1ICACHESIZE, sysctl_hw_generic, "I", "");
1009 SYSCTL_PROC(_hw, HW_L1DCACHESIZE, l1dcachesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L1DCACHESIZE, sysctl_hw_generic, "I", "");
1010 SYSCTL_PROC(_hw, HW_L2CACHESIZE, l2cachesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L2CACHESIZE, sysctl_hw_generic, "I", "");
1011 SYSCTL_PROC(_hw, HW_L3CACHESIZE, l3cachesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L3CACHESIZE, sysctl_hw_generic, "I", "");
1012 SYSCTL_COMPAT_INT(_hw, HW_TB_FREQ, tbfrequency_compat, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.timebase_frequency_hz, 0, "");
1013 SYSCTL_PROC(_hw, HW_MACHINE, machine, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_MACHINE, sysctl_hw_generic, "A", "");
1014 SYSCTL_PROC(_hw, HW_MODEL, model, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_MODEL, sysctl_hw_generic, "A", "");
1015 SYSCTL_PROC(_hw, HW_TARGET, target, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_TARGET, sysctl_hw_generic, "A", "");
1016 SYSCTL_PROC(_hw, HW_PRODUCT, product, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_PRODUCT, sysctl_hw_generic, "A", "");
1017 SYSCTL_COMPAT_UINT(_hw, HW_PHYSMEM, physmem, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, &mem_size, 0, "");
1018 SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_USERMEM, sysctl_hw_generic, "I", "");
1019 SYSCTL_PROC(_hw, HW_EPOCH, epoch, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_EPOCH, sysctl_hw_generic, "I", "");
1020 SYSCTL_PROC(_hw, HW_VECTORUNIT, vectorunit, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_VECTORUNIT, sysctl_hw_generic, "I", "");
1021 SYSCTL_PROC(_hw, HW_L2SETTINGS, l2settings, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L2SETTINGS, sysctl_hw_generic, "I", "");
1022 SYSCTL_PROC(_hw, HW_L3SETTINGS, l3settings, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L3SETTINGS, sysctl_hw_generic, "I", "");
1023 SYSCTL_INT(_hw, OID_AUTO, cputhreadtype, CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, &cputhreadtype, 0, "");
1024
1025 #if defined(__i386__) || defined(__x86_64__) || CONFIG_X86_64_COMPAT
1026 static int
sysctl_cpu_capability(__unused struct sysctl_oid * oidp,void * arg1,__unused int arg2,struct sysctl_req * req)1027 sysctl_cpu_capability
1028 (__unused struct sysctl_oid *oidp, void *arg1, __unused int arg2, struct sysctl_req *req)
1029 {
1030 uint64_t caps;
1031 caps = _get_cpu_capabilities();
1032
1033 uint64_t mask = (uint64_t) (uintptr_t) arg1;
1034 boolean_t is_capable = (caps & mask) != 0;
1035
1036 return SYSCTL_OUT(req, &is_capable, sizeof(is_capable));
1037 }
/* Identity wrapper: purely a visual tag marking each constant below as a capability bit. */
#define capability(name) name


SYSCTL_PROC(_hw_optional, OID_AUTO, mmx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasMMX), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, sse, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, sse2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE2), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, sse3, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE3), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, supplementalsse3, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSupplementalSSE3), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, sse4_1, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE4_1), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, sse4_2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE4_2), 0, sysctl_cpu_capability, "I", "");
/* "x86_64" is actually a preprocessor symbol on the x86_64 kernel, so we have to hack this */
#undef x86_64
SYSCTL_PROC(_hw_optional, OID_AUTO, x86_64, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(k64Bit), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, aes, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAES), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx1_0, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX1_0), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, rdrand, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasRDRAND), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, f16c, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasF16C), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, enfstrg, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasENFSTRG), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, fma, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasFMA), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx2_0, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX2_0), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, bmi1, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasBMI1), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, bmi2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasBMI2), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, rtm, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasRTM), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, hle, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasHLE), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, adx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasADX), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, mpx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasMPX), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, sgx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSGX), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512f, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512F), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512cd, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512CD), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512dq, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512DQ), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512bw, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512BW), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512vl, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512VL), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512ifma, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512IFMA), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512vbmi, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512VBMI), 0, sysctl_cpu_capability, "I", "");
#undef capability
#endif /* __i386__ || __x86_64__ || CONFIG_X86_64_COMPAT */
1074
#if defined (__arm64__)
/* Hardware watchpoint/breakpoint pair counts; populated by sysctl_mib_init(). */
int watchpoint_flag = 0;
int breakpoint_flag = 0;

/* Whether uncached normal memory is supported; default 0 until set elsewhere. */
SECURITY_READ_ONLY_LATE(int) gUCNormalMem = 0;

/* NOTE(review): this inner test repeats the enclosing #if defined (__arm64__),
 * so the #else branch is unreachable here and arm64_flag is always 1. */
#if defined (__arm64__)
SECURITY_READ_ONLY_LATE(int) arm64_flag = 1;
#else /* end __arm64__*/
SECURITY_READ_ONLY_LATE(int) arm64_flag = 0;
#endif

/* ARM Optional Feature Sysctls */
/* For every feature listed in arm_features.inc, define a gARM_<flag> variable
 * (set during CPU init) and expose it under hw.optional.arm.<flag>. */
#define ARM_FEATURE_FLAG(flag_name) \
SECURITY_READ_ONLY_LATE(int) gARM_ ## flag_name = 0; \
SYSCTL_INT(_hw_optional_arm, OID_AUTO, flag_name, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_ ## flag_name, 0, "")
#include <arm/arm_features.inc>
#undef ARM_FEATURE_FLAG

/* Legacy Names ARM Optional Feature Sysctls */
/* Expose selected feature flags under their historical hw.optional.<legacy_name> names. */
#define LEGACY_ARM_SYSCTL(legacy_name, flag_name) \
SYSCTL_INT(_hw_optional, OID_AUTO, legacy_name, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_ ## flag_name, 0, "")

LEGACY_ARM_SYSCTL(neon, AdvSIMD);
LEGACY_ARM_SYSCTL(neon_hpfp, AdvSIMD_HPFPCvt);
LEGACY_ARM_SYSCTL(neon_fp16, FEAT_FP16);
LEGACY_ARM_SYSCTL(armv8_crc32, FEAT_CRC32);
LEGACY_ARM_SYSCTL(armv8_gpi, FEAT_PACIMP);
LEGACY_ARM_SYSCTL(armv8_1_atomics, FEAT_LSE);
LEGACY_ARM_SYSCTL(armv8_2_fhm, FEAT_FHM);
LEGACY_ARM_SYSCTL(armv8_2_sha512, FEAT_SHA512);
LEGACY_ARM_SYSCTL(armv8_2_sha3, FEAT_SHA3);
LEGACY_ARM_SYSCTL(armv8_3_compnum, FEAT_FCMA);

/* Misc ARM Optional Feature Sysctls */
SYSCTL_INT(_hw_optional, OID_AUTO, watchpoint, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &watchpoint_flag, 0, "");
SYSCTL_INT(_hw_optional, OID_AUTO, breakpoint, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &breakpoint_flag, 0, "");

SYSCTL_INT(_hw_optional, OID_AUTO, ucnormal_mem, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gUCNormalMem, 0, "");
1114
#if DEBUG || DEVELOPMENT
/*
 * Debug/development-only build-configuration introspection sysctls.
 * Consolidated: these were previously three adjacent, identical
 * "#if DEBUG || DEVELOPMENT" regions; merging them changes nothing
 * about which symbols are compiled in.
 */

/* Reports whether the kernel was built with __ARM_KERNEL_PROTECT__. */
#if __ARM_KERNEL_PROTECT__
static SECURITY_READ_ONLY_LATE(int) arm_kernel_protect = 1;
#else
static SECURITY_READ_ONLY_LATE(int) arm_kernel_protect = 0;
#endif
SYSCTL_INT(_hw_optional, OID_AUTO, arm_kernel_protect, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &arm_kernel_protect, 0, "");

/* NOTE(review): never written in this file — presumably updated by cache-maintenance code; confirm. */
static int ic_inval_filters = 0;
SYSCTL_INT(_hw_optional, OID_AUTO, ic_inval_filters, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &ic_inval_filters, 0, "");

static SECURITY_READ_ONLY_LATE(int) wkdm_popcount = 0;
SYSCTL_INT(_hw_optional, OID_AUTO, wkdm_popcount, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &wkdm_popcount, 0, "");

/* Reports whether the kernel was built with pointer authentication (arm64e). */
#if __has_feature(ptrauth_calls)
static SECURITY_READ_ONLY_LATE(int) ptrauth = 1;
#else
static SECURITY_READ_ONLY_LATE(int) ptrauth = 0;
#endif
SYSCTL_INT(_hw_optional, OID_AUTO, ptrauth, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &ptrauth, 0, "");
#endif /* DEBUG || DEVELOPMENT */
1142
1143 /*
1144 * Without this little ifdef dance, the preprocessor replaces "arm64" with "1",
1145 * leaving us with a less-than-helpful sysctl.hwoptional.1.
1146 */
1147 #ifdef arm64
1148 #undef arm64
1149 SYSCTL_INT(_hw_optional, OID_AUTO, arm64, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &arm64_flag, 0, "");
1150 #define arm64 1
1151 #else
1152 SYSCTL_INT(_hw_optional, OID_AUTO, arm64, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &arm64_flag, 0, "");
1153 #endif
1154 #endif /* ! __arm64__ */
1155
#if defined (__arm64__)

/*
 * Generate an uint64_t containing one bit per FEAT extension, reporting
 * the presence of each extension.
 *
 * The bit positions are the CAP_BIT_* constants; one bit is set for each
 * gARM_* flag that is nonzero. The result is copied out verbatim.
 */
static int
sysctl_hw_caps(__unused struct sysctl_oid *oidp, __unused void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
	/* Local buffer, one bit per FEAT, reset to 0, set if FEAT present. */
#define CAP_BYTE_NB ((CAP_BIT_NB + 7) / 8)
	uint8_t feats[CAP_BYTE_NB] = {0};

	/* Write a bit in the local buffer. */
#define CAP_DO_SET_BIT(n) { \
	assert((n) < CAP_BIT_NB); \
	const uint32_t word_id = ((uint32_t) (n)) >> 3; \
	assert(word_id < CAP_BYTE_NB); \
	const uint8_t bit_id = ((uint32_t) (n)) & 0x7; \
	feats[word_id] |= ((uint8_t) 1) << bit_id; \
}

	/* Write a capability bit in the local buffer. */
#define CAP_SET_BIT_(var, name) \
	if (var) { \
	        CAP_DO_SET_BIT(CAP_BIT_##name); \
	}
#define CAP_SET_BIT(name) CAP_SET_BIT_(gARM_##name, name)

	/* Write a capability field in the local buffer.
	 * NOTE(review): CAP_SET_FIELD is defined but not expanded within this
	 * function body — presumably for future/included use; confirm. */
#define CAP_SET_FIELD(name, val) { \
	const uint32_t start = CAP_BIT_##name; \
	const uint32_t end = start + CAP_BIT_##name##_LEN; \
	uint32_t src = CAP_BIT_##name##_##val; \
	for (uint32_t id = start; id < end; id++) { \
	        if (src & 1) { \
	                CAP_DO_SET_BIT(id); \
	        } \
	        src >>= 1; \
	} \
}

	/* Report presence of all FEATs. */
#define ARM_FEATURE_FLAG(x) CAP_SET_BIT(x)
#include <arm/arm_features.inc>
#undef ARM_FEATURE_FLAG


	/* Write the local buffer to userspace and complete. */
	return SYSCTL_OUT(req, feats, CAP_BYTE_NB);
}
/* NOTE(review): advertised as "Q" (quad) but SYSCTL_OUT emits CAP_BYTE_NB bytes —
 * confirm CAP_BIT_NB <= 64 so the payload actually fits a uint64_t. */
SYSCTL_PROC(_hw_optional_arm, OID_AUTO, caps, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_hw_caps, "Q", "");

/* Maximum SME streaming vector length in bytes (defined elsewhere). */
extern int sme_max_svl_b;
SYSCTL_INT(_hw_optional_arm, OID_AUTO, sme_max_svl_b, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &sme_max_svl_b, 0, "");
#endif /* defined (__arm64__) */
1213
1214
#if defined(__arm64__) && defined(CONFIG_XNUPOST)
/**
 * Test whether the new values for a few hw.optional sysctls matches the legacy
 * way of obtaining that information.
 *
 * Specifically, hw.optional.neon_fp16 has been used to indicate both FEAT_FP16
 * and FEAT_FHM, as we are now grabbing the information directly from the ISA
 * status registers instead of from the arm_mvfp_info, we need to check that
 * this new source won't break any existing usecases of the sysctl and assert
 * that hw.optional.neon_fp16 will return the same value as it used to for all
 * devices.
 *
 * @return KERN_SUCCESS; individual mismatches are reported via T_EXPECT.
 */
kern_return_t
arm_cpu_capabilities_legacy_test(void)
{
	T_SETUPBEGIN;
	arm_mvfp_info_t *mvfp_info = arm_mvfp_info();
	T_ASSERT_NOTNULL(mvfp_info, "arm_mvfp_info returned null pointer.");
	T_SETUPEND;


	/* Compare each legacy arm_mvfp_info field against its gARM_* replacement. */
	T_EXPECT_EQ_INT(mvfp_info->neon, gARM_AdvSIMD, "neon value should match legacy");
	T_EXPECT_EQ_INT(mvfp_info->neon_hpfp, gARM_AdvSIMD_HPFPCvt, "neon hpfp cvt value should match legacy");
	T_EXPECT_EQ_INT(mvfp_info->neon_fp16, gARM_FEAT_FP16, "neon fp16 value should match legacy");

	/* Fix: corrected misspelled log message ("capabalities" -> "capabilities"). */
	T_LOG("Completed arm cpu capabilities legacy compliance test.");
	return KERN_SUCCESS;
}
#endif /* defined(__arm64__) && defined(CONFIG_XNUPOST) */
1244
1245 /******************************************************************************
1246 * Generic MIB initialisation.
1247 *
1248 * This is a hack, and should be replaced with SYSINITs
1249 * at some point.
1250 */
1251 void
sysctl_mib_init(void)1252 sysctl_mib_init(void)
1253 {
1254 #if defined(__i386__) || defined (__x86_64__)
1255 cpu64bit = (_get_cpu_capabilities() & k64Bit) == k64Bit;
1256 #elif defined (__arm64__)
1257 cpu64bit = (cpu_type() & CPU_ARCH_ABI64) == CPU_ARCH_ABI64;
1258 #else
1259 #error Unsupported arch
1260 #endif
1261 #if defined (__i386__) || defined (__x86_64__)
1262 /* hw.cacheconfig */
1263 cacheconfig[0] = ml_cpu_cache_sharing(0, CLUSTER_TYPE_SMP, true);
1264 cacheconfig[1] = ml_cpu_cache_sharing(1, CLUSTER_TYPE_SMP, true);
1265 cacheconfig[2] = ml_cpu_cache_sharing(2, CLUSTER_TYPE_SMP, true);
1266 cacheconfig[3] = ml_cpu_cache_sharing(3, CLUSTER_TYPE_SMP, true);
1267 cacheconfig[4] = 0;
1268
1269 /* hw.packages */
1270 packages = (int)(roundup(ml_cpu_cache_sharing(0, CLUSTER_TYPE_SMP, true), cpuid_info()->thread_count)
1271 / cpuid_info()->thread_count);
1272
1273 #elif defined(__arm64__) /* end __i386 */
1274 watchpoint_flag = arm_debug_info()->num_watchpoint_pairs;
1275 breakpoint_flag = arm_debug_info()->num_breakpoint_pairs;
1276
1277 cluster_type_t min_perflevel_cluster_type = cpu_type_for_perflevel(__builtin_popcount(ml_get_cpu_types()) - 1);
1278
1279 cacheconfig[0] = ml_wait_max_cpus();
1280 cacheconfig[1] = ml_cpu_cache_sharing(1, min_perflevel_cluster_type, true);
1281 cacheconfig[2] = ml_cpu_cache_sharing(2, min_perflevel_cluster_type, true);
1282 cacheconfig[3] = 0;
1283 cacheconfig[4] = 0;
1284 cacheconfig[5] = 0;
1285 cacheconfig[6] = 0;
1286
1287 packages = 1;
1288 #else
1289 #error unknown architecture
1290 #endif /* !__i386__ && !__x86_64 && !__arm64__ */
1291 }
1292
1293 __startup_func
1294 static void
sysctl_mib_startup(void)1295 sysctl_mib_startup(void)
1296 {
1297 cputhreadtype = cpu_threadtype();
1298
1299 /*
1300 * Populate the optional portion of the hw.* MIB.
1301 *
1302 * XXX This could be broken out into parts of the code
1303 * that actually directly relate to the functions in
1304 * question.
1305 */
1306
1307 if (cputhreadtype != CPU_THREADTYPE_NONE) {
1308 sysctl_register_oid_early(&sysctl__hw_cputhreadtype);
1309 }
1310
1311 #if defined(XNU_TARGET_OS_XR)
1312 if (gPlatformChipRole != UINT32_MAX) {
1313 sysctl_register_oid_early(&sysctl__hw_chiprole);
1314 }
1315 #endif /* not XNU_TARGET_OS_XR */
1316 }
1317 STARTUP(SYSCTL, STARTUP_RANK_MIDDLE, sysctl_mib_startup);
1318