1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*-
29 * Copyright (c) 1982, 1986, 1989, 1993
30 * The Regents of the University of California. All rights reserved.
31 *
32 * This code is derived from software contributed to Berkeley by
33 * Mike Karels at Berkeley Software Design, Inc.
34 *
35 * Quite extensively rewritten by Poul-Henning Kamp of the FreeBSD
36 * project, to make these variables more userfriendly.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
67 */
68
69 #include <sys/param.h>
70 #include <sys/kernel.h>
71 #include <sys/systm.h>
72 #include <sys/sysctl.h>
73 #include <sys/proc_internal.h>
74 #include <sys/unistd.h>
75
76 #if defined(SMP)
77 #include <machine/smp.h>
78 #endif
79
80 #include <sys/param.h> /* XXX prune includes */
81 #include <sys/systm.h>
82 #include <sys/kernel.h>
83 #include <sys/malloc.h>
84 #include <sys/proc.h>
85 #include <sys/file_internal.h>
86 #include <sys/vnode.h>
87 #include <sys/unistd.h>
88 #include <sys/ioctl.h>
89 #include <sys/namei.h>
90 #include <sys/tty.h>
91 #include <sys/disklabel.h>
92 #include <sys/vm.h>
93 #include <sys/sysctl.h>
94 #include <sys/user.h>
95 #include <mach/machine.h>
96 #include <mach/mach_types.h>
97 #include <mach/vm_param.h>
98 #include <kern/task.h>
99 #include <vm/vm_kern.h>
100 #include <vm/vm_map.h>
101 #include <vm/vm_protos.h>
102 #include <mach/host_info.h>
103 #include <kern/pms.h>
104 #include <pexpert/device_tree.h>
105 #include <pexpert/pexpert.h>
106 #include <kern/sched_prim.h>
107 #include <console/serial_protos.h>
108
109 extern vm_map_t bsd_pageable_map;
110
111 #include <sys/mount_internal.h>
112 #include <sys/kdebug.h>
113
114 #include <IOKit/IOPlatformExpert.h>
115 #include <pexpert/pexpert.h>
116
117 #include <machine/config.h>
118 #include <machine/machine_routines.h>
119 #include <machine/cpu_capabilities.h>
120
121 #include <mach/mach_host.h> /* for host_info() */
122
123 #if defined(__i386__) || defined(__x86_64__)
124 #include <i386/cpuid.h> /* for cpuid_info() */
125 #endif
126
127 #if defined(__arm__) || defined(__arm64__)
128 #include <arm/cpuid.h> /* for cpuid_info() & cache_info() */
129 #endif
130
131 #if defined(CONFIG_XNUPOST)
132 #include <tests/ktest.h>
133 #endif
134
135 /**
136 * Prevents an issue with creating the sysctl node hw.optional.arm on some
137 * platforms. If the 'arm' macro is defined, then the word "arm" is preprocessed
 * to 1. As the 'arm' macro is not used in this file, we do not need to
 * redefine it after we are done.
140 */
141 #if defined(arm)
142 #undef arm
143 #endif /* defined(arm) */
144
#ifndef MAX
/*
 * Fallback maximum-of-two macro, used only if <sys/param.h> did not
 * already provide one.  Arguments and the expansion are fully
 * parenthesized so operands with lower-precedence operators (e.g.
 * MAX(x | y, z)) group correctly.  Note: each argument may be
 * evaluated twice, so avoid side-effecting arguments.
 */
#define MAX(a, b) (((a) >= (b)) ? (a) : (b))
#endif
148
149 #if (defined(__arm__) || defined(__arm64__)) && defined(CONFIG_XNUPOST)
150 kern_return_t arm_cpu_capabilities_legacy_test(void);
151 #endif /* (defined(__arm__) || defined(__arm64__)) && defined(CONFIG_XNUPOST) */
152
/* XXX This should be in a BSD accessible Mach header, but isn't. */
extern unsigned int vm_page_wire_count;

/* Backing storage for hw.cpu64bit_capable (cputhreadtype is not read in this chunk). */
static int cputhreadtype, cpu64bit;
/* Backing storage for the hw.cacheconfig / hw.cachesize opaque sysctls. */
static uint64_t cacheconfig[10], cachesize[10];
/* Backing storage for hw.packages. */
static int packages;

/* Copy of the device tree "osenvironment" property (hw.osenvironment). */
static char * osenvironment = NULL;
static uint32_t osenvironment_size = 0;
/* Nonzero once sysctl_unblock_osenvironment() has run; waited on by sysctl_osenvironment(). */
static int osenvironment_initialized = 0;

/* Values of the "ephemeral-storage" / "use-recovery-securityd" device tree properties. */
static uint32_t ephemeral_storage = 0;
static uint32_t use_recovery_securityd = 0;

/* Tracks which of the optional device tree properties above were actually present. */
static struct {
	uint32_t ephemeral_storage:1;
	uint32_t use_recovery_securityd:1;
} property_existence = {0, 0};
171
/*
 * Top-level nodes of the sysctl namespace.
 */
SYSCTL_EXTENSIBLE_NODE(, 0, sysctl, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "Sysctl internal magic");
SYSCTL_EXTENSIBLE_NODE(, CTL_KERN, kern, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "High kernel, proc, limits &c");
SYSCTL_EXTENSIBLE_NODE(, CTL_VM, vm, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "Virtual memory");
SYSCTL_EXTENSIBLE_NODE(, CTL_VFS, vfs, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "File system");
SYSCTL_EXTENSIBLE_NODE(, CTL_NET, net, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "Network, (see socket.h)");
SYSCTL_EXTENSIBLE_NODE(, CTL_DEBUG, debug, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "Debugging");
#if DEBUG || DEVELOPMENT
/* debug.test is only present on DEBUG/DEVELOPMENT kernels. */
SYSCTL_NODE(_debug, OID_AUTO, test,
    CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_MASKED, 0, "tests");
#endif
SYSCTL_NODE(, CTL_HW, hw, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "hardware");
SYSCTL_EXTENSIBLE_NODE(, CTL_MACHDEP, machdep, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "machine dependent");
SYSCTL_NODE(, CTL_USER, user, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "user-level");

SYSCTL_NODE(_kern, OID_AUTO, bridge, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "bridge");

/* Convenience wrapper: copy a fixed-size object out to the requester. */
#define SYSCTL_RETURN(r, x)     SYSCTL_OUT(r, &x, sizeof(x))
199
200 /******************************************************************************
201 * hw.* MIB
202 */
203
204 #define CTLHW_RETQUAD (1U << 31)
205 #define CTLHW_LOCAL (1U << 30)
206 #define CTLHW_PERFLEVEL (1U << 29)
207
208 #define HW_LOCAL_CPUTHREADTYPE (1 | CTLHW_LOCAL)
209 #define HW_LOCAL_PHYSICALCPU (2 | CTLHW_LOCAL)
210 #define HW_LOCAL_PHYSICALCPUMAX (3 | CTLHW_LOCAL)
211 #define HW_LOCAL_LOGICALCPU (4 | CTLHW_LOCAL)
212 #define HW_LOCAL_LOGICALCPUMAX (5 | CTLHW_LOCAL)
213 #define HW_LOCAL_CPUTYPE (6 | CTLHW_LOCAL)
214 #define HW_LOCAL_CPUSUBTYPE (7 | CTLHW_LOCAL)
215 #define HW_LOCAL_CPUFAMILY (8 | CTLHW_LOCAL)
216 #define HW_LOCAL_CPUSUBFAMILY (9 | CTLHW_LOCAL)
217 #define HW_NPERFLEVELS (10 | CTLHW_LOCAL)
218 #define HW_PERFLEVEL_PHYSICALCPU (11 | CTLHW_PERFLEVEL)
219 #define HW_PERFLEVEL_PHYSICALCPUMAX (12 | CTLHW_PERFLEVEL)
220 #define HW_PERFLEVEL_LOGICALCPU (13 | CTLHW_PERFLEVEL)
221 #define HW_PERFLEVEL_LOGICALCPUMAX (14 | CTLHW_PERFLEVEL)
222 #define HW_PERFLEVEL_L1ICACHESIZE (15 | CTLHW_PERFLEVEL)
223 #define HW_PERFLEVEL_L1DCACHESIZE (16 | CTLHW_PERFLEVEL)
224 #define HW_PERFLEVEL_L2CACHESIZE (17 | CTLHW_PERFLEVEL)
225 #define HW_PERFLEVEL_CPUSPERL2 (18 | CTLHW_PERFLEVEL)
226 #define HW_PERFLEVEL_L3CACHESIZE (19 | CTLHW_PERFLEVEL)
227 #define HW_PERFLEVEL_CPUSPERL3 (20 | CTLHW_PERFLEVEL)
228
229
230 /*
231 * For a given perflevel, return the corresponding CPU type.
232 */
233 static cluster_type_t
cpu_type_for_perflevel(int perflevel)234 cpu_type_for_perflevel(int perflevel)
235 {
236 unsigned int cpu_types = ml_get_cpu_types();
237 unsigned int n_perflevels = __builtin_popcount(cpu_types);
238
239 assert((perflevel >= 0) && (perflevel < n_perflevels));
240
241 int current_idx = 0, current_perflevel = -1;
242
243 while (cpu_types) {
244 current_perflevel += cpu_types & 1;
245 if (current_perflevel == (n_perflevels - (perflevel + 1))) {
246 return current_idx;
247 }
248
249 cpu_types >>= 1;
250 current_idx++;
251 }
252
253 return 0;
254 }
255
256 /*
257 * Supporting some variables requires us to do "real" work. We
258 * gather some of that here.
259 */
260 static int
sysctl_hw_generic(__unused struct sysctl_oid * oidp,void * arg1,int arg2,struct sysctl_req * req)261 sysctl_hw_generic(__unused struct sysctl_oid *oidp, void *arg1,
262 int arg2, struct sysctl_req *req)
263 {
264 char dummy[65];
265 int epochTemp;
266 ml_cpu_info_t cpu_info;
267 int val, doquad;
268 long long qval;
269 unsigned int cpu_count;
270 host_basic_info_data_t hinfo;
271 kern_return_t kret;
272 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
273
274 /*
275 * If we are using one of the perflevel sysctls, return early if the perflevel
276 * does not exist in this system.
277 */
278 int perflevel = (int)arg1;
279 int n_perflevels = __builtin_popcount(ml_get_cpu_types());
280
281 if (arg2 & CTLHW_PERFLEVEL) {
282 if ((perflevel < 0) || (perflevel >= n_perflevels)) {
283 return ENOENT;
284 }
285 } else {
286 perflevel = n_perflevels - 1;
287 }
288
289 /*
290 * Test and mask off the 'return quad' flag.
291 * Note that only some things here support it.
292 */
293 doquad = arg2 & CTLHW_RETQUAD;
294 arg2 &= ~CTLHW_RETQUAD;
295
296 ml_cpu_get_info_type(&cpu_info, cpu_type_for_perflevel(perflevel));
297
298 #define BSD_HOST 1
299 kret = host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
300
301 /*
302 * Handle various OIDs.
303 *
304 * OIDs that can return int or quad set val and qval and then break.
305 * Errors and int-only values return inline.
306 */
307 switch (arg2) {
308 case HW_NCPU:
309 if (kret == KERN_SUCCESS) {
310 return SYSCTL_RETURN(req, hinfo.max_cpus);
311 } else {
312 return EINVAL;
313 }
314 case HW_AVAILCPU:
315 if (kret == KERN_SUCCESS) {
316 return SYSCTL_RETURN(req, hinfo.avail_cpus);
317 } else {
318 return EINVAL;
319 }
320 case HW_LOCAL_PHYSICALCPU:
321 if (kret == KERN_SUCCESS) {
322 return SYSCTL_RETURN(req, hinfo.physical_cpu);
323 } else {
324 return EINVAL;
325 }
326 case HW_LOCAL_PHYSICALCPUMAX:
327 if (kret == KERN_SUCCESS) {
328 return SYSCTL_RETURN(req, hinfo.physical_cpu_max);
329 } else {
330 return EINVAL;
331 }
332 case HW_LOCAL_LOGICALCPU:
333 if (kret == KERN_SUCCESS) {
334 return SYSCTL_RETURN(req, hinfo.logical_cpu);
335 } else {
336 return EINVAL;
337 }
338 case HW_LOCAL_LOGICALCPUMAX:
339 if (kret == KERN_SUCCESS) {
340 return SYSCTL_RETURN(req, hinfo.logical_cpu_max);
341 } else {
342 return EINVAL;
343 }
344 case HW_NPERFLEVELS:
345 return SYSCTL_RETURN(req, n_perflevels);
346 case HW_PERFLEVEL_PHYSICALCPU:
347 cpu_count = ml_get_cpu_number_type(cpu_type_for_perflevel(perflevel), false, true);
348 return SYSCTL_RETURN(req, cpu_count);
349 case HW_PERFLEVEL_PHYSICALCPUMAX:
350 cpu_count = ml_get_cpu_number_type(cpu_type_for_perflevel(perflevel), false, false);
351 return SYSCTL_RETURN(req, cpu_count);
352 case HW_PERFLEVEL_LOGICALCPU:
353 cpu_count = ml_get_cpu_number_type(cpu_type_for_perflevel(perflevel), true, true);
354 return SYSCTL_RETURN(req, cpu_count);
355 case HW_PERFLEVEL_LOGICALCPUMAX:
356 cpu_count = ml_get_cpu_number_type(cpu_type_for_perflevel(perflevel), true, false);
357 return SYSCTL_RETURN(req, cpu_count);
358 case HW_PERFLEVEL_L1ICACHESIZE:
359 val = (int)cpu_info.l1_icache_size;
360 qval = (long long)cpu_info.l1_icache_size;
361 break;
362 case HW_PERFLEVEL_L1DCACHESIZE:
363 val = (int)cpu_info.l1_dcache_size;
364 qval = (long long)cpu_info.l1_dcache_size;
365 break;
366 case HW_PERFLEVEL_L2CACHESIZE:
367 val = (int)cpu_info.l2_cache_size;
368 qval = (long long)cpu_info.l2_cache_size;
369 break;
370 case HW_PERFLEVEL_CPUSPERL2:
371 cpu_count = ml_cpu_cache_sharing(2, cpu_type_for_perflevel(perflevel), false);
372 return SYSCTL_RETURN(req, cpu_count);
373 case HW_PERFLEVEL_L3CACHESIZE:
374 if (cpu_info.l3_cache_size == UINT32_MAX) {
375 return EINVAL;
376 }
377 val = (int)cpu_info.l3_cache_size;
378 qval = (long long)cpu_info.l3_cache_size;
379 break;
380 case HW_PERFLEVEL_CPUSPERL3:
381 if (cpu_info.l3_cache_size == UINT32_MAX) {
382 return EINVAL;
383 }
384 cpu_count = ml_cpu_cache_sharing(3, cpu_type_for_perflevel(perflevel), false);
385 return SYSCTL_RETURN(req, cpu_count);
386 case HW_LOCAL_CPUTYPE:
387 if (kret == KERN_SUCCESS) {
388 return SYSCTL_RETURN(req, hinfo.cpu_type);
389 } else {
390 return EINVAL;
391 }
392 case HW_LOCAL_CPUSUBTYPE:
393 if (kret == KERN_SUCCESS) {
394 return SYSCTL_RETURN(req, hinfo.cpu_subtype);
395 } else {
396 return EINVAL;
397 }
398 case HW_LOCAL_CPUFAMILY:
399 {
400 int cpufamily = 0;
401 #if defined (__i386__) || defined (__x86_64__)
402 cpufamily = cpuid_cpufamily();
403 #elif defined(__arm__) || defined(__arm64__)
404 {
405 cpufamily = cpuid_get_cpufamily();
406 }
407 #else
408 #error unknown architecture
409 #endif
410 return SYSCTL_RETURN(req, cpufamily);
411 }
412 case HW_LOCAL_CPUSUBFAMILY:
413 {
414 int cpusubfamily = 0;
415 #if defined (__i386__) || defined (__x86_64__)
416 cpusubfamily = CPUSUBFAMILY_UNKNOWN;
417 #elif defined(__arm__) || defined(__arm64__)
418 {
419 cpusubfamily = cpuid_get_cpusubfamily();
420 }
421 #else
422 #error unknown architecture
423 #endif
424 return SYSCTL_RETURN(req, cpusubfamily);
425 }
426 case HW_PAGESIZE:
427 {
428 vm_map_t map = get_task_map(current_task());
429 val = vm_map_page_size(map);
430 qval = (long long)val;
431 break;
432 }
433 case HW_CACHELINE:
434 val = (int)cpu_info.cache_line_size;
435 qval = (long long)val;
436 break;
437 case HW_L1ICACHESIZE:
438 val = (int)cpu_info.l1_icache_size;
439 qval = (long long)cpu_info.l1_icache_size;
440 break;
441 case HW_L1DCACHESIZE:
442 val = (int)cpu_info.l1_dcache_size;
443 qval = (long long)cpu_info.l1_dcache_size;
444 break;
445 case HW_L2CACHESIZE:
446 if (cpu_info.l2_cache_size == UINT32_MAX) {
447 return EINVAL;
448 }
449 val = (int)cpu_info.l2_cache_size;
450 qval = (long long)cpu_info.l2_cache_size;
451 break;
452 case HW_L3CACHESIZE:
453 if (cpu_info.l3_cache_size == UINT32_MAX) {
454 return EINVAL;
455 }
456 val = (int)cpu_info.l3_cache_size;
457 qval = (long long)cpu_info.l3_cache_size;
458 break;
459 case HW_TARGET:
460 bzero(dummy, sizeof(dummy));
461 if (!PEGetTargetName(dummy, 64)) {
462 return EINVAL;
463 }
464 dummy[64] = 0;
465 return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
466 case HW_PRODUCT:
467 bzero(dummy, sizeof(dummy));
468 if (!PEGetProductName(dummy, 64)) {
469 return EINVAL;
470 }
471 dummy[64] = 0;
472 return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
473
474 /*
475 * Deprecated variables. We still support these for
476 * backwards compatibility purposes only.
477 */
478 #if XNU_TARGET_OS_OSX && defined(__arm64__)
479 /* The following two are kludged for backward
480 * compatibility. Use hw.product/hw.target for something
481 * consistent instead. */
482
483 case HW_MACHINE:
484 bzero(dummy, sizeof(dummy));
485 if (proc_platform(req->p) == PLATFORM_IOS) {
486 /* iOS-on-Mac processes don't expect the macOS kind of
487 * hw.machine, e.g. "arm64", but are used to seeing
488 * a product string on iOS, which we here hardcode
489 * to return as "iPad8,6" for compatibility.
490 *
491 * Another reason why hw.machine and hw.model are
492 * trouble and hw.target+hw.product should be used
493 * instead.
494 */
495
496 strlcpy(dummy, "iPad8,6", sizeof(dummy));
497 }
498 else {
499 strlcpy(dummy, "arm64", sizeof(dummy));
500 }
501 dummy[64] = 0;
502 return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
503 case HW_MODEL:
504 bzero(dummy, sizeof(dummy));
505 if (!PEGetProductName(dummy, 64)) {
506 return EINVAL;
507 }
508 dummy[64] = 0;
509 return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
510 #else
511 case HW_MACHINE:
512 bzero(dummy, sizeof(dummy));
513 if (!PEGetMachineName(dummy, 64)) {
514 return EINVAL;
515 }
516 dummy[64] = 0;
517 return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
518 case HW_MODEL:
519 bzero(dummy, sizeof(dummy));
520 if (!PEGetModelName(dummy, 64)) {
521 return EINVAL;
522 }
523 dummy[64] = 0;
524 return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
525 #endif
526 case HW_USERMEM:
527 {
528 int usermem = (int)(mem_size - vm_page_wire_count * page_size);
529
530 return SYSCTL_RETURN(req, usermem);
531 }
532 case HW_EPOCH:
533 epochTemp = PEGetPlatformEpoch();
534 if (epochTemp == -1) {
535 return EINVAL;
536 }
537 return SYSCTL_RETURN(req, epochTemp);
538 case HW_VECTORUNIT: {
539 int vector = cpu_info.vector_unit == 0? 0 : 1;
540 return SYSCTL_RETURN(req, vector);
541 }
542 case HW_L2SETTINGS:
543 if (cpu_info.l2_cache_size == UINT32_MAX) {
544 return EINVAL;
545 }
546 return SYSCTL_RETURN(req, cpu_info.l2_settings);
547 case HW_L3SETTINGS:
548 if (cpu_info.l3_cache_size == UINT32_MAX) {
549 return EINVAL;
550 }
551 return SYSCTL_RETURN(req, cpu_info.l3_settings);
552 default:
553 return ENOTSUP;
554 }
555 /*
556 * Callers may come to us with either int or quad buffers.
557 */
558 if (doquad) {
559 return SYSCTL_RETURN(req, qval);
560 }
561 return SYSCTL_RETURN(req, val);
562 }
563
564 /* hw.pagesize and hw.tbfrequency are expected as 64 bit values */
565 static int
sysctl_pagesize(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)566 sysctl_pagesize
567 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
568 {
569 vm_map_t map = get_task_map(current_task());
570 long long l = vm_map_page_size(map);
571 return sysctl_io_number(req, l, sizeof(l), NULL, NULL);
572 }
573
574 static int
sysctl_pagesize32(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)575 sysctl_pagesize32
576 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
577 {
578 long long l;
579 #if __arm64__
580 l = (long long) (1 << page_shift_user32);
581 #else /* __arm64__ */
582 l = (long long) PAGE_SIZE;
583 #endif /* __arm64__ */
584 return sysctl_io_number(req, l, sizeof(l), NULL, NULL);
585 }
586
587 static int
sysctl_tbfrequency(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)588 sysctl_tbfrequency
589 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
590 {
591 long long l = gPEClockFrequencyInfo.timebase_frequency_hz;
592 return sysctl_io_number(req, l, sizeof(l), NULL, NULL);
593 }
594
595 /*
596 * Called by IOKit on Intel, or by sysctl_load_devicetree_entries()
597 */
598 void
sysctl_set_osenvironment(unsigned int size,const void * value)599 sysctl_set_osenvironment(unsigned int size, const void* value)
600 {
601 if (osenvironment_size == 0 && size > 0) {
602 osenvironment = zalloc_permanent(size, ZALIGN_NONE);
603 if (osenvironment) {
604 memcpy(osenvironment, value, size);
605 osenvironment_size = size;
606 }
607 }
608 }
609
/*
 * Mark the osenvironment value as initialized and wake any thread
 * blocked in sysctl_osenvironment() waiting for it.  The flag must be
 * set before the wakeup so waiters observe it.
 */
void
sysctl_unblock_osenvironment(void)
{
	os_atomic_inc(&osenvironment_initialized, relaxed);
	thread_wakeup((event_t) &osenvironment_initialized);
}
616
617 /*
618 * Create sysctl entries coming from device tree.
619 *
620 * Entries from device tree are loaded here because SecureDTLookupEntry() only works before
621 * PE_init_iokit(). Doing this also avoids the extern-C hackery to access these entries
622 * from IORegistry (which requires C++).
623 */
624 __startup_func
625 static void
sysctl_load_devicetree_entries(void)626 sysctl_load_devicetree_entries(void)
627 {
628 DTEntry chosen;
629 void const *value;
630 unsigned int size;
631
632 if (kSuccess != SecureDTLookupEntry(0, "/chosen", &chosen)) {
633 return;
634 }
635
636 /* load osenvironment */
637 if (kSuccess == SecureDTGetProperty(chosen, "osenvironment", (void const **) &value, &size)) {
638 sysctl_set_osenvironment(size, value);
639 }
640
641 /* load ephemeral_storage */
642 if (kSuccess == SecureDTGetProperty(chosen, "ephemeral-storage", (void const **) &value, &size)) {
643 if (size == sizeof(uint32_t)) {
644 ephemeral_storage = *(uint32_t const *)value;
645 property_existence.ephemeral_storage = 1;
646 }
647 }
648
649 /* load use_recovery_securityd */
650 if (kSuccess == SecureDTGetProperty(chosen, "use-recovery-securityd", (void const **) &value, &size)) {
651 if (size == sizeof(uint32_t)) {
652 use_recovery_securityd = *(uint32_t const *)value;
653 property_existence.use_recovery_securityd = 1;
654 }
655 }
656 }
657 STARTUP(SYSCTL, STARTUP_RANK_MIDDLE, sysctl_load_devicetree_entries);
658
/*
 * hw.osenvironment: return the value captured by
 * sysctl_set_osenvironment(), or EINVAL if none was recorded.
 *
 * On x86-64 DEVELOPMENT/DEBUG kernels the handler first waits until
 * sysctl_unblock_osenvironment() signals that initialization finished,
 * since the value arrives asynchronously from IOKit there.
 */
static int
sysctl_osenvironment
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#if defined(__x86_64__)
#if (DEVELOPMENT || DEBUG)
	if (os_atomic_load(&osenvironment_initialized, relaxed) == 0) {
		assert_wait((event_t) &osenvironment_initialized, THREAD_UNINT);
		/* Re-check after registering the wait to close the race with the wakeup. */
		if (os_atomic_load(&osenvironment_initialized, relaxed) != 0) {
			clear_wait(current_thread(), THREAD_AWAKENED);
		} else {
			(void) thread_block(THREAD_CONTINUE_NULL);
		}
	}
#endif
#endif
	if (osenvironment_size > 0) {
		return SYSCTL_OUT(req, osenvironment, osenvironment_size);
	} else {
		return EINVAL;
	}
}
681
682 static int
sysctl_ephemeral_storage(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)683 sysctl_ephemeral_storage
684 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
685 {
686 if (property_existence.ephemeral_storage) {
687 return SYSCTL_OUT(req, &ephemeral_storage, sizeof(ephemeral_storage));
688 } else {
689 return EINVAL;
690 }
691 }
692
693 static int
sysctl_use_recovery_securityd(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)694 sysctl_use_recovery_securityd
695 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
696 {
697 if (property_existence.use_recovery_securityd) {
698 return SYSCTL_OUT(req, &use_recovery_securityd, sizeof(use_recovery_securityd));
699 } else {
700 return EINVAL;
701 }
702 }
703
704 static int
sysctl_use_kernelmanagerd(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)705 sysctl_use_kernelmanagerd
706 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
707 {
708 #if CONFIG_ARROW
709 static int use_kernelmanagerd = 1;
710 #else
711 static int use_kernelmanagerd = 0;
712 #endif
713 static bool once = false;
714
715 if (!once) {
716 kc_format_t kc_format;
717 PE_get_primary_kc_format(&kc_format);
718 if (kc_format == KCFormatFileset) {
719 use_kernelmanagerd = 1;
720 } else {
721 PE_parse_boot_argn("kernelmanagerd", &use_kernelmanagerd, sizeof(use_kernelmanagerd));
722 }
723 once = true;
724 }
725 return SYSCTL_OUT(req, &use_kernelmanagerd, sizeof(use_kernelmanagerd));
726 }
727
/* arg2 selectors shared by sysctl_bus_frequency() / sysctl_cpu_frequency(). */
#define HW_LOCAL_FREQUENCY            1
#define HW_LOCAL_FREQUENCY_MIN        2
#define HW_LOCAL_FREQUENCY_MAX        3
#define HW_LOCAL_FREQUENCY_CLOCK_RATE 4
732
733 static int
sysctl_bus_frequency(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,__unused struct sysctl_req * req)734 sysctl_bus_frequency
735 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
736 {
737
738 #if DEBUG || DEVELOPMENT || (!defined(__arm__) && !defined(__arm64__))
739 switch (arg2) {
740 case HW_LOCAL_FREQUENCY:
741 return SYSCTL_RETURN(req, gPEClockFrequencyInfo.bus_frequency_hz);
742 case HW_LOCAL_FREQUENCY_MIN:
743 return SYSCTL_RETURN(req, gPEClockFrequencyInfo.bus_frequency_min_hz);
744 case HW_LOCAL_FREQUENCY_MAX:
745 return SYSCTL_RETURN(req, gPEClockFrequencyInfo.bus_frequency_max_hz);
746 case HW_LOCAL_FREQUENCY_CLOCK_RATE:
747 return SYSCTL_OUT(req, &gPEClockFrequencyInfo.bus_clock_rate_hz, sizeof(int));
748 default:
749 return EINVAL;
750 }
751 #else
752 return ENOENT;
753 #endif
754 }
755
756 static int
sysctl_cpu_frequency(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,__unused struct sysctl_req * req)757 sysctl_cpu_frequency
758 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
759 {
760
761 #if DEBUG || DEVELOPMENT || (!defined(__arm__) && !defined(__arm64__))
762 switch (arg2) {
763 case HW_LOCAL_FREQUENCY:
764 return SYSCTL_RETURN(req, gPEClockFrequencyInfo.cpu_frequency_hz);
765 case HW_LOCAL_FREQUENCY_MIN:
766 return SYSCTL_RETURN(req, gPEClockFrequencyInfo.cpu_frequency_min_hz);
767 case HW_LOCAL_FREQUENCY_MAX:
768 return SYSCTL_RETURN(req, gPEClockFrequencyInfo.cpu_frequency_max_hz);
769 case HW_LOCAL_FREQUENCY_CLOCK_RATE:
770 return SYSCTL_OUT(req, &gPEClockFrequencyInfo.cpu_clock_rate_hz, sizeof(int));
771 default:
772 return EINVAL;
773 }
774 #else
775 return ENOENT;
776 #endif
777 }
778
779 /*
780 * This sysctl will signal to userspace that a serial console is desired:
781 *
782 * hw.serialdebugmode = 1 will load the serial console job in the multi-user session;
783 * hw.serialdebugmode = 2 will load the serial console job in the base system as well
784 */
785 static int
sysctl_serialdebugmode(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)786 sysctl_serialdebugmode
787 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
788 {
789 uint32_t serial_boot_arg;
790 int serialdebugmode = 0;
791
792 if (PE_parse_boot_argn("serial", &serial_boot_arg, sizeof(serial_boot_arg)) &&
793 (serial_boot_arg & SERIALMODE_OUTPUT) && (serial_boot_arg & SERIALMODE_INPUT)) {
794 serialdebugmode = (serial_boot_arg & SERIALMODE_BASE_TTY) ? 2 : 1;
795 }
796
797 return sysctl_io_number(req, serialdebugmode, sizeof(serialdebugmode), NULL, NULL);
798 }
799
800 /*
801 * hw.* MIB variables.
802 */
803 SYSCTL_PROC(_hw, HW_NCPU, ncpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_NCPU, sysctl_hw_generic, "I", "");
804 SYSCTL_PROC(_hw, HW_AVAILCPU, activecpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_AVAILCPU, sysctl_hw_generic, "I", "");
805 SYSCTL_PROC(_hw, OID_AUTO, physicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_PHYSICALCPU, sysctl_hw_generic, "I", "");
806 SYSCTL_PROC(_hw, OID_AUTO, physicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_PHYSICALCPUMAX, sysctl_hw_generic, "I", "");
807 SYSCTL_PROC(_hw, OID_AUTO, logicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_LOGICALCPU, sysctl_hw_generic, "I", "");
808 SYSCTL_PROC(_hw, OID_AUTO, logicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_LOGICALCPUMAX, sysctl_hw_generic, "I", "");
809 SYSCTL_INT(_hw, HW_BYTEORDER, byteorder, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (int *)NULL, BYTE_ORDER, "");
810 SYSCTL_PROC(_hw, OID_AUTO, cputype, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_CPUTYPE, sysctl_hw_generic, "I", "");
811 SYSCTL_PROC(_hw, OID_AUTO, cpusubtype, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_CPUSUBTYPE, sysctl_hw_generic, "I", "");
812 SYSCTL_INT(_hw, OID_AUTO, cpu64bit_capable, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &cpu64bit, 0, "");
813 SYSCTL_PROC(_hw, OID_AUTO, cpufamily, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_CPUFAMILY, sysctl_hw_generic, "I", "");
814 SYSCTL_PROC(_hw, OID_AUTO, cpusubfamily, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_CPUSUBFAMILY, sysctl_hw_generic, "I", "");
815 SYSCTL_OPAQUE(_hw, OID_AUTO, cacheconfig, CTLFLAG_RD | CTLFLAG_LOCKED, &cacheconfig, sizeof(cacheconfig), "Q", "");
816 SYSCTL_OPAQUE(_hw, OID_AUTO, cachesize, CTLFLAG_RD | CTLFLAG_LOCKED, &cachesize, sizeof(cachesize), "Q", "");
817 SYSCTL_PROC(_hw, OID_AUTO, pagesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_pagesize, "Q", "");
818 SYSCTL_PROC(_hw, OID_AUTO, pagesize32, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_pagesize32, "Q", "");
819 SYSCTL_PROC(_hw, OID_AUTO, busfrequency, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY, sysctl_bus_frequency, "Q", "");
820 SYSCTL_PROC(_hw, OID_AUTO, busfrequency_min, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_MIN, sysctl_bus_frequency, "Q", "");
821 SYSCTL_PROC(_hw, OID_AUTO, busfrequency_max, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_MAX, sysctl_bus_frequency, "Q", "");
822 SYSCTL_PROC(_hw, OID_AUTO, cpufrequency, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY, sysctl_cpu_frequency, "Q", "");
823 SYSCTL_PROC(_hw, OID_AUTO, cpufrequency_min, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_MIN, sysctl_cpu_frequency, "Q", "");
824 SYSCTL_PROC(_hw, OID_AUTO, cpufrequency_max, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_MAX, sysctl_cpu_frequency, "Q", "");
825 SYSCTL_PROC(_hw, OID_AUTO, cachelinesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_CACHELINE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", "");
826 SYSCTL_PROC(_hw, OID_AUTO, l1icachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L1ICACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", "");
827 SYSCTL_PROC(_hw, OID_AUTO, l1dcachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L1DCACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", "");
828 SYSCTL_PROC(_hw, OID_AUTO, l2cachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L2CACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", "");
829 SYSCTL_PROC(_hw, OID_AUTO, l3cachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L3CACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", "");
830 #if (defined(__arm__) || defined(__arm64__)) && (DEBUG || DEVELOPMENT)
831 SYSCTL_QUAD(_hw, OID_AUTO, memfrequency, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.mem_frequency_hz, "");
832 SYSCTL_QUAD(_hw, OID_AUTO, memfrequency_min, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.mem_frequency_min_hz, "");
833 SYSCTL_QUAD(_hw, OID_AUTO, memfrequency_max, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.mem_frequency_max_hz, "");
834 SYSCTL_QUAD(_hw, OID_AUTO, prffrequency, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.prf_frequency_hz, "");
835 SYSCTL_QUAD(_hw, OID_AUTO, prffrequency_min, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.prf_frequency_min_hz, "");
836 SYSCTL_QUAD(_hw, OID_AUTO, prffrequency_max, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.prf_frequency_max_hz, "");
837 SYSCTL_QUAD(_hw, OID_AUTO, fixfrequency, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.fix_frequency_hz, "");
838 #endif /* __arm__ || __arm64__ */
839 SYSCTL_PROC(_hw, OID_AUTO, tbfrequency, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_tbfrequency, "Q", "");
840 #if XNU_TARGET_OS_OSX
841 SYSCTL_QUAD(_hw, HW_MEMSIZE, memsize, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &max_mem_actual, "");
842 #else
843 SYSCTL_QUAD(_hw, HW_MEMSIZE, memsize, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &max_mem, "");
844 #endif /* XNU_TARGET_OS_OSX */
845 SYSCTL_INT(_hw, OID_AUTO, packages, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &packages, 0, "");
846 SYSCTL_PROC(_hw, OID_AUTO, osenvironment, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_osenvironment, "A", "");
847 SYSCTL_PROC(_hw, OID_AUTO, ephemeral_storage, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_ephemeral_storage, "I", "");
848 SYSCTL_PROC(_hw, OID_AUTO, use_recovery_securityd, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_use_recovery_securityd, "I", "");
849 SYSCTL_PROC(_hw, OID_AUTO, use_kernelmanagerd, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_use_kernelmanagerd, "I", "");
850 SYSCTL_PROC(_hw, OID_AUTO, serialdebugmode, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_serialdebugmode, "I", "");
851
852 /*
853 * hw.perflevelN.* variables.
854 * Users may check these to determine properties that vary across different CPU types, such as number of CPUs,
855 * or cache sizes. Perflevel 0 corresponds to the highest performance one.
856 */
857 SYSCTL_NODE(_hw, OID_AUTO, perflevel0, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, NULL, "Perf level 0 topology and cache geometry paramaters");
858 SYSCTL_NODE(_hw, OID_AUTO, perflevel1, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, NULL, "Perf level 1 topology and cache geometry paramaters");
859 SYSCTL_PROC(_hw, OID_AUTO, nperflevels, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_NPERFLEVELS, sysctl_hw_generic, "I", "Number of performance levels supported by this system");
860
/*
 * Per-perflevel topology/cache attributes. arg1 is the perflevel index
 * ((void *)0 or (void *)1) and arg2 selects the attribute; both are decoded
 * by sysctl_hw_generic.
 */
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, physicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_PHYSICALCPU, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, physicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_PHYSICALCPUMAX, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, logicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_LOGICALCPU, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, logicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_LOGICALCPUMAX, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, l1icachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_L1ICACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, l1dcachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_L1DCACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, l2cachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_L2CACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, cpusperl2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_CPUSPERL2, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, l3cachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_L3CACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, cpusperl3, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_CPUSPERL3, sysctl_hw_generic, "I", "");

/* Same attribute set for perflevel 1 (the lower-performance level). */
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, physicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_PHYSICALCPU, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, physicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_PHYSICALCPUMAX, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, logicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_LOGICALCPU, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, logicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_LOGICALCPUMAX, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, l1icachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_L1ICACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, l1dcachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_L1DCACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, l2cachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_L2CACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, cpusperl2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_CPUSPERL2, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, l3cachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_L3CACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, cpusperl3, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_CPUSPERL3, sysctl_hw_generic, "I", "");
882
883 /*
884 * Optional CPU features can register nodes below hw.optional.
885 *
886 * If the feature is not present, the node should either not be registered,
887 * or it should return 0. If the feature is present, the node should return
888 * 1.
889 */
890 SYSCTL_NODE(_hw, OID_AUTO, optional, CTLFLAG_RW | CTLFLAG_LOCKED, NULL, "optional features");
891 SYSCTL_NODE(_hw_optional, OID_AUTO, arm, CTLFLAG_RW | CTLFLAG_LOCKED, NULL, "optional features for ARM processors");
892
893 SYSCTL_INT(_hw_optional, OID_AUTO, floatingpoint, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (int *)NULL, 1, ""); /* always set */
894
895 /*
896 * Optional device hardware features can be registered by drivers below hw.features
897 */
898 SYSCTL_EXTENSIBLE_NODE(_hw, OID_AUTO, features, CTLFLAG_RD | CTLFLAG_LOCKED, NULL, "hardware features");
899
900 /*
901 * Deprecated variables. These are supported for backwards compatibility
902 * purposes only. The MASKED flag requests that the variables not be
903 * printed by sysctl(8) and similar utilities.
904 *
905 * The variables named *_compat here are int-sized versions of variables
906 * that are now exported as quads. The int-sized versions are normally
907 * looked up only by number, wheras the quad-sized versions should be
908 * looked up by name.
909 *
910 * The *_compat nodes are *NOT* visible within the kernel.
911 */
912
913 SYSCTL_PROC(_hw, HW_PAGESIZE, pagesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_PAGESIZE, sysctl_hw_generic, "I", "");
914 SYSCTL_PROC(_hw, HW_BUS_FREQ, busfrequency_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_CLOCK_RATE, sysctl_bus_frequency, "I", "");
915 SYSCTL_PROC(_hw, HW_CPU_FREQ, cpufrequency_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_CLOCK_RATE, sysctl_cpu_frequency, "I", "");
916 SYSCTL_PROC(_hw, HW_CACHELINE, cachelinesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_CACHELINE, sysctl_hw_generic, "I", "");
917 SYSCTL_PROC(_hw, HW_L1ICACHESIZE, l1icachesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L1ICACHESIZE, sysctl_hw_generic, "I", "");
918 SYSCTL_PROC(_hw, HW_L1DCACHESIZE, l1dcachesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L1DCACHESIZE, sysctl_hw_generic, "I", "");
919 SYSCTL_PROC(_hw, HW_L2CACHESIZE, l2cachesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L2CACHESIZE, sysctl_hw_generic, "I", "");
920 SYSCTL_PROC(_hw, HW_L3CACHESIZE, l3cachesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L3CACHESIZE, sysctl_hw_generic, "I", "");
921 SYSCTL_COMPAT_INT(_hw, HW_TB_FREQ, tbfrequency_compat, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.timebase_frequency_hz, 0, "");
922 SYSCTL_PROC(_hw, HW_MACHINE, machine, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_MACHINE, sysctl_hw_generic, "A", "");
923 SYSCTL_PROC(_hw, HW_MODEL, model, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_MODEL, sysctl_hw_generic, "A", "");
924 SYSCTL_PROC(_hw, HW_TARGET, target, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_TARGET, sysctl_hw_generic, "A", "");
925 SYSCTL_PROC(_hw, HW_PRODUCT, product, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_PRODUCT, sysctl_hw_generic, "A", "");
926 SYSCTL_COMPAT_UINT(_hw, HW_PHYSMEM, physmem, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, &mem_size, 0, "");
927 SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_USERMEM, sysctl_hw_generic, "I", "");
928 SYSCTL_PROC(_hw, HW_EPOCH, epoch, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_EPOCH, sysctl_hw_generic, "I", "");
929 SYSCTL_PROC(_hw, HW_VECTORUNIT, vectorunit, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_VECTORUNIT, sysctl_hw_generic, "I", "");
930 SYSCTL_PROC(_hw, HW_L2SETTINGS, l2settings, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L2SETTINGS, sysctl_hw_generic, "I", "");
931 SYSCTL_PROC(_hw, HW_L3SETTINGS, l3settings, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L3SETTINGS, sysctl_hw_generic, "I", "");
932 SYSCTL_INT(_hw, OID_AUTO, cputhreadtype, CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, &cputhreadtype, 0, "");
933
934 #if defined(__i386__) || defined(__x86_64__) || CONFIG_X86_64_COMPAT
935 static int
sysctl_cpu_capability(__unused struct sysctl_oid * oidp,void * arg1,__unused int arg2,struct sysctl_req * req)936 sysctl_cpu_capability
937 (__unused struct sysctl_oid *oidp, void *arg1, __unused int arg2, struct sysctl_req *req)
938 {
939 uint64_t caps;
940 caps = _get_cpu_capabilities();
941
942 uint64_t mask = (uint64_t) (uintptr_t) arg1;
943 boolean_t is_capable = (caps & mask) != 0;
944
945 return SYSCTL_OUT(req, &is_capable, sizeof(is_capable));
946 }
/* Identity mapping: the capability() wrapper only documents intent here. */
#define capability(name) name


SYSCTL_PROC(_hw_optional, OID_AUTO, mmx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasMMX), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, sse, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, sse2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE2), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, sse3, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE3), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, supplementalsse3, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSupplementalSSE3), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, sse4_1, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE4_1), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, sse4_2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE4_2), 0, sysctl_cpu_capability, "I", "");
/* "x86_64" is actually a preprocessor symbol on the x86_64 kernel, so we have to hack this */
#undef x86_64
SYSCTL_PROC(_hw_optional, OID_AUTO, x86_64, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(k64Bit), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, aes, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAES), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx1_0, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX1_0), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, rdrand, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasRDRAND), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, f16c, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasF16C), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, enfstrg, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasENFSTRG), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, fma, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasFMA), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx2_0, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX2_0), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, bmi1, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasBMI1), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, bmi2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasBMI2), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, rtm, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasRTM), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, hle, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasHLE), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, adx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasADX), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, mpx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasMPX), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, sgx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSGX), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512f, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512F), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512cd, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512CD), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512dq, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512DQ), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512bw, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512BW), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512vl, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512VL), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512ifma, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512IFMA), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512vbmi, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512VBMI), 0, sysctl_cpu_capability, "I", "");
#undef capability
#endif /* __i386__ || __x86_64__ || CONFIG_X86_64_COMPAT */
983
984 #if defined (__arm__) || defined (__arm64__)
/* Debug hardware counts; filled in by sysctl_mib_init() from arm_debug_info(). */
int watchpoint_flag = 0;
int breakpoint_flag = 0;
SECURITY_READ_ONLY_LATE(int) gARMv8Crc32 = 0;

/*
 * Per-feature presence flags (1 = present). Presumably populated during ARM
 * CPU init from the ID registers named below — the population site is not in
 * this file, except for the __arm__ AdvSIMD fields set in sysctl_mib_init().
 */
/* Features from: ID_AA64ISAR0_EL1 */
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_FlagM = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_FlagM2 = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_FHM = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_DotProd = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_SHA3 = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_RDM = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_LSE = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_SHA256 = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_SHA512 = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_SHA1 = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_AES = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_PMULL = 0;

/* Features from: ID_AA64ISAR1_EL1 */
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_SPECRES = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_SB = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_FRINTTS = 0;
SECURITY_READ_ONLY_LATE(int) gARMv8Gpi = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_LRCPC = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_LRCPC2 = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_FCMA = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_JSCVT = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_PAuth = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_PAuth2 = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_FPAC = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_DPB = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_DPB2 = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_BF16 = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_I8MM = 0;

/* Features from: ID_AA64MMFR0_EL1 */
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_ECV = 0;

/* Features from: ID_AA64MMFR2_EL1 */
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_LSE2 = 0;

/* Features from: ID_AA64PFR0_EL1 */
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_CSV2 = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_CSV3 = 0;
SECURITY_READ_ONLY_LATE(int) gARM_AdvSIMD = 0;
SECURITY_READ_ONLY_LATE(int) gARM_AdvSIMD_HPFPCvt = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_FP16 = 0;

/* Features from: ID_AA64PFR1_EL1 */
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_SSBS = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_BTI = 0;

SECURITY_READ_ONLY_LATE(int) gUCNormalMem = 0;

/* arm64_flag: 1 only when building for __arm64__; backs hw.optional.arm64. */
#if defined (__arm__)
SECURITY_READ_ONLY_LATE(int) arm64_flag = 0;
#elif defined (__arm64__) /* end __arm__*/
SECURITY_READ_ONLY_LATE(int) arm64_flag = 1;
#else /* end __arm64__*/
SECURITY_READ_ONLY_LATE(int) arm64_flag = 0;
#endif
1046
/* Legacy Names ARM Optional Feature Sysctls */
/* Older spellings kept for compatibility; they alias the same backing flags as the FEAT_* sysctls below. */
SYSCTL_INT(_hw_optional, OID_AUTO, neon, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_AdvSIMD, 0, "");
SYSCTL_INT(_hw_optional, OID_AUTO, neon_hpfp, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_AdvSIMD_HPFPCvt, 0, "");
SYSCTL_INT(_hw_optional, OID_AUTO, neon_fp16, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_FP16, 0, "");
SYSCTL_INT(_hw_optional, OID_AUTO, armv8_1_atomics, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_LSE, 0, "");
SYSCTL_INT(_hw_optional, OID_AUTO, armv8_2_fhm, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_FHM, 0, "");
SYSCTL_INT(_hw_optional, OID_AUTO, armv8_2_sha512, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_SHA512, 0, "");
SYSCTL_INT(_hw_optional, OID_AUTO, armv8_2_sha3, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_SHA3, 0, "");
SYSCTL_INT(_hw_optional, OID_AUTO, armv8_3_compnum, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_FCMA, 0, "");

/* Misc ARM Optional Feature Sysctls */
/* Counts of hardware watchpoint/breakpoint pairs, from arm_debug_info() in sysctl_mib_init(). */
SYSCTL_INT(_hw_optional, OID_AUTO, watchpoint, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &watchpoint_flag, 0, "");
SYSCTL_INT(_hw_optional, OID_AUTO, breakpoint, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &breakpoint_flag, 0, "");
1060
1061 /**
1062 * Enumerated syscalls for every ARM optional feature to be exported to
1063 * userspace. These are to be enumerated using the official feature name from
1064 * the ARM ARM. They are grouped below based on the MSR that will be used to populate the data.
1065 */
1066
1067 /* Features from: ID_AA64ISAR0_EL1 */
1068 SYSCTL_INT(_hw_optional, OID_AUTO, armv8_crc32, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARMv8Crc32, 0, "");
1069 SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_FlagM, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_FlagM, 0, "");
1070 SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_FlagM2, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_FlagM2, 0, "");
1071 SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_FHM, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_FHM, 0, "");
1072 SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_DotProd, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_DotProd, 0, "");
1073 SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_SHA3, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_SHA3, 0, "");
1074 SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_RDM, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_RDM, 0, "");
1075 SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_LSE, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_LSE, 0, "");
1076 SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_SHA256, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_SHA256, 0, "");
1077 SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_SHA512, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_SHA512, 0, "");
1078 SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_SHA1, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_SHA1, 0, "");
1079 SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_AES, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_AES, 0, "");
1080 SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_PMULL, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_PMULL, 0, "");
1081
1082 /* Features from: ID_AA64ISAR1_EL1 */
1083 SYSCTL_INT(_hw_optional, OID_AUTO, armv8_gpi, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARMv8Gpi, 0, "");
1084 SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_SPECRES, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_SPECRES, 0, "");
1085 SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_SB, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_SB, 0, "");
1086 SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_FRINTTS, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_FRINTTS, 0, "");
1087 SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_LRCPC, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_LRCPC, 0, "");
1088 SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_LRCPC2, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_LRCPC2, 0, "");
1089 SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_FCMA, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_FCMA, 0, "");
1090 SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_JSCVT, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_JSCVT, 0, "");
1091 SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_PAuth, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_PAuth, 0, "");
1092 SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_PAuth2, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_PAuth2, 0, "");
1093 SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_FPAC, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_FPAC, 0, "");
1094 SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_DPB, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_DPB, 0, "");
1095 SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_DPB2, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_DPB2, 0, "");
1096 SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_BF16, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_BF16, 0, "");
1097 SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_I8MM, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_I8MM, 0, "");
1098
1099 /* Features from: ID_AA64MMFR0_EL1 */
1100 SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_ECV, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_ECV, 0, "");
1101
1102 /* Features from: ID_AA64MMFR2_EL1 */
1103 SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_LSE2, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_LSE2, 0, "");
1104
1105 /* Features from: ID_AA64PFR0_EL1 */
1106 SYSCTL_INT(_hw_optional, OID_AUTO, AdvSIMD, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_AdvSIMD, 0, "");
1107 SYSCTL_INT(_hw_optional, OID_AUTO, AdvSIMD_HPFPCvt, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_AdvSIMD_HPFPCvt, 0, "");
1108 SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_CSV2, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_CSV2, 0, "");
1109 SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_CSV3, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_CSV3, 0, "");
1110 SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_FP16, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_FP16, 0, "");
1111
1112 /* Features from: ID_AA64PFR1_EL1 */
1113 SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_SSBS, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_SSBS, 0, "");
1114 SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_BTI, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_BTI, 0, "");
1115
1116 SYSCTL_INT(_hw_optional, OID_AUTO, ucnormal_mem, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gUCNormalMem, 0, "");
1117
/* Debug/development-only visibility into build-time kernel configuration. */
#if DEBUG || DEVELOPMENT
#if __ARM_KERNEL_PROTECT__
static SECURITY_READ_ONLY_LATE(int) arm_kernel_protect = 1;
#else
static SECURITY_READ_ONLY_LATE(int) arm_kernel_protect = 0;
#endif
SYSCTL_INT(_hw_optional, OID_AUTO, arm_kernel_protect, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &arm_kernel_protect, 0, "");
#endif

#if DEBUG || DEVELOPMENT
static int ic_inval_filters = 0;
SYSCTL_INT(_hw_optional, OID_AUTO, ic_inval_filters, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &ic_inval_filters, 0, "");
#endif

#if DEBUG || DEVELOPMENT
#if __APPLE_WKDM_POPCNT_EXTENSIONS__
static SECURITY_READ_ONLY_LATE(int) wkdm_popcount = 1;
#else
static SECURITY_READ_ONLY_LATE(int) wkdm_popcount = 0;
#endif
SYSCTL_INT(_hw_optional, OID_AUTO, wkdm_popcount, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &wkdm_popcount, 0, "");
#endif

#if DEBUG || DEVELOPMENT
#if __has_feature(ptrauth_calls)
static SECURITY_READ_ONLY_LATE(int) ptrauth = 1;
#else
static SECURITY_READ_ONLY_LATE(int) ptrauth = 0;
#endif
SYSCTL_INT(_hw_optional, OID_AUTO, ptrauth, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &ptrauth, 0, "");
#endif

/*
 * Without this little ifdef dance, the preprocessor replaces "arm64" with "1",
 * leaving us with a less-than-helpful sysctl.hwoptional.1.
 */
#ifdef arm64
#undef arm64
SYSCTL_INT(_hw_optional, OID_AUTO, arm64, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &arm64_flag, 0, "");
#define arm64 1
#else
SYSCTL_INT(_hw_optional, OID_AUTO, arm64, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &arm64_flag, 0, "");
#endif
#endif /* __arm__ || __arm64__ */
1162
1163
1164 #if (defined(__arm__) || defined(__arm64__)) && defined(CONFIG_XNUPOST)
1165 /**
1166 * Test whether the new values for a few hw.optional sysctls matches the legacy
1167 * way of obtaining that information.
1168 *
1169 * Specifically, hw.optional.neon_fp16 has been used to indicate both FEAT_FP16
1170 * and FEAT_FHM, as we are now grabbing the information directly from the ISA
1171 * status registers instead of from the arm_mvfp_info, we need to check that
1172 * this new source won't break any existing usecases of the sysctl and assert
1173 * that hw.optional.neon_fp16 will return the same value as it used to for all
1174 * devices.
1175 */
1176 kern_return_t
arm_cpu_capabilities_legacy_test(void)1177 arm_cpu_capabilities_legacy_test(void)
1178 {
1179 T_SETUPBEGIN;
1180 arm_mvfp_info_t *mvfp_info = arm_mvfp_info();
1181 T_ASSERT_NOTNULL(mvfp_info, "arm_mvfp_info returned null pointer.");
1182 T_SETUPEND;
1183
1184
1185 T_EXPECT_EQ_INT(mvfp_info->neon, gARM_AdvSIMD, "neon value should match legacy");
1186 T_EXPECT_EQ_INT(mvfp_info->neon_hpfp, gARM_AdvSIMD_HPFPCvt, "neon hpfp cvt value should match legacy");
1187 T_EXPECT_EQ_INT(mvfp_info->neon_fp16, gARM_FEAT_FP16, "neon fp16 value should match legacy");
1188
1189 T_LOG("Completed arm cpu capabalities legacy compliance test.");
1190 return KERN_SUCCESS;
1191 }
1192 #endif /* (defined(__arm__) || defined(__arm64__)) && defined(CONFIG_XNUPOST) */
1193
1194 /******************************************************************************
1195 * Generic MIB initialisation.
1196 *
1197 * This is a hack, and should be replaced with SYSINITs
1198 * at some point.
1199 */
void
sysctl_mib_init(void)
{
	/* Derive the 64-bit-capable flag per architecture. */
#if defined(__i386__) || defined (__x86_64__)
cpu64bit = (_get_cpu_capabilities() & k64Bit) == k64Bit;
#elif defined(__arm__) || defined (__arm64__)
cpu64bit = (cpu_type() & CPU_ARCH_ABI64) == CPU_ARCH_ABI64;
#else
#error Unsupported arch
#endif
#if defined (__i386__) || defined (__x86_64__)
/* hw.cacheconfig */
/* Entry i = CPUs sharing cache level i per ml_cpu_cache_sharing; 0 terminates. */
cacheconfig[0] = ml_cpu_cache_sharing(0, CLUSTER_TYPE_SMP, true);
cacheconfig[1] = ml_cpu_cache_sharing(1, CLUSTER_TYPE_SMP, true);
cacheconfig[2] = ml_cpu_cache_sharing(2, CLUSTER_TYPE_SMP, true);
cacheconfig[3] = ml_cpu_cache_sharing(3, CLUSTER_TYPE_SMP, true);
cacheconfig[4] = 0;

/* hw.cachesize */
cachesize[0] = ml_cpu_cache_size(0);
cachesize[1] = ml_cpu_cache_size(1);
cachesize[2] = ml_cpu_cache_size(2);
cachesize[3] = ml_cpu_cache_size(3);
cachesize[4] = 0;

/* hw.packages */
/* Packages = level-0 sharing rounded up to whole packages of thread_count threads. */
packages = (int)(roundup(ml_cpu_cache_sharing(0, CLUSTER_TYPE_SMP, true), cpuid_info()->thread_count)
    / cpuid_info()->thread_count);

#elif defined(__arm__) || defined(__arm64__) /* end __i386 */
watchpoint_flag = arm_debug_info()->num_watchpoint_pairs;
breakpoint_flag = arm_debug_info()->num_breakpoint_pairs;

#if defined(__arm__)
/* 32-bit ARM: AdvSIMD flags come from the legacy arm_mvfp_info() source. */
arm_mvfp_info_t *mvfp_info = arm_mvfp_info();
gARM_AdvSIMD = mvfp_info->neon;
gARM_AdvSIMD_HPFPCvt = mvfp_info->neon_hpfp;
gARM_FEAT_FP16 = mvfp_info->neon_fp16;
#endif /* __arm__ */

/* Lowest perflevel's cluster type drives the per-level cache queries below. */
cluster_type_t min_perflevel_cluster_type = cpu_type_for_perflevel(__builtin_popcount(ml_get_cpu_types()) - 1);

/* On ARM, entry 0 is the CPU count and entries 1-2 are L1/L2 sharing; rest zero. */
cacheconfig[0] = ml_wait_max_cpus();
cacheconfig[1] = ml_cpu_cache_sharing(1, min_perflevel_cluster_type, true);
cacheconfig[2] = ml_cpu_cache_sharing(2, min_perflevel_cluster_type, true);
cacheconfig[3] = 0;
cacheconfig[4] = 0;
cacheconfig[5] = 0;
cacheconfig[6] = 0;

/* Entry 0 is total machine memory; 1-2 are D-cache and L2 sizes. */
cachesize[0] = ml_get_machine_mem();
cachesize[1] = cache_info_type(min_perflevel_cluster_type)->c_dsize; /* Using the DCache */
cachesize[2] = cache_info_type(min_perflevel_cluster_type)->c_l2size;
cachesize[3] = 0;
cachesize[4] = 0;

packages = 1;
#else
#error unknown architecture
#endif /* !__i386__ && !__x86_64 && !__arm__ && !__arm64__ */
}
1261
1262 __startup_func
1263 static void
sysctl_mib_startup(void)1264 sysctl_mib_startup(void)
1265 {
1266 cputhreadtype = cpu_threadtype();
1267
1268 /*
1269 * Populate the optional portion of the hw.* MIB.
1270 *
1271 * XXX This could be broken out into parts of the code
1272 * that actually directly relate to the functions in
1273 * question.
1274 */
1275
1276 if (cputhreadtype != CPU_THREADTYPE_NONE) {
1277 sysctl_register_oid_early(&sysctl__hw_cputhreadtype);
1278 }
1279
1280 }
1281 STARTUP(SYSCTL, STARTUP_RANK_MIDDLE, sysctl_mib_startup);
1282