xref: /xnu-8792.61.2/bsd/kern/kern_mib.c (revision 42e220869062b56f8d7d0726fd4c88954f87902c)
1 /*
2  * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*-
29  * Copyright (c) 1982, 1986, 1989, 1993
30  *	The Regents of the University of California.  All rights reserved.
31  *
32  * This code is derived from software contributed to Berkeley by
33  * Mike Karels at Berkeley Software Design, Inc.
34  *
35  * Quite extensively rewritten by Poul-Henning Kamp of the FreeBSD
36  * project, to make these variables more userfriendly.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  * 3. All advertising materials mentioning features or use of this software
47  *    must display the following acknowledgement:
48  *	This product includes software developed by the University of
49  *	California, Berkeley and its contributors.
50  * 4. Neither the name of the University nor the names of its contributors
51  *    may be used to endorse or promote products derived from this software
52  *    without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64  * SUCH DAMAGE.
65  *
66  *	@(#)kern_sysctl.c	8.4 (Berkeley) 4/14/94
67  */
68 
69 #include <sys/param.h>
70 #include <sys/kernel.h>
71 #include <sys/systm.h>
72 #include <sys/sysctl.h>
73 #include <sys/proc_internal.h>
74 #include <sys/unistd.h>
75 
76 #if defined(SMP)
77 #include <machine/smp.h>
78 #endif
79 
80 #include <sys/param.h>  /* XXX prune includes */
81 #include <sys/systm.h>
82 #include <sys/kernel.h>
83 #include <sys/malloc.h>
84 #include <sys/proc.h>
85 #include <sys/file_internal.h>
86 #include <sys/vnode.h>
87 #include <sys/unistd.h>
88 #include <sys/ioctl.h>
89 #include <sys/namei.h>
90 #include <sys/tty.h>
91 #include <sys/disklabel.h>
92 #include <sys/vm.h>
93 #include <sys/sysctl.h>
94 #include <sys/user.h>
95 #include <mach/machine.h>
96 #include <mach/mach_types.h>
97 #include <mach/vm_param.h>
98 #include <kern/task.h>
99 #include <vm/vm_kern.h>
100 #include <vm/vm_map.h>
101 #include <vm/vm_protos.h>
102 #include <mach/host_info.h>
103 #include <kern/pms.h>
104 #include <pexpert/device_tree.h>
105 #include <pexpert/pexpert.h>
106 #include <kern/sched_prim.h>
107 #include <console/serial_protos.h>
108 
109 extern vm_map_t bsd_pageable_map;
110 
111 #include <sys/mount_internal.h>
112 #include <sys/kdebug.h>
113 
114 #include <IOKit/IOPlatformExpert.h>
115 #include <pexpert/pexpert.h>
116 
117 #include <machine/config.h>
118 #include <machine/machine_routines.h>
119 #include <machine/cpu_capabilities.h>
120 
121 #include <mach/mach_host.h>             /* for host_info() */
122 
123 #if defined(__i386__) || defined(__x86_64__)
124 #include <i386/cpuid.h> /* for cpuid_info() */
125 #endif
126 
127 #if defined(__arm64__)
128 #include <arm/cpuid.h>          /* for cpuid_info() & cache_info() */
129 #endif
130 
131 #if defined(CONFIG_XNUPOST)
132 #include <tests/ktest.h>
133 #endif
134 
135 /**
136  * Prevents an issue with creating the sysctl node hw.optional.arm on some
137  * platforms. If the 'arm' macro is defined, then the word "arm" is preprocessed
138  * to 1. As the 'arm' macro is not used in this file, we do not need to redefine
139  * after we are done.
140  */
141 #if defined(arm)
142 #undef arm
143 #endif /* defined(arm) */
144 
#ifndef MAX
/*
 * Fallback MAX for translation units that don't already have one.
 * Arguments and the full expansion are parenthesized so operands that
 * contain lower-precedence operators (e.g. MAX(x, a ? b : c)) expand
 * correctly.  Arguments are still evaluated more than once, so do not
 * pass expressions with side effects.
 */
#define MAX(a, b) ((a) >= (b) ? (a) : (b))
#endif
148 
149 #if defined(__arm64__) && defined(CONFIG_XNUPOST)
150 kern_return_t arm_cpu_capabilities_legacy_test(void);
151 #endif /* defined(__arm64__) && defined(CONFIG_XNUPOST) */
152 
/* XXX This should be in a BSD accessible Mach header, but isn't. */
extern unsigned int vm_page_wire_count;

/*
 * Cached CPU/cache topology values surfaced through hw.* sysctls
 * (cpu64bit backs hw.cpu64bit_capable; cacheconfig/cachesize back
 * hw.cacheconfig/hw.cachesize).  cputhreadtype and packages are
 * presumably populated elsewhere in this file — not in this chunk.
 */
static int      cputhreadtype, cpu64bit;
static uint64_t cacheconfig[10], cachesize[10];
static int      packages;

/* Copy of the "osenvironment" value published via hw.* (see
 * sysctl_set_osenvironment()).  osenvironment_initialized doubles as a
 * flag and a wakeup event (see sysctl_unblock_osenvironment()). */
static char *   osenvironment = NULL;
static uint32_t osenvironment_size = 0;
static int      osenvironment_initialized = 0;

/* Values of optional /chosen device tree properties, loaded at startup
 * by sysctl_load_devicetree_entries(). */
static uint32_t ephemeral_storage = 0;
static uint32_t use_recovery_securityd = 0;

/* Records whether each optional device tree property was actually
 * present, so the corresponding sysctls can return EINVAL otherwise. */
static struct {
	uint32_t ephemeral_storage:1;
	uint32_t use_recovery_securityd:1;
} property_existence = {0, 0};
171 
/*
 * Top-level sysctl namespace roots.  The numeric first-level OIDs
 * (CTL_KERN, CTL_VM, ...) come from sys/sysctl.h.
 */
SYSCTL_EXTENSIBLE_NODE(, 0, sysctl, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "Sysctl internal magic");
SYSCTL_EXTENSIBLE_NODE(, CTL_KERN, kern, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "High kernel, proc, limits &c");
SYSCTL_EXTENSIBLE_NODE(, CTL_VM, vm, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "Virtual memory");
SYSCTL_EXTENSIBLE_NODE(, CTL_VFS, vfs, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "File system");
SYSCTL_EXTENSIBLE_NODE(, CTL_NET, net, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "Network, (see socket.h)");
SYSCTL_EXTENSIBLE_NODE(, CTL_DEBUG, debug, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "Debugging");
#if DEBUG || DEVELOPMENT
/* debug.test.* — only present on DEBUG/DEVELOPMENT kernels. */
SYSCTL_NODE(_debug, OID_AUTO, test,
    CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_MASKED, 0, "tests");
#endif
SYSCTL_NODE(, CTL_HW, hw, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "hardware");
SYSCTL_EXTENSIBLE_NODE(, CTL_MACHDEP, machdep, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "machine dependent");
SYSCTL_NODE(, CTL_USER, user, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "user-level");

SYSCTL_NODE(_kern, OID_AUTO, bridge, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "bridge");

/* Convenience wrapper: copy a fixed-size scalar out to the requester. */
#define SYSCTL_RETURN(r, x)     SYSCTL_OUT(r, &x, sizeof(x))
199 
/******************************************************************************
 * hw.* MIB
 */

/*
 * Private flag bits OR'd into the arg2 selector passed to
 * sysctl_hw_generic():
 *   CTLHW_RETQUAD   - caller registered a quad-width OID; return the value
 *                     as a 64-bit quantity (see `doquad` handling).
 *   CTLHW_LOCAL     - selector is an XNU-local extension, kept out of the
 *                     public low-numbered HW_* namespace.
 *   CTLHW_PERFLEVEL - selector is per-perflevel; arg1 carries the
 *                     perflevel index, validated before dispatch.
 */
#define CTLHW_RETQUAD   (1U << 31)
#define CTLHW_LOCAL     (1U << 30)
#define CTLHW_PERFLEVEL (1U << 29)

#define HW_LOCAL_CPUTHREADTYPE        (1 | CTLHW_LOCAL)
#define HW_LOCAL_PHYSICALCPU          (2 | CTLHW_LOCAL)
#define HW_LOCAL_PHYSICALCPUMAX       (3 | CTLHW_LOCAL)
#define HW_LOCAL_LOGICALCPU           (4 | CTLHW_LOCAL)
#define HW_LOCAL_LOGICALCPUMAX        (5 | CTLHW_LOCAL)
#define HW_LOCAL_CPUTYPE              (6 | CTLHW_LOCAL)
#define HW_LOCAL_CPUSUBTYPE           (7 | CTLHW_LOCAL)
#define HW_LOCAL_CPUFAMILY            (8 | CTLHW_LOCAL)
#define HW_LOCAL_CPUSUBFAMILY         (9 | CTLHW_LOCAL)
#define HW_NPERFLEVELS                (10 | CTLHW_LOCAL)
#define HW_PERFLEVEL_PHYSICALCPU      (11 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_PHYSICALCPUMAX   (12 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_LOGICALCPU       (13 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_LOGICALCPUMAX    (14 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_L1ICACHESIZE     (15 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_L1DCACHESIZE     (16 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_L2CACHESIZE      (17 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_CPUSPERL2        (18 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_L3CACHESIZE      (19 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_CPUSPERL3        (20 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_NAME             (21 | CTLHW_PERFLEVEL)
229 
230 
/*
 * For a given perflevel, return the corresponding CPU type.
 *
 * ml_get_cpu_types() returns a bitmask with one bit set per cluster
 * type present on the system; the bit's position is the cluster_type_t
 * value.  The match target (n_perflevels - (perflevel + 1)) reverses
 * the ordering: perflevel 0 maps to the highest set bit, the last
 * perflevel to the lowest set bit.
 */
cluster_type_t cpu_type_for_perflevel(int perflevel);
cluster_type_t
cpu_type_for_perflevel(int perflevel)
{
	unsigned int cpu_types = ml_get_cpu_types();
	unsigned int n_perflevels = __builtin_popcount(cpu_types);

	/* perflevel must index an existing level (checked by callers too). */
	assert((perflevel >= 0) && (perflevel < n_perflevels));

	int current_idx = 0, current_perflevel = -1;

	/* Walk the mask from bit 0; count set bits until the reversed index
	 * of the requested perflevel is reached, then return that bit's
	 * position as the cluster type. */
	while (cpu_types) {
		current_perflevel += cpu_types & 1;
		if (current_perflevel == (n_perflevels - (perflevel + 1))) {
			return current_idx;
		}

		cpu_types >>= 1;
		current_idx++;
	}

	/* Unreachable when the assert holds; defensive fallback. */
	return 0;
}
257 
/*
 * Supporting some variables requires us to do "real" work.  We
 * gather some of that here.
 *
 * Shared read handler for most hw.* OIDs.  arg2 selects which value to
 * report (HW_* / HW_LOCAL_* / HW_PERFLEVEL_* selectors, possibly OR'd
 * with CTLHW_RETQUAD); for CTLHW_PERFLEVEL selectors, arg1 carries the
 * perflevel index.  Returns a sysctl/errno value: ENOENT for a
 * nonexistent perflevel, EINVAL for unavailable data, ENOTSUP for an
 * unknown selector.
 */
static int
sysctl_hw_generic(__unused struct sysctl_oid *oidp, void *arg1,
    int arg2, struct sysctl_req *req)
{
	char dummy[65];         /* scratch buffer for string-valued OIDs */
	int  epochTemp;
	ml_cpu_info_t cpu_info;
	int val, doquad;
	long long qval;
	unsigned int cpu_count;
	host_basic_info_data_t hinfo;
	kern_return_t kret;
	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

	/*
	 * If we are using one of the perflevel sysctls, return early if the perflevel
	 * does not exist in this system.  Otherwise default to the last
	 * perflevel (index n_perflevels - 1).
	 */
	int perflevel = (int)arg1;
	int n_perflevels = __builtin_popcount(ml_get_cpu_types());

	if (arg2 & CTLHW_PERFLEVEL) {
		if ((perflevel < 0) || (perflevel >= n_perflevels)) {
			return ENOENT;
		}
	} else {
		perflevel = n_perflevels - 1;
	}

	/*
	 * Test and mask off the 'return quad' flag.
	 * Note that only some things here support it.
	 */
	doquad = arg2 & CTLHW_RETQUAD;
	arg2 &= ~CTLHW_RETQUAD;

	/* Cache/CPU details for the cluster type backing this perflevel. */
	ml_cpu_get_info_type(&cpu_info, cpu_type_for_perflevel(perflevel));

#define BSD_HOST 1
	kret = host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);

	/*
	 * Handle various OIDs.
	 *
	 * OIDs that can return int or quad set val and qval and then break.
	 * Errors and int-only values return inline.
	 */
	switch (arg2) {
	/* CPU counts from host_info(); EINVAL if that call failed. */
	case HW_NCPU:
		if (kret == KERN_SUCCESS) {
			return SYSCTL_RETURN(req, hinfo.max_cpus);
		} else {
			return EINVAL;
		}
	case HW_AVAILCPU:
		if (kret == KERN_SUCCESS) {
			return SYSCTL_RETURN(req, hinfo.avail_cpus);
		} else {
			return EINVAL;
		}
	case HW_LOCAL_PHYSICALCPU:
		if (kret == KERN_SUCCESS) {
			return SYSCTL_RETURN(req, hinfo.physical_cpu);
		} else {
			return EINVAL;
		}
	case HW_LOCAL_PHYSICALCPUMAX:
		if (kret == KERN_SUCCESS) {
			return SYSCTL_RETURN(req, hinfo.physical_cpu_max);
		} else {
			return EINVAL;
		}
	case HW_LOCAL_LOGICALCPU:
		if (kret == KERN_SUCCESS) {
			return SYSCTL_RETURN(req, hinfo.logical_cpu);
		} else {
			return EINVAL;
		}
	case HW_LOCAL_LOGICALCPUMAX:
		if (kret == KERN_SUCCESS) {
			return SYSCTL_RETURN(req, hinfo.logical_cpu_max);
		} else {
			return EINVAL;
		}
	case HW_NPERFLEVELS:
		return SYSCTL_RETURN(req, n_perflevels);
	/* Per-perflevel CPU counts: (type, logical?, available-only?). */
	case HW_PERFLEVEL_PHYSICALCPU:
		cpu_count = ml_get_cpu_number_type(cpu_type_for_perflevel(perflevel), false, true);
		return SYSCTL_RETURN(req, cpu_count);
	case HW_PERFLEVEL_PHYSICALCPUMAX:
		cpu_count = ml_get_cpu_number_type(cpu_type_for_perflevel(perflevel), false, false);
		return SYSCTL_RETURN(req, cpu_count);
	case HW_PERFLEVEL_LOGICALCPU:
		cpu_count = ml_get_cpu_number_type(cpu_type_for_perflevel(perflevel), true, true);
		return SYSCTL_RETURN(req, cpu_count);
	case HW_PERFLEVEL_LOGICALCPUMAX:
		cpu_count = ml_get_cpu_number_type(cpu_type_for_perflevel(perflevel), true, false);
		return SYSCTL_RETURN(req, cpu_count);
	case HW_PERFLEVEL_L1ICACHESIZE:
		val = (int)cpu_info.l1_icache_size;
		qval = (long long)cpu_info.l1_icache_size;
		break;
	case HW_PERFLEVEL_L1DCACHESIZE:
		val = (int)cpu_info.l1_dcache_size;
		qval = (long long)cpu_info.l1_dcache_size;
		break;
	case HW_PERFLEVEL_L2CACHESIZE:
		val = (int)cpu_info.l2_cache_size;
		qval = (long long)cpu_info.l2_cache_size;
		break;
	case HW_PERFLEVEL_CPUSPERL2:
		cpu_count = ml_cpu_cache_sharing(2, cpu_type_for_perflevel(perflevel), false);
		return SYSCTL_RETURN(req, cpu_count);
	/* UINT32_MAX in l3_cache_size means "no L3 on this cluster type". */
	case HW_PERFLEVEL_L3CACHESIZE:
		if (cpu_info.l3_cache_size == UINT32_MAX) {
			return EINVAL;
		}
		val = (int)cpu_info.l3_cache_size;
		qval = (long long)cpu_info.l3_cache_size;
		break;
	case HW_PERFLEVEL_CPUSPERL3:
		if (cpu_info.l3_cache_size == UINT32_MAX) {
			return EINVAL;
		}
		cpu_count = ml_cpu_cache_sharing(3, cpu_type_for_perflevel(perflevel), false);
		return SYSCTL_RETURN(req, cpu_count);
	case HW_PERFLEVEL_NAME:
		bzero(dummy, sizeof(dummy));
		ml_get_cluster_type_name(cpu_type_for_perflevel(perflevel), dummy, sizeof(dummy));
		return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
	case HW_LOCAL_CPUTYPE:
		if (kret == KERN_SUCCESS) {
			return SYSCTL_RETURN(req, hinfo.cpu_type);
		} else {
			return EINVAL;
		}
	case HW_LOCAL_CPUSUBTYPE:
		if (kret == KERN_SUCCESS) {
			return SYSCTL_RETURN(req, hinfo.cpu_subtype);
		} else {
			return EINVAL;
		}
	case HW_LOCAL_CPUFAMILY:
	{
		int cpufamily = 0;
#if defined (__i386__) || defined (__x86_64__)
		cpufamily = cpuid_cpufamily();
#elif defined(__arm64__)
		{
			cpufamily = cpuid_get_cpufamily();
		}
#else
#error unknown architecture
#endif
		return SYSCTL_RETURN(req, cpufamily);
	}
	case HW_LOCAL_CPUSUBFAMILY:
	{
		int cpusubfamily = 0;
#if defined (__i386__) || defined (__x86_64__)
		cpusubfamily = CPUSUBFAMILY_UNKNOWN;
#elif defined(__arm64__)
		{
			cpusubfamily = cpuid_get_cpusubfamily();
		}
#else
#error unknown architecture
#endif
		return SYSCTL_RETURN(req, cpusubfamily);
	}
	case HW_PAGESIZE:
	{
		/* Page size as seen by the calling task's address space. */
		vm_map_t map = get_task_map(current_task());
		val = vm_map_page_size(map);
		qval = (long long)val;
		break;
	}
	case HW_CACHELINE:
		val = (int)cpu_info.cache_line_size;
		qval = (long long)val;
		break;
	case HW_L1ICACHESIZE:
		val = (int)cpu_info.l1_icache_size;
		qval = (long long)cpu_info.l1_icache_size;
		break;
	case HW_L1DCACHESIZE:
		val = (int)cpu_info.l1_dcache_size;
		qval = (long long)cpu_info.l1_dcache_size;
		break;
	case HW_L2CACHESIZE:
		if (cpu_info.l2_cache_size == UINT32_MAX) {
			return EINVAL;
		}
		val = (int)cpu_info.l2_cache_size;
		qval = (long long)cpu_info.l2_cache_size;
		break;
	case HW_L3CACHESIZE:
		if (cpu_info.l3_cache_size == UINT32_MAX) {
			return EINVAL;
		}
		val = (int)cpu_info.l3_cache_size;
		qval = (long long)cpu_info.l3_cache_size;
		break;
	case HW_TARGET:
		bzero(dummy, sizeof(dummy));
		if (!PEGetTargetName(dummy, 64)) {
			return EINVAL;
		}
		dummy[64] = 0;
		return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
	case HW_PRODUCT:
		bzero(dummy, sizeof(dummy));
		if (!PEGetProductName(dummy, 64)) {
			return EINVAL;
		}
		dummy[64] = 0;
		return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);

		/*
		 * Deprecated variables.  We still support these for
		 * backwards compatibility purposes only.
		 */
#if XNU_TARGET_OS_OSX && defined(__arm64__)
	/* The following two are kludged for backward
	 * compatibility. Use hw.product/hw.target for something
	 * consistent instead. */

	case HW_MACHINE:
		bzero(dummy, sizeof(dummy));
		if (proc_platform(req->p) == PLATFORM_IOS) {
			/* iOS-on-Mac processes don't expect the macOS kind of
			 * hw.machine, e.g. "arm64", but are used to seeing
			 * a product string on iOS, which we here hardcode
			 * to return as "iPad8,6" for compatibility.
			 *
			 * Another reason why hw.machine and hw.model are
			 * trouble and hw.target+hw.product should be used
			 * instead.
			 */

			strlcpy(dummy, "iPad8,6", sizeof(dummy));
		}
		else {
			strlcpy(dummy, "arm64", sizeof(dummy));
		}
		dummy[64] = 0;
		return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
	case HW_MODEL:
		bzero(dummy, sizeof(dummy));
		if (!PEGetProductName(dummy, 64)) {
			return EINVAL;
		}
		dummy[64] = 0;
		return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
#else
	case HW_MACHINE:
		bzero(dummy, sizeof(dummy));
		if (!PEGetMachineName(dummy, 64)) {
			return EINVAL;
		}
		dummy[64] = 0;
		return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
	case HW_MODEL:
		bzero(dummy, sizeof(dummy));
		if (!PEGetModelName(dummy, 64)) {
			return EINVAL;
		}
		dummy[64] = 0;
		return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
#endif
	case HW_USERMEM:
	{
		/* Memory not wired down by the kernel; truncated to int. */
		int usermem = (int)(mem_size - vm_page_wire_count * page_size);

		return SYSCTL_RETURN(req, usermem);
	}
	case HW_EPOCH:
		epochTemp = PEGetPlatformEpoch();
		if (epochTemp == -1) {
			return EINVAL;
		}
		return SYSCTL_RETURN(req, epochTemp);
	case HW_VECTORUNIT: {
		/* Normalized to a 0/1 flag regardless of the raw value. */
		int vector = cpu_info.vector_unit == 0? 0 : 1;
		return SYSCTL_RETURN(req, vector);
	}
	case HW_L2SETTINGS:
		if (cpu_info.l2_cache_size == UINT32_MAX) {
			return EINVAL;
		}
		return SYSCTL_RETURN(req, cpu_info.l2_settings);
	case HW_L3SETTINGS:
		if (cpu_info.l3_cache_size == UINT32_MAX) {
			return EINVAL;
		}
		return SYSCTL_RETURN(req, cpu_info.l3_settings);
	default:
		return ENOTSUP;
	}
	/*
	 * Callers may come to us with either int or quad buffers.
	 */
	if (doquad) {
		return SYSCTL_RETURN(req, qval);
	}
	return SYSCTL_RETURN(req, val);
}
569 
570 /* hw.pagesize and hw.tbfrequency are expected as 64 bit values */
571 static int
sysctl_pagesize(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)572 sysctl_pagesize
573 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
574 {
575 	vm_map_t map = get_task_map(current_task());
576 	long long l = vm_map_page_size(map);
577 	return sysctl_io_number(req, l, sizeof(l), NULL, NULL);
578 }
579 
580 static int
sysctl_pagesize32(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)581 sysctl_pagesize32
582 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
583 {
584 	long long l;
585 #if __arm64__
586 	l = (long long) (1 << page_shift_user32);
587 #else /* __arm64__ */
588 	l = (long long) PAGE_SIZE;
589 #endif /* __arm64__ */
590 	return sysctl_io_number(req, l, sizeof(l), NULL, NULL);
591 }
592 
593 static int
sysctl_tbfrequency(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)594 sysctl_tbfrequency
595 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
596 {
597 	long long l = gPEClockFrequencyInfo.timebase_frequency_hz;
598 	return sysctl_io_number(req, l, sizeof(l), NULL, NULL);
599 }
600 
601 /*
602  * Called by IOKit on Intel, or by sysctl_load_devicetree_entries()
603  */
604 void
sysctl_set_osenvironment(unsigned int size,const void * value)605 sysctl_set_osenvironment(unsigned int size, const void* value)
606 {
607 	if (osenvironment_size == 0 && size > 0) {
608 		osenvironment = zalloc_permanent(size, ZALIGN_NONE);
609 		if (osenvironment) {
610 			memcpy(osenvironment, value, size);
611 			osenvironment_size = size;
612 		}
613 	}
614 }
615 
/*
 * Signal that the osenvironment value has been published: bump the
 * initialized flag, then wake any thread blocked on it in
 * sysctl_osenvironment().
 */
void
sysctl_unblock_osenvironment(void)
{
	os_atomic_inc(&osenvironment_initialized, relaxed);
	thread_wakeup((event_t) &osenvironment_initialized);
}
622 
/*
 * Create sysctl entries coming from device tree.
 *
 * Entries from device tree are loaded here because SecureDTLookupEntry() only works before
 * PE_init_iokit(). Doing this also avoids the extern-C hackery to access these entries
 * from IORegistry (which requires C++).
 */
__startup_func
static void
sysctl_load_devicetree_entries(void)
{
	DTEntry chosen;
	void const *value;
	unsigned int size;

	/* All properties of interest live under /chosen. */
	if (kSuccess != SecureDTLookupEntry(0, "/chosen", &chosen)) {
		return;
	}

	/* load osenvironment */
	if (kSuccess == SecureDTGetProperty(chosen, "osenvironment", (void const **) &value, &size)) {
		sysctl_set_osenvironment(size, value);
	}

	/* load ephemeral_storage (only honored if exactly 32 bits wide) */
	if (kSuccess == SecureDTGetProperty(chosen, "ephemeral-storage", (void const **) &value, &size)) {
		if (size == sizeof(uint32_t)) {
			ephemeral_storage = *(uint32_t const *)value;
			property_existence.ephemeral_storage = 1;
		}
	}

	/* load use_recovery_securityd (only honored if exactly 32 bits wide) */
	if (kSuccess == SecureDTGetProperty(chosen, "use-recovery-securityd", (void const **) &value, &size)) {
		if (size == sizeof(uint32_t)) {
			use_recovery_securityd = *(uint32_t const *)value;
			property_existence.use_recovery_securityd = 1;
		}
	}
}
/* Run once during boot, before PE_init_iokit() (see comment above). */
STARTUP(SYSCTL, STARTUP_RANK_MIDDLE, sysctl_load_devicetree_entries);
664 
/*
 * hw.osenvironment: return the recorded osenvironment string, or EINVAL
 * if none was ever set.
 */
static int
sysctl_osenvironment
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#if defined(__x86_64__)
#if (DEVELOPMENT || DEBUG)
	/*
	 * On Intel the value is set by IOKit (see sysctl_set_osenvironment()),
	 * so block until sysctl_unblock_osenvironment() signals it is ready.
	 * The flag is re-checked after assert_wait() to close the window where
	 * the wakeup lands between the first check and the wait registration;
	 * in that case the wait is cancelled instead of blocking forever.
	 */
	if (os_atomic_load(&osenvironment_initialized, relaxed) == 0) {
		assert_wait((event_t) &osenvironment_initialized, THREAD_UNINT);
		if (os_atomic_load(&osenvironment_initialized, relaxed) != 0) {
			clear_wait(current_thread(), THREAD_AWAKENED);
		} else {
			(void) thread_block(THREAD_CONTINUE_NULL);
		}
	}
#endif
#endif
	if (osenvironment_size > 0) {
		return SYSCTL_OUT(req, osenvironment, osenvironment_size);
	} else {
		return EINVAL;
	}
}
687 
688 static int
sysctl_ephemeral_storage(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)689 sysctl_ephemeral_storage
690 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
691 {
692 	if (property_existence.ephemeral_storage) {
693 		return SYSCTL_OUT(req, &ephemeral_storage, sizeof(ephemeral_storage));
694 	} else {
695 		return EINVAL;
696 	}
697 }
698 
699 static int
sysctl_use_recovery_securityd(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)700 sysctl_use_recovery_securityd
701 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
702 {
703 	if (property_existence.use_recovery_securityd) {
704 		return SYSCTL_OUT(req, &use_recovery_securityd, sizeof(use_recovery_securityd));
705 	} else {
706 		return EINVAL;
707 	}
708 }
709 
/*
 * Report whether kernelmanagerd is in use.  On macOS: forced to 1 when
 * the primary kernel cache is a fileset; otherwise defaults to 1 but
 * can be overridden by the "kernelmanagerd" boot-arg.  Always 0 on
 * non-macOS targets.
 */
static int
sysctl_use_kernelmanagerd
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#if XNU_TARGET_OS_OSX
	static int use_kernelmanagerd = 1;
	static bool once = false;

	/*
	 * NOTE(review): `once` is not synchronized; concurrent first calls
	 * may both evaluate the value.  Appears benign since the computed
	 * result is deterministic — confirm if this can race with writers.
	 */
	if (!once) {
		kc_format_t kc_format;
		PE_get_primary_kc_format(&kc_format);
		if (kc_format == KCFormatFileset) {
			use_kernelmanagerd = 1;
		} else {
			PE_parse_boot_argn("kernelmanagerd", &use_kernelmanagerd, sizeof(use_kernelmanagerd));
		}
		once = true;
	}
#else
	static int use_kernelmanagerd = 0;
#endif
	return SYSCTL_OUT(req, &use_kernelmanagerd, sizeof(use_kernelmanagerd));
}
733 
/* arg2 selectors shared by sysctl_bus_frequency() / sysctl_cpu_frequency(). */
#define HW_LOCAL_FREQUENCY             1
#define HW_LOCAL_FREQUENCY_MIN         2
#define HW_LOCAL_FREQUENCY_MAX         3
#define HW_LOCAL_FREQUENCY_CLOCK_RATE  4
738 
739 static int
sysctl_bus_frequency(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,__unused struct sysctl_req * req)740 sysctl_bus_frequency
741 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
742 {
743 
744 #if DEBUG || DEVELOPMENT || !defined(__arm64__)
745 	switch (arg2) {
746 	case HW_LOCAL_FREQUENCY:
747 		return SYSCTL_RETURN(req, gPEClockFrequencyInfo.bus_frequency_hz);
748 	case HW_LOCAL_FREQUENCY_MIN:
749 		return SYSCTL_RETURN(req, gPEClockFrequencyInfo.bus_frequency_min_hz);
750 	case HW_LOCAL_FREQUENCY_MAX:
751 		return SYSCTL_RETURN(req, gPEClockFrequencyInfo.bus_frequency_max_hz);
752 	case HW_LOCAL_FREQUENCY_CLOCK_RATE:
753 		return SYSCTL_OUT(req, &gPEClockFrequencyInfo.bus_clock_rate_hz, sizeof(int));
754 	default:
755 		return EINVAL;
756 	}
757 #else
758 	return ENOENT;
759 #endif
760 }
761 
762 static int
sysctl_cpu_frequency(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,__unused struct sysctl_req * req)763 sysctl_cpu_frequency
764 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
765 {
766 
767 #if DEBUG || DEVELOPMENT || !defined(__arm64__)
768 	switch (arg2) {
769 	case HW_LOCAL_FREQUENCY:
770 		return SYSCTL_RETURN(req, gPEClockFrequencyInfo.cpu_frequency_hz);
771 	case HW_LOCAL_FREQUENCY_MIN:
772 		return SYSCTL_RETURN(req, gPEClockFrequencyInfo.cpu_frequency_min_hz);
773 	case HW_LOCAL_FREQUENCY_MAX:
774 		return SYSCTL_RETURN(req, gPEClockFrequencyInfo.cpu_frequency_max_hz);
775 	case HW_LOCAL_FREQUENCY_CLOCK_RATE:
776 		return SYSCTL_OUT(req, &gPEClockFrequencyInfo.cpu_clock_rate_hz, sizeof(int));
777 	default:
778 		return EINVAL;
779 	}
780 #else
781 	return ENOENT;
782 #endif
783 }
784 
785 /*
786  *  This sysctl will signal to userspace that a serial console is desired:
787  *
788  *    hw.serialdebugmode = 1 will load the serial console job in the multi-user session;
789  *    hw.serialdebugmode = 2 will load the serial console job in the base system as well
790  */
791 static int
sysctl_serialdebugmode(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)792 sysctl_serialdebugmode
793 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
794 {
795 	uint32_t serial_boot_arg;
796 	int serialdebugmode = 0;
797 
798 	if (PE_parse_boot_argn("serial", &serial_boot_arg, sizeof(serial_boot_arg)) &&
799 	    (serial_boot_arg & SERIALMODE_OUTPUT) && (serial_boot_arg & SERIALMODE_INPUT)) {
800 		serialdebugmode = (serial_boot_arg & SERIALMODE_BASE_TTY) ? 2 : 1;
801 	}
802 
803 	return sysctl_io_number(req, serialdebugmode, sizeof(serialdebugmode), NULL, NULL);
804 }
805 
/*
 * hw.* MIB variables.
 */
/* CPU counts and identification (mostly backed by sysctl_hw_generic). */
SYSCTL_PROC(_hw, HW_NCPU, ncpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_NCPU, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, HW_AVAILCPU, activecpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_AVAILCPU, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, OID_AUTO, physicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_PHYSICALCPU, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, OID_AUTO, physicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_PHYSICALCPUMAX, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, OID_AUTO, logicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_LOGICALCPU, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, OID_AUTO, logicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_LOGICALCPUMAX, sysctl_hw_generic, "I", "");
SYSCTL_INT(_hw, HW_BYTEORDER, byteorder, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (int *)NULL, BYTE_ORDER, "");
SYSCTL_PROC(_hw, OID_AUTO, cputype, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_CPUTYPE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, OID_AUTO, cpusubtype, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_CPUSUBTYPE, sysctl_hw_generic, "I", "");
SYSCTL_INT(_hw, OID_AUTO, cpu64bit_capable, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &cpu64bit, 0, "");
SYSCTL_PROC(_hw, OID_AUTO, cpufamily, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_CPUFAMILY, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, OID_AUTO, cpusubfamily, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_CPUSUBFAMILY, sysctl_hw_generic, "I", "");
/* Cache topology snapshots (static arrays filled in elsewhere). */
SYSCTL_OPAQUE(_hw, OID_AUTO, cacheconfig, CTLFLAG_RD | CTLFLAG_LOCKED, &cacheconfig, sizeof(cacheconfig), "Q", "");
SYSCTL_OPAQUE(_hw, OID_AUTO, cachesize, CTLFLAG_RD | CTLFLAG_LOCKED, &cachesize, sizeof(cachesize), "Q", "");
/* Page sizes and clock frequencies (64-bit handlers). */
SYSCTL_PROC(_hw, OID_AUTO, pagesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_pagesize, "Q", "");
SYSCTL_PROC(_hw, OID_AUTO, pagesize32, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_pagesize32, "Q", "");
SYSCTL_PROC(_hw, OID_AUTO, busfrequency, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY, sysctl_bus_frequency, "Q", "");
SYSCTL_PROC(_hw, OID_AUTO, busfrequency_min, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_MIN, sysctl_bus_frequency, "Q", "");
SYSCTL_PROC(_hw, OID_AUTO, busfrequency_max, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_MAX, sysctl_bus_frequency, "Q", "");
SYSCTL_PROC(_hw, OID_AUTO, cpufrequency, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY, sysctl_cpu_frequency, "Q", "");
SYSCTL_PROC(_hw, OID_AUTO, cpufrequency_min, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_MIN, sysctl_cpu_frequency, "Q", "");
SYSCTL_PROC(_hw, OID_AUTO, cpufrequency_max, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_MAX, sysctl_cpu_frequency, "Q", "");
/* Cache line / cache size OIDs, returned as quads (CTLHW_RETQUAD). */
SYSCTL_PROC(_hw, OID_AUTO, cachelinesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_CACHELINE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", "");
SYSCTL_PROC(_hw, OID_AUTO, l1icachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L1ICACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", "");
SYSCTL_PROC(_hw, OID_AUTO, l1dcachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L1DCACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", "");
SYSCTL_PROC(_hw, OID_AUTO, l2cachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L2CACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", "");
SYSCTL_PROC(_hw, OID_AUTO, l3cachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L3CACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", "");
836 #if defined(__arm64__) && (DEBUG || DEVELOPMENT)
837 SYSCTL_QUAD(_hw, OID_AUTO, memfrequency, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.mem_frequency_hz, "");
838 SYSCTL_QUAD(_hw, OID_AUTO, memfrequency_min, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.mem_frequency_min_hz, "");
839 SYSCTL_QUAD(_hw, OID_AUTO, memfrequency_max, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.mem_frequency_max_hz, "");
840 SYSCTL_QUAD(_hw, OID_AUTO, prffrequency, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.prf_frequency_hz, "");
841 SYSCTL_QUAD(_hw, OID_AUTO, prffrequency_min, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.prf_frequency_min_hz, "");
842 SYSCTL_QUAD(_hw, OID_AUTO, prffrequency_max, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.prf_frequency_max_hz, "");
843 SYSCTL_QUAD(_hw, OID_AUTO, fixfrequency, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.fix_frequency_hz, "");
844 #endif /* __arm64__ */
845 SYSCTL_PROC(_hw, OID_AUTO, tbfrequency, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_tbfrequency, "Q", "");
846 #if XNU_TARGET_OS_OSX
847 SYSCTL_QUAD(_hw, HW_MEMSIZE, memsize, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &max_mem_actual, "");
848 #else
849 SYSCTL_QUAD(_hw, HW_MEMSIZE, memsize, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &max_mem, "");
850 SYSCTL_QUAD(_hw, OID_AUTO, memsize_physical, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &max_mem_actual, "");
851 #endif /* XNU_TARGET_OS_OSX */
852 SYSCTL_INT(_hw, OID_AUTO, packages, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &packages, 0, "");
853 SYSCTL_PROC(_hw, OID_AUTO, osenvironment, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_osenvironment, "A", "");
854 SYSCTL_PROC(_hw, OID_AUTO, ephemeral_storage, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_ephemeral_storage, "I", "");
855 SYSCTL_PROC(_hw, OID_AUTO, use_recovery_securityd, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_use_recovery_securityd, "I", "");
856 SYSCTL_PROC(_hw, OID_AUTO, use_kernelmanagerd, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_use_kernelmanagerd, "I", "");
857 SYSCTL_PROC(_hw, OID_AUTO, serialdebugmode, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_serialdebugmode, "I", "");
858 
/*
 * hw.perflevelN.* variables.
 * Users may check these to determine properties that vary across different CPU types, such as number of CPUs,
 * or cache sizes. Perflevel 0 corresponds to the highest performance one.
 */
SYSCTL_NODE(_hw, OID_AUTO, perflevel0, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, NULL, "Perf level 0 topology and cache geometry paramaters");
SYSCTL_NODE(_hw, OID_AUTO, perflevel1, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, NULL, "Perf level 1 topology and cache geometry paramaters");
SYSCTL_PROC(_hw, OID_AUTO, nperflevels, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_NPERFLEVELS, sysctl_hw_generic, "I", "Number of performance levels supported by this system");

/* Per-level nodes: arg1 carries the perflevel index (0 here) into sysctl_hw_generic. */
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, physicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_PHYSICALCPU, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, physicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_PHYSICALCPUMAX, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, logicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_LOGICALCPU, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, logicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_LOGICALCPUMAX, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, l1icachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_L1ICACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, l1dcachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_L1DCACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, l2cachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_L2CACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, cpusperl2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_CPUSPERL2, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, l3cachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_L3CACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, cpusperl3, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_CPUSPERL3, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, name, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_NAME, sysctl_hw_generic, "A", "");

/* Same table for perflevel 1 (arg1 == 1). */
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, physicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_PHYSICALCPU, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, physicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_PHYSICALCPUMAX, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, logicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_LOGICALCPU, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, logicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_LOGICALCPUMAX, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, l1icachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_L1ICACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, l1dcachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_L1DCACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, l2cachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_L2CACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, cpusperl2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_CPUSPERL2, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, l3cachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_L3CACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, cpusperl3, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_CPUSPERL3, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, name, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_NAME, sysctl_hw_generic, "A", "");
891 
/*
 * Optional CPU features can register nodes below hw.optional.
 *
 * If the feature is not present, the node should either not be registered,
 * or it should return 0.  If the feature is present, the node should return
 * 1.
 */
SYSCTL_NODE(_hw, OID_AUTO, optional, CTLFLAG_RW | CTLFLAG_LOCKED, NULL, "optional features");
SYSCTL_NODE(_hw_optional, OID_AUTO, arm, CTLFLAG_RW | CTLFLAG_LOCKED, NULL, "optional features for ARM processors");

SYSCTL_INT(_hw_optional, OID_AUTO, floatingpoint, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (int *)NULL, 1, "");      /* always set */

/*
 * Optional device hardware features can be registered by drivers below hw.features
 */
SYSCTL_EXTENSIBLE_NODE(_hw, OID_AUTO, features, CTLFLAG_RD | CTLFLAG_LOCKED, NULL, "hardware features");

/*
 * Deprecated variables.  These are supported for backwards compatibility
 * purposes only.  The MASKED flag requests that the variables not be
 * printed by sysctl(8) and similar utilities.
 *
 * The variables named *_compat here are int-sized versions of variables
 * that are now exported as quads.  The int-sized versions are normally
 * looked up only by number, whereas the quad-sized versions should be
 * looked up by name.
 *
 * The *_compat nodes are *NOT* visible within the kernel.
 */

SYSCTL_PROC(_hw, HW_PAGESIZE, pagesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_PAGESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, HW_BUS_FREQ, busfrequency_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_CLOCK_RATE, sysctl_bus_frequency, "I", "");
SYSCTL_PROC(_hw, HW_CPU_FREQ, cpufrequency_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_CLOCK_RATE, sysctl_cpu_frequency, "I", "");
SYSCTL_PROC(_hw, HW_CACHELINE, cachelinesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_CACHELINE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, HW_L1ICACHESIZE, l1icachesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L1ICACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, HW_L1DCACHESIZE, l1dcachesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L1DCACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, HW_L2CACHESIZE, l2cachesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L2CACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, HW_L3CACHESIZE, l3cachesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L3CACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_COMPAT_INT(_hw, HW_TB_FREQ, tbfrequency_compat, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.timebase_frequency_hz, 0, "");
SYSCTL_PROC(_hw, HW_MACHINE, machine, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_MACHINE, sysctl_hw_generic, "A", "");
SYSCTL_PROC(_hw, HW_MODEL, model, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_MODEL, sysctl_hw_generic, "A", "");
SYSCTL_PROC(_hw, HW_TARGET, target, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_TARGET, sysctl_hw_generic, "A", "");
SYSCTL_PROC(_hw, HW_PRODUCT, product, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_PRODUCT, sysctl_hw_generic, "A", "");
SYSCTL_COMPAT_UINT(_hw, HW_PHYSMEM, physmem, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, &mem_size, 0, "");
SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_USERMEM, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, HW_EPOCH, epoch, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_EPOCH, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, HW_VECTORUNIT, vectorunit, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_VECTORUNIT, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, HW_L2SETTINGS, l2settings, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L2SETTINGS, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, HW_L3SETTINGS, l3settings, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L3SETTINGS, sysctl_hw_generic, "I", "");
/* CTLFLAG_NOAUTO: only registered when a thread type is actually detected. */
SYSCTL_INT(_hw, OID_AUTO, cputhreadtype, CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, &cputhreadtype, 0, "");
942 
943 #if defined(__i386__) || defined(__x86_64__) || CONFIG_X86_64_COMPAT
944 static int
sysctl_cpu_capability(__unused struct sysctl_oid * oidp,void * arg1,__unused int arg2,struct sysctl_req * req)945 sysctl_cpu_capability
946 (__unused struct sysctl_oid *oidp, void *arg1, __unused int arg2, struct sysctl_req *req)
947 {
948 	uint64_t    caps;
949 	caps = _get_cpu_capabilities();
950 
951 	uint64_t        mask = (uint64_t) (uintptr_t) arg1;
952 	boolean_t       is_capable = (caps & mask) != 0;
953 
954 	return SYSCTL_OUT(req, &is_capable, sizeof(is_capable));
955 }
/* Identity macro: keeps the capability-bit names below visually grouped. */
#define capability(name) name


/* x86 hw.optional.* feature bits; arg1 is the kHas* mask tested by
 * sysctl_cpu_capability above. */
SYSCTL_PROC(_hw_optional, OID_AUTO, mmx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasMMX), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, sse, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, sse2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE2), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, sse3, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE3), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, supplementalsse3, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSupplementalSSE3), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, sse4_1, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE4_1), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, sse4_2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE4_2), 0, sysctl_cpu_capability, "I", "");
/* "x86_64" is actually a preprocessor symbol on the x86_64 kernel, so we have to hack this */
#undef x86_64
SYSCTL_PROC(_hw_optional, OID_AUTO, x86_64, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(k64Bit), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, aes, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAES), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx1_0, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX1_0), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, rdrand, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasRDRAND), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, f16c, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasF16C), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, enfstrg, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasENFSTRG), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, fma, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasFMA), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx2_0, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX2_0), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, bmi1, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasBMI1), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, bmi2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasBMI2), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, rtm, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasRTM), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, hle, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasHLE), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, adx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasADX), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, mpx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasMPX), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, sgx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSGX), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512f, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512F), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512cd, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512CD), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512dq, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512DQ), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512bw, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512BW), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512vl, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512VL), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512ifma, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512IFMA), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512vbmi, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512VBMI), 0, sysctl_cpu_capability, "I", "");
#undef capability
#endif /* !__i386__ && !__x86_64 && !CONFIG_X86_64_COMPAT */
992 
993 #if defined (__arm64__)
994 int watchpoint_flag = 0;
995 int breakpoint_flag = 0;
996 SECURITY_READ_ONLY_LATE(int) gARMv8Crc32 = 0;
997 
998 /* Features from: ID_AA64ISAR0_EL1 */
999 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_FlagM = 0;
1000 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_FlagM2 = 0;
1001 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_FHM = 0;
1002 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_DotProd = 0;
1003 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_SHA3 = 0;
1004 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_RDM = 0;
1005 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_LSE = 0;
1006 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_SHA256 = 0;
1007 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_SHA512 = 0;
1008 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_SHA1 = 0;
1009 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_AES = 0;
1010 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_PMULL = 0;
1011 
1012 /* Features from: ID_AA64ISAR1_EL1 */
1013 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_SPECRES = 0;
1014 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_SB = 0;
1015 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_FRINTTS = 0;
1016 SECURITY_READ_ONLY_LATE(int) gARMv8Gpi = 0;
1017 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_LRCPC = 0;
1018 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_LRCPC2 = 0;
1019 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_FCMA = 0;
1020 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_JSCVT = 0;
1021 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_PAuth = 0;
1022 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_PAuth2 = 0;
1023 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_FPAC = 0;
1024 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_DPB = 0;
1025 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_DPB2 = 0;
1026 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_BF16 = 0;
1027 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_I8MM = 0;
1028 
1029 /* Features from: ID_AA64MMFR0_EL1 */
1030 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_ECV = 0;
1031 
1032 /* Features from: ID_AA64MMFR2_EL1 */
1033 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_LSE2 = 0;
1034 
1035 /* Features from: ID_AA64PFR0_EL1 */
1036 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_CSV2 = 0;
1037 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_CSV3 = 0;
1038 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_DIT = 0;
1039 SECURITY_READ_ONLY_LATE(int) gARM_AdvSIMD = 0;
1040 SECURITY_READ_ONLY_LATE(int) gARM_AdvSIMD_HPFPCvt = 0;
1041 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_FP16 = 0;
1042 
1043 /* Features from: ID_AA64PFR1_EL1 */
1044 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_SSBS = 0;
1045 SECURITY_READ_ONLY_LATE(int) gARM_FEAT_BTI = 0;
1046 
1047 SECURITY_READ_ONLY_LATE(int) gUCNormalMem = 0;
1048 
1049 #if defined (__arm64__)
1050 SECURITY_READ_ONLY_LATE(int) arm64_flag = 1;
1051 #else /* end __arm64__*/
1052 SECURITY_READ_ONLY_LATE(int) arm64_flag = 0;
1053 #endif
1054 
/* Legacy Names ARM Optional Feature Sysctls */
SYSCTL_INT(_hw_optional, OID_AUTO, neon, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_AdvSIMD, 0, "");
SYSCTL_INT(_hw_optional, OID_AUTO, neon_hpfp, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_AdvSIMD_HPFPCvt, 0, "");
SYSCTL_INT(_hw_optional, OID_AUTO, neon_fp16, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_FP16, 0, "");
SYSCTL_INT(_hw_optional, OID_AUTO, armv8_1_atomics, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_LSE, 0, "");
SYSCTL_INT(_hw_optional, OID_AUTO, armv8_2_fhm, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_FHM, 0, "");
SYSCTL_INT(_hw_optional, OID_AUTO, armv8_2_sha512, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_SHA512, 0, "");
SYSCTL_INT(_hw_optional, OID_AUTO, armv8_2_sha3, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_SHA3, 0, "");
SYSCTL_INT(_hw_optional, OID_AUTO, armv8_3_compnum, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_FCMA, 0, "");

/* Misc ARM Optional Feature Sysctls */
SYSCTL_INT(_hw_optional, OID_AUTO, watchpoint, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &watchpoint_flag, 0, "");
SYSCTL_INT(_hw_optional, OID_AUTO, breakpoint, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &breakpoint_flag, 0, "");

/**
 * Enumerated syscalls for every ARM optional feature to be exported to
 * userspace. These are to be enumerated using the official feature name from
 * the ARM ARM. They are grouped below based on the MSR that will be used to populate the data.
 */

/* Features from: ID_AA64ISAR0_EL1 */
SYSCTL_INT(_hw_optional, OID_AUTO, armv8_crc32, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARMv8Crc32, 0, "");
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_FlagM, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_FlagM, 0, "");
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_FlagM2, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_FlagM2, 0, "");
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_FHM, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_FHM, 0, "");
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_DotProd, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_DotProd, 0, "");
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_SHA3, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_SHA3, 0, "");
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_RDM, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_RDM, 0, "");
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_LSE, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_LSE, 0, "");
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_SHA256, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_SHA256, 0, "");
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_SHA512, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_SHA512, 0, "");
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_SHA1, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_SHA1, 0, "");
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_AES, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_AES, 0, "");
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_PMULL, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_PMULL, 0, "");

/* Features from: ID_AA64ISAR1_EL1 */
SYSCTL_INT(_hw_optional, OID_AUTO, armv8_gpi, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARMv8Gpi, 0, "");
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_SPECRES, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_SPECRES, 0, "");
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_SB, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_SB, 0, "");
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_FRINTTS, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_FRINTTS, 0, "");
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_LRCPC, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_LRCPC, 0, "");
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_LRCPC2, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_LRCPC2, 0, "");
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_FCMA, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_FCMA, 0, "");
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_JSCVT, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_JSCVT, 0, "");
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_PAuth, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_PAuth, 0, "");
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_PAuth2, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_PAuth2, 0, "");
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_FPAC, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_FPAC, 0, "");
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_DPB, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_DPB, 0, "");
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_DPB2, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_DPB2, 0, "");
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_BF16, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_BF16, 0, "");
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_I8MM, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_I8MM, 0, "");

/* Features from: ID_AA64MMFR0_EL1 */
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_ECV, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_ECV, 0, "");

/* Features from: ID_AA64MMFR2_EL1 */
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_LSE2, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_LSE2, 0, "");

/* Features from: ID_AA64PFR0_EL1 */
SYSCTL_INT(_hw_optional, OID_AUTO, AdvSIMD, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_AdvSIMD, 0, "");
SYSCTL_INT(_hw_optional, OID_AUTO, AdvSIMD_HPFPCvt, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_AdvSIMD_HPFPCvt, 0, "");
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_CSV2, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_CSV2, 0, "");
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_CSV3, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_CSV3, 0, "");
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_DIT, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_DIT, 0, "");
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_FP16, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_FP16, 0, "");

/* Features from: FPCR */
SECURITY_READ_ONLY_LATE(int) gARM_FP_SyncExceptions = 0;

/* Features from: ID_AA64PFR1_EL1 */
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_SSBS, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_SSBS, 0, "");
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_BTI, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_BTI, 0, "");

/* Features from FPCR. */
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FP_SyncExceptions, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FP_SyncExceptions, 0, "");

SYSCTL_INT(_hw_optional, OID_AUTO, ucnormal_mem, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gUCNormalMem, 0, "");

/* Kernel-configuration introspection flags, DEBUG/DEVELOPMENT builds only. */
#if DEBUG || DEVELOPMENT
#if __ARM_KERNEL_PROTECT__
static SECURITY_READ_ONLY_LATE(int) arm_kernel_protect = 1;
#else
static SECURITY_READ_ONLY_LATE(int) arm_kernel_protect = 0;
#endif
SYSCTL_INT(_hw_optional, OID_AUTO, arm_kernel_protect, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &arm_kernel_protect, 0, "");
#endif

#if DEBUG || DEVELOPMENT
static int ic_inval_filters = 0;
SYSCTL_INT(_hw_optional, OID_AUTO, ic_inval_filters, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &ic_inval_filters, 0, "");
#endif

#if DEBUG || DEVELOPMENT
#if __APPLE_WKDM_POPCNT_EXTENSIONS__
static SECURITY_READ_ONLY_LATE(int) wkdm_popcount = 1;
#else
static SECURITY_READ_ONLY_LATE(int) wkdm_popcount = 0;
#endif
SYSCTL_INT(_hw_optional, OID_AUTO, wkdm_popcount, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &wkdm_popcount, 0, "");
#endif

#if DEBUG || DEVELOPMENT
#if __has_feature(ptrauth_calls)
static SECURITY_READ_ONLY_LATE(int) ptrauth = 1;
#else
static SECURITY_READ_ONLY_LATE(int) ptrauth = 0;
#endif
SYSCTL_INT(_hw_optional, OID_AUTO, ptrauth, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &ptrauth, 0, "");
#endif

/*
 * Without this little ifdef dance, the preprocessor replaces "arm64" with "1",
 * leaving us with a less-than-helpful sysctl.hwoptional.1.
 */
#ifdef arm64
#undef arm64
SYSCTL_INT(_hw_optional, OID_AUTO, arm64, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &arm64_flag, 0, "");
#define arm64 1
#else
SYSCTL_INT(_hw_optional, OID_AUTO, arm64, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &arm64_flag, 0, "");
#endif
#endif /* ! __arm64__ */
1177 
1178 
1179 #if defined(__arm64__) && defined(CONFIG_XNUPOST)
1180 /**
1181  * Test whether the new values for a few hw.optional sysctls matches the legacy
1182  * way of obtaining that information.
1183  *
1184  * Specifically, hw.optional.neon_fp16 has been used to indicate both FEAT_FP16
1185  * and FEAT_FHM, as we are now grabbing the information directly from the ISA
1186  * status registers instead of from the arm_mvfp_info, we need to check that
1187  * this new source won't break any existing usecases of the sysctl and assert
1188  * that hw.optional.neon_fp16 will return the same value as it used to for all
1189  * devices.
1190  */
1191 kern_return_t
arm_cpu_capabilities_legacy_test(void)1192 arm_cpu_capabilities_legacy_test(void)
1193 {
1194 	T_SETUPBEGIN;
1195 	arm_mvfp_info_t *mvfp_info = arm_mvfp_info();
1196 	T_ASSERT_NOTNULL(mvfp_info, "arm_mvfp_info returned null pointer.");
1197 	T_SETUPEND;
1198 
1199 
1200 	T_EXPECT_EQ_INT(mvfp_info->neon, gARM_AdvSIMD, "neon value should match legacy");
1201 	T_EXPECT_EQ_INT(mvfp_info->neon_hpfp, gARM_AdvSIMD_HPFPCvt, "neon hpfp cvt value should match legacy");
1202 	T_EXPECT_EQ_INT(mvfp_info->neon_fp16, gARM_FEAT_FP16, "neon fp16 value should match legacy");
1203 
1204 	T_LOG("Completed arm cpu capabalities legacy compliance test.");
1205 	return KERN_SUCCESS;
1206 }
1207 #endif /* defined(__arm64__) && defined(CONFIG_XNUPOST) */
1208 
1209 /******************************************************************************
1210  * Generic MIB initialisation.
1211  *
1212  * This is a hack, and should be replaced with SYSINITs
1213  * at some point.
1214  */
1215 void
sysctl_mib_init(void)1216 sysctl_mib_init(void)
1217 {
1218 #if defined(__i386__) || defined (__x86_64__)
1219 	cpu64bit = (_get_cpu_capabilities() & k64Bit) == k64Bit;
1220 #elif defined (__arm64__)
1221 	cpu64bit = (cpu_type() & CPU_ARCH_ABI64) == CPU_ARCH_ABI64;
1222 #else
1223 #error Unsupported arch
1224 #endif
1225 #if defined (__i386__) || defined (__x86_64__)
1226 	/* hw.cacheconfig */
1227 	cacheconfig[0] = ml_cpu_cache_sharing(0, CLUSTER_TYPE_SMP, true);
1228 	cacheconfig[1] = ml_cpu_cache_sharing(1, CLUSTER_TYPE_SMP, true);
1229 	cacheconfig[2] = ml_cpu_cache_sharing(2, CLUSTER_TYPE_SMP, true);
1230 	cacheconfig[3] = ml_cpu_cache_sharing(3, CLUSTER_TYPE_SMP, true);
1231 	cacheconfig[4] = 0;
1232 
1233 	/* hw.cachesize */
1234 	cachesize[0] = ml_cpu_cache_size(0);
1235 	cachesize[1] = ml_cpu_cache_size(1);
1236 	cachesize[2] = ml_cpu_cache_size(2);
1237 	cachesize[3] = ml_cpu_cache_size(3);
1238 	cachesize[4] = 0;
1239 
1240 	/* hw.packages */
1241 	packages = (int)(roundup(ml_cpu_cache_sharing(0, CLUSTER_TYPE_SMP, true), cpuid_info()->thread_count)
1242 	    / cpuid_info()->thread_count);
1243 
1244 #elif defined(__arm64__) /* end __i386 */
1245 	watchpoint_flag = arm_debug_info()->num_watchpoint_pairs;
1246 	breakpoint_flag = arm_debug_info()->num_breakpoint_pairs;
1247 
1248 	cluster_type_t min_perflevel_cluster_type = cpu_type_for_perflevel(__builtin_popcount(ml_get_cpu_types()) - 1);
1249 
1250 	cacheconfig[0] = ml_wait_max_cpus();
1251 	cacheconfig[1] = ml_cpu_cache_sharing(1, min_perflevel_cluster_type, true);
1252 	cacheconfig[2] = ml_cpu_cache_sharing(2, min_perflevel_cluster_type, true);
1253 	cacheconfig[3] = 0;
1254 	cacheconfig[4] = 0;
1255 	cacheconfig[5] = 0;
1256 	cacheconfig[6] = 0;
1257 
1258 	cachesize[0] = ml_get_machine_mem();
1259 	cachesize[1] = cache_info_type(min_perflevel_cluster_type)->c_dsize; /* Using the DCache */
1260 	cachesize[2] = cache_info_type(min_perflevel_cluster_type)->c_l2size;
1261 	cachesize[3] = 0;
1262 	cachesize[4] = 0;
1263 
1264 	packages = 1;
1265 #else
1266 #error unknown architecture
1267 #endif /* !__i386__ && !__x86_64 && !__arm64__ */
1268 }
1269 
1270 __startup_func
1271 static void
sysctl_mib_startup(void)1272 sysctl_mib_startup(void)
1273 {
1274 	cputhreadtype = cpu_threadtype();
1275 
1276 	/*
1277 	 * Populate the optional portion of the hw.* MIB.
1278 	 *
1279 	 * XXX This could be broken out into parts of the code
1280 	 *     that actually directly relate to the functions in
1281 	 *     question.
1282 	 */
1283 
1284 	if (cputhreadtype != CPU_THREADTYPE_NONE) {
1285 		sysctl_register_oid_early(&sysctl__hw_cputhreadtype);
1286 	}
1287 
1288 }
1289 STARTUP(SYSCTL, STARTUP_RANK_MIDDLE, sysctl_mib_startup);
1290