xref: /xnu-12377.61.12/bsd/kern/kern_mib.c (revision 4d495c6e23c53686cf65f45067f79024cf5dcee8)
1 /*
2  * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*-
29  * Copyright (c) 1982, 1986, 1989, 1993
30  *	The Regents of the University of California.  All rights reserved.
31  *
32  * This code is derived from software contributed to Berkeley by
33  * Mike Karels at Berkeley Software Design, Inc.
34  *
35  * Quite extensively rewritten by Poul-Henning Kamp of the FreeBSD
36  * project, to make these variables more userfriendly.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  * 3. All advertising materials mentioning features or use of this software
47  *    must display the following acknowledgement:
48  *	This product includes software developed by the University of
49  *	California, Berkeley and its contributors.
50  * 4. Neither the name of the University nor the names of its contributors
51  *    may be used to endorse or promote products derived from this software
52  *    without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64  * SUCH DAMAGE.
65  *
66  *	@(#)kern_sysctl.c	8.4 (Berkeley) 4/14/94
67  */
68 
69 #include <sys/errno.h>
70 #include <sys/param.h>
71 #include <sys/kernel.h>
72 #include <sys/syslimits.h>
73 #include <sys/systm.h>
74 #include <sys/sysctl.h>
75 #include <sys/proc_internal.h>
76 #include <sys/unistd.h>
77 
78 #if defined(SMP)
79 #include <machine/smp.h>
80 #endif
81 
82 #include <sys/param.h>  /* XXX prune includes */
83 #include <sys/systm.h>
84 #include <sys/kernel.h>
85 #include <sys/malloc.h>
86 #include <sys/proc.h>
87 #include <sys/file_internal.h>
88 #include <sys/vnode.h>
89 #include <sys/unistd.h>
90 #include <sys/ioctl.h>
91 #include <sys/namei.h>
92 #include <sys/tty.h>
93 #include <sys/disklabel.h>
94 #include <sys/vm.h>
95 #include <sys/sysctl.h>
96 #include <sys/user.h>
97 #include <mach/machine.h>
98 #include <mach/mach_types.h>
99 #include <mach/vm_param.h>
100 #include <kern/task.h>
101 #include <vm/vm_kern.h>
102 #include <vm/vm_map.h>
103 #include <vm/vm_protos.h>
104 #include <mach/host_info.h>
105 #include <kern/pms.h>
106 #include <pexpert/device_tree.h>
107 #include <pexpert/pexpert.h>
108 #include <kern/sched_prim.h>
109 #include <console/serial_protos.h>
110 
111 extern vm_map_t bsd_pageable_map;
112 
113 #include <sys/mount_internal.h>
114 #include <sys/kdebug.h>
115 
116 #include <IOKit/IOPlatformExpert.h>
117 #include <IOKit/IOBSD.h>
118 #include <pexpert/pexpert.h>
119 
120 #include <machine/config.h>
121 #include <machine/machine_routines.h>
122 #include <machine/cpu_capabilities.h>
123 
124 #include <mach/mach_host.h>             /* for host_info() */
125 
126 #if defined(__i386__) || defined(__x86_64__)
127 #include <i386/cpuid.h> /* for cpuid_info() */
128 #endif
129 
130 #if defined(__arm64__)
131 #include <arm/cpuid.h>          /* for cpuid_info() & cache_info() */
132 #include <arm/cpu_capabilities_public.h>
133 #endif
134 
135 #if defined(CONFIG_XNUPOST)
136 #include <tests/ktest.h>
137 #endif
138 
139 /**
140  * Prevents an issue with creating the sysctl node hw.optional.arm on some
141  * platforms. If the 'arm' macro is defined, then the word "arm" is preprocessed
142  * to 1. As the 'arm' macro is not used in this file, we do not need to redefine
143  * after we are done.
144  */
145 #if defined(arm)
146 #undef arm
147 #endif /* defined(arm) */
148 
#ifndef MAX
/*
 * Parenthesize the arguments and the comparison so the macro expands
 * safely inside larger expressions (the previous definition broke for
 * operands like `x | y` because `>=` binds tighter than `|`).
 * NOTE: both arguments are still evaluated twice; do not pass
 * expressions with side effects.
 */
#define MAX(a, b) (((a) >= (b)) ? (a) : (b))
#endif
152 
153 #if defined(__arm64__) && defined(CONFIG_XNUPOST)
154 kern_return_t arm_cpu_capabilities_legacy_test(void);
155 #endif /* defined(__arm64__) && defined(CONFIG_XNUPOST) */
156 
/* XXX This should be in a BSD accessible Mach header, but isn't. */
extern unsigned int vm_page_wire_count;

/* Cached CPU topology values published through hw.* sysctls. */
static int      cputhreadtype, cpu64bit;
static uint64_t cacheconfig[10];
static int      packages;

/*
 * hw.osenvironment backing store.  Set once via
 * sysctl_set_osenvironment(); osenvironment_initialized doubles as a
 * flag and a wakeup event for readers that block until it is published.
 */
static char *   osenvironment = NULL;
static uint32_t osenvironment_size = 0;
static int      osenvironment_initialized = 0;

/* Values loaded from the device tree /chosen node at startup. */
static uint32_t ephemeral_storage = 0;
static uint32_t use_recovery_securityd = 0;

/* Jetsam-properties path buffer; written through sysctl_mempath(). */
static char *mempath = NULL;
static size_t mempath_size = 0;

/* Records whether each /chosen device-tree property was present. */
static struct {
	uint32_t ephemeral_storage:1;
	uint32_t use_recovery_securityd:1;
} property_existence = {0, 0};
178 
/* Top-level sysctl namespaces (the CTL_* roots from sys/sysctl.h). */
SYSCTL_EXTENSIBLE_NODE(, 0, sysctl, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "Sysctl internal magic");
SYSCTL_EXTENSIBLE_NODE(, CTL_KERN, kern, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "High kernel, proc, limits &c");
SYSCTL_EXTENSIBLE_NODE(, CTL_VM, vm, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "Virtual memory");
SYSCTL_EXTENSIBLE_NODE(, CTL_VFS, vfs, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "File system");
SYSCTL_EXTENSIBLE_NODE(, CTL_NET, net, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "Network, (see socket.h)");
SYSCTL_EXTENSIBLE_NODE(, CTL_DEBUG, debug, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "Debugging");
#if DEBUG || DEVELOPMENT
/* debug.test: hidden (MASKED) container for kernel test sysctls. */
SYSCTL_NODE(_debug, OID_AUTO, test,
    CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_MASKED, 0, "tests");
#endif /* DEBUG || DEVELOPMENT */
SYSCTL_NODE(, CTL_HW, hw, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "hardware");
SYSCTL_EXTENSIBLE_NODE(, CTL_MACHDEP, machdep, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "machine dependent");
SYSCTL_NODE(, CTL_USER, user, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "user-level");

SYSCTL_NODE(_kern, OID_AUTO, bridge, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "bridge");
204 
/* Copy a fixed-size scalar out to the requester and return the status. */
#define SYSCTL_RETURN(r, x)     SYSCTL_OUT(r, &x, sizeof(x))

/******************************************************************************
 * hw.* MIB
 */

/*
 * Flag bits OR'ed into the arg2 selector passed to sysctl_hw_generic().
 * They occupy the top bits so they cannot collide with OID numbers.
 */
#define CTLHW_RETQUAD   (1U << 31)      /* caller expects a 64-bit result */
#define CTLHW_LOCAL     (1U << 30)      /* locally-defined OID (not a HW_* from sysctl.h) */
#define CTLHW_PERFLEVEL (1U << 29)      /* per-perflevel OID; arg1 carries the level */

/* Locally-defined hw.* OIDs serviced by sysctl_hw_generic(). */
#define HW_LOCAL_CPUTHREADTYPE        (1 | CTLHW_LOCAL)
#define HW_LOCAL_PHYSICALCPU          (2 | CTLHW_LOCAL)
#define HW_LOCAL_PHYSICALCPUMAX       (3 | CTLHW_LOCAL)
#define HW_LOCAL_LOGICALCPU           (4 | CTLHW_LOCAL)
#define HW_LOCAL_LOGICALCPUMAX        (5 | CTLHW_LOCAL)
#define HW_LOCAL_CPUTYPE              (6 | CTLHW_LOCAL)
#define HW_LOCAL_CPUSUBTYPE           (7 | CTLHW_LOCAL)
#define HW_LOCAL_CPUFAMILY            (8 | CTLHW_LOCAL)
#define HW_LOCAL_CPUSUBFAMILY         (9 | CTLHW_LOCAL)
#define HW_NPERFLEVELS                (10 | CTLHW_LOCAL)
/* Per-perflevel OIDs (hw.perflevelN.*). */
#define HW_PERFLEVEL_PHYSICALCPU      (11 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_PHYSICALCPUMAX   (12 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_LOGICALCPU       (13 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_LOGICALCPUMAX    (14 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_L1ICACHESIZE     (15 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_L1DCACHESIZE     (16 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_L2CACHESIZE      (17 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_CPUSPERL2        (18 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_L3CACHESIZE      (19 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_CPUSPERL3        (20 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_NAME             (21 | CTLHW_PERFLEVEL)
236 
237 
238 /*
239  * For a given perflevel, return the corresponding CPU type.
240  */
241 cluster_type_t cpu_type_for_perflevel(int perflevel);
242 cluster_type_t
cpu_type_for_perflevel(int perflevel)243 cpu_type_for_perflevel(int perflevel)
244 {
245 	unsigned int cpu_types = ml_get_cpu_types();
246 	__assert_only unsigned int n_perflevels = __builtin_popcount(cpu_types);
247 
248 	assert((perflevel >= 0) && (perflevel < n_perflevels));
249 
250 	/* Check CPU types mask for each cluster type in descending order of performance */
251 	cluster_type_t cluster_types_in_order[MAX_CPU_TYPES];
252 	cluster_types_in_order[0] = CLUSTER_TYPE_SMP;
253 #if defined(__arm__) || defined(__arm64__)
254 	cluster_types_in_order[1] = CLUSTER_TYPE_P;
255 	cluster_types_in_order[2] = CLUSTER_TYPE_E;
256 #endif /* defined(__arm__) || defined(__arm64__) */
257 
258 	int perflevel_ind = 0;
259 	for (int i = 0; i < MAX_CPU_TYPES; i++) {
260 		unsigned int type_mask = 1 << cluster_types_in_order[i];
261 		if (type_mask & cpu_types) {
262 			if (perflevel_ind == perflevel) {
263 				return cluster_types_in_order[i];
264 			}
265 			perflevel_ind++;
266 		}
267 	}
268 	return 0;
269 }
270 
271 static ml_cpu_info_t
sysctl_hw_generic_cpu_info(int perflevel,int arg2 __unused)272 sysctl_hw_generic_cpu_info(int perflevel, int arg2 __unused)
273 {
274 	bool ignore_perflevel = false;
275 #if APPLE_ARM64_ARCH_FAMILY
276 	if (arg2 == HW_CACHELINE) {
277 		/* Apple SoCs have a uniform cacheline size across all clusters */
278 		ignore_perflevel = true;
279 	}
280 #endif
281 
282 	ml_cpu_info_t cpu_info;
283 	if (ignore_perflevel) {
284 		ml_cpu_get_info(&cpu_info);
285 	} else {
286 		ml_cpu_get_info_type(&cpu_info, cpu_type_for_perflevel(perflevel));
287 	}
288 	return cpu_info;
289 }
290 
/*
 * Supporting some variables requires us to do "real" work.  We
 * gather some of that here.
 *
 * Backing handler for most hw.* OIDs.
 *
 * arg1 carries the perflevel index (an integer smuggled through the
 * pointer argument; only meaningful when arg2 has CTLHW_PERFLEVEL set).
 * arg2 selects the OID being serviced, optionally OR'ed with
 * CTLHW_RETQUAD (caller wants a 64-bit result) and/or CTLHW_PERFLEVEL.
 *
 * Returns 0 on success, or ENOENT/EINVAL/ENOTSUP on failure.
 */
static int
sysctl_hw_generic(__unused struct sysctl_oid *oidp, void *arg1,
    int arg2, struct sysctl_req *req)
{
	char dummy[65];         /* scratch buffer for NUL-terminated name strings */
	int  epochTemp;
	int val, doquad;
	long long qval;
	unsigned int cpu_count;
	host_basic_info_data_t hinfo;
	kern_return_t kret;
	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

	/*
	 * If we are using one of the perflevel sysctls, return early if the perflevel
	 * does not exist in this system.
	 */
	int perflevel = (int)arg1;      /* arg1 encodes an index, not a pointer */
	int n_perflevels = __builtin_popcount(ml_get_cpu_types());

	if (arg2 & CTLHW_PERFLEVEL) {
		if ((perflevel < 0) || (perflevel >= n_perflevels)) {
			return ENOENT;
		}
	} else {
		/* Non-perflevel OIDs report against the last (lowest-performance) level. */
		perflevel = n_perflevels - 1;
	}

	/*
	 * Test and mask off the 'return quad' flag.
	 * Note that only some things here support it.
	 */
	doquad = arg2 & CTLHW_RETQUAD;
	arg2 &= ~CTLHW_RETQUAD;

#define BSD_HOST 1
	kret = host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);

	/*
	 * Handle various OIDs.
	 *
	 * OIDs that can return int or quad set val and qval and then break.
	 * Errors and int-only values return inline.
	 */
	switch (arg2) {
	/* CPU counts come straight from HOST_BASIC_INFO. */
	case HW_NCPU:
		if (kret == KERN_SUCCESS) {
			return SYSCTL_RETURN(req, hinfo.max_cpus);
		} else {
			return EINVAL;
		}
	case HW_AVAILCPU:
		if (kret == KERN_SUCCESS) {
			return SYSCTL_RETURN(req, hinfo.avail_cpus);
		} else {
			return EINVAL;
		}
	case HW_LOCAL_PHYSICALCPU:
		if (kret == KERN_SUCCESS) {
			return SYSCTL_RETURN(req, hinfo.physical_cpu);
		} else {
			return EINVAL;
		}
	case HW_LOCAL_PHYSICALCPUMAX:
		if (kret == KERN_SUCCESS) {
			return SYSCTL_RETURN(req, hinfo.physical_cpu_max);
		} else {
			return EINVAL;
		}
	case HW_LOCAL_LOGICALCPU:
		if (kret == KERN_SUCCESS) {
			return SYSCTL_RETURN(req, hinfo.logical_cpu);
		} else {
			return EINVAL;
		}
	case HW_LOCAL_LOGICALCPUMAX:
		if (kret == KERN_SUCCESS) {
			return SYSCTL_RETURN(req, hinfo.logical_cpu_max);
		} else {
			return EINVAL;
		}
	case HW_NPERFLEVELS:
		return SYSCTL_RETURN(req, n_perflevels);
	/* Per-perflevel CPU counts (enabled vs. max, physical vs. logical). */
	case HW_PERFLEVEL_PHYSICALCPU:
		cpu_count = ml_get_cpu_number_type(cpu_type_for_perflevel(perflevel), false, true);
		return SYSCTL_RETURN(req, cpu_count);
	case HW_PERFLEVEL_PHYSICALCPUMAX:
		cpu_count = ml_get_cpu_number_type(cpu_type_for_perflevel(perflevel), false, false);
		return SYSCTL_RETURN(req, cpu_count);
	case HW_PERFLEVEL_LOGICALCPU:
		cpu_count = ml_get_cpu_number_type(cpu_type_for_perflevel(perflevel), true, true);
		return SYSCTL_RETURN(req, cpu_count);
	case HW_PERFLEVEL_LOGICALCPUMAX:
		cpu_count = ml_get_cpu_number_type(cpu_type_for_perflevel(perflevel), true, false);
		return SYSCTL_RETURN(req, cpu_count);
	case HW_PERFLEVEL_L1ICACHESIZE: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		val = (int)cpu_info.l1_icache_size;
		qval = (long long)cpu_info.l1_icache_size;
		break;
	}
	case HW_PERFLEVEL_L1DCACHESIZE: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		val = (int)cpu_info.l1_dcache_size;
		qval = (long long)cpu_info.l1_dcache_size;
		break;
	}
	case HW_PERFLEVEL_L2CACHESIZE: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		val = (int)cpu_info.l2_cache_size;
		qval = (long long)cpu_info.l2_cache_size;
		break;
	}
	case HW_PERFLEVEL_CPUSPERL2:
		cpu_count = ml_cpu_cache_sharing(2, cpu_type_for_perflevel(perflevel), false);
		return SYSCTL_RETURN(req, cpu_count);
	/* L3 values: UINT32_MAX size means "no L3 at this perflevel". */
	case HW_PERFLEVEL_L3CACHESIZE: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		if (cpu_info.l3_cache_size == UINT32_MAX) {
			return EINVAL;
		}
		val = (int)cpu_info.l3_cache_size;
		qval = (long long)cpu_info.l3_cache_size;
		break;
	}
	case HW_PERFLEVEL_CPUSPERL3: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		if (cpu_info.l3_cache_size == UINT32_MAX) {
			return EINVAL;
		}
		cpu_count = ml_cpu_cache_sharing(3, cpu_type_for_perflevel(perflevel), false);
		return SYSCTL_RETURN(req, cpu_count);
	}
	case HW_PERFLEVEL_NAME:
		bzero(dummy, sizeof(dummy));
		ml_get_cluster_type_name(cpu_type_for_perflevel(perflevel), dummy, sizeof(dummy));
		return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
	case HW_LOCAL_CPUTYPE:
		if (kret == KERN_SUCCESS) {
			return SYSCTL_RETURN(req, hinfo.cpu_type);
		} else {
			return EINVAL;
		}
	case HW_LOCAL_CPUSUBTYPE:
		if (kret == KERN_SUCCESS) {
			return SYSCTL_RETURN(req, hinfo.cpu_subtype);
		} else {
			return EINVAL;
		}
	case HW_LOCAL_CPUFAMILY:
	{
		int cpufamily = 0;
#if defined (__i386__) || defined (__x86_64__)
		cpufamily = cpuid_cpufamily();
#elif defined(__arm64__)
		{
			cpufamily = cpuid_get_cpufamily();
		}
#else
#error unknown architecture
#endif
		return SYSCTL_RETURN(req, cpufamily);
	}
	case HW_LOCAL_CPUSUBFAMILY:
	{
		int cpusubfamily = 0;
#if defined (__i386__) || defined (__x86_64__)
		cpusubfamily = CPUSUBFAMILY_UNKNOWN;
#elif defined(__arm64__)
		{
			cpusubfamily = cpuid_get_cpusubfamily();
		}
#else
#error unknown architecture
#endif
		return SYSCTL_RETURN(req, cpusubfamily);
	}
	case HW_PAGESIZE:
	{
		/* page size as seen by the calling task's VM map */
		vm_map_t map = get_task_map(current_task());
		val = vm_map_page_size(map);
		qval = (long long)val;
		break;
	}
	case HW_CACHELINE: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		val = (int)cpu_info.cache_line_size;
		qval = (long long)val;
		break;
	}
	case HW_L1ICACHESIZE: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		val = (int)cpu_info.l1_icache_size;
		qval = (long long)cpu_info.l1_icache_size;
		break;
	}
	case HW_L1DCACHESIZE: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		val = (int)cpu_info.l1_dcache_size;
		qval = (long long)cpu_info.l1_dcache_size;
		break;
	}
	/* For L2/L3, a size of UINT32_MAX means the cache level is absent. */
	case HW_L2CACHESIZE: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		if (cpu_info.l2_cache_size == UINT32_MAX) {
			return EINVAL;
		}
		val = (int)cpu_info.l2_cache_size;
		qval = (long long)cpu_info.l2_cache_size;
		break;
	}
	case HW_L3CACHESIZE: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		if (cpu_info.l3_cache_size == UINT32_MAX) {
			return EINVAL;
		}
		val = (int)cpu_info.l3_cache_size;
		qval = (long long)cpu_info.l3_cache_size;
		break;
	}
	case HW_TARGET:
		bzero(dummy, sizeof(dummy));
		if (!PEGetTargetName(dummy, 64)) {
			return EINVAL;
		}
		dummy[64] = 0;      /* dummy is 65 bytes; force termination */
		return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
	case HW_PRODUCT:
		bzero(dummy, sizeof(dummy));
		if (!PEGetProductName(dummy, 64)) {
			return EINVAL;
		}
		dummy[64] = 0;
		return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);

		/*
		 * Deprecated variables.  We still support these for
		 * backwards compatibility purposes only.
		 */
#if XNU_TARGET_OS_OSX && defined(__arm64__)
	/* The following two are kludged for backward
	 * compatibility. Use hw.product/hw.target for something
	 * consistent instead. */

	case HW_MACHINE:
		bzero(dummy, sizeof(dummy));
		if (proc_platform(req->p) == PLATFORM_IOS) {
			/* iOS-on-Mac processes don't expect the macOS kind of
			 * hw.machine, e.g. "arm64", but are used to seeing
			 * a product string on iOS, which we here hardcode
			 * to return as "iPad8,6" for compatibility.
			 *
			 * Another reason why hw.machine and hw.model are
			 * trouble and hw.target+hw.product should be used
			 * instead.
			 */

			strlcpy(dummy, "iPad8,6", sizeof(dummy));
		}
		else {
			strlcpy(dummy, "arm64", sizeof(dummy));
		}
		dummy[64] = 0;
		return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
	case HW_MODEL:
		bzero(dummy, sizeof(dummy));
		if (!PEGetProductName(dummy, 64)) {
			return EINVAL;
		}
		dummy[64] = 0;
		return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
#else
	case HW_MACHINE:
		bzero(dummy, sizeof(dummy));
		if (!PEGetMachineName(dummy, 64)) {
			return EINVAL;
		}
		dummy[64] = 0;
		return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
	case HW_MODEL:
		bzero(dummy, sizeof(dummy));
		if (!PEGetModelName(dummy, 64)) {
			return EINVAL;
		}
		dummy[64] = 0;
		return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
#endif
	case HW_USERMEM:
	{
		/* memory not wired by the kernel; truncates to int by design */
		int usermem = (int)(max_mem - vm_page_wire_count * page_size);

		return SYSCTL_RETURN(req, usermem);
	}
	case HW_EPOCH:
		epochTemp = PEGetPlatformEpoch();
		if (epochTemp == -1) {
			return EINVAL;
		}
		return SYSCTL_RETURN(req, epochTemp);
	case HW_VECTORUNIT: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		/* normalize to a 0/1 boolean */
		int vector = cpu_info.vector_unit == 0? 0 : 1;
		return SYSCTL_RETURN(req, vector);
	}
	case HW_L2SETTINGS: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		if (cpu_info.l2_cache_size == UINT32_MAX) {
			return EINVAL;
		}
		return SYSCTL_RETURN(req, cpu_info.l2_settings);
	}
	case HW_L3SETTINGS: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		if (cpu_info.l3_cache_size == UINT32_MAX) {
			return EINVAL;
		}
		return SYSCTL_RETURN(req, cpu_info.l3_settings);
	}
	default:
		return ENOTSUP;
	}
	/*
	 * Callers may come to us with either int or quad buffers.
	 */
	if (doquad) {
		return SYSCTL_RETURN(req, qval);
	}
	return SYSCTL_RETURN(req, val);
}
624 
/*
 * hw.cachesize: an array of up to 10 cache sizes in bytes.
 * On arm64, index 0 is total machine memory, 1 is the L1 DCache and
 * 2 is the L2, reported for the lowest-performance cluster type.
 * On x86_64, indices 0-3 come from ml_cpu_cache_size(0..3)
 * (NOTE(review): presumably level 0 denotes main memory there as
 * well — confirm against ml_cpu_cache_size).
 * Unused slots remain zero.
 */
static int
sysctl_hw_cachesize(struct sysctl_oid *oidp __unused, void *arg1 __unused,
    int arg2 __unused, struct sysctl_req *req)
{
	uint64_t cachesize[10] = {};

#if __x86_64__
	cachesize[0] = ml_cpu_cache_size(0);
	cachesize[1] = ml_cpu_cache_size(1);
	cachesize[2] = ml_cpu_cache_size(2);
	cachesize[3] = ml_cpu_cache_size(3);
#elif __arm64__
	/* last perflevel == lowest-performance cluster type */
	cluster_type_t min_perflevel_cluster_type = cpu_type_for_perflevel(__builtin_popcount(ml_get_cpu_types()) - 1);

	cachesize[0] = ml_get_machine_mem();
	cachesize[1] = cache_info_type(min_perflevel_cluster_type)->c_dsize; /* Using the DCache */
	cachesize[2] = cache_info_type(min_perflevel_cluster_type)->c_l2size;
#else
#error unknown architecture
#endif

	return SYSCTL_RETURN(req, cachesize);
}
648 
649 /* hw.pagesize and hw.tbfrequency are expected as 64 bit values */
650 static int
sysctl_pagesize(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)651 sysctl_pagesize
652 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
653 {
654 	vm_map_t map = get_task_map(current_task());
655 	long long l = vm_map_page_size(map);
656 	return sysctl_io_number(req, l, sizeof(l), NULL, NULL);
657 }
658 
659 static int
sysctl_pagesize32(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)660 sysctl_pagesize32
661 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
662 {
663 	long long l;
664 #if __arm64__
665 	l = (long long) (1 << page_shift_user32);
666 #else /* __arm64__ */
667 	l = (long long) PAGE_SIZE;
668 #endif /* __arm64__ */
669 	return sysctl_io_number(req, l, sizeof(l), NULL, NULL);
670 }
671 
672 static int
sysctl_tbfrequency(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)673 sysctl_tbfrequency
674 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
675 {
676 	long long l = gPEClockFrequencyInfo.timebase_frequency_hz;
677 	return sysctl_io_number(req, l, sizeof(l), NULL, NULL);
678 }
679 
680 /*
681  * Called by IOKit on Intel, or by sysctl_load_devicetree_entries()
682  */
683 void
sysctl_set_osenvironment(unsigned int size,const void * value)684 sysctl_set_osenvironment(unsigned int size, const void* value)
685 {
686 	if (osenvironment_size == 0 && size > 0) {
687 		osenvironment = zalloc_permanent(size, ZALIGN_NONE);
688 		if (osenvironment) {
689 			memcpy(osenvironment, value, size);
690 			osenvironment_size = size;
691 		}
692 	}
693 }
694 
/*
 * Mark the osenvironment value as published, then wake any thread
 * blocked in sysctl_osenvironment() waiting on it.  The increment must
 * precede the wakeup so waiters that re-check the flag see it set.
 */
void
sysctl_unblock_osenvironment(void)
{
	os_atomic_inc(&osenvironment_initialized, relaxed);
	thread_wakeup((event_t) &osenvironment_initialized);
}
701 
702 /*
703  * Create sysctl entries coming from device tree.
704  *
705  * Entries from device tree are loaded here because SecureDTLookupEntry() only works before
706  * PE_init_iokit(). Doing this also avoids the extern-C hackery to access these entries
707  * from IORegistry (which requires C++).
708  */
709 __startup_func
710 static void
sysctl_load_devicetree_entries(void)711 sysctl_load_devicetree_entries(void)
712 {
713 	DTEntry chosen;
714 	void const *value;
715 	unsigned int size;
716 
717 	if (kSuccess != SecureDTLookupEntry(0, "/chosen", &chosen)) {
718 		return;
719 	}
720 
721 	/* load osenvironment */
722 	if (kSuccess == SecureDTGetProperty(chosen, "osenvironment", (void const **) &value, &size)) {
723 		sysctl_set_osenvironment(size, value);
724 	}
725 
726 	/* load ephemeral_storage */
727 	if (kSuccess == SecureDTGetProperty(chosen, "ephemeral-storage", (void const **) &value, &size)) {
728 		if (size == sizeof(uint32_t)) {
729 			ephemeral_storage = *(uint32_t const *)value;
730 			property_existence.ephemeral_storage = 1;
731 		}
732 	}
733 
734 	/* load use_recovery_securityd */
735 	if (kSuccess == SecureDTGetProperty(chosen, "use-recovery-securityd", (void const **) &value, &size)) {
736 		if (size == sizeof(uint32_t)) {
737 			use_recovery_securityd = *(uint32_t const *)value;
738 			property_existence.use_recovery_securityd = 1;
739 		}
740 	}
741 }
742 STARTUP(SYSCTL, STARTUP_RANK_MIDDLE, sysctl_load_devicetree_entries);
743 
/*
 * hw.osenvironment: return the osenvironment string, or EINVAL if none
 * has been published yet.
 */
static int
sysctl_osenvironment
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#if defined(__x86_64__)
#if (DEVELOPMENT || DEBUG)
	/*
	 * On x86 dev/debug kernels, block until sysctl_unblock_osenvironment()
	 * has run so we don't report "not set" before the value is published.
	 * The assert_wait / re-check / clear_wait sequence closes the race
	 * where the wakeup fires between the first check and the wait
	 * registration.
	 */
	if (os_atomic_load(&osenvironment_initialized, relaxed) == 0) {
		assert_wait((event_t) &osenvironment_initialized, THREAD_UNINT);
		if (os_atomic_load(&osenvironment_initialized, relaxed) != 0) {
			clear_wait(current_thread(), THREAD_AWAKENED);
		} else {
			(void) thread_block(THREAD_CONTINUE_NULL);
		}
	}
#endif
#endif
	if (osenvironment_size > 0) {
		return SYSCTL_OUT(req, osenvironment, osenvironment_size);
	} else {
		return EINVAL;
	}
}
766 
767 static int
sysctl_ephemeral_storage(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)768 sysctl_ephemeral_storage
769 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
770 {
771 	if (property_existence.ephemeral_storage) {
772 		return SYSCTL_OUT(req, &ephemeral_storage, sizeof(ephemeral_storage));
773 	} else {
774 		return EINVAL;
775 	}
776 }
777 
778 static int
sysctl_use_recovery_securityd(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)779 sysctl_use_recovery_securityd
780 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
781 {
782 	if (property_existence.use_recovery_securityd) {
783 		return SYSCTL_OUT(req, &use_recovery_securityd, sizeof(use_recovery_securityd));
784 	} else {
785 		return EINVAL;
786 	}
787 }
788 
/*
 * Whether kernelmanagerd (vs. the legacy kextd) should be used.
 * On macOS: forced on for fileset kernelcaches, otherwise overridable
 * via the "kernelmanagerd" boot-arg.  Off elsewhere.
 */
static int
sysctl_use_kernelmanagerd
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#if XNU_TARGET_OS_OSX
	static int use_kernelmanagerd = 1;
	/*
	 * NOTE(review): `once` is not synchronized; two first readers could
	 * both run the initialization.  Both compute the same value, so this
	 * looks benign — confirm.
	 */
	static bool once = false;

	if (!once) {
		kc_format_t kc_format;
		PE_get_primary_kc_format(&kc_format);
		if (kc_format == KCFormatFileset) {
			use_kernelmanagerd = 1;
		} else {
			PE_parse_boot_argn("kernelmanagerd", &use_kernelmanagerd, sizeof(use_kernelmanagerd));
		}
		once = true;
	}
#else
	static int use_kernelmanagerd = 0;
#endif
	return SYSCTL_OUT(req, &use_kernelmanagerd, sizeof(use_kernelmanagerd));
}
812 
/* arg2 selectors for sysctl_bus_frequency() / sysctl_cpu_frequency() */
#define HW_LOCAL_FREQUENCY             1
#define HW_LOCAL_FREQUENCY_MIN         2
#define HW_LOCAL_FREQUENCY_MAX         3
#define HW_LOCAL_FREQUENCY_CLOCK_RATE  4
817 
818 static int
sysctl_bus_frequency(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,__unused struct sysctl_req * req)819 sysctl_bus_frequency
820 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
821 {
822 
823 #if DEBUG || DEVELOPMENT || !defined(__arm64__)
824 	switch (arg2) {
825 	case HW_LOCAL_FREQUENCY:
826 		return SYSCTL_RETURN(req, gPEClockFrequencyInfo.bus_frequency_hz);
827 	case HW_LOCAL_FREQUENCY_MIN:
828 		return SYSCTL_RETURN(req, gPEClockFrequencyInfo.bus_frequency_min_hz);
829 	case HW_LOCAL_FREQUENCY_MAX:
830 		return SYSCTL_RETURN(req, gPEClockFrequencyInfo.bus_frequency_max_hz);
831 	case HW_LOCAL_FREQUENCY_CLOCK_RATE:
832 		return SYSCTL_OUT(req, &gPEClockFrequencyInfo.bus_clock_rate_hz, sizeof(int));
833 	default:
834 		return EINVAL;
835 	}
836 #else
837 	return ENOENT;
838 #endif
839 }
840 
841 static int
sysctl_cpu_frequency(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,__unused struct sysctl_req * req)842 sysctl_cpu_frequency
843 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
844 {
845 
846 #if DEBUG || DEVELOPMENT || !defined(__arm64__)
847 	switch (arg2) {
848 	case HW_LOCAL_FREQUENCY:
849 		return SYSCTL_RETURN(req, gPEClockFrequencyInfo.cpu_frequency_hz);
850 	case HW_LOCAL_FREQUENCY_MIN:
851 		return SYSCTL_RETURN(req, gPEClockFrequencyInfo.cpu_frequency_min_hz);
852 	case HW_LOCAL_FREQUENCY_MAX:
853 		return SYSCTL_RETURN(req, gPEClockFrequencyInfo.cpu_frequency_max_hz);
854 	case HW_LOCAL_FREQUENCY_CLOCK_RATE:
855 		return SYSCTL_OUT(req, &gPEClockFrequencyInfo.cpu_clock_rate_hz, sizeof(int));
856 	default:
857 		return EINVAL;
858 	}
859 #else
860 	return ENOENT;
861 #endif
862 }
863 
864 /*
865  *  This sysctl will signal to userspace that a serial console is desired:
866  *
867  *    hw.serialdebugmode = 1 will load the serial console job in the multi-user session;
868  *    hw.serialdebugmode = 2 will load the serial console job in the base system as well
869  */
870 static int
sysctl_serialdebugmode(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)871 sysctl_serialdebugmode
872 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
873 {
874 	uint32_t serial_boot_arg;
875 	int serialdebugmode = 0;
876 
877 	if (PE_parse_boot_argn("serial", &serial_boot_arg, sizeof(serial_boot_arg)) &&
878 	    (serial_boot_arg & SERIALMODE_OUTPUT) && (serial_boot_arg & SERIALMODE_INPUT)) {
879 		serialdebugmode = (serial_boot_arg & SERIALMODE_BASE_TTY) ? 2 : 1;
880 	}
881 
882 	return sysctl_io_number(req, serialdebugmode, sizeof(serialdebugmode), NULL, NULL);
883 }
884 
885 /*
886  * This sysctl is a string that contains the jetsam properties path used by launchd to apply.
887  * jetsam properties to service. This sysctl is set once by launchd at boot and after userspace reboots,
888  * before it spawns any services.
889  */
890 #define kReadOnlyMempathEntitlement "com.apple.private.kernel.mempath-read-only"
891 static int
sysctl_mempath(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)892 sysctl_mempath
893 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
894 {
895 	int error = EINVAL;
896 	if (req->newptr != 0) {
897 		/* initproc is the only process that can write to this sysctl */
898 		if (proc_getpid(req->p) != 1) {
899 			return EPERM;
900 		}
901 		if (req->newlen > PATH_MAX) {
902 			return EOVERFLOW;
903 		}
904 		size_t mempath_new_size = req->newlen + 1;
905 		char *mempath_new = kalloc_data(mempath_new_size, Z_WAITOK);
906 		if (!mempath_new) {
907 			return ENOMEM;
908 		}
909 		mempath_new[mempath_new_size - 1] = '\0';
910 		error = SYSCTL_IN(req, mempath_new, mempath_new_size - 1);
911 		if (0 != error) {
912 			kfree_data(mempath_new, mempath_new_size);
913 			return error;
914 		}
915 		/* copy in was successful; swap out old/new buffers */
916 		if (NULL != mempath) {
917 			kfree_data(mempath, mempath_size);
918 		}
919 		mempath = mempath_new;
920 		mempath_size = mempath_new_size;
921 	} else {
922 		/* A read entitlement is required to read this sysctl */
923 		if (!IOCurrentTaskHasEntitlement(kReadOnlyMempathEntitlement)) {
924 			return EPERM;
925 		}
926 		error = EIO;
927 		if (mempath && mempath_size) {
928 			error = SYSCTL_OUT(req, mempath, mempath_size);
929 		}
930 	}
931 	return error;
932 }
933 
934 /*
935  * hw.* MIB variables.
936  */
937 SYSCTL_PROC(_hw, HW_NCPU, ncpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_NCPU, sysctl_hw_generic, "I", "");
938 SYSCTL_PROC(_hw, HW_AVAILCPU, activecpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_AVAILCPU, sysctl_hw_generic, "I", "");
939 SYSCTL_PROC(_hw, OID_AUTO, physicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_PHYSICALCPU, sysctl_hw_generic, "I", "");
940 SYSCTL_PROC(_hw, OID_AUTO, physicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_PHYSICALCPUMAX, sysctl_hw_generic, "I", "");
941 SYSCTL_PROC(_hw, OID_AUTO, logicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_LOGICALCPU, sysctl_hw_generic, "I", "");
942 SYSCTL_PROC(_hw, OID_AUTO, logicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_LOGICALCPUMAX, sysctl_hw_generic, "I", "");
943 SYSCTL_INT(_hw, HW_BYTEORDER, byteorder, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (int *)NULL, BYTE_ORDER, "");
944 SYSCTL_PROC(_hw, OID_AUTO, cputype, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_CPUTYPE, sysctl_hw_generic, "I", "");
945 SYSCTL_PROC(_hw, OID_AUTO, cpusubtype, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_CPUSUBTYPE, sysctl_hw_generic, "I", "");
946 SYSCTL_INT(_hw, OID_AUTO, cpu64bit_capable, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &cpu64bit, 0, "");
947 SYSCTL_PROC(_hw, OID_AUTO, cpufamily, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_CPUFAMILY, sysctl_hw_generic, "I", "");
948 SYSCTL_PROC(_hw, OID_AUTO, cpusubfamily, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_CPUSUBFAMILY, sysctl_hw_generic, "I", "");
949 SYSCTL_OPAQUE(_hw, OID_AUTO, cacheconfig, CTLFLAG_RD | CTLFLAG_LOCKED, &cacheconfig, sizeof(cacheconfig), "Q", "");
950 SYSCTL_PROC(_hw, OID_AUTO, cachesize, CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_hw_cachesize, "Q", "");
951 SYSCTL_PROC(_hw, OID_AUTO, pagesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_pagesize, "Q", "");
952 SYSCTL_PROC(_hw, OID_AUTO, pagesize32, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_pagesize32, "Q", "");
953 SYSCTL_PROC(_hw, OID_AUTO, busfrequency, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY, sysctl_bus_frequency, "Q", "");
954 SYSCTL_PROC(_hw, OID_AUTO, busfrequency_min, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_MIN, sysctl_bus_frequency, "Q", "");
955 SYSCTL_PROC(_hw, OID_AUTO, busfrequency_max, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_MAX, sysctl_bus_frequency, "Q", "");
956 SYSCTL_PROC(_hw, OID_AUTO, cpufrequency, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY, sysctl_cpu_frequency, "Q", "");
957 SYSCTL_PROC(_hw, OID_AUTO, cpufrequency_min, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_MIN, sysctl_cpu_frequency, "Q", "");
958 SYSCTL_PROC(_hw, OID_AUTO, cpufrequency_max, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_MAX, sysctl_cpu_frequency, "Q", "");
959 SYSCTL_PROC(_hw, OID_AUTO, cachelinesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_CACHELINE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", "");
960 SYSCTL_PROC(_hw, OID_AUTO, l1icachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L1ICACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", "");
961 SYSCTL_PROC(_hw, OID_AUTO, l1dcachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L1DCACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", "");
962 SYSCTL_PROC(_hw, OID_AUTO, l2cachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L2CACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", "");
963 SYSCTL_PROC(_hw, OID_AUTO, l3cachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L3CACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", "");
#if defined(__arm64__) && (DEBUG || DEVELOPMENT)
/* Raw memory/PRF/fixed clock rates; exposed only on arm64 DEBUG/DEVELOPMENT kernels. */
SYSCTL_QUAD(_hw, OID_AUTO, memfrequency, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.mem_frequency_hz, "");
SYSCTL_QUAD(_hw, OID_AUTO, memfrequency_min, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.mem_frequency_min_hz, "");
SYSCTL_QUAD(_hw, OID_AUTO, memfrequency_max, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.mem_frequency_max_hz, "");
SYSCTL_QUAD(_hw, OID_AUTO, prffrequency, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.prf_frequency_hz, "");
SYSCTL_QUAD(_hw, OID_AUTO, prffrequency_min, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.prf_frequency_min_hz, "");
SYSCTL_QUAD(_hw, OID_AUTO, prffrequency_max, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.prf_frequency_max_hz, "");
SYSCTL_QUAD(_hw, OID_AUTO, fixfrequency, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.fix_frequency_hz, "");
#endif /* __arm64__ && (DEBUG || DEVELOPMENT) */
SYSCTL_PROC(_hw, OID_AUTO, tbfrequency, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_tbfrequency, "Q", "");
/**
 * The naming around the sysctls for max_mem and max_mem_actual are different between macOS and
 * non-macOS platforms because historically macOS's hw.memsize provided the value of the actual
 * physical memory size, whereas on non-macOS it is the memory size minus any carveouts.
 */
#if XNU_TARGET_OS_OSX
SYSCTL_QUAD(_hw, HW_MEMSIZE, memsize, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &max_mem_actual, "");
SYSCTL_QUAD(_hw, OID_AUTO, memsize_usable, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &max_mem, "");
#else
SYSCTL_QUAD(_hw, HW_MEMSIZE, memsize, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &max_mem, "");
SYSCTL_QUAD(_hw, OID_AUTO, memsize_physical, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &max_mem_actual, "");
#endif /* XNU_TARGET_OS_OSX */
SYSCTL_INT(_hw, OID_AUTO, packages, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &packages, 0, "");
#if defined(XNU_TARGET_OS_XR)
SYSCTL_UINT(_hw, OID_AUTO, chiprole, CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPlatformChipRole, 1, "");
#endif /* XNU_TARGET_OS_XR */
SYSCTL_PROC(_hw, OID_AUTO, osenvironment, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_osenvironment, "A", "");
SYSCTL_PROC(_hw, OID_AUTO, ephemeral_storage, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_ephemeral_storage, "I", "");
SYSCTL_PROC(_hw, OID_AUTO, use_recovery_securityd, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_use_recovery_securityd, "I", "");
SYSCTL_PROC(_hw, OID_AUTO, use_kernelmanagerd, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_use_kernelmanagerd, "I", "");
SYSCTL_PROC(_hw, OID_AUTO, serialdebugmode, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_serialdebugmode, "I", "");
SYSCTL_PROC(_hw, OID_AUTO, mempath, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_mempath, "A", "");
996 
997 /*
998  * hw.perflevelN.* variables.
999  * Users may check these to determine properties that vary across different CPU types, such as number of CPUs,
1000  * or cache sizes. Perflevel 0 corresponds to the highest performance one.
1001  */
1002 SYSCTL_NODE(_hw, OID_AUTO, perflevel0, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, NULL, "Perf level 0 topology and cache geometry paramaters");
1003 SYSCTL_NODE(_hw, OID_AUTO, perflevel1, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, NULL, "Perf level 1 topology and cache geometry paramaters");
1004 SYSCTL_PROC(_hw, OID_AUTO, nperflevels, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_NPERFLEVELS, sysctl_hw_generic, "I", "Number of performance levels supported by this system");
1005 
/* hw.perflevel0.* — arg1 = (void *)0 selects perf level 0 (the highest-performance level). */
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, physicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_PHYSICALCPU, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, physicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_PHYSICALCPUMAX, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, logicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_LOGICALCPU, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, logicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_LOGICALCPUMAX, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, l1icachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_L1ICACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, l1dcachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_L1DCACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, l2cachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_L2CACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, cpusperl2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_CPUSPERL2, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, l3cachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_L3CACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, cpusperl3, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_CPUSPERL3, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, name, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_NAME, sysctl_hw_generic, "A", "");

/* hw.perflevel1.* — arg1 = (void *)1 selects perf level 1. */
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, physicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_PHYSICALCPU, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, physicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_PHYSICALCPUMAX, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, logicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_LOGICALCPU, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, logicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_LOGICALCPUMAX, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, l1icachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_L1ICACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, l1dcachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_L1DCACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, l2cachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_L2CACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, cpusperl2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_CPUSPERL2, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, l3cachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_L3CACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, cpusperl3, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_CPUSPERL3, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, name, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_NAME, sysctl_hw_generic, "A", "");
1029 
1030 /*
1031  * Optional CPU features can register nodes below hw.optional.
1032  *
1033  * If the feature is not present, the node should either not be registered,
1034  * or it should return 0.  If the feature is present, the node should return
1035  * 1.
1036  */
1037 SYSCTL_NODE(_hw, OID_AUTO, optional, CTLFLAG_RW | CTLFLAG_LOCKED, NULL, "optional features");
1038 SYSCTL_NODE(_hw_optional, OID_AUTO, arm, CTLFLAG_RW | CTLFLAG_LOCKED, NULL, "optional features for ARM processors");
1039 
1040 SYSCTL_INT(_hw_optional, OID_AUTO, floatingpoint, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (int *)NULL, 1, "");      /* always set */
1041 
1042 /*
1043  * Optional device hardware features can be registered by drivers below hw.features
1044  */
1045 SYSCTL_EXTENSIBLE_NODE(_hw, OID_AUTO, features, CTLFLAG_RD | CTLFLAG_LOCKED, NULL, "hardware features");
1046 
1047 /*
1048  * Deprecated variables.  These are supported for backwards compatibility
1049  * purposes only.  The MASKED flag requests that the variables not be
1050  * printed by sysctl(8) and similar utilities.
1051  *
1052  * The variables named *_compat here are int-sized versions of variables
1053  * that are now exported as quads.  The int-sized versions are normally
 * looked up only by number, whereas the quad-sized versions should be
1055  * looked up by name.
1056  *
1057  * The *_compat nodes are *NOT* visible within the kernel.
1058  */
1059 
/* Deprecated int-sized compat OIDs; CTLFLAG_MASKED hides them from sysctl(8) listings. */
SYSCTL_PROC(_hw, HW_PAGESIZE, pagesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_PAGESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, HW_BUS_FREQ, busfrequency_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_CLOCK_RATE, sysctl_bus_frequency, "I", "");
SYSCTL_PROC(_hw, HW_CPU_FREQ, cpufrequency_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_CLOCK_RATE, sysctl_cpu_frequency, "I", "");
SYSCTL_PROC(_hw, HW_CACHELINE, cachelinesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_CACHELINE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, HW_L1ICACHESIZE, l1icachesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L1ICACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, HW_L1DCACHESIZE, l1dcachesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L1DCACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, HW_L2CACHESIZE, l2cachesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L2CACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, HW_L3CACHESIZE, l3cachesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L3CACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_COMPAT_INT(_hw, HW_TB_FREQ, tbfrequency_compat, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.timebase_frequency_hz, 0, "");
/* Numbered-OID string variants, normally reached by MIB number rather than name. */
SYSCTL_PROC(_hw, HW_MACHINE, machine, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_MACHINE, sysctl_hw_generic, "A", "");
SYSCTL_PROC(_hw, HW_MODEL, model, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_MODEL, sysctl_hw_generic, "A", "");
SYSCTL_PROC(_hw, HW_TARGET, target, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_TARGET, sysctl_hw_generic, "A", "");
SYSCTL_PROC(_hw, HW_PRODUCT, product, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_PRODUCT, sysctl_hw_generic, "A", "");
SYSCTL_COMPAT_UINT(_hw, HW_PHYSMEM, physmem, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, &mem_size, 0, "");
SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_USERMEM, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, HW_EPOCH, epoch, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_EPOCH, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, HW_VECTORUNIT, vectorunit, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_VECTORUNIT, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, HW_L2SETTINGS, l2settings, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L2SETTINGS, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, HW_L3SETTINGS, l3settings, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L3SETTINGS, sysctl_hw_generic, "I", "");
/* CTLFLAG_NOAUTO: registered manually (value set in sysctl_mib_startup). */
SYSCTL_INT(_hw, OID_AUTO, cputhreadtype, CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, &cputhreadtype, 0, "");
1080 
1081 #if defined(__i386__) || defined(__x86_64__) || CONFIG_X86_64_COMPAT
1082 static int
sysctl_cpu_capability(__unused struct sysctl_oid * oidp,void * arg1,__unused int arg2,struct sysctl_req * req)1083 sysctl_cpu_capability
1084 (__unused struct sysctl_oid *oidp, void *arg1, __unused int arg2, struct sysctl_req *req)
1085 {
1086 	uint64_t    caps;
1087 	caps = _get_cpu_capabilities();
1088 
1089 	uint64_t        mask = (uint64_t) (uintptr_t) arg1;
1090 	boolean_t       is_capable = (caps & mask) != 0;
1091 
1092 	return SYSCTL_OUT(req, &is_capable, sizeof(is_capable));
1093 }
/* Identity macro: keeps the capability constant's name visible at each use site. */
#define capability(name) name


/* Each node passes its kHas* mask as arg1 to sysctl_cpu_capability. */
SYSCTL_PROC(_hw_optional, OID_AUTO, mmx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasMMX), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, sse, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, sse2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE2), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, sse3, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE3), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, supplementalsse3, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSupplementalSSE3), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, sse4_1, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE4_1), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, sse4_2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE4_2), 0, sysctl_cpu_capability, "I", "");
/* "x86_64" is actually a preprocessor symbol on the x86_64 kernel, so we have to hack this */
#undef x86_64
SYSCTL_PROC(_hw_optional, OID_AUTO, x86_64, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(k64Bit), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, aes, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAES), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx1_0, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX1_0), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, rdrand, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasRDRAND), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, f16c, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasF16C), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, enfstrg, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasENFSTRG), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, fma, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasFMA), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx2_0, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX2_0), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, bmi1, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasBMI1), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, bmi2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasBMI2), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, rtm, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasRTM), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, hle, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasHLE), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, adx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasADX), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, mpx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasMPX), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, sgx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSGX), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512f, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512F), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512cd, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512CD), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512dq, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512DQ), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512bw, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512BW), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512vl, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512VL), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512ifma, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512IFMA), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512vbmi, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512VBMI), 0, sysctl_cpu_capability, "I", "");
#undef capability
#endif /* __i386__ || __x86_64__ || CONFIG_X86_64_COMPAT */
1130 
#if defined (__arm64__)
/* Populated at boot from arm_debug_info() in sysctl_mib_init(). */
int watchpoint_flag = 0;
int breakpoint_flag = 0;

SECURITY_READ_ONLY_LATE(int) gUCNormalMem = 0;

#if defined (__arm64__)
SECURITY_READ_ONLY_LATE(int) arm64_flag = 1;
#else /* !__arm64__ */
SECURITY_READ_ONLY_LATE(int) arm64_flag = 0;
#endif /* __arm64__ */

/* ARM Optional Feature Sysctls */
/*
 * For each feature named in arm_features.inc, define a gARM_<flag> backing
 * variable and publish it as hw.optional.arm.<flag>.
 */
#define ARM_FEATURE_FLAG(flag_name) \
	SECURITY_READ_ONLY_LATE(int) gARM_ ## flag_name = 0; \
	SYSCTL_INT(_hw_optional_arm, OID_AUTO, flag_name, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_ ## flag_name, 0, "")
#include <arm/arm_features.inc>
#undef ARM_FEATURE_FLAG

/* Legacy Names ARM Optional Feature Sysctls */
/* Re-export selected feature flags under their historical hw.optional.<name> spellings. */
#define LEGACY_ARM_SYSCTL(legacy_name, flag_name) \
	SYSCTL_INT(_hw_optional, OID_AUTO, legacy_name, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_ ## flag_name, 0, "")

LEGACY_ARM_SYSCTL(neon, AdvSIMD);
LEGACY_ARM_SYSCTL(neon_hpfp, AdvSIMD_HPFPCvt);
LEGACY_ARM_SYSCTL(neon_fp16, FEAT_FP16);
LEGACY_ARM_SYSCTL(armv8_crc32, FEAT_CRC32);
LEGACY_ARM_SYSCTL(armv8_gpi, FEAT_PACIMP);
LEGACY_ARM_SYSCTL(armv8_1_atomics, FEAT_LSE);
LEGACY_ARM_SYSCTL(armv8_2_fhm, FEAT_FHM);
LEGACY_ARM_SYSCTL(armv8_2_sha512, FEAT_SHA512);
LEGACY_ARM_SYSCTL(armv8_2_sha3, FEAT_SHA3);
LEGACY_ARM_SYSCTL(armv8_3_compnum, FEAT_FCMA);

/* Misc ARM Optional Feature Sysctls */
SYSCTL_INT(_hw_optional, OID_AUTO, watchpoint, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &watchpoint_flag, 0, "");
SYSCTL_INT(_hw_optional, OID_AUTO, breakpoint, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &breakpoint_flag, 0, "");

SYSCTL_INT(_hw_optional, OID_AUTO, ucnormal_mem, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gUCNormalMem, 0, "");
1170 
/* Diagnostic-only nodes: visible on DEBUG/DEVELOPMENT kernels. */
#if DEBUG || DEVELOPMENT
#if __ARM_KERNEL_PROTECT__
static SECURITY_READ_ONLY_LATE(int) arm_kernel_protect = 1;
#else
static SECURITY_READ_ONLY_LATE(int) arm_kernel_protect = 0;
#endif
SYSCTL_INT(_hw_optional, OID_AUTO, arm_kernel_protect, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &arm_kernel_protect, 0, "");
#endif /* DEBUG || DEVELOPMENT */

#if DEBUG || DEVELOPMENT
static int ic_inval_filters = 0;
SYSCTL_INT(_hw_optional, OID_AUTO, ic_inval_filters, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &ic_inval_filters, 0, "");
#endif /* DEBUG || DEVELOPMENT */

#if DEBUG || DEVELOPMENT
static SECURITY_READ_ONLY_LATE(int) wkdm_popcount = 0;
SYSCTL_INT(_hw_optional, OID_AUTO, wkdm_popcount, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &wkdm_popcount, 0, "");
#endif /* DEBUG || DEVELOPMENT */

#if DEBUG || DEVELOPMENT
#if __has_feature(ptrauth_calls)
static SECURITY_READ_ONLY_LATE(int) ptrauth = 1;
#else
static SECURITY_READ_ONLY_LATE(int) ptrauth = 0;
#endif
SYSCTL_INT(_hw_optional, OID_AUTO, ptrauth, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &ptrauth, 0, "");
#endif /* DEBUG || DEVELOPMENT */

/*
 * Without this little ifdef dance, the preprocessor replaces "arm64" with "1",
 * leaving us with a less-than-helpful sysctl.hwoptional.1.
 */
#ifdef arm64
#undef arm64
SYSCTL_INT(_hw_optional, OID_AUTO, arm64, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &arm64_flag, 0, "");
#define arm64 1
#else
SYSCTL_INT(_hw_optional, OID_AUTO, arm64, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &arm64_flag, 0, "");
#endif
#endif /* __arm64__ */
1211 
1212 #if defined (__arm64__)
1213 
1214 /*
1215  * Generate an uint64_t containing one bit per FEAT extension, reporting
1216  * the presence of each extension.
1217  */
static int
sysctl_hw_caps(__unused struct sysctl_oid *oidp, __unused void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
	/* Local buffer, one bit per FEAT, reset to 0, set if FEAT present. */
	#define CAP_BYTE_NB ((CAP_BIT_NB + 7) / 8)
	uint8_t feats[CAP_BYTE_NB] = {0};

	/* Write a bit in the local buffer: bit n lands in byte n>>3, position n&7. */
	#define CAP_DO_SET_BIT(n) { \
	        assert((n) < CAP_BIT_NB); \
	        const uint32_t word_id = ((uint32_t) (n)) >> 3; \
	        assert(word_id < CAP_BYTE_NB); \
	        const uint8_t bit_id = ((uint32_t) (n)) & 0x7; \
	        feats[word_id] |= ((uint8_t) 1) << bit_id; \
	}

	/* Write a capability bit in the local buffer. */
	#define CAP_SET_BIT_(var, name) \
	if (var) { \
	        CAP_DO_SET_BIT(CAP_BIT_##name); \
	}
	#define CAP_SET_BIT(name) CAP_SET_BIT_(gARM_##name, name)

	/*
	 * Write a capability field in the local buffer.
	 * NOTE(review): not expanded by the arm_features.inc include below —
	 * presumably reserved for multi-bit capability fields; confirm before removing.
	 */
	#define CAP_SET_FIELD(name, val) { \
	        const uint32_t start = CAP_BIT_##name; \
	        const uint32_t end = start + CAP_BIT_##name##_LEN; \
	        uint32_t src = CAP_BIT_##name##_##val; \
	        for (uint32_t id = start; id < end; id++) { \
	                if (src & 1) { \
	                        CAP_DO_SET_BIT(id); \
	                } \
	                src >>= 1; \
	        } \
	}

	/* Report presence of all FEATs: each gARM_* flag sets its CAP_BIT_* bit. */
	#define ARM_FEATURE_FLAG(x) CAP_SET_BIT(x)
	#include <arm/arm_features.inc>
	#undef ARM_FEATURE_FLAG


	/* Write the local buffer to userspace and complete. */
	return SYSCTL_OUT(req, feats, CAP_BYTE_NB);
}
/* Bitmap of FEAT_* extensions, built on demand by sysctl_hw_caps(). */
SYSCTL_PROC(_hw_optional_arm, OID_AUTO, caps, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_hw_caps, "Q", "");

/* Maximum SME streaming vector length in bytes (defined elsewhere). */
extern int sme_max_svl_b;
SYSCTL_INT(_hw_optional_arm, OID_AUTO, sme_max_svl_b, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &sme_max_svl_b, 0, "");
#endif /* __arm64__ */
1269 
1270 
1271 #if defined(__arm64__) && defined(CONFIG_XNUPOST)
1272 /**
1273  * Test whether the new values for a few hw.optional sysctls matches the legacy
1274  * way of obtaining that information.
1275  *
1276  * Specifically, hw.optional.neon_fp16 has been used to indicate both FEAT_FP16
1277  * and FEAT_FHM, as we are now grabbing the information directly from the ISA
1278  * status registers instead of from the arm_mvfp_info, we need to check that
1279  * this new source won't break any existing usecases of the sysctl and assert
1280  * that hw.optional.neon_fp16 will return the same value as it used to for all
1281  * devices.
1282  */
1283 kern_return_t
arm_cpu_capabilities_legacy_test(void)1284 arm_cpu_capabilities_legacy_test(void)
1285 {
1286 	T_SETUPBEGIN;
1287 	arm_mvfp_info_t *mvfp_info = arm_mvfp_info();
1288 	T_ASSERT_NOTNULL(mvfp_info, "arm_mvfp_info returned null pointer.");
1289 	T_SETUPEND;
1290 
1291 
1292 	T_EXPECT_EQ_INT(mvfp_info->neon, gARM_AdvSIMD, "neon value should match legacy");
1293 	T_EXPECT_EQ_INT(mvfp_info->neon_hpfp, gARM_AdvSIMD_HPFPCvt, "neon hpfp cvt value should match legacy");
1294 	T_EXPECT_EQ_INT(mvfp_info->neon_fp16, gARM_FEAT_FP16, "neon fp16 value should match legacy");
1295 
1296 	T_LOG("Completed arm cpu capabalities legacy compliance test.");
1297 	return KERN_SUCCESS;
1298 }
1299 #endif /* defined(__arm64__) && defined(CONFIG_XNUPOST) */
1300 
1301 /******************************************************************************
1302  * Generic MIB initialisation.
1303  *
1304  * This is a hack, and should be replaced with SYSINITs
1305  * at some point.
1306  */
/*
 * Populate the architecture-dependent hw.* MIB backing variables
 * (cpu64bit, cacheconfig, packages, and on arm64 the debug-register
 * counts) at boot.  Called once during startup; the globals it writes
 * are declared elsewhere in this file.
 */
void
sysctl_mib_init(void)
{
#if defined(__i386__) || defined (__x86_64__)
	/* 64-bit capability comes from the commpage capability bits on x86. */
	cpu64bit = (_get_cpu_capabilities() & k64Bit) == k64Bit;
#elif defined (__arm64__)
	/* On arm64, derive it from the CPU type's ABI64 flag instead. */
	cpu64bit = (cpu_type() & CPU_ARCH_ABI64) == CPU_ARCH_ABI64;
#else
#error Unsupported arch
#endif
#if defined (__i386__) || defined (__x86_64__)
	/*
	 * hw.cacheconfig: entry [i] is the number of logical CPUs sharing
	 * the cache at level i (level 0 = memory, i.e. all CPUs).
	 */
	cacheconfig[0] = ml_cpu_cache_sharing(0, CLUSTER_TYPE_SMP, true);
	cacheconfig[1] = ml_cpu_cache_sharing(1, CLUSTER_TYPE_SMP, true);
	cacheconfig[2] = ml_cpu_cache_sharing(2, CLUSTER_TYPE_SMP, true);
	cacheconfig[3] = ml_cpu_cache_sharing(3, CLUSTER_TYPE_SMP, true);
	cacheconfig[4] = 0;

	/* hw.packages: physical packages = CPUs sharing memory / threads per package. */
	packages = (int)(roundup(ml_cpu_cache_sharing(0, CLUSTER_TYPE_SMP, true), cpuid_info()->thread_count)
	    / cpuid_info()->thread_count);

#elif defined(__arm64__) /* end __i386 */
	/* Advertise hardware watchpoint/breakpoint pair counts from the debug info. */
	watchpoint_flag = arm_debug_info()->num_watchpoint_pairs;
	breakpoint_flag = arm_debug_info()->num_breakpoint_pairs;

	/*
	 * Use the cluster type of the lowest (last-indexed) perflevel when
	 * reporting per-level cache sharing below.
	 */
	cluster_type_t min_perflevel_cluster_type = cpu_type_for_perflevel(__builtin_popcount(ml_get_cpu_types()) - 1);

	/*
	 * NOTE(review): on arm64, cacheconfig[0] is the max CPU count rather
	 * than an ml_cpu_cache_sharing(0, ...) query as on x86 — presumably
	 * equivalent (level 0 == all CPUs); confirm.  Entries beyond L2 are
	 * zeroed.  Writing indices 0..6 assumes cacheconfig has >= 7 slots;
	 * the array is declared outside this view — verify its size.
	 */
	cacheconfig[0] = ml_wait_max_cpus();
	cacheconfig[1] = ml_cpu_cache_sharing(1, min_perflevel_cluster_type, true);
	cacheconfig[2] = ml_cpu_cache_sharing(2, min_perflevel_cluster_type, true);
	cacheconfig[3] = 0;
	cacheconfig[4] = 0;
	cacheconfig[5] = 0;
	cacheconfig[6] = 0;

	/* Apple SoCs are single-package. */
	packages = 1;
#else
#error unknown architecture
#endif /* !__i386__ && !__x86_64 && !__arm64__ */
}
1348 
/*
 * Early-boot registration of conditionally-present hw.* sysctl OIDs.
 * Runs at STARTUP_RANK_MIDDLE of the SYSCTL startup phase (see the
 * STARTUP() registration below).
 */
__startup_func
static void
sysctl_mib_startup(void)
{
	/* Cache the thread type once; also consulted when deciding below. */
	cputhreadtype = cpu_threadtype();

	/*
	 * Populate the optional portion of the hw.* MIB.
	 *
	 * XXX This could be broken out into parts of the code
	 *     that actually directly relate to the functions in
	 *     question.
	 */

	/* Only expose hw.cputhreadtype when the CPU reports a thread type. */
	if (cputhreadtype != CPU_THREADTYPE_NONE) {
		sysctl_register_oid_early(&sysctl__hw_cputhreadtype);
	}

#if defined(XNU_TARGET_OS_XR)
	/* Expose hw.chiprole only when the platform reported a chip role. */
	if (gPlatformChipRole != UINT32_MAX) {
		sysctl_register_oid_early(&sysctl__hw_chiprole);
	}
#endif /* XNU_TARGET_OS_XR */
}
STARTUP(SYSCTL, STARTUP_RANK_MIDDLE, sysctl_mib_startup);
1374