/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 *	host.c
 *
 *	Non-ipc host functions.
 */

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_info.h>
#include <mach/host_special_ports.h>
#include <mach/kern_return.h>
#include <mach/machine.h>
#include <mach/port.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <mach/processor.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map.h>
#include <mach/task_info.h>

#include <machine/commpage.h>
#include <machine/cpu_capabilities.h>

#include <device/device_port.h>

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/kalloc.h>
#include <kern/ecc.h>
#include <kern/host.h>
#include <kern/host_statistics.h>
#include <kern/ipc_host.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/processor.h>
#include <kern/mach_node.h>     // mach_node_port_changed()

#include <vm/vm_map_xnu.h>
#include <vm/vm_purgeable_xnu.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern_xnu.h>

#include <IOKit/IOBSD.h> // IOTaskHasEntitlement
#include <IOKit/IOKitKeys.h> // DriverKit entitlement strings

#if CONFIG_ATM
#include <atm/atm_internal.h>
#endif

#if CONFIG_MACF
#include <security/mac_mach_internal.h>
#endif

#if CONFIG_CSR
#include <sys/csr.h>
#endif

#include <pexpert/pexpert.h>

SCALABLE_COUNTER_DEFINE(vm_statistics_zero_fill_count);        /* # of zero fill pages */
SCALABLE_COUNTER_DEFINE(vm_statistics_reactivations);          /* # of pages reactivated */
SCALABLE_COUNTER_DEFINE(vm_statistics_pageins);                /* # of pageins */
SCALABLE_COUNTER_DEFINE(vm_statistics_pageouts);               /* # of pageouts */
SCALABLE_COUNTER_DEFINE(vm_statistics_faults);                 /* # of faults */
SCALABLE_COUNTER_DEFINE(vm_statistics_cow_faults);             /* # of copy-on-writes */
SCALABLE_COUNTER_DEFINE(vm_statistics_lookups);                /* object cache lookups */
SCALABLE_COUNTER_DEFINE(vm_statistics_hits);                   /* object cache hits */
SCALABLE_COUNTER_DEFINE(vm_statistics_purges);                 /* # of pages purged */
SCALABLE_COUNTER_DEFINE(vm_statistics_decompressions);         /* # of pages decompressed */
SCALABLE_COUNTER_DEFINE(vm_statistics_compressions);           /* # of pages compressed */
SCALABLE_COUNTER_DEFINE(vm_statistics_swapins);                /* # of pages swapped in (via compression segments) */
SCALABLE_COUNTER_DEFINE(vm_statistics_swapouts);               /* # of pages swapped out (via compression segments) */
SCALABLE_COUNTER_DEFINE(vm_statistics_total_uncompressed_pages_in_compressor); /* # of pages (uncompressed) held within the compressor. */
SCALABLE_COUNTER_DEFINE(vm_page_grab_count);

host_data_t realhost;

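/*
 *	get_host_vm_stats:
 *
 *	Snapshot the scalable VM statistics counters into a
 *	vm_statistics64 structure.
 */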
static void
get_host_vm_stats(vm_statistics64_t out)
{
	out->zero_fill_count = counter_load(&vm_statistics_zero_fill_count);
	out->reactivations = counter_load(&vm_statistics_reactivations);
	out->pageins = counter_load(&vm_statistics_pageins);
	out->pageouts = counter_load(&vm_statistics_pageouts);
	out->faults = counter_load(&vm_statistics_faults);
	out->cow_faults = counter_load(&vm_statistics_cow_faults);
	out->lookups = counter_load(&vm_statistics_lookups);
	out->hits = counter_load(&vm_statistics_hits);
	out->compressions = counter_load(&vm_statistics_compressions);
	out->decompressions = counter_load(&vm_statistics_decompressions);
	out->swapins = counter_load(&vm_statistics_swapins);
	out->swapouts = counter_load(&vm_statistics_swapouts);
}
vm_extmod_statistics_data_t host_extmod_statistics;

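/*
 *	host_processors:
 *
 *	Return an array of send rights for all processors
 *	on the host (privileged interface).
 */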
kern_return_t
host_processors(host_priv_t host_priv, processor_array_t * out_array, mach_msg_type_number_t * countp)
{
	if (host_priv == HOST_PRIV_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	unsigned int count = processor_count;
	assert(count != 0);

	static_assert(sizeof(mach_port_t) == sizeof(processor_t));

	mach_port_array_t ports = mach_port_array_alloc(count, Z_WAITOK);
	if (!ports) {
		return KERN_RESOURCE_SHORTAGE;
	}

	for (unsigned int i = 0; i < count; i++) {
		processor_t processor = processor_array[i];
		assert(processor != PROCESSOR_NULL);

		/* do the conversion that Mig should handle */
		ports[i].port = convert_processor_to_port(processor);
	}

	*countp = count;
	*out_array = ports;

	return KERN_SUCCESS;
}

extern int sched_allow_NO_SMT_threads;

kern_return_t
host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	switch (flavor) {
	case HOST_BASIC_INFO: {
		host_basic_info_t basic_info;
		int master_id = master_processor->cpu_id;

		/*
		 *	Basic information about this host.
		 */
		if (*count < HOST_BASIC_INFO_OLD_COUNT) {
			return KERN_FAILURE;
		}

		basic_info = (host_basic_info_t)info;

		basic_info->memory_size = machine_info.memory_size;
		basic_info->cpu_type = slot_type(master_id);
		basic_info->cpu_subtype = slot_subtype(master_id);
		basic_info->max_cpus = machine_info.max_cpus;
#if CONFIG_SCHED_SMT
		if (sched_allow_NO_SMT_threads && current_task()->t_flags & TF_NO_SMT) {
			basic_info->avail_cpus = primary_processor_avail_count_user;
		} else {
			basic_info->avail_cpus = processor_avail_count_user;
		}
#else
		basic_info->avail_cpus = processor_avail_count;
#endif


		if (*count >= HOST_BASIC_INFO_COUNT) {
			basic_info->cpu_threadtype = slot_threadtype(master_id);
			basic_info->physical_cpu = machine_info.physical_cpu;
			basic_info->physical_cpu_max = machine_info.physical_cpu_max;
#if defined(__x86_64__)
			basic_info->logical_cpu = basic_info->avail_cpus;
#else
			basic_info->logical_cpu = machine_info.logical_cpu;
#endif
			basic_info->logical_cpu_max = machine_info.logical_cpu_max;
			basic_info->max_mem = machine_info.max_mem;

			*count = HOST_BASIC_INFO_COUNT;
		} else {
			*count = HOST_BASIC_INFO_OLD_COUNT;
		}

		return KERN_SUCCESS;
	}

	case HOST_SCHED_INFO: {
		host_sched_info_t sched_info;
		uint32_t quantum_time;
		uint64_t quantum_ns;

		/*
		 *	Return scheduler information.
		 */
		if (*count < HOST_SCHED_INFO_COUNT) {
			return KERN_FAILURE;
		}

		sched_info = (host_sched_info_t)info;

		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);

		sched_info->min_timeout = sched_info->min_quantum = (uint32_t)(quantum_ns / 1000 / 1000);

		*count = HOST_SCHED_INFO_COUNT;

		return KERN_SUCCESS;
	}

	case HOST_RESOURCE_SIZES: {
		/*
		 * Return sizes of kernel data structures
		 */
		if (*count < HOST_RESOURCE_SIZES_COUNT) {
			return KERN_FAILURE;
		}

		/* XXX Fail until ledgers are implemented */
		return KERN_INVALID_ARGUMENT;
	}

	case HOST_PRIORITY_INFO: {
		host_priority_info_t priority_info;

		if (*count < HOST_PRIORITY_INFO_COUNT) {
			return KERN_FAILURE;
		}

		priority_info = (host_priority_info_t)info;

		priority_info->kernel_priority = MINPRI_KERNEL;
		priority_info->system_priority = MINPRI_KERNEL;
		priority_info->server_priority = MINPRI_RESERVED;
		priority_info->user_priority = BASEPRI_DEFAULT;
		priority_info->depress_priority = DEPRESSPRI;
		priority_info->idle_priority = IDLEPRI;
		priority_info->minimum_priority = MINPRI_USER;
		priority_info->maximum_priority = MAXPRI_RESERVED;

		*count = HOST_PRIORITY_INFO_COUNT;

		return KERN_SUCCESS;
	}

	/*
	 * Gestalt for various trap facilities.
	 */
	case HOST_MACH_MSG_TRAP:
	case HOST_SEMAPHORE_TRAPS: {
		*count = 0;
		return KERN_SUCCESS;
	}

	case HOST_CAN_HAS_DEBUGGER: {
		host_can_has_debugger_info_t can_has_debugger_info;

		if (*count < HOST_CAN_HAS_DEBUGGER_COUNT) {
			return KERN_FAILURE;
		}

		can_has_debugger_info = (host_can_has_debugger_info_t)info;
		can_has_debugger_info->can_has_debugger = PE_i_can_has_debugger(NULL);
		*count = HOST_CAN_HAS_DEBUGGER_COUNT;

		return KERN_SUCCESS;
	}

	case HOST_VM_PURGABLE: {
		if (*count < HOST_VM_PURGABLE_COUNT) {
			return KERN_FAILURE;
		}

		vm_purgeable_stats((vm_purgeable_info_t)info, NULL);

		*count = HOST_VM_PURGABLE_COUNT;
		return KERN_SUCCESS;
	}

	case HOST_DEBUG_INFO_INTERNAL: {
#if DEVELOPMENT || DEBUG
		if (*count < HOST_DEBUG_INFO_INTERNAL_COUNT) {
			return KERN_FAILURE;
		}

		host_debug_info_internal_t debug_info = (host_debug_info_internal_t)info;
		bzero(debug_info, sizeof(host_debug_info_internal_data_t));
		*count = HOST_DEBUG_INFO_INTERNAL_COUNT;

#if CONFIG_COALITIONS
		debug_info->config_coalitions = 1;
#endif
		debug_info->config_bank = 1;
#if CONFIG_ATM
		debug_info->config_atm = 1;
#endif
#if CONFIG_CSR
		debug_info->config_csr = 1;
#endif
		return KERN_SUCCESS;
#else /* DEVELOPMENT || DEBUG */
		return KERN_NOT_SUPPORTED;
#endif
	}

	case HOST_PREFERRED_USER_ARCH: {
		host_preferred_user_arch_t user_arch_info;

		/*
		 *	Preferred user-space architecture for this host.
		 */
		if (*count < HOST_PREFERRED_USER_ARCH_COUNT) {
			return KERN_FAILURE;
		}

		user_arch_info = (host_preferred_user_arch_t)info;

#if defined(PREFERRED_USER_CPU_TYPE) && defined(PREFERRED_USER_CPU_SUBTYPE)
		cpu_type_t preferred_cpu_type;
		cpu_subtype_t preferred_cpu_subtype;
		if (!PE_get_default("kern.preferred_cpu_type", &preferred_cpu_type, sizeof(cpu_type_t))) {
			preferred_cpu_type = PREFERRED_USER_CPU_TYPE;
		}
		if (!PE_get_default("kern.preferred_cpu_subtype", &preferred_cpu_subtype, sizeof(cpu_subtype_t))) {
			preferred_cpu_subtype = PREFERRED_USER_CPU_SUBTYPE;
		}
		user_arch_info->cpu_type    = preferred_cpu_type;
		user_arch_info->cpu_subtype = preferred_cpu_subtype;
#else
		int master_id               = master_processor->cpu_id;
		user_arch_info->cpu_type    = slot_type(master_id);
		user_arch_info->cpu_subtype = slot_subtype(master_id);
#endif


		*count = HOST_PREFERRED_USER_ARCH_COUNT;

		return KERN_SUCCESS;
	}

	default: return KERN_INVALID_ARGUMENT;
	}
}

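/*
 * Example (userspace sketch, not part of this file): host_info() is
 * normally reached through the MIG-generated wrapper, e.g. to query
 * HOST_BASIC_INFO:
 *
 *	host_basic_info_data_t basic;
 *	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
 *	kern_return_t kr = host_info(mach_host_self(), HOST_BASIC_INFO,
 *	    (host_info_t)&basic, &count);
 *	// on success: basic.max_cpus, basic.memory_size, ...
 */
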
kern_return_t host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count);

kern_return_t
host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_HOST;
	}

	switch (flavor) {
	case HOST_LOAD_INFO: {
		host_load_info_t load_info;

		if (*count < HOST_LOAD_INFO_COUNT) {
			return KERN_FAILURE;
		}

		load_info = (host_load_info_t)info;

		bcopy((char *)avenrun, (char *)load_info->avenrun, sizeof avenrun);
		bcopy((char *)mach_factor, (char *)load_info->mach_factor, sizeof mach_factor);

		*count = HOST_LOAD_INFO_COUNT;
		return KERN_SUCCESS;
	}

	case HOST_VM_INFO: {
		vm_statistics64_data_t host_vm_stat;
		vm_statistics_t stat32;
		mach_msg_type_number_t original_count;
		natural_t speculative_count = vm_page_speculative_count;

		if (*count < HOST_VM_INFO_REV0_COUNT) {
			return KERN_FAILURE;
		}

		get_host_vm_stats(&host_vm_stat);

		stat32 = (vm_statistics_t)info;

		stat32->free_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_free_count + speculative_count);
		stat32->active_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_active_count);

		if (vm_page_local_q) {
			zpercpu_foreach(lq, vm_page_local_q) {
				stat32->active_count += VM_STATISTICS_TRUNCATE_TO_32_BIT(lq->vpl_count);
			}
		}
		stat32->inactive_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_inactive_count);
#if !XNU_TARGET_OS_OSX
		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count);
#else /* !XNU_TARGET_OS_OSX */
		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count);
#endif /* !XNU_TARGET_OS_OSX */
		stat32->zero_fill_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.zero_fill_count);
		stat32->reactivations = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.reactivations);
		stat32->pageins = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageins);
		stat32->pageouts = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageouts);
		stat32->faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.faults);
		stat32->cow_faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.cow_faults);
		stat32->lookups = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.lookups);
		stat32->hits = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.hits);

		/*
		 * Fill in extra info added in later revisions of the
		 * vm_statistics data structure.  Fill in only what can fit
		 * in the data structure the caller gave us!
		 */
		original_count = *count;
		*count = HOST_VM_INFO_REV0_COUNT; /* rev0 already filled in */
		if (original_count >= HOST_VM_INFO_REV1_COUNT) {
			/* rev1 added "purgeable" info */
			stat32->purgeable_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purgeable_count);
			stat32->purges = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purged_count);
			*count = HOST_VM_INFO_REV1_COUNT;
		}

		if (original_count >= HOST_VM_INFO_REV2_COUNT) {
			/* rev2 added "speculative" info */
			stat32->speculative_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(speculative_count);
			*count = HOST_VM_INFO_REV2_COUNT;
		}

		/* rev3 changed some of the fields to be 64-bit */

		return KERN_SUCCESS;
	}

	case HOST_CPU_LOAD_INFO: {
		host_cpu_load_info_t cpu_load_info;

		if (*count < HOST_CPU_LOAD_INFO_COUNT) {
			return KERN_FAILURE;
		}

#define GET_TICKS_VALUE(state, ticks)                                                      \
	MACRO_BEGIN cpu_load_info->cpu_ticks[(state)] += (uint32_t)(ticks / hz_tick_interval); \
	MACRO_END
#define GET_TICKS_VALUE_FROM_TIMER(processor, state, timer)                            \
	MACRO_BEGIN GET_TICKS_VALUE(state, timer_grab(&(processor)->timer)); \
	MACRO_END

		cpu_load_info = (host_cpu_load_info_t)info;
		cpu_load_info->cpu_ticks[CPU_STATE_USER] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		simple_lock(&processor_list_lock, LCK_GRP_NULL);

		unsigned int pcount = processor_count;

		for (unsigned int i = 0; i < pcount; i++) {
			processor_t processor = processor_array[i];
			assert(processor != PROCESSOR_NULL);
			processor_cpu_load_info(processor, cpu_load_info->cpu_ticks);
		}
		simple_unlock(&processor_list_lock);

		*count = HOST_CPU_LOAD_INFO_COUNT;

		return KERN_SUCCESS;
	}

	case HOST_EXPIRED_TASK_INFO: {
		if (*count < TASK_POWER_INFO_COUNT) {
			return KERN_FAILURE;
		}

		task_power_info_t tinfo1 = (task_power_info_t)info;
		task_power_info_v2_t tinfo2 = (task_power_info_v2_t)info;

		tinfo1->task_interrupt_wakeups = dead_task_statistics.task_interrupt_wakeups;
		tinfo1->task_platform_idle_wakeups = dead_task_statistics.task_platform_idle_wakeups;

		tinfo1->task_timer_wakeups_bin_1 = dead_task_statistics.task_timer_wakeups_bin_1;

		tinfo1->task_timer_wakeups_bin_2 = dead_task_statistics.task_timer_wakeups_bin_2;

		tinfo1->total_user = dead_task_statistics.total_user_time;
		tinfo1->total_system = dead_task_statistics.total_system_time;
		if (*count < TASK_POWER_INFO_V2_COUNT) {
			*count = TASK_POWER_INFO_COUNT;
		} else if (*count >= TASK_POWER_INFO_V2_COUNT) {
			tinfo2->gpu_energy.task_gpu_utilisation = dead_task_statistics.task_gpu_ns;
#if defined(__arm64__)
			tinfo2->task_energy = dead_task_statistics.task_energy;
			tinfo2->task_ptime = dead_task_statistics.total_ptime;
			tinfo2->task_pset_switches = dead_task_statistics.total_pset_switches;
#endif
			*count = TASK_POWER_INFO_V2_COUNT;
		}

		return KERN_SUCCESS;
	}
	default: return KERN_INVALID_ARGUMENT;
	}
}

extern uint32_t c_segment_pages_compressed;

#define HOST_STATISTICS_TIME_WINDOW 1 /* seconds */
#define HOST_STATISTICS_MAX_REQUESTS 10 /* maximum number of requests per window */
#define HOST_STATISTICS_MIN_REQUESTS 2 /* minimum number of requests per window */

uint64_t host_statistics_time_window;

static LCK_GRP_DECLARE(host_statistics_lck_grp, "host_statistics");
static LCK_MTX_DECLARE(host_statistics_lck, &host_statistics_lck_grp);

#define HOST_VM_INFO64_REV0             0
#define HOST_VM_INFO64_REV1             1
#define HOST_EXTMOD_INFO64_REV0         2
#define HOST_LOAD_INFO_REV0             3
#define HOST_VM_INFO_REV0               4
#define HOST_VM_INFO_REV1               5
#define HOST_VM_INFO_REV2               6
#define HOST_CPU_LOAD_INFO_REV0         7
#define HOST_EXPIRED_TASK_INFO_REV0     8
#define HOST_EXPIRED_TASK_INFO_REV1     9
#define HOST_VM_COMPRESSOR_Q_LEN_REV0   10
#define NUM_HOST_INFO_DATA_TYPES        11

static vm_statistics64_data_t host_vm_info64_rev0 = {};
static vm_statistics64_data_t host_vm_info64_rev1 = {};
static vm_extmod_statistics_data_t host_extmod_info64 = {};
static host_load_info_data_t host_load_info = {};
static vm_statistics_data_t host_vm_info_rev0 = {};
static vm_statistics_data_t host_vm_info_rev1 = {};
static vm_statistics_data_t host_vm_info_rev2 = {};
static host_cpu_load_info_data_t host_cpu_load_info = {};
static task_power_info_data_t host_expired_task_info = {};
static task_power_info_v2_data_t host_expired_task_info2 = {};
static vm_compressor_q_lens_data_t host_vm_compressor_q_lens = {};

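/*
 * Per-flavor cache of the most recent statistics served to
 * non-platform binaries; used by the rate limiting below.
 */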
struct host_stats_cache {
	uint64_t last_access;
	uint64_t current_requests;
	uint64_t max_requests;
	uintptr_t data;
	mach_msg_type_number_t count; // NOTE: count is in units of integer_t
};

static struct host_stats_cache g_host_stats_cache[NUM_HOST_INFO_DATA_TYPES] = {
	[HOST_VM_INFO64_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info64_rev0, .count = HOST_VM_INFO64_REV0_COUNT },
	[HOST_VM_INFO64_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info64_rev1, .count = HOST_VM_INFO64_REV1_COUNT },
	[HOST_EXTMOD_INFO64_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_extmod_info64, .count = HOST_EXTMOD_INFO64_COUNT },
	[HOST_LOAD_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_load_info, .count = HOST_LOAD_INFO_COUNT },
	[HOST_VM_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev0, .count = HOST_VM_INFO_REV0_COUNT },
	[HOST_VM_INFO_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev1, .count = HOST_VM_INFO_REV1_COUNT },
	[HOST_VM_INFO_REV2] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev2, .count = HOST_VM_INFO_REV2_COUNT },
	[HOST_CPU_LOAD_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_cpu_load_info, .count = HOST_CPU_LOAD_INFO_COUNT },
	[HOST_EXPIRED_TASK_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_expired_task_info, .count = TASK_POWER_INFO_COUNT },
	[HOST_EXPIRED_TASK_INFO_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_expired_task_info2, .count = TASK_POWER_INFO_V2_COUNT},
	[HOST_VM_COMPRESSOR_Q_LEN_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_compressor_q_lens, .count = VM_COMPRESSOR_Q_LENS_COUNT},
};


void
host_statistics_init(void)
{
	nanoseconds_to_absolutetime((HOST_STATISTICS_TIME_WINDOW * NSEC_PER_SEC), &host_statistics_time_window);
}

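/*
 *	cache_host_statistics:
 *
 *	Remember the result just computed so it can be replayed to
 *	rate-limited callers.  Results produced for platform binaries
 *	are not cached, since platform binaries are never rate limited.
 */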
static void
cache_host_statistics(int index, host_info64_t info)
{
	if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES) {
		return;
	}

	if (task_get_platform_binary(current_task())) {
		return;
	}

	memcpy((void *)g_host_stats_cache[index].data, info, g_host_stats_cache[index].count * sizeof(integer_t));
	return;
}

static void
get_cached_info(int index, host_info64_t info, mach_msg_type_number_t* count)
{
	if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES) {
		*count = 0;
		return;
	}

	*count = g_host_stats_cache[index].count;
	memcpy(info, (void *)g_host_stats_cache[index].data, g_host_stats_cache[index].count * sizeof(integer_t));
}

static int
get_host_info_data_index(bool is_stat64, host_flavor_t flavor, mach_msg_type_number_t* count, kern_return_t* ret)
{
	switch (flavor) {
	case HOST_VM_INFO64:
		if (!is_stat64) {
			*ret = KERN_INVALID_ARGUMENT;
			return -1;
		}
		if (*count < HOST_VM_INFO64_REV0_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		if (*count >= HOST_VM_INFO64_REV1_COUNT) {
			return HOST_VM_INFO64_REV1;
		}
		return HOST_VM_INFO64_REV0;

	case HOST_EXTMOD_INFO64:
		if (!is_stat64) {
			*ret = KERN_INVALID_ARGUMENT;
			return -1;
		}
		if (*count < HOST_EXTMOD_INFO64_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		return HOST_EXTMOD_INFO64_REV0;

	case HOST_LOAD_INFO:
		if (*count < HOST_LOAD_INFO_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		return HOST_LOAD_INFO_REV0;

	case HOST_VM_INFO:
		if (*count < HOST_VM_INFO_REV0_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		if (*count >= HOST_VM_INFO_REV2_COUNT) {
			return HOST_VM_INFO_REV2;
		}
		if (*count >= HOST_VM_INFO_REV1_COUNT) {
			return HOST_VM_INFO_REV1;
		}
		return HOST_VM_INFO_REV0;

	case HOST_CPU_LOAD_INFO:
		if (*count < HOST_CPU_LOAD_INFO_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		return HOST_CPU_LOAD_INFO_REV0;

	case HOST_EXPIRED_TASK_INFO:
		if (*count < TASK_POWER_INFO_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		if (*count >= TASK_POWER_INFO_V2_COUNT) {
			return HOST_EXPIRED_TASK_INFO_REV1;
		}
		return HOST_EXPIRED_TASK_INFO_REV0;

	case HOST_VM_COMPRESSOR_Q_LENS:
		if (*count < VM_COMPRESSOR_Q_LENS_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		return HOST_VM_COMPRESSOR_Q_LEN_REV0;

	default:
		*ret = KERN_INVALID_ARGUMENT;
		return -1;
	}
}

static bool
rate_limit_host_statistics(bool is_stat64, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t* count, kern_return_t* ret, int *pindex)
{
	task_t task = current_task();

	assert(task != kernel_task);

	*ret = KERN_SUCCESS;
	*pindex = -1;

	/* Access control only for third party applications */
	if (task_get_platform_binary(task)) {
		return FALSE;
	}

	/* Rate limit to HOST_STATISTICS_MAX_REQUESTS queries for each HOST_STATISTICS_TIME_WINDOW window of time */
	bool rate_limited = FALSE;
	bool set_last_access = TRUE;

	/* there is a cache for every flavor */
	int index = get_host_info_data_index(is_stat64, flavor, count, ret);
	if (index == -1) {
		goto out;
	}

	*pindex = index;
	lck_mtx_lock(&host_statistics_lck);
	if (g_host_stats_cache[index].last_access > mach_continuous_time() - host_statistics_time_window) {
		set_last_access = FALSE;
		if (g_host_stats_cache[index].current_requests++ >= g_host_stats_cache[index].max_requests) {
			rate_limited = TRUE;
			get_cached_info(index, info, count);
		}
	}
	if (set_last_access) {
		g_host_stats_cache[index].current_requests = 1;
		/*
		 * Select a random number of allowed requests for this window
		 * (between HOST_STATISTICS_MIN_REQUESTS and HOST_STATISTICS_MAX_REQUESTS).
		 * This way it is not possible to infer, by observing when the
		 * cached copy changes, whether host_statistics was called in
		 * the previous window.
		 */
		g_host_stats_cache[index].max_requests = (mach_absolute_time() % (HOST_STATISTICS_MAX_REQUESTS - HOST_STATISTICS_MIN_REQUESTS + 1)) + HOST_STATISTICS_MIN_REQUESTS;
		g_host_stats_cache[index].last_access = mach_continuous_time();
	}
	lck_mtx_unlock(&host_statistics_lck);
out:
	return rate_limited;
}

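/*
 *	vm_stats:
 *
 *	Fill in a vm_statistics64 structure (HOST_VM_INFO64), honoring
 *	the revision implied by the caller's count.
 */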
kern_return_t
vm_stats(void *info, unsigned int *count)
{
	vm_statistics64_data_t host_vm_stat;
	mach_msg_type_number_t original_count;
	unsigned int local_q_internal_count;
	unsigned int local_q_external_count;
	natural_t speculative_count = vm_page_speculative_count;
	natural_t throttled_count = vm_page_throttled_count;

	if (*count < HOST_VM_INFO64_REV0_COUNT) {
		return KERN_FAILURE;
	}
	get_host_vm_stats(&host_vm_stat);

	vm_statistics64_t stat = (vm_statistics64_t)info;

	stat->free_count = vm_page_free_count + speculative_count;
	stat->active_count = vm_page_active_count;

	local_q_internal_count = 0;
	local_q_external_count = 0;
	if (vm_page_local_q) {
		zpercpu_foreach(lq, vm_page_local_q) {
			stat->active_count += lq->vpl_count;
			local_q_internal_count += lq->vpl_internal_count;
			local_q_external_count += lq->vpl_external_count;
		}
	}
	stat->inactive_count = vm_page_inactive_count;
#if !XNU_TARGET_OS_OSX
	stat->wire_count = vm_page_wire_count;
#else /* !XNU_TARGET_OS_OSX */
	stat->wire_count = vm_page_wire_count + throttled_count + vm_lopage_free_count;
#endif /* !XNU_TARGET_OS_OSX */
	stat->zero_fill_count = host_vm_stat.zero_fill_count;
	stat->reactivations = host_vm_stat.reactivations;
	stat->pageins = host_vm_stat.pageins;
	stat->pageouts = host_vm_stat.pageouts;
	stat->faults = host_vm_stat.faults;
	stat->cow_faults = host_vm_stat.cow_faults;
	stat->lookups = host_vm_stat.lookups;
	stat->hits = host_vm_stat.hits;

	stat->purgeable_count = vm_page_purgeable_count;
	stat->purges = vm_page_purged_count;

	stat->speculative_count = speculative_count;

	/*
	 * Fill in extra info added in later revisions of the
	 * vm_statistics data structure.  Fill in only what can fit
	 * in the data structure the caller gave us!
	 */
	original_count = *count;
	*count = HOST_VM_INFO64_REV0_COUNT; /* rev0 already filled in */
	if (original_count >= HOST_VM_INFO64_REV1_COUNT) {
		/* rev1 added "throttled count" */
		stat->throttled_count = throttled_count;
		/* rev1 added "compression" info */
		stat->compressor_page_count = VM_PAGE_COMPRESSOR_COUNT;
		stat->compressions = host_vm_stat.compressions;
		stat->decompressions = host_vm_stat.decompressions;
		stat->swapins = host_vm_stat.swapins;
		stat->swapouts = host_vm_stat.swapouts;
		/* rev1 added:
		 * "external page count"
		 * "anonymous page count"
		 * "total # of pages (uncompressed) held in the compressor"
		 */
		stat->external_page_count = (vm_page_pageable_external_count + local_q_external_count);
		stat->internal_page_count = (vm_page_pageable_internal_count + local_q_internal_count);
		stat->total_uncompressed_pages_in_compressor = c_segment_pages_compressed;
		*count = HOST_VM_INFO64_REV1_COUNT;
	}

	return KERN_SUCCESS;
}

#if DEVELOPMENT || DEBUG
extern uint32_t        c_segment_count;
extern uint32_t        c_age_count;
extern uint32_t        c_early_swappedin_count, c_regular_swappedin_count, c_late_swappedin_count;
extern uint32_t        c_early_swapout_count, c_regular_swapout_count, c_late_swapout_count;
extern uint32_t        c_swapio_count;
extern uint32_t        c_swappedout_count;
extern uint32_t        c_swappedout_sparse_count;
extern uint32_t        c_major_count;
extern uint32_t        c_filling_count;
extern uint32_t        c_empty_count;
extern uint32_t        c_bad_count;
extern uint32_t        c_minor_count;
extern uint32_t        c_segments_available;

static kern_return_t
vm_compressor_queue_lens(void *info, unsigned int *count)
{
	if (*count < VM_COMPRESSOR_Q_LENS_COUNT) {
		return KERN_NO_SPACE;
	}

	struct vm_compressor_q_lens *qc = (struct vm_compressor_q_lens *)info;
	qc->qcc_segments_available = c_segments_available;
	qc->qcc_segment_count = c_segment_count;
	qc->qcc_age_count = c_age_count;
	qc->qcc_early_swappedin_count = c_early_swappedin_count;
	qc->qcc_regular_swappedin_count = c_regular_swappedin_count;
	qc->qcc_late_swappedin_count = c_late_swappedin_count;
	qc->qcc_early_swapout_count = c_early_swapout_count;
	qc->qcc_regular_swapout_count = c_regular_swapout_count;
	qc->qcc_late_swapout_count = c_late_swapout_count;
	qc->qcc_swapio_count = c_swapio_count;
	qc->qcc_swappedout_count = c_swappedout_count;
	qc->qcc_swappedout_sparse_count = c_swappedout_sparse_count;
	qc->qcc_major_count = c_major_count;
	qc->qcc_filling_count = c_filling_count;
	qc->qcc_empty_count = c_empty_count;
	qc->qcc_bad_count = c_bad_count;
	qc->qcc_minor_count = c_minor_count;

	*count = VM_COMPRESSOR_Q_LENS_COUNT;

	return KERN_SUCCESS;
}

#endif /* DEVELOPMENT || DEBUG */

kern_return_t host_statistics64(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count);

kern_return_t
host_statistics64(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_HOST;
	}

	switch (flavor) {
	case HOST_VM_INFO64: /* We were asked to get vm_statistics64 */
		return vm_stats(info, count);

	case HOST_EXTMOD_INFO64: /* We were asked to get vm_extmod_statistics */
	{
		vm_extmod_statistics_t out_extmod_statistics;

		if (*count < HOST_EXTMOD_INFO64_COUNT) {
			return KERN_FAILURE;
		}

		out_extmod_statistics = (vm_extmod_statistics_t)info;
		*out_extmod_statistics = host_extmod_statistics;

		*count = HOST_EXTMOD_INFO64_COUNT;

		return KERN_SUCCESS;
	}

	case HOST_VM_COMPRESSOR_Q_LENS:
#if DEVELOPMENT || DEBUG
		return vm_compressor_queue_lens(info, count);
#else
		return KERN_NOT_SUPPORTED;
#endif

	default: /* If we didn't recognize the flavor, send to host_statistics */
		return host_statistics(host, flavor, (host_info_t)info, count);
	}
}
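
/*
 * Example (userspace sketch, not part of this file): fetching 64-bit
 * VM statistics through the MIG-generated wrapper:
 *
 *	vm_statistics64_data_t vmstat;
 *	mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
 *	kern_return_t kr = host_statistics64(mach_host_self(),
 *	    HOST_VM_INFO64, (host_info64_t)&vmstat, &count);
 */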

kern_return_t
host_statistics64_from_user(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
	kern_return_t ret = KERN_SUCCESS;
	int index;

	if (host == HOST_NULL) {
		return KERN_INVALID_HOST;
	}

	if (rate_limit_host_statistics(TRUE, flavor, info, count, &ret, &index)) {
		return ret;
	}

	if (ret != KERN_SUCCESS) {
		return ret;
	}

	ret = host_statistics64(host, flavor, info, count);

	if (ret == KERN_SUCCESS) {
		cache_host_statistics(index, info);
	}

	return ret;
}

kern_return_t
host_statistics_from_user(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
	kern_return_t ret = KERN_SUCCESS;
	int index;

	if (host == HOST_NULL) {
		return KERN_INVALID_HOST;
	}

	if (rate_limit_host_statistics(FALSE, flavor, info, count, &ret, &index)) {
		return ret;
	}

	if (ret != KERN_SUCCESS) {
		return ret;
	}

	ret = host_statistics(host, flavor, info, count);

	if (ret == KERN_SUCCESS) {
		cache_host_statistics(index, info);
	}

	return ret;
}

/*
 * Get host statistics that require privilege.
 * None for now, just call the un-privileged version.
 */
kern_return_t
host_priv_statistics(host_priv_t host_priv, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	return host_statistics((host_t)host_priv, flavor, info, count);
}

kern_return_t
set_sched_stats_active(boolean_t active)
{
	sched_stats_active = active;
	return KERN_SUCCESS;
}

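/*
 *	get_sched_statistics:
 *
 *	Copy per-CPU scheduler statistics into the caller's buffer,
 *	followed by one aggregate entry (ps_cpuid == -1) covering the
 *	realtime run queue.
 */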
kern_return_t
get_sched_statistics(struct _processor_statistics_np * out, uint32_t * count)
{
	uint32_t pos = 0;

	if (!sched_stats_active) {
		return KERN_FAILURE;
	}

	percpu_foreach_base(pcpu_base) {
		struct sched_statistics stats;
		processor_t processor;

		pos += sizeof(struct _processor_statistics_np);
		if (pos > *count) {
			return KERN_FAILURE;
		}

		stats = *PERCPU_GET_WITH_BASE(pcpu_base, sched_stats);
		processor = PERCPU_GET_WITH_BASE(pcpu_base, processor);

		out->ps_cpuid = processor->cpu_id;
		out->ps_csw_count = stats.csw_count;
		out->ps_preempt_count = stats.preempt_count;
		out->ps_preempted_rt_count = stats.preempted_rt_count;
		out->ps_preempted_by_rt_count = stats.preempted_by_rt_count;
		out->ps_rt_sched_count = stats.rt_sched_count;
		out->ps_interrupt_count = stats.interrupt_count;
		out->ps_ipi_count = stats.ipi_count;
		out->ps_timer_pop_count = stats.timer_pop_count;
		out->ps_runq_count_sum = SCHED(processor_runq_stats_count_sum)(processor);
		out->ps_idle_transitions = stats.idle_transitions;
		out->ps_quantum_timer_expirations = stats.quantum_timer_expirations;

		out++;
	}

	/* And include RT Queue information */
	pos += sizeof(struct _processor_statistics_np);
	if (pos > *count) {
		return KERN_FAILURE;
	}

	bzero(out, sizeof(*out));
	out->ps_cpuid = (-1);
	out->ps_runq_count_sum = SCHED(rt_runq_count_sum)();
	out++;

	*count = pos;

	return KERN_SUCCESS;
}

kern_return_t
host_page_size(host_t host, vm_size_t * out_page_size)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	*out_page_size = PAGE_SIZE;

	return KERN_SUCCESS;
}

/*
 *	Return kernel version string (more than you ever
 *	wanted to know about what version of the kernel this is).
 */
extern char version[];

kern_return_t
host_kernel_version(host_t host, kernel_version_t out_version)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	(void)strncpy(out_version, version, sizeof(kernel_version_t));

	return KERN_SUCCESS;
}

/*
 *	host_processor_sets:
 *
 *	List all processor sets on the host.
 */
kern_return_t
host_processor_sets(host_priv_t host_priv, processor_set_name_array_t * pset_list, mach_msg_type_number_t * count)
{
	mach_port_array_t ports;

	if (host_priv == HOST_PRIV_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 *	Allocate memory.  Can be pageable because it won't be
	 *	touched while holding a lock.
	 */

	ports = mach_port_array_alloc(1, Z_WAITOK | Z_NOFAIL);

	/* do the conversion that Mig should handle */
	ports[0].port = convert_pset_name_to_port(&pset0);

	*pset_list = ports;
	*count = 1;

	return KERN_SUCCESS;
}

/*
 *	host_processor_set_priv:
 *
 *	Return control port for given processor set.
 */
kern_return_t
host_processor_set_priv(host_priv_t host_priv, processor_set_t pset_name, processor_set_t * pset)
{
	if (host_priv == HOST_PRIV_NULL || pset_name == PROCESSOR_SET_NULL) {
		*pset = PROCESSOR_SET_NULL;

		return KERN_INVALID_ARGUMENT;
	}

	*pset = pset_name;

	return KERN_SUCCESS;
}

/*
 *	host_processor_info
 *
 *	Return info about the processors on this host.  It will return
 *	the number of processors, and the specific type of info requested
 *	in an OOL array.
 */
kern_return_t
host_processor_info(host_t host,
    processor_flavor_t flavor,
    natural_t * out_pcount,
    processor_info_array_t * out_array,
    mach_msg_type_number_t * out_array_count)
{
	kern_return_t result;
	host_t thost;
	processor_info_t info;
	unsigned int icount;
	unsigned int pcount;
	vm_offset_t addr;
	vm_size_t size, needed;
	vm_map_copy_t copy;

	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	result = processor_info_count(flavor, &icount);
	if (result != KERN_SUCCESS) {
		return result;
	}

	pcount = processor_count;
	assert(pcount != 0);

	needed = pcount * icount * sizeof(natural_t);
	size = vm_map_round_page(needed, VM_MAP_PAGE_MASK(ipc_kernel_map));
	result = kmem_alloc(ipc_kernel_map, &addr, size, KMA_DATA, VM_KERN_MEMORY_IPC);
	if (result != KERN_SUCCESS) {
		return KERN_RESOURCE_SHORTAGE;
	}

	info = (processor_info_t)addr;

	for (unsigned int i = 0; i < pcount; i++) {
		processor_t processor = processor_array[i];
		assert(processor != PROCESSOR_NULL);

		unsigned int tcount = icount;

		result = processor_info(processor, flavor, &thost, info, &tcount);
		if (result != KERN_SUCCESS) {
			kmem_free(ipc_kernel_map, addr, size);
			return result;
		}
		info += icount;
	}

	if (size != needed) {
		bzero((char *)addr + needed, size - needed);
	}

	result = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(ipc_kernel_map)),
	    vm_map_round_page(addr + size, VM_MAP_PAGE_MASK(ipc_kernel_map)), FALSE);
	assert(result == KERN_SUCCESS);
	result = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr, (vm_map_size_t)needed, TRUE, &copy);
	assert(result == KERN_SUCCESS);

	*out_pcount = pcount;
	*out_array = (processor_info_array_t)copy;
	*out_array_count = pcount * icount;

	return KERN_SUCCESS;
}

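/*
 * A host special port id is valid if it lies within
 * [HOST_MIN_SPECIAL_PORT, HOST_MAX_SPECIAL_PORT] and does not fall
 * into the reserved gap between the last defined kernel special port
 * and HOST_MAX_SPECIAL_KERNEL_PORT.
 */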
static bool
is_valid_host_special_port(int id)
{
	return (id <= HOST_MAX_SPECIAL_PORT) &&
	       (id >= HOST_MIN_SPECIAL_PORT) &&
	       ((id <= HOST_LAST_SPECIAL_KERNEL_PORT) || (id > HOST_MAX_SPECIAL_KERNEL_PORT));
}

extern void * XNU_PTRAUTH_SIGNED_PTR("initproc") initproc;

/*
 *      Kernel interface for setting a special port.
 */
kern_return_t
kernel_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
{
	ipc_port_t old_port;

	if (!is_valid_host_special_port(id)) {
		panic("attempted to set invalid special port %d", id);
	}

#if !MACH_FLIPC
	if (id == HOST_NODE_PORT) {
		return KERN_NOT_SUPPORTED;
	}
#endif

	host_lock(host_priv);
	old_port = host_priv->special[id];
	host_priv->special[id] = port;
	host_unlock(host_priv);

#if MACH_FLIPC
	if (id == HOST_NODE_PORT) {
		mach_node_port_changed();
	}
#endif

	if (IP_VALID(old_port)) {
		ipc_port_release_send(old_port);
	}


	return KERN_SUCCESS;
}

/*
 *      Kernel interface for retrieving a special port.
 */
kern_return_t
kernel_get_special_port(host_priv_t host_priv, int id, ipc_port_t * portp)
{
	if (!is_valid_host_special_port(id)) {
		panic("attempted to get invalid special port %d", id);
	}

	host_lock(host_priv);
	*portp = host_priv->special[id];
	host_unlock(host_priv);
	return KERN_SUCCESS;
}

/*
 *      User interface for setting a special port.
 *
 *      Only permits the user to set a user-owned special port
 *      ID, rejecting a kernel-owned special port ID.
 *
 *      A special kernel port cannot be set up using this
 *      routine; use kernel_set_special_port() instead.
 */
kern_return_t
host_set_special_port_from_user(host_priv_t host_priv, int id, ipc_port_t port)
{
	if (host_priv == HOST_PRIV_NULL || id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT) {
		return KERN_INVALID_ARGUMENT;
	}

	if (task_is_driver(current_task())) {
		return KERN_NO_ACCESS;
	}

	/*
	 * rdar://70585367
	 * disallow immovable send so other process can't retrieve it through host_get_special_port()
	 */
	if (IP_VALID(port) && port->ip_immovable_send) {
		return KERN_INVALID_RIGHT;
	}

	return host_set_special_port(host_priv, id, port);
}

kern_return_t
host_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
{
	if (host_priv == HOST_PRIV_NULL || id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT) {
		return KERN_INVALID_ARGUMENT;
	}

	if (current_task() != kernel_task && get_bsdtask_info(current_task()) != initproc) {
		bool allowed = (id == HOST_TELEMETRY_PORT &&
		    IOTaskHasEntitlement(current_task(), "com.apple.private.xpc.launchd.event-monitor"));
#if CONFIG_CSR
		if (!allowed) {
			allowed = (csr_check(CSR_ALLOW_TASK_FOR_PID) == 0);
		}
#endif
		if (!allowed) {
			return KERN_NO_ACCESS;
		}
	}

#if CONFIG_MACF
	if (mac_task_check_set_host_special_port(current_task(), id, port) != 0) {
		return KERN_NO_ACCESS;
	}
#endif

	return kernel_set_special_port(host_priv, id, port);
}

/*
 *      User interface for retrieving a special port.
 *
 *      Note that there is nothing to prevent a user special
 *      port from disappearing after it has been discovered by
 *      the caller; thus, using a special port can always result
 *      in a "port not valid" error.
 */

kern_return_t
host_get_special_port_from_user(host_priv_t host_priv, __unused int node, int id, ipc_port_t * portp)
{
	if (host_priv == HOST_PRIV_NULL || id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < HOST_MIN_SPECIAL_PORT) {
		return KERN_INVALID_ARGUMENT;
	}

	task_t task = current_task();
	if (task && task_is_driver(task) && id > HOST_MAX_SPECIAL_KERNEL_PORT) {
		/* allow HID drivers to get the sysdiagnose port for keychord handling */
		if (id == HOST_SYSDIAGNOSE_PORT &&
		    IOCurrentTaskHasEntitlement(kIODriverKitHIDFamilyEventServiceEntitlementKey)) {
			goto get_special_port;
		}
		return KERN_NO_ACCESS;
	}
get_special_port:
	return host_get_special_port(host_priv, node, id, portp);
}

kern_return_t
host_get_special_port(host_priv_t host_priv, __unused int node, int id, ipc_port_t * portp)
{
	ipc_port_t port;

	if (host_priv == HOST_PRIV_NULL || id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < HOST_MIN_SPECIAL_PORT) {
		return KERN_INVALID_ARGUMENT;
	}

	host_lock(host_priv);
	port = realhost.special[id];
	switch (id) {
	case HOST_PORT:
		*portp = ipc_kobject_copy_send(port, &realhost, IKOT_HOST);
		break;
	case HOST_PRIV_PORT:
		*portp = ipc_kobject_copy_send(port, &realhost, IKOT_HOST_PRIV);
		break;
	case HOST_IO_MAIN_PORT:
		*portp = ipc_port_copy_send_any(main_device_port);
		break;
	default:
		*portp = ipc_port_copy_send_mqueue(port);
		break;
	}
	host_unlock(host_priv);

	return KERN_SUCCESS;
}
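
/*
 * Example (userspace sketch, not part of this file; assumes the caller
 * already holds a host_priv send right): retrieving a user-settable
 * special port such as the telemetry port:
 *
 *	mach_port_t port = MACH_PORT_NULL;
 *	kern_return_t kr = host_get_special_port(host_priv,
 *	    HOST_LOCAL_NODE, HOST_TELEMETRY_PORT, &port);
 */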

/*
 *	host_get_io_main
 *
 *	Return the IO main access port for this host.
 */
kern_return_t
host_get_io_main(host_t host, io_main_t * io_mainp)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return host_get_io_main_port(host_priv_self(), io_mainp);
}

host_t
host_self(void)
{
	return &realhost;
}

host_priv_t
host_priv_self(void)
{
	return &realhost;
}

kern_return_t
host_set_atm_diagnostic_flag(host_t host, uint32_t diagnostic_flag)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (!IOCurrentTaskHasEntitlement("com.apple.private.set-atm-diagnostic-flag")) {
		return KERN_NO_ACCESS;
	}

#if CONFIG_ATM
	return atm_set_diagnostic_config(diagnostic_flag);
#else
	(void)diagnostic_flag;
	return KERN_NOT_SUPPORTED;
#endif
}

kern_return_t
host_set_multiuser_config_flags(host_priv_t host_priv, uint32_t multiuser_config)
{
#if !defined(XNU_TARGET_OS_OSX)
	if (host_priv == HOST_PRIV_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * The multiuser bit is used extensively for shared iPad mode.
	 * The caller sets shared iPad or other multiuser modes; any
	 * override while updating the commpage is no longer appropriate.
	 */
	commpage_update_multiuser_config(multiuser_config);
	return KERN_SUCCESS;
#else
	(void)host_priv;
	(void)multiuser_config;
	return KERN_NOT_SUPPORTED;
#endif
}
1477