/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 *	host.c
 *
 *	Non-ipc host functions.
 */

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_info.h>
#include <mach/host_special_ports.h>
#include <mach/kern_return.h>
#include <mach/machine.h>
#include <mach/port.h>
#include <ipc/ipc_policy.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <mach/processor.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map.h>
#include <mach/task_info.h>

#include <machine/commpage.h>
#include <machine/cpu_capabilities.h>

#include <device/device_port.h>

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/kalloc.h>
#include <kern/ecc.h>
#include <kern/host.h>
#include <kern/host_statistics.h>
#include <kern/ipc_host.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/processor.h>

#include <vm/vm_map_xnu.h>
#include <vm/vm_purgeable_xnu.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern_xnu.h>

#include <IOKit/IOBSD.h>     // IOTaskHasEntitlement
#include <IOKit/IOKitKeys.h> // DriverKit entitlement strings

#if CONFIG_ATM
#include <atm/atm_internal.h>
#endif

#if CONFIG_MACF
#include <security/mac_mach_internal.h>
#endif

#if CONFIG_CSR
#include <sys/csr.h>
#endif

#include <pexpert/pexpert.h>

SCALABLE_COUNTER_DEFINE(vm_statistics_zero_fill_count);  /* # of zero fill pages */
SCALABLE_COUNTER_DEFINE(vm_statistics_reactivations);    /* # of pages reactivated */
SCALABLE_COUNTER_DEFINE(vm_statistics_pageins);          /* # of pageins */
SCALABLE_COUNTER_DEFINE(vm_statistics_pageouts);         /* # of pageouts */
SCALABLE_COUNTER_DEFINE(vm_statistics_faults);           /* # of faults */
SCALABLE_COUNTER_DEFINE(vm_statistics_cow_faults);       /* # of copy-on-writes */
SCALABLE_COUNTER_DEFINE(vm_statistics_lookups);          /* object cache lookups */
SCALABLE_COUNTER_DEFINE(vm_statistics_hits);             /* object cache hits */
SCALABLE_COUNTER_DEFINE(vm_statistics_purges);           /* # of pages purged */
SCALABLE_COUNTER_DEFINE(vm_statistics_decompressions);   /* # of pages decompressed */
SCALABLE_COUNTER_DEFINE(vm_statistics_compressions);     /* # of pages compressed */
SCALABLE_COUNTER_DEFINE(vm_statistics_swapins);          /* # of pages swapped in (via compression segments) */
SCALABLE_COUNTER_DEFINE(vm_statistics_swapouts);         /* # of pages swapped out (via compression segments) */
SCALABLE_COUNTER_DEFINE(vm_statistics_total_uncompressed_pages_in_compressor); /* # of pages (uncompressed) held within the compressor. */
SCALABLE_COUNTER_DEFINE(vm_page_grab_count);
SCALABLE_COUNTER_DEFINE(vm_page_grab_count_kern);
SCALABLE_COUNTER_DEFINE(vm_page_grab_count_iopl);
SCALABLE_COUNTER_DEFINE(vm_page_grab_count_upl);

host_data_t realhost;

static void
get_host_vm_stats(vm_statistics64_t out)
{
	out->zero_fill_count = counter_load(&vm_statistics_zero_fill_count);
	out->reactivations = counter_load(&vm_statistics_reactivations);
	out->pageins = counter_load(&vm_statistics_pageins);
	out->pageouts = counter_load(&vm_statistics_pageouts);
	out->faults = counter_load(&vm_statistics_faults);
	out->cow_faults = counter_load(&vm_statistics_cow_faults);
	out->lookups = counter_load(&vm_statistics_lookups);
	out->hits = counter_load(&vm_statistics_hits);
	out->compressions = counter_load(&vm_statistics_compressions);
	out->decompressions = counter_load(&vm_statistics_decompressions);
	out->swapins = counter_load(&vm_statistics_swapins);
	out->swapouts = counter_load(&vm_statistics_swapouts);
}

vm_extmod_statistics_data_t host_extmod_statistics;

kern_return_t
host_processors(host_priv_t host_priv, processor_array_t * out_array, mach_msg_type_number_t * countp)
{
	if (host_priv == HOST_PRIV_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	unsigned int count = processor_count;
	assert(count != 0);

	static_assert(sizeof(mach_port_t) == sizeof(processor_t));

	mach_port_array_t ports = mach_port_array_alloc(count, Z_WAITOK);
	if (!ports) {
		return KERN_RESOURCE_SHORTAGE;
	}

	for (unsigned int i = 0; i < count; i++) {
		processor_t processor = processor_array[i];
		assert(processor != PROCESSOR_NULL);

		/* do the conversion that Mig should handle */
		ports[i].port = convert_processor_to_port(processor);
	}

	*countp = count;
	*out_array = ports;

	return KERN_SUCCESS;
}
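
/*
 * Illustrative sketch (not part of the kernel logic above): how a
 * privileged user-space caller might invoke the MIG-generated counterpart
 * of host_processors().  Assumes the caller already holds a send right to
 * the host-priv port in `host_priv`; the returned array arrives as
 * out-of-line memory that the caller should deallocate when done.
 *
 *	processor_array_t procs;
 *	mach_msg_type_number_t nprocs;
 *	kern_return_t kr = host_processors(host_priv, &procs, &nprocs);
 *	if (kr == KERN_SUCCESS) {
 *		// ... use procs[0..nprocs-1] ...
 *		vm_deallocate(mach_task_self(), (vm_address_t)procs,
 *		    nprocs * sizeof(procs[0]));
 *	}
 */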

extern int sched_allow_NO_SMT_threads;

kern_return_t
host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	switch (flavor) {
	case HOST_BASIC_INFO: {
		host_basic_info_t basic_info;
		int master_id = master_processor->cpu_id;

		/*
		 * Basic information about this host.
		 */
		if (*count < HOST_BASIC_INFO_OLD_COUNT) {
			return KERN_FAILURE;
		}

		basic_info = (host_basic_info_t)info;

		basic_info->memory_size = machine_info.memory_size;
		basic_info->cpu_type = slot_type(master_id);
		basic_info->cpu_subtype = slot_subtype(master_id);
		basic_info->max_cpus = machine_info.max_cpus;
#if CONFIG_SCHED_SMT
		if (sched_allow_NO_SMT_threads && (current_task()->t_flags & TF_NO_SMT)) {
			basic_info->avail_cpus = primary_processor_avail_count_user;
		} else {
			basic_info->avail_cpus = processor_avail_count_user;
		}
#else
		basic_info->avail_cpus = processor_avail_count;
#endif

		if (*count >= HOST_BASIC_INFO_COUNT) {
			basic_info->cpu_threadtype = slot_threadtype(master_id);
			basic_info->physical_cpu = machine_info.physical_cpu;
			basic_info->physical_cpu_max = machine_info.physical_cpu_max;
#if defined(__x86_64__)
			basic_info->logical_cpu = basic_info->avail_cpus;
#else
			basic_info->logical_cpu = machine_info.logical_cpu;
#endif
			basic_info->logical_cpu_max = machine_info.logical_cpu_max;
			basic_info->max_mem = machine_info.max_mem;

			*count = HOST_BASIC_INFO_COUNT;
		} else {
			*count = HOST_BASIC_INFO_OLD_COUNT;
		}

		return KERN_SUCCESS;
	}

	case HOST_SCHED_INFO: {
		host_sched_info_t sched_info;
		uint32_t quantum_time;
		uint64_t quantum_ns;

		/*
		 * Return scheduler information.
		 */
		if (*count < HOST_SCHED_INFO_COUNT) {
			return KERN_FAILURE;
		}

		sched_info = (host_sched_info_t)info;

		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);

		sched_info->min_timeout = sched_info->min_quantum = (uint32_t)(quantum_ns / 1000 / 1000);

		*count = HOST_SCHED_INFO_COUNT;

		return KERN_SUCCESS;
	}

	case HOST_RESOURCE_SIZES: {
		/*
		 * Return sizes of kernel data structures.
		 */
		if (*count < HOST_RESOURCE_SIZES_COUNT) {
			return KERN_FAILURE;
		}

		/* XXX Fail until ledgers are implemented */
		return KERN_INVALID_ARGUMENT;
	}

	case HOST_PRIORITY_INFO: {
		host_priority_info_t priority_info;

		if (*count < HOST_PRIORITY_INFO_COUNT) {
			return KERN_FAILURE;
		}

		priority_info = (host_priority_info_t)info;

		priority_info->kernel_priority = MINPRI_KERNEL;
		priority_info->system_priority = MINPRI_KERNEL;
		priority_info->server_priority = MINPRI_RESERVED;
		priority_info->user_priority = BASEPRI_DEFAULT;
		priority_info->depress_priority = DEPRESSPRI;
		priority_info->idle_priority = IDLEPRI;
		priority_info->minimum_priority = MINPRI_USER;
		priority_info->maximum_priority = MAXPRI_RESERVED;

		*count = HOST_PRIORITY_INFO_COUNT;

		return KERN_SUCCESS;
	}

	/*
	 * Gestalt for various trap facilities.
	 */
	case HOST_MACH_MSG_TRAP:
	case HOST_SEMAPHORE_TRAPS: {
		*count = 0;
		return KERN_SUCCESS;
	}

	case HOST_CAN_HAS_DEBUGGER: {
		host_can_has_debugger_info_t can_has_debugger_info;

		if (*count < HOST_CAN_HAS_DEBUGGER_COUNT) {
			return KERN_FAILURE;
		}

		can_has_debugger_info = (host_can_has_debugger_info_t)info;
		can_has_debugger_info->can_has_debugger = PE_i_can_has_debugger(NULL);
		*count = HOST_CAN_HAS_DEBUGGER_COUNT;

		return KERN_SUCCESS;
	}

	case HOST_VM_PURGABLE: {
		if (*count < HOST_VM_PURGABLE_COUNT) {
			return KERN_FAILURE;
		}

		vm_purgeable_stats((vm_purgeable_info_t)info, NULL);

		*count = HOST_VM_PURGABLE_COUNT;
		return KERN_SUCCESS;
	}

	case HOST_DEBUG_INFO_INTERNAL: {
#if DEVELOPMENT || DEBUG
		if (*count < HOST_DEBUG_INFO_INTERNAL_COUNT) {
			return KERN_FAILURE;
		}

		host_debug_info_internal_t debug_info = (host_debug_info_internal_t)info;
		bzero(debug_info, sizeof(host_debug_info_internal_data_t));
		*count = HOST_DEBUG_INFO_INTERNAL_COUNT;

#if CONFIG_COALITIONS
		debug_info->config_coalitions = 1;
#endif
		debug_info->config_bank = 1;
#if CONFIG_ATM
		debug_info->config_atm = 1;
#endif
#if CONFIG_CSR
		debug_info->config_csr = 1;
#endif
		return KERN_SUCCESS;
#else /* DEVELOPMENT || DEBUG */
		return KERN_NOT_SUPPORTED;
#endif
	}

	case HOST_PREFERRED_USER_ARCH: {
		host_preferred_user_arch_t user_arch_info;

		/*
		 * Preferred user-space cpu type and subtype for this host.
		 */
		if (*count < HOST_PREFERRED_USER_ARCH_COUNT) {
			return KERN_FAILURE;
		}

		user_arch_info = (host_preferred_user_arch_t)info;

#if defined(PREFERRED_USER_CPU_TYPE) && defined(PREFERRED_USER_CPU_SUBTYPE)
		cpu_type_t preferred_cpu_type;
		cpu_subtype_t preferred_cpu_subtype;
		if (!PE_get_default("kern.preferred_cpu_type", &preferred_cpu_type, sizeof(cpu_type_t))) {
			preferred_cpu_type = PREFERRED_USER_CPU_TYPE;
		}
		if (!PE_get_default("kern.preferred_cpu_subtype", &preferred_cpu_subtype, sizeof(cpu_subtype_t))) {
			preferred_cpu_subtype = PREFERRED_USER_CPU_SUBTYPE;
		}
		user_arch_info->cpu_type = preferred_cpu_type;
		user_arch_info->cpu_subtype = preferred_cpu_subtype;
#elif APPLEVIRTUALPLATFORM
		extern uint32_t force_arm64_32;
		if (force_arm64_32) {
			user_arch_info->cpu_type = CPU_TYPE_ARM64_32;
			user_arch_info->cpu_subtype = CPU_SUBTYPE_ARM64_32_V8;
		} else {
			int master_id = master_processor->cpu_id;
			user_arch_info->cpu_type = slot_type(master_id);
			user_arch_info->cpu_subtype = slot_subtype(master_id);
		}
#else
		int master_id = master_processor->cpu_id;
		user_arch_info->cpu_type = slot_type(master_id);
		user_arch_info->cpu_subtype = slot_subtype(master_id);
#endif

		*count = HOST_PREFERRED_USER_ARCH_COUNT;

		return KERN_SUCCESS;
	}

	default: return KERN_INVALID_ARGUMENT;
	}
}
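
/*
 * Illustrative sketch (user space, not kernel code): the typical query of
 * the HOST_BASIC_INFO flavor through the MIG-generated host_info() routine.
 * mach_host_self() returns the unprivileged host port, and the in/out count
 * drives the OLD_COUNT/COUNT negotiation implemented above.
 *
 *	host_basic_info_data_t basic;
 *	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
 *	kern_return_t kr = host_info(mach_host_self(), HOST_BASIC_INFO,
 *	    (host_info_t)&basic, &count);
 *	if (kr == KERN_SUCCESS) {
 *		printf("max_cpus=%d avail_cpus=%d\n",
 *		    basic.max_cpus, basic.avail_cpus);
 *	}
 */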

kern_return_t host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count);

kern_return_t
host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_HOST;
	}

	switch (flavor) {
	case HOST_LOAD_INFO: {
		host_load_info_t load_info;

		if (*count < HOST_LOAD_INFO_COUNT) {
			return KERN_FAILURE;
		}

		load_info = (host_load_info_t)info;

		bcopy((char *)avenrun, (char *)load_info->avenrun, sizeof avenrun);
		bcopy((char *)mach_factor, (char *)load_info->mach_factor, sizeof mach_factor);

		*count = HOST_LOAD_INFO_COUNT;
		return KERN_SUCCESS;
	}

	case HOST_VM_INFO: {
		vm_statistics64_data_t host_vm_stat;
		vm_statistics_t stat32;
		mach_msg_type_number_t original_count;
		natural_t speculative_count = vm_page_speculative_count;

		if (*count < HOST_VM_INFO_REV0_COUNT) {
			return KERN_FAILURE;
		}

		get_host_vm_stats(&host_vm_stat);

		stat32 = (vm_statistics_t)info;

		stat32->free_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_free_count + speculative_count);
		stat32->active_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_active_count);

		if (vm_page_local_q) {
			zpercpu_foreach(lq, vm_page_local_q) {
				stat32->active_count += VM_STATISTICS_TRUNCATE_TO_32_BIT(lq->vpl_count);
			}
		}
		stat32->inactive_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_inactive_count);
#if !XNU_TARGET_OS_OSX
		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count);
#else /* !XNU_TARGET_OS_OSX */
		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count);
#endif /* !XNU_TARGET_OS_OSX */
		stat32->zero_fill_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.zero_fill_count);
		stat32->reactivations = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.reactivations);
		stat32->pageins = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageins);
		stat32->pageouts = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageouts);
		stat32->faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.faults);
		stat32->cow_faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.cow_faults);
		stat32->lookups = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.lookups);
		stat32->hits = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.hits);

		/*
		 * Fill in extra info added in later revisions of the
		 * vm_statistics data structure.  Fill in only what can fit
		 * in the data structure the caller gave us!
		 */
		original_count = *count;
		*count = HOST_VM_INFO_REV0_COUNT; /* rev0 already filled in */
		if (original_count >= HOST_VM_INFO_REV1_COUNT) {
			/* rev1 added "purgeable" info */
			stat32->purgeable_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purgeable_count);
			stat32->purges = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purged_count);
			*count = HOST_VM_INFO_REV1_COUNT;
		}

		if (original_count >= HOST_VM_INFO_REV2_COUNT) {
			/* rev2 added "speculative" info */
			stat32->speculative_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(speculative_count);
			*count = HOST_VM_INFO_REV2_COUNT;
		}

		/* rev3 changed some of the fields to be 64-bit */

		return KERN_SUCCESS;
	}

	case HOST_CPU_LOAD_INFO: {
		host_cpu_load_info_t cpu_load_info;

		if (*count < HOST_CPU_LOAD_INFO_COUNT) {
			return KERN_FAILURE;
		}

#define GET_TICKS_VALUE(state, ticks) \
	MACRO_BEGIN cpu_load_info->cpu_ticks[(state)] += (uint32_t)(ticks / hz_tick_interval); \
	MACRO_END
#define GET_TICKS_VALUE_FROM_TIMER(processor, state, timer) \
	MACRO_BEGIN GET_TICKS_VALUE(state, timer_grab(&(processor)->timer)); \
	MACRO_END

		cpu_load_info = (host_cpu_load_info_t)info;
		cpu_load_info->cpu_ticks[CPU_STATE_USER] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		simple_lock(&processor_list_lock, LCK_GRP_NULL);

		unsigned int pcount = processor_count;

		for (unsigned int i = 0; i < pcount; i++) {
			processor_t processor = processor_array[i];
			assert(processor != PROCESSOR_NULL);

			processor_cpu_load_info(processor, cpu_load_info->cpu_ticks);
		}
		simple_unlock(&processor_list_lock);

		*count = HOST_CPU_LOAD_INFO_COUNT;

		return KERN_SUCCESS;
	}

	case HOST_EXPIRED_TASK_INFO: {
		if (*count < TASK_POWER_INFO_COUNT) {
			return KERN_FAILURE;
		}

		task_power_info_t tinfo1 = (task_power_info_t)info;
		task_power_info_v2_t tinfo2 = (task_power_info_v2_t)info;

		tinfo1->task_interrupt_wakeups = dead_task_statistics.task_interrupt_wakeups;
		tinfo1->task_platform_idle_wakeups = dead_task_statistics.task_platform_idle_wakeups;

		tinfo1->task_timer_wakeups_bin_1 = dead_task_statistics.task_timer_wakeups_bin_1;
		tinfo1->task_timer_wakeups_bin_2 = dead_task_statistics.task_timer_wakeups_bin_2;

		tinfo1->total_user = dead_task_statistics.total_user_time;
		tinfo1->total_system = dead_task_statistics.total_system_time;

		if (*count < TASK_POWER_INFO_V2_COUNT) {
			*count = TASK_POWER_INFO_COUNT;
		} else {
			tinfo2->gpu_energy.task_gpu_utilisation = dead_task_statistics.task_gpu_ns;
#if defined(__arm64__)
			tinfo2->task_energy = dead_task_statistics.task_energy;
			tinfo2->task_ptime = dead_task_statistics.total_ptime;
			tinfo2->task_pset_switches = dead_task_statistics.total_pset_switches;
#endif
			*count = TASK_POWER_INFO_V2_COUNT;
		}

		return KERN_SUCCESS;
	}
	default: return KERN_INVALID_ARGUMENT;
	}
}
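
/*
 * Illustrative sketch (user space, not kernel code): querying the 32-bit
 * HOST_VM_INFO flavor.  The count both tells the kernel how much room the
 * caller has and reports how many integer_t slots were filled, which is
 * how the rev0/rev1/rev2 negotiation above works.
 *
 *	vm_statistics_data_t vmstat;
 *	mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
 *	kern_return_t kr = host_statistics(mach_host_self(), HOST_VM_INFO,
 *	    (host_info_t)&vmstat, &count);
 */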

extern uint32_t c_segment_pages_compressed;

#define HOST_STATISTICS_TIME_WINDOW   1  /* seconds */
#define HOST_STATISTICS_MAX_REQUESTS  10 /* maximum number of requests per window */
#define HOST_STATISTICS_MIN_REQUESTS  2  /* minimum number of requests per window */
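
/*
 * Worked example of the parameters above: with a 1-second window, a
 * non-platform task is allowed between 2 and 10 host_statistics() queries
 * per window (the exact quota is re-randomized each window; see
 * rate_limit_host_statistics() below).  Queries beyond the quota are
 * answered from the cached snapshot instead of the live counters.
 */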

uint64_t host_statistics_time_window;

static LCK_GRP_DECLARE(host_statistics_lck_grp, "host_statistics");
static LCK_MTX_DECLARE(host_statistics_lck, &host_statistics_lck_grp);

#define HOST_VM_INFO64_REV0           0
#define HOST_VM_INFO64_REV1           1
#define HOST_EXTMOD_INFO64_REV0       2
#define HOST_LOAD_INFO_REV0           3
#define HOST_VM_INFO_REV0             4
#define HOST_VM_INFO_REV1             5
#define HOST_VM_INFO_REV2             6
#define HOST_CPU_LOAD_INFO_REV0       7
#define HOST_EXPIRED_TASK_INFO_REV0   8
#define HOST_EXPIRED_TASK_INFO_REV1   9
#define HOST_VM_COMPRESSOR_Q_LEN_REV0 10
#define HOST_VM_INFO64_REV2           11
#define NUM_HOST_INFO_DATA_TYPES      12

static vm_statistics64_data_t host_vm_info64_rev0 = {};
static vm_statistics64_data_t host_vm_info64_rev1 = {};
static vm_statistics64_data_t host_vm_info64_rev2 = {};
static vm_extmod_statistics_data_t host_extmod_info64 = {};
static host_load_info_data_t host_load_info = {};
static vm_statistics_data_t host_vm_info_rev0 = {};
static vm_statistics_data_t host_vm_info_rev1 = {};
static vm_statistics_data_t host_vm_info_rev2 = {};
static host_cpu_load_info_data_t host_cpu_load_info = {};
static task_power_info_data_t host_expired_task_info = {};
static task_power_info_v2_data_t host_expired_task_info2 = {};
static vm_compressor_q_lens_data_t host_vm_compressor_q_lens = {};

struct host_stats_cache {
	uint64_t last_access;
	uint64_t current_requests;
	uint64_t max_requests;
	uintptr_t data;
	mach_msg_type_number_t count; /* NOTE: count is in units of sizeof(integer_t) */
};

static struct host_stats_cache g_host_stats_cache[NUM_HOST_INFO_DATA_TYPES] = {
	[HOST_VM_INFO64_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info64_rev0, .count = HOST_VM_INFO64_REV0_COUNT },
	[HOST_VM_INFO64_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info64_rev1, .count = HOST_VM_INFO64_REV1_COUNT },
	[HOST_EXTMOD_INFO64_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_extmod_info64, .count = HOST_EXTMOD_INFO64_COUNT },
	[HOST_LOAD_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_load_info, .count = HOST_LOAD_INFO_COUNT },
	[HOST_VM_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev0, .count = HOST_VM_INFO_REV0_COUNT },
	[HOST_VM_INFO_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev1, .count = HOST_VM_INFO_REV1_COUNT },
	[HOST_VM_INFO_REV2] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev2, .count = HOST_VM_INFO_REV2_COUNT },
	[HOST_CPU_LOAD_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_cpu_load_info, .count = HOST_CPU_LOAD_INFO_COUNT },
	[HOST_EXPIRED_TASK_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_expired_task_info, .count = TASK_POWER_INFO_COUNT },
	[HOST_EXPIRED_TASK_INFO_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_expired_task_info2, .count = TASK_POWER_INFO_V2_COUNT},
	[HOST_VM_COMPRESSOR_Q_LEN_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_compressor_q_lens, .count = VM_COMPRESSOR_Q_LENS_COUNT},
	[HOST_VM_INFO64_REV2] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info64_rev2, .count = HOST_VM_INFO64_REV2_COUNT },
};

void
host_statistics_init(void)
{
	nanoseconds_to_absolutetime((HOST_STATISTICS_TIME_WINDOW * NSEC_PER_SEC), &host_statistics_time_window);
}

static void
cache_host_statistics(int index, host_info64_t info)
{
	if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES) {
		return;
	}

	if (task_get_platform_binary(current_task())) {
		return;
	}

	memcpy((void *)g_host_stats_cache[index].data, info, g_host_stats_cache[index].count * sizeof(integer_t));
	return;
}

static void
get_cached_info(int index, host_info64_t info, mach_msg_type_number_t *count)
{
	if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES) {
		*count = 0;
		return;
	}

	*count = g_host_stats_cache[index].count;
	memcpy(info, (void *)g_host_stats_cache[index].data, g_host_stats_cache[index].count * sizeof(integer_t));
}

static int
get_host_info_data_index(bool is_stat64, host_flavor_t flavor, mach_msg_type_number_t *count, kern_return_t *ret)
{
	switch (flavor) {
	case HOST_VM_INFO64:
		if (!is_stat64) {
			*ret = KERN_INVALID_ARGUMENT;
			return -1;
		}
		if (*count < HOST_VM_INFO64_REV0_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		if (*count >= HOST_VM_INFO64_REV2_COUNT) {
			return HOST_VM_INFO64_REV2;
		}
		if (*count >= HOST_VM_INFO64_REV1_COUNT) {
			return HOST_VM_INFO64_REV1;
		}
		return HOST_VM_INFO64_REV0;

	case HOST_EXTMOD_INFO64:
		if (!is_stat64) {
			*ret = KERN_INVALID_ARGUMENT;
			return -1;
		}
		if (*count < HOST_EXTMOD_INFO64_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		return HOST_EXTMOD_INFO64_REV0;

	case HOST_LOAD_INFO:
		if (*count < HOST_LOAD_INFO_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		return HOST_LOAD_INFO_REV0;

	case HOST_VM_INFO:
		if (*count < HOST_VM_INFO_REV0_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		if (*count >= HOST_VM_INFO_REV2_COUNT) {
			return HOST_VM_INFO_REV2;
		}
		if (*count >= HOST_VM_INFO_REV1_COUNT) {
			return HOST_VM_INFO_REV1;
		}
		return HOST_VM_INFO_REV0;

	case HOST_CPU_LOAD_INFO:
		if (*count < HOST_CPU_LOAD_INFO_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		return HOST_CPU_LOAD_INFO_REV0;

	case HOST_EXPIRED_TASK_INFO:
		if (*count < TASK_POWER_INFO_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		if (*count >= TASK_POWER_INFO_V2_COUNT) {
			return HOST_EXPIRED_TASK_INFO_REV1;
		}
		return HOST_EXPIRED_TASK_INFO_REV0;

	case HOST_VM_COMPRESSOR_Q_LENS:
		if (*count < VM_COMPRESSOR_Q_LENS_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		return HOST_VM_COMPRESSOR_Q_LEN_REV0;

	default:
		*ret = KERN_INVALID_ARGUMENT;
		return -1;
	}
}

static bool
rate_limit_host_statistics(bool is_stat64, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t *count, kern_return_t *ret, int *pindex)
{
	task_t task = current_task();

	assert(task != kernel_task);

	*ret = KERN_SUCCESS;
	*pindex = -1;

	/* Access control only for third-party applications */
	if (task_get_platform_binary(task)) {
		return FALSE;
	}

	/* Rate limit to HOST_STATISTICS_MAX_REQUESTS queries for each HOST_STATISTICS_TIME_WINDOW window of time */
	bool rate_limited = FALSE;
	bool set_last_access = TRUE;

	/* there is a cache for every flavor */
	int index = get_host_info_data_index(is_stat64, flavor, count, ret);
	if (index == -1) {
		goto out;
	}

	*pindex = index;
	lck_mtx_lock(&host_statistics_lck);
	if (g_host_stats_cache[index].last_access > mach_continuous_time() - host_statistics_time_window) {
		set_last_access = FALSE;
		if (g_host_stats_cache[index].current_requests++ >= g_host_stats_cache[index].max_requests) {
			rate_limited = TRUE;
			get_cached_info(index, info, count);
		}
	}
	if (set_last_access) {
		g_host_stats_cache[index].current_requests = 1;
		/*
		 * Select a random number of requests (between HOST_STATISTICS_MIN_REQUESTS
		 * and HOST_STATISTICS_MAX_REQUESTS) to let through to host_statistics.
		 * This way it is not possible to infer, by watching when the cached copy
		 * changes, whether host_statistics was called during the previous window.
		 */
		g_host_stats_cache[index].max_requests = (mach_absolute_time() % (HOST_STATISTICS_MAX_REQUESTS - HOST_STATISTICS_MIN_REQUESTS + 1)) + HOST_STATISTICS_MIN_REQUESTS;
		g_host_stats_cache[index].last_access = mach_continuous_time();
	}
	lck_mtx_unlock(&host_statistics_lck);
out:
	return rate_limited;
}

kern_return_t
vm_stats(void *info, unsigned int *count)
{
	vm_statistics64_data_t host_vm_stat;
	mach_msg_type_number_t original_count;
	unsigned int local_q_internal_count;
	unsigned int local_q_external_count;
	natural_t speculative_count = vm_page_speculative_count;
	natural_t throttled_count = vm_page_throttled_count;

	if (*count < HOST_VM_INFO64_REV0_COUNT) {
		return KERN_FAILURE;
	}
	get_host_vm_stats(&host_vm_stat);

	vm_statistics64_t stat = (vm_statistics64_t)info;

	stat->free_count = vm_page_free_count + speculative_count;
	stat->active_count = vm_page_active_count;

	local_q_internal_count = 0;
	local_q_external_count = 0;
	if (vm_page_local_q) {
		zpercpu_foreach(lq, vm_page_local_q) {
			stat->active_count += lq->vpl_count;
			local_q_internal_count += lq->vpl_internal_count;
			local_q_external_count += lq->vpl_external_count;
		}
	}
	stat->inactive_count = vm_page_inactive_count;
#if !XNU_TARGET_OS_OSX
	stat->wire_count = vm_page_wire_count;
#else /* !XNU_TARGET_OS_OSX */
	stat->wire_count = vm_page_wire_count + throttled_count + vm_lopage_free_count;
#endif /* !XNU_TARGET_OS_OSX */
	stat->zero_fill_count = host_vm_stat.zero_fill_count;
	stat->reactivations = host_vm_stat.reactivations;
	stat->pageins = host_vm_stat.pageins;
	stat->pageouts = host_vm_stat.pageouts;
	stat->faults = host_vm_stat.faults;
	stat->cow_faults = host_vm_stat.cow_faults;
	stat->lookups = host_vm_stat.lookups;
	stat->hits = host_vm_stat.hits;

	stat->purgeable_count = vm_page_purgeable_count;
	stat->purges = vm_page_purged_count;

	stat->speculative_count = speculative_count;

	/*
	 * Fill in extra info added in later revisions of the
	 * vm_statistics data structure.  Fill in only what can fit
	 * in the data structure the caller gave us!
	 */
	original_count = *count;
	*count = HOST_VM_INFO64_REV0_COUNT; /* rev0 already filled in */
	if (original_count >= HOST_VM_INFO64_REV1_COUNT) {
		/* rev1 added "throttled count" */
		stat->throttled_count = throttled_count;
		/* rev1 added "compression" info */
		stat->compressor_page_count = VM_PAGE_COMPRESSOR_COUNT;
		stat->compressions = host_vm_stat.compressions;
		stat->decompressions = host_vm_stat.decompressions;
		stat->swapins = host_vm_stat.swapins;
		stat->swapouts = host_vm_stat.swapouts;
		/* rev1 added:
		 *   "external page count"
		 *   "anonymous page count"
		 *   "total # of pages (uncompressed) held in the compressor"
		 */
		stat->external_page_count = (vm_page_pageable_external_count + local_q_external_count);
		stat->internal_page_count = (vm_page_pageable_internal_count + local_q_internal_count);
		stat->total_uncompressed_pages_in_compressor = c_segment_pages_compressed;
		*count = HOST_VM_INFO64_REV1_COUNT;
	}
	if (original_count >= HOST_VM_INFO64_REV2_COUNT) {
		stat->swapped_count = os_atomic_load(&vm_page_swapped_count, relaxed);
		*count = HOST_VM_INFO64_REV2_COUNT;
	}

	return KERN_SUCCESS;
}

#if DEVELOPMENT || DEBUG
extern uint32_t c_segment_count;
extern uint32_t c_age_count;
extern uint32_t c_early_swappedin_count, c_regular_swappedin_count, c_late_swappedin_count;
extern uint32_t c_early_swapout_count, c_regular_swapout_count, c_late_swapout_count;
extern uint32_t c_swapio_count;
extern uint32_t c_swappedout_count;
extern uint32_t c_swappedout_sparse_count;
extern uint32_t c_major_count;
extern uint32_t c_filling_count;
extern uint32_t c_empty_count;
extern uint32_t c_bad_count;
extern uint32_t c_minor_count;
extern uint32_t c_segments_available;

static kern_return_t
vm_compressor_queue_lens(void *info, unsigned int *count)
{
	if (*count < VM_COMPRESSOR_Q_LENS_COUNT) {
		return KERN_NO_SPACE;
	}

	struct vm_compressor_q_lens *qc = (struct vm_compressor_q_lens *)info;
	qc->qcc_segments_available = c_segments_available;
	qc->qcc_segment_count = c_segment_count;
	qc->qcc_age_count = c_age_count;
	qc->qcc_early_swappedin_count = c_early_swappedin_count;
	qc->qcc_regular_swappedin_count = c_regular_swappedin_count;
	qc->qcc_late_swappedin_count = c_late_swappedin_count;
	qc->qcc_early_swapout_count = c_early_swapout_count;
	qc->qcc_regular_swapout_count = c_regular_swapout_count;
	qc->qcc_late_swapout_count = c_late_swapout_count;
	qc->qcc_swapio_count = c_swapio_count;
	qc->qcc_swappedout_count = c_swappedout_count;
	qc->qcc_swappedout_sparse_count = c_swappedout_sparse_count;
	qc->qcc_major_count = c_major_count;
	qc->qcc_filling_count = c_filling_count;
	qc->qcc_empty_count = c_empty_count;
	qc->qcc_bad_count = c_bad_count;
	qc->qcc_minor_count = c_minor_count;

	*count = VM_COMPRESSOR_Q_LENS_COUNT;

	return KERN_SUCCESS;
}

#endif /* DEVELOPMENT || DEBUG */

kern_return_t host_statistics64(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count);

kern_return_t
host_statistics64(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_HOST;
	}

	switch (flavor) {
	case HOST_VM_INFO64: /* We were asked to get vm_statistics64 */
		return vm_stats(info, count);

	case HOST_EXTMOD_INFO64: /* We were asked to get vm_extmod_statistics */
	{
		vm_extmod_statistics_t out_extmod_statistics;

		if (*count < HOST_EXTMOD_INFO64_COUNT) {
			return KERN_FAILURE;
		}

		out_extmod_statistics = (vm_extmod_statistics_t)info;
		*out_extmod_statistics = host_extmod_statistics;

		*count = HOST_EXTMOD_INFO64_COUNT;

		return KERN_SUCCESS;
	}

	case HOST_VM_COMPRESSOR_Q_LENS:
#if DEVELOPMENT || DEBUG
		return vm_compressor_queue_lens(info, count);
#else
		return KERN_NOT_SUPPORTED;
#endif

	default: /* If we didn't recognize the flavor, send to host_statistics */
		return host_statistics(host, flavor, (host_info_t)info, count);
	}
}
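
/*
 * Illustrative sketch (user space, not kernel code): the 64-bit statistics
 * interface that modern callers should prefer over HOST_VM_INFO.
 *
 *	vm_statistics64_data_t vmstat64;
 *	mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
 *	kern_return_t kr = host_statistics64(mach_host_self(), HOST_VM_INFO64,
 *	    (host_info64_t)&vmstat64, &count);
 */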

kern_return_t
host_statistics64_from_user(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
	kern_return_t ret = KERN_SUCCESS;
	int index;

	if (host == HOST_NULL) {
		return KERN_INVALID_HOST;
	}

	if (rate_limit_host_statistics(TRUE, flavor, info, count, &ret, &index)) {
		return ret;
	}

	if (ret != KERN_SUCCESS) {
		return ret;
	}

	ret = host_statistics64(host, flavor, info, count);

	if (ret == KERN_SUCCESS) {
		cache_host_statistics(index, info);
	}

	return ret;
}

kern_return_t
host_statistics_from_user(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
	kern_return_t ret = KERN_SUCCESS;
	int index;

	if (host == HOST_NULL) {
		return KERN_INVALID_HOST;
	}

	if (rate_limit_host_statistics(FALSE, flavor, info, count, &ret, &index)) {
		return ret;
	}

	if (ret != KERN_SUCCESS) {
		return ret;
	}

	ret = host_statistics(host, flavor, info, count);

	if (ret == KERN_SUCCESS) {
		cache_host_statistics(index, info);
	}

	return ret;
}

/*
 * Get host statistics that require privilege.
 * None for now, just call the un-privileged version.
 */
kern_return_t
host_priv_statistics(host_priv_t host_priv, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	return host_statistics((host_t)host_priv, flavor, info, count);
}

kern_return_t
set_sched_stats_active(boolean_t active)
{
	sched_stats_active = active;
	return KERN_SUCCESS;
}

kern_return_t
get_sched_statistics(struct _processor_statistics_np * out, uint32_t * count)
{
	uint32_t pos = 0;

	if (!sched_stats_active) {
		return KERN_FAILURE;
	}

	percpu_foreach_base(pcpu_base) {
		struct sched_statistics stats;
		processor_t processor;

		pos += sizeof(struct _processor_statistics_np);
		if (pos > *count) {
			return KERN_FAILURE;
		}

		stats = *PERCPU_GET_WITH_BASE(pcpu_base, sched_stats);
		processor = PERCPU_GET_WITH_BASE(pcpu_base, processor);

		out->ps_cpuid = processor->cpu_id;
		out->ps_csw_count = stats.csw_count;
		out->ps_preempt_count = stats.preempt_count;
		out->ps_preempted_rt_count = stats.preempted_rt_count;
		out->ps_preempted_by_rt_count = stats.preempted_by_rt_count;
		out->ps_rt_sched_count = stats.rt_sched_count;
		out->ps_interrupt_count = stats.interrupt_count;
		out->ps_ipi_count = stats.ipi_count;
		out->ps_timer_pop_count = stats.timer_pop_count;
		out->ps_runq_count_sum = SCHED(processor_runq_stats_count_sum)(processor);
		out->ps_idle_transitions = stats.idle_transitions;
		out->ps_quantum_timer_expirations = stats.quantum_timer_expirations;

		out++;
	}

	/* And include RT Queue information */
	pos += sizeof(struct _processor_statistics_np);
	if (pos > *count) {
		return KERN_FAILURE;
	}

	bzero(out, sizeof(*out));
	out->ps_cpuid = (-1);
	out->ps_runq_count_sum = SCHED(rt_runq_count_sum)();
	out++;

	*count = pos;

	return KERN_SUCCESS;
}

kern_return_t
host_page_size(host_t host, vm_size_t * out_page_size)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	*out_page_size = PAGE_SIZE;

	return KERN_SUCCESS;
}
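
/*
 * Illustrative sketch (user space): the canonical way to discover the VM
 * page size without hard-coding PAGE_SIZE.
 *
 *	vm_size_t page_size;
 *	kern_return_t kr = host_page_size(mach_host_self(), &page_size);
 */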

/*
 * Return kernel version string (more than you ever
 * wanted to know about what version of the kernel this is).
 */
extern char version[];

kern_return_t
host_kernel_version(host_t host, kernel_version_t out_version)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	(void)strncpy(out_version, version, sizeof(kernel_version_t));

	return KERN_SUCCESS;
}
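
/*
 * Illustrative sketch (user space): kernel_version_t is a fixed-size char
 * array, so no deallocation is needed after the call.
 *
 *	kernel_version_t ver;
 *	if (host_kernel_version(mach_host_self(), ver) == KERN_SUCCESS) {
 *		printf("%s\n", ver);
 *	}
 */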

/*
 * host_processor_sets:
 *
 * List all processor sets on the host.
 */
kern_return_t
host_processor_sets(host_priv_t host_priv, processor_set_name_array_t * pset_list, mach_msg_type_number_t * count)
{
	mach_port_array_t ports;

	if (host_priv == HOST_PRIV_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Allocate memory.  Can be pageable because it won't be
	 * touched while holding a lock.
	 */

	ports = mach_port_array_alloc(1, Z_WAITOK | Z_NOFAIL);

	/* do the conversion that Mig should handle */
	ports[0].port = convert_pset_name_to_port(&pset0);

	*pset_list = ports;
	*count = 1;

	return KERN_SUCCESS;
}

/*
 * host_processor_set_priv:
 *
 * Return control port for given processor set.
 */
kern_return_t
host_processor_set_priv(host_priv_t host_priv, processor_set_t pset_name, processor_set_t * pset)
{
	if (host_priv == HOST_PRIV_NULL || pset_name == PROCESSOR_SET_NULL) {
		*pset = PROCESSOR_SET_NULL;

		return KERN_INVALID_ARGUMENT;
	}

	*pset = pset_name;

	return KERN_SUCCESS;
}

/*
 * host_processor_info
 *
 * Return info about the processors on this host.  It will return
 * the number of processors, and the specific type of info requested
 * in an OOL array.
 */
kern_return_t
host_processor_info(host_t host,
    processor_flavor_t flavor,
    natural_t * out_pcount,
    processor_info_array_t * out_array,
    mach_msg_type_number_t * out_array_count)
{
	kern_return_t result;
	host_t thost;
	processor_info_t info;
	unsigned int icount;
	unsigned int pcount;
	vm_offset_t addr;
	vm_size_t size, needed;
	vm_map_copy_t copy;

	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	result = processor_info_count(flavor, &icount);
	if (result != KERN_SUCCESS) {
		return result;
	}

	pcount = processor_count;
	assert(pcount != 0);

	needed = pcount * icount * sizeof(natural_t);
	size = vm_map_round_page(needed, VM_MAP_PAGE_MASK(ipc_kernel_map));
	result = kmem_alloc(ipc_kernel_map, &addr, size, KMA_DATA, VM_KERN_MEMORY_IPC);
	if (result != KERN_SUCCESS) {
		return KERN_RESOURCE_SHORTAGE;
	}

	info = (processor_info_t)addr;

	for (unsigned int i = 0; i < pcount; i++) {
		processor_t processor = processor_array[i];
		assert(processor != PROCESSOR_NULL);

		unsigned int tcount = icount;

		result = processor_info(processor, flavor, &thost, info, &tcount);
		if (result != KERN_SUCCESS) {
			kmem_free(ipc_kernel_map, addr, size);
			return result;
		}
		info += icount;
	}

	if (size != needed) {
		bzero((char *)addr + needed, size - needed);
	}

	result = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(ipc_kernel_map)),
	    vm_map_round_page(addr + size, VM_MAP_PAGE_MASK(ipc_kernel_map)), FALSE);
	assert(result == KERN_SUCCESS);
	result = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr, (vm_map_size_t)needed, TRUE, &copy);
	assert(result == KERN_SUCCESS);

	*out_pcount = pcount;
	*out_array = (processor_info_array_t)copy;
	*out_array_count = pcount * icount;

	return KERN_SUCCESS;
}
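
/*
 * Illustrative sketch (user space, not kernel code): fetching per-CPU load
 * ticks via the OOL array described above.  The array arrives as
 * out-of-line memory and must be deallocated by the caller.
 *
 *	natural_t ncpus;
 *	processor_info_array_t info;
 *	mach_msg_type_number_t info_count;
 *	kern_return_t kr = host_processor_info(mach_host_self(),
 *	    PROCESSOR_CPU_LOAD_INFO, &ncpus, &info, &info_count);
 *	if (kr == KERN_SUCCESS) {
 *		// ... consume info ...
 *		vm_deallocate(mach_task_self(), (vm_address_t)info,
 *		    info_count * sizeof(natural_t));
 *	}
 */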

static bool
is_valid_host_special_port(int id)
{
	return (id <= HOST_MAX_SPECIAL_PORT) &&
	       (id >= HOST_MIN_SPECIAL_PORT) &&
	       ((id <= HOST_LAST_SPECIAL_KERNEL_PORT) || (id > HOST_MAX_SPECIAL_KERNEL_PORT));
}

/*
 * Kernel interface for setting a special port.
 */
kern_return_t
kernel_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
{
	ipc_port_t old_port;

	if (!is_valid_host_special_port(id)) {
		panic("attempted to set invalid special port %d", id);
	}

	if (id == HOST_NODE_PORT) {
		return KERN_NOT_SUPPORTED;
	}

	host_lock(host_priv);
	old_port = host_priv->special[id];
	host_priv->special[id] = port;
	host_unlock(host_priv);

	if (IP_VALID(old_port)) {
		ipc_port_release_send(old_port);
	}

	return KERN_SUCCESS;
}

/*
 * Kernel interface for retrieving a special port.
 */
kern_return_t
kernel_get_special_port(host_priv_t host_priv, int id, ipc_port_t * portp)
{
	if (!is_valid_host_special_port(id)) {
		panic("attempted to get invalid special port %d", id);
	}

	host_lock(host_priv);
	*portp = host_priv->special[id];
	host_unlock(host_priv);
	return KERN_SUCCESS;
}

/*
 * User interface for setting a special port.
 *
 * Only permits the user to set a user-owned special port
 * ID, rejecting a kernel-owned special port ID.
 *
 * A special kernel port cannot be set up using this
 * routine; use kernel_set_special_port() instead.
 */
kern_return_t
host_set_special_port_from_user(host_priv_t host_priv, int id, ipc_port_t port)
{
	if (host_priv == HOST_PRIV_NULL || id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT) {
		return KERN_INVALID_ARGUMENT;
	}

	if (task_is_driver(current_task())) {
		return KERN_NO_ACCESS;
	}

	/*
	 * rdar://70585367
	 * Disallow immovable send so other processes can't retrieve it through host_get_special_port().
	 */
	if (!ipc_can_stash_naked_send(port)) {
		return KERN_DENIED;
	}

	return host_set_special_port(host_priv, id, port);
}

kern_return_t
host_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
{
	if (host_priv == HOST_PRIV_NULL || id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT) {
		return KERN_INVALID_ARGUMENT;
	}

	if (current_task() != kernel_task && !task_is_initproc(current_task())) {
		bool allowed = (id == HOST_TELEMETRY_PORT &&
		    IOTaskHasEntitlement(current_task(), "com.apple.private.xpc.launchd.event-monitor"));
#if CONFIG_CSR
		if (!allowed) {
			allowed = (csr_check(CSR_ALLOW_TASK_FOR_PID) == 0);
		}
#endif
		if (!allowed) {
			return KERN_NO_ACCESS;
		}
	}

#if CONFIG_MACF
	if (mac_task_check_set_host_special_port(current_task(), id, port) != 0) {
		return KERN_NO_ACCESS;
	}
#endif

	return kernel_set_special_port(host_priv, id, port);
}

/*
 * User interface for retrieving a special port.
 *
 * Note that there is nothing to prevent a user special
 * port from disappearing after it has been discovered by
 * the caller; thus, using a special port can always result
 * in a "port not valid" error.
 */

kern_return_t
host_get_special_port_from_user(host_priv_t host_priv, __unused int node, int id, ipc_port_t * portp)
{
	if (host_priv == HOST_PRIV_NULL || id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < HOST_MIN_SPECIAL_PORT) {
		return KERN_INVALID_ARGUMENT;
	}

	task_t task = current_task();
	if (task && task_is_driver(task) && id > HOST_MAX_SPECIAL_KERNEL_PORT) {
		/* allow HID drivers to get the sysdiagnose port for keychord handling */
		if (id == HOST_SYSDIAGNOSE_PORT &&
		    IOCurrentTaskHasEntitlement(kIODriverKitHIDFamilyEventServiceEntitlementKey)) {
			goto get_special_port;
		}
		return KERN_NO_ACCESS;
	}
get_special_port:
	return host_get_special_port(host_priv, node, id, portp);
}

kern_return_t
host_get_special_port(host_priv_t host_priv, __unused int node, int id, ipc_port_t * portp)
{
	ipc_port_t port;

	if (host_priv == HOST_PRIV_NULL || id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < HOST_MIN_SPECIAL_PORT) {
		return KERN_INVALID_ARGUMENT;
	}

	host_lock(host_priv);
	port = realhost.special[id];
	switch (id) {
	case HOST_PORT:
		*portp = ipc_kobject_copy_send(port, &realhost, IKOT_HOST);
		break;
	case HOST_PRIV_PORT:
		*portp = ipc_kobject_copy_send(port, &realhost, IKOT_HOST_PRIV);
		break;
	case HOST_IO_MAIN_PORT:
		*portp = ipc_port_copy_send_any(main_device_port);
		break;
	default:
		*portp = ipc_port_copy_send_mqueue(port);
		break;
	}
	host_unlock(host_priv);

	return KERN_SUCCESS;
}
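
/*
 * Illustrative sketch (user space): retrieving a user-settable special
 * port.  Assumes the caller holds a send right to the host-priv port in
 * `host_priv`; HOST_LOCAL_NODE is the conventional node argument.
 *
 *	mach_port_t port;
 *	kern_return_t kr = host_get_special_port(host_priv, HOST_LOCAL_NODE,
 *	    HOST_AUTOMOUNTD_PORT, &port);
 */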

/*
 * host_get_io_main
 *
 * Return the IO main access port for this host.
 */
kern_return_t
host_get_io_main(host_t host, io_main_t * io_mainp)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return host_get_io_main_port(host_priv_self(), io_mainp);
}

host_t
host_self(void)
{
	return &realhost;
}

host_priv_t
host_priv_self(void)
{
	return &realhost;
}

kern_return_t
host_set_atm_diagnostic_flag(host_t host, uint32_t diagnostic_flag)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (!IOCurrentTaskHasEntitlement("com.apple.private.set-atm-diagnostic-flag")) {
		return KERN_NO_ACCESS;
	}

#if CONFIG_ATM
	return atm_set_diagnostic_config(diagnostic_flag);
#else
	(void)diagnostic_flag;
	return KERN_NOT_SUPPORTED;
#endif
}

kern_return_t
host_set_multiuser_config_flags(host_priv_t host_priv, uint32_t multiuser_config)
{
#if !defined(XNU_TARGET_OS_OSX)
	if (host_priv == HOST_PRIV_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * The multiuser bit is used extensively for Shared iPad mode.
	 * The caller sets Shared iPad or other multiuser modes; overriding
	 * it during commpage setup is no longer appropriate.
	 */
	commpage_update_multiuser_config(multiuser_config);
	return KERN_SUCCESS;
#else
	(void)host_priv;
	(void)multiuser_config;
	return KERN_NOT_SUPPORTED;
#endif
}