1 /*
2 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58
59 /*
60 * host.c
61 *
62 * Non-ipc host functions.
63 */
64
65 #include <mach/mach_types.h>
66 #include <mach/boolean.h>
67 #include <mach/host_info.h>
68 #include <mach/host_special_ports.h>
69 #include <mach/kern_return.h>
70 #include <mach/machine.h>
71 #include <mach/port.h>
72 #include <mach/processor_info.h>
73 #include <mach/vm_param.h>
74 #include <mach/processor.h>
75 #include <mach/mach_host_server.h>
76 #include <mach/host_priv_server.h>
77 #include <mach/vm_map.h>
78 #include <mach/task_info.h>
79
80 #include <machine/commpage.h>
81 #include <machine/cpu_capabilities.h>
82
83 #include <kern/kern_types.h>
84 #include <kern/assert.h>
85 #include <kern/kalloc.h>
86 #include <kern/host.h>
87 #include <kern/host_statistics.h>
88 #include <kern/ipc_host.h>
89 #include <kern/misc_protos.h>
90 #include <kern/sched.h>
91 #include <kern/processor.h>
92 #include <kern/mach_node.h> // mach_node_port_changed()
93
94 #include <vm/vm_map.h>
95 #include <vm/vm_purgeable_internal.h>
96 #include <vm/vm_pageout.h>
97
98 #include <IOKit/IOBSD.h> // IOTaskHasEntitlement
99 #include <IOKit/IOKitKeys.h> // DriverKit entitlement strings
100
101
102 #if CONFIG_ATM
103 #include <atm/atm_internal.h>
104 #endif
105
106 #if CONFIG_MACF
107 #include <security/mac_mach_internal.h>
108 #endif
109
110 #if CONFIG_CSR
111 #include <sys/csr.h>
112 #endif
113
114 #include <pexpert/pexpert.h>
115
/*
 * Per-CPU scalable counters backing the host-wide VM statistics reported
 * by host_statistics()/host_statistics64(); snapshotted via counter_load()
 * in get_host_vm_stats() below.
 */
SCALABLE_COUNTER_DEFINE(vm_statistics_zero_fill_count); /* # of zero fill pages */
SCALABLE_COUNTER_DEFINE(vm_statistics_reactivations); /* # of pages reactivated */
SCALABLE_COUNTER_DEFINE(vm_statistics_pageins); /* # of pageins */
SCALABLE_COUNTER_DEFINE(vm_statistics_pageouts); /* # of pageouts */
SCALABLE_COUNTER_DEFINE(vm_statistics_faults); /* # of faults */
SCALABLE_COUNTER_DEFINE(vm_statistics_cow_faults); /* # of copy-on-writes */
SCALABLE_COUNTER_DEFINE(vm_statistics_lookups); /* object cache lookups */
SCALABLE_COUNTER_DEFINE(vm_statistics_hits); /* object cache hits */
SCALABLE_COUNTER_DEFINE(vm_statistics_purges); /* # of pages purged */
SCALABLE_COUNTER_DEFINE(vm_statistics_decompressions); /* # of pages decompressed */
SCALABLE_COUNTER_DEFINE(vm_statistics_compressions); /* # of pages compressed */
SCALABLE_COUNTER_DEFINE(vm_statistics_swapins); /* # of pages swapped in (via compression segments) */
SCALABLE_COUNTER_DEFINE(vm_statistics_swapouts); /* # of pages swapped out (via compression segments) */
SCALABLE_COUNTER_DEFINE(vm_statistics_total_uncompressed_pages_in_compressor); /* # of pages (uncompressed) held within the compressor. */
SCALABLE_COUNTER_DEFINE(vm_page_grab_count);

/* The host self-description; handed out via the host ports. */
host_data_t realhost;
133
/*
 * Snapshot the global scalable VM counters into *out.
 *
 * Only the counter-backed fields of the vm_statistics64 structure are
 * written here; the page-count fields (free_count, active_count, ...)
 * are left untouched and must be filled in by the caller
 * (see host_statistics() and vm_stats()).
 */
static void
get_host_vm_stats(vm_statistics64_t out)
{
	out->zero_fill_count = counter_load(&vm_statistics_zero_fill_count);
	out->reactivations = counter_load(&vm_statistics_reactivations);
	out->pageins = counter_load(&vm_statistics_pageins);
	out->pageouts = counter_load(&vm_statistics_pageouts);
	out->faults = counter_load(&vm_statistics_faults);
	out->cow_faults = counter_load(&vm_statistics_cow_faults);
	out->lookups = counter_load(&vm_statistics_lookups);
	out->hits = counter_load(&vm_statistics_hits);
	out->compressions = counter_load(&vm_statistics_compressions);
	out->decompressions = counter_load(&vm_statistics_decompressions);
	out->swapins = counter_load(&vm_statistics_swapins);
	out->swapouts = counter_load(&vm_statistics_swapouts);
}
/* Accumulated external-modification (task_for_pid etc.) statistics for the host. */
vm_extmod_statistics_data_t host_extmod_statistics;
151
152 kern_return_t
host_processors(host_priv_t host_priv,processor_array_t * out_array,mach_msg_type_number_t * countp)153 host_processors(host_priv_t host_priv, processor_array_t * out_array, mach_msg_type_number_t * countp)
154 {
155 if (host_priv == HOST_PRIV_NULL) {
156 return KERN_INVALID_ARGUMENT;
157 }
158
159 unsigned int count = processor_count;
160 assert(count != 0);
161
162 static_assert(sizeof(mach_port_t) == sizeof(processor_t));
163
164 mach_port_t *ports = kalloc_type(mach_port_t, count, Z_WAITOK);
165 if (!ports) {
166 return KERN_RESOURCE_SHORTAGE;
167 }
168
169 for (unsigned int i = 0; i < count; i++) {
170 processor_t processor = processor_array[i];
171 assert(processor != PROCESSOR_NULL);
172
173 /* do the conversion that Mig should handle */
174 ipc_port_t processor_port = convert_processor_to_port(processor);
175 ports[i] = processor_port;
176 }
177
178 *countp = count;
179 *out_array = (processor_array_t)ports;
180
181 return KERN_SUCCESS;
182 }
183
extern int sched_allow_NO_SMT_threads;

/*
 * host_info: return static/semi-static information about the host,
 * selected by flavor.
 *
 * On entry *count is the size (in units of integer_t) of the caller's
 * buffer; on success it is updated to the number of integers actually
 * filled in. Returns KERN_FAILURE when the buffer is too small for the
 * requested flavor, KERN_INVALID_ARGUMENT for a NULL host or an
 * unrecognized flavor.
 */
kern_return_t
host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	switch (flavor) {
	case HOST_BASIC_INFO: {
		host_basic_info_t basic_info;
		int master_id = master_processor->cpu_id;

		/*
		 * Basic information about this host.
		 */
		if (*count < HOST_BASIC_INFO_OLD_COUNT) {
			return KERN_FAILURE;
		}

		basic_info = (host_basic_info_t)info;

		basic_info->memory_size = machine_info.memory_size;
		basic_info->cpu_type = slot_type(master_id);
		basic_info->cpu_subtype = slot_subtype(master_id);
		basic_info->max_cpus = machine_info.max_cpus;
#if defined(__x86_64__)
		/*
		 * Tasks marked TF_NO_SMT only see the primary (non-SMT)
		 * processors when the NO_SMT scheduling policy is enabled.
		 */
		if (sched_allow_NO_SMT_threads && current_task()->t_flags & TF_NO_SMT) {
			basic_info->avail_cpus = primary_processor_avail_count_user;
		} else {
			basic_info->avail_cpus = processor_avail_count_user;
		}
#else
		basic_info->avail_cpus = processor_avail_count;
#endif


		/* Newer callers pass a bigger buffer and get the extended fields. */
		if (*count >= HOST_BASIC_INFO_COUNT) {
			basic_info->cpu_threadtype = slot_threadtype(master_id);
			basic_info->physical_cpu = machine_info.physical_cpu;
			basic_info->physical_cpu_max = machine_info.physical_cpu_max;
#if defined(__x86_64__)
			/* Keep logical_cpu consistent with the (possibly filtered) avail_cpus. */
			basic_info->logical_cpu = basic_info->avail_cpus;
#else
			basic_info->logical_cpu = machine_info.logical_cpu;
#endif
			basic_info->logical_cpu_max = machine_info.logical_cpu_max;

			basic_info->max_mem = machine_info.max_mem;

			*count = HOST_BASIC_INFO_COUNT;
		} else {
			*count = HOST_BASIC_INFO_OLD_COUNT;
		}

		return KERN_SUCCESS;
	}

	case HOST_SCHED_INFO: {
		host_sched_info_t sched_info;
		uint32_t quantum_time;
		uint64_t quantum_ns;

		/*
		 * Return scheduler information.
		 */
		if (*count < HOST_SCHED_INFO_COUNT) {
			return KERN_FAILURE;
		}

		sched_info = (host_sched_info_t)info;

		/* Report the initial quantum size, converted to milliseconds. */
		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);

		sched_info->min_timeout = sched_info->min_quantum = (uint32_t)(quantum_ns / 1000 / 1000);

		*count = HOST_SCHED_INFO_COUNT;

		return KERN_SUCCESS;
	}

	case HOST_RESOURCE_SIZES: {
		/*
		 * Return sizes of kernel data structures
		 */
		if (*count < HOST_RESOURCE_SIZES_COUNT) {
			return KERN_FAILURE;
		}

		/* XXX Fail until ledgers are implemented */
		return KERN_INVALID_ARGUMENT;
	}

	case HOST_PRIORITY_INFO: {
		host_priority_info_t priority_info;

		if (*count < HOST_PRIORITY_INFO_COUNT) {
			return KERN_FAILURE;
		}

		priority_info = (host_priority_info_t)info;

		/* Export the scheduler's priority band boundaries. */
		priority_info->kernel_priority = MINPRI_KERNEL;
		priority_info->system_priority = MINPRI_KERNEL;
		priority_info->server_priority = MINPRI_RESERVED;
		priority_info->user_priority = BASEPRI_DEFAULT;
		priority_info->depress_priority = DEPRESSPRI;
		priority_info->idle_priority = IDLEPRI;
		priority_info->minimum_priority = MINPRI_USER;
		priority_info->maximum_priority = MAXPRI_RESERVED;

		*count = HOST_PRIORITY_INFO_COUNT;

		return KERN_SUCCESS;
	}

	/*
	 * Gestalt for various trap facilities.
	 */
	case HOST_MACH_MSG_TRAP:
	case HOST_SEMAPHORE_TRAPS: {
		/* Presence is indicated by success; no payload is returned. */
		*count = 0;
		return KERN_SUCCESS;
	}

	case HOST_CAN_HAS_DEBUGGER: {
		host_can_has_debugger_info_t can_has_debugger_info;

		if (*count < HOST_CAN_HAS_DEBUGGER_COUNT) {
			return KERN_FAILURE;
		}

		can_has_debugger_info = (host_can_has_debugger_info_t)info;
		can_has_debugger_info->can_has_debugger = PE_i_can_has_debugger(NULL);
		*count = HOST_CAN_HAS_DEBUGGER_COUNT;

		return KERN_SUCCESS;
	}

	case HOST_VM_PURGABLE: {
		if (*count < HOST_VM_PURGABLE_COUNT) {
			return KERN_FAILURE;
		}

		/* NULL task: report host-wide (not per-task) purgeable stats. */
		vm_purgeable_stats((vm_purgeable_info_t)info, NULL);

		*count = HOST_VM_PURGABLE_COUNT;
		return KERN_SUCCESS;
	}

	case HOST_DEBUG_INFO_INTERNAL: {
#if DEVELOPMENT || DEBUG
		if (*count < HOST_DEBUG_INFO_INTERNAL_COUNT) {
			return KERN_FAILURE;
		}

		host_debug_info_internal_t debug_info = (host_debug_info_internal_t)info;
		bzero(debug_info, sizeof(host_debug_info_internal_data_t));
		*count = HOST_DEBUG_INFO_INTERNAL_COUNT;

		/* Report which optional kernel configs this build was compiled with. */
#if CONFIG_COALITIONS
		debug_info->config_coalitions = 1;
#endif
		debug_info->config_bank = 1;
#if CONFIG_ATM
		debug_info->config_atm = 1;
#endif
#if CONFIG_CSR
		debug_info->config_csr = 1;
#endif
		return KERN_SUCCESS;
#else /* DEVELOPMENT || DEBUG */
		return KERN_NOT_SUPPORTED;
#endif
	}

	case HOST_PREFERRED_USER_ARCH: {
		host_preferred_user_arch_t user_arch_info;

		/*
		 * The preferred user-space CPU type/subtype for this host.
		 */
		if (*count < HOST_PREFERRED_USER_ARCH_COUNT) {
			return KERN_FAILURE;
		}

		user_arch_info = (host_preferred_user_arch_t)info;

#if defined(PREFERRED_USER_CPU_TYPE) && defined(PREFERRED_USER_CPU_SUBTYPE)
		/* Device-tree defaults may override the compile-time preference. */
		cpu_type_t preferred_cpu_type;
		cpu_subtype_t preferred_cpu_subtype;
		if (!PE_get_default("kern.preferred_cpu_type", &preferred_cpu_type, sizeof(cpu_type_t))) {
			preferred_cpu_type = PREFERRED_USER_CPU_TYPE;
		}
		if (!PE_get_default("kern.preferred_cpu_subtype", &preferred_cpu_subtype, sizeof(cpu_subtype_t))) {
			preferred_cpu_subtype = PREFERRED_USER_CPU_SUBTYPE;
		}
		user_arch_info->cpu_type = preferred_cpu_type;
		user_arch_info->cpu_subtype = preferred_cpu_subtype;
#else
		/* No configured preference: fall back to the boot processor's slot. */
		int master_id = master_processor->cpu_id;
		user_arch_info->cpu_type = slot_type(master_id);
		user_arch_info->cpu_subtype = slot_subtype(master_id);
#endif


		*count = HOST_PREFERRED_USER_ARCH_COUNT;

		return KERN_SUCCESS;
	}

	default: return KERN_INVALID_ARGUMENT;
	}
}
400
kern_return_t host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count);

/*
 * host_statistics: return dynamic (32-bit) statistics about the host,
 * selected by flavor.
 *
 * *count is the caller's buffer size in integer_t units on entry and the
 * number of integers filled in on return. For HOST_VM_INFO the amount of
 * data returned is revision-gated by the caller-supplied count, so old
 * callers keep working. User-space requests are routed through
 * host_statistics_from_user() which applies rate limiting first.
 */
kern_return_t
host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_HOST;
	}

	switch (flavor) {
	case HOST_LOAD_INFO: {
		host_load_info_t load_info;

		if (*count < HOST_LOAD_INFO_COUNT) {
			return KERN_FAILURE;
		}

		load_info = (host_load_info_t)info;

		/* Copy out the scheduler's load average and mach factor arrays. */
		bcopy((char *)avenrun, (char *)load_info->avenrun, sizeof avenrun);
		bcopy((char *)mach_factor, (char *)load_info->mach_factor, sizeof mach_factor);

		*count = HOST_LOAD_INFO_COUNT;
		return KERN_SUCCESS;
	}

	case HOST_VM_INFO: {
		vm_statistics64_data_t host_vm_stat;
		vm_statistics_t stat32;
		mach_msg_type_number_t original_count;

		if (*count < HOST_VM_INFO_REV0_COUNT) {
			return KERN_FAILURE;
		}

		get_host_vm_stats(&host_vm_stat);

		stat32 = (vm_statistics_t)info;

		/* 64-bit page counts are clamped to fit this legacy 32-bit flavor. */
		stat32->free_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_free_count + vm_page_speculative_count);
		stat32->active_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_active_count);

		if (vm_page_local_q) {
			/* Include pages sitting on the per-CPU local queues. */
			zpercpu_foreach(lq, vm_page_local_q) {
				stat32->active_count += VM_STATISTICS_TRUNCATE_TO_32_BIT(lq->vpl_count);
			}
		}
		stat32->inactive_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_inactive_count);
#if !XNU_TARGET_OS_OSX
		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count);
#else /* !XNU_TARGET_OS_OSX */
		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count);
#endif /* !XNU_TARGET_OS_OSX */
		stat32->zero_fill_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.zero_fill_count);
		stat32->reactivations = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.reactivations);
		stat32->pageins = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageins);
		stat32->pageouts = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageouts);
		stat32->faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.faults);
		stat32->cow_faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.cow_faults);
		stat32->lookups = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.lookups);
		stat32->hits = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.hits);

		/*
		 * Fill in extra info added in later revisions of the
		 * vm_statistics data structure. Fill in only what can fit
		 * in the data structure the caller gave us !
		 */
		original_count = *count;
		*count = HOST_VM_INFO_REV0_COUNT; /* rev0 already filled in */
		if (original_count >= HOST_VM_INFO_REV1_COUNT) {
			/* rev1 added "purgeable" info */
			stat32->purgeable_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purgeable_count);
			stat32->purges = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purged_count);
			*count = HOST_VM_INFO_REV1_COUNT;
		}

		if (original_count >= HOST_VM_INFO_REV2_COUNT) {
			/* rev2 added "speculative" info */
			stat32->speculative_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_speculative_count);
			*count = HOST_VM_INFO_REV2_COUNT;
		}

		/* rev3 changed some of the fields to be 64-bit*/

		return KERN_SUCCESS;
	}

	case HOST_CPU_LOAD_INFO: {
		host_cpu_load_info_t cpu_load_info;

		if (*count < HOST_CPU_LOAD_INFO_COUNT) {
			return KERN_FAILURE;
		}

/* Accumulate (ticks / hz_tick_interval) into the given CPU-state bucket. */
#define GET_TICKS_VALUE(state, ticks) \
MACRO_BEGIN cpu_load_info->cpu_ticks[(state)] += (uint32_t)(ticks / hz_tick_interval); \
MACRO_END
#define GET_TICKS_VALUE_FROM_TIMER(processor, state, timer) \
MACRO_BEGIN GET_TICKS_VALUE(state, timer_grab(&(processor)->timer)); \
MACRO_END

		cpu_load_info = (host_cpu_load_info_t)info;
		cpu_load_info->cpu_ticks[CPU_STATE_USER] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		/* Hold the list lock so processors cannot come or go mid-scan. */
		simple_lock(&processor_list_lock, LCK_GRP_NULL);

		unsigned int pcount = processor_count;

		for (unsigned int i = 0; i < pcount; i++) {
			processor_t processor = processor_array[i];
			assert(processor != PROCESSOR_NULL);

			timer_t idle_state;
			uint64_t idle_time_snapshot1, idle_time_snapshot2;
			uint64_t idle_time_tstamp1, idle_time_tstamp2;

			/* See discussion in processor_info(PROCESSOR_CPU_LOAD_INFO) */

			GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_USER, user_state);
			if (precise_user_kernel_time) {
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_SYSTEM, system_state);
			} else {
				/* system_state may represent either sys or user */
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_USER, system_state);
			}

			/*
			 * The idle timer may be updated concurrently by the
			 * processor itself; take a consistent double-read
			 * snapshot before deciding how to account idle time.
			 */
			idle_state = &processor->idle_state;
			idle_time_snapshot1 = timer_grab(idle_state);
			idle_time_tstamp1 = idle_state->tstamp;

			if (processor->current_state != idle_state) {
				/* Processor is non-idle, so idle timer should be accurate */
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_IDLE, idle_state);
			} else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) ||
			    (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))) {
				/* Idle timer is being updated concurrently, second stamp is good enough */
				GET_TICKS_VALUE(CPU_STATE_IDLE, idle_time_snapshot2);
			} else {
				/*
				 * Idle timer may be very stale. Fortunately we have established
				 * that idle_time_snapshot1 and idle_time_tstamp1 are unchanging
				 */
				idle_time_snapshot1 += mach_absolute_time() - idle_time_tstamp1;

				GET_TICKS_VALUE(CPU_STATE_IDLE, idle_time_snapshot1);
			}
		}
		simple_unlock(&processor_list_lock);

		*count = HOST_CPU_LOAD_INFO_COUNT;

		return KERN_SUCCESS;
	}

	case HOST_EXPIRED_TASK_INFO: {
		if (*count < TASK_POWER_INFO_COUNT) {
			return KERN_FAILURE;
		}

		/* Same buffer, viewed as v1 or v2 depending on the caller's count. */
		task_power_info_t tinfo1 = (task_power_info_t)info;
		task_power_info_v2_t tinfo2 = (task_power_info_v2_t)info;

		tinfo1->task_interrupt_wakeups = dead_task_statistics.task_interrupt_wakeups;
		tinfo1->task_platform_idle_wakeups = dead_task_statistics.task_platform_idle_wakeups;

		tinfo1->task_timer_wakeups_bin_1 = dead_task_statistics.task_timer_wakeups_bin_1;

		tinfo1->task_timer_wakeups_bin_2 = dead_task_statistics.task_timer_wakeups_bin_2;

		tinfo1->total_user = dead_task_statistics.total_user_time;
		tinfo1->total_system = dead_task_statistics.total_system_time;
		if (*count < TASK_POWER_INFO_V2_COUNT) {
			*count = TASK_POWER_INFO_COUNT;
		} else if (*count >= TASK_POWER_INFO_V2_COUNT) {
			tinfo2->gpu_energy.task_gpu_utilisation = dead_task_statistics.task_gpu_ns;
#if defined(__arm__) || defined(__arm64__)
			tinfo2->task_energy = dead_task_statistics.task_energy;
			tinfo2->task_ptime = dead_task_statistics.total_ptime;
			tinfo2->task_pset_switches = dead_task_statistics.total_pset_switches;
#endif
			*count = TASK_POWER_INFO_V2_COUNT;
		}

		return KERN_SUCCESS;
	}
	default: return KERN_INVALID_ARGUMENT;
	}
}
592
extern uint32_t c_segment_pages_compressed;

/*
 * Rate limiting for host_statistics()/host_statistics64() calls coming
 * from non-platform tasks: at most HOST_STATISTICS_MAX_REQUESTS fresh
 * queries per HOST_STATISTICS_TIME_WINDOW; excess callers are served a
 * cached copy (see rate_limit_host_statistics()).
 */
#define HOST_STATISTICS_TIME_WINDOW 1 /* seconds */
#define HOST_STATISTICS_MAX_REQUESTS 10 /* maximum number of requests per window */
#define HOST_STATISTICS_MIN_REQUESTS 2 /* minimum number of requests per window */

/* HOST_STATISTICS_TIME_WINDOW converted to absolute-time units at boot. */
uint64_t host_statistics_time_window;

static LCK_GRP_DECLARE(host_statistics_lck_grp, "host_statistics");
static LCK_MTX_DECLARE(host_statistics_lck, &host_statistics_lck_grp);

/* Indices into g_host_stats_cache: one slot per (flavor, revision) pair. */
#define HOST_VM_INFO64_REV0 0
#define HOST_VM_INFO64_REV1 1
#define HOST_EXTMOD_INFO64_REV0 2
#define HOST_LOAD_INFO_REV0 3
#define HOST_VM_INFO_REV0 4
#define HOST_VM_INFO_REV1 5
#define HOST_VM_INFO_REV2 6
#define HOST_CPU_LOAD_INFO_REV0 7
#define HOST_EXPIRED_TASK_INFO_REV0 8
#define HOST_EXPIRED_TASK_INFO_REV1 9
#define NUM_HOST_INFO_DATA_TYPES 10

/* Backing storage for the cached copies of each flavor/revision. */
static vm_statistics64_data_t host_vm_info64_rev0 = {};
static vm_statistics64_data_t host_vm_info64_rev1 = {};
static vm_extmod_statistics_data_t host_extmod_info64 = {};
static host_load_info_data_t host_load_info = {};
static vm_statistics_data_t host_vm_info_rev0 = {};
static vm_statistics_data_t host_vm_info_rev1 = {};
static vm_statistics_data_t host_vm_info_rev2 = {};
static host_cpu_load_info_data_t host_cpu_load_info = {};
static task_power_info_data_t host_expired_task_info = {};
static task_power_info_v2_data_t host_expired_task_info2 = {};

/* One cache entry per flavor/revision; protected by host_statistics_lck. */
struct host_stats_cache {
	uint64_t last_access;           /* mach_continuous_time of window start */
	uint64_t current_requests;      /* requests seen in the current window */
	uint64_t max_requests;          /* randomized budget for this window */
	uintptr_t data;                 /* pointer to the cached payload above */
	mach_msg_type_number_t count; //NOTE count is in sizeof(integer_t)
};

static struct host_stats_cache g_host_stats_cache[NUM_HOST_INFO_DATA_TYPES] = {
	[HOST_VM_INFO64_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info64_rev0, .count = HOST_VM_INFO64_REV0_COUNT },
	[HOST_VM_INFO64_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info64_rev1, .count = HOST_VM_INFO64_REV1_COUNT },
	[HOST_EXTMOD_INFO64_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_extmod_info64, .count = HOST_EXTMOD_INFO64_COUNT },
	[HOST_LOAD_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_load_info, .count = HOST_LOAD_INFO_COUNT },
	[HOST_VM_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev0, .count = HOST_VM_INFO_REV0_COUNT },
	[HOST_VM_INFO_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev1, .count = HOST_VM_INFO_REV1_COUNT },
	[HOST_VM_INFO_REV2] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev2, .count = HOST_VM_INFO_REV2_COUNT },
	[HOST_CPU_LOAD_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_cpu_load_info, .count = HOST_CPU_LOAD_INFO_COUNT },
	[HOST_EXPIRED_TASK_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_expired_task_info, .count = TASK_POWER_INFO_COUNT },
	[HOST_EXPIRED_TASK_INFO_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_expired_task_info2, .count = TASK_POWER_INFO_V2_COUNT},
};
647
648
/*
 * Convert the rate-limit window length from seconds to absolute-time
 * units, caching the result in host_statistics_time_window for use by
 * rate_limit_host_statistics().
 */
void
host_statistics_init(void)
{
	nanoseconds_to_absolutetime((HOST_STATISTICS_TIME_WINDOW * NSEC_PER_SEC), &host_statistics_time_window);
}
654
655 static void
cache_host_statistics(int index,host_info64_t info)656 cache_host_statistics(int index, host_info64_t info)
657 {
658 if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES) {
659 return;
660 }
661
662 task_t task = current_task();
663 if (task->t_flags & TF_PLATFORM) {
664 return;
665 }
666
667 memcpy((void *)g_host_stats_cache[index].data, info, g_host_stats_cache[index].count * sizeof(integer_t));
668 return;
669 }
670
671 static void
get_cached_info(int index,host_info64_t info,mach_msg_type_number_t * count)672 get_cached_info(int index, host_info64_t info, mach_msg_type_number_t* count)
673 {
674 if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES) {
675 *count = 0;
676 return;
677 }
678
679 *count = g_host_stats_cache[index].count;
680 memcpy(info, (void *)g_host_stats_cache[index].data, g_host_stats_cache[index].count * sizeof(integer_t));
681 }
682
683 static int
get_host_info_data_index(bool is_stat64,host_flavor_t flavor,mach_msg_type_number_t * count,kern_return_t * ret)684 get_host_info_data_index(bool is_stat64, host_flavor_t flavor, mach_msg_type_number_t* count, kern_return_t* ret)
685 {
686 switch (flavor) {
687 case HOST_VM_INFO64:
688 if (!is_stat64) {
689 *ret = KERN_INVALID_ARGUMENT;
690 return -1;
691 }
692 if (*count < HOST_VM_INFO64_REV0_COUNT) {
693 *ret = KERN_FAILURE;
694 return -1;
695 }
696 if (*count >= HOST_VM_INFO64_REV1_COUNT) {
697 return HOST_VM_INFO64_REV1;
698 }
699 return HOST_VM_INFO64_REV0;
700
701 case HOST_EXTMOD_INFO64:
702 if (!is_stat64) {
703 *ret = KERN_INVALID_ARGUMENT;
704 return -1;
705 }
706 if (*count < HOST_EXTMOD_INFO64_COUNT) {
707 *ret = KERN_FAILURE;
708 return -1;
709 }
710 return HOST_EXTMOD_INFO64_REV0;
711
712 case HOST_LOAD_INFO:
713 if (*count < HOST_LOAD_INFO_COUNT) {
714 *ret = KERN_FAILURE;
715 return -1;
716 }
717 return HOST_LOAD_INFO_REV0;
718
719 case HOST_VM_INFO:
720 if (*count < HOST_VM_INFO_REV0_COUNT) {
721 *ret = KERN_FAILURE;
722 return -1;
723 }
724 if (*count >= HOST_VM_INFO_REV2_COUNT) {
725 return HOST_VM_INFO_REV2;
726 }
727 if (*count >= HOST_VM_INFO_REV1_COUNT) {
728 return HOST_VM_INFO_REV1;
729 }
730 return HOST_VM_INFO_REV0;
731
732 case HOST_CPU_LOAD_INFO:
733 if (*count < HOST_CPU_LOAD_INFO_COUNT) {
734 *ret = KERN_FAILURE;
735 return -1;
736 }
737 return HOST_CPU_LOAD_INFO_REV0;
738
739 case HOST_EXPIRED_TASK_INFO:
740 if (*count < TASK_POWER_INFO_COUNT) {
741 *ret = KERN_FAILURE;
742 return -1;
743 }
744 if (*count >= TASK_POWER_INFO_V2_COUNT) {
745 return HOST_EXPIRED_TASK_INFO_REV1;
746 }
747 return HOST_EXPIRED_TASK_INFO_REV0;
748
749 default:
750 *ret = KERN_INVALID_ARGUMENT;
751 return -1;
752 }
753 }
754
/*
 * Rate-limit host statistics queries from non-platform tasks.
 *
 * Returns TRUE when this request exceeded the budget for the current
 * time window; in that case the cached copy has been written to
 * info/count and *ret holds the status to hand back to the caller.
 * Returns FALSE when the caller should compute fresh data -- either
 * because the task is a platform binary, or because the budget allows
 * it (check *ret first: it is non-KERN_SUCCESS on a bad flavor/count).
 * *pindex receives the cache slot for the flavor, or -1.
 */
static bool
rate_limit_host_statistics(bool is_stat64, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t* count, kern_return_t* ret, int *pindex)
{
	task_t task = current_task();

	assert(task != kernel_task);

	*ret = KERN_SUCCESS;
	*pindex = -1;

	/* Access control only for third party applications */
	if (task->t_flags & TF_PLATFORM) {
		return FALSE;
	}

	/* Rate limit to HOST_STATISTICS_MAX_REQUESTS queries for each HOST_STATISTICS_TIME_WINDOW window of time */
	bool rate_limited = FALSE;
	bool set_last_access = TRUE;

	/* there is a cache for every flavor */
	int index = get_host_info_data_index(is_stat64, flavor, count, ret);
	if (index == -1) {
		goto out;
	}

	*pindex = index;
	lck_mtx_lock(&host_statistics_lck);
	if (g_host_stats_cache[index].last_access > mach_continuous_time() - host_statistics_time_window) {
		/* Still inside the current window: charge this request against the budget. */
		set_last_access = FALSE;
		if (g_host_stats_cache[index].current_requests++ >= g_host_stats_cache[index].max_requests) {
			rate_limited = TRUE;
			get_cached_info(index, info, count);
		}
	}
	if (set_last_access) {
		/* Window expired (or first use): start a new window for this slot. */
		g_host_stats_cache[index].current_requests = 1;
		/*
		 * select a random number of requests (included between HOST_STATISTICS_MIN_REQUESTS and HOST_STATISTICS_MAX_REQUESTS)
		 * to let query host_statistics.
		 * In this way it is not possible to infer, looking at when a cached copy changes, if host_statistics was called on
		 * the previous window.
		 */
		g_host_stats_cache[index].max_requests = (mach_absolute_time() % (HOST_STATISTICS_MAX_REQUESTS - HOST_STATISTICS_MIN_REQUESTS + 1)) + HOST_STATISTICS_MIN_REQUESTS;
		g_host_stats_cache[index].last_access = mach_continuous_time();
	}
	lck_mtx_unlock(&host_statistics_lck);
out:
	return rate_limited;
}
804
/*
 * vm_stats: fill a 64-bit vm_statistics64 structure with host-wide VM
 * counters and page counts. Backs the HOST_VM_INFO64 flavor of
 * host_statistics64().
 *
 * *count is the caller's buffer size in integer_t units on entry; on
 * return it holds the revision actually filled (REV0, or REV1 when the
 * buffer was big enough for the compressor/throttle fields).
 */
kern_return_t
vm_stats(void *info, unsigned int *count)
{
	vm_statistics64_data_t host_vm_stat;
	mach_msg_type_number_t original_count;
	unsigned int local_q_internal_count;
	unsigned int local_q_external_count;

	if (*count < HOST_VM_INFO64_REV0_COUNT) {
		return KERN_FAILURE;
	}
	get_host_vm_stats(&host_vm_stat);

	vm_statistics64_t stat = (vm_statistics64_t)info;

	stat->free_count = vm_page_free_count + vm_page_speculative_count;
	stat->active_count = vm_page_active_count;

	local_q_internal_count = 0;
	local_q_external_count = 0;
	if (vm_page_local_q) {
		/* Fold in pages parked on the per-CPU local queues. */
		zpercpu_foreach(lq, vm_page_local_q) {
			stat->active_count += lq->vpl_count;
			local_q_internal_count += lq->vpl_internal_count;
			local_q_external_count += lq->vpl_external_count;
		}
	}
	stat->inactive_count = vm_page_inactive_count;
#if !XNU_TARGET_OS_OSX
	stat->wire_count = vm_page_wire_count;
#else /* !XNU_TARGET_OS_OSX */
	stat->wire_count = vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count;
#endif /* !XNU_TARGET_OS_OSX */
	stat->zero_fill_count = host_vm_stat.zero_fill_count;
	stat->reactivations = host_vm_stat.reactivations;
	stat->pageins = host_vm_stat.pageins;
	stat->pageouts = host_vm_stat.pageouts;
	stat->faults = host_vm_stat.faults;
	stat->cow_faults = host_vm_stat.cow_faults;
	stat->lookups = host_vm_stat.lookups;
	stat->hits = host_vm_stat.hits;

	stat->purgeable_count = vm_page_purgeable_count;
	stat->purges = vm_page_purged_count;

	stat->speculative_count = vm_page_speculative_count;

	/*
	 * Fill in extra info added in later revisions of the
	 * vm_statistics data structure. Fill in only what can fit
	 * in the data structure the caller gave us !
	 */
	original_count = *count;
	*count = HOST_VM_INFO64_REV0_COUNT; /* rev0 already filled in */
	if (original_count >= HOST_VM_INFO64_REV1_COUNT) {
		/* rev1 added "throttled count" */
		stat->throttled_count = vm_page_throttled_count;
		/* rev1 added "compression" info */
		stat->compressor_page_count = VM_PAGE_COMPRESSOR_COUNT;
		stat->compressions = host_vm_stat.compressions;
		stat->decompressions = host_vm_stat.decompressions;
		stat->swapins = host_vm_stat.swapins;
		stat->swapouts = host_vm_stat.swapouts;
		/* rev1 added:
		 * "external page count"
		 * "anonymous page count"
		 * "total # of pages (uncompressed) held in the compressor"
		 */
		stat->external_page_count = (vm_page_pageable_external_count + local_q_external_count);
		stat->internal_page_count = (vm_page_pageable_internal_count + local_q_internal_count);
		stat->total_uncompressed_pages_in_compressor = c_segment_pages_compressed;
		*count = HOST_VM_INFO64_REV1_COUNT;
	}

	return KERN_SUCCESS;
}
881
882 kern_return_t host_statistics64(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count);
883
884 kern_return_t
host_statistics64(host_t host,host_flavor_t flavor,host_info64_t info,mach_msg_type_number_t * count)885 host_statistics64(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
886 {
887 if (host == HOST_NULL) {
888 return KERN_INVALID_HOST;
889 }
890
891 switch (flavor) {
892 case HOST_VM_INFO64: /* We were asked to get vm_statistics64 */
893 return vm_stats(info, count);
894
895 case HOST_EXTMOD_INFO64: /* We were asked to get vm_statistics64 */
896 {
897 vm_extmod_statistics_t out_extmod_statistics;
898
899 if (*count < HOST_EXTMOD_INFO64_COUNT) {
900 return KERN_FAILURE;
901 }
902
903 out_extmod_statistics = (vm_extmod_statistics_t)info;
904 *out_extmod_statistics = host_extmod_statistics;
905
906 *count = HOST_EXTMOD_INFO64_COUNT;
907
908 return KERN_SUCCESS;
909 }
910
911 default: /* If we didn't recognize the flavor, send to host_statistics */
912 return host_statistics(host, flavor, (host_info_t)info, count);
913 }
914 }
915
916 kern_return_t
host_statistics64_from_user(host_t host,host_flavor_t flavor,host_info64_t info,mach_msg_type_number_t * count)917 host_statistics64_from_user(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
918 {
919 kern_return_t ret = KERN_SUCCESS;
920 int index;
921
922 if (host == HOST_NULL) {
923 return KERN_INVALID_HOST;
924 }
925
926 if (rate_limit_host_statistics(TRUE, flavor, info, count, &ret, &index)) {
927 return ret;
928 }
929
930 if (ret != KERN_SUCCESS) {
931 return ret;
932 }
933
934 ret = host_statistics64(host, flavor, info, count);
935
936 if (ret == KERN_SUCCESS) {
937 cache_host_statistics(index, info);
938 }
939
940 return ret;
941 }
942
943 kern_return_t
host_statistics_from_user(host_t host,host_flavor_t flavor,host_info64_t info,mach_msg_type_number_t * count)944 host_statistics_from_user(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
945 {
946 kern_return_t ret = KERN_SUCCESS;
947 int index;
948
949 if (host == HOST_NULL) {
950 return KERN_INVALID_HOST;
951 }
952
953 if (rate_limit_host_statistics(FALSE, flavor, info, count, &ret, &index)) {
954 return ret;
955 }
956
957 if (ret != KERN_SUCCESS) {
958 return ret;
959 }
960
961 ret = host_statistics(host, flavor, info, count);
962
963 if (ret == KERN_SUCCESS) {
964 cache_host_statistics(index, info);
965 }
966
967 return ret;
968 }
969
970 /*
971 * Get host statistics that require privilege.
972 * None for now, just call the un-privileged version.
973 */
974 kern_return_t
host_priv_statistics(host_priv_t host_priv,host_flavor_t flavor,host_info_t info,mach_msg_type_number_t * count)975 host_priv_statistics(host_priv_t host_priv, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
976 {
977 return host_statistics((host_t)host_priv, flavor, info, count);
978 }
979
980 kern_return_t
set_sched_stats_active(boolean_t active)981 set_sched_stats_active(boolean_t active)
982 {
983 sched_stats_active = active;
984 return KERN_SUCCESS;
985 }
986
/*
 * Copy per-CPU scheduler statistics into the caller's array.
 *
 * One _processor_statistics_np entry is written per CPU, followed by
 * a sentinel entry (ps_cpuid == -1) carrying the realtime run-queue
 * count sum.  On entry *count is the buffer size in bytes; on success
 * it is updated to the number of bytes actually written.  Fails if
 * stats collection is disabled (see set_sched_stats_active) or the
 * buffer is too small.
 */
kern_return_t
get_sched_statistics(struct _processor_statistics_np * out, uint32_t * count)
{
	uint32_t pos = 0;  /* bytes written so far */

	if (!sched_stats_active) {
		return KERN_FAILURE;
	}

	percpu_foreach_base(pcpu_base) {
		struct sched_statistics stats;
		processor_t processor;

		/* Reserve space for this CPU's entry before writing it. */
		pos += sizeof(struct _processor_statistics_np);
		if (pos > *count) {
			return KERN_FAILURE;
		}

		/* Snapshot the per-CPU stats, then translate field by field. */
		stats = *PERCPU_GET_WITH_BASE(pcpu_base, sched_stats);
		processor = PERCPU_GET_WITH_BASE(pcpu_base, processor);

		out->ps_cpuid = processor->cpu_id;
		out->ps_csw_count = stats.csw_count;
		out->ps_preempt_count = stats.preempt_count;
		out->ps_preempted_rt_count = stats.preempted_rt_count;
		out->ps_preempted_by_rt_count = stats.preempted_by_rt_count;
		out->ps_rt_sched_count = stats.rt_sched_count;
		out->ps_interrupt_count = stats.interrupt_count;
		out->ps_ipi_count = stats.ipi_count;
		out->ps_timer_pop_count = stats.timer_pop_count;
		out->ps_runq_count_sum = SCHED(processor_runq_stats_count_sum)(processor);
		out->ps_idle_transitions = stats.idle_transitions;
		out->ps_quantum_timer_expirations = stats.quantum_timer_expirations;

		out++;
	}

	/* And include RT Queue information in a trailing sentinel entry. */
	pos += sizeof(struct _processor_statistics_np);
	if (pos > *count) {
		return KERN_FAILURE;
	}

	bzero(out, sizeof(*out));
	out->ps_cpuid = (-1);
	out->ps_runq_count_sum = SCHED(rt_runq_count_sum)();
	out++;

	*count = pos;

	return KERN_SUCCESS;
}
1039
1040 kern_return_t
host_page_size(host_t host,vm_size_t * out_page_size)1041 host_page_size(host_t host, vm_size_t * out_page_size)
1042 {
1043 if (host == HOST_NULL) {
1044 return KERN_INVALID_ARGUMENT;
1045 }
1046
1047 *out_page_size = PAGE_SIZE;
1048
1049 return KERN_SUCCESS;
1050 }
1051
1052 /*
1053 * Return kernel version string (more than you ever
1054 * wanted to know about what version of the kernel this is).
1055 */
1056 extern char version[];
1057
1058 kern_return_t
host_kernel_version(host_t host,kernel_version_t out_version)1059 host_kernel_version(host_t host, kernel_version_t out_version)
1060 {
1061 if (host == HOST_NULL) {
1062 return KERN_INVALID_ARGUMENT;
1063 }
1064
1065 (void)strncpy(out_version, version, sizeof(kernel_version_t));
1066
1067 return KERN_SUCCESS;
1068 }
1069
1070 /*
1071 * host_processor_sets:
1072 *
1073 * List all processor sets on the host.
1074 */
1075 kern_return_t
host_processor_sets(host_priv_t host_priv,processor_set_name_array_t * pset_list,mach_msg_type_number_t * count)1076 host_processor_sets(host_priv_t host_priv, processor_set_name_array_t * pset_list, mach_msg_type_number_t * count)
1077 {
1078 mach_port_t *ports;
1079
1080 if (host_priv == HOST_PRIV_NULL) {
1081 return KERN_INVALID_ARGUMENT;
1082 }
1083
1084 /*
1085 * Allocate memory. Can be pageable because it won't be
1086 * touched while holding a lock.
1087 */
1088
1089 ports = kalloc_type(mach_port_t, 1, Z_WAITOK | Z_ZERO | Z_NOFAIL);
1090
1091 /* do the conversion that Mig should handle */
1092 ports[0] = convert_pset_name_to_port(&pset0);
1093
1094 *pset_list = (processor_set_array_t)ports;
1095 *count = 1;
1096
1097 return KERN_SUCCESS;
1098 }
1099
1100 /*
1101 * host_processor_set_priv:
1102 *
1103 * Return control port for given processor set.
1104 */
1105 kern_return_t
host_processor_set_priv(host_priv_t host_priv,processor_set_t pset_name,processor_set_t * pset)1106 host_processor_set_priv(host_priv_t host_priv, processor_set_t pset_name, processor_set_t * pset)
1107 {
1108 if (host_priv == HOST_PRIV_NULL || pset_name == PROCESSOR_SET_NULL) {
1109 *pset = PROCESSOR_SET_NULL;
1110
1111 return KERN_INVALID_ARGUMENT;
1112 }
1113
1114 *pset = pset_name;
1115
1116 return KERN_SUCCESS;
1117 }
1118
/*
 * host_processor_info
 *
 * Return info about the processors on this host. It will return
 * the number of processors, and the specific type of info requested
 * in an OOL array.
 */
kern_return_t
host_processor_info(host_t host,
    processor_flavor_t flavor,
    natural_t * out_pcount,
    processor_info_array_t * out_array,
    mach_msg_type_number_t * out_array_count)
{
	kern_return_t result;
	host_t thost;
	processor_info_t info;
	unsigned int icount;
	unsigned int pcount;
	vm_offset_t addr;
	vm_size_t size, needed;
	vm_map_copy_t copy;

	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Per-processor element count (natural_t units) for this flavor. */
	result = processor_info_count(flavor, &icount);
	if (result != KERN_SUCCESS) {
		return result;
	}

	pcount = processor_count;
	assert(pcount != 0);

	/*
	 * NOTE(review): pcount * icount * sizeof(natural_t) is not
	 * checked for multiplication overflow; both factors are
	 * kernel-derived and small in practice — confirm.
	 */
	needed = pcount * icount * sizeof(natural_t);
	size = vm_map_round_page(needed, VM_MAP_PAGE_MASK(ipc_kernel_map));
	result = kmem_alloc(ipc_kernel_map, &addr, size, KMA_DATA, VM_KERN_MEMORY_IPC);
	if (result != KERN_SUCCESS) {
		return KERN_RESOURCE_SHORTAGE;
	}

	info = (processor_info_t)addr;

	/* Gather each processor's info contiguously into the buffer. */
	for (unsigned int i = 0; i < pcount; i++) {
		processor_t processor = processor_array[i];
		assert(processor != PROCESSOR_NULL);

		unsigned int tcount = icount;

		result = processor_info(processor, flavor, &thost, info, &tcount);
		if (result != KERN_SUCCESS) {
			/* Free the whole buffer on any per-processor failure. */
			kmem_free(ipc_kernel_map, addr, size);
			return result;
		}
		info += icount;
	}

	/* Zero the page round-up slack so no kernel memory leaks out. */
	if (size != needed) {
		bzero((char *)addr + needed, size - needed);
	}

	/* Unwire, then move the data into a copy object for OOL return. */
	result = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(ipc_kernel_map)),
	    vm_map_round_page(addr + size, VM_MAP_PAGE_MASK(ipc_kernel_map)), FALSE);
	assert(result == KERN_SUCCESS);
	result = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr, (vm_map_size_t)needed, TRUE, &copy);
	assert(result == KERN_SUCCESS);

	*out_pcount = pcount;
	*out_array = (processor_info_array_t)copy;
	*out_array_count = pcount * icount;

	return KERN_SUCCESS;
}
1193
1194 static bool
is_valid_host_special_port(int id)1195 is_valid_host_special_port(int id)
1196 {
1197 return (id <= HOST_MAX_SPECIAL_PORT) &&
1198 (id >= HOST_MIN_SPECIAL_PORT) &&
1199 ((id <= HOST_LAST_SPECIAL_KERNEL_PORT) || (id > HOST_MAX_SPECIAL_KERNEL_PORT));
1200 }
1201
1202 extern void * XNU_PTRAUTH_SIGNED_PTR("initproc") initproc;
1203
/*
 * Kernel interface for setting a special port.
 *
 * Installs `port` as host special port `id` and releases the send
 * right that was previously stored there, if any.  Panics on an
 * out-of-range id: callers are kernel-internal and must pass a
 * valid index.
 */
kern_return_t
kernel_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
{
	ipc_port_t old_port;

	if (!is_valid_host_special_port(id)) {
		panic("attempted to set invalid special port %d", id);
	}

#if !MACH_FLIPC
	/* The node port is only meaningful when FLIPC is configured in. */
	if (id == HOST_NODE_PORT) {
		return KERN_NOT_SUPPORTED;
	}
#endif

	/* Swap the stored port under the host lock... */
	host_lock(host_priv);
	old_port = host_priv->special[id];
	host_priv->special[id] = port;
	host_unlock(host_priv);

#if MACH_FLIPC
	if (id == HOST_NODE_PORT) {
		mach_node_port_changed();
	}
#endif

	/* ...and drop the displaced send right outside of it. */
	if (IP_VALID(old_port)) {
		ipc_port_release_send(old_port);
	}
	return KERN_SUCCESS;
}
1238
1239 /*
1240 * Kernel interface for retrieving a special port.
1241 */
1242 kern_return_t
kernel_get_special_port(host_priv_t host_priv,int id,ipc_port_t * portp)1243 kernel_get_special_port(host_priv_t host_priv, int id, ipc_port_t * portp)
1244 {
1245 if (!is_valid_host_special_port(id)) {
1246 panic("attempted to get invalid special port %d", id);
1247 }
1248
1249 host_lock(host_priv);
1250 *portp = host_priv->special[id];
1251 host_unlock(host_priv);
1252 return KERN_SUCCESS;
1253 }
1254
1255 /*
1256 * User interface for setting a special port.
1257 *
1258 * Only permits the user to set a user-owned special port
1259 * ID, rejecting a kernel-owned special port ID.
1260 *
1261 * A special kernel port cannot be set up using this
1262 * routine; use kernel_set_special_port() instead.
1263 */
1264 kern_return_t
host_set_special_port_from_user(host_priv_t host_priv,int id,ipc_port_t port)1265 host_set_special_port_from_user(host_priv_t host_priv, int id, ipc_port_t port)
1266 {
1267 if (host_priv == HOST_PRIV_NULL || id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT) {
1268 return KERN_INVALID_ARGUMENT;
1269 }
1270
1271 if (task_is_driver(current_task())) {
1272 return KERN_NO_ACCESS;
1273 }
1274
1275 if (IP_VALID(port) && (port->ip_immovable_receive || port->ip_immovable_send)) {
1276 return KERN_INVALID_RIGHT;
1277 }
1278
1279 return host_set_special_port(host_priv, id, port);
1280 }
1281
/*
 * Set a user-settable host special port.
 *
 * Rejects kernel-owned IDs.  Callers other than the kernel and
 * launchd (initproc) must either hold the launchd event-monitor
 * entitlement (telemetry port only) or, when CSR is built in, pass
 * the CSR_ALLOW_TASK_FOR_PID check.  MACF gets a final veto.
 */
kern_return_t
host_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
{
	if (host_priv == HOST_PRIV_NULL || id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT) {
		return KERN_INVALID_ARGUMENT;
	}

	if (current_task() != kernel_task && current_task()->bsd_info != initproc) {
		/* launchd's event monitor may set the telemetry port. */
		bool allowed = (id == HOST_TELEMETRY_PORT &&
		    IOTaskHasEntitlement(current_task(), "com.apple.private.xpc.launchd.event-monitor"));
#if CONFIG_CSR
		if (!allowed) {
			allowed = (csr_check(CSR_ALLOW_TASK_FOR_PID) == 0);
		}
#endif
		if (!allowed) {
			return KERN_NO_ACCESS;
		}
	}

#if CONFIG_MACF
	if (mac_task_check_set_host_special_port(current_task(), id, port) != 0) {
		return KERN_NO_ACCESS;
	}
#endif

	return kernel_set_special_port(host_priv, id, port);
}
1310
1311 /*
1312 * User interface for retrieving a special port.
1313 *
1314 * Note that there is nothing to prevent a user special
1315 * port from disappearing after it has been discovered by
1316 * the caller; thus, using a special port can always result
1317 * in a "port not valid" error.
1318 */
1319
1320 kern_return_t
host_get_special_port_from_user(host_priv_t host_priv,__unused int node,int id,ipc_port_t * portp)1321 host_get_special_port_from_user(host_priv_t host_priv, __unused int node, int id, ipc_port_t * portp)
1322 {
1323 if (host_priv == HOST_PRIV_NULL || id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < HOST_MIN_SPECIAL_PORT) {
1324 return KERN_INVALID_ARGUMENT;
1325 }
1326
1327 task_t task = current_task();
1328 if (task && task_is_driver(task) && id > HOST_MAX_SPECIAL_KERNEL_PORT) {
1329 /* allow HID drivers to get the sysdiagnose port for keychord handling */
1330 if (id == HOST_SYSDIAGNOSE_PORT &&
1331 IOCurrentTaskHasEntitlement(kIODriverKitHIDFamilyEventServiceEntitlementKey)) {
1332 goto get_special_port;
1333 }
1334 return KERN_NO_ACCESS;
1335 }
1336 get_special_port:
1337 return host_get_special_port(host_priv, node, id, portp);
1338 }
1339
/*
 * Return a send right for host special port `id` in *portp.
 *
 * NOTE(review): this reads the global realhost.special[] table while
 * taking the lock on the passed-in host_priv (in practice the same
 * object) — confirm the two can never differ.
 */
kern_return_t
host_get_special_port(host_priv_t host_priv, __unused int node, int id, ipc_port_t * portp)
{
	ipc_port_t port;

	if (host_priv == HOST_PRIV_NULL || id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < HOST_MIN_SPECIAL_PORT) {
		return KERN_INVALID_ARGUMENT;
	}

	host_lock(host_priv);
	port = realhost.special[id];
	/* NOTE(review): presumably ipc_port_copy_send tolerates a null/dead stored port — verify. */
	*portp = ipc_port_copy_send(port);
	host_unlock(host_priv);

	return KERN_SUCCESS;
}
1356
1357 /*
1358 * host_get_io_master
1359 *
1360 * Return the IO master access port for this host.
1361 */
1362 kern_return_t
host_get_io_master(host_t host,io_master_t * io_masterp)1363 host_get_io_master(host_t host, io_master_t * io_masterp)
1364 {
1365 if (host == HOST_NULL) {
1366 return KERN_INVALID_ARGUMENT;
1367 }
1368
1369 return host_get_io_master_port(host_priv_self(), io_masterp);
1370 }
1371
/* Return the canonical (unprivileged) host object; there is only one. */
host_t
host_self(void)
{
	return &realhost;
}
1377
/* Return the privileged host object — the same single realhost. */
host_priv_t
host_priv_self(void)
{
	return &realhost;
}
1383
/*
 * Set the ATM (activity tracing) diagnostic configuration.
 *
 * Requires the set-atm-diagnostic-flag entitlement.  On kernels
 * built without CONFIG_ATM this returns KERN_NOT_SUPPORTED.
 */
kern_return_t
host_set_atm_diagnostic_flag(host_t host, uint32_t diagnostic_flag)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (!IOCurrentTaskHasEntitlement("com.apple.private.set-atm-diagnostic-flag")) {
		return KERN_NO_ACCESS;
	}

#if CONFIG_ATM
	return atm_set_diagnostic_config(diagnostic_flag);
#else
	(void)diagnostic_flag;
	return KERN_NOT_SUPPORTED;
#endif
}
1402
/*
 * Publish the multiuser configuration flags to the commpage so
 * userspace can read them cheaply.  Only supported on non-macOS
 * (embedded) targets; macOS returns KERN_NOT_SUPPORTED.
 */
kern_return_t
host_set_multiuser_config_flags(host_priv_t host_priv, uint32_t multiuser_config)
{
#if !defined(XNU_TARGET_OS_OSX)
	if (host_priv == HOST_PRIV_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Always enforce that the multiuser bit is set
	 * if a value is written to the commpage word.
	 */
	commpage_update_multiuser_config(multiuser_config | kIsMultiUserDevice);
	return KERN_SUCCESS;
#else
	(void)host_priv;
	(void)multiuser_config;
	return KERN_NOT_SUPPORTED;
#endif
}
1423