/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	kern/task.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
 *		David Black
 *
 *	Task management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 * Copyright (c) 2005 SPARTA, Inc.
 */

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_priv.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <mach/mach_vm.h>
#include <mach/semaphore.h>
#include <mach/task_info.h>
#include <mach/task_inspect.h>
#include <mach/task_special_ports.h>
#include <mach/sdt.h>
#include <mach/mach_test_upcall.h>

#include <ipc/ipc_importance.h>
#include <ipc/ipc_types.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_hash.h>
#include <ipc/ipc_init.h>

#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/coalition.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/kern_cdata.h>
#include <kern/processor.h>
#include <kern/recount.h>
#include <kern/sched_prim.h>    /* for thread_wakeup */
#include <kern/ipc_tt.h>
#include <kern/host.h>
#include <kern/clock.h>
#include <kern/timer.h>
#include <kern/assert.h>
#include <kern/affinity.h>
#include <kern/exc_resource.h>
#include <kern/machine.h>
#include <kern/policy_internal.h>
#include <kern/restartable.h>
#include <kern/ipc_kobject.h>

#include <corpses/task_corpse.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif

#if CONFIG_PERVASIVE_CPI
#include <kern/monotonic.h>
#include <machine/monotonic.h>
#endif /* CONFIG_PERVASIVE_CPI */

#if CONFIG_EXCLAVES
#include "exclaves_resource.h"
#include "exclaves_boot.h"
#include "kern/exclaves.tightbeam.h"
#endif /* CONFIG_EXCLAVES */

#include <os/log.h>

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>         /* for kernel_map, ipc_kernel_map */
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_compressor_pager.h>
#include <vm/vm_reclaim_internal.h>

#include <sys/proc_ro.h>
#include <sys/resource.h>
#include <sys/signalvar.h> /* for coredump */
#include <sys/bsdtask_info.h>
#include <sys/kdebug_triage.h>
#include <sys/code_signing.h> /* for address_space_debugged */
/*
 * Exported interfaces
 */

#include <mach/task_server.h>
#include <mach/mach_host_server.h>
#include <mach/mach_port_server.h>

#include <vm/vm_shared_region.h>

#include <libkern/OSDebug.h>
#include <libkern/OSAtomic.h>
#include <libkern/section_keywords.h>

#include <mach-o/loader.h>
#include <kdp/kdp_dyld.h>

#include <kern/sfi.h>           /* picks up ledger.h */

#if CONFIG_MACF
#include <security/mac_mach_internal.h>
#endif

#include <IOKit/IOBSD.h>
#include <kdp/processor_core.h>

#include <string.h>

#if KPERF
extern int kpc_force_all_ctrs(task_t, int);
#endif

SECURITY_READ_ONLY_LATE(task_t) kernel_task;

int64_t         next_taskuniqueid = 0;
const size_t task_alignment = _Alignof(struct task);
extern const size_t proc_alignment;
extern size_t proc_struct_size;
extern size_t proc_and_task_size;
size_t task_struct_size;

extern uint32_t ipc_control_port_options;

extern int large_corpse_count;

extern boolean_t proc_send_synchronous_EXC_RESOURCE(void *p);
extern void task_disown_frozen_csegs(task_t owner_task);

static void task_port_no_senders(ipc_port_t, mach_msg_type_number_t);
static void task_port_with_flavor_no_senders(ipc_port_t, mach_msg_type_number_t);
static void task_suspension_no_senders(ipc_port_t, mach_msg_type_number_t);
static inline void task_zone_init(void);

#if CONFIG_EXCLAVES
static bool task_should_panic_on_exit_due_to_conclave_taint(task_t task);
static bool task_is_conclave_tainted(task_t task);
static void task_set_conclave_taint(task_t task);
kern_return_t task_crash_info_conclave_upcall(task_t task,
    const xnuupcalls_conclavesharedbuffer_s *shared_buf, uint32_t length);
kern_return_t
stackshot_exclaves_process_stackshot(const stackshot_stackshotresult_s *_Nonnull result, void *kcdata_ptr);
extern void *fake_crash_buffer;
extern uint32_t fake_crash_buffer_length;
#endif /* CONFIG_EXCLAVES */

IPC_KOBJECT_DEFINE(IKOT_TASK_NAME);
IPC_KOBJECT_DEFINE(IKOT_TASK_CONTROL,
    .iko_op_no_senders = task_port_no_senders);
IPC_KOBJECT_DEFINE(IKOT_TASK_READ,
    .iko_op_no_senders = task_port_with_flavor_no_senders);
IPC_KOBJECT_DEFINE(IKOT_TASK_INSPECT,
    .iko_op_no_senders = task_port_with_flavor_no_senders);
IPC_KOBJECT_DEFINE(IKOT_TASK_RESUME,
    .iko_op_no_senders = task_suspension_no_senders);

#if CONFIG_PROC_RESOURCE_LIMITS
static void task_fatal_port_no_senders(ipc_port_t, mach_msg_type_number_t);
static mach_port_t task_allocate_fatal_port(void);

IPC_KOBJECT_DEFINE(IKOT_TASK_FATAL,
    .iko_op_stable     = true,
    .iko_op_no_senders = task_fatal_port_no_senders);

extern void task_id_token_set_port(task_id_token_t token, ipc_port_t port);
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

/* Flag set by core audio when audio is playing. Used to stifle EXC_RESOURCE generation when active. */
int audio_active = 0;

/*
 *	structure for tracking zone usage
 *	Used either one per task/thread for all zones or <per-task,per-zone>.
 */
typedef struct zinfo_usage_store_t {
	/* These fields may be updated atomically, and so must be 8 byte aligned */
	uint64_t        alloc __attribute__((aligned(8)));              /* allocation counter */
	uint64_t        free __attribute__((aligned(8)));               /* free counter */
} zinfo_usage_store_t;

/**
 * Return codes related to diag threshold and memory limit
 */
__options_decl(diagthreshold_check_return, int, {
	THRESHOLD_IS_SAME_AS_LIMIT_FLAG_DISABLED        = 0,
	THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED         = 1,
	THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED    = 2,
	THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_ENABLED     = 3,
});

/**
 * Return codes related to diag threshold and memory limit
 */
__options_decl(current_, int, {
	THRESHOLD_IS_SAME_AS_LIMIT      = 0,
	THRESHOLD_IS_NOT_SAME_AS_LIMIT  = 1
});

zinfo_usage_store_t tasks_tkm_private;
zinfo_usage_store_t tasks_tkm_shared;

/* A container to accumulate statistics for expired tasks */
expired_task_statistics_t               dead_task_statistics;
LCK_SPIN_DECLARE_ATTR(dead_task_statistics_lock, &task_lck_grp, &task_lck_attr);

ledger_template_t task_ledger_template = NULL;

/* global lock for task_dyld_process_info_notify_{register, deregister, get_trap} */
LCK_GRP_DECLARE(g_dyldinfo_mtx_grp, "g_dyldinfo");
LCK_MTX_DECLARE(g_dyldinfo_mtx, &g_dyldinfo_mtx_grp);

SECURITY_READ_ONLY_LATE(struct _task_ledger_indices) task_ledgers __attribute__((used)) =
{.cpu_time = -1,
 .tkm_private = -1,
 .tkm_shared = -1,
 .phys_mem = -1,
 .wired_mem = -1,
 .internal = -1,
 .iokit_mapped = -1,
 .external = -1,
 .reusable = -1,
 .alternate_accounting = -1,
 .alternate_accounting_compressed = -1,
 .page_table = -1,
 .phys_footprint = -1,
 .internal_compressed = -1,
 .purgeable_volatile = -1,
 .purgeable_nonvolatile = -1,
 .purgeable_volatile_compressed = -1,
 .purgeable_nonvolatile_compressed = -1,
 .tagged_nofootprint = -1,
 .tagged_footprint = -1,
 .tagged_nofootprint_compressed = -1,
 .tagged_footprint_compressed = -1,
 .network_volatile = -1,
 .network_nonvolatile = -1,
 .network_volatile_compressed = -1,
 .network_nonvolatile_compressed = -1,
 .media_nofootprint = -1,
 .media_footprint = -1,
 .media_nofootprint_compressed = -1,
 .media_footprint_compressed = -1,
 .graphics_nofootprint = -1,
 .graphics_footprint = -1,
 .graphics_nofootprint_compressed = -1,
 .graphics_footprint_compressed = -1,
 .neural_nofootprint = -1,
 .neural_footprint = -1,
 .neural_nofootprint_compressed = -1,
 .neural_footprint_compressed = -1,
 .platform_idle_wakeups = -1,
 .interrupt_wakeups = -1,
#if CONFIG_SCHED_SFI
 .sfi_wait_times = { 0 /* initialized at runtime */},
#endif /* CONFIG_SCHED_SFI */
 .cpu_time_billed_to_me = -1,
 .cpu_time_billed_to_others = -1,
 .physical_writes = -1,
 .logical_writes = -1,
 .logical_writes_to_external = -1,
#if DEBUG || DEVELOPMENT
 .pages_grabbed = -1,
 .pages_grabbed_kern = -1,
 .pages_grabbed_iopl = -1,
 .pages_grabbed_upl = -1,
#endif
#if CONFIG_FREEZE
 .frozen_to_swap = -1,
#endif /* CONFIG_FREEZE */
 .energy_billed_to_me = -1,
 .energy_billed_to_others = -1,
#if CONFIG_PHYS_WRITE_ACCT
 .fs_metadata_writes = -1,
#endif /* CONFIG_PHYS_WRITE_ACCT */
#if CONFIG_MEMORYSTATUS
 .memorystatus_dirty_time = -1,
#endif /* CONFIG_MEMORYSTATUS */
 .swapins = -1, };

/* System sleep state */
boolean_t tasks_suspend_state;

__options_decl(send_exec_resource_is_fatal, bool, {
	IS_NOT_FATAL            = false,
	IS_FATAL                = true
});

__options_decl(send_exec_resource_is_diagnostics, bool, {
	IS_NOT_DIAGNOSTICS      = false,
	IS_DIAGNOSTICS          = true
});

__options_decl(send_exec_resource_is_warning, bool, {
	IS_NOT_WARNING          = false,
	IS_WARNING              = true
});

__options_decl(send_exec_resource_options_t, uint8_t, {
	EXEC_RESOURCE_FATAL = 0x01,
	EXEC_RESOURCE_DIAGNOSTIC = 0x02,
	EXEC_RESOURCE_WARNING = 0x04,
});

/**
 * Actions to take when a process has reached the memory limit or the diagnostics threshold limits
 */
static inline void task_process_crossed_limit_no_diag(task_t task, ledger_amount_t ledger_limit_size, bool memlimit_is_fatal, bool memlimit_is_active, send_exec_resource_is_warning is_warning);
#if DEBUG || DEVELOPMENT
static inline void task_process_crossed_limit_diag(ledger_amount_t ledger_limit_size);
#endif
void init_task_ledgers(void);
void task_footprint_exceeded(int warning, __unused const void *param0, __unused const void *param1);
void task_wakeups_rate_exceeded(int warning, __unused const void *param0, __unused const void *param1);
void task_io_rate_exceeded(int warning, const void *param0, __unused const void *param1);
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void);
void __attribute__((noinline)) PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, send_exec_resource_options_t exception_options);
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor);
#if CONFIG_PROC_RESOURCE_LIMITS
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task_t task, int current_size, int soft_limit, int hard_limit);
mach_port_name_t current_task_get_fatal_port_name(void);
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_KQWORKLOOPS(task_t task, int current_size, int soft_limit, int hard_limit);
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

kern_return_t task_suspend_internal_locked(task_t);
kern_return_t task_suspend_internal(task_t);
kern_return_t task_resume_internal_locked(task_t);
kern_return_t task_resume_internal(task_t);
static kern_return_t task_start_halt_locked(task_t task, boolean_t should_mark_corpse);

extern kern_return_t iokit_task_terminate(task_t task, int phase);
extern void          iokit_task_app_suspended_changed(task_t task);

extern kern_return_t exception_deliver(thread_t, exception_type_t, mach_exception_data_t, mach_msg_type_number_t, struct exception_action *, lck_mtx_t *);
extern void bsd_copythreadname(void *dst_uth, void *src_uth);
extern kern_return_t thread_resume(thread_t thread);

extern int exit_with_port_space_exception(void *proc, mach_exception_code_t code, mach_exception_subcode_t subcode);

// Condition to include diag footprints
#define RESETTABLE_DIAG_FOOTPRINT_LIMITS ((DEBUG || DEVELOPMENT) && CONFIG_MEMORYSTATUS)

// Warn tasks when they hit 80% of their memory limit.
#define PHYS_FOOTPRINT_WARNING_LEVEL 80

#define TASK_WAKEUPS_MONITOR_DEFAULT_LIMIT              150 /* wakeups per second */
#define TASK_WAKEUPS_MONITOR_DEFAULT_INTERVAL   300 /* in seconds. */

/*
 * Level (in terms of percentage of the limit) at which the wakeups monitor triggers telemetry.
 *
 * (ie when the task's wakeups rate exceeds 70% of the limit, start taking user
 *  stacktraces, aka micro-stackshots)
 */
#define TASK_WAKEUPS_MONITOR_DEFAULT_USTACKSHOTS_TRIGGER        70

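/*
 * Worked example (using the defaults above): with a limit of 150 wakeups/sec
 * and a 70% trigger level, micro-stackshot telemetry starts once a task
 * sustains more than 150 * 70 / 100 = 105 wakeups/sec over the monitored
 * interval.
 */
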
int task_wakeups_monitor_interval; /* In seconds. Time period over which wakeups rate is observed */
int task_wakeups_monitor_rate;     /* In hz. Maximum allowable wakeups per task before EXC_RESOURCE is sent */

unsigned int task_wakeups_monitor_ustackshots_trigger_pct; /* Percentage. Level at which we start gathering telemetry. */

TUNABLE(bool, disable_exc_resource, "disable_exc_resource", false); /* Global override to suppress EXC_RESOURCE for resource monitor violations. */
TUNABLE(bool, disable_exc_resource_during_audio, "disable_exc_resource_during_audio", true); /* Global override to suppress EXC_RESOURCE while audio is active */

ledger_amount_t max_task_footprint = 0;  /* Per-task limit on physical memory consumption in bytes     */
unsigned int max_task_footprint_warning_level = 0;  /* Per-task limit warning percentage */

/*
 * Configure per-task memory limit.
 * The boot-arg is interpreted as Megabytes,
 * and takes precedence over the device tree.
 * Setting the boot-arg to 0 disables task limits.
 */
TUNABLE_DT_WRITEABLE(int, max_task_footprint_mb, "/defaults", "kern.max_task_pmem", "max_task_pmem", 0, TUNABLE_DT_NONE);

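/*
 * Example (illustrative): booting with max_task_pmem=2048 caps each task's
 * physical footprint at 2048 MB, overriding any kern.max_task_pmem value in
 * the /defaults device-tree node; max_task_pmem=0 disables the per-task
 * limit entirely.
 */
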
/* I/O Monitor Limits */
#define IOMON_DEFAULT_LIMIT                     (20480ull)      /* MB of logical/physical I/O */
#define IOMON_DEFAULT_INTERVAL                  (86400ull)      /* in seconds */

uint64_t task_iomon_limit_mb;           /* Per-task I/O monitor limit in MBs */
uint64_t task_iomon_interval_secs;      /* Per-task I/O monitor interval in secs */

#define IO_TELEMETRY_DEFAULT_LIMIT              (10ll * 1024ll * 1024ll)
int64_t io_telemetry_limit;                     /* Threshold to take a microstackshot (0 indicates I/O telemetry is turned off) */
int64_t global_logical_writes_count = 0;        /* Global count for logical writes */
int64_t global_logical_writes_to_external_count = 0;        /* Global count for logical writes to external storage */
static boolean_t global_update_logical_writes(int64_t, int64_t*);

#if DEBUG || DEVELOPMENT
static diagthreshold_check_return task_check_memorythreshold_is_valid(task_t task, uint64_t new_limit, bool is_diagnostics_value);
#endif
#define TASK_MAX_THREAD_LIMIT 256

#if MACH_ASSERT
int pmap_ledgers_panic = 1;
int pmap_ledgers_panic_leeway = 3;
#endif /* MACH_ASSERT */

int task_max = CONFIG_TASK_MAX; /* Max number of tasks */

#if CONFIG_COREDUMP
int hwm_user_cores = 0; /* high watermark violations generate user core files */
#endif

#ifdef MACH_BSD
extern uint32_t proc_platform(const struct proc *);
extern uint32_t proc_sdk(struct proc *);
extern void     proc_getexecutableuuid(void *, unsigned char *, unsigned long);
extern int      proc_pid(struct proc *p);
extern int      proc_selfpid(void);
extern struct proc *current_proc(void);
extern char     *proc_name_address(struct proc *p);
extern uint64_t get_dispatchqueue_offset_from_proc(void *);
extern int kevent_proc_copy_uptrs(void *proc, uint64_t *buf, uint32_t bufsize);
extern void workq_proc_suspended(struct proc *p);
extern void workq_proc_resumed(struct proc *p);
extern struct proc *kernproc;

#if CONFIG_MEMORYSTATUS
extern void     proc_memstat_skip(struct proc* p, boolean_t set);
extern void     memorystatus_on_ledger_footprint_exceeded(int warning, bool memlimit_is_active, bool memlimit_is_fatal);
extern void     memorystatus_log_exception(const int max_footprint_mb, bool memlimit_is_active, bool memlimit_is_fatal);
extern void     memorystatus_log_diag_threshold_exception(const int diag_threshold_value);
extern boolean_t memorystatus_allowed_vm_map_fork(task_t task, bool *is_large);
extern uint64_t  memorystatus_available_memory_internal(struct proc *p);

#if DEVELOPMENT || DEBUG
extern void memorystatus_abort_vm_map_fork(task_t);
#endif

#endif /* CONFIG_MEMORYSTATUS */

#endif /* MACH_BSD */

/* Boot-arg that turns on fatal pac exception delivery for all first-party apps */
static TUNABLE(bool, enable_pac_exception, "enable_pac_exception", false);

/*
 * Defaults for controllable EXC_GUARD behaviors
 *
 * Internal builds are fatal by default (except BRIDGE).
 * Create an alternate set of defaults for special processes by name.
 */
struct task_exc_guard_named_default {
	char *name;
	uint32_t behavior;
};
#define _TASK_EXC_GUARD_MP_CORPSE  (TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_MP_CORPSE)
#define _TASK_EXC_GUARD_MP_ONCE    (_TASK_EXC_GUARD_MP_CORPSE | TASK_EXC_GUARD_MP_ONCE)
#define _TASK_EXC_GUARD_MP_FATAL   (TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_MP_FATAL)

#define _TASK_EXC_GUARD_VM_CORPSE  (TASK_EXC_GUARD_VM_DELIVER | TASK_EXC_GUARD_VM_CORPSE)
#define _TASK_EXC_GUARD_VM_ONCE    (_TASK_EXC_GUARD_VM_CORPSE | TASK_EXC_GUARD_VM_ONCE)
#define _TASK_EXC_GUARD_VM_FATAL   (TASK_EXC_GUARD_VM_DELIVER | TASK_EXC_GUARD_VM_FATAL)

#define _TASK_EXC_GUARD_ALL_CORPSE (_TASK_EXC_GUARD_MP_CORPSE | _TASK_EXC_GUARD_VM_CORPSE)
#define _TASK_EXC_GUARD_ALL_ONCE   (_TASK_EXC_GUARD_MP_ONCE | _TASK_EXC_GUARD_VM_ONCE)
#define _TASK_EXC_GUARD_ALL_FATAL  (_TASK_EXC_GUARD_MP_FATAL | _TASK_EXC_GUARD_VM_FATAL)

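/*
 * A named-default entry pairs a process name with one of the behavior masks
 * above, e.g. (hypothetical entry, for illustration only):
 *
 *	{ .name = "some_daemon", .behavior = _TASK_EXC_GUARD_ALL_CORPSE },
 *
 * As noted below, these override lists are expected to ship empty.
 */
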
/* cannot turn off FATAL and DELIVER bit if set */
uint32_t task_exc_guard_no_unset_mask = TASK_EXC_GUARD_MP_FATAL | TASK_EXC_GUARD_VM_FATAL |
    TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_VM_DELIVER;
/* cannot turn on ONCE bit if unset */
uint32_t task_exc_guard_no_set_mask = TASK_EXC_GUARD_MP_ONCE | TASK_EXC_GUARD_VM_ONCE;

#if !defined(XNU_TARGET_OS_BRIDGE)

uint32_t task_exc_guard_default = _TASK_EXC_GUARD_ALL_FATAL;
uint32_t task_exc_guard_config_mask = TASK_EXC_GUARD_MP_ALL | TASK_EXC_GUARD_VM_ALL;
/*
 * These "by-process-name" default overrides are intended to be a short-term fix to
 * quickly get over races between changes introducing new EXC_GUARD raising behaviors
 * in some process and a change in default behavior for same. We should ship with
 * these lists empty (by fixing the bugs, or explicitly changing the task's EXC_GUARD
 * exception behavior via task_set_exc_guard_behavior()).
 *
 * XXX Remember to add/remove TASK_EXC_GUARD_HONOR_NAMED_DEFAULTS back to
 * task_exc_guard_default when transitioning this list between empty and
 * non-empty.
 */
static struct task_exc_guard_named_default task_exc_guard_named_defaults[] = {};

#else /* !defined(XNU_TARGET_OS_BRIDGE) */

uint32_t task_exc_guard_default = _TASK_EXC_GUARD_ALL_ONCE;
uint32_t task_exc_guard_config_mask = TASK_EXC_GUARD_MP_ALL | TASK_EXC_GUARD_VM_ALL;
static struct task_exc_guard_named_default task_exc_guard_named_defaults[] = {};

#endif /* !defined(XNU_TARGET_OS_BRIDGE) */

/* Forwards */

static void task_hold_locked(task_t task);
static void task_wait_locked(task_t task, boolean_t until_not_runnable);
static void task_release_locked(task_t task);
extern task_t proc_get_task_raw(void *proc);
extern void task_ref_hold_proc_task_struct(task_t task);
extern void task_release_proc_task_struct(task_t task);

static void task_synchronizer_destroy_all(task_t task);
static os_ref_count_t
task_add_turnstile_watchports_locked(
	task_t                      task,
	struct task_watchports      *watchports,
	struct task_watchport_elem  **previous_elem_array,
	ipc_port_t                  *portwatch_ports,
	uint32_t                    portwatch_count);

static os_ref_count_t
task_remove_turnstile_watchports_locked(
	task_t                 task,
	struct task_watchports *watchports,
	ipc_port_t             *port_freelist);

static struct task_watchports *
task_watchports_alloc_init(
	task_t        task,
	thread_t      thread,
	uint32_t      count);

static void
task_watchports_deallocate(
	struct task_watchports *watchports);

__attribute__((always_inline)) inline void
task_lock(task_t task)
{
	lck_mtx_lock(&(task)->lock);
}

__attribute__((always_inline)) inline void
task_unlock(task_t task)
{
	lck_mtx_unlock(&(task)->lock);
}

void
task_set_64bit(
	task_t task,
	boolean_t is_64bit,
	boolean_t is_64bit_data)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__arm64__)
	thread_t thread;
#endif /* defined(__i386__) || defined(__x86_64__) || defined(__arm64__) */

	task_lock(task);

	/*
	 * Switching to/from 64-bit address spaces
	 */
	if (is_64bit) {
		if (!task_has_64Bit_addr(task)) {
			task_set_64Bit_addr(task);
		}
	} else {
		if (task_has_64Bit_addr(task)) {
			task_clear_64Bit_addr(task);
		}
	}

	/*
	 * Switching to/from 64-bit register state.
	 */
	if (is_64bit_data) {
		if (task_has_64Bit_data(task)) {
			goto out;
		}

		task_set_64Bit_data(task);
	} else {
		if (!task_has_64Bit_data(task)) {
			goto out;
		}

		task_clear_64Bit_data(task);
	}

	/* FIXME: On x86, the thread save state flavor can diverge from the
	 * task's 64-bit feature flag due to the 32-bit/64-bit register save
	 * state dichotomy. Since we can be pre-empted in this interval,
	 * certain routines may observe the thread as being in an inconsistent
	 * state with respect to its task's 64-bitness.
	 */

#if defined(__x86_64__) || defined(__arm64__)
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		thread_mtx_lock(thread);
		machine_thread_switch_addrmode(thread);
		thread_mtx_unlock(thread);
	}
#endif /* defined(__x86_64__) || defined(__arm64__) */

out:
	task_unlock(task);
}

bool
task_get_64bit_addr(task_t task)
{
	return task_has_64Bit_addr(task);
}

bool
task_get_64bit_data(task_t task)
{
	return task_has_64Bit_data(task);
}

void
task_set_platform_binary(
	task_t task,
	boolean_t is_platform)
{
	if (is_platform) {
		task_ro_flags_set(task, TFRO_PLATFORM);
	} else {
		task_ro_flags_clear(task, TFRO_PLATFORM);
	}
}

#if XNU_TARGET_OS_OSX
#if DEVELOPMENT || DEBUG
SECURITY_READ_ONLY_LATE(bool) AMFI_bootarg_disable_mach_hardening = false;
#endif /* DEVELOPMENT || DEBUG */

void
task_disable_mach_hardening(task_t task)
{
	task_ro_flags_set(task, TFRO_MACH_HARDENING_OPT_OUT);
}

bool
task_opted_out_mach_hardening(task_t task)
{
	return task_ro_flags_get(task) & TFRO_MACH_HARDENING_OPT_OUT;
}
#endif /* XNU_TARGET_OS_OSX */

/*
 * Use the `task_is_hardened_binary` macro below
 * when applying new security policies.
 *
 * Kernel security policies now generally apply to
 * "hardened binaries" - which are platform binaries, and
 * third-party binaries that adopt the hardened runtime on iOS.
 */
task_get_platform_binary(task_t task)732 task_get_platform_binary(task_t task)
733 {
734 	return (task_ro_flags_get(task) & TFRO_PLATFORM) != 0;
735 }
736 
737 static boolean_t
task_get_hardened_runtime(task_t task)738 task_get_hardened_runtime(task_t task)
739 {
740 	return (task_ro_flags_get(task) & TFRO_HARDENED) != 0;
741 }
742 
743 boolean_t
task_is_hardened_binary(task_t task)744 task_is_hardened_binary(task_t task)
745 {
746 	return task_get_platform_binary(task) ||
747 	       task_get_hardened_runtime(task);
748 }
749 
750 void
task_set_hardened_runtime(task_t task,bool is_hardened)751 task_set_hardened_runtime(
752 	task_t task,
753 	bool is_hardened)
754 {
755 	if (is_hardened) {
756 		task_ro_flags_set(task, TFRO_HARDENED);
757 	} else {
758 		task_ro_flags_clear(task, TFRO_HARDENED);
759 	}
760 }

boolean_t
task_is_a_corpse(task_t task)
{
	return (task_ro_flags_get(task) & TFRO_CORPSE) != 0;
}

boolean_t
task_is_ipc_active(task_t task)
{
	return task->ipc_active;
}

void
task_set_corpse(task_t task)
{
	return task_ro_flags_set(task, TFRO_CORPSE);
}

void
task_set_immovable_pinned(task_t task)
{
	ipc_task_set_immovable_pinned(task);
}

/*
 * Set or clear per-task TF_CA_CLIENT_WI flag according to specified argument.
 * Returns "false" if flag is already set, and "true" in other cases.
 */
bool
task_set_ca_client_wi(
	task_t task,
	boolean_t set_or_clear)
{
	bool ret = true;
	task_lock(task);
	if (set_or_clear) {
		/* Tasks can have only one CA_CLIENT work interval */
		if (task->t_flags & TF_CA_CLIENT_WI) {
			ret = false;
		} else {
			task->t_flags |= TF_CA_CLIENT_WI;
		}
	} else {
		task->t_flags &= ~TF_CA_CLIENT_WI;
	}
	task_unlock(task);
	return ret;
}

/*
 * task_set_dyld_info() is called at most three times:
 * 1) at task struct creation, to set addr/size to zero.
 * 2) in mach_loader.c, to set the location of the __all_image_info section in the loaded dyld.
 * 3) from dyld itself, to update the location of all_image_info.
 * For security, any calls after that are ignored.  The TF_DYLD_ALL_IMAGE_FINAL bit is used to determine state.
 */
kern_return_t
task_set_dyld_info(
	task_t            task,
	mach_vm_address_t addr,
	mach_vm_size_t    size)
{
	mach_vm_address_t end;
	if (os_add_overflow(addr, size, &end)) {
		return KERN_FAILURE;
	}

	task_lock(task);
	/* don't accept updates if all_image_info_addr is final */
	if ((task->t_flags & TF_DYLD_ALL_IMAGE_FINAL) == 0) {
		bool inputNonZero   = ((addr != 0) || (size != 0));
		bool currentNonZero = ((task->all_image_info_addr != 0) || (task->all_image_info_size != 0));
		task->all_image_info_addr = addr;
		task->all_image_info_size = size;
		/* can only change from a non-zero value to another non-zero once */
		if (inputNonZero && currentNonZero) {
			task->t_flags |= TF_DYLD_ALL_IMAGE_FINAL;
		}
		task_unlock(task);
		return KERN_SUCCESS;
	} else {
		task_unlock(task);
		return KERN_FAILURE;
	}
}

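/*
 * Lifecycle sketch (derived from the comment above; illustrative only):
 *
 *	task_set_dyld_info(task, 0, 0);        // 1) creation: zeroed, not final
 *	task_set_dyld_info(task, aii, size);   // 2) loader: zero -> non-zero
 *	task_set_dyld_info(task, aii2, size2); // 3) dyld: non-zero -> non-zero,
 *	                                       //    sets TF_DYLD_ALL_IMAGE_FINAL
 *	task_set_dyld_info(task, x, y);        // any later call: KERN_FAILURE
 */
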
bool
task_donates_own_pages(
	task_t task)
{
	return task->donates_own_pages;
}

void
task_set_mach_header_address(
	task_t task,
	mach_vm_address_t addr)
{
	task_lock(task);
	task->mach_header_vm_address = addr;
	task_unlock(task);
}

void
task_bank_reset(__unused task_t task)
{
	if (task->bank_context != NULL) {
		bank_task_destroy(task);
	}
}

/*
 * NOTE: This should only be called when the P_LINTRANSIT
 *	 flag is set (the proc_trans lock is held) on the
 *	 proc associated with the task.
 */
void
task_bank_init(__unused task_t task)
{
	if (task->bank_context != NULL) {
		panic("Task bank init called with non null bank context for task: %p and bank_context: %p", task, task->bank_context);
	}
	bank_task_initialize(task);
}

void
task_set_did_exec_flag(task_t task)
{
	task->t_procflags |= TPF_DID_EXEC;
}

void
task_clear_exec_copy_flag(task_t task)
{
	task->t_procflags &= ~TPF_EXEC_COPY;
}

event_t
task_get_return_wait_event(task_t task)
{
	return (event_t)&task->returnwait_inheritor;
}

void
task_clear_return_wait(task_t task, uint32_t flags)
{
	if (flags & TCRW_CLEAR_INITIAL_WAIT) {
		thread_wakeup(task_get_return_wait_event(task));
	}

	if (flags & TCRW_CLEAR_FINAL_WAIT) {
		is_write_lock(task->itk_space);

		task->t_returnwaitflags &= ~TRW_LRETURNWAIT;
		task->returnwait_inheritor = NULL;

		if (flags & TCRW_CLEAR_EXEC_COMPLETE) {
			task->t_returnwaitflags &= ~TRW_LEXEC_COMPLETE;
		}

		if (task->t_returnwaitflags & TRW_LRETURNWAITER) {
			struct turnstile *turnstile = turnstile_prepare_hash((uintptr_t) task_get_return_wait_event(task),
			    TURNSTILE_ULOCK);

			waitq_wakeup64_all(&turnstile->ts_waitq,
			    CAST_EVENT64_T(task_get_return_wait_event(task)),
			    THREAD_AWAKENED, WAITQ_UPDATE_INHERITOR);

			turnstile_update_inheritor_complete(turnstile, TURNSTILE_INTERLOCK_HELD);

			turnstile_complete_hash((uintptr_t) task_get_return_wait_event(task), TURNSTILE_ULOCK);
			turnstile_cleanup();
			task->t_returnwaitflags &= ~TRW_LRETURNWAITER;
		}
		is_write_unlock(task->itk_space);
	}
}

void __attribute__((noreturn))
task_wait_to_return(void)
{
	task_t task = current_task();
	uint8_t returnwaitflags;

	is_write_lock(task->itk_space);

	if (task->t_returnwaitflags & TRW_LRETURNWAIT) {
		struct turnstile *turnstile = turnstile_prepare_hash((uintptr_t) task_get_return_wait_event(task),
		    TURNSTILE_ULOCK);

		do {
			task->t_returnwaitflags |= TRW_LRETURNWAITER;
			turnstile_update_inheritor(turnstile, task->returnwait_inheritor,
			    (TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_THREAD));

			waitq_assert_wait64(&turnstile->ts_waitq,
			    CAST_EVENT64_T(task_get_return_wait_event(task)),
			    THREAD_UNINT, TIMEOUT_WAIT_FOREVER);

			is_write_unlock(task->itk_space);

			turnstile_update_inheritor_complete(turnstile, TURNSTILE_INTERLOCK_NOT_HELD);

			thread_block(THREAD_CONTINUE_NULL);

			is_write_lock(task->itk_space);
		} while (task->t_returnwaitflags & TRW_LRETURNWAIT);

		turnstile_complete_hash((uintptr_t) task_get_return_wait_event(task), TURNSTILE_ULOCK);
	}

	returnwaitflags = task->t_returnwaitflags;
	is_write_unlock(task->itk_space);
	turnstile_cleanup();


#if CONFIG_MACF
	/*
	 * Before jumping to userspace and allowing this process
	 * to execute any code, make sure its credentials are cached,
	 * and notify any interested parties.
	 */
	extern void current_cached_proc_cred_update(void);

	current_cached_proc_cred_update();
	if (returnwaitflags & TRW_LEXEC_COMPLETE) {
		mac_proc_notify_exec_complete(current_proc());
	}
#endif

	thread_bootstrap_return();
}

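/*
 * Handshake sketch: a newly created/exec'd task parks a thread in
 * task_wait_to_return() above, blocking on the return-wait turnstile and
 * pushing on task->returnwait_inheritor. The thread driving the exec later
 * calls task_clear_return_wait() with TCRW_CLEAR_FINAL_WAIT (plus
 * TCRW_CLEAR_EXEC_COMPLETE once exec has fully completed), which clears
 * TRW_LRETURNWAIT and wakes all waiters so the new image can start running.
 */
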
boolean_t
task_is_exec_copy(task_t task)
{
	return task_is_exec_copy_internal(task);
}

boolean_t
task_did_exec(task_t task)
{
	return task_did_exec_internal(task);
}

boolean_t
task_is_active(task_t task)
{
	return task->active;
}

boolean_t
task_is_halting(task_t task)
{
	return task->halting;
}

void
task_init(void)
{
	if (max_task_footprint_mb != 0) {
#if CONFIG_MEMORYSTATUS
		if (max_task_footprint_mb < 50) {
			printf("Warning: max_task_pmem %d below minimum.\n",
			    max_task_footprint_mb);
			max_task_footprint_mb = 50;
		}
		printf("Limiting task physical memory footprint to %d MB\n",
		    max_task_footprint_mb);

		max_task_footprint = (ledger_amount_t)max_task_footprint_mb * 1024 * 1024;         // Convert MB to bytes

		/*
		 * Configure the per-task memory limit warning level.
		 * This is computed as a percentage.
		 */
		max_task_footprint_warning_level = 0;

		if (max_mem < 0x40000000) {
			/*
			 * On devices with < 1GB of memory:
			 *    -- set warnings to 50MB below the per-task limit.
			 */
			if (max_task_footprint_mb > 50) {
				max_task_footprint_warning_level = ((max_task_footprint_mb - 50) * 100) / max_task_footprint_mb;
			}
		} else {
			/*
			 * On devices with >= 1GB of memory:
			 *    -- set warnings to 100MB below the per-task limit.
			 */
			if (max_task_footprint_mb > 100) {
				max_task_footprint_warning_level = ((max_task_footprint_mb - 100) * 100) / max_task_footprint_mb;
			}
		}

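		/*
		 * Worked example: with max_task_pmem=2048 on a >= 1GB device,
		 * the warning level is ((2048 - 100) * 100) / 2048 = 95, i.e.
		 * the warning fires at 95% of the limit, leaving roughly
		 * 100 MB of headroom.
		 */
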
		/*
		 * Never allow warning level to land below the default.
		 */
		if (max_task_footprint_warning_level < PHYS_FOOTPRINT_WARNING_LEVEL) {
			max_task_footprint_warning_level = PHYS_FOOTPRINT_WARNING_LEVEL;
		}

		printf("Limiting task physical memory warning to %d%%\n", max_task_footprint_warning_level);

#else
		printf("Warning: max_task_pmem specified, but jetsam not configured; ignoring.\n");
#endif /* CONFIG_MEMORYSTATUS */
	}

#if DEVELOPMENT || DEBUG
	PE_parse_boot_argn("task_exc_guard_default",
	    &task_exc_guard_default,
	    sizeof(task_exc_guard_default));
#endif /* DEVELOPMENT || DEBUG */

#if CONFIG_COREDUMP
	if (!PE_parse_boot_argn("hwm_user_cores", &hwm_user_cores,
	    sizeof(hwm_user_cores))) {
		hwm_user_cores = 0;
	}
#endif

	proc_init_cpumon_params();

	if (!PE_parse_boot_argn("task_wakeups_monitor_rate", &task_wakeups_monitor_rate, sizeof(task_wakeups_monitor_rate))) {
		task_wakeups_monitor_rate = TASK_WAKEUPS_MONITOR_DEFAULT_LIMIT;
	}

	if (!PE_parse_boot_argn("task_wakeups_monitor_interval", &task_wakeups_monitor_interval, sizeof(task_wakeups_monitor_interval))) {
		task_wakeups_monitor_interval = TASK_WAKEUPS_MONITOR_DEFAULT_INTERVAL;
	}

	if (!PE_parse_boot_argn("task_wakeups_monitor_ustackshots_trigger_pct", &task_wakeups_monitor_ustackshots_trigger_pct,
	    sizeof(task_wakeups_monitor_ustackshots_trigger_pct))) {
		task_wakeups_monitor_ustackshots_trigger_pct = TASK_WAKEUPS_MONITOR_DEFAULT_USTACKSHOTS_TRIGGER;
	}

	if (!PE_parse_boot_argn("task_iomon_limit_mb", &task_iomon_limit_mb, sizeof(task_iomon_limit_mb))) {
		task_iomon_limit_mb = IOMON_DEFAULT_LIMIT;
	}

	if (!PE_parse_boot_argn("task_iomon_interval_secs", &task_iomon_interval_secs, sizeof(task_iomon_interval_secs))) {
		task_iomon_interval_secs = IOMON_DEFAULT_INTERVAL;
	}

	if (!PE_parse_boot_argn("io_telemetry_limit", &io_telemetry_limit, sizeof(io_telemetry_limit))) {
		io_telemetry_limit = IO_TELEMETRY_DEFAULT_LIMIT;
	}

/*
 * If we have coalitions, coalition_init() will call init_task_ledgers() as it
 * sets up the ledgers for the default coalition. If we don't have coalitions,
 * then we have to call it now.
 */
#if CONFIG_COALITIONS
	assert(task_ledger_template);
#else /* CONFIG_COALITIONS */
	init_task_ledgers();
#endif /* CONFIG_COALITIONS */

	task_ref_init();
	task_zone_init();

#ifdef __LP64__
	boolean_t is_64bit = TRUE;
#else
	boolean_t is_64bit = FALSE;
#endif

	kernproc = (struct proc *)zalloc_flags(proc_task_zone, Z_WAITOK | Z_ZERO);
	kernel_task = proc_get_task_raw(kernproc);

	/*
	 * Create the kernel task as the first task.
	 */
	if (task_create_internal(TASK_NULL, NULL, NULL, FALSE, is_64bit,
	    is_64bit, TF_NONE, TF_NONE, TPF_NONE, TWF_NONE, kernel_task) != KERN_SUCCESS) {
		panic("task_init");
	}

	ipc_task_enable(kernel_task);

#if defined(HAS_APPLE_PAC)
	kernel_task->rop_pid = ml_default_rop_pid();
	kernel_task->jop_pid = ml_default_jop_pid();
	// kernel_task never runs at EL0, but machine_thread_state_convert_from/to_user() relies on
	// disable_user_jop to be false for kernel threads (e.g. in exception delivery on thread_exception_daemon)
	ml_task_set_disable_user_jop(kernel_task, FALSE);
#endif

	vm_map_deallocate(kernel_task->map);
	kernel_task->map = kernel_map;
}

static inline void
task_zone_init(void)
{
	proc_struct_size = roundup(proc_struct_size, task_alignment);
	task_struct_size = roundup(sizeof(struct task), proc_alignment);
	proc_and_task_size = proc_struct_size + task_struct_size;

	proc_task_zone = zone_create_ext("proc_task", proc_and_task_size,
	    ZC_ZFREE_CLEARMEM | ZC_SEQUESTER, ZONE_ID_PROC_TASK, NULL); /* sequester is needed for proc_rele() */
}

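/*
 * Layout sketch: each proc_task_zone element carries a proc and its task in
 * a single allocation of proc_and_task_size bytes. Rounding each structure
 * up to the other's alignment keeps both properly aligned, so
 * proc_get_task_raw() can derive the task pointer from the proc pointer at
 * a fixed offset.
 */
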
/*
 * Task ledgers
 * ------------
 *
 * phys_footprint
 *   Physical footprint: This is the sum of:
 *     + (internal - alternate_accounting)
 *     + (internal_compressed - alternate_accounting_compressed)
 *     + iokit_mapped
 *     + purgeable_nonvolatile
 *     + purgeable_nonvolatile_compressed
 *     + page_table
 *
 * internal
 *   The task's anonymous memory, which on iOS is always resident.
 *
 * internal_compressed
 *   Amount of this task's internal memory which is held by the compressor.
 *   Such memory is no longer actually resident for the task [i.e., resident in its pmap],
 *   and could be either decompressed back into memory, or paged out to storage, depending
 *   on our implementation.
 *
 * iokit_mapped
 *   IOKit mappings: The total size of all IOKit mappings in this task [regardless of
 *   clean/dirty or internal/external state].
 *
 * alternate_accounting
 *   The number of internal dirty pages which are part of IOKit mappings. By definition, these pages
 *   are counted in both internal *and* iokit_mapped, so we must subtract them from the total to avoid
 *   double counting.
 *
 * pages_grabbed
 *   pages_grabbed counts all page grabs in a task.  It is also broken out into three subtypes
 *   which track UPL, IOPL and Kernel page grabs.
 */
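/*
 * Worked example (illustrative numbers only): a task with 100 MB internal,
 * of which 10 MB is dirty IOKit-mapped memory (alternate_accounting), plus
 * 30 MB internal_compressed, 20 MB iokit_mapped, 5 MB purgeable_nonvolatile
 * and 1 MB of page tables has
 *
 *	phys_footprint = (100 - 10) + (30 - 0) + 20 + 5 + 0 + 1 = 146 MB
 */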
void
init_task_ledgers(void)
{
	ledger_template_t t;

	assert(task_ledger_template == NULL);
	assert(kernel_task == TASK_NULL);

#if MACH_ASSERT
	PE_parse_boot_argn("pmap_ledgers_panic",
	    &pmap_ledgers_panic,
	    sizeof(pmap_ledgers_panic));
	PE_parse_boot_argn("pmap_ledgers_panic_leeway",
	    &pmap_ledgers_panic_leeway,
	    sizeof(pmap_ledgers_panic_leeway));
#endif /* MACH_ASSERT */

	if ((t = ledger_template_create("Per-task ledger")) == NULL) {
		panic("couldn't create task ledger template");
	}

	task_ledgers.cpu_time = ledger_entry_add(t, "cpu_time", "sched", "ns");
	task_ledgers.tkm_private = ledger_entry_add(t, "tkm_private",
	    "physmem", "bytes");
	task_ledgers.tkm_shared = ledger_entry_add(t, "tkm_shared", "physmem",
	    "bytes");
	task_ledgers.phys_mem = ledger_entry_add(t, "phys_mem", "physmem",
	    "bytes");
	task_ledgers.wired_mem = ledger_entry_add(t, "wired_mem", "physmem",
	    "bytes");
	task_ledgers.internal = ledger_entry_add(t, "internal", "physmem",
	    "bytes");
	task_ledgers.iokit_mapped = ledger_entry_add_with_flags(t, "iokit_mapped", "mappings",
	    "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.alternate_accounting = ledger_entry_add_with_flags(t, "alternate_accounting", "physmem",
	    "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.alternate_accounting_compressed = ledger_entry_add_with_flags(t, "alternate_accounting_compressed", "physmem",
	    "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.page_table = ledger_entry_add_with_flags(t, "page_table", "physmem",
	    "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.phys_footprint = ledger_entry_add(t, "phys_footprint", "physmem",
	    "bytes");
	task_ledgers.internal_compressed = ledger_entry_add(t, "internal_compressed", "physmem",
	    "bytes");
	task_ledgers.reusable = ledger_entry_add(t, "reusable", "physmem", "bytes");
	task_ledgers.external = ledger_entry_add(t, "external", "physmem", "bytes");
	task_ledgers.purgeable_volatile = ledger_entry_add_with_flags(t, "purgeable_volatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.purgeable_nonvolatile = ledger_entry_add_with_flags(t, "purgeable_nonvolatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.purgeable_volatile_compressed = ledger_entry_add_with_flags(t, "purgeable_volatile_compress", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.purgeable_nonvolatile_compressed = ledger_entry_add_with_flags(t, "purgeable_nonvolatile_compress", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
#if DEBUG || DEVELOPMENT
	task_ledgers.pages_grabbed = ledger_entry_add_with_flags(t, "pages_grabbed", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.pages_grabbed_kern = ledger_entry_add_with_flags(t, "pages_grabbed_kern", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.pages_grabbed_iopl = ledger_entry_add_with_flags(t, "pages_grabbed_iopl", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.pages_grabbed_upl = ledger_entry_add_with_flags(t, "pages_grabbed_upl", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
#endif
	task_ledgers.tagged_nofootprint = ledger_entry_add_with_flags(t, "tagged_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.tagged_footprint = ledger_entry_add_with_flags(t, "tagged_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.tagged_nofootprint_compressed = ledger_entry_add_with_flags(t, "tagged_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.tagged_footprint_compressed = ledger_entry_add_with_flags(t, "tagged_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.network_volatile = ledger_entry_add_with_flags(t, "network_volatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.network_nonvolatile = ledger_entry_add_with_flags(t, "network_nonvolatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.network_volatile_compressed = ledger_entry_add_with_flags(t, "network_volatile_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.network_nonvolatile_compressed = ledger_entry_add_with_flags(t, "network_nonvolatile_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.media_nofootprint = ledger_entry_add_with_flags(t, "media_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.media_footprint = ledger_entry_add_with_flags(t, "media_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.media_nofootprint_compressed = ledger_entry_add_with_flags(t, "media_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.media_footprint_compressed = ledger_entry_add_with_flags(t, "media_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.graphics_nofootprint = ledger_entry_add_with_flags(t, "graphics_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.graphics_footprint = ledger_entry_add_with_flags(t, "graphics_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.graphics_nofootprint_compressed = ledger_entry_add_with_flags(t, "graphics_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.graphics_footprint_compressed = ledger_entry_add_with_flags(t, "graphics_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.neural_nofootprint = ledger_entry_add_with_flags(t, "neural_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.neural_footprint = ledger_entry_add_with_flags(t, "neural_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.neural_nofootprint_compressed = ledger_entry_add_with_flags(t, "neural_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.neural_footprint_compressed = ledger_entry_add_with_flags(t, "neural_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);

#if CONFIG_FREEZE
	task_ledgers.frozen_to_swap = ledger_entry_add(t, "frozen_to_swap", "physmem", "bytes");
#endif /* CONFIG_FREEZE */

	task_ledgers.platform_idle_wakeups = ledger_entry_add(t, "platform_idle_wakeups", "power",
	    "count");
	task_ledgers.interrupt_wakeups = ledger_entry_add(t, "interrupt_wakeups", "power",
	    "count");

#if CONFIG_SCHED_SFI
	sfi_class_id_t class_id, ledger_alias;
	for (class_id = SFI_CLASS_UNSPECIFIED; class_id < MAX_SFI_CLASS_ID; class_id++) {
		task_ledgers.sfi_wait_times[class_id] = -1;
	}

	/* don't account for UNSPECIFIED */
	for (class_id = SFI_CLASS_UNSPECIFIED + 1; class_id < MAX_SFI_CLASS_ID; class_id++) {
		ledger_alias = sfi_get_ledger_alias_for_class(class_id);
		if (ledger_alias != SFI_CLASS_UNSPECIFIED) {
			/* Check to see if alias has been registered yet */
			if (task_ledgers.sfi_wait_times[ledger_alias] != -1) {
				task_ledgers.sfi_wait_times[class_id] = task_ledgers.sfi_wait_times[ledger_alias];
			} else {
				/* Otherwise, initialize it first */
				task_ledgers.sfi_wait_times[class_id] = task_ledgers.sfi_wait_times[ledger_alias] = sfi_ledger_entry_add(t, ledger_alias);
			}
		} else {
			task_ledgers.sfi_wait_times[class_id] = sfi_ledger_entry_add(t, class_id);
		}

		if (task_ledgers.sfi_wait_times[class_id] < 0) {
			panic("couldn't create entries for task ledger template for SFI class 0x%x", class_id);
		}
	}

	assert(task_ledgers.sfi_wait_times[MAX_SFI_CLASS_ID - 1] != -1);
#endif /* CONFIG_SCHED_SFI */

	task_ledgers.cpu_time_billed_to_me = ledger_entry_add(t, "cpu_time_billed_to_me", "sched", "ns");
	task_ledgers.cpu_time_billed_to_others = ledger_entry_add(t, "cpu_time_billed_to_others", "sched", "ns");
	task_ledgers.physical_writes = ledger_entry_add(t, "physical_writes", "res", "bytes");
	task_ledgers.logical_writes = ledger_entry_add(t, "logical_writes", "res", "bytes");
	task_ledgers.logical_writes_to_external = ledger_entry_add(t, "logical_writes_to_external", "res", "bytes");
#if CONFIG_PHYS_WRITE_ACCT
	task_ledgers.fs_metadata_writes = ledger_entry_add(t, "fs_metadata_writes", "res", "bytes");
#endif /* CONFIG_PHYS_WRITE_ACCT */
	task_ledgers.energy_billed_to_me = ledger_entry_add(t, "energy_billed_to_me", "power", "nj");
	task_ledgers.energy_billed_to_others = ledger_entry_add(t, "energy_billed_to_others", "power", "nj");

#if CONFIG_MEMORYSTATUS
	task_ledgers.memorystatus_dirty_time = ledger_entry_add(t, "memorystatus_dirty_time", "physmem", "ns");
#endif /* CONFIG_MEMORYSTATUS */

	task_ledgers.swapins = ledger_entry_add_with_flags(t, "swapins", "physmem", "bytes",
	    LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1335 
1336 	if ((task_ledgers.cpu_time < 0) ||
1337 	    (task_ledgers.tkm_private < 0) ||
1338 	    (task_ledgers.tkm_shared < 0) ||
1339 	    (task_ledgers.phys_mem < 0) ||
1340 	    (task_ledgers.wired_mem < 0) ||
1341 	    (task_ledgers.internal < 0) ||
1342 	    (task_ledgers.external < 0) ||
1343 	    (task_ledgers.reusable < 0) ||
1344 	    (task_ledgers.iokit_mapped < 0) ||
1345 	    (task_ledgers.alternate_accounting < 0) ||
1346 	    (task_ledgers.alternate_accounting_compressed < 0) ||
1347 	    (task_ledgers.page_table < 0) ||
1348 	    (task_ledgers.phys_footprint < 0) ||
1349 	    (task_ledgers.internal_compressed < 0) ||
1350 	    (task_ledgers.purgeable_volatile < 0) ||
1351 	    (task_ledgers.purgeable_nonvolatile < 0) ||
1352 	    (task_ledgers.purgeable_volatile_compressed < 0) ||
1353 	    (task_ledgers.purgeable_nonvolatile_compressed < 0) ||
1354 	    (task_ledgers.tagged_nofootprint < 0) ||
1355 	    (task_ledgers.tagged_footprint < 0) ||
1356 	    (task_ledgers.tagged_nofootprint_compressed < 0) ||
1357 	    (task_ledgers.tagged_footprint_compressed < 0) ||
1358 #if CONFIG_FREEZE
1359 	    (task_ledgers.frozen_to_swap < 0) ||
1360 #endif /* CONFIG_FREEZE */
1361 	    (task_ledgers.network_volatile < 0) ||
1362 	    (task_ledgers.network_nonvolatile < 0) ||
1363 	    (task_ledgers.network_volatile_compressed < 0) ||
1364 	    (task_ledgers.network_nonvolatile_compressed < 0) ||
1365 	    (task_ledgers.media_nofootprint < 0) ||
1366 	    (task_ledgers.media_footprint < 0) ||
1367 	    (task_ledgers.media_nofootprint_compressed < 0) ||
1368 	    (task_ledgers.media_footprint_compressed < 0) ||
1369 	    (task_ledgers.graphics_nofootprint < 0) ||
1370 	    (task_ledgers.graphics_footprint < 0) ||
1371 	    (task_ledgers.graphics_nofootprint_compressed < 0) ||
1372 	    (task_ledgers.graphics_footprint_compressed < 0) ||
1373 	    (task_ledgers.neural_nofootprint < 0) ||
1374 	    (task_ledgers.neural_footprint < 0) ||
1375 	    (task_ledgers.neural_nofootprint_compressed < 0) ||
1376 	    (task_ledgers.neural_footprint_compressed < 0) ||
1377 	    (task_ledgers.platform_idle_wakeups < 0) ||
1378 	    (task_ledgers.interrupt_wakeups < 0) ||
1379 	    (task_ledgers.cpu_time_billed_to_me < 0) || (task_ledgers.cpu_time_billed_to_others < 0) ||
1380 	    (task_ledgers.physical_writes < 0) ||
1381 	    (task_ledgers.logical_writes < 0) ||
1382 	    (task_ledgers.logical_writes_to_external < 0) ||
1383 #if CONFIG_PHYS_WRITE_ACCT
1384 	    (task_ledgers.fs_metadata_writes < 0) ||
1385 #endif /* CONFIG_PHYS_WRITE_ACCT */
1386 #if CONFIG_MEMORYSTATUS
1387 	    (task_ledgers.memorystatus_dirty_time < 0) ||
1388 #endif /* CONFIG_MEMORYSTATUS */
1389 	    (task_ledgers.energy_billed_to_me < 0) ||
1390 	    (task_ledgers.energy_billed_to_others < 0) ||
1391 	    (task_ledgers.swapins < 0)
1392 	    ) {
1393 		panic("couldn't create entries for task ledger template");
1394 	}
1395 
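	/*
	 * Track only the credit side of these entries; debits are not
	 * accounted separately.
	 */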
1396 	ledger_track_credit_only(t, task_ledgers.phys_footprint);
1397 	ledger_track_credit_only(t, task_ledgers.internal);
1398 	ledger_track_credit_only(t, task_ledgers.external);
1399 	ledger_track_credit_only(t, task_ledgers.reusable);
1400 
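	/*
	 * Track lifetime maxima for the footprint-related entries (the
	 * trailing argument is presumably the tracking interval in seconds).
	 */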
1401 	ledger_track_maximum(t, task_ledgers.phys_footprint, 60);
1402 	ledger_track_maximum(t, task_ledgers.phys_mem, 60);
1403 	ledger_track_maximum(t, task_ledgers.internal, 60);
1404 	ledger_track_maximum(t, task_ledgers.internal_compressed, 60);
1405 	ledger_track_maximum(t, task_ledgers.reusable, 60);
1406 	ledger_track_maximum(t, task_ledgers.external, 60);
1407 #if MACH_ASSERT
1408 	if (pmap_ledgers_panic) {
1409 		ledger_panic_on_negative(t, task_ledgers.phys_footprint);
1410 		ledger_panic_on_negative(t, task_ledgers.page_table);
1411 		ledger_panic_on_negative(t, task_ledgers.internal);
1412 		ledger_panic_on_negative(t, task_ledgers.iokit_mapped);
1413 		ledger_panic_on_negative(t, task_ledgers.alternate_accounting);
1414 		ledger_panic_on_negative(t, task_ledgers.alternate_accounting_compressed);
1415 		ledger_panic_on_negative(t, task_ledgers.purgeable_volatile);
1416 		ledger_panic_on_negative(t, task_ledgers.purgeable_nonvolatile);
1417 		ledger_panic_on_negative(t, task_ledgers.purgeable_volatile_compressed);
1418 		ledger_panic_on_negative(t, task_ledgers.purgeable_nonvolatile_compressed);
1419 #if CONFIG_PHYS_WRITE_ACCT
1420 		ledger_panic_on_negative(t, task_ledgers.fs_metadata_writes);
1421 #endif /* CONFIG_PHYS_WRITE_ACCT */
1422 
1423 		ledger_panic_on_negative(t, task_ledgers.tagged_nofootprint);
1424 		ledger_panic_on_negative(t, task_ledgers.tagged_footprint);
1425 		ledger_panic_on_negative(t, task_ledgers.tagged_nofootprint_compressed);
1426 		ledger_panic_on_negative(t, task_ledgers.tagged_footprint_compressed);
1427 		ledger_panic_on_negative(t, task_ledgers.network_volatile);
1428 		ledger_panic_on_negative(t, task_ledgers.network_nonvolatile);
1429 		ledger_panic_on_negative(t, task_ledgers.network_volatile_compressed);
1430 		ledger_panic_on_negative(t, task_ledgers.network_nonvolatile_compressed);
1431 		ledger_panic_on_negative(t, task_ledgers.media_nofootprint);
1432 		ledger_panic_on_negative(t, task_ledgers.media_footprint);
1433 		ledger_panic_on_negative(t, task_ledgers.media_nofootprint_compressed);
1434 		ledger_panic_on_negative(t, task_ledgers.media_footprint_compressed);
1435 		ledger_panic_on_negative(t, task_ledgers.graphics_nofootprint);
1436 		ledger_panic_on_negative(t, task_ledgers.graphics_footprint);
1437 		ledger_panic_on_negative(t, task_ledgers.graphics_nofootprint_compressed);
1438 		ledger_panic_on_negative(t, task_ledgers.graphics_footprint_compressed);
1439 		ledger_panic_on_negative(t, task_ledgers.neural_nofootprint);
1440 		ledger_panic_on_negative(t, task_ledgers.neural_footprint);
1441 		ledger_panic_on_negative(t, task_ledgers.neural_nofootprint_compressed);
1442 		ledger_panic_on_negative(t, task_ledgers.neural_footprint_compressed);
1443 	}
1444 #endif /* MACH_ASSERT */
1445 
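	/*
	 * Register the limit-exceeded callbacks: phys_footprint feeds the
	 * memorystatus path, interrupt_wakeups the wakeups-rate monitor,
	 * and physical_writes the I/O-rate monitor.
	 */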
1446 #if CONFIG_MEMORYSTATUS
1447 	ledger_set_callback(t, task_ledgers.phys_footprint, task_footprint_exceeded, NULL, NULL);
1448 #endif /* CONFIG_MEMORYSTATUS */
1449 
1450 	ledger_set_callback(t, task_ledgers.interrupt_wakeups,
1451 	    task_wakeups_rate_exceeded, NULL, NULL);
1452 	ledger_set_callback(t, task_ledgers.physical_writes, task_io_rate_exceeded, (void *)FLAVOR_IO_PHYSICAL_WRITES, NULL);
1453 
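	/*
	 * With XNU_MONITOR enabled and no SPTM, the template must be
	 * completed via the secure-allocation variant.
	 */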
1454 #if CONFIG_SPTM || !XNU_MONITOR
1455 	ledger_template_complete(t);
1456 #else /* CONFIG_SPTM || !XNU_MONITOR */
1457 	ledger_template_complete_secure_alloc(t);
1458 #endif /* CONFIG_SPTM || !XNU_MONITOR */
1459 	task_ledger_template = t;
1460 }
1461 
1462 /* Create a task, but leave the task ports disabled */
1463 kern_return_t
1464 task_create_internal(
1465 	task_t             parent_task,            /* Null-able */
1466 	proc_ro_t          proc_ro,
1467 	coalition_t        *parent_coalitions __unused,
1468 	boolean_t          inherit_memory,
1469 	boolean_t          is_64bit,
1470 	boolean_t          is_64bit_data,
1471 	uint32_t           t_flags,
1472 	uint32_t           t_flags_ro,
1473 	uint32_t           t_procflags,
1474 	uint8_t            t_returnwaitflags,
1475 	task_t             child_task)
1476 {
1477 	task_t                  new_task;
1478 	vm_shared_region_t      shared_region;
1479 	ledger_t                ledger = NULL;
1480 	struct task_ro_data     task_ro_data = {};
1481 	uint32_t                parent_t_flags_ro = 0;
1482 
1483 	new_task = child_task;
1484 
1485 	if (task_ref_count_init(new_task) != KERN_SUCCESS) {
1486 		return KERN_RESOURCE_SHORTAGE;
1487 	}
1488 
1489 	/* allocate with active entries */
1490 	assert(task_ledger_template != NULL);
1491 	ledger = ledger_instantiate(task_ledger_template, LEDGER_CREATE_ACTIVE_ENTRIES);
1492 	if (ledger == NULL) {
1493 		task_ref_count_fini(new_task);
1494 		return KERN_RESOURCE_SHORTAGE;
1495 	}
1496 
1497 	counter_alloc(&(new_task->faults));
1498 
1499 #if defined(HAS_APPLE_PAC)
1500 	const uint8_t disable_user_jop = inherit_memory ? parent_task->disable_user_jop : FALSE;
1501 	ml_task_set_rop_pid(new_task, parent_task, inherit_memory);
1502 	ml_task_set_jop_pid(new_task, parent_task, inherit_memory, disable_user_jop);
1503 	ml_task_set_disable_user_jop(new_task, disable_user_jop);
1504 #endif
1505 
1506 
1507 	new_task->ledger = ledger;
1508 
1509 	/* if inherit_memory is true, parent_task MUST not be NULL */
1510 	if (!(t_flags & TF_CORPSE_FORK) && inherit_memory) {
1511 #if CONFIG_DEFERRED_RECLAIM
1512 		if (parent_task->deferred_reclamation_metadata) {
1513 			/*
1514 			 * Prevent concurrent reclaims while we're forking the parent_task's map,
1515 			 * so that the child's map is in sync with the forked reclamation
1516 			 * metadata.
1517 			 */
1518 			vm_deferred_reclamation_buffer_lock(
1519 				parent_task->deferred_reclamation_metadata);
1520 		}
1521 #endif /* CONFIG_DEFERRED_RECLAIM */
1522 		new_task->map = vm_map_fork(ledger, parent_task->map, 0);
1523 #if CONFIG_DEFERRED_RECLAIM
1524 		if (new_task->map != NULL &&
1525 		    parent_task->deferred_reclamation_metadata) {
1526 			new_task->deferred_reclamation_metadata =
1527 			    vm_deferred_reclamation_buffer_fork(new_task,
1528 			    parent_task->deferred_reclamation_metadata);
1529 		}
1530 #endif /* CONFIG_DEFERRED_RECLAIM */
1531 	} else {
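		/*
		 * Not inheriting memory: build a fresh address space with a
		 * new pmap and an empty pageable map covering the user VA
		 * range.
		 */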
1532 		unsigned int pmap_flags = is_64bit ? PMAP_CREATE_64BIT : 0;
1533 		pmap_t pmap = pmap_create_options(ledger, 0, pmap_flags);
1534 		vm_map_t new_map;
1535 
1536 		if (pmap == NULL) {
1537 			counter_free(&new_task->faults);
1538 			ledger_dereference(ledger);
1539 			task_ref_count_fini(new_task);
1540 			return KERN_RESOURCE_SHORTAGE;
1541 		}
1542 		new_map = vm_map_create_options(pmap,
1543 		    (vm_map_offset_t)(VM_MIN_ADDRESS),
1544 		    (vm_map_offset_t)(VM_MAX_ADDRESS),
1545 		    VM_MAP_CREATE_PAGEABLE);
1546 		if (parent_task) {
1547 			vm_map_inherit_limits(new_map, parent_task->map);
1548 		}
1549 		new_task->map = new_map;
1550 	}
1551 
1552 	if (new_task->map == NULL) {
1553 		counter_free(&new_task->faults);
1554 		ledger_dereference(ledger);
1555 		task_ref_count_fini(new_task);
1556 		return KERN_RESOURCE_SHORTAGE;
1557 	}
1558 
1559 #if defined(CONFIG_SCHED_MULTIQ)
1560 	new_task->sched_group = sched_group_create();
1561 #endif
1562 
1563 	lck_mtx_init(&new_task->lock, &task_lck_grp, &task_lck_attr);
1564 	queue_init(&new_task->threads);
1565 	new_task->suspend_count = 0;
1566 	new_task->thread_count = 0;
1567 	new_task->active_thread_count = 0;
1568 	new_task->user_stop_count = 0;
1569 	new_task->legacy_stop_count = 0;
1570 	new_task->active = TRUE;
1571 	new_task->halting = FALSE;
1572 	new_task->priv_flags = 0;
1573 	new_task->t_flags = t_flags;
1574 	task_ro_data.t_flags_ro = t_flags_ro;
1575 	new_task->t_procflags = t_procflags;
1576 	new_task->t_returnwaitflags = t_returnwaitflags;
1577 	new_task->returnwait_inheritor = current_thread();
1578 	new_task->importance = 0;
1579 	new_task->crashed_thread_id = 0;
1580 	new_task->watchports = NULL;
1581 	new_task->t_rr_ranges = NULL;
1582 
1583 	new_task->bank_context = NULL;
1584 
1585 	if (parent_task) {
1586 		parent_t_flags_ro = task_ro_flags_get(parent_task);
1587 	}
1588 
1589 	if (parent_task && inherit_memory) {
1590 #if __has_feature(ptrauth_calls)
1591 		/* Inherit the pac exception flags from parent if in fork */
1592 		task_ro_data.t_flags_ro |= (parent_t_flags_ro & (TFRO_PAC_ENFORCE_USER_STATE |
1593 		    TFRO_PAC_EXC_FATAL));
1594 #endif /* __has_feature(ptrauth_calls) */
1595 		/* Inherit the hardened binary flags from parent if in fork */
1596 		task_ro_data.t_flags_ro |= parent_t_flags_ro & (TFRO_HARDENED | TFRO_PLATFORM);
1597 #if XNU_TARGET_OS_OSX
1598 		task_ro_data.t_flags_ro |= parent_t_flags_ro & TFRO_MACH_HARDENING_OPT_OUT;
1599 #endif /* XNU_TARGET_OS_OSX */
1600 	}
1601 
1602 #ifdef MACH_BSD
1603 	new_task->corpse_info = NULL;
1604 #endif /* MACH_BSD */
1605 
1606 	/* The kernel task (not created by this function) has unique id 0; ids assigned here start at 1. */
1607 	task_set_uniqueid(new_task);
1608 
1609 #if CONFIG_MACF
1610 	set_task_crash_label(new_task, NULL);
1611 
1612 	task_ro_data.task_filters.mach_trap_filter_mask = NULL;
1613 	task_ro_data.task_filters.mach_kobj_filter_mask = NULL;
1614 #endif
1615 
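	/*
	 * If a global footprint limit is configured, apply it (with its
	 * warning level) to the new task's phys_footprint entry.
	 */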
1616 #if CONFIG_MEMORYSTATUS
1617 	if (max_task_footprint != 0) {
1618 		ledger_set_limit(ledger, task_ledgers.phys_footprint, max_task_footprint, PHYS_FOOTPRINT_WARNING_LEVEL);
1619 	}
1620 #endif /* CONFIG_MEMORYSTATUS */
1621 
1622 	if (task_wakeups_monitor_rate != 0) {
1623 		uint32_t flags = WAKEMON_ENABLE | WAKEMON_SET_DEFAULTS;
1624 		int32_t  rate;        // Ignored because of WAKEMON_SET_DEFAULTS
1625 		task_wakeups_monitor_ctl(new_task, &flags, &rate);
1626 	}
1627 
1628 #if CONFIG_IO_ACCOUNTING
1629 	uint32_t flags = IOMON_ENABLE;
1630 	task_io_monitor_ctl(new_task, &flags);
1631 #endif /* CONFIG_IO_ACCOUNTING */
1632 
1633 	machine_task_init(new_task, parent_task, inherit_memory);
1634 
1635 	new_task->task_debug = NULL;
1636 
1637 #if DEVELOPMENT || DEBUG
1638 	new_task->task_unnested = FALSE;
1639 	new_task->task_disconnected_count = 0;
1640 #endif
1641 	queue_init(&new_task->semaphore_list);
1642 	new_task->semaphores_owned = 0;
1643 
1644 	new_task->vtimers = 0;
1645 
1646 	new_task->shared_region = NULL;
1647 
1648 	new_task->affinity_space = NULL;
1649 
1650 #if CONFIG_CPU_COUNTERS
1651 	new_task->t_kpc = 0;
1652 #endif /* CONFIG_CPU_COUNTERS */
1653 
1654 	new_task->pidsuspended = FALSE;
1655 	new_task->frozen = FALSE;
1656 	new_task->changing_freeze_state = FALSE;
1657 	new_task->rusage_cpu_flags = 0;
1658 	new_task->rusage_cpu_percentage = 0;
1659 	new_task->rusage_cpu_interval = 0;
1660 	new_task->rusage_cpu_deadline = 0;
1661 	new_task->rusage_cpu_callt = NULL;
1662 #if MACH_ASSERT
1663 	new_task->suspends_outstanding = 0;
1664 #endif
1665 	recount_task_init(&new_task->tk_recount);
1666 
1667 #if HYPERVISOR
1668 	new_task->hv_task_target = NULL;
1669 #endif /* HYPERVISOR */
1670 
1671 #if CONFIG_TASKWATCH
1672 	queue_init(&new_task->task_watchers);
1673 	new_task->num_taskwatchers  = 0;
1674 	new_task->watchapplying  = 0;
1675 #endif /* CONFIG_TASKWATCH */
1676 
1677 	new_task->mem_notify_reserved = 0;
1678 	new_task->memlimit_attrs_reserved = 0;
1679 
1680 	new_task->requested_policy = default_task_requested_policy;
1681 	new_task->effective_policy = default_task_effective_policy;
1682 
1683 	new_task->task_shared_region_slide = -1;
1684 
1685 	if (parent_task != NULL) {
1686 		task_ro_data.task_tokens.sec_token = *task_get_sec_token(parent_task);
1687 		task_ro_data.task_tokens.audit_token = *task_get_audit_token(parent_task);
1688 
1689 		/* only inherit the option bits, no effect until task_set_immovable_pinned() */
1690 		task_ro_data.task_control_port_options = task_get_control_port_options(parent_task);
1691 
1692 		task_ro_data.t_flags_ro |= parent_t_flags_ro & TFRO_FILTER_MSG;
1693 #if CONFIG_MACF
1694 		if (!(t_flags & TF_CORPSE_FORK)) {
1695 			task_ro_data.task_filters.mach_trap_filter_mask = task_get_mach_trap_filter_mask(parent_task);
1696 			task_ro_data.task_filters.mach_kobj_filter_mask = task_get_mach_kobj_filter_mask(parent_task);
1697 		}
1698 #endif
1699 	} else {
1700 		task_ro_data.task_tokens.sec_token = KERNEL_SECURITY_TOKEN;
1701 		task_ro_data.task_tokens.audit_token = KERNEL_AUDIT_TOKEN;
1702 
1703 		task_ro_data.task_control_port_options = TASK_CONTROL_PORT_OPTIONS_NONE;
1704 	}
1705 
1706 	/* must be set before task_importance_init_from_parent() */
1707 	if (proc_ro != NULL) {
1708 		new_task->bsd_info_ro = proc_ro_ref_task(proc_ro, new_task, &task_ro_data);
1709 	} else {
1710 		new_task->bsd_info_ro = proc_ro_alloc(NULL, NULL, new_task, &task_ro_data);
1711 	}
1712 
1713 	ipc_task_init(new_task, parent_task);
1714 
1715 	task_importance_init_from_parent(new_task, parent_task);
1716 
1717 	new_task->corpse_vmobject_list = NULL;
1718 
1719 	if (parent_task != TASK_NULL) {
1720 		/* inherit the parent's shared region */
1721 		shared_region = vm_shared_region_get(parent_task);
1722 		if (shared_region != NULL) {
1723 			vm_shared_region_set(new_task, shared_region);
1724 		}
1725 
1726 #if __has_feature(ptrauth_calls)
1727 		/* use parent's shared_region_id */
1728 		char *shared_region_id = task_get_vm_shared_region_id_and_jop_pid(parent_task, NULL);
1729 		if (shared_region_id != NULL) {
1730 			shared_region_key_alloc(shared_region_id, FALSE, 0);         /* get a reference */
1731 		}
1732 		task_set_shared_region_id(new_task, shared_region_id);
1733 #endif /* __has_feature(ptrauth_calls) */
1734 
1735 		if (task_has_64Bit_addr(parent_task)) {
1736 			task_set_64Bit_addr(new_task);
1737 		}
1738 
1739 		if (task_has_64Bit_data(parent_task)) {
1740 			task_set_64Bit_data(new_task);
1741 		}
1742 
1743 		new_task->all_image_info_addr = parent_task->all_image_info_addr;
1744 		new_task->all_image_info_size = parent_task->all_image_info_size;
1745 		new_task->mach_header_vm_address = 0;
1746 
1747 		if (inherit_memory && parent_task->affinity_space) {
1748 			task_affinity_create(parent_task, new_task);
1749 		}
1750 
1751 		new_task->pset_hint = parent_task->pset_hint = task_choose_pset(parent_task);
1752 
1753 		new_task->task_exc_guard = parent_task->task_exc_guard;
1754 		if (parent_task->t_flags & TF_NO_SMT) {
1755 			new_task->t_flags |= TF_NO_SMT;
1756 		}
1757 
1758 		if (parent_task->t_flags & TF_USE_PSET_HINT_CLUSTER_TYPE) {
1759 			new_task->t_flags |= TF_USE_PSET_HINT_CLUSTER_TYPE;
1760 		}
1761 
1762 		if (parent_task->t_flags & TF_TECS) {
1763 			new_task->t_flags |= TF_TECS;
1764 		}
1765 
1766 #if defined(__x86_64__)
1767 		if (parent_task->t_flags & TF_INSN_COPY_OPTOUT) {
1768 			new_task->t_flags |= TF_INSN_COPY_OPTOUT;
1769 		}
1770 #endif
1771 
1772 		new_task->priority = BASEPRI_DEFAULT;
1773 		new_task->max_priority = MAXPRI_USER;
1774 
1775 		task_policy_create(new_task, parent_task);
1776 	} else {
1777 #ifdef __LP64__
1778 		if (is_64bit) {
1779 			task_set_64Bit_addr(new_task);
1780 		}
1781 #endif
1782 
1783 		if (is_64bit_data) {
1784 			task_set_64Bit_data(new_task);
1785 		}
1786 
1787 		new_task->all_image_info_addr = (mach_vm_address_t)0;
1788 		new_task->all_image_info_size = (mach_vm_size_t)0;
1789 
1790 		new_task->pset_hint = PROCESSOR_SET_NULL;
1791 
1792 		new_task->task_exc_guard = TASK_EXC_GUARD_NONE;
1793 
1794 		if (new_task == kernel_task) {
1795 			new_task->priority = BASEPRI_KERNEL;
1796 			new_task->max_priority = MAXPRI_KERNEL;
1797 		} else {
1798 			new_task->priority = BASEPRI_DEFAULT;
1799 			new_task->max_priority = MAXPRI_USER;
1800 		}
1801 	}
1802 
1803 	bzero(new_task->coalition, sizeof(new_task->coalition));
1804 	for (int i = 0; i < COALITION_NUM_TYPES; i++) {
1805 		queue_chain_init(new_task->task_coalition[i]);
1806 	}
1807 
1808 	/* Allocate I/O Statistics */
1809 	new_task->task_io_stats = kalloc_data(sizeof(struct io_stat_info),
1810 	    Z_WAITOK | Z_ZERO | Z_NOFAIL);
1811 
1812 	bzero(&(new_task->cpu_time_eqos_stats), sizeof(new_task->cpu_time_eqos_stats));
1813 	bzero(&(new_task->cpu_time_rqos_stats), sizeof(new_task->cpu_time_rqos_stats));
1814 
1815 	bzero(&new_task->extmod_statistics, sizeof(new_task->extmod_statistics));
1816 
1817 	counter_alloc(&(new_task->pageins));
1818 	counter_alloc(&(new_task->cow_faults));
1819 	counter_alloc(&(new_task->messages_sent));
1820 	counter_alloc(&(new_task->messages_received));
1821 
1822 	/* Copy resource accounting info from the parent for a corpse-forked task. */
1823 	if (parent_task != NULL && (t_flags & TF_CORPSE_FORK)) {
1824 		task_rollup_accounting_info(new_task, parent_task);
1825 		task_store_owned_vmobject_info(new_task, parent_task);
1826 	} else {
1827 		/* Initialize to zero for standard fork/spawn case */
1828 		new_task->total_runnable_time = 0;
1829 		new_task->syscalls_mach = 0;
1830 		new_task->syscalls_unix = 0;
1831 		new_task->c_switch = 0;
1832 		new_task->p_switch = 0;
1833 		new_task->ps_switch = 0;
1834 		new_task->decompressions = 0;
1835 		new_task->low_mem_notified_warn = 0;
1836 		new_task->low_mem_notified_critical = 0;
1837 		new_task->purged_memory_warn = 0;
1838 		new_task->purged_memory_critical = 0;
1839 		new_task->low_mem_privileged_listener = 0;
1840 		new_task->memlimit_is_active = 0;
1841 		new_task->memlimit_is_fatal = 0;
1842 		new_task->memlimit_active_exc_resource = 0;
1843 		new_task->memlimit_inactive_exc_resource = 0;
1844 		new_task->task_timer_wakeups_bin_1 = 0;
1845 		new_task->task_timer_wakeups_bin_2 = 0;
1846 		new_task->task_gpu_ns = 0;
1847 		new_task->task_writes_counters_internal.task_immediate_writes = 0;
1848 		new_task->task_writes_counters_internal.task_deferred_writes = 0;
1849 		new_task->task_writes_counters_internal.task_invalidated_writes = 0;
1850 		new_task->task_writes_counters_internal.task_metadata_writes = 0;
1851 		new_task->task_writes_counters_external.task_immediate_writes = 0;
1852 		new_task->task_writes_counters_external.task_deferred_writes = 0;
1853 		new_task->task_writes_counters_external.task_invalidated_writes = 0;
1854 		new_task->task_writes_counters_external.task_metadata_writes = 0;
1855 #if CONFIG_PHYS_WRITE_ACCT
1856 		new_task->task_fs_metadata_writes = 0;
1857 #endif /* CONFIG_PHYS_WRITE_ACCT */
1858 	}
1859 
1860 
1861 	new_task->donates_own_pages = FALSE;
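	/*
	 * Coalition adoption: explicitly supplied parent coalitions take
	 * precedence, then the parent task's coalitions; otherwise this is
	 * expected to be the init task.  Corpse forks join the corpse
	 * coalitions instead.
	 */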
1862 #if CONFIG_COALITIONS
1863 	if (!(t_flags & TF_CORPSE_FORK)) {
1864 		/* TODO: there is no graceful failure path here... */
1865 		if (parent_coalitions && parent_coalitions[COALITION_TYPE_RESOURCE]) {
1866 			coalitions_adopt_task(parent_coalitions, new_task);
1867 			if (parent_coalitions[COALITION_TYPE_JETSAM]) {
1868 				new_task->donates_own_pages = coalition_is_swappable(parent_coalitions[COALITION_TYPE_JETSAM]);
1869 			}
1870 		} else if (parent_task && parent_task->coalition[COALITION_TYPE_RESOURCE]) {
1871 			/*
1872 			 * all tasks at least have a resource coalition, so
1873 			 * if the parent has one then inherit all coalitions
1874 			 * the parent is a part of
1875 			 */
1876 			coalitions_adopt_task(parent_task->coalition, new_task);
1877 			if (parent_task->coalition[COALITION_TYPE_JETSAM]) {
1878 				new_task->donates_own_pages = coalition_is_swappable(parent_task->coalition[COALITION_TYPE_JETSAM]);
1879 			}
1880 		} else {
1881 			/* TODO: assert that new_task will be PID 1 (launchd) */
1882 			coalitions_adopt_init_task(new_task);
1883 		}
1884 		/*
1885 		 * on exec, we need to transfer the coalition roles from the
1886 		 * parent task to the exec copy task.
1887 		 */
1888 		if (parent_task && (t_procflags & TPF_EXEC_COPY)) {
1889 			int coal_roles[COALITION_NUM_TYPES];
1890 			task_coalition_roles(parent_task, coal_roles);
1891 			(void)coalitions_set_roles(new_task->coalition, new_task, coal_roles);
1892 		}
1893 	} else {
1894 		coalitions_adopt_corpse_task(new_task);
1895 	}
1896 
1897 	if (new_task->coalition[COALITION_TYPE_RESOURCE] == COALITION_NULL) {
1898 		panic("created task is not a member of a resource coalition");
1899 	}
1900 	task_set_coalition_member(new_task);
1901 #endif /* CONFIG_COALITIONS */
1902 
1903 	new_task->dispatchqueue_offset = 0;
1904 	if (parent_task != NULL) {
1905 		new_task->dispatchqueue_offset = parent_task->dispatchqueue_offset;
1906 	}
1907 
1908 	new_task->task_can_transfer_memory_ownership = FALSE;
1909 	new_task->task_volatile_objects = 0;
1910 	new_task->task_nonvolatile_objects = 0;
1911 	new_task->task_objects_disowning = FALSE;
1912 	new_task->task_objects_disowned = FALSE;
1913 	new_task->task_owned_objects = 0;
1914 	queue_init(&new_task->task_objq);
1915 
1916 #if CONFIG_FREEZE
1917 	queue_init(&new_task->task_frozen_cseg_q);
1918 #endif /* CONFIG_FREEZE */
1919 
1920 	task_objq_lock_init(new_task);
1921 
1922 #if __arm64__
1923 	new_task->task_legacy_footprint = FALSE;
1924 	new_task->task_extra_footprint_limit = FALSE;
1925 	new_task->task_ios13extended_footprint_limit = FALSE;
1926 #endif /* __arm64__ */
1927 	new_task->task_region_footprint = FALSE;
1928 	new_task->task_has_crossed_thread_limit = FALSE;
1929 	new_task->task_thread_limit = 0;
1930 #if CONFIG_SECLUDED_MEMORY
1931 	new_task->task_can_use_secluded_mem = FALSE;
1932 	new_task->task_could_use_secluded_mem = FALSE;
1933 	new_task->task_could_also_use_secluded_mem = FALSE;
1934 	new_task->task_suppressed_secluded = FALSE;
1935 #endif /* CONFIG_SECLUDED_MEMORY */
1936 
1937 	/*
1938 	 * t_flags is set up above. But since we don't
1939 	 * support darkwake mode being set that way
1940 	 * currently, we clear it out here explicitly.
1941 	 */
1942 	new_task->t_flags &= ~(TF_DARKWAKE_MODE);
1943 
1944 	queue_init(&new_task->io_user_clients);
1945 	new_task->loadTag = 0;
1946 
1947 	lck_mtx_lock(&tasks_threads_lock);
1948 	queue_enter(&tasks, new_task, task_t, tasks);
1949 	tasks_count++;
1950 	if (tasks_suspend_state) {
1951 		task_suspend_internal(new_task);
1952 	}
1953 	lck_mtx_unlock(&tasks_threads_lock);
1954 	task_ref_hold_proc_task_struct(new_task);
1955 
1956 	return KERN_SUCCESS;
1957 }
1958 
1959 /*
1960  *	task_rollup_accounting_info
1961  *
1962  *	Roll up accounting stats. Used to rollup stats
1963  *	for exec copy task and corpse fork.
1964  */
1965 void
1966 task_rollup_accounting_info(task_t to_task, task_t from_task)
1967 {
1968 	assert(from_task != to_task);
1969 
1970 	recount_task_copy(&to_task->tk_recount, &from_task->tk_recount);
1971 	to_task->total_runnable_time = from_task->total_runnable_time;
1972 	counter_add(&to_task->faults, counter_load(&from_task->faults));
1973 	counter_add(&to_task->pageins, counter_load(&from_task->pageins));
1974 	counter_add(&to_task->cow_faults, counter_load(&from_task->cow_faults));
1975 	counter_add(&to_task->messages_sent, counter_load(&from_task->messages_sent));
1976 	counter_add(&to_task->messages_received, counter_load(&from_task->messages_received));
1977 	to_task->decompressions = from_task->decompressions;
1978 	to_task->syscalls_mach = from_task->syscalls_mach;
1979 	to_task->syscalls_unix = from_task->syscalls_unix;
1980 	to_task->c_switch = from_task->c_switch;
1981 	to_task->p_switch = from_task->p_switch;
1982 	to_task->ps_switch = from_task->ps_switch;
1983 	to_task->extmod_statistics = from_task->extmod_statistics;
1984 	to_task->low_mem_notified_warn = from_task->low_mem_notified_warn;
1985 	to_task->low_mem_notified_critical = from_task->low_mem_notified_critical;
1986 	to_task->purged_memory_warn = from_task->purged_memory_warn;
1987 	to_task->purged_memory_critical = from_task->purged_memory_critical;
1988 	to_task->low_mem_privileged_listener = from_task->low_mem_privileged_listener;
1989 	*to_task->task_io_stats = *from_task->task_io_stats;
1990 	to_task->cpu_time_eqos_stats = from_task->cpu_time_eqos_stats;
1991 	to_task->cpu_time_rqos_stats = from_task->cpu_time_rqos_stats;
1992 	to_task->task_timer_wakeups_bin_1 = from_task->task_timer_wakeups_bin_1;
1993 	to_task->task_timer_wakeups_bin_2 = from_task->task_timer_wakeups_bin_2;
1994 	to_task->task_gpu_ns = from_task->task_gpu_ns;
1995 	to_task->task_writes_counters_internal.task_immediate_writes = from_task->task_writes_counters_internal.task_immediate_writes;
1996 	to_task->task_writes_counters_internal.task_deferred_writes = from_task->task_writes_counters_internal.task_deferred_writes;
1997 	to_task->task_writes_counters_internal.task_invalidated_writes = from_task->task_writes_counters_internal.task_invalidated_writes;
1998 	to_task->task_writes_counters_internal.task_metadata_writes = from_task->task_writes_counters_internal.task_metadata_writes;
1999 	to_task->task_writes_counters_external.task_immediate_writes = from_task->task_writes_counters_external.task_immediate_writes;
2000 	to_task->task_writes_counters_external.task_deferred_writes = from_task->task_writes_counters_external.task_deferred_writes;
2001 	to_task->task_writes_counters_external.task_invalidated_writes = from_task->task_writes_counters_external.task_invalidated_writes;
2002 	to_task->task_writes_counters_external.task_metadata_writes = from_task->task_writes_counters_external.task_metadata_writes;
2003 #if CONFIG_PHYS_WRITE_ACCT
2004 	to_task->task_fs_metadata_writes = from_task->task_fs_metadata_writes;
2005 #endif /* CONFIG_PHYS_WRITE_ACCT */
2006 
2007 #if CONFIG_MEMORYSTATUS
2008 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.memorystatus_dirty_time);
2009 #endif /* CONFIG_MEMORYSTATUS */
2010 
2011 	/* Roll up the remaining ledger entries; memory accounting entries are deliberately skipped */
2012 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time);
2013 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.platform_idle_wakeups);
2014 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.interrupt_wakeups);
2015 #if CONFIG_SCHED_SFI
2016 	for (sfi_class_id_t class_id = SFI_CLASS_UNSPECIFIED; class_id < MAX_SFI_CLASS_ID; class_id++) {
2017 		ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.sfi_wait_times[class_id]);
2018 	}
2019 #endif
2020 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time_billed_to_me);
2021 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time_billed_to_others);
2022 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.physical_writes);
2023 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.logical_writes);
2024 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.energy_billed_to_me);
2025 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.energy_billed_to_others);
2026 }
2027 
2028 /*
2029  *	task_deallocate_internal:
2030  *
2031  *	Drop a reference on a task.
2032  *	Don't call this directly.
2033  */
2034 extern void task_deallocate_internal(task_t task, os_ref_count_t refs);
2035 void
2036 task_deallocate_internal(
2037 	task_t          task,
2038 	os_ref_count_t  refs)
2039 {
2040 	ledger_amount_t credit, debit, interrupt_wakeups, platform_idle_wakeups;
2041 
2042 	if (task == TASK_NULL) {
2043 		return;
2044 	}
2045 
2046 #if IMPORTANCE_INHERITANCE
2047 	if (refs == 1) {
2048 		/*
2049 		 * If last ref potentially comes from the task's importance,
2050 		 * disconnect it.  But more task refs may be added before
2051 		 * that completes, so wait for the reference to go to zero
2052 		 * naturally (it may happen on a recursive task_deallocate()
2053 		 * from the ipc_importance_disconnect_task() call).
2054 		 */
2055 		if (IIT_NULL != task->task_imp_base) {
2056 			ipc_importance_disconnect_task(task);
2057 		}
2058 		return;
2059 	}
2060 #endif /* IMPORTANCE_INHERITANCE */
2061 
2062 	if (refs > 0) {
2063 		return;
2064 	}
2065 
2066 	/*
2067 	 * The task should be dead at this point. Ensure other resources
2068 	 * like threads, are gone before we trash the world.
2069 	 */
2070 	assert(queue_empty(&task->threads));
2071 	assert(get_bsdtask_info(task) == NULL);
2072 	assert(!is_active(task->itk_space));
2073 	assert(!task->active);
2074 	assert(task->active_thread_count == 0);
2075 	assert(!task_get_game_mode(task));
2076 
2077 	lck_mtx_lock(&tasks_threads_lock);
2078 	assert(terminated_tasks_count > 0);
2079 	queue_remove(&terminated_tasks, task, task_t, tasks);
2080 	terminated_tasks_count--;
2081 	lck_mtx_unlock(&tasks_threads_lock);
2082 
2083 	/*
2084 	 * remove the reference on bank context
2085 	 */
2086 	task_bank_reset(task);
2087 
2088 	kfree_data(task->task_io_stats, sizeof(struct io_stat_info));
2089 
2090 	/*
2091 	 *	Give the machine dependent code a chance
2092 	 *	to perform cleanup before ripping apart
2093 	 *	the task.
2094 	 */
2095 	machine_task_terminate(task);
2096 
2097 	ipc_task_terminate(task);
2098 
2099 	/* let IOKit know (termination phase 2) */
2100 	iokit_task_terminate(task, 2);
2101 
2102 	/* Unregister task from userspace coredumps on panic */
2103 	kern_unregister_userspace_coredump(task);
2104 
2105 	if (task->affinity_space) {
2106 		task_affinity_deallocate(task);
2107 	}
2108 
2109 #if MACH_ASSERT
2110 	if (task->ledger != NULL &&
2111 	    task->map != NULL &&
2112 	    task->map->pmap != NULL &&
2113 	    task->map->pmap->ledger != NULL) {
2114 		assert(task->ledger == task->map->pmap->ledger);
2115 	}
2116 #endif /* MACH_ASSERT */
2117 
2118 	vm_owned_objects_disown(task);
2119 	assert(task->task_objects_disowned);
2120 	if (task->task_owned_objects != 0) {
2121 		panic("task_deallocate(%p): "
2122 		    "volatile_objects=%d nonvolatile_objects=%d owned=%d\n",
2123 		    task,
2124 		    task->task_volatile_objects,
2125 		    task->task_nonvolatile_objects,
2126 		    task->task_owned_objects);
2127 	}
2128 
2129 #if CONFIG_DEFERRED_RECLAIM
2130 	if (task->deferred_reclamation_metadata != NULL) {
2131 		vm_deferred_reclamation_buffer_deallocate(task->deferred_reclamation_metadata);
2132 		task->deferred_reclamation_metadata = NULL;
2133 	}
2134 #endif /* CONFIG_DEFERRED_RECLAIM */
2135 
2136 	vm_map_deallocate(task->map);
2137 	if (task->is_large_corpse) {
2138 		assert(large_corpse_count > 0);
2139 		OSDecrementAtomic(&large_corpse_count);
2140 		task->is_large_corpse = false;
2141 	}
2142 	is_release(task->itk_space);
2143 
2144 	if (task->t_rr_ranges) {
2145 		restartable_ranges_release(task->t_rr_ranges);
2146 	}
2147 
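	/*
	 * Snapshot the wakeup counters now so they can be folded into the
	 * dead-task statistics before the ledger is dropped.
	 */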
2148 	ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
2149 	    &interrupt_wakeups, &debit);
2150 	ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
2151 	    &platform_idle_wakeups, &debit);
2152 
2153 #if defined(CONFIG_SCHED_MULTIQ)
2154 	sched_group_destroy(task->sched_group);
2155 #endif
2156 
2157 	struct recount_times_mach sum = { 0 };
2158 	struct recount_times_mach p_only = { 0 };
2159 	recount_task_times_perf_only(task, &sum, &p_only);
2160 #if CONFIG_PERVASIVE_ENERGY
2161 	uint64_t energy = recount_task_energy_nj(task);
2162 #endif /* CONFIG_PERVASIVE_ENERGY */
2163 	recount_task_deinit(&task->tk_recount);
2164 
2165 	/* Accumulate statistics for dead tasks */
2166 	lck_spin_lock(&dead_task_statistics_lock);
2167 	dead_task_statistics.total_user_time += sum.rtm_user;
2168 	dead_task_statistics.total_system_time += sum.rtm_system;
2169 
2170 	dead_task_statistics.task_interrupt_wakeups += interrupt_wakeups;
2171 	dead_task_statistics.task_platform_idle_wakeups += platform_idle_wakeups;
2172 
2173 	dead_task_statistics.task_timer_wakeups_bin_1 += task->task_timer_wakeups_bin_1;
2174 	dead_task_statistics.task_timer_wakeups_bin_2 += task->task_timer_wakeups_bin_2;
2175 	dead_task_statistics.total_ptime += p_only.rtm_user + p_only.rtm_system;
2176 	dead_task_statistics.total_pset_switches += task->ps_switch;
2177 	dead_task_statistics.task_gpu_ns += task->task_gpu_ns;
2178 #if CONFIG_PERVASIVE_ENERGY
2179 	dead_task_statistics.task_energy += energy;
2180 #endif /* CONFIG_PERVASIVE_ENERGY */
2181 
2182 	lck_spin_unlock(&dead_task_statistics_lock);
2183 	lck_mtx_destroy(&task->lock, &task_lck_grp);
2184 
2185 	if (!ledger_get_entries(task->ledger, task_ledgers.tkm_private, &credit,
2186 	    &debit)) {
2187 		OSAddAtomic64(credit, (int64_t *)&tasks_tkm_private.alloc);
2188 		OSAddAtomic64(debit, (int64_t *)&tasks_tkm_private.free);
2189 	}
2190 	if (!ledger_get_entries(task->ledger, task_ledgers.tkm_shared, &credit,
2191 	    &debit)) {
2192 		OSAddAtomic64(credit, (int64_t *)&tasks_tkm_shared.alloc);
2193 		OSAddAtomic64(debit, (int64_t *)&tasks_tkm_shared.free);
2194 	}
2195 	ledger_dereference(task->ledger);
2196 
2197 	counter_free(&task->faults);
2198 	counter_free(&task->pageins);
2199 	counter_free(&task->cow_faults);
2200 	counter_free(&task->messages_sent);
2201 	counter_free(&task->messages_received);
2202 
2203 #if CONFIG_COALITIONS
2204 	task_release_coalitions(task);
2205 #endif /* CONFIG_COALITIONS */
2206 
2207 	bzero(task->coalition, sizeof(task->coalition));
2208 
2209 #if MACH_BSD
2210 	/* clean up collected information since last reference to task is gone */
2211 	if (task->corpse_info) {
2212 		void *corpse_info_kernel = kcdata_memory_get_begin_addr(task->corpse_info);
2213 		task_crashinfo_destroy(task->corpse_info);
2214 		task->corpse_info = NULL;
2215 		kfree_data(corpse_info_kernel, CORPSEINFO_ALLOCATION_SIZE);
2216 	}
2217 #endif
2218 
2219 #if CONFIG_MACF
2220 	if (get_task_crash_label(task)) {
2221 		mac_exc_free_label(get_task_crash_label(task));
2222 		set_task_crash_label(task, NULL);
2223 	}
2224 #endif
2225 
2226 	assert(queue_empty(&task->task_objq));
2227 	task_objq_lock_destroy(task);
2228 
2229 	if (task->corpse_vmobject_list) {
2230 		kfree_data(task->corpse_vmobject_list,
2231 		    (vm_size_t)task->corpse_vmobject_list_size);
2232 	}
2233 
2234 	task_ref_count_fini(task);
2235 	proc_ro_erase_task(task->bsd_info_ro);
2236 	task_release_proc_task_struct(task);
2237 }
2238 
2239 /*
2240  *	task_name_deallocate_mig:
2241  *
2242  *	Drop a reference on a task name.
2243  */
2244 void
2245 task_name_deallocate_mig(
2246 	task_name_t             task_name)
2247 {
2248 	return task_deallocate_grp((task_t)task_name, TASK_GRP_MIG);
2249 }
2250 
2251 /*
2252  *	task_policy_set_deallocate_mig:
2253  *
2254  *	Drop a reference on a task type.
2255  */
2256 void
2257 task_policy_set_deallocate_mig(task_policy_set_t task_policy_set)
2258 {
2259 	return task_deallocate_grp((task_t)task_policy_set, TASK_GRP_MIG);
2260 }
2261 
2262 /*
2263  *	task_policy_get_deallocate_mig:
2264  *
2265  *	Drop a reference on a task type.
2266  */
2267 void
2268 task_policy_get_deallocate_mig(task_policy_get_t task_policy_get)
2269 {
2270 	return task_deallocate_grp((task_t)task_policy_get, TASK_GRP_MIG);
2271 }
2272 
2273 /*
2274  *	task_inspect_deallocate_mig:
2275  *
2276  *	Drop a task inspection reference.
2277  */
2278 void
2279 task_inspect_deallocate_mig(
2280 	task_inspect_t          task_inspect)
2281 {
2282 	return task_deallocate_grp((task_t)task_inspect, TASK_GRP_MIG);
2283 }
2284 
2285 /*
2286  *	task_read_deallocate_mig:
2287  *
2288  *	Drop a reference on task read port.
2289  */
2290 void
2291 task_read_deallocate_mig(
2292 	task_read_t          task_read)
2293 {
2294 	return task_deallocate_grp((task_t)task_read, TASK_GRP_MIG);
2295 }
2296 
2297 /*
2298  *	task_suspension_token_deallocate:
2299  *
2300  *	Drop a reference on a task suspension token.
2301  */
2302 void
2303 task_suspension_token_deallocate(
2304 	task_suspension_token_t         token)
2305 {
2306 	return task_deallocate((task_t)token);
2307 }
2308 
2309 void
2310 task_suspension_token_deallocate_grp(
2311 	task_suspension_token_t         token,
2312 	task_grp_t                      grp)
2313 {
2314 	return task_deallocate_grp((task_t)token, grp);
2315 }
2316 
2317 /*
2318  * task_collect_crash_info:
2319  *
2320  * Collect crash info from BSD- and Mach-based data.
2321  */
2322 kern_return_t
2323 task_collect_crash_info(
2324 	task_t task,
2325 #ifdef CONFIG_MACF
2326 	struct label *crash_label,
2327 #endif
2328 	int is_corpse_fork)
2329 {
2330 	kern_return_t kr = KERN_SUCCESS;
2331 
2332 	kcdata_descriptor_t crash_data = NULL;
2333 	kcdata_descriptor_t crash_data_release = NULL;
2334 	mach_msg_type_number_t size = CORPSEINFO_ALLOCATION_SIZE;
2335 	mach_vm_offset_t crash_data_ptr = 0;
2336 	void *crash_data_kernel = NULL;
2337 	void *crash_data_kernel_release = NULL;
2338 #if CONFIG_MACF
2339 	struct label *label, *free_label;
2340 #endif
2341 
2342 	if (!corpses_enabled()) {
2343 		return KERN_NOT_SUPPORTED;
2344 	}
2345 
2346 #if CONFIG_MACF
2347 	free_label = label = mac_exc_create_label(NULL);
2348 #endif
2349 
2350 	task_lock(task);
2351 
2352 	assert(is_corpse_fork || get_bsdtask_info(task) != NULL);
2353 	if (task->corpse_info == NULL && (is_corpse_fork || get_bsdtask_info(task) != NULL)) {
2354 #if CONFIG_MACF
2355 		/* Set the crash label, used by the exception delivery mac hook */
2356 		free_label = get_task_crash_label(task);         // Most likely NULL.
2357 		set_task_crash_label(task, label);
2358 		mac_exc_update_task_crash_label(task, crash_label);
2359 #endif
2360 		task_unlock(task);
2361 
2362 		crash_data_kernel = kalloc_data(CORPSEINFO_ALLOCATION_SIZE,
2363 		    Z_WAITOK | Z_ZERO);
2364 		if (crash_data_kernel == NULL) {
2365 			kr = KERN_RESOURCE_SHORTAGE;
2366 			goto out_no_lock;
2367 		}
2368 		crash_data_ptr = (mach_vm_offset_t) crash_data_kernel;
2369 
2370 		/* Do not get a corpse ref for corpse fork */
2371 		crash_data = task_crashinfo_alloc_init((mach_vm_address_t)crash_data_ptr, size,
2372 		    is_corpse_fork ? 0 : CORPSE_CRASHINFO_HAS_REF,
2373 		    KCFLAG_USE_MEMCOPY);
2374 		if (crash_data) {
2375 			task_lock(task);
2376 			crash_data_release = task->corpse_info;
2377 			crash_data_kernel_release = kcdata_memory_get_begin_addr(crash_data_release);
2378 			task->corpse_info = crash_data;
2379 
2380 			task_unlock(task);
2381 			kr = KERN_SUCCESS;
2382 		} else {
2383 			kfree_data(crash_data_kernel,
2384 			    CORPSEINFO_ALLOCATION_SIZE);
2385 			kr = KERN_FAILURE;
2386 		}
2387 
2388 		if (crash_data_release != NULL) {
2389 			task_crashinfo_destroy(crash_data_release);
2390 		}
2391 		kfree_data(crash_data_kernel_release, CORPSEINFO_ALLOCATION_SIZE);
2392 	} else {
2393 		task_unlock(task);
2394 	}
2395 
2396 out_no_lock:
2397 #if CONFIG_MACF
2398 	if (free_label != NULL) {
2399 		mac_exc_free_label(free_label);
2400 	}
2401 #endif
2402 	return kr;
2403 }
2404 
2405 /*
2406  * task_deliver_crash_notification:
2407  *
2408  * Makes outcall to registered host port for a corpse.
2409  */
2410 kern_return_t
2411 task_deliver_crash_notification(
2412 	task_t corpse, /* corpse or corpse fork */
2413 	thread_t thread,
2414 	exception_type_t etype,
2415 	mach_exception_subcode_t subcode)
2416 {
2417 	kcdata_descriptor_t crash_info = corpse->corpse_info;
2418 	thread_t th_iter = NULL;
2419 	kern_return_t kr = KERN_SUCCESS;
2420 	wait_interrupt_t wsave;
2421 	mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
2422 	ipc_port_t corpse_port;
2423 
2424 	if (crash_info == NULL) {
2425 		return KERN_FAILURE;
2426 	}
2427 
2428 	assert(task_is_a_corpse(corpse));
2429 
2430 	task_lock(corpse);
2431 
2432 	/*
2433 	 * Always populate code[0] as the effective exception type for EXC_CORPSE_NOTIFY.
2434 	 * Crash reporters should derive whether it's fatal from the corpse blob.
2435 	 */
2436 	code[0] = etype;
2437 	code[1] = subcode;
2438 
2439 	queue_iterate(&corpse->threads, th_iter, thread_t, task_threads)
2440 	{
2441 		if (th_iter->corpse_dup == FALSE) {
2442 			ipc_thread_reset(th_iter);
2443 		}
2444 	}
2445 	task_unlock(corpse);
2446 
2447 	/* Arm the no-sender notification for taskport */
2448 	task_reference(corpse);
2449 	corpse_port = convert_corpse_to_port_and_nsrequest(corpse);
2450 
2451 	wsave = thread_interrupt_level(THREAD_UNINT);
2452 	kr = exception_triage_thread(EXC_CORPSE_NOTIFY, code, EXCEPTION_CODE_MAX, thread);
2453 	if (kr != KERN_SUCCESS) {
2454 		printf("Failed to send exception EXC_CORPSE_NOTIFY. error code: %d for pid %d\n", kr, task_pid(corpse));
2455 	}
2456 
2457 	(void)thread_interrupt_level(wsave);
2458 
2459 	/*
2460 	 * Drop the send right on corpse port, will fire the
2461 	 * no-sender notification if exception delivery failed.
2462 	 */
2463 	ipc_port_release_send(corpse_port);
2464 	return kr;
2465 }
2466 
2467 /*
2468  *	task_terminate:
2469  *
2470  *	Terminate the specified task.  See comments on thread_terminate
2471  *	(kern/thread.c) about problems with terminating the "current task."
2472  */
2473 
2474 kern_return_t
2475 task_terminate(
2476 	task_t          task)
2477 {
2478 	if (task == TASK_NULL) {
2479 		return KERN_INVALID_ARGUMENT;
2480 	}
2481 
2482 	if (get_bsdtask_info(task)) {
2483 		return KERN_FAILURE;
2484 	}
2485 
2486 	return task_terminate_internal(task);
2487 }
2488 
2489 #if MACH_ASSERT
2490 extern int proc_pid(struct proc *);
2491 extern void proc_name_kdp(struct proc *p, char *buf, int size);
2492 #endif /* MACH_ASSERT */
2493 
2494 #define VM_MAP_PARTIAL_REAP 0x54  /* 0x150 */
2495 static void
2496 __unused task_partial_reap(task_t task, __unused int pid)
2497 {
2498 	unsigned int    reclaimed_resident = 0;
2499 	unsigned int    reclaimed_compressed = 0;
2500 	uint64_t        task_page_count;
2501 
2502 	task_page_count = (get_task_phys_footprint(task) / PAGE_SIZE_64);
2503 
2504 	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_MAP_PARTIAL_REAP) | DBG_FUNC_START),
2505 	    pid, task_page_count, 0, 0, 0);
2506 
2507 	vm_map_partial_reap(task->map, &reclaimed_resident, &reclaimed_compressed);
2508 
2509 	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_MAP_PARTIAL_REAP) | DBG_FUNC_END),
2510 	    pid, reclaimed_resident, reclaimed_compressed, 0, 0);
2511 }
2512 
2513 /*
2514  * task_mark_corpse:
2515  *
2516  * Mark the task as a corpse. Called by crashing thread.
2517  */
2518 kern_return_t
2519 task_mark_corpse(task_t task)
2520 {
2521 	kern_return_t kr = KERN_SUCCESS;
2522 	thread_t self_thread;
2523 	(void) self_thread;
2524 	wait_interrupt_t wsave;
2525 #if CONFIG_MACF
2526 	struct label *crash_label = NULL;
2527 #endif
2528 
2529 	assert(task != kernel_task);
2530 	assert(task == current_task());
2531 	assert(!task_is_a_corpse(task));
2532 
2533 #if CONFIG_MACF
2534 	crash_label = mac_exc_create_label_for_proc((struct proc*)get_bsdtask_info(task));
2535 #endif
2536 
2537 	kr = task_collect_crash_info(task,
2538 #if CONFIG_MACF
2539 	    crash_label,
2540 #endif
2541 	    FALSE);
2542 	if (kr != KERN_SUCCESS) {
2543 		goto out;
2544 	}
2545 
2546 	self_thread = current_thread();
2547 
2548 	wsave = thread_interrupt_level(THREAD_UNINT);
2549 	task_lock(task);
2550 
2551 	/*
2552 	 * Check if any other thread called task_terminate_internal
2553 	 * and made the task inactive before we could mark it for
2554 	 * corpse pending report. Bail out if the task is inactive.
2555 	 */
2556 	if (!task->active) {
2557 		kcdata_descriptor_t crash_data_release = task->corpse_info;
2558 		void *crash_data_kernel_release = kcdata_memory_get_begin_addr(crash_data_release);
2559 
2560 		task->corpse_info = NULL;
2561 		task_unlock(task);
2562 
2563 		if (crash_data_release != NULL) {
2564 			task_crashinfo_destroy(crash_data_release);
2565 		}
2566 		kfree_data(crash_data_kernel_release, CORPSEINFO_ALLOCATION_SIZE);
2567 		return KERN_TERMINATED;
2568 	}
2569 
2570 	task_set_corpse_pending_report(task);
2571 	task_set_corpse(task);
2572 	task->crashed_thread_id = thread_tid(self_thread);
2573 
2574 	kr = task_start_halt_locked(task, TRUE);
2575 	assert(kr == KERN_SUCCESS);
2576 
2577 	task_set_uniqueid(task);
2578 
2579 	task_unlock(task);
2580 
2581 	/*
2582 	 * ipc_task_reset() moved to last thread_terminate_self(): rdar://75737960.
2583 	 * disable old ports here instead.
2584 	 *
2585 	 * The vm_map and ipc_space must exist until this function returns,
2586 	 * convert_port_to_{map,space}_with_flavor relies on this behavior.
2587 	 */
2588 	ipc_task_disable(task);
2589 
2590 	/* let IOKit know (termination phase 1) */
2591 	iokit_task_terminate(task, 1);
2592 
2593 	/* terminate the ipc space */
2594 	ipc_space_terminate(task->itk_space);
2595 
2596 	/* Add it to global corpse task list */
2597 	task_add_to_corpse_task_list(task);
2598 
2599 	thread_terminate_internal(self_thread);
2600 
2601 	(void) thread_interrupt_level(wsave);
2602 	assert(task->halting == TRUE);
2603 
2604 out:
2605 #if CONFIG_MACF
2606 	mac_exc_free_label(crash_label);
2607 #endif
2608 	return kr;
2609 }
2610 
2611 /*
2612  *	task_set_uniqueid
2613  *
2614  *	Set task uniqueid to systemwide unique 64 bit value
2615  */
2616 void
2617 task_set_uniqueid(task_t task)
2618 {
2619 	task->task_uniqueid = OSIncrementAtomic64(&next_taskuniqueid);
2620 }
2621 
2622 /*
2623  *	task_clear_corpse
2624  *
2625  *	Clears the corpse pending bit on task.
2626  *	Removes inspection bit on the threads.
2627  */
2628 void
2629 task_clear_corpse(task_t task)
2630 {
2631 	thread_t th_iter = NULL;
2632 
2633 	task_lock(task);
2634 	queue_iterate(&task->threads, th_iter, thread_t, task_threads)
2635 	{
2636 		thread_mtx_lock(th_iter);
2637 		th_iter->inspection = FALSE;
2638 		ipc_thread_disable(th_iter);
2639 		thread_mtx_unlock(th_iter);
2640 	}
2641 
2642 	thread_terminate_crashed_threads();
2643 	/* remove the pending corpse report flag */
2644 	task_clear_corpse_pending_report(task);
2645 
2646 	task_unlock(task);
2647 }
2648 
2649 /*
2650  *	task_port_no_senders
2651  *
2652  *	Called whenever the Mach port system detects no-senders on
2653  *	the task port of a corpse.
2654  *	Each notification that comes in should terminate the task (corpse).
2655  */
2656 static void
2657 task_port_no_senders(ipc_port_t port, __unused mach_port_mscount_t mscount)
2658 {
2659 	task_t task = ipc_kobject_get_locked(port, IKOT_TASK_CONTROL);
2660 
2661 	assert(task != TASK_NULL);
2662 	assert(task_is_a_corpse(task));
2663 
2664 	/* Remove the task from global corpse task list */
2665 	task_remove_from_corpse_task_list(task);
2666 
2667 	task_clear_corpse(task);
2668 	vm_map_unset_corpse_source(task->map);
2669 	task_terminate_internal(task);
2670 }
2671 
2672 /*
2673  *	task_port_with_flavor_no_senders
2674  *
2675  *	Called whenever the Mach port system detects no-senders on
2676  *	the task inspect or read port. These ports are allocated lazily and
2677  *	should be deallocated here when there are no senders remaining.
2678  */
2679 static void
2680 task_port_with_flavor_no_senders(
2681 	ipc_port_t          port,
2682 	mach_port_mscount_t mscount __unused)
2683 {
2684 	task_t task;
2685 	mach_task_flavor_t flavor;
2686 	ipc_kobject_type_t kotype;
2687 
2688 	ip_mq_lock(port);
2689 	if (port->ip_srights > 0) {
2690 		ip_mq_unlock(port);
2691 		return;
2692 	}
2693 	kotype = ip_kotype(port);
2694 	assert((IKOT_TASK_READ == kotype) || (IKOT_TASK_INSPECT == kotype));
2695 	task = ipc_kobject_get_locked(port, kotype);
2696 	if (task != TASK_NULL) {
2697 		task_reference(task);
2698 	}
2699 	ip_mq_unlock(port);
2700 
2701 	if (task == TASK_NULL) {
2702 		/* The task is exiting or disabled; it will eventually deallocate the port */
2703 		return;
2704 	}
2705 
2706 	if (kotype == IKOT_TASK_READ) {
2707 		flavor = TASK_FLAVOR_READ;
2708 	} else {
2709 		flavor = TASK_FLAVOR_INSPECT;
2710 	}
2711 
2712 	itk_lock(task);
2713 	ip_mq_lock(port);
2714 
2715 	/*
2716 	 * If the port is no longer active, then ipc_task_terminate() ran
2717 	 * and destroyed the kobject already. Just deallocate the task
2718 	 * ref we took and go away.
2719 	 *
2720 	 * It is also possible that several nsrequests are in flight,
2721 	 * only one shall NULL-out the port entry, and this is the one
2722 	 * that gets to dealloc the port.
2723 	 *
2724 	 * Check for a stale no-senders notification. A call to any function
2725 	 * that vends out send rights to this port could resurrect it between
2726 	 * this notification being generated and actually being handled here.
2727 	 */
2728 	if (!ip_active(port) ||
2729 	    task->itk_task_ports[flavor] != port ||
2730 	    port->ip_srights > 0) {
2731 		ip_mq_unlock(port);
2732 		itk_unlock(task);
2733 		task_deallocate(task);
2734 		return;
2735 	}
2736 
2737 	assert(task->itk_task_ports[flavor] == port);
2738 	task->itk_task_ports[flavor] = IP_NULL;
2739 	itk_unlock(task);
2740 
2741 	ipc_kobject_dealloc_port_and_unlock(port, 0, kotype);
2742 
2743 	task_deallocate(task);
2744 }
2745 
2746 /*
2747  *	task_wait_till_threads_terminate_locked
2748  *
2749  *	Wait till all the threads in the task are terminated.
2750  *	Might release the task lock and re-acquire it.
2751  */
2752 void
2753 task_wait_till_threads_terminate_locked(task_t task)
2754 {
2755 	/* wait for all the threads in the task to terminate */
2756 	while (task->active_thread_count != 0) {
2757 		assert_wait((event_t)&task->active_thread_count, THREAD_UNINT);
2758 		task_unlock(task);
2759 		thread_block(THREAD_CONTINUE_NULL);
2760 
2761 		task_lock(task);
2762 	}
2763 }
2764 
2765 /*
2766  *	task_duplicate_map_and_threads
2767  *
2768  *	Copy vmmap of source task.
2769  *	Copy active threads from source task to destination task.
2770  *	The source task is suspended for the duration of the copy.
2771  */
2772 kern_return_t
2773 task_duplicate_map_and_threads(
2774 	task_t task,
2775 	void *p,
2776 	task_t new_task,
2777 	thread_t *thread_ret,
2778 	uint64_t **udata_buffer,
2779 	int *size,
2780 	int *num_udata,
2781 	bool for_exception)
2782 {
2783 	kern_return_t kr = KERN_SUCCESS;
2784 	int active;
2785 	thread_t thread, self, thread_return = THREAD_NULL;
2786 	thread_t new_thread = THREAD_NULL, first_thread = THREAD_NULL;
2787 	thread_t *thread_array;
2788 	uint32_t active_thread_count = 0, array_count = 0, i;
2789 	vm_map_t oldmap;
2790 	uint64_t *buffer = NULL;
2791 	int buf_size = 0;
2792 	int est_knotes = 0, num_knotes = 0;
2793 
2794 	self = current_thread();
2795 
2796 	/*
2797 	 * Suspend the task to copy thread state, use the internal
2798 	 * variant so that no user-space process can resume
2799 	 * the task from under us
2800 	 */
2801 	kr = task_suspend_internal(task);
2802 	if (kr != KERN_SUCCESS) {
2803 		return kr;
2804 	}
2805 
2806 	if (task->map->disable_vmentry_reuse == TRUE) {
2807 		/*
2808 		 * Quite likely GuardMalloc (or some debugging tool)
2809 		 * is being used on this task, and it has exhausted
2810 		 * its limit. Making a corpse would likely encounter
2811 		 * a lot of VM entries that will need COW.
2812 		 *
2813 		 * Skip it.
2814 		 */
2815 #if DEVELOPMENT || DEBUG
2816 		memorystatus_abort_vm_map_fork(task);
2817 #endif
2818 		ktriage_record(thread_tid(self), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_CORPSE, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_CORPSE_FAIL_LIBGMALLOC), 0 /* arg */);
2819 		task_resume_internal(task);
2820 		return KERN_FAILURE;
2821 	}
2822 
2823 	/* Check with VM if vm_map_fork is allowed for this task */
2824 	bool is_large = false;
2825 	if (memorystatus_allowed_vm_map_fork(task, &is_large)) {
2826 		/* Set up the new task's vm_map: replace it with a COW fork of the parent task's map */
2827 		oldmap = new_task->map;
2828 		new_task->map = vm_map_fork(new_task->ledger,
2829 		    task->map,
2830 		    (VM_MAP_FORK_SHARE_IF_INHERIT_NONE |
2831 		    VM_MAP_FORK_PRESERVE_PURGEABLE |
2832 		    VM_MAP_FORK_CORPSE_FOOTPRINT));
2833 		if (new_task->map) {
2834 			new_task->is_large_corpse = is_large;
2835 			vm_map_deallocate(oldmap);
2836 
2837 			/* copy ledgers that impact the memory footprint */
2838 			vm_map_copy_footprint_ledgers(task, new_task);
2839 
2840 			/* Get all the udata pointers from kqueue */
2841 			est_knotes = kevent_proc_copy_uptrs(p, NULL, 0);
2842 			if (est_knotes > 0) {
2843 				buf_size = (est_knotes + 32) * sizeof(uint64_t);
2844 				buffer = kalloc_data(buf_size, Z_WAITOK);
2845 				num_knotes = kevent_proc_copy_uptrs(p, buffer, buf_size);
2846 				if (num_knotes > est_knotes + 32) {
2847 					num_knotes = est_knotes + 32;
2848 				}
2849 			}
2850 		} else {
2851 			if (is_large) {
2852 				assert(large_corpse_count > 0);
2853 				OSDecrementAtomic(&large_corpse_count);
2854 			}
2855 			new_task->map = oldmap;
2856 #if DEVELOPMENT || DEBUG
2857 			memorystatus_abort_vm_map_fork(task);
2858 #endif
2859 			task_resume_internal(task);
2860 			return KERN_NO_SPACE;
2861 		}
2862 	} else if (!for_exception) {
2863 #if DEVELOPMENT || DEBUG
2864 		memorystatus_abort_vm_map_fork(task);
2865 #endif
2866 		task_resume_internal(task);
2867 		return KERN_NO_SPACE;
2868 	}
2869 
2870 	active_thread_count = task->active_thread_count;
2871 	if (active_thread_count == 0) {
2872 		kfree_data(buffer, buf_size);
2873 		task_resume_internal(task);
2874 		return KERN_FAILURE;
2875 	}
2876 
2877 	thread_array = kalloc_type(thread_t, active_thread_count, Z_WAITOK);
2878 
2879 	/* Collect the active threads under the task lock; drop the lock before calling thread_create_with_continuation */
2880 	task_lock(task);
2881 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
2882 		/* Skip inactive threads */
2883 		active = thread->active;
2884 		if (!active) {
2885 			continue;
2886 		}
2887 
2888 		if (array_count >= active_thread_count) {
2889 			break;
2890 		}
2891 
2892 		thread_array[array_count++] = thread;
2893 		thread_reference(thread);
2894 	}
2895 	task_unlock(task);
2896 
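	/*
	 * For each captured thread, create a corpse twin parked in
	 * thread_corpse_continue and duplicate the source thread's state
	 * into it via thread_dup2.
	 */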
2897 	for (i = 0; i < array_count; i++) {
2898 		kr = thread_create_with_continuation(new_task, &new_thread, (thread_continue_t)thread_corpse_continue);
2899 		if (kr != KERN_SUCCESS) {
2900 			break;
2901 		}
2902 
2903 		/* Equivalent of current thread in corpse */
2904 		if (thread_array[i] == self) {
2905 			thread_return = new_thread;
2906 			new_task->crashed_thread_id = thread_tid(new_thread);
2907 		} else if (first_thread == NULL) {
2908 			first_thread = new_thread;
2909 		} else {
2910 			/* drop the extra ref returned by thread_create_with_continuation */
2911 			thread_deallocate(new_thread);
2912 		}
2913 
2914 		kr = thread_dup2(thread_array[i], new_thread);
2915 		if (kr != KERN_SUCCESS) {
2916 			thread_mtx_lock(new_thread);
2917 			new_thread->corpse_dup = TRUE;
2918 			thread_mtx_unlock(new_thread);
2919 			continue;
2920 		}
2921 
2922 		/* Copy thread name */
2923 		bsd_copythreadname(get_bsdthread_info(new_thread),
2924 		    get_bsdthread_info(thread_array[i]));
2925 		new_thread->thread_tag = thread_array[i]->thread_tag &
2926 		    ~THREAD_TAG_USER_JOIN;
2927 		thread_copy_resource_info(new_thread, thread_array[i]);
2928 	}
2929 
2930 	/* return the first thread if we couldn't find the equivalent of current */
2931 	if (thread_return == THREAD_NULL) {
2932 		thread_return = first_thread;
2933 	} else if (first_thread != THREAD_NULL) {
2934 		/* drop the extra ref returned by thread_create_with_continuation */
2935 		thread_deallocate(first_thread);
2936 	}
2937 
2938 	task_resume_internal(task);
2939 
2940 	for (i = 0; i < array_count; i++) {
2941 		thread_deallocate(thread_array[i]);
2942 	}
2943 	kfree_type(thread_t, active_thread_count, thread_array);
2944 
2945 	if (kr == KERN_SUCCESS) {
2946 		*thread_ret = thread_return;
2947 		*udata_buffer = buffer;
2948 		*size = buf_size;
2949 		*num_udata = num_knotes;
2950 	} else {
2951 		if (thread_return != THREAD_NULL) {
2952 			thread_deallocate(thread_return);
2953 		}
2954 		kfree_data(buffer, buf_size);
2955 	}
2956 
2957 	return kr;
2958 }
2959 
2960 #if CONFIG_SECLUDED_MEMORY
2961 extern void task_set_can_use_secluded_mem_locked(
2962 	task_t          task,
2963 	boolean_t       can_use_secluded_mem);
2964 #endif /* CONFIG_SECLUDED_MEMORY */
2965 
2966 #if MACH_ASSERT
2967 int debug4k_panic_on_terminate = 0;
2968 #endif /* MACH_ASSERT */
2969 kern_return_t
2970 task_terminate_internal(
2971 	task_t                  task)
2972 {
2973 	thread_t                        thread, self;
2974 	task_t                          self_task;
2975 	boolean_t                       interrupt_save;
2976 	int                             pid = 0;
2977 
2978 	assert(task != kernel_task);
2979 
2980 	self = current_thread();
2981 	self_task = current_task();
2982 
2983 	/*
2984 	 *	Get the task locked and make sure that we are not racing
2985 	 *	with someone else trying to terminate us.
2986 	 */
2987 	if (task == self_task) {
2988 		task_lock(task);
2989 	} else if (task < self_task) {
2990 		task_lock(task);
2991 		task_lock(self_task);
2992 	} else {
2993 		task_lock(self_task);
2994 		task_lock(task);
2995 	}
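	/*
	 * The branching above is the classic address-ordered acquisition of
	 * two locks, which prevents an ABBA deadlock when two tasks try to
	 * terminate each other concurrently. A minimal sketch of the same
	 * pattern (illustrative only; lock_pair() is not a real routine):
	 *
	 *	void lock_pair(task_t a, task_t b) {
	 *		if (a == b)     { task_lock(a); }
	 *		else if (a < b) { task_lock(a); task_lock(b); }
	 *		else            { task_lock(b); task_lock(a); }
	 *	}
	 */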
2996 
2997 #if CONFIG_SECLUDED_MEMORY
2998 	if (task->task_can_use_secluded_mem) {
2999 		task_set_can_use_secluded_mem_locked(task, FALSE);
3000 	}
3001 	task->task_could_use_secluded_mem = FALSE;
3002 	task->task_could_also_use_secluded_mem = FALSE;
3003 
3004 	if (task->task_suppressed_secluded) {
3005 		stop_secluded_suppression(task);
3006 	}
3007 #endif /* CONFIG_SECLUDED_MEMORY */
3008 
3009 	if (!task->active) {
3010 		/*
3011 		 *	Task is already being terminated.
3012 		 *	Just return an error. If we are dying, this will
3013 		 *	just get us to our AST special handler and that
3014 		 *	will get us to finalize the termination of ourselves.
3015 		 */
3016 		task_unlock(task);
3017 		if (self_task != task) {
3018 			task_unlock(self_task);
3019 		}
3020 
3021 		return KERN_FAILURE;
3022 	}
3023 
3024 	if (task_corpse_pending_report(task)) {
3025 		/*
3026 		 *	Task is marked for reporting as corpse.
3027 		 *	Just return an error. This will
3028 		 *	just get us to our AST special handler and that
3029 		 *	will get us to finish the path to death.
3030 		 */
3031 		task_unlock(task);
3032 		if (self_task != task) {
3033 			task_unlock(self_task);
3034 		}
3035 
3036 		return KERN_FAILURE;
3037 	}
3038 
3039 	if (self_task != task) {
3040 		task_unlock(self_task);
3041 	}
3042 
3043 	/*
3044 	 * Make sure the current thread does not get aborted out of
3045 	 * the waits inside these operations.
3046 	 */
3047 	interrupt_save = thread_interrupt_level(THREAD_UNINT);
3048 
3049 	/*
3050 	 *	Indicate that we want all the threads to stop executing
3051 	 *	at user space by holding the task (we would have held
3052 	 *	each thread independently in thread_terminate_internal -
3053 	 *	but this way we may be more likely to already find it
3054 	 *	held there).  Mark the task inactive, and prevent
3055 	 *	further task operations via the task port.
3056 	 *
3057 	 *	The vm_map and ipc_space must exist until this function returns,
3058 	 *	convert_port_to_{map,space}_with_flavor relies on this behavior.
3059 	 */
3060 	task_hold_locked(task);
3061 	task->active = FALSE;
3062 	ipc_task_disable(task);
3063 
3064 #if CONFIG_EXCLAVES
3065 	task_stop_conclave(task, false);
3066 #endif /* CONFIG_EXCLAVES */
3067 
3068 #if CONFIG_TELEMETRY
3069 	/*
3070 	 * Notify telemetry that this task is going away.
3071 	 */
3072 	telemetry_task_ctl_locked(task, TF_TELEMETRY, 0);
3073 #endif
3074 
3075 	/*
3076 	 *	Terminate each thread in the task.
3077 	 */
3078 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
3079 		thread_terminate_internal(thread);
3080 	}
3081 
3082 #ifdef MACH_BSD
3083 	void *bsd_info = get_bsdtask_info(task);
3084 	if (bsd_info != NULL) {
3085 		pid = proc_pid(bsd_info);
3086 	}
3087 #endif /* MACH_BSD */
3088 
3089 	task_unlock(task);
3090 
3091 	proc_set_task_policy(task, TASK_POLICY_ATTRIBUTE,
3092 	    TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
3093 
3094 	/* Early object reap phase */
3095 
3096 // PR-17045188: Revisit implementation
3097 //        task_partial_reap(task, pid);
3098 
3099 #if CONFIG_TASKWATCH
3100 	/*
3101 	 * remove all task watchers
3102 	 */
3103 	task_removewatchers(task);
3104 
3105 #endif /* CONFIG_TASKWATCH */
3106 
3107 	/*
3108 	 *	Destroy all synchronizers owned by the task.
3109 	 */
3110 	task_synchronizer_destroy_all(task);
3111 
3112 	/*
3113 	 *	Clear the watchport boost on the task.
3114 	 */
3115 	task_remove_turnstile_watchports(task);
3116 
3117 	/* let iokit know 1 */
3118 	iokit_task_terminate(task, 1);
3119 
3120 	/*
3121 	 *	Destroy the IPC space, leaving just a reference for it.
3122 	 */
3123 	ipc_space_terminate(task->itk_space);
3124 
3125 #if 00
3126 	/* if some ledgers go negative on tear-down again... */
3127 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
3128 	    task_ledgers.phys_footprint);
3129 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
3130 	    task_ledgers.internal);
3131 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
3132 	    task_ledgers.iokit_mapped);
3133 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
3134 	    task_ledgers.alternate_accounting);
3135 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
3136 	    task_ledgers.alternate_accounting_compressed);
3137 #endif
3138 
3139 #if CONFIG_DEFERRED_RECLAIM
3140 	/*
3141 	 * Remove this task's reclaim buffer from the global queues.
3142 	 */
3143 	if (task->deferred_reclamation_metadata != NULL) {
3144 		vm_deferred_reclamation_buffer_uninstall(task->deferred_reclamation_metadata);
3145 	}
3146 #endif /* CONFIG_DEFERRED_RECLAIM */
3147 
3148 	/*
3149 	 * If the current thread is a member of the task
3150 	 * being terminated, then the last reference to
3151 	 * the task will not be dropped until the thread
3152 	 * is finally reaped.  To avoid incurring the
3153 	 * expense of removing the address space regions
3154 	 * at reap time, we do it explicitly here.
3155 	 */
3156 
3157 #if MACH_ASSERT
3158 	/*
3159 	 * Identify the pmap's process, in case the pmap ledgers drift
3160 	 * and we have to report it.
3161 	 */
3162 	char procname[17];
3163 	void *proc = get_bsdtask_info(task);
3164 	if (proc) {
3165 		pid = proc_pid(proc);
3166 		proc_name_kdp(proc, procname, sizeof(procname));
3167 	} else {
3168 		pid = 0;
3169 		strlcpy(procname, "<unknown>", sizeof(procname));
3170 	}
3171 	pmap_set_process(task->map->pmap, pid, procname);
3172 	if (vm_map_page_shift(task->map) < (int)PAGE_SHIFT) {
3173 		DEBUG4K_LIFE("map %p procname: %s\n", task->map, procname);
3174 		if (debug4k_panic_on_terminate) {
3175 			panic("DEBUG4K: %s:%d %d[%s] map %p", __FUNCTION__, __LINE__, pid, procname, task->map);
3176 		}
3177 	}
3178 #endif /* MACH_ASSERT */
3179 
3180 	vm_map_terminate(task->map);
3181 
3182 	/* release our shared region */
3183 	vm_shared_region_set(task, NULL);
3184 
3185 #if __has_feature(ptrauth_calls)
3186 	task_set_shared_region_id(task, NULL);
3187 #endif /* __has_feature(ptrauth_calls) */
3188 
3189 	lck_mtx_lock(&tasks_threads_lock);
3190 	queue_remove(&tasks, task, task_t, tasks);
3191 	queue_enter(&terminated_tasks, task, task_t, tasks);
3192 	tasks_count--;
3193 	terminated_tasks_count++;
3194 	lck_mtx_unlock(&tasks_threads_lock);
3195 
3196 	/*
3197 	 * We no longer need to guard against being aborted, so restore
3198 	 * the previous interruptible state.
3199 	 */
3200 	thread_interrupt_level(interrupt_save);
3201 
3202 #if CONFIG_CPU_COUNTERS
3203 	/* force the task to release all ctrs */
3204 	if (task->t_kpc & TASK_KPC_FORCED_ALL_CTRS) {
3205 		kpc_force_all_ctrs(task, 0);
3206 	}
3207 #endif /* CONFIG_CPU_COUNTERS */
3208 
3209 #if CONFIG_COALITIONS
3210 	/*
3211 	 * Leave the coalition for a corpse task or a task that
3212 	 * never had any active threads (e.g. fork, exec failure).
3213 	 * For a task with active threads, the task will be removed
3214 	 * from the coalition by the last terminating thread.
3215 	 */
3216 	if (task->active_thread_count == 0) {
3217 		coalitions_remove_task(task);
3218 	}
3219 #endif
3220 
3221 #if CONFIG_FREEZE
3222 	extern int      vm_compressor_available;
3223 	if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE && vm_compressor_available) {
3224 		task_disown_frozen_csegs(task);
3225 		assert(queue_empty(&task->task_frozen_cseg_q));
3226 	}
3227 #endif /* CONFIG_FREEZE */
3228 
3229 
3230 	/*
3231 	 * Get rid of the task active reference on itself.
3232 	 */
3233 	task_deallocate_grp(task, TASK_GRP_INTERNAL);
3234 
3235 	return KERN_SUCCESS;
3236 }
3237 
3238 void
3239 tasks_system_suspend(boolean_t suspend)
3240 {
3241 	task_t task;
3242 
3243 	KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SUSPEND_USERSPACE) |
3244 	    (suspend ? DBG_FUNC_START : DBG_FUNC_END));
3245 
3246 	lck_mtx_lock(&tasks_threads_lock);
3247 	assert(tasks_suspend_state != suspend);
3248 	tasks_suspend_state = suspend;
3249 	queue_iterate(&tasks, task, task_t, tasks) {
3250 		if (task == kernel_task) {
3251 			continue;
3252 		}
3253 		suspend ? task_suspend_internal(task) : task_resume_internal(task);
3254 	}
3255 	lck_mtx_unlock(&tasks_threads_lock);
3256 }
3257 
3258 /*
3259  * task_start_halt:
3260  *
3261  *      Shut the current task down (except for the current thread) in
3262  *	preparation for dramatic changes to the task (probably exec).
3263  *	We hold the task and mark all other threads in the task for
3264  *	termination.
3265  */
3266 kern_return_t
3267 task_start_halt(task_t task)
3268 {
3269 	kern_return_t kr = KERN_SUCCESS;
3270 	task_lock(task);
3271 	kr = task_start_halt_locked(task, FALSE);
3272 	task_unlock(task);
3273 	return kr;
3274 }
3275 
3276 static kern_return_t
3277 task_start_halt_locked(task_t task, boolean_t should_mark_corpse)
3278 {
3279 	thread_t thread, self;
3280 	uint64_t dispatchqueue_offset;
3281 
3282 	assert(task != kernel_task);
3283 
3284 	self = current_thread();
3285 
3286 	if (task != get_threadtask(self) && !task_is_a_corpse_fork(task)) {
3287 		return KERN_INVALID_ARGUMENT;
3288 	}
3289 
3290 	if (!should_mark_corpse &&
3291 	    (task->halting || !task->active || !self->active)) {
3292 		/*
3293 		 * Task or current thread is already being terminated.
3294 		 * Hurry up and return out of the current kernel context
3295 		 * so that we run our AST special handler to terminate
3296 		 * ourselves. If should_mark_corpse is set, the corpse
3297 		 * creation might have raced with exec; let the corpse
3298 		 * creation continue. Once the current thread reaches its AST,
3299 		 * the thread in exec will be woken up from task_complete_halt.
3300 		 * Exec will fail because the proc was marked for exit.
3301 		 * Once the thread in exec reaches its AST, it will call proc_exit
3302 		 * and deliver the EXC_CORPSE_NOTIFY.
3303 		 */
3304 		return KERN_FAILURE;
3305 	}
3306 
3307 	/* Thread creation will fail after this point of no return. */
3308 	task->halting = TRUE;
3309 
3310 	/*
3311 	 * Mark all the threads to keep them from starting any more
3312 	 * user-level execution. The thread_terminate_internal code
3313 	 * would do this on a thread by thread basis anyway, but this
3314 	 * gives us a better chance of not having to wait there.
3315 	 */
3316 	task_hold_locked(task);
3317 
3318 #if CONFIG_EXCLAVES
3319 	if (should_mark_corpse) {
3320 		void *crash_info_ptr = task_get_corpseinfo(task);
3321 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
3322 			if (crash_info_ptr != NULL && thread->th_exclaves_state & TH_EXCLAVES_RPC) {
3323 				struct thread_crash_exclaves_info info = { 0 };
3324 
3325 				info.tcei_flags = kExclaveRPCActive;
3326 				if (thread->th_exclaves_state & TH_EXCLAVES_SCHEDULER_REQUEST) {
3327 					info.tcei_flags |= kExclaveSchedulerRequest;
3328 				}
3329 				if (thread->th_exclaves_state & TH_EXCLAVES_UPCALL) {
3330 					info.tcei_flags |= kExclaveUpcallActive;
3331 				}
3332 				info.tcei_scid = thread->th_exclaves_scheduling_context_id;
3333 				info.tcei_thread_id = thread->thread_id;
3334 
3335 				kcdata_push_data(crash_info_ptr,
3336 				    STACKSHOT_KCTYPE_KERN_EXCLAVES_CRASH_THREADINFO,
3337 				    sizeof(struct thread_crash_exclaves_info), &info);
3338 			}
3339 		}
3340 
3341 		task_unlock(task);
3342 		task_stop_conclave(task, true);
3343 		task_lock(task);
3344 	}
3345 #endif /* CONFIG_EXCLAVES */
3346 
3347 	dispatchqueue_offset = get_dispatchqueue_offset_from_proc(get_bsdtask_info(task));
3348 	/*
3349 	 * Terminate all the other threads in the task.
3350 	 */
3351 	queue_iterate(&task->threads, thread, thread_t, task_threads)
3352 	{
3353 		/*
3354 		 * Remove priority throttles so that threads terminate in a timely manner. This has
3355 		 * to be done after task_hold_locked() traps all threads to AST, but before
3356 		 * threads are marked inactive in thread_terminate_internal(). Takes thread
3357 		 * mutex lock.
3358 		 *
3359 		 * We need task_is_a_corpse() check so that we don't accidently update policy
3360 		 * for tasks that are doing posix_spawn().
3361 		 *
3362 		 * See: thread_policy_update_tasklocked().
3363 		 */
3364 		if (task_is_a_corpse(task)) {
3365 			proc_set_thread_policy(thread, TASK_POLICY_ATTRIBUTE,
3366 			    TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
3367 		}
3368 
3369 		if (should_mark_corpse) {
3370 			thread_mtx_lock(thread);
3371 			thread->inspection = TRUE;
3372 			thread_mtx_unlock(thread);
3373 		}
3374 		if (thread != self) {
3375 			thread_terminate_internal(thread);
3376 		}
3377 	}
3378 	task->dispatchqueue_offset = dispatchqueue_offset;
3379 
3380 	task_release_locked(task);
3381 
3382 	return KERN_SUCCESS;
3383 }
3384 
3385 
3386 /*
3387  * task_complete_halt:
3388  *
3389  *	Complete task halt by waiting for threads to terminate, then clean
3390  *	up task resources (VM, port namespace, etc...) and then let the
3391  *	current thread go in the (practically empty) task context.
3392  *
3393  *	Note: task->halting flag is not cleared in order to avoid creation
3394  *	of new threads in the old exec'ed task.
3395  */
3396 void
3397 task_complete_halt(task_t task)
3398 {
3399 	task_lock(task);
3400 	assert(task->halting);
3401 	assert(task == current_task());
3402 
3403 	/*
3404 	 *	Wait for the other threads to get shut down.
3405 	 *      When the last other thread is reaped, we'll be
3406 	 *	woken up.
3407 	 */
3408 	if (task->thread_count > 1) {
3409 		assert_wait((event_t)&task->halting, THREAD_UNINT);
3410 		task_unlock(task);
3411 		thread_block(THREAD_CONTINUE_NULL);
3412 	} else {
3413 		task_unlock(task);
3414 	}
3415 
3416 #if CONFIG_DEFERRED_RECLAIM
3417 	if (task->deferred_reclamation_metadata) {
3418 		vm_deferred_reclamation_buffer_uninstall(
3419 			task->deferred_reclamation_metadata);
3420 		vm_deferred_reclamation_buffer_deallocate(
3421 			task->deferred_reclamation_metadata);
3422 		task->deferred_reclamation_metadata = NULL;
3423 	}
3424 #endif /* CONFIG_DEFERRED_RECLAIM */
3425 
3426 	/*
3427 	 *	Give the machine dependent code a chance
3428 	 *	to perform cleanup of task-level resources
3429 	 *	associated with the current thread before
3430 	 *	ripping apart the task.
3431 	 */
3432 	machine_task_terminate(task);
3433 
3434 	/*
3435 	 *	Destroy all synchronizers owned by the task.
3436 	 */
3437 	task_synchronizer_destroy_all(task);
3438 
3439 	/* let iokit know 1 */
3440 	iokit_task_terminate(task, 1);
3441 
3442 	/*
3443 	 *	Terminate the IPC space.  A long time ago,
3444 	 *	this used to be ipc_space_clean() which would
3445 	 *	keep the space active but hollow it.
3446 	 *
3447 	 *	We really do not need those semantics given
3448 	 *	that tasks die with exec now.
3449 	 */
3450 	ipc_space_terminate(task->itk_space);
3451 
3452 	/*
3453 	 * Clean out the address space, as we are going to be
3454 	 * getting a new one.
3455 	 */
3456 	vm_map_terminate(task->map);
3457 
3458 	/*
3459 	 * Kick out any IOKitUser handles to the task. At best they're stale,
3460 	 * at worst someone is racing a SUID exec.
3461 	 */
3462 	/* let iokit know 2 */
3463 	iokit_task_terminate(task, 2);
3464 }
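/*
 * A hedged sketch of how the halt pair above is expected to be driven by
 * an exec-like caller (the call-site shape is illustrative; the real call
 * sites live in the BSD exec path):
 *
 *	if (task_start_halt(task) == KERN_SUCCESS) {
 *		// other threads are marked for termination and held
 *		task_complete_halt(task);   // wait, then strip VM/IPC state
 *	}
 */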
3465 
3466 #ifdef CONFIG_TASK_SUSPEND_STATS
3467 
3468 static void
3469 _task_mark_suspend_source(task_t task)
3470 {
3471 	int idx;
3472 	task_suspend_stats_t stats;
3473 	task_suspend_source_t source;
3474 	task_lock_assert_owned(task);
3475 	stats = &task->t_suspend_stats;
3476 
3477 	idx = stats->tss_count % TASK_SUSPEND_SOURCES_MAX;
3478 	source = &task->t_suspend_sources[idx];
3479 	bzero(source, sizeof(*source));
3480 
3481 	source->tss_time = mach_absolute_time();
3482 	source->tss_tid = current_thread()->thread_id;
3483 	source->tss_pid = task_pid(current_task());
3484 	task_best_name(current_task(), source->tss_procname, sizeof(source->tss_procname));
3485 
3486 	stats->tss_count++;
3487 }
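/*
 * The suspend-source records above form a fixed-size ring buffer:
 * suspension n lands in slot n % TASK_SUSPEND_SOURCES_MAX, so only the
 * most recent TASK_SUSPEND_SOURCES_MAX suspenders are kept. For example,
 * if TASK_SUSPEND_SOURCES_MAX were 4 (an illustrative value, not the
 * real one), the fifth suspension would overwrite slot 0.
 */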
3488 
3489 static inline void
3490 _task_mark_suspend_start(task_t task)
3491 {
3492 	task_lock_assert_owned(task);
3493 	task->t_suspend_stats.tss_last_start = mach_absolute_time();
3494 }
3495 
3496 static inline void
3497 _task_mark_suspend_end(task_t task)
3498 {
3499 	task_lock_assert_owned(task);
3500 	task->t_suspend_stats.tss_last_end = mach_absolute_time();
3501 	task->t_suspend_stats.tss_duration += (task->t_suspend_stats.tss_last_end -
3502 	    task->t_suspend_stats.tss_last_start);
3503 }
3504 
3505 static kern_return_t
3506 _task_get_suspend_stats_locked(task_t task, task_suspend_stats_t stats)
3507 {
3508 	if (task == TASK_NULL || stats == NULL) {
3509 		return KERN_INVALID_ARGUMENT;
3510 	}
3511 	task_lock_assert_owned(task);
3512 	memcpy(stats, &task->t_suspend_stats, sizeof(task->t_suspend_stats));
3513 	return KERN_SUCCESS;
3514 }
3515 
3516 static kern_return_t
3517 _task_get_suspend_sources_locked(task_t task, task_suspend_source_t sources)
3518 {
3519 	if (task == TASK_NULL || sources == NULL) {
3520 		return KERN_INVALID_ARGUMENT;
3521 	}
3522 	task_lock_assert_owned(task);
3523 	memcpy(sources, task->t_suspend_sources,
3524 	    sizeof(struct task_suspend_source_s) * TASK_SUSPEND_SOURCES_MAX);
3525 	return KERN_SUCCESS;
3526 }
3527 
3528 #endif /* CONFIG_TASK_SUSPEND_STATS */
3529 
3530 kern_return_t
3531 task_get_suspend_stats(task_t task, task_suspend_stats_t stats)
3532 {
3533 #ifdef CONFIG_TASK_SUSPEND_STATS
3534 	kern_return_t kr;
3535 	if (task == TASK_NULL || stats == NULL) {
3536 		return KERN_INVALID_ARGUMENT;
3537 	}
3538 	task_lock(task);
3539 	kr = _task_get_suspend_stats_locked(task, stats);
3540 	task_unlock(task);
3541 	return kr;
3542 #else /* CONFIG_TASK_SUSPEND_STATS */
3543 	(void)task;
3544 	(void)stats;
3545 	return KERN_NOT_SUPPORTED;
3546 #endif
3547 }
3548 
3549 kern_return_t
3550 task_get_suspend_stats_kdp(task_t task, task_suspend_stats_t stats)
3551 {
3552 #ifdef CONFIG_TASK_SUSPEND_STATS
3553 	if (task == TASK_NULL || stats == NULL) {
3554 		return KERN_INVALID_ARGUMENT;
3555 	}
3556 	memcpy(stats, &task->t_suspend_stats, sizeof(task->t_suspend_stats));
3557 	return KERN_SUCCESS;
3558 #else /* CONFIG_TASK_SUSPEND_STATS */
3559 #pragma unused(task, stats)
3560 	return KERN_NOT_SUPPORTED;
3561 #endif /* CONFIG_TASK_SUSPEND_STATS */
3562 }
3563 
3564 kern_return_t
3565 task_get_suspend_sources(task_t task, task_suspend_source_array_t sources)
3566 {
3567 #ifdef CONFIG_TASK_SUSPEND_STATS
3568 	kern_return_t kr;
3569 	if (task == TASK_NULL || sources == NULL) {
3570 		return KERN_INVALID_ARGUMENT;
3571 	}
3572 	task_lock(task);
3573 	kr = _task_get_suspend_sources_locked(task, sources);
3574 	task_unlock(task);
3575 	return kr;
3576 #else /* CONFIG_TASK_SUSPEND_STATS */
3577 	(void)task;
3578 	(void)sources;
3579 	return KERN_NOT_SUPPORTED;
3580 #endif
3581 }
3582 
3583 kern_return_t
3584 task_get_suspend_sources_kdp(task_t task, task_suspend_source_array_t sources)
3585 {
3586 #ifdef CONFIG_TASK_SUSPEND_STATS
3587 	if (task == TASK_NULL || sources == NULL) {
3588 		return KERN_INVALID_ARGUMENT;
3589 	}
3590 	memcpy(sources, task->t_suspend_sources,
3591 	    sizeof(struct task_suspend_source_s) * TASK_SUSPEND_SOURCES_MAX);
3592 	return KERN_SUCCESS;
3593 #else /* CONFIG_TASK_SUSPEND_STATS */
3594 #pragma unused(task, sources)
3595 	return KERN_NOT_SUPPORTED;
3596 #endif
3597 }
3598 
3599 /*
3600  *	task_hold_locked:
3601  *
3602  *	Suspend execution of the specified task.
3603  *	This is a recursive-style suspension of the task, a count of
3604  *	suspends is maintained.
3605  *
3606  *	CONDITIONS: the task is locked and active.
3607  */
3608 void
3609 task_hold_locked(
3610 	task_t          task)
3611 {
3612 	thread_t        thread;
3613 	void *bsd_info = get_bsdtask_info(task);
3614 
3615 	assert(task->active);
3616 
3617 	if (task->suspend_count++ > 0) {
3618 		return;
3619 	}
3620 
3621 	if (bsd_info) {
3622 		workq_proc_suspended(bsd_info);
3623 	}
3624 
3625 	/*
3626 	 *	Iterate through all the threads and hold them.
3627 	 */
3628 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
3629 		thread_mtx_lock(thread);
3630 		thread_hold(thread);
3631 		thread_mtx_unlock(thread);
3632 	}
3633 
3634 #ifdef CONFIG_TASK_SUSPEND_STATS
3635 	_task_mark_suspend_start(task);
3636 #endif
3637 }
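/*
 * Holds nest via suspend_count: only the 0 -> 1 transition above stops
 * the threads, and only the matching 1 -> 0 transition in
 * task_release_locked() lets them run again. An illustrative sequence,
 * with the task locked throughout:
 *
 *	task_hold_locked(task);    // suspend_count 0 -> 1, threads held
 *	task_hold_locked(task);    // suspend_count 1 -> 2, no thread work
 *	task_release_locked(task); // suspend_count 2 -> 1, still held
 *	task_release_locked(task); // suspend_count 1 -> 0, threads released
 */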
3638 
3639 /*
3640  *	task_hold_and_wait
3641  *
3642  *	Same as the internal routine above, except that it must lock
3643  *	and verify that the task is active.  This differs from task_suspend
3644  *	in that it places a kernel hold on the task rather than just a
3645  *	user-level hold.  This keeps users from over-resuming and setting
3646  *	it running out from under the kernel.
3647  *
3648  *      CONDITIONS: the caller holds a reference on the task
3649  */
3650 kern_return_t
3651 task_hold_and_wait(
3652 	task_t          task)
3653 {
3654 	if (task == TASK_NULL) {
3655 		return KERN_INVALID_ARGUMENT;
3656 	}
3657 
3658 	task_lock(task);
3659 	if (!task->active) {
3660 		task_unlock(task);
3661 		return KERN_FAILURE;
3662 	}
3663 
3664 #ifdef CONFIG_TASK_SUSPEND_STATS
3665 	_task_mark_suspend_source(task);
3666 #endif /* CONFIG_TASK_SUSPEND_STATS */
3667 
3668 	task_hold_locked(task);
3669 	task_wait_locked(task, FALSE);
3670 	task_unlock(task);
3671 
3672 	return KERN_SUCCESS;
3673 }
3674 
3675 /*
3676  *	task_wait_locked:
3677  *
3678  *	Wait for all threads in task to stop.
3679  *
3680  * Conditions:
3681  *	Called with task locked, active, and held.
3682  */
3683 void
3684 task_wait_locked(
3685 	task_t          task,
3686 	boolean_t               until_not_runnable)
3687 {
3688 	thread_t        thread, self;
3689 
3690 	assert(task->active);
3691 	assert(task->suspend_count > 0);
3692 
3693 	self = current_thread();
3694 
3695 	/*
3696 	 *	Iterate through all the threads and wait for them to
3697 	 *	stop.  Do not wait for the current thread if it is within
3698 	 *	the task.
3699 	 */
3700 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
3701 		if (thread != self) {
3702 			thread_wait(thread, until_not_runnable);
3703 		}
3704 	}
3705 }
3706 
3707 boolean_t
3708 task_is_app_suspended(task_t task)
3709 {
3710 	return task->pidsuspended;
3711 }
3712 
3713 /*
3714  *	task_release_locked:
3715  *
3716  *	Release a kernel hold on a task.
3717  *
3718  *      CONDITIONS: the task is locked and active
3719  */
3720 void
3721 task_release_locked(
3722 	task_t          task)
3723 {
3724 	thread_t        thread;
3725 	void *bsd_info = get_bsdtask_info(task);
3726 
3727 	assert(task->active);
3728 	assert(task->suspend_count > 0);
3729 
3730 	if (--task->suspend_count > 0) {
3731 		return;
3732 	}
3733 
3734 	if (bsd_info) {
3735 		workq_proc_resumed(bsd_info);
3736 	}
3737 
3738 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
3739 		thread_mtx_lock(thread);
3740 		thread_release(thread);
3741 		thread_mtx_unlock(thread);
3742 	}
3743 
3744 #if CONFIG_TASK_SUSPEND_STATS
3745 	_task_mark_suspend_end(task);
3746 #endif
3747 }
3748 
3749 /*
3750  *	task_release:
3751  *
3752  *	Same as the internal routine above, except that it must lock
3753  *	and verify that the task is active.
3754  *
3755  *      CONDITIONS: The caller holds a reference to the task
3756  */
3757 kern_return_t
3758 task_release(
3759 	task_t          task)
3760 {
3761 	if (task == TASK_NULL) {
3762 		return KERN_INVALID_ARGUMENT;
3763 	}
3764 
3765 	task_lock(task);
3766 
3767 	if (!task->active) {
3768 		task_unlock(task);
3769 
3770 		return KERN_FAILURE;
3771 	}
3772 
3773 	task_release_locked(task);
3774 	task_unlock(task);
3775 
3776 	return KERN_SUCCESS;
3777 }
3778 
3779 static kern_return_t
3780 task_threads_internal(
3781 	task_t                      task,
3782 	thread_act_array_t         *threads_out,
3783 	mach_msg_type_number_t     *countp,
3784 	mach_thread_flavor_t        flavor)
3785 {
3786 	mach_msg_type_number_t  actual, count, count_needed;
3787 	thread_t               *thread_list;
3788 	thread_t                thread;
3789 	unsigned int            i;
3790 
3791 	count = 0;
3792 	thread_list = NULL;
3793 
3794 	if (task == TASK_NULL) {
3795 		return KERN_INVALID_ARGUMENT;
3796 	}
3797 
3798 	assert(flavor <= THREAD_FLAVOR_INSPECT);
3799 
3800 	for (;;) {
3801 		task_lock(task);
3802 		if (!task->active) {
3803 			task_unlock(task);
3804 
3805 			kfree_type(thread_t, count, thread_list);
3806 			return KERN_FAILURE;
3807 		}
3808 
3809 		count_needed = actual = task->thread_count;
3810 		if (count_needed <= count) {
3811 			break;
3812 		}
3813 
3814 		/* unlock the task and allocate more memory */
3815 		task_unlock(task);
3816 
3817 		kfree_type(thread_t, count, thread_list);
3818 		count = count_needed;
3819 		thread_list = kalloc_type(thread_t, count, Z_WAITOK);
3820 
3821 		if (thread_list == NULL) {
3822 			return KERN_RESOURCE_SHORTAGE;
3823 		}
3824 	}
3825 
3826 	i = 0;
3827 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
3828 		assert(i < actual);
3829 		thread_reference(thread);
3830 		thread_list[i++] = thread;
3831 	}
3832 
3833 	count_needed = actual;
3834 
3835 	/* can unlock task now that we've got the thread refs */
3836 	task_unlock(task);
3837 
3838 	if (actual == 0) {
3839 		/* no threads, so return null pointer and deallocate memory */
3840 
3841 		*threads_out = NULL;
3842 		*countp = 0;
3843 		kfree_type(thread_t, count, thread_list);
3844 	} else {
3845 		/* if we allocated too much, must copy */
3846 		if (count_needed < count) {
3847 			void *newaddr;
3848 
3849 			newaddr = kalloc_type(thread_t, count_needed, Z_WAITOK);
3850 			if (newaddr == NULL) {
3851 				for (i = 0; i < actual; ++i) {
3852 					thread_deallocate(thread_list[i]);
3853 				}
3854 				kfree_type(thread_t, count, thread_list);
3855 				return KERN_RESOURCE_SHORTAGE;
3856 			}
3857 
3858 			bcopy(thread_list, newaddr, count_needed * sizeof(thread_t));
3859 			kfree_type(thread_t, count, thread_list);
3860 			thread_list = (thread_t *)newaddr;
3861 		}
3862 
3863 		*threads_out = thread_list;
3864 		*countp = actual;
3865 
3866 		/* do the conversion that MIG should handle */
3867 
3868 		switch (flavor) {
3869 		case THREAD_FLAVOR_CONTROL:
3870 			if (task == current_task()) {
3871 				for (i = 0; i < actual; ++i) {
3872 					((ipc_port_t *) thread_list)[i] = convert_thread_to_port_pinned(thread_list[i]);
3873 				}
3874 			} else {
3875 				for (i = 0; i < actual; ++i) {
3876 					((ipc_port_t *) thread_list)[i] = convert_thread_to_port(thread_list[i]);
3877 				}
3878 			}
3879 			break;
3880 		case THREAD_FLAVOR_READ:
3881 			for (i = 0; i < actual; ++i) {
3882 				((ipc_port_t *) thread_list)[i] = convert_thread_read_to_port(thread_list[i]);
3883 			}
3884 			break;
3885 		case THREAD_FLAVOR_INSPECT:
3886 			for (i = 0; i < actual; ++i) {
3887 				((ipc_port_t *) thread_list)[i] = convert_thread_inspect_to_port(thread_list[i]);
3888 			}
3889 			break;
3890 		}
3891 	}
3892 
3893 	return KERN_SUCCESS;
3894 }
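/*
 * The for (;;) loop above is the usual "guess, allocate, re-check" sizing
 * pattern: the thread count can change while the task is unlocked, so the
 * allocation is retried until it is at least as large as the thread_count
 * observed under the lock. A stripped-down sketch of the pattern (names
 * are illustrative, not real routines):
 *
 *	for (;;) {
 *		lock();
 *		needed = object_count();
 *		if (needed <= allocated) {
 *			break;          // buffer is big enough, keep lock
 *		}
 *		unlock();
 *		grow_buffer(&allocated, needed);
 *	}
 */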
3895 
3896 kern_return_t
3897 task_threads(
3898 	task_t                      task,
3899 	thread_act_array_t         *threads_out,
3900 	mach_msg_type_number_t     *count)
3901 {
3902 	return task_threads_internal(task, threads_out, count, THREAD_FLAVOR_CONTROL);
3903 }
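/*
 * A hedged sketch of a user-space caller of the MIG-generated
 * task_threads() routine. The caller receives one send right per thread
 * plus an out-of-line array, and is responsible for releasing both:
 *
 *	thread_act_array_t threads;
 *	mach_msg_type_number_t count;
 *	if (task_threads(task, &threads, &count) == KERN_SUCCESS) {
 *		for (mach_msg_type_number_t i = 0; i < count; i++) {
 *			mach_port_deallocate(mach_task_self(), threads[i]);
 *		}
 *		vm_deallocate(mach_task_self(), (vm_address_t)threads,
 *		    count * sizeof(threads[0]));
 *	}
 */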
3904 
3905 
3906 kern_return_t
3907 task_threads_from_user(
3908 	mach_port_t                 port,
3909 	thread_act_array_t         *threads_out,
3910 	mach_msg_type_number_t     *count)
3911 {
3912 	ipc_kobject_type_t kotype;
3913 	kern_return_t kr;
3914 
3915 	task_t task = convert_port_to_task_inspect_no_eval(port);
3916 
3917 	if (task == TASK_NULL) {
3918 		return KERN_INVALID_ARGUMENT;
3919 	}
3920 
3921 	kotype = ip_kotype(port);
3922 
3923 	switch (kotype) {
3924 	case IKOT_TASK_CONTROL:
3925 		kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_CONTROL);
3926 		break;
3927 	case IKOT_TASK_READ:
3928 		kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_READ);
3929 		break;
3930 	case IKOT_TASK_INSPECT:
3931 		kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_INSPECT);
3932 		break;
3933 	default:
3934 		panic("strange kobject type");
3935 		break;
3936 	}
3937 
3938 	task_deallocate(task);
3939 	return kr;
3940 }
3941 
3942 #define TASK_HOLD_NORMAL        0
3943 #define TASK_HOLD_PIDSUSPEND    1
3944 #define TASK_HOLD_LEGACY        2
3945 #define TASK_HOLD_LEGACY_ALL    3
3946 
3947 static kern_return_t
3948 place_task_hold(
3949 	task_t task,
3950 	int mode)
3951 {
3952 	if (!task->active && !task_is_a_corpse(task)) {
3953 		return KERN_FAILURE;
3954 	}
3955 
3956 	/* Return success for corpse task */
3957 	if (task_is_a_corpse(task)) {
3958 		return KERN_SUCCESS;
3959 	}
3960 
3961 	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_IPC, MACH_TASK_SUSPEND),
3962 	    task_pid(task),
3963 	    task->thread_count > 0 ?((thread_t)queue_first(&task->threads))->thread_id : 0,
3964 	    task->user_stop_count, task->user_stop_count + 1);
3965 
3966 #if MACH_ASSERT
3967 	current_task()->suspends_outstanding++;
3968 #endif
3969 
3970 	if (mode == TASK_HOLD_LEGACY) {
3971 		task->legacy_stop_count++;
3972 	}
3973 
3974 #ifdef CONFIG_TASK_SUSPEND_STATS
3975 	_task_mark_suspend_source(task);
3976 #endif /* CONFIG_TASK_SUSPEND_STATS */
3977 
3978 	if (task->user_stop_count++ > 0) {
3979 		/*
3980 		 *	If the stop count was positive, the task is
3981 		 *	already stopped and we can exit.
3982 		 */
3983 		return KERN_SUCCESS;
3984 	}
3985 
3986 	/*
3987 	 * Put a kernel-level hold on the threads in the task (all
3988 	 * user-level task suspensions added together represent a
3989 	 * single kernel-level hold).  We then wait for the threads
3990 	 * to stop executing user code.
3991 	 */
3992 	task_hold_locked(task);
3993 	task_wait_locked(task, FALSE);
3994 
3995 	return KERN_SUCCESS;
3996 }
3997 
3998 static kern_return_t
3999 release_task_hold(
4000 	task_t          task,
4001 	int                     mode)
4002 {
4003 	boolean_t release = FALSE;
4004 
4005 	if (!task->active && !task_is_a_corpse(task)) {
4006 		return KERN_FAILURE;
4007 	}
4008 
4009 	/* Return success for corpse task */
4010 	if (task_is_a_corpse(task)) {
4011 		return KERN_SUCCESS;
4012 	}
4013 
4014 	if (mode == TASK_HOLD_PIDSUSPEND) {
4015 		if (task->pidsuspended == FALSE) {
4016 			return KERN_FAILURE;
4017 		}
4018 		task->pidsuspended = FALSE;
4019 	}
4020 
4021 	if (task->user_stop_count > (task->pidsuspended ? 1 : 0)) {
4022 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
4023 		    MACHDBG_CODE(DBG_MACH_IPC, MACH_TASK_RESUME) | DBG_FUNC_NONE,
4024 		    task_pid(task), ((thread_t)queue_first(&task->threads))->thread_id,
4025 		    task->user_stop_count, mode, task->legacy_stop_count);
4026 
4027 #if MACH_ASSERT
4028 		/*
4029 		 * This is obviously not robust; if we suspend one task and then resume a different one,
4030 		 * we'll fly under the radar. This is only meant to catch the common case of a crashed
4031 		 * or buggy suspender.
4032 		 */
4033 		current_task()->suspends_outstanding--;
4034 #endif
4035 
4036 		if (mode == TASK_HOLD_LEGACY_ALL) {
4037 			if (task->legacy_stop_count >= task->user_stop_count) {
4038 				task->user_stop_count = 0;
4039 				release = TRUE;
4040 			} else {
4041 				task->user_stop_count -= task->legacy_stop_count;
4042 			}
4043 			task->legacy_stop_count = 0;
4044 		} else {
4045 			if (mode == TASK_HOLD_LEGACY && task->legacy_stop_count > 0) {
4046 				task->legacy_stop_count--;
4047 			}
4048 			if (--task->user_stop_count == 0) {
4049 				release = TRUE;
4050 			}
4051 		}
4052 	} else {
4053 		return KERN_FAILURE;
4054 	}
4055 
4056 	/*
4057 	 *	Release the task if necessary.
4058 	 */
4059 	if (release) {
4060 		task_release_locked(task);
4061 	}
4062 
4063 	return KERN_SUCCESS;
4064 }
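/*
 * Stop-count bookkeeping, illustrated as (user_stop_count /
 * legacy_stop_count) transitions with the task locked:
 *
 *	place_task_hold(t, TASK_HOLD_LEGACY);    // 1 / 1, threads held
 *	place_task_hold(t, TASK_HOLD_NORMAL);    // 2 / 1
 *	release_task_hold(t, TASK_HOLD_LEGACY);  // 1 / 0, still held
 *	release_task_hold(t, TASK_HOLD_NORMAL);  // 0 / 0, threads released
 */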
4065 
4066 boolean_t
4067 get_task_suspended(task_t task)
4068 {
4069 	return 0 != task->user_stop_count;
4070 }
4071 
4072 /*
4073  *	task_suspend:
4074  *
4075  *	Implement an (old-fashioned) user-level suspension on a task.
4076  *
4077  *	Because the user isn't expecting to have to manage a suspension
4078  *	token, we'll track it for them in the kernel in the form of a naked
4079  *	send right to the task's resume port.  All such send rights
4080  *	account for a single suspension against the task (unlike task_suspend2()
4081  *	where each caller gets a unique suspension count represented by a
4082  *	unique send-once right).
4083  *
4084  * Conditions:
4085  *      The caller holds a reference to the task
4086  */
4087 kern_return_t
4088 task_suspend(
4089 	task_t          task)
4090 {
4091 	kern_return_t                   kr;
4092 	mach_port_t                     port;
4093 	mach_port_name_t                name;
4094 
4095 	if (task == TASK_NULL || task == kernel_task) {
4096 		return KERN_INVALID_ARGUMENT;
4097 	}
4098 
4099 	/*
4100 	 * place a legacy hold on the task.
4101 	 */
4102 	task_lock(task);
4103 	kr = place_task_hold(task, TASK_HOLD_LEGACY);
4104 	task_unlock(task);
4105 
4106 	if (kr != KERN_SUCCESS) {
4107 		return kr;
4108 	}
4109 
4110 	/*
4111 	 * Claim a send right on the task resume port, and request a no-senders
4112 	 * notification on that port (if none outstanding).
4113 	 */
4114 	itk_lock(task);
4115 	port = task->itk_resume;
4116 	if (port == IP_NULL) {
4117 		port = ipc_kobject_alloc_port(task, IKOT_TASK_RESUME,
4118 		    IPC_KOBJECT_ALLOC_NSREQUEST | IPC_KOBJECT_ALLOC_MAKE_SEND);
4119 		task->itk_resume = port;
4120 	} else {
4121 		(void)ipc_kobject_make_send_nsrequest(port, task, IKOT_TASK_RESUME);
4122 	}
4123 	itk_unlock(task);
4124 
4125 	/*
4126 	 * Copyout the send right into the calling task's IPC space.  It won't know it is there,
4127 	 * but we'll look it up when calling a traditional resume.  Any IPC operations that
4128 	 * deallocate the send right will auto-release the suspension.
4129 	 */
4130 	if (IP_VALID(port)) {
4131 		kr = ipc_object_copyout(current_space(), ip_to_object(port),
4132 		    MACH_MSG_TYPE_MOVE_SEND, IPC_OBJECT_COPYOUT_FLAGS_NONE,
4133 		    NULL, NULL, &name);
4134 	} else {
4135 		kr = KERN_SUCCESS;
4136 	}
4137 	if (kr != KERN_SUCCESS) {
4138 		printf("warning: %s(%d) failed to copyout suspension "
4139 		    "token for pid %d with error: %d\n",
4140 		    proc_name_address(get_bsdtask_info(current_task())),
4141 		    proc_pid(get_bsdtask_info(current_task())),
4142 		    task_pid(task), kr);
4143 	}
4144 
4145 	return kr;
4146 }
4147 
4148 /*
4149  *	task_resume:
4150  *		Release a user hold on a task.
4151  *
4152  * Conditions:
4153  *		The caller holds a reference to the task
4154  */
4155 kern_return_t
4156 task_resume(
4157 	task_t  task)
4158 {
4159 	kern_return_t    kr;
4160 	mach_port_name_t resume_port_name;
4161 	ipc_entry_t              resume_port_entry;
4162 	ipc_space_t              space = current_task()->itk_space;
4163 
4164 	if (task == TASK_NULL || task == kernel_task) {
4165 		return KERN_INVALID_ARGUMENT;
4166 	}
4167 
4168 	/* release a legacy task hold */
4169 	task_lock(task);
4170 	kr = release_task_hold(task, TASK_HOLD_LEGACY);
4171 	task_unlock(task);
4172 
4173 	itk_lock(task); /* for itk_resume */
4174 	is_write_lock(space); /* spin lock */
4175 	if (is_active(space) && IP_VALID(task->itk_resume) &&
4176 	    ipc_hash_lookup(space, ip_to_object(task->itk_resume), &resume_port_name, &resume_port_entry) == TRUE) {
4177 		/*
4178 		 * We found a suspension token in the caller's IPC space. Release a send right to indicate that
4179 		 * we are holding one less legacy hold on the task from this caller.  If the release failed,
4180 		 * go ahead and drop all the rights, as someone either already released our holds or the task
4181 		 * is gone.
4182 		 */
4183 		itk_unlock(task);
4184 		if (kr == KERN_SUCCESS) {
4185 			ipc_right_dealloc(space, resume_port_name, resume_port_entry);
4186 		} else {
4187 			ipc_right_destroy(space, resume_port_name, resume_port_entry, FALSE, 0);
4188 		}
4189 		/* space unlocked */
4190 	} else {
4191 		itk_unlock(task);
4192 		is_write_unlock(space);
4193 		if (kr == KERN_SUCCESS) {
4194 			printf("warning: %s(%d) performed out-of-band resume on pid %d\n",
4195 			    proc_name_address(get_bsdtask_info(current_task())), proc_pid(get_bsdtask_info(current_task())),
4196 			    task_pid(task));
4197 		}
4198 	}
4199 
4200 	return kr;
4201 }
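/*
 * A hedged sketch of the legacy pair from a client's point of view: each
 * successful task_suspend() is balanced by one task_resume(), with the
 * kernel tracking the hidden resume-port send right on the caller's
 * behalf (inspect() is a hypothetical placeholder):
 *
 *	if (task_suspend(target) == KERN_SUCCESS) {
 *		inspect(target);
 *		(void)task_resume(target);
 *	}
 */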
4202 
4203 /*
4204  * Suspend a task that is already protected by a held lock.
4205  * Making/holding a token/reference/port is the caller's responsibility.
4206  */
4207 kern_return_t
4208 task_suspend_internal_locked(task_t task)
4209 {
4210 	if (task == TASK_NULL || task == kernel_task) {
4211 		return KERN_INVALID_ARGUMENT;
4212 	}
4213 
4214 	return place_task_hold(task, TASK_HOLD_NORMAL);
4215 }
4216 
4217 /*
4218  * Suspend a task.
4219  * Making/holding a token/reference/port is the caller's responsibility.
4220  */
4221 kern_return_t
4222 task_suspend_internal(task_t task)
4223 {
4224 	kern_return_t    kr;
4225 
4226 	if (task == TASK_NULL || task == kernel_task) {
4227 		return KERN_INVALID_ARGUMENT;
4228 	}
4229 
4230 	task_lock(task);
4231 	kr = task_suspend_internal_locked(task);
4232 	task_unlock(task);
4233 	return kr;
4234 }
4235 
4236 /*
4237  * Suspend the target task, and return a suspension token. The token
4238  * represents a reference on the suspended task.
4239  */
4240 static kern_return_t
4241 task_suspend2_grp(
4242 	task_t                  task,
4243 	task_suspension_token_t *suspend_token,
4244 	task_grp_t              grp)
4245 {
4246 	kern_return_t    kr;
4247 
4248 	kr = task_suspend_internal(task);
4249 	if (kr != KERN_SUCCESS) {
4250 		*suspend_token = TASK_NULL;
4251 		return kr;
4252 	}
4253 
4254 	/*
4255 	 * Take a reference on the target task and return that to the caller
4256 	 * as a "suspension token," which can be converted into an SO right to
4257 	 * the now-suspended task's resume port.
4258 	 */
4259 	task_reference_grp(task, grp);
4260 	*suspend_token = task;
4261 
4262 	return KERN_SUCCESS;
4263 }
4264 
4265 kern_return_t
4266 task_suspend2_mig(
4267 	task_t                  task,
4268 	task_suspension_token_t *suspend_token)
4269 {
4270 	return task_suspend2_grp(task, suspend_token, TASK_GRP_MIG);
4271 }
4272 
4273 kern_return_t
4274 task_suspend2_external(
4275 	task_t                  task,
4276 	task_suspension_token_t *suspend_token)
4277 {
4278 	return task_suspend2_grp(task, suspend_token, TASK_GRP_EXTERNAL);
4279 }
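/*
 * A hedged sketch of the token-based variant, as a kernel-side caller of
 * the _external entry points might use it; unlike task_suspend(), every
 * caller gets its own token, and task_resume2_external() both resumes
 * the task and consumes the token's reference:
 *
 *	task_suspension_token_t token;
 *	if (task_suspend2_external(target, &token) == KERN_SUCCESS) {
 *		// ... target is suspended here ...
 *		(void)task_resume2_external(token);
 *	}
 */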
4280 
4281 /*
4282  * Resume a task that is already protected by a held lock.
4283  * (reference/token/port management is caller's responsibility).
4284  */
4285 kern_return_t
4286 task_resume_internal_locked(
4287 	task_suspension_token_t         task)
4288 {
4289 	if (task == TASK_NULL || task == kernel_task) {
4290 		return KERN_INVALID_ARGUMENT;
4291 	}
4292 
4293 	return release_task_hold(task, TASK_HOLD_NORMAL);
4294 }
4295 
4296 /*
4297  * Resume a task.
4298  * (reference/token/port management is caller's responsibility).
4299  */
4300 kern_return_t
4301 task_resume_internal(
4302 	task_suspension_token_t         task)
4303 {
4304 	kern_return_t kr;
4305 
4306 	if (task == TASK_NULL || task == kernel_task) {
4307 		return KERN_INVALID_ARGUMENT;
4308 	}
4309 
4310 	task_lock(task);
4311 	kr = task_resume_internal_locked(task);
4312 	task_unlock(task);
4313 	return kr;
4314 }
4315 
4316 /*
4317  * Resume the task using a suspension token. Consumes the token's ref.
4318  */
4319 static kern_return_t
4320 task_resume2_grp(
4321 	task_suspension_token_t         task,
4322 	task_grp_t                      grp)
4323 {
4324 	kern_return_t kr;
4325 
4326 	kr = task_resume_internal(task);
4327 	task_suspension_token_deallocate_grp(task, grp);
4328 
4329 	return kr;
4330 }
4331 
4332 kern_return_t
4333 task_resume2_mig(
4334 	task_suspension_token_t         task)
4335 {
4336 	return task_resume2_grp(task, TASK_GRP_MIG);
4337 }
4338 
4339 kern_return_t
4340 task_resume2_external(
4341 	task_suspension_token_t         task)
4342 {
4343 	return task_resume2_grp(task, TASK_GRP_EXTERNAL);
4344 }
4345 
4346 static void
4347 task_suspension_no_senders(ipc_port_t port, mach_port_mscount_t mscount)
4348 {
4349 	task_t task = convert_port_to_task_suspension_token(port);
4350 	kern_return_t kr;
4351 
4352 	if (task == TASK_NULL) {
4353 		return;
4354 	}
4355 
4356 	if (task == kernel_task) {
4357 		task_suspension_token_deallocate(task);
4358 		return;
4359 	}
4360 
4361 	task_lock(task);
4362 
4363 	kr = ipc_kobject_nsrequest(port, mscount, NULL);
4364 	if (kr == KERN_FAILURE) {
4365 		/* release all the [remaining] outstanding legacy holds */
4366 		release_task_hold(task, TASK_HOLD_LEGACY_ALL);
4367 	}
4368 
4369 	task_unlock(task);
4370 
4371 	task_suspension_token_deallocate(task);         /* drop token reference */
4372 }
4373 
4374 /*
4375  * Fires when a send-once right made
4376  * by convert_task_suspension_token_to_port() dies.
4377  */
4378 void
4379 task_suspension_send_once(ipc_port_t port)
4380 {
4381 	task_t task = convert_port_to_task_suspension_token(port);
4382 
4383 	if (task == TASK_NULL || task == kernel_task) {
4384 		return; /* nothing to do */
4385 	}
4386 
4387 	/* release the hold held by this specific send-once right */
4388 	task_lock(task);
4389 	release_task_hold(task, TASK_HOLD_NORMAL);
4390 	task_unlock(task);
4391 
4392 	task_suspension_token_deallocate(task);         /* drop token reference */
4393 }
4394 
4395 static kern_return_t
4396 task_pidsuspend_locked(task_t task)
4397 {
4398 	kern_return_t kr;
4399 
4400 	if (task->pidsuspended) {
4401 		kr = KERN_FAILURE;
4402 		goto out;
4403 	}
4404 
4405 	task->pidsuspended = TRUE;
4406 
4407 	kr = place_task_hold(task, TASK_HOLD_PIDSUSPEND);
4408 	if (kr != KERN_SUCCESS) {
4409 		task->pidsuspended = FALSE;
4410 	}
4411 out:
4412 	return kr;
4413 }
4414 
4415 
4416 /*
4417  *	task_pidsuspend:
4418  *
4419  *	Suspends a task by placing a hold on its threads.
4420  *
4421  * Conditions:
4422  *      The caller holds a reference to the task
4423  */
4424 kern_return_t
4425 task_pidsuspend(
4426 	task_t          task)
4427 {
4428 	kern_return_t    kr;
4429 
4430 	if (task == TASK_NULL || task == kernel_task) {
4431 		return KERN_INVALID_ARGUMENT;
4432 	}
4433 
4434 	task_lock(task);
4435 
4436 	kr = task_pidsuspend_locked(task);
4437 
4438 	task_unlock(task);
4439 
4440 	if ((KERN_SUCCESS == kr) && task->message_app_suspended) {
4441 		iokit_task_app_suspended_changed(task);
4442 	}
4443 
4444 	return kr;
4445 }
4446 
4447 /*
4448  *	task_pidresume:
4449  *		Resumes a previously suspended task.
4450  *
4451  * Conditions:
4452  *		The caller holds a reference to the task
4453  */
4454 kern_return_t
4455 task_pidresume(
4456 	task_t  task)
4457 {
4458 	kern_return_t    kr;
4459 
4460 	if (task == TASK_NULL || task == kernel_task) {
4461 		return KERN_INVALID_ARGUMENT;
4462 	}
4463 
4464 	task_lock(task);
4465 
4466 #if CONFIG_FREEZE
4467 
4468 	while (task->changing_freeze_state) {
4469 		assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
4470 		task_unlock(task);
4471 		thread_block(THREAD_CONTINUE_NULL);
4472 
4473 		task_lock(task);
4474 	}
4475 	task->changing_freeze_state = TRUE;
4476 #endif
4477 
4478 	kr = release_task_hold(task, TASK_HOLD_PIDSUSPEND);
4479 
4480 	task_unlock(task);
4481 
4482 	if ((KERN_SUCCESS == kr) && task->message_app_suspended) {
4483 		iokit_task_app_suspended_changed(task);
4484 	}
4485 
4486 #if CONFIG_FREEZE
4487 
4488 	task_lock(task);
4489 
4490 	if (kr == KERN_SUCCESS) {
4491 		task->frozen = FALSE;
4492 	}
4493 	task->changing_freeze_state = FALSE;
4494 	thread_wakeup(&task->changing_freeze_state);
4495 
4496 	task_unlock(task);
4497 #endif
4498 
4499 	return kr;
4500 }
4501 
4502 os_refgrp_decl(static, task_watchports_refgrp, "task_watchports", NULL);
4503 
4504 /*
4505  *	task_add_turnstile_watchports:
4506  *		Setup watchports to boost the main thread of the task.
4507  *
4508  *	Arguments:
4509  *		task: task being spawned
4510  *		thread: main thread of task
4511  *		portwatch_ports: array of watchports
4512  *		portwatch_count: number of watchports
4513  *
4514  *	Conditions:
4515  *		Nothing locked.
4516  */
4517 void
4518 task_add_turnstile_watchports(
4519 	task_t          task,
4520 	thread_t        thread,
4521 	ipc_port_t      *portwatch_ports,
4522 	uint32_t        portwatch_count)
4523 {
4524 	struct task_watchports *watchports = NULL;
4525 	struct task_watchport_elem *previous_elem_array[TASK_MAX_WATCHPORT_COUNT] = {};
4526 	os_ref_count_t refs;
4527 
4528 	/* Check if the task has terminated */
4529 	if (!task->active) {
4530 		return;
4531 	}
4532 
4533 	assert(portwatch_count <= TASK_MAX_WATCHPORT_COUNT);
4534 
4535 	watchports = task_watchports_alloc_init(task, thread, portwatch_count);
4536 
4537 	/* Lock the ipc space */
4538 	is_write_lock(task->itk_space);
4539 
4540 	/* Setup watchports to boost the main thread */
4541 	refs = task_add_turnstile_watchports_locked(task,
4542 	    watchports, previous_elem_array, portwatch_ports,
4543 	    portwatch_count);
4544 
4545 	/* Drop the space lock */
4546 	is_write_unlock(task->itk_space);
4547 
4548 	if (refs == 0) {
4549 		task_watchports_deallocate(watchports);
4550 	}
4551 
4552 	/* Drop the ref on previous_elem_array */
4553 	for (uint32_t i = 0; i < portwatch_count && previous_elem_array[i] != NULL; i++) {
4554 		task_watchport_elem_deallocate(previous_elem_array[i]);
4555 	}
4556 }
4557 
4558 /*
4559  *	task_remove_turnstile_watchports:
4560  *		Clear all turnstile boost on the task from watchports.
4561  *
4562  *	Arguments:
4563  *		task: task being terminated
4564  *
4565  *	Conditions:
4566  *		Nothing locked.
4567  */
4568 void
4569 task_remove_turnstile_watchports(
4570 	task_t          task)
4571 {
4572 	os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4573 	struct task_watchports *watchports = NULL;
4574 	ipc_port_t port_freelist[TASK_MAX_WATCHPORT_COUNT] = {};
4575 	uint32_t portwatch_count;
4576 
4577 	/* Lock the ipc space */
4578 	is_write_lock(task->itk_space);
4579 
4580 	/* Check if watchport boost exist */
4581 	if (task->watchports == NULL) {
4582 		is_write_unlock(task->itk_space);
4583 		return;
4584 	}
4585 	watchports = task->watchports;
4586 	portwatch_count = watchports->tw_elem_array_count;
4587 
4588 	refs = task_remove_turnstile_watchports_locked(task, watchports,
4589 	    port_freelist);
4590 
4591 	is_write_unlock(task->itk_space);
4592 
4593 	/* Drop all the port references */
4594 	for (uint32_t i = 0; i < portwatch_count && port_freelist[i] != NULL; i++) {
4595 		ip_release(port_freelist[i]);
4596 	}
4597 
4598 	/* Clear the task and thread references for task_watchport */
4599 	if (refs == 0) {
4600 		task_watchports_deallocate(watchports);
4601 	}
4602 }
4603 
4604 /*
4605  *	task_transfer_turnstile_watchports:
4606  *		Transfer all watchport turnstile boost from old task to new task.
4607  *
4608  *	Arguments:
4609  *		old_task: task calling exec
4610  *		new_task: new exec'ed task
4611  *		thread: main thread of new task
4612  *
4613  *	Conditions:
4614  *		Nothing locked.
4615  */
4616 void
4617 task_transfer_turnstile_watchports(
4618 	task_t   old_task,
4619 	task_t   new_task,
4620 	thread_t new_thread)
4621 {
4622 	struct task_watchports *old_watchports = NULL;
4623 	struct task_watchports *new_watchports = NULL;
4624 	os_ref_count_t old_refs = TASK_MAX_WATCHPORT_COUNT;
4625 	os_ref_count_t new_refs = TASK_MAX_WATCHPORT_COUNT;
4626 	uint32_t portwatch_count;
4627 
4628 	if (old_task->watchports == NULL || !new_task->active) {
4629 		return;
4630 	}
4631 
4632 	/* Get the watch port count from the old task */
4633 	is_write_lock(old_task->itk_space);
4634 	if (old_task->watchports == NULL) {
4635 		is_write_unlock(old_task->itk_space);
4636 		return;
4637 	}
4638 
4639 	portwatch_count = old_task->watchports->tw_elem_array_count;
4640 	is_write_unlock(old_task->itk_space);
4641 
4642 	new_watchports = task_watchports_alloc_init(new_task, new_thread, portwatch_count);
4643 
4644 	/* Lock the ipc space for old task */
4645 	is_write_lock(old_task->itk_space);
4646 
4647 	/* Lock the ipc space for new task */
4648 	is_write_lock(new_task->itk_space);
4649 
4650 	/* Check if watchport boost exist */
4651 	if (old_task->watchports == NULL || !new_task->active) {
4652 		is_write_unlock(new_task->itk_space);
4653 		is_write_unlock(old_task->itk_space);
4654 		(void)task_watchports_release(new_watchports);
4655 		task_watchports_deallocate(new_watchports);
4656 		return;
4657 	}
4658 
4659 	old_watchports = old_task->watchports;
4660 	assert(portwatch_count == old_task->watchports->tw_elem_array_count);
4661 
4662 	/* Setup new task watchports */
4663 	new_task->watchports = new_watchports;
4664 
4665 	for (uint32_t i = 0; i < portwatch_count; i++) {
4666 		ipc_port_t port = old_watchports->tw_elem[i].twe_port;
4667 
4668 		if (port == NULL) {
4669 			task_watchport_elem_clear(&new_watchports->tw_elem[i]);
4670 			continue;
4671 		}
4672 
4673 		/* Lock the port and check if it has the entry */
4674 		ip_mq_lock(port);
4675 
4676 		task_watchport_elem_init(&new_watchports->tw_elem[i], new_task, port);
4677 
4678 		if (ipc_port_replace_watchport_elem_conditional_locked(port,
4679 		    &old_watchports->tw_elem[i], &new_watchports->tw_elem[i]) == KERN_SUCCESS) {
4680 			task_watchport_elem_clear(&old_watchports->tw_elem[i]);
4681 
4682 			task_watchports_retain(new_watchports);
4683 			old_refs = task_watchports_release(old_watchports);
4684 
4685 			/* Check if all ports are cleaned */
4686 			if (old_refs == 0) {
4687 				old_task->watchports = NULL;
4688 			}
4689 		} else {
4690 			task_watchport_elem_clear(&new_watchports->tw_elem[i]);
4691 		}
4692 		/* port unlocked by ipc_port_replace_watchport_elem_conditional_locked */
4693 	}
4694 
4695 	/* Drop the reference on new task_watchports struct returned by task_watchports_alloc_init */
4696 	new_refs = task_watchports_release(new_watchports);
4697 	if (new_refs == 0) {
4698 		new_task->watchports = NULL;
4699 	}
4700 
4701 	is_write_unlock(new_task->itk_space);
4702 	is_write_unlock(old_task->itk_space);
4703 
4704 	/* Clear the task and thread references for old_watchport */
4705 	if (old_refs == 0) {
4706 		task_watchports_deallocate(old_watchports);
4707 	}
4708 
4709 	/* Clear the task and thread references for new_watchport */
4710 	if (new_refs == 0) {
4711 		task_watchports_deallocate(new_watchports);
4712 	}
4713 }
4714 
4715 /*
4716  *	task_add_turnstile_watchports_locked:
4717  *		Setup watchports to boost the main thread of the task.
4718  *
4719  *	Arguments:
4720  *		task: task to boost
4721  *		watchports: watchport structure to be attached to the task
4722  *		previous_elem_array: an array of old watchport_elem to be returned to caller
4723  *		portwatch_ports: array of watchports
4724  *		portwatch_count: number of watchports
4725  *
4726  *	Conditions:
4727  *		ipc space of the task locked.
4728  *		returns array of old watchport_elem in previous_elem_array
4729  */
4730 static os_ref_count_t
4731 task_add_turnstile_watchports_locked(
4732 	task_t                      task,
4733 	struct task_watchports      *watchports,
4734 	struct task_watchport_elem  **previous_elem_array,
4735 	ipc_port_t                  *portwatch_ports,
4736 	uint32_t                    portwatch_count)
4737 {
4738 	os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4739 
4740 	/* Check if the task is still active */
4741 	if (!task->active) {
4742 		refs = task_watchports_release(watchports);
4743 		return refs;
4744 	}
4745 
4746 	assert(task->watchports == NULL);
4747 	task->watchports = watchports;
4748 
4749 	for (uint32_t i = 0, j = 0; i < portwatch_count; i++) {
4750 		ipc_port_t port = portwatch_ports[i];
4751 
4752 		task_watchport_elem_init(&watchports->tw_elem[i], task, port);
4753 		if (port == NULL) {
4754 			task_watchport_elem_clear(&watchports->tw_elem[i]);
4755 			continue;
4756 		}
4757 
4758 		ip_mq_lock(port);
4759 
4760 		/* Check if port is in valid state to be setup as watchport */
4761 		if (ipc_port_add_watchport_elem_locked(port, &watchports->tw_elem[i],
4762 		    &previous_elem_array[j]) != KERN_SUCCESS) {
4763 			task_watchport_elem_clear(&watchports->tw_elem[i]);
4764 			continue;
4765 		}
4766 		/* port unlocked on return */
4767 
4768 		ip_reference(port);
4769 		task_watchports_retain(watchports);
4770 		if (previous_elem_array[j] != NULL) {
4771 			j++;
4772 		}
4773 	}
4774 
4775 	/* Drop the reference on task_watchport struct returned by os_ref_init */
4776 	refs = task_watchports_release(watchports);
4777 	if (refs == 0) {
4778 		task->watchports = NULL;
4779 	}
4780 
4781 	return refs;
4782 }
4783 
4784 /*
4785  *	task_remove_turnstile_watchports_locked:
4786  *		Clear all turnstile boost on the task from watchports.
4787  *
4788  *	Arguments:
4789  *		task: task to remove watchports from
4790  *		watchports: watchports structure for the task
4791  *		port_freelist: array of ports returned with ref to caller
4792  *
4793  *
4794  *	Conditions:
4795  *		ipc space of the task locked.
4796  *		array of ports with refs are returned in port_freelist
4797  */
4798 static os_ref_count_t
4799 task_remove_turnstile_watchports_locked(
4800 	task_t                 task,
4801 	struct task_watchports *watchports,
4802 	ipc_port_t             *port_freelist)
4803 {
4804 	os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4805 
4806 	for (uint32_t i = 0, j = 0; i < watchports->tw_elem_array_count; i++) {
4807 		ipc_port_t port = watchports->tw_elem[i].twe_port;
4808 		if (port == NULL) {
4809 			continue;
4810 		}
4811 
4812 		/* Lock the port and check if it has the entry */
4813 		ip_mq_lock(port);
4814 		if (ipc_port_clear_watchport_elem_internal_conditional_locked(port,
4815 		    &watchports->tw_elem[i]) == KERN_SUCCESS) {
4816 			task_watchport_elem_clear(&watchports->tw_elem[i]);
4817 			port_freelist[j++] = port;
4818 			refs = task_watchports_release(watchports);
4819 
4820 			/* Check if all ports are cleaned */
4821 			if (refs == 0) {
4822 				task->watchports = NULL;
4823 				break;
4824 			}
4825 		}
4826 		/* mqueue and port unlocked by ipc_port_clear_watchport_elem_internal_conditional_locked */
4827 	}
4828 	return refs;
4829 }
4830 
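/*
 * Usage sketch (illustrative only; mirrors the unlocked wrapper
 * task_remove_turnstile_watchports() earlier in this file): the ports
 * returned in the freelist carry the references taken at setup time
 * and must be released once the space lock is dropped.
 *
 *	ipc_port_t freelist[TASK_MAX_WATCHPORT_COUNT] = {};
 *	struct task_watchports *watchports;
 *	os_ref_count_t refs;
 *
 *	is_write_lock(task->itk_space);
 *	watchports = task->watchports;
 *	refs = task_remove_turnstile_watchports_locked(task, watchports,
 *	    freelist);
 *	is_write_unlock(task->itk_space);
 *	for (uint32_t i = 0; i < TASK_MAX_WATCHPORT_COUNT &&
 *	    freelist[i] != NULL; i++) {
 *		ip_release(freelist[i]);
 *	}
 *	if (refs == 0) {
 *		task_watchports_deallocate(watchports);
 *	}
 */
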
4831 /*
4832  *	task_watchports_alloc_init:
4833  *		Allocate and initialize task watchport struct.
4834  *
4835  *	Conditions:
4836  *		Nothing locked.
4837  */
4838 static struct task_watchports *
4839 task_watchports_alloc_init(
4840 	task_t        task,
4841 	thread_t      thread,
4842 	uint32_t      count)
4843 {
4844 	struct task_watchports *watchports = kalloc_type(struct task_watchports,
4845 	    struct task_watchport_elem, count, Z_WAITOK | Z_ZERO | Z_NOFAIL);
4846 
4847 	task_reference(task);
4848 	thread_reference(thread);
4849 	watchports->tw_task = task;
4850 	watchports->tw_thread = thread;
4851 	watchports->tw_elem_array_count = count;
4852 	os_ref_init(&watchports->tw_refcount, &task_watchports_refgrp);
4853 
4854 	return watchports;
4855 }
4856 
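/*
 * Note on the allocation pattern used above (a sketch with a
 * hypothetical struct, not new kernel code): kalloc_type(hdr, elem,
 * count, flags) is the typed-zone allocator form for a header struct
 * followed by a trailing array of `count` elements, and it must be
 * paired with a kfree_type() naming the same types and count, as
 * task_watchports_deallocate() does next.
 *
 *	struct hdr { uint32_t h_count; struct elem h_arr[]; };
 *
 *	struct hdr *h = kalloc_type(struct hdr, struct elem, n,
 *	    Z_WAITOK | Z_ZERO | Z_NOFAIL);
 *	...
 *	kfree_type(struct hdr, struct elem, n, h);
 */
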
4857 /*
4858  *	task_watchports_deallocate:
4859  *		Deallocate task watchport struct.
4860  *
4861  *	Conditions:
4862  *		Nothing locked.
4863  */
4864 static void
4865 task_watchports_deallocate(
4866 	struct task_watchports *watchports)
4867 {
4868 	uint32_t portwatch_count = watchports->tw_elem_array_count;
4869 
4870 	task_deallocate(watchports->tw_task);
4871 	thread_deallocate(watchports->tw_thread);
4872 	kfree_type(struct task_watchports, struct task_watchport_elem,
4873 	    portwatch_count, watchports);
4874 }
4875 
4876 /*
4877  *	task_watchport_elem_deallocate:
4878  *		Deallocate task watchport element and release its ref on task_watchport.
4879  *
4880  *	Conditions:
4881  *		Nothing locked.
4882  */
4883 void
4884 task_watchport_elem_deallocate(
4885 	struct task_watchport_elem *watchport_elem)
4886 {
4887 	os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4888 	task_t task = watchport_elem->twe_task;
4889 	struct task_watchports *watchports = NULL;
4890 	ipc_port_t port = NULL;
4891 
4892 	assert(task != NULL);
4893 
4894 	/* Take the space lock to modify the element */
4895 	is_write_lock(task->itk_space);
4896 
4897 	watchports = task->watchports;
4898 	assert(watchports != NULL);
4899 
4900 	port = watchport_elem->twe_port;
4901 	assert(port != NULL);
4902 
4903 	task_watchport_elem_clear(watchport_elem);
4904 	refs = task_watchports_release(watchports);
4905 
4906 	if (refs == 0) {
4907 		task->watchports = NULL;
4908 	}
4909 
4910 	is_write_unlock(task->itk_space);
4911 
4912 	ip_release(port);
4913 	if (refs == 0) {
4914 		task_watchports_deallocate(watchports);
4915 	}
4916 }
4917 
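/*
 * Note the ordering above: the element is cleared and the watchports
 * refcount dropped while the space lock is held, but ip_release() and
 * task_watchports_deallocate() run only after the lock is dropped,
 * since both may take further locks of their own (port destruction,
 * task/thread deallocation).
 */
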
4918 /*
4919  *	task_has_watchports:
4920  *		Return TRUE if task has watchport boosts.
4921  *
4922  *	Conditions:
4923  *		Nothing locked.
4924  */
4925 boolean_t
4926 task_has_watchports(task_t task)
4927 {
4928 	return task->watchports != NULL;
4929 }
4930 
4931 #if DEVELOPMENT || DEBUG
4932 
4933 extern void IOSleep(int);
4934 
4935 kern_return_t
4936 task_disconnect_page_mappings(task_t task)
4937 {
4938 	int     n;
4939 
4940 	if (task == TASK_NULL || task == kernel_task) {
4941 		return KERN_INVALID_ARGUMENT;
4942 	}
4943 
4944 	/*
4945 	 * This function strips all of the mappings from the pmap of
4946 	 * the specified task to force the task to re-fault all of the
4947 	 * pages it is actively using... this lets us approximate the
4948 	 * true working set of the task.  We only engage if at least
4949 	 * one of the threads in the task is runnable, but we want to
4950 	 * sweep continuously (at least for a while - the limit of 100
4951 	 * sweeps is arbitrary and should be revisited as we gain
4952 	 * experience) to get a better view into which areas within a
4953 	 * page are being visited, as opposed to only seeing the first
4954 	 * fault of a page after the task becomes runnable... in the
4955 	 * future we may try to block until awakened by a thread in
4956 	 * this task being made runnable, but for now we periodically
4957 	 * poll from the user-level debug tool that drives the
4958 	 * sysctl.
4959 	 */
4960 	for (n = 0; n < 100; n++) {
4961 		thread_t        thread;
4962 		boolean_t       runnable;
4963 		boolean_t       do_unnest;
4964 		int             page_count;
4965 
4966 		runnable = FALSE;
4967 		do_unnest = FALSE;
4968 
4969 		task_lock(task);
4970 
4971 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
4972 			if (thread->state & TH_RUN) {
4973 				runnable = TRUE;
4974 				break;
4975 			}
4976 		}
4977 		if (n == 0) {
4978 			task->task_disconnected_count++;
4979 		}
4980 
4981 		if (task->task_unnested == FALSE) {
4982 			if (runnable == TRUE) {
4983 				task->task_unnested = TRUE;
4984 				do_unnest = TRUE;
4985 			}
4986 		}
4987 		task_unlock(task);
4988 
4989 		if (runnable == FALSE) {
4990 			break;
4991 		}
4992 
4993 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_START,
4994 		    task, do_unnest, task->task_disconnected_count, 0, 0);
4995 
4996 		page_count = vm_map_disconnect_page_mappings(task->map, do_unnest);
4997 
4998 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_END,
4999 		    task, page_count, 0, 0, 0);
5000 
5001 		if ((n % 5) == 4) {
5002 			IOSleep(1);
5003 		}
5004 	}
5005 	return KERN_SUCCESS;
5006 }
5007 
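/*
 * Usage sketch (user space, DEVELOPMENT/DEBUG kernels only): the debug
 * tool mentioned above would drive repeated sweeps through a sysctl.
 * The sysctl name here is hypothetical, purely for illustration:
 *
 *	int pid = target_pid;
 *	while (keep_sampling) {
 *		(void)sysctlbyname("vm.disconnect_task_page_mappings",
 *		    NULL, NULL, &pid, sizeof(pid));
 *		sleep(1);
 *	}
 */
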
5008 #endif
5009 
5010 
5011 #if CONFIG_FREEZE
5012 
5013 /*
5014  *	task_freeze:
5015  *
5016  *	Freeze a task.
5017  *
5018  * Conditions:
5019  *      The caller holds a reference to the task
5020  */
5021 extern void     vm_wake_compactor_swapper(void);
5022 extern struct freezer_context freezer_context_global;
5023 
5024 kern_return_t
5025 task_freeze(
5026 	task_t    task,
5027 	uint32_t           *purgeable_count,
5028 	uint32_t           *wired_count,
5029 	uint32_t           *clean_count,
5030 	uint32_t           *dirty_count,
5031 	uint32_t           dirty_budget,
5032 	uint32_t           *shared_count,
5033 	int                *freezer_error_code,
5034 	boolean_t          eval_only)
5035 {
5036 	kern_return_t kr = KERN_SUCCESS;
5037 
5038 	if (task == TASK_NULL || task == kernel_task) {
5039 		return KERN_INVALID_ARGUMENT;
5040 	}
5041 
5042 	task_lock(task);
5043 
5044 	while (task->changing_freeze_state) {
5045 		assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
5046 		task_unlock(task);
5047 		thread_block(THREAD_CONTINUE_NULL);
5048 
5049 		task_lock(task);
5050 	}
5051 	if (task->frozen) {
5052 		task_unlock(task);
5053 		return KERN_FAILURE;
5054 	}
5055 	task->changing_freeze_state = TRUE;
5056 
5057 	freezer_context_global.freezer_ctx_task = task;
5058 
5059 	task_unlock(task);
5060 
5061 	kr = vm_map_freeze(task,
5062 	    purgeable_count,
5063 	    wired_count,
5064 	    clean_count,
5065 	    dirty_count,
5066 	    dirty_budget,
5067 	    shared_count,
5068 	    freezer_error_code,
5069 	    eval_only);
5070 
5071 	task_lock(task);
5072 
5073 	if ((kr == KERN_SUCCESS) && (eval_only == FALSE)) {
5074 		task->frozen = TRUE;
5075 
5076 		freezer_context_global.freezer_ctx_task = NULL;
5077 		freezer_context_global.freezer_ctx_uncompressed_pages = 0;
5078 
5079 		if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
5080 			/*
5081 			 * reset the counter tracking the # of swapped compressed pages
5082 			 * because we are now done with this freeze session and task.
5083 			 */
5084 
5085 			*dirty_count = (uint32_t) (freezer_context_global.freezer_ctx_swapped_bytes / PAGE_SIZE_64);         /* used to track pageouts */
5086 		}
5087 
5088 		freezer_context_global.freezer_ctx_swapped_bytes = 0;
5089 	}
5090 
5091 	task->changing_freeze_state = FALSE;
5092 	thread_wakeup(&task->changing_freeze_state);
5093 
5094 	task_unlock(task);
5095 
5096 	if (VM_CONFIG_COMPRESSOR_IS_PRESENT &&
5097 	    (kr == KERN_SUCCESS) &&
5098 	    (eval_only == FALSE)) {
5099 		vm_wake_compactor_swapper();
5100 		/*
5101 		 * We do an explicit wakeup of the swapout thread here
5102 		 * because the compact_and_swap routines don't have
5103 		 * knowledge about these kinds of "per-task packed c_segs"
5104 		 * and so will not be evaluating whether we need to do
5105 		 * a wakeup there.
5106 		 */
5107 		thread_wakeup((event_t)&vm_swapout_thread);
5108 	}
5109 
5110 	return kr;
5111 }
5112 
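/*
 * Call shape (illustrative; the real caller lives in the BSD
 * memorystatus freezer code): every count is an out-parameter,
 * dirty_budget caps the pages compressed in this pass, and
 * freezer_error_code is only meaningful when the call fails.
 * Passing eval_only == TRUE sizes the work without marking the
 * task frozen.
 *
 *	uint32_t purgeable, wired, clean, dirty, shared;
 *	int ferr = 0;
 *	kern_return_t kr = task_freeze(task, &purgeable, &wired,
 *	    &clean, &dirty, dirty_budget, &shared, &ferr, FALSE);
 *	if (kr != KERN_SUCCESS) {
 *		... inspect ferr ...
 *	}
 */
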
5113 /*
5114  *	task_thaw:
5115  *
5116  *	Thaw a currently frozen task.
5117  *
5118  * Conditions:
5119  *      The caller holds a reference to the task
5120  */
5121 kern_return_t
5122 task_thaw(
5123 	task_t          task)
5124 {
5125 	if (task == TASK_NULL || task == kernel_task) {
5126 		return KERN_INVALID_ARGUMENT;
5127 	}
5128 
5129 	task_lock(task);
5130 
5131 	while (task->changing_freeze_state) {
5132 		assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
5133 		task_unlock(task);
5134 		thread_block(THREAD_CONTINUE_NULL);
5135 
5136 		task_lock(task);
5137 	}
5138 	if (!task->frozen) {
5139 		task_unlock(task);
5140 		return KERN_FAILURE;
5141 	}
5142 	task->frozen = FALSE;
5143 
5144 	task_unlock(task);
5145 
5146 	return KERN_SUCCESS;
5147 }
5148 
5149 void
5150 task_update_frozen_to_swap_acct(task_t task, int64_t amount, freezer_acct_op_t op)
5151 {
5152 	/*
5153 	 * We don't assert that the task lock is held because we call this
5154 	 * routine from the decompression path and we won't be holding the
5155 	 * task lock. However, since we are in the context of the task we are
5156 	 * safe.
5157 	 * In the case of the task_freeze path, we call it from behind the task
5158 	 * lock but we don't need to because we have a reference on the proc
5159 	 * being frozen.
5160 	 */
5161 
5162 	assert(task);
5163 	if (amount == 0) {
5164 		return;
5165 	}
5166 
5167 	if (op == CREDIT_TO_SWAP) {
5168 		ledger_credit_nocheck(task->ledger, task_ledgers.frozen_to_swap, amount);
5169 	} else if (op == DEBIT_FROM_SWAP) {
5170 		ledger_debit_nocheck(task->ledger, task_ledgers.frozen_to_swap, amount);
5171 	} else {
5172 		panic("task_update_frozen_to_swap_acct: Invalid ledger op");
5173 	}
5174 }
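
/*
 * Example of the intended symmetry (hypothetical amounts): a swap-out
 * of four compressed pages followed by their later swap-in leaves the
 * frozen_to_swap ledger balanced.
 *
 *	task_update_frozen_to_swap_acct(task, 4 * PAGE_SIZE_64, CREDIT_TO_SWAP);
 *	...
 *	task_update_frozen_to_swap_acct(task, 4 * PAGE_SIZE_64, DEBIT_FROM_SWAP);
 */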
5175 #endif /* CONFIG_FREEZE */
5176 
5177 kern_return_t
5178 task_set_security_tokens(
5179 	task_t           task,
5180 	security_token_t sec_token,
5181 	audit_token_t    audit_token,
5182 	host_priv_t      host_priv)
5183 {
5184 	ipc_port_t       host_port = IP_NULL;
5185 	kern_return_t    kr;
5186 
5187 	if (task == TASK_NULL) {
5188 		return KERN_INVALID_ARGUMENT;
5189 	}
5190 
5191 	task_lock(task);
5192 	task_set_tokens(task, &sec_token, &audit_token);
5193 	task_unlock(task);
5194 
5195 	if (host_priv != HOST_PRIV_NULL) {
5196 		kr = host_get_host_priv_port(host_priv, &host_port);
5197 	} else {
5198 		kr = host_get_host_port(host_priv_self(), &host_port);
5199 	}
5200 	assert(kr == KERN_SUCCESS);
5201 
5202 	kr = task_set_special_port_internal(task, TASK_HOST_PORT, host_port);
5203 	return kr;
5204 }
5205 
5206 kern_return_t
5207 task_send_trace_memory(
5208 	__unused task_t   target_task,
5209 	__unused uint32_t pid,
5210 	__unused uint64_t uniqueid)
5211 {
5212 	return KERN_INVALID_ARGUMENT;
5213 }
5214 
5215 /*
5216  * This routine was added, pretty much exclusively, for registering the
5217  * RPC glue vector for in-kernel short circuited tasks.  Rather than
5218  * removing it completely, I have only disabled that feature (which was
5219  * the only feature at the time).  It just appears that we are going to
5220  * want to add some user data to tasks in the future (e.g. bsd info,
5221  * task names, etc...), so I left it in the formal task interface.
5222  */
5223 kern_return_t
5224 task_set_info(
5225 	task_t          task,
5226 	task_flavor_t   flavor,
5227 	__unused task_info_t    task_info_in,           /* pointer to IN array */
5228 	__unused mach_msg_type_number_t task_info_count)
5229 {
5230 	if (task == TASK_NULL) {
5231 		return KERN_INVALID_ARGUMENT;
5232 	}
5233 	switch (flavor) {
5234 #if CONFIG_ATM
5235 	case TASK_TRACE_MEMORY_INFO:
5236 		return KERN_NOT_SUPPORTED;
5237 #endif // CONFIG_ATM
5238 	default:
5239 		return KERN_INVALID_ARGUMENT;
5240 	}
5241 }
5242 
5243 static void
5244 _task_fill_times(task_t task, time_value_t *user_time, time_value_t *sys_time)
5245 {
5246 	clock_sec_t sec;
5247 	clock_usec_t usec;
5248 
5249 	struct recount_times_mach times = recount_task_terminated_times(task);
5250 	absolutetime_to_microtime(times.rtm_user, &sec, &usec);
5251 	user_time->seconds = (typeof(user_time->seconds))sec;
5252 	user_time->microseconds = usec;
5253 	absolutetime_to_microtime(times.rtm_system, &sec, &usec);
5254 	sys_time->seconds = (typeof(sys_time->seconds))sec;
5255 	sys_time->microseconds = usec;
5256 }
5257 
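/*
 * Equivalent formulation of the conversion above (a sketch): going
 * through nanoseconds instead of absolutetime_to_microtime() yields
 * the same split of an absolute time into seconds and microseconds.
 *
 *	uint64_t ns;
 *	absolutetime_to_nanoseconds(times.rtm_user, &ns);
 *	user_time->seconds = (typeof(user_time->seconds))(ns / NSEC_PER_SEC);
 *	user_time->microseconds = (ns % NSEC_PER_SEC) / NSEC_PER_USEC;
 */
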
5258 int radar_20146450 = 1;
5259 kern_return_t
5260 task_info(
5261 	task_t                  task,
5262 	task_flavor_t           flavor,
5263 	task_info_t             task_info_out,
5264 	mach_msg_type_number_t  *task_info_count)
5265 {
5266 	kern_return_t error = KERN_SUCCESS;
5267 	mach_msg_type_number_t  original_task_info_count;
5268 	bool is_kernel_task = (task == kernel_task);
5269 
5270 	if (task == TASK_NULL) {
5271 		return KERN_INVALID_ARGUMENT;
5272 	}
5273 
5274 	original_task_info_count = *task_info_count;
5275 	task_lock(task);
5276 
5277 	if (task != current_task() && !task->active) {
5278 		task_unlock(task);
5279 		return KERN_INVALID_ARGUMENT;
5280 	}
5281 
5282 
5283 	switch (flavor) {
5284 	case TASK_BASIC_INFO_32:
5285 	case TASK_BASIC2_INFO_32:
5286 #if defined(__arm64__)
5287 	case TASK_BASIC_INFO_64:
5288 #endif
5289 		{
5290 			task_basic_info_32_t basic_info;
5291 			ledger_amount_t      tmp;
5292 
5293 			if (*task_info_count < TASK_BASIC_INFO_32_COUNT) {
5294 				error = KERN_INVALID_ARGUMENT;
5295 				break;
5296 			}
5297 
5298 			basic_info = (task_basic_info_32_t)task_info_out;
5299 
5300 			basic_info->virtual_size = (typeof(basic_info->virtual_size))
5301 			    vm_map_adjusted_size(is_kernel_task ? kernel_map : task->map);
5302 			if (flavor == TASK_BASIC2_INFO_32) {
5303 				/*
5304 				 * The "BASIC2" flavor gets the maximum resident
5305 				 * size instead of the current resident size...
5306 				 */
5307 				ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, &tmp);
5308 			} else {
5309 				ledger_get_balance(task->ledger, task_ledgers.phys_mem, &tmp);
5310 			}
5311 			basic_info->resident_size = (natural_t) MIN((ledger_amount_t) UINT32_MAX, tmp);
5312 
5313 			_task_fill_times(task, &basic_info->user_time,
5314 			    &basic_info->system_time);
5315 
5316 			basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5317 			basic_info->suspend_count = task->user_stop_count;
5318 
5319 			*task_info_count = TASK_BASIC_INFO_32_COUNT;
5320 			break;
5321 		}
5322 
5323 #if defined(__arm64__)
5324 	case TASK_BASIC_INFO_64_2:
5325 	{
5326 		task_basic_info_64_2_t  basic_info;
5327 
5328 		if (*task_info_count < TASK_BASIC_INFO_64_2_COUNT) {
5329 			error = KERN_INVALID_ARGUMENT;
5330 			break;
5331 		}
5332 
5333 		basic_info = (task_basic_info_64_2_t)task_info_out;
5334 
5335 		basic_info->virtual_size  = vm_map_adjusted_size(is_kernel_task ?
5336 		    kernel_map : task->map);
5337 		ledger_get_balance(task->ledger, task_ledgers.phys_mem,
5338 		    (ledger_amount_t *)&basic_info->resident_size);
5339 		basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5340 		basic_info->suspend_count = task->user_stop_count;
5341 		_task_fill_times(task, &basic_info->user_time,
5342 		    &basic_info->system_time);
5343 
5344 		*task_info_count = TASK_BASIC_INFO_64_2_COUNT;
5345 		break;
5346 	}
5347 
5348 #else /* defined(__arm64__) */
5349 	case TASK_BASIC_INFO_64:
5350 	{
5351 		task_basic_info_64_t basic_info;
5352 
5353 		if (*task_info_count < TASK_BASIC_INFO_64_COUNT) {
5354 			error = KERN_INVALID_ARGUMENT;
5355 			break;
5356 		}
5357 
5358 		basic_info = (task_basic_info_64_t)task_info_out;
5359 
5360 		basic_info->virtual_size = vm_map_adjusted_size(is_kernel_task ?
5361 		    kernel_map : task->map);
5362 		ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *)&basic_info->resident_size);
5363 		basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5364 		basic_info->suspend_count = task->user_stop_count;
5365 		_task_fill_times(task, &basic_info->user_time,
5366 		    &basic_info->system_time);
5367 
5368 		*task_info_count = TASK_BASIC_INFO_64_COUNT;
5369 		break;
5370 	}
5371 #endif /* defined(__arm64__) */
5372 
5373 	case MACH_TASK_BASIC_INFO:
5374 	{
5375 		mach_task_basic_info_t  basic_info;
5376 
5377 		if (*task_info_count < MACH_TASK_BASIC_INFO_COUNT) {
5378 			error = KERN_INVALID_ARGUMENT;
5379 			break;
5380 		}
5381 
5382 		basic_info = (mach_task_basic_info_t)task_info_out;
5383 
5384 		basic_info->virtual_size = vm_map_adjusted_size(is_kernel_task ?
5385 		    kernel_map : task->map);
5386 		ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &basic_info->resident_size);
5387 		ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &basic_info->resident_size_max);
5388 		basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5389 		basic_info->suspend_count = task->user_stop_count;
5390 		_task_fill_times(task, &basic_info->user_time,
5391 		    &basic_info->system_time);
5392 
5393 		*task_info_count = MACH_TASK_BASIC_INFO_COUNT;
5394 		break;
5395 	}
5396 
5397 	case TASK_THREAD_TIMES_INFO:
5398 	{
5399 		task_thread_times_info_t times_info;
5400 		thread_t                 thread;
5401 
5402 		if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
5403 			error = KERN_INVALID_ARGUMENT;
5404 			break;
5405 		}
5406 
5407 		times_info = (task_thread_times_info_t)task_info_out;
5408 		times_info->user_time = (time_value_t){ 0 };
5409 		times_info->system_time = (time_value_t){ 0 };
5410 
5411 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
5412 			if ((thread->options & TH_OPT_IDLE_THREAD) == 0) {
5413 				time_value_t user_time, system_time;
5414 
5415 				thread_read_times(thread, &user_time, &system_time, NULL);
5416 				time_value_add(&times_info->user_time, &user_time);
5417 				time_value_add(&times_info->system_time, &system_time);
5418 			}
5419 		}
5420 
5421 		*task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
5422 		break;
5423 	}
5424 
5425 	case TASK_ABSOLUTETIME_INFO:
5426 	{
5427 		task_absolutetime_info_t        info;
5428 
5429 		if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT) {
5430 			error = KERN_INVALID_ARGUMENT;
5431 			break;
5432 		}
5433 
5434 		info = (task_absolutetime_info_t)task_info_out;
5435 
5436 		struct recount_times_mach term_times =
5437 		    recount_task_terminated_times(task);
5438 		struct recount_times_mach total_times = recount_task_times(task);
5439 
5440 		info->total_user = total_times.rtm_user;
5441 		info->total_system = total_times.rtm_system;
5442 		info->threads_user = total_times.rtm_user - term_times.rtm_user;
5443 		info->threads_system = total_times.rtm_system - term_times.rtm_system;
5444 
5445 		*task_info_count = TASK_ABSOLUTETIME_INFO_COUNT;
5446 		break;
5447 	}
5448 
5449 	case TASK_DYLD_INFO:
5450 	{
5451 		task_dyld_info_t info;
5452 
5453 		/*
5454 		 * We added the format field to TASK_DYLD_INFO output.  For
5455 		 * temporary backward compatibility, accept the fact that
5456 		 * clients may ask for the old version - distinguished by the
5457 		 * size of the expected result structure.
5458 		 */
5459 #define TASK_LEGACY_DYLD_INFO_COUNT \
5460 	        offsetof(struct task_dyld_info, all_image_info_format)/sizeof(natural_t)
5461 
5462 		if (*task_info_count < TASK_LEGACY_DYLD_INFO_COUNT) {
5463 			error = KERN_INVALID_ARGUMENT;
5464 			break;
5465 		}
5466 
5467 		info = (task_dyld_info_t)task_info_out;
5468 		info->all_image_info_addr = task->all_image_info_addr;
5469 		info->all_image_info_size = task->all_image_info_size;
5470 
5471 		/* only set format on output for those expecting it */
5472 		if (*task_info_count >= TASK_DYLD_INFO_COUNT) {
5473 			info->all_image_info_format = task_has_64Bit_addr(task) ?
5474 			    TASK_DYLD_ALL_IMAGE_INFO_64 :
5475 			    TASK_DYLD_ALL_IMAGE_INFO_32;
5476 			*task_info_count = TASK_DYLD_INFO_COUNT;
5477 		} else {
5478 			*task_info_count = TASK_LEGACY_DYLD_INFO_COUNT;
5479 		}
5480 		break;
5481 	}
5482 
5483 	case TASK_EXTMOD_INFO:
5484 	{
5485 		task_extmod_info_t info;
5486 		void *p;
5487 
5488 		if (*task_info_count < TASK_EXTMOD_INFO_COUNT) {
5489 			error = KERN_INVALID_ARGUMENT;
5490 			break;
5491 		}
5492 
5493 		info = (task_extmod_info_t)task_info_out;
5494 
5495 		p = get_bsdtask_info(task);
5496 		if (p) {
5497 			proc_getexecutableuuid(p, info->task_uuid, sizeof(info->task_uuid));
5498 		} else {
5499 			bzero(info->task_uuid, sizeof(info->task_uuid));
5500 		}
5501 		info->extmod_statistics = task->extmod_statistics;
5502 		*task_info_count = TASK_EXTMOD_INFO_COUNT;
5503 
5504 		break;
5505 	}
5506 
5507 	case TASK_KERNELMEMORY_INFO:
5508 	{
5509 		task_kernelmemory_info_t        tkm_info;
5510 		ledger_amount_t                 credit, debit;
5511 
5512 		if (*task_info_count < TASK_KERNELMEMORY_INFO_COUNT) {
5513 			error = KERN_INVALID_ARGUMENT;
5514 			break;
5515 		}
5516 
5517 		tkm_info = (task_kernelmemory_info_t) task_info_out;
5518 		tkm_info->total_palloc = 0;
5519 		tkm_info->total_pfree = 0;
5520 		tkm_info->total_salloc = 0;
5521 		tkm_info->total_sfree = 0;
5522 
5523 		if (task == kernel_task) {
5524 			/*
5525 			 * All shared allocs/frees from other tasks count against
5526 			 * the kernel private memory usage.  If we are looking up
5527 			 * info for the kernel task, gather from everywhere.
5528 			 */
5529 			task_unlock(task);
5530 
5531 			/* start by accounting for all the terminated tasks against the kernel */
5532 			tkm_info->total_palloc = tasks_tkm_private.alloc + tasks_tkm_shared.alloc;
5533 			tkm_info->total_pfree = tasks_tkm_private.free + tasks_tkm_shared.free;
5534 
5535 			/* count all other task/thread shared alloc/free against the kernel */
5536 			lck_mtx_lock(&tasks_threads_lock);
5537 
5538 			/* XXX this really shouldn't be using the function parameter 'task' as a local var! */
5539 			queue_iterate(&tasks, task, task_t, tasks) {
5540 				if (task == kernel_task) {
5541 					if (ledger_get_entries(task->ledger,
5542 					    task_ledgers.tkm_private, &credit,
5543 					    &debit) == KERN_SUCCESS) {
5544 						tkm_info->total_palloc += credit;
5545 						tkm_info->total_pfree += debit;
5546 					}
5547 				}
5548 				if (!ledger_get_entries(task->ledger,
5549 				    task_ledgers.tkm_shared, &credit, &debit)) {
5550 					tkm_info->total_palloc += credit;
5551 					tkm_info->total_pfree += debit;
5552 				}
5553 			}
5554 			lck_mtx_unlock(&tasks_threads_lock);
5555 		} else {
5556 			if (!ledger_get_entries(task->ledger,
5557 			    task_ledgers.tkm_private, &credit, &debit)) {
5558 				tkm_info->total_palloc = credit;
5559 				tkm_info->total_pfree = debit;
5560 			}
5561 			if (!ledger_get_entries(task->ledger,
5562 			    task_ledgers.tkm_shared, &credit, &debit)) {
5563 				tkm_info->total_salloc = credit;
5564 				tkm_info->total_sfree = debit;
5565 			}
5566 			task_unlock(task);
5567 		}
5568 
5569 		*task_info_count = TASK_KERNELMEMORY_INFO_COUNT;
5570 		return KERN_SUCCESS;
5571 	}
5572 
5573 	/* OBSOLETE */
5574 	case TASK_SCHED_FIFO_INFO:
5575 	{
5576 		if (*task_info_count < POLICY_FIFO_BASE_COUNT) {
5577 			error = KERN_INVALID_ARGUMENT;
5578 			break;
5579 		}
5580 
5581 		error = KERN_INVALID_POLICY;
5582 		break;
5583 	}
5584 
5585 	/* OBSOLETE */
5586 	case TASK_SCHED_RR_INFO:
5587 	{
5588 		policy_rr_base_t        rr_base;
5589 		uint32_t quantum_time;
5590 		uint64_t quantum_ns;
5591 
5592 		if (*task_info_count < POLICY_RR_BASE_COUNT) {
5593 			error = KERN_INVALID_ARGUMENT;
5594 			break;
5595 		}
5596 
5597 		rr_base = (policy_rr_base_t) task_info_out;
5598 
5599 		if (task != kernel_task) {
5600 			error = KERN_INVALID_POLICY;
5601 			break;
5602 		}
5603 
5604 		rr_base->base_priority = task->priority;
5605 
5606 		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
5607 		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);
5608 
5609 		rr_base->quantum = (uint32_t)(quantum_ns / 1000 / 1000);
5610 
5611 		*task_info_count = POLICY_RR_BASE_COUNT;
5612 		break;
5613 	}
5614 
5615 	/* OBSOLETE */
5616 	case TASK_SCHED_TIMESHARE_INFO:
5617 	{
5618 		policy_timeshare_base_t ts_base;
5619 
5620 		if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT) {
5621 			error = KERN_INVALID_ARGUMENT;
5622 			break;
5623 		}
5624 
5625 		ts_base = (policy_timeshare_base_t) task_info_out;
5626 
5627 		if (task == kernel_task) {
5628 			error = KERN_INVALID_POLICY;
5629 			break;
5630 		}
5631 
5632 		ts_base->base_priority = task->priority;
5633 
5634 		*task_info_count = POLICY_TIMESHARE_BASE_COUNT;
5635 		break;
5636 	}
5637 
5638 	case TASK_SECURITY_TOKEN:
5639 	{
5640 		security_token_t        *sec_token_p;
5641 
5642 		if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
5643 			error = KERN_INVALID_ARGUMENT;
5644 			break;
5645 		}
5646 
5647 		sec_token_p = (security_token_t *) task_info_out;
5648 
5649 		*sec_token_p = *task_get_sec_token(task);
5650 
5651 		*task_info_count = TASK_SECURITY_TOKEN_COUNT;
5652 		break;
5653 	}
5654 
5655 	case TASK_AUDIT_TOKEN:
5656 	{
5657 		audit_token_t   *audit_token_p;
5658 
5659 		if (*task_info_count < TASK_AUDIT_TOKEN_COUNT) {
5660 			error = KERN_INVALID_ARGUMENT;
5661 			break;
5662 		}
5663 
5664 		audit_token_p = (audit_token_t *) task_info_out;
5665 
5666 		*audit_token_p = *task_get_audit_token(task);
5667 
5668 		*task_info_count = TASK_AUDIT_TOKEN_COUNT;
5669 		break;
5670 	}
5671 
5672 	case TASK_SCHED_INFO:
5673 		error = KERN_INVALID_ARGUMENT;
5674 		break;
5675 
5676 	case TASK_EVENTS_INFO:
5677 	{
5678 		task_events_info_t      events_info;
5679 		thread_t                thread;
5680 		uint64_t                n_syscalls_mach, n_syscalls_unix, n_csw;
5681 
5682 		if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
5683 			error = KERN_INVALID_ARGUMENT;
5684 			break;
5685 		}
5686 
5687 		events_info = (task_events_info_t) task_info_out;
5688 
5689 
5690 		events_info->faults = (int32_t) MIN(counter_load(&task->faults), INT32_MAX);
5691 		events_info->pageins = (int32_t) MIN(counter_load(&task->pageins), INT32_MAX);
5692 		events_info->cow_faults = (int32_t) MIN(counter_load(&task->cow_faults), INT32_MAX);
5693 		events_info->messages_sent = (int32_t) MIN(counter_load(&task->messages_sent), INT32_MAX);
5694 		events_info->messages_received = (int32_t) MIN(counter_load(&task->messages_received), INT32_MAX);
5695 
5696 		n_syscalls_mach = task->syscalls_mach;
5697 		n_syscalls_unix = task->syscalls_unix;
5698 		n_csw = task->c_switch;
5699 
5700 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
5701 			n_csw           += thread->c_switch;
5702 			n_syscalls_mach += thread->syscalls_mach;
5703 			n_syscalls_unix += thread->syscalls_unix;
5704 		}
5705 
5706 		events_info->syscalls_mach = (int32_t) MIN(n_syscalls_mach, INT32_MAX);
5707 		events_info->syscalls_unix = (int32_t) MIN(n_syscalls_unix, INT32_MAX);
5708 		events_info->csw = (int32_t) MIN(n_csw, INT32_MAX);
5709 
5710 		*task_info_count = TASK_EVENTS_INFO_COUNT;
5711 		break;
5712 	}
5713 	case TASK_AFFINITY_TAG_INFO:
5714 	{
5715 		if (*task_info_count < TASK_AFFINITY_TAG_INFO_COUNT) {
5716 			error = KERN_INVALID_ARGUMENT;
5717 			break;
5718 		}
5719 
5720 		error = task_affinity_info(task, task_info_out, task_info_count);
5721 		break;
5722 	}
5723 	case TASK_POWER_INFO:
5724 	{
5725 		if (*task_info_count < TASK_POWER_INFO_COUNT) {
5726 			error = KERN_INVALID_ARGUMENT;
5727 			break;
5728 		}
5729 
5730 		task_power_info_locked(task, (task_power_info_t)task_info_out, NULL, NULL, NULL);
5731 		break;
5732 	}
5733 
5734 	case TASK_POWER_INFO_V2:
5735 	{
5736 		if (*task_info_count < TASK_POWER_INFO_V2_COUNT_OLD) {
5737 			error = KERN_INVALID_ARGUMENT;
5738 			break;
5739 		}
5740 		task_power_info_v2_t tpiv2 = (task_power_info_v2_t) task_info_out;
5741 		task_power_info_locked(task, &tpiv2->cpu_energy, &tpiv2->gpu_energy, tpiv2, NULL);
5742 		break;
5743 	}
5744 
5745 	case TASK_VM_INFO:
5746 	case TASK_VM_INFO_PURGEABLE:
5747 	{
5748 		task_vm_info_t          vm_info;
5749 		vm_map_t                map;
5750 		ledger_amount_t         tmp_amount;
5751 
5752 		struct proc *p;
5753 		uint32_t platform, sdk;
5754 		p = current_proc();
5755 		platform = proc_platform(p);
5756 		sdk = proc_sdk(p);
5757 		if (original_task_info_count > TASK_VM_INFO_COUNT) {
5758 			/*
5759 			 * Some iOS apps pass an incorrect value for
5760 			 * task_info_count, expressed in number of bytes
5761 			 * instead of number of "natural_t" elements, which
5762 			 * can lead to binary compatibility issues (including
5763 			 * stack corruption) when the data structure is
5764 			 * expanded in the future.
5765 			 * Let's make this potential issue visible by
5766 			 * logging about it...
5767 			 */
5768 			printf("%s:%d %d[%s] task_info(flavor=%d) possibly invalid "
5769 			    "task_info_count=%d > TASK_VM_INFO_COUNT=%d platform %d sdk "
5770 			    "%d.%d.%d - please use TASK_VM_INFO_COUNT.\n",
5771 			    __FUNCTION__, __LINE__, proc_pid(p), proc_name_address(p),
5772 			    flavor, original_task_info_count, TASK_VM_INFO_COUNT,
5773 			    platform, (sdk >> 16), ((sdk >> 8) & 0xff), (sdk & 0xff));
5774 			DTRACE_VM4(suspicious_task_vm_info_count,
5775 			    mach_msg_type_number_t, original_task_info_count,
5776 			    mach_msg_type_number_t, TASK_VM_INFO_COUNT,
5777 			    uint32_t, platform,
5778 			    uint32_t, sdk);
5779 		}
5780 #if __arm64__
5781 		if (original_task_info_count > TASK_VM_INFO_REV2_COUNT &&
5782 		    platform == PLATFORM_IOS &&
5783 		    sdk != 0 &&
5784 		    (sdk >> 16) <= 12) {
5785 			/*
5786 			 * Some iOS apps pass an incorrect value for
5787 			 * task_info_count, expressed in number of bytes
5788 			 * instead of number of "natural_t" elements.
5789 			 * For the sake of backwards binary compatibility
5790 			 * for apps built with an iOS12 or older SDK and using
5791 			 * the "rev2" data structure, let's fix task_info_count
5792 			 * for them, to avoid stomping past the actual end
5793 			 * of their buffer.
5794 			 */
5795 #if DEVELOPMENT || DEBUG
5796 			printf("%s:%d %d[%s] rdar://49484582 task_info_count %d -> %d "
5797 			    "platform %d sdk %d.%d.%d\n", __FUNCTION__, __LINE__, proc_pid(p),
5798 			    proc_name_address(p), original_task_info_count,
5799 			    TASK_VM_INFO_REV2_COUNT, platform, (sdk >> 16),
5800 			    ((sdk >> 8) & 0xff), (sdk & 0xff));
5801 #endif /* DEVELOPMENT || DEBUG */
5802 			DTRACE_VM4(workaround_task_vm_info_count,
5803 			    mach_msg_type_number_t, original_task_info_count,
5804 			    mach_msg_type_number_t, TASK_VM_INFO_REV2_COUNT,
5805 			    uint32_t, platform,
5806 			    uint32_t, sdk);
5807 			original_task_info_count = TASK_VM_INFO_REV2_COUNT;
5808 			*task_info_count = original_task_info_count;
5809 		}
5810 		if (original_task_info_count > TASK_VM_INFO_REV5_COUNT &&
5811 		    platform == PLATFORM_IOS &&
5812 		    sdk != 0 &&
5813 		    (sdk >> 16) <= 15) {
5814 			/*
5815 			 * Some iOS apps pass an incorrect value for
5816 			 * task_info_count, expressed in number of bytes
5817 			 * instead of number of "natural_t" elements.
5818 			 */
5819 			printf("%s:%d %d[%s] task_info_count=%d > TASK_VM_INFO_REV5_COUNT=%d "
5820 			    "platform %d sdk %d.%d.%d\n", __FUNCTION__, __LINE__, proc_pid(p),
5821 			    proc_name_address(p), original_task_info_count,
5822 			    TASK_VM_INFO_REV5_COUNT, platform, (sdk >> 16),
5823 			    ((sdk >> 8) & 0xff), (sdk & 0xff));
5824 			DTRACE_VM4(workaround_task_vm_info_count,
5825 			    mach_msg_type_number_t, original_task_info_count,
5826 			    mach_msg_type_number_t, TASK_VM_INFO_REV5_COUNT,
5827 			    uint32_t, platform,
5828 			    uint32_t, sdk);
5829 #if DEVELOPMENT || DEBUG
5830 			/*
5831 			 * For the sake of internal builds livability,
5832 			 * work around this user-space bug by capping the
5833 			 * buffer's size to what it was with the iOS15 SDK.
5834 			 */
5835 			original_task_info_count = TASK_VM_INFO_REV5_COUNT;
5836 			*task_info_count = original_task_info_count;
5837 #endif /* DEVELOPMENT || DEBUG */
5838 		}
5839 #endif /* __arm64__ */
5840 
5841 		if (*task_info_count < TASK_VM_INFO_REV0_COUNT) {
5842 			error = KERN_INVALID_ARGUMENT;
5843 			break;
5844 		}
5845 
5846 		vm_info = (task_vm_info_t)task_info_out;
5847 
5848 		/*
5849 		 * Do not hold both the task and map locks,
5850 		 * so convert the task lock into a map reference,
5851 		 * drop the task lock, then lock the map.
5852 		 */
5853 		if (is_kernel_task) {
5854 			map = kernel_map;
5855 			task_unlock(task);
5856 			/* no lock, no reference */
5857 		} else {
5858 			map = task->map;
5859 			vm_map_reference(map);
5860 			task_unlock(task);
5861 			vm_map_lock_read(map);
5862 		}
5863 
5864 		vm_info->virtual_size = (typeof(vm_info->virtual_size))vm_map_adjusted_size(map);
5865 		vm_info->region_count = map->hdr.nentries;
5866 		vm_info->page_size = vm_map_page_size(map);
5867 
5868 		ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &vm_info->resident_size);
5869 		ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &vm_info->resident_size_peak);
5870 
5871 		vm_info->device = 0;
5872 		vm_info->device_peak = 0;
5873 		ledger_get_balance(task->ledger, task_ledgers.external, (ledger_amount_t *) &vm_info->external);
5874 		ledger_get_lifetime_max(task->ledger, task_ledgers.external, (ledger_amount_t *) &vm_info->external_peak);
5875 		ledger_get_balance(task->ledger, task_ledgers.internal, (ledger_amount_t *) &vm_info->internal);
5876 		ledger_get_lifetime_max(task->ledger, task_ledgers.internal, (ledger_amount_t *) &vm_info->internal_peak);
5877 		ledger_get_balance(task->ledger, task_ledgers.reusable, (ledger_amount_t *) &vm_info->reusable);
5878 		ledger_get_lifetime_max(task->ledger, task_ledgers.reusable, (ledger_amount_t *) &vm_info->reusable_peak);
5879 		ledger_get_balance(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t*) &vm_info->compressed);
5880 		ledger_get_lifetime_max(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t*) &vm_info->compressed_peak);
5881 		ledger_get_entries(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t*) &vm_info->compressed_lifetime, &tmp_amount);
5882 
5883 		vm_info->purgeable_volatile_pmap = 0;
5884 		vm_info->purgeable_volatile_resident = 0;
5885 		vm_info->purgeable_volatile_virtual = 0;
5886 		if (is_kernel_task) {
5887 			/*
5888 			 * We do not maintain the detailed stats for the
5889 			 * kernel_pmap, so just count everything as
5890 			 * "internal"...
5891 			 */
5892 			vm_info->internal = vm_info->resident_size;
5893 			/*
5894 			 * ... but since the memory held by the VM compressor
5895 			 * in the kernel address space ought to be attributed
5896 			 * to user-space tasks, we subtract it from "internal"
5897 			 * to give memory reporting tools a more accurate idea
5898 			 * of what the kernel itself is actually using, instead
5899 			 * of making it look like the kernel is leaking memory
5900 			 * when the system is under memory pressure.
5901 			 */
5902 			vm_info->internal -= (VM_PAGE_COMPRESSOR_COUNT *
5903 			    PAGE_SIZE);
5904 		} else {
5905 			mach_vm_size_t  volatile_virtual_size;
5906 			mach_vm_size_t  volatile_resident_size;
5907 			mach_vm_size_t  volatile_compressed_size;
5908 			mach_vm_size_t  volatile_pmap_size;
5909 			mach_vm_size_t  volatile_compressed_pmap_size;
5910 			kern_return_t   kr;
5911 
5912 			if (flavor == TASK_VM_INFO_PURGEABLE) {
5913 				kr = vm_map_query_volatile(
5914 					map,
5915 					&volatile_virtual_size,
5916 					&volatile_resident_size,
5917 					&volatile_compressed_size,
5918 					&volatile_pmap_size,
5919 					&volatile_compressed_pmap_size);
5920 				if (kr == KERN_SUCCESS) {
5921 					vm_info->purgeable_volatile_pmap =
5922 					    volatile_pmap_size;
5923 					if (radar_20146450) {
5924 						vm_info->compressed -=
5925 						    volatile_compressed_pmap_size;
5926 					}
5927 					vm_info->purgeable_volatile_resident =
5928 					    volatile_resident_size;
5929 					vm_info->purgeable_volatile_virtual =
5930 					    volatile_virtual_size;
5931 				}
5932 			}
5933 		}
5934 		*task_info_count = TASK_VM_INFO_REV0_COUNT;
5935 
5936 		if (original_task_info_count >= TASK_VM_INFO_REV2_COUNT) {
5937 			/* must be captured while we still have the map lock */
5938 			vm_info->min_address = map->min_offset;
5939 			vm_info->max_address = map->max_offset;
5940 		}
5941 
5942 		/*
5943 		 * Done with vm map things, can drop the map lock and reference,
5944 		 * and take the task lock back.
5945 		 *
5946 		 * Re-validate that the task didn't die on us.
5947 		 */
5948 		if (!is_kernel_task) {
5949 			vm_map_unlock_read(map);
5950 			vm_map_deallocate(map);
5951 		}
5952 		map = VM_MAP_NULL;
5953 
5954 		task_lock(task);
5955 
5956 		if ((task != current_task()) && (!task->active)) {
5957 			error = KERN_INVALID_ARGUMENT;
5958 			break;
5959 		}
5960 
5961 		if (original_task_info_count >= TASK_VM_INFO_REV1_COUNT) {
5962 			vm_info->phys_footprint =
5963 			    (mach_vm_size_t) get_task_phys_footprint(task);
5964 			*task_info_count = TASK_VM_INFO_REV1_COUNT;
5965 		}
5966 		if (original_task_info_count >= TASK_VM_INFO_REV2_COUNT) {
5967 			/* data was captured above */
5968 			*task_info_count = TASK_VM_INFO_REV2_COUNT;
5969 		}
5970 
5971 		if (original_task_info_count >= TASK_VM_INFO_REV3_COUNT) {
5972 			ledger_get_lifetime_max(task->ledger,
5973 			    task_ledgers.phys_footprint,
5974 			    &vm_info->ledger_phys_footprint_peak);
5975 			ledger_get_balance(task->ledger,
5976 			    task_ledgers.purgeable_nonvolatile,
5977 			    &vm_info->ledger_purgeable_nonvolatile);
5978 			ledger_get_balance(task->ledger,
5979 			    task_ledgers.purgeable_nonvolatile_compressed,
5980 			    &vm_info->ledger_purgeable_novolatile_compressed);
5981 			ledger_get_balance(task->ledger,
5982 			    task_ledgers.purgeable_volatile,
5983 			    &vm_info->ledger_purgeable_volatile);
5984 			ledger_get_balance(task->ledger,
5985 			    task_ledgers.purgeable_volatile_compressed,
5986 			    &vm_info->ledger_purgeable_volatile_compressed);
5987 			ledger_get_balance(task->ledger,
5988 			    task_ledgers.network_nonvolatile,
5989 			    &vm_info->ledger_tag_network_nonvolatile);
5990 			ledger_get_balance(task->ledger,
5991 			    task_ledgers.network_nonvolatile_compressed,
5992 			    &vm_info->ledger_tag_network_nonvolatile_compressed);
5993 			ledger_get_balance(task->ledger,
5994 			    task_ledgers.network_volatile,
5995 			    &vm_info->ledger_tag_network_volatile);
5996 			ledger_get_balance(task->ledger,
5997 			    task_ledgers.network_volatile_compressed,
5998 			    &vm_info->ledger_tag_network_volatile_compressed);
5999 			ledger_get_balance(task->ledger,
6000 			    task_ledgers.media_footprint,
6001 			    &vm_info->ledger_tag_media_footprint);
6002 			ledger_get_balance(task->ledger,
6003 			    task_ledgers.media_footprint_compressed,
6004 			    &vm_info->ledger_tag_media_footprint_compressed);
6005 			ledger_get_balance(task->ledger,
6006 			    task_ledgers.media_nofootprint,
6007 			    &vm_info->ledger_tag_media_nofootprint);
6008 			ledger_get_balance(task->ledger,
6009 			    task_ledgers.media_nofootprint_compressed,
6010 			    &vm_info->ledger_tag_media_nofootprint_compressed);
6011 			ledger_get_balance(task->ledger,
6012 			    task_ledgers.graphics_footprint,
6013 			    &vm_info->ledger_tag_graphics_footprint);
6014 			ledger_get_balance(task->ledger,
6015 			    task_ledgers.graphics_footprint_compressed,
6016 			    &vm_info->ledger_tag_graphics_footprint_compressed);
6017 			ledger_get_balance(task->ledger,
6018 			    task_ledgers.graphics_nofootprint,
6019 			    &vm_info->ledger_tag_graphics_nofootprint);
6020 			ledger_get_balance(task->ledger,
6021 			    task_ledgers.graphics_nofootprint_compressed,
6022 			    &vm_info->ledger_tag_graphics_nofootprint_compressed);
6023 			ledger_get_balance(task->ledger,
6024 			    task_ledgers.neural_footprint,
6025 			    &vm_info->ledger_tag_neural_footprint);
6026 			ledger_get_balance(task->ledger,
6027 			    task_ledgers.neural_footprint_compressed,
6028 			    &vm_info->ledger_tag_neural_footprint_compressed);
6029 			ledger_get_balance(task->ledger,
6030 			    task_ledgers.neural_nofootprint,
6031 			    &vm_info->ledger_tag_neural_nofootprint);
6032 			ledger_get_balance(task->ledger,
6033 			    task_ledgers.neural_nofootprint_compressed,
6034 			    &vm_info->ledger_tag_neural_nofootprint_compressed);
6035 			*task_info_count = TASK_VM_INFO_REV3_COUNT;
6036 		}
6037 		if (original_task_info_count >= TASK_VM_INFO_REV4_COUNT) {
6038 			if (get_bsdtask_info(task)) {
6039 				vm_info->limit_bytes_remaining =
6040 				    memorystatus_available_memory_internal(get_bsdtask_info(task));
6041 			} else {
6042 				vm_info->limit_bytes_remaining = 0;
6043 			}
6044 			*task_info_count = TASK_VM_INFO_REV4_COUNT;
6045 		}
6046 		if (original_task_info_count >= TASK_VM_INFO_REV5_COUNT) {
6047 			thread_t thread;
6048 			uint64_t total = task->decompressions;
6049 			queue_iterate(&task->threads, thread, thread_t, task_threads) {
6050 				total += thread->decompressions;
6051 			}
6052 			vm_info->decompressions = (int32_t) MIN(total, INT32_MAX);
6053 			*task_info_count = TASK_VM_INFO_REV5_COUNT;
6054 		}
6055 		if (original_task_info_count >= TASK_VM_INFO_REV6_COUNT) {
6056 			ledger_get_balance(task->ledger, task_ledgers.swapins,
6057 			    &vm_info->ledger_swapins);
6058 			*task_info_count = TASK_VM_INFO_REV6_COUNT;
6059 		}
6060 
6061 		break;
6062 	}
6063 
6064 	case TASK_WAIT_STATE_INFO:
6065 	{
6066 		/*
6067 		 * Deprecated flavor. Currently allowing some results until all users
6068 		 * stop calling it. The results may not be accurate.
6069 		 */
6070 		task_wait_state_info_t  wait_state_info;
6071 		uint64_t total_sfi_ledger_val = 0;
6072 
6073 		if (*task_info_count < TASK_WAIT_STATE_INFO_COUNT) {
6074 			error = KERN_INVALID_ARGUMENT;
6075 			break;
6076 		}
6077 
6078 		wait_state_info = (task_wait_state_info_t) task_info_out;
6079 
6080 		wait_state_info->total_wait_state_time = 0;
6081 		bzero(wait_state_info->_reserved, sizeof(wait_state_info->_reserved));
6082 
6083 #if CONFIG_SCHED_SFI
6084 		int i, prev_lentry = -1;
6085 		int64_t  val_credit, val_debit;
6086 
6087 		for (i = 0; i < MAX_SFI_CLASS_ID; i++) {
6088 			val_credit = 0;
6089 			/*
6090 			 * checking with prev_lentry != entry ensures adjacent classes
6091 			 * which share the same ledger do not add wait times twice.
6092 			 * Note: Use ledger() call to get data for each individual sfi class.
6093 			 */
6094 			if (prev_lentry != task_ledgers.sfi_wait_times[i] &&
6095 			    KERN_SUCCESS == ledger_get_entries(task->ledger,
6096 			    task_ledgers.sfi_wait_times[i], &val_credit, &val_debit)) {
6097 				total_sfi_ledger_val += val_credit;
6098 			}
6099 			prev_lentry = task_ledgers.sfi_wait_times[i];
6100 		}
6101 
6102 #endif /* CONFIG_SCHED_SFI */
6103 		wait_state_info->total_wait_sfi_state_time = total_sfi_ledger_val;
6104 		*task_info_count = TASK_WAIT_STATE_INFO_COUNT;
6105 
6106 		break;
6107 	}
6108 	case TASK_VM_INFO_PURGEABLE_ACCOUNT:
6109 	{
6110 #if DEVELOPMENT || DEBUG
6111 		pvm_account_info_t      acnt_info;
6112 
6113 		if (*task_info_count < PVM_ACCOUNT_INFO_COUNT) {
6114 			error = KERN_INVALID_ARGUMENT;
6115 			break;
6116 		}
6117 
6118 		if (task_info_out == NULL) {
6119 			error = KERN_INVALID_ARGUMENT;
6120 			break;
6121 		}
6122 
6123 		acnt_info = (pvm_account_info_t) task_info_out;
6124 
6125 		error = vm_purgeable_account(task, acnt_info);
6126 
6127 		*task_info_count = PVM_ACCOUNT_INFO_COUNT;
6128 
6129 		break;
6130 #else /* DEVELOPMENT || DEBUG */
6131 		error = KERN_NOT_SUPPORTED;
6132 		break;
6133 #endif /* DEVELOPMENT || DEBUG */
6134 	}
6135 	case TASK_FLAGS_INFO:
6136 	{
6137 		task_flags_info_t               flags_info;
6138 
6139 		if (*task_info_count < TASK_FLAGS_INFO_COUNT) {
6140 			error = KERN_INVALID_ARGUMENT;
6141 			break;
6142 		}
6143 
6144 		flags_info = (task_flags_info_t)task_info_out;
6145 
6146 		/* only publish the 64-bit flag of the task */
6147 		flags_info->flags = task->t_flags & (TF_64B_ADDR | TF_64B_DATA);
6148 
6149 		*task_info_count = TASK_FLAGS_INFO_COUNT;
6150 		break;
6151 	}
6152 
6153 	case TASK_DEBUG_INFO_INTERNAL:
6154 	{
6155 #if DEVELOPMENT || DEBUG
6156 		task_debug_info_internal_t dbg_info;
6157 		ipc_space_t space = task->itk_space;
6158 		if (*task_info_count < TASK_DEBUG_INFO_INTERNAL_COUNT) {
6159 			error = KERN_NOT_SUPPORTED;
6160 			break;
6161 		}
6162 
6163 		if (task_info_out == NULL) {
6164 			error = KERN_INVALID_ARGUMENT;
6165 			break;
6166 		}
6167 		dbg_info = (task_debug_info_internal_t) task_info_out;
6168 		dbg_info->ipc_space_size = 0;
6169 
6170 		if (space) {
6171 			smr_ipc_enter();
6172 			ipc_entry_table_t table = smr_entered_load(&space->is_table);
6173 			if (table) {
6174 				dbg_info->ipc_space_size =
6175 				    ipc_entry_table_count(table);
6176 			}
6177 			smr_ipc_leave();
6178 		}
6179 
6180 		dbg_info->suspend_count = task->suspend_count;
6181 
6182 		error = KERN_SUCCESS;
6183 		*task_info_count = TASK_DEBUG_INFO_INTERNAL_COUNT;
6184 		break;
6185 #else /* DEVELOPMENT || DEBUG */
6186 		error = KERN_NOT_SUPPORTED;
6187 		break;
6188 #endif /* DEVELOPMENT || DEBUG */
6189 	}
6190 	case TASK_SUSPEND_STATS_INFO:
6191 	{
6192 #if CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG)
6193 		if (*task_info_count < TASK_SUSPEND_STATS_INFO_COUNT || task_info_out == NULL) {
6194 			error = KERN_INVALID_ARGUMENT;
6195 			break;
6196 		}
6197 		error = _task_get_suspend_stats_locked(task, (task_suspend_stats_t)task_info_out);
6198 		*task_info_count = TASK_SUSPEND_STATS_INFO_COUNT;
6199 		break;
6200 #else /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6201 		error = KERN_NOT_SUPPORTED;
6202 		break;
6203 #endif /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6204 	}
6205 	case TASK_SUSPEND_SOURCES_INFO:
6206 	{
6207 #if CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG)
6208 		if (*task_info_count < TASK_SUSPEND_SOURCES_INFO_COUNT || task_info_out == NULL) {
6209 			error = KERN_INVALID_ARGUMENT;
6210 			break;
6211 		}
6212 		error = _task_get_suspend_sources_locked(task, (task_suspend_source_t)task_info_out);
6213 		*task_info_count = TASK_SUSPEND_SOURCES_INFO_COUNT;
6214 		break;
6215 #else /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6216 		error = KERN_NOT_SUPPORTED;
6217 		break;
6218 #endif /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6219 	}
6220 	default:
6221 		error = KERN_INVALID_ARGUMENT;
6222 	}
6223 
6224 	task_unlock(task);
6225 	return error;
6226 }
6227 
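/*
 * User-space usage sketch (public Mach API, for illustration): the
 * count argument is in/out - initialize it to the flavor's _COUNT
 * value, and on return it holds the number of natural_t elements
 * actually filled in.
 *
 *	mach_task_basic_info_data_t info;
 *	mach_msg_type_number_t count = MACH_TASK_BASIC_INFO_COUNT;
 *	kern_return_t kr = task_info(mach_task_self(), MACH_TASK_BASIC_INFO,
 *	    (task_info_t)&info, &count);
 */
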
6228 /*
6229  * task_info_from_user
6230  *
6231  * When task_info is called from user space, this function is
6232  * executed as the MIG server-side routine instead of calling
6233  * directly into task_info.  This makes it possible to perform
6234  * additional security checks on the task_port before the info
6235  * is returned.
6236  *
6237  * In the case of TASK_DYLD_INFO, we require the more privileged
6238  * task_read_port, not the less-privileged task_name_port.
6239  *
6240  */
6241 kern_return_t
6242 task_info_from_user(
6243 	mach_port_t             task_port,
6244 	task_flavor_t           flavor,
6245 	task_info_t             task_info_out,
6246 	mach_msg_type_number_t  *task_info_count)
6247 {
6248 	task_t task;
6249 	kern_return_t ret;
6250 
6251 	if (flavor == TASK_DYLD_INFO) {
6252 		task = convert_port_to_task_read(task_port);
6253 	} else {
6254 		task = convert_port_to_task_name(task_port);
6255 	}
6256 
6257 	ret = task_info(task, flavor, task_info_out, task_info_count);
6258 
6259 	task_deallocate(task);
6260 
6261 	return ret;
6262 }
6263 
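/*
 * Sketch of the privilege distinction above (illustrative): a caller
 * holding only a task name port can still fetch the name-port flavors,
 * but TASK_DYLD_INFO fails because convert_port_to_task_read() rejects
 * the name port, yielding TASK_NULL:
 *
 *	mach_port_name_t name_port;
 *	task_name_for_pid(mach_task_self(), pid, &name_port);
 *	kr = task_info(name_port, TASK_DYLD_INFO,
 *	    (task_info_t)&dyld_info, &count);
 *
 * and kr comes back KERN_INVALID_ARGUMENT.
 */
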
6264 /*
6265  * Routine: task_dyld_process_info_update_helper
6266  *
6267  * Release send rights in release_ports.
6268  *
6269  * If no active ports found in task's dyld notifier array, unset the magic value
6270  * If no active ports are found in the task's dyld notifier array, unset the
6271  * magic value in user space to indicate so.
6272  * Condition:
6273  *      task's itk_lock is locked, and is unlocked upon return.
6274  *      Global g_dyldinfo_mtx is locked, and is unlocked upon return.
6275  */
6276 void
6277 task_dyld_process_info_update_helper(
6278 	task_t                  task,
6279 	size_t                  active_count,
6280 	vm_map_address_t        magic_addr,    /* a userspace address */
6281 	ipc_port_t             *release_ports,
6282 	size_t                  release_count)
6283 {
6284 	void *notifiers_ptr = NULL;
6285 
6286 	assert(release_count <= DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT);
6287 
6288 	if (active_count == 0) {
6289 		assert(task->itk_dyld_notify != NULL);
6290 		notifiers_ptr = task->itk_dyld_notify;
6291 		task->itk_dyld_notify = NULL;
6292 		itk_unlock(task);
6293 
6294 		kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
6295 		(void)copyoutmap_atomic32(task->map, MACH_PORT_NULL, magic_addr); /* unset magic */
6296 	} else {
6297 		itk_unlock(task);
6298 		(void)copyoutmap_atomic32(task->map, (mach_port_name_t)DYLD_PROCESS_INFO_NOTIFY_MAGIC,
6299 		    magic_addr);     /* reset magic */
6300 	}
6301 
6302 	lck_mtx_unlock(&g_dyldinfo_mtx);
6303 
6304 	for (size_t i = 0; i < release_count; i++) {
6305 		ipc_port_release_send(release_ports[i]);
6306 	}
6307 }
6308 
6309 /*
6310  * Routine: task_dyld_process_info_notify_register
6311  *
6312  * Insert a send right to target task's itk_dyld_notify array. Allocate kernel
6313  * memory for the array if it's the first port to be registered. Also cleanup
6314  * memory for the array if it's the first port to be registered. Also clean up
6315  *
6316  * Consumes sright if returns KERN_SUCCESS, otherwise MIG will destroy it.
6317  *
6318  * Args:
6319  *     task:   Target task for the registration.
6320  *     sright: A send right.
6321  *
6322  * Returns:
6323  *     KERN_SUCCESS: Registration succeeded.
6324  *     KERN_INVALID_TASK: task is invalid.
6325  *     KERN_INVALID_RIGHT: sright is invalid.
6326  *     KERN_DENIED: Security policy denied this call.
6327  *     KERN_RESOURCE_SHORTAGE: Kernel memory allocation failed.
6328  *     KERN_NO_SPACE: No available notifier port slot left for this task.
6329  *     KERN_RIGHT_EXISTS: The notifier port is already registered and active.
6330  *
6331  *     Other error code see task_info().
6332  *     For other error codes, see task_info().
6333  * See Also:
6334  *     task_dyld_process_info_notify_get_trap() in mach_kernelrpc.c
6335  */
6336 kern_return_t
6337 task_dyld_process_info_notify_register(
6338 	task_t                  task,
6339 	ipc_port_t              sright)
6340 {
6341 	struct task_dyld_info dyld_info;
6342 	mach_msg_type_number_t info_count = TASK_DYLD_INFO_COUNT;
6343 	ipc_port_t release_ports[DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT];
6344 	uint32_t release_count = 0, active_count = 0;
6345 	mach_vm_address_t ports_addr; /* a user space address */
6346 	kern_return_t kr;
6347 	boolean_t right_exists = false;
6348 	ipc_port_t *notifiers_ptr = NULL;
6349 	ipc_port_t *portp;
6350 
6351 	if (task == TASK_NULL || task == kernel_task) {
6352 		return KERN_INVALID_TASK;
6353 	}
6354 
6355 	if (!IP_VALID(sright)) {
6356 		return KERN_INVALID_RIGHT;
6357 	}
6358 
6359 #if CONFIG_MACF
6360 	if (mac_task_check_dyld_process_info_notify_register()) {
6361 		return KERN_DENIED;
6362 	}
6363 #endif
6364 
6365 	kr = task_info(task, TASK_DYLD_INFO, (task_info_t)&dyld_info, &info_count);
6366 	if (kr) {
6367 		return kr;
6368 	}
6369 
6370 	if (dyld_info.all_image_info_format == TASK_DYLD_ALL_IMAGE_INFO_32) {
6371 		ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6372 		    offsetof(struct user32_dyld_all_image_infos, notifyMachPorts));
6373 	} else {
6374 		ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6375 		    offsetof(struct user64_dyld_all_image_infos, notifyMachPorts));
6376 	}
6377 
6378 	if (task->itk_dyld_notify == NULL) {
6379 		notifiers_ptr = kalloc_type(ipc_port_t,
6380 		    DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT,
6381 		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
6382 	}
6383 
6384 	lck_mtx_lock(&g_dyldinfo_mtx);
6385 	itk_lock(task);
6386 
6387 	if (task->itk_dyld_notify == NULL) {
6388 		task->itk_dyld_notify = notifiers_ptr;
6389 		notifiers_ptr = NULL;
6390 	}
6391 
6392 	assert(task->itk_dyld_notify != NULL);
6393 	/* First pass: clear dead names and check for duplicate registration */
6394 	for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
6395 		portp = &task->itk_dyld_notify[slot];
6396 		if (*portp != IPC_PORT_NULL && !ip_active(*portp)) {
6397 			release_ports[release_count++] = *portp;
6398 			*portp = IPC_PORT_NULL;
6399 		} else if (*portp == sright) {
6400 			/* the port is already registered and is active */
6401 			right_exists = true;
6402 		}
6403 
6404 		if (*portp != IPC_PORT_NULL) {
6405 			active_count++;
6406 		}
6407 	}
6408 
6409 	if (right_exists) {
6410 		/* skip second pass */
6411 		kr = KERN_RIGHT_EXISTS;
6412 		goto out;
6413 	}
6414 
6415 	/* Second pass: register the port */
6416 	kr = KERN_NO_SPACE;
6417 	for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
6418 		portp = &task->itk_dyld_notify[slot];
6419 		if (*portp == IPC_PORT_NULL) {
6420 			*portp = sright;
6421 			active_count++;
6422 			kr = KERN_SUCCESS;
6423 			break;
6424 		}
6425 	}
6426 
6427 out:
6428 	assert(active_count > 0);
6429 
6430 	task_dyld_process_info_update_helper(task, active_count,
6431 	    (vm_map_address_t)ports_addr, release_ports, release_count);
6432 	/* itk_lock, g_dyldinfo_mtx are unlocked upon return */
6433 
6434 	kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
6435 
6436 	return kr;
6437 }
6438 
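/*
 * Registration flow sketch (illustrative; the real client is dyld's
 * process-info notification machinery in user space): allocate a
 * receive right, let the MIG stub make a send right out of it for
 * registration, and deregister later with the receive-right name.
 *
 *	mach_port_t port;
 *	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port);
 *	kr = task_dyld_process_info_notify_register(target_task, port);
 *	...
 *	kr = task_dyld_process_info_notify_deregister(target_task, port);
 */
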
6439 /*
6440  * Routine: task_dyld_process_info_notify_deregister
6441  *
6442  * Remove a send right in target task's itk_dyld_notify array matching the receive
6443  * right name passed in. Deallocate kernel memory for the array if it's the last port to
6444  * be deregistered, or all ports have died. Also cleanup any dead rights found in the array.
6445  * be deregistered, or all ports have died. Also clean up any dead rights found in the array.
6446  * Does not consume any reference.
6447  *
6448  * Args:
6449  *     task: Target task for the deregistration.
6450  *     rcv_name: The name denoting the receive right in caller's space.
6451  *
6452  * Returns:
6453  *     KERN_SUCCESS: A matching entry was found and deregistration succeeded.
6454  *     KERN_INVALID_TASK: task is invalid.
6455  *     KERN_INVALID_NAME: name is invalid.
6456  *     KERN_DENIED: Security policy denied this call.
6457  *     KERN_FAILURE: A matching entry is not found.
6458  *     KERN_INVALID_RIGHT: The name passed in does not represent a valid rcv right.
6459  *
6460  *     For other error codes, see task_info().
6461  *
6462  * See Also:
6463  *     task_dyld_process_info_notify_get_trap() in mach_kernelrpc.c
6464  */
6465 kern_return_t
6466 task_dyld_process_info_notify_deregister(
6467 	task_t                  task,
6468 	mach_port_name_t        rcv_name)
6469 {
6470 	struct task_dyld_info dyld_info;
6471 	mach_msg_type_number_t info_count = TASK_DYLD_INFO_COUNT;
6472 	ipc_port_t release_ports[DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT];
6473 	uint32_t release_count = 0, active_count = 0;
6474 	boolean_t port_found = false;
6475 	mach_vm_address_t ports_addr; /* a user space address */
6476 	ipc_port_t sright;
6477 	kern_return_t kr;
6478 	ipc_port_t *portp;
6479 
6480 	if (task == TASK_NULL || task == kernel_task) {
6481 		return KERN_INVALID_TASK;
6482 	}
6483 
6484 	if (!MACH_PORT_VALID(rcv_name)) {
6485 		return KERN_INVALID_NAME;
6486 	}
6487 
6488 #if CONFIG_MACF
6489 	if (mac_task_check_dyld_process_info_notify_register()) {
6490 		return KERN_DENIED;
6491 	}
6492 #endif
6493 
6494 	kr = task_info(task, TASK_DYLD_INFO, (task_info_t)&dyld_info, &info_count);
6495 	if (kr) {
6496 		return kr;
6497 	}
6498 
6499 	if (dyld_info.all_image_info_format == TASK_DYLD_ALL_IMAGE_INFO_32) {
6500 		ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6501 		    offsetof(struct user32_dyld_all_image_infos, notifyMachPorts));
6502 	} else {
6503 		ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6504 		    offsetof(struct user64_dyld_all_image_infos, notifyMachPorts));
6505 	}
6506 
6507 	kr = ipc_port_translate_receive(current_space(), rcv_name, &sright); /* does not produce port ref */
6508 	if (kr) {
6509 		return KERN_INVALID_RIGHT;
6510 	}
6511 
6512 	ip_reference(sright);
6513 	ip_mq_unlock(sright);
6514 
6515 	assert(sright != IPC_PORT_NULL);
6516 
6517 	lck_mtx_lock(&g_dyldinfo_mtx);
6518 	itk_lock(task);
6519 
6520 	if (task->itk_dyld_notify == NULL) {
6521 		itk_unlock(task);
6522 		lck_mtx_unlock(&g_dyldinfo_mtx);
6523 		ip_release(sright);
6524 		return KERN_FAILURE;
6525 	}
6526 
6527 	for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
6528 		portp = &task->itk_dyld_notify[slot];
6529 		if (*portp == sright) {
6530 			release_ports[release_count++] = *portp;
6531 			*portp = IPC_PORT_NULL;
6532 			port_found = true;
6533 		} else if ((*portp != IPC_PORT_NULL && !ip_active(*portp))) {
6534 			release_ports[release_count++] = *portp;
6535 			*portp = IPC_PORT_NULL;
6536 		}
6537 
6538 		if (*portp != IPC_PORT_NULL) {
6539 			active_count++;
6540 		}
6541 	}
6542 
6543 	task_dyld_process_info_update_helper(task, active_count,
6544 	    (vm_map_address_t)ports_addr, release_ports, release_count);
6545 	/* itk_lock, g_dyldinfo_mtx are unlocked upon return */
6546 
6547 	ip_release(sright);
6548 
6549 	return port_found ? KERN_SUCCESS : KERN_FAILURE;
6550 }
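
/*
 * Illustrative sketch, not part of xnu: ipc_port_translate_receive()
 * returns the port locked and does not grant a reference, which is why
 * the deregister path above takes its own reference before unlocking.
 * The general shape of that idiom:
 */
#if 0
	ipc_port_t port;
	kern_return_t kr;

	kr = ipc_port_translate_receive(current_space(), name, &port);
	if (kr != KERN_SUCCESS) {
		return KERN_INVALID_RIGHT;
	}
	/* port is locked here */
	ip_reference(port);             /* keep it alive past the unlock */
	ip_mq_unlock(port);

	/* ... use port without its lock held ... */

	ip_release(port);               /* drop the reference taken above */
#endif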
6551 
6552 /*
6553  *	task_power_info
6554  *
6555  *	Returns power stats for the task.
6556  *	Note: Called with task locked.
6557  */
6558 void
6559 task_power_info_locked(
6560 	task_t                        task,
6561 	task_power_info_t             info,
6562 	gpu_energy_data_t             ginfo,
6563 	task_power_info_v2_t          infov2,
6564 	struct task_power_info_extra *extra_info)
6565 {
6566 	thread_t                thread;
6567 	ledger_amount_t         tmp;
6568 
6569 	uint64_t                runnable_time_sum = 0;
6570 
6571 	task_lock_assert_owned(task);
6572 
6573 	ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
6574 	    (ledger_amount_t *)&info->task_interrupt_wakeups, &tmp);
6575 	ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
6576 	    (ledger_amount_t *)&info->task_platform_idle_wakeups, &tmp);
6577 
6578 	info->task_timer_wakeups_bin_1 = task->task_timer_wakeups_bin_1;
6579 	info->task_timer_wakeups_bin_2 = task->task_timer_wakeups_bin_2;
6580 
6581 	struct recount_usage usage = { 0 };
6582 	struct recount_usage usage_perf = { 0 };
6583 	recount_task_usage_perf_only(task, &usage, &usage_perf);
6584 
6585 	info->total_user = usage.ru_metrics[RCT_LVL_USER].rm_time_mach;
6586 	info->total_system = recount_usage_system_time_mach(&usage);
6587 	runnable_time_sum = task->total_runnable_time;
6588 
6589 	if (ginfo) {
6590 		ginfo->task_gpu_utilisation = task->task_gpu_ns;
6591 	}
6592 
6593 	if (infov2) {
6594 		infov2->task_ptime = recount_usage_time_mach(&usage_perf);
6595 		infov2->task_pset_switches = task->ps_switch;
6596 #if CONFIG_PERVASIVE_ENERGY
6597 		infov2->task_energy = usage.ru_energy_nj;
6598 #endif /* CONFIG_PERVASIVE_ENERGY */
6599 	}
6600 
6601 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
6602 		spl_t x;
6603 
6604 		if (thread->options & TH_OPT_IDLE_THREAD) {
6605 			continue;
6606 		}
6607 
6608 		x = splsched();
6609 		thread_lock(thread);
6610 
6611 		info->task_timer_wakeups_bin_1 += thread->thread_timer_wakeups_bin_1;
6612 		info->task_timer_wakeups_bin_2 += thread->thread_timer_wakeups_bin_2;
6613 
6614 		if (infov2) {
6615 			infov2->task_pset_switches += thread->ps_switch;
6616 		}
6617 
6618 		runnable_time_sum += timer_grab(&thread->runnable_timer);
6619 
6620 		if (ginfo) {
6621 			ginfo->task_gpu_utilisation += ml_gpu_stat(thread);
6622 		}
6623 		thread_unlock(thread);
6624 		splx(x);
6625 	}
6626 
6627 	if (extra_info) {
6628 		extra_info->runnable_time = runnable_time_sum;
6629 #if CONFIG_PERVASIVE_CPI
6630 		extra_info->cycles = recount_usage_cycles(&usage);
6631 		extra_info->instructions = recount_usage_instructions(&usage);
6632 		extra_info->pcycles = recount_usage_cycles(&usage_perf);
6633 		extra_info->pinstructions = recount_usage_instructions(&usage_perf);
6634 		extra_info->user_ptime = usage_perf.ru_metrics[RCT_LVL_USER].rm_time_mach;
6635 		extra_info->system_ptime = recount_usage_system_time_mach(&usage_perf);
6636 #endif // CONFIG_PERVASIVE_CPI
6637 #if CONFIG_PERVASIVE_ENERGY
6638 		extra_info->energy = usage.ru_energy_nj;
6639 		extra_info->penergy = usage_perf.ru_energy_nj;
6640 #endif // CONFIG_PERVASIVE_ENERGY
6641 #if RECOUNT_SECURE_METRICS
6642 		if (PE_i_can_has_debugger(NULL)) {
6643 			extra_info->secure_time = usage.ru_metrics[RCT_LVL_SECURE].rm_time_mach;
6644 			extra_info->secure_ptime = usage_perf.ru_metrics[RCT_LVL_SECURE].rm_time_mach;
6645 		}
6646 #endif // RECOUNT_SECURE_METRICS
6647 	}
6648 }
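
/*
 * Illustrative sketch, not part of xnu: task_power_info_locked() must be
 * called with the task lock held; the gpu/v2/extra out-parameters are
 * optional. A hypothetical caller wanting only the base payload:
 */
#if 0
	task_power_info_data_t info;

	task_lock(task);
	task_power_info_locked(task, &info, NULL, NULL, NULL);
	task_unlock(task);
	/* info.total_user / info.total_system now hold Mach-time totals */
#endif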
6649 
6650 /*
6651  *	task_gpu_utilisation
6652  *
6653  *	Returns the total gpu time used by all the threads of the task
6654  *	(both dead and alive)
6655  */
6656 uint64_t
6657 task_gpu_utilisation(
6658 	task_t  task)
6659 {
6660 	uint64_t gpu_time = 0;
6661 #if defined(__x86_64__)
6662 	thread_t thread;
6663 
6664 	task_lock(task);
6665 	gpu_time += task->task_gpu_ns;
6666 
6667 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
6668 		spl_t x;
6669 		x = splsched();
6670 		thread_lock(thread);
6671 		gpu_time += ml_gpu_stat(thread);
6672 		thread_unlock(thread);
6673 		splx(x);
6674 	}
6675 
6676 	task_unlock(task);
6677 #else /* defined(__x86_64__) */
6678 	/* silence compiler warning */
6679 	(void)task;
6680 #endif /* defined(__x86_64__) */
6681 	return gpu_time;
6682 }
6683 
6684 /* This function updates the cpu time in the arrays for each
6685  * effective and requested QoS class
6686  */
6687 void
6688 task_update_cpu_time_qos_stats(
6689 	task_t  task,
6690 	uint64_t *eqos_stats,
6691 	uint64_t *rqos_stats)
6692 {
6693 	if (!eqos_stats && !rqos_stats) {
6694 		return;
6695 	}
6696 
6697 	task_lock(task);
6698 	thread_t thread;
6699 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
6700 		if (thread->options & TH_OPT_IDLE_THREAD) {
6701 			continue;
6702 		}
6703 
6704 		thread_update_qos_cpu_time(thread);
6705 	}
6706 
6707 	if (eqos_stats) {
6708 		eqos_stats[THREAD_QOS_DEFAULT] += task->cpu_time_eqos_stats.cpu_time_qos_default;
6709 		eqos_stats[THREAD_QOS_MAINTENANCE] += task->cpu_time_eqos_stats.cpu_time_qos_maintenance;
6710 		eqos_stats[THREAD_QOS_BACKGROUND] += task->cpu_time_eqos_stats.cpu_time_qos_background;
6711 		eqos_stats[THREAD_QOS_UTILITY] += task->cpu_time_eqos_stats.cpu_time_qos_utility;
6712 		eqos_stats[THREAD_QOS_LEGACY] += task->cpu_time_eqos_stats.cpu_time_qos_legacy;
6713 		eqos_stats[THREAD_QOS_USER_INITIATED] += task->cpu_time_eqos_stats.cpu_time_qos_user_initiated;
6714 		eqos_stats[THREAD_QOS_USER_INTERACTIVE] += task->cpu_time_eqos_stats.cpu_time_qos_user_interactive;
6715 	}
6716 
6717 	if (rqos_stats) {
6718 		rqos_stats[THREAD_QOS_DEFAULT] += task->cpu_time_rqos_stats.cpu_time_qos_default;
6719 		rqos_stats[THREAD_QOS_MAINTENANCE] += task->cpu_time_rqos_stats.cpu_time_qos_maintenance;
6720 		rqos_stats[THREAD_QOS_BACKGROUND] += task->cpu_time_rqos_stats.cpu_time_qos_background;
6721 		rqos_stats[THREAD_QOS_UTILITY] += task->cpu_time_rqos_stats.cpu_time_qos_utility;
6722 		rqos_stats[THREAD_QOS_LEGACY] += task->cpu_time_rqos_stats.cpu_time_qos_legacy;
6723 		rqos_stats[THREAD_QOS_USER_INITIATED] += task->cpu_time_rqos_stats.cpu_time_qos_user_initiated;
6724 		rqos_stats[THREAD_QOS_USER_INTERACTIVE] += task->cpu_time_rqos_stats.cpu_time_qos_user_interactive;
6725 	}
6726 
6727 	task_unlock(task);
6728 }
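
/*
 * Illustrative sketch, not part of xnu: callers pass arrays indexed by
 * QoS class, and the routine accumulates into them, so they must start
 * zeroed (or hold a running total). Assuming THREAD_QOS_LAST bounds the
 * QoS enumeration:
 */
#if 0
	uint64_t eqos[THREAD_QOS_LAST] = { 0 };
	uint64_t rqos[THREAD_QOS_LAST] = { 0 };

	task_update_cpu_time_qos_stats(task, eqos, rqos);
	/* eqos[THREAD_QOS_UTILITY] etc. now hold accumulated CPU time */
#endif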
6729 
6730 kern_return_t
6731 task_purgable_info(
6732 	task_t                  task,
6733 	task_purgable_info_t    *stats)
6734 {
6735 	if (task == TASK_NULL || stats == NULL) {
6736 		return KERN_INVALID_ARGUMENT;
6737 	}
6738 	/* Take task reference */
6739 	task_reference(task);
6740 	vm_purgeable_stats((vm_purgeable_info_t)stats, task);
6741 	/* Drop task reference */
6742 	task_deallocate(task);
6743 	return KERN_SUCCESS;
6744 }
6745 
6746 void
6747 task_vtimer_set(
6748 	task_t          task,
6749 	integer_t       which)
6750 {
6751 	thread_t        thread;
6752 	spl_t           x;
6753 
6754 	task_lock(task);
6755 
6756 	task->vtimers |= which;
6757 
6758 	switch (which) {
6759 	case TASK_VTIMER_USER:
6760 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
6761 			x = splsched();
6762 			thread_lock(thread);
6763 			struct recount_times_mach times = recount_thread_times(thread);
6764 			thread->vtimer_user_save = times.rtm_user;
6765 			thread_unlock(thread);
6766 			splx(x);
6767 		}
6768 		break;
6769 
6770 	case TASK_VTIMER_PROF:
6771 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
6772 			x = splsched();
6773 			thread_lock(thread);
6774 			thread->vtimer_prof_save = recount_thread_time_mach(thread);
6775 			thread_unlock(thread);
6776 			splx(x);
6777 		}
6778 		break;
6779 
6780 	case TASK_VTIMER_RLIM:
6781 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
6782 			x = splsched();
6783 			thread_lock(thread);
6784 			thread->vtimer_rlim_save = recount_thread_time_mach(thread);
6785 			thread_unlock(thread);
6786 			splx(x);
6787 		}
6788 		break;
6789 	}
6790 
6791 	task_unlock(task);
6792 }
6793 
6794 void
6795 task_vtimer_clear(
6796 	task_t          task,
6797 	integer_t       which)
6798 {
6799 	task_lock(task);
6800 
6801 	task->vtimers &= ~which;
6802 
6803 	task_unlock(task);
6804 }
6805 
6806 void
6807 task_vtimer_update(
6808 	__unused
6809 	task_t          task,
6810 	integer_t       which,
6811 	uint32_t        *microsecs)
6812 {
6813 	thread_t        thread = current_thread();
6814 	uint32_t        tdelt = 0;
6815 	clock_sec_t     secs = 0;
6816 	uint64_t        tsum;
6817 
6818 	assert(task == current_task());
6819 
6820 	spl_t s = splsched();
6821 	thread_lock(thread);
6822 
6823 	if ((task->vtimers & which) != (uint32_t)which) {
6824 		thread_unlock(thread);
6825 		splx(s);
6826 		return;
6827 	}
6828 
6829 	switch (which) {
6830 	case TASK_VTIMER_USER:;
6831 		struct recount_times_mach times = recount_thread_times(thread);
6832 		tsum = times.rtm_user;
6833 		tdelt = (uint32_t)(tsum - thread->vtimer_user_save);
6834 		thread->vtimer_user_save = tsum;
6835 		absolutetime_to_microtime(tdelt, &secs, microsecs);
6836 		break;
6837 
6838 	case TASK_VTIMER_PROF:
6839 		tsum = recount_current_thread_time_mach();
6840 		tdelt = (uint32_t)(tsum - thread->vtimer_prof_save);
6841 		absolutetime_to_microtime(tdelt, &secs, microsecs);
6842 		/* if the time delta is smaller than a usec, ignore */
6843 		if (*microsecs != 0) {
6844 			thread->vtimer_prof_save = tsum;
6845 		}
6846 		break;
6847 
6848 	case TASK_VTIMER_RLIM:
6849 		tsum = recount_current_thread_time_mach();
6850 		tdelt = (uint32_t)(tsum - thread->vtimer_rlim_save);
6851 		thread->vtimer_rlim_save = tsum;
6852 		absolutetime_to_microtime(tdelt, &secs, microsecs);
6853 		break;
6854 	}
6855 
6856 	thread_unlock(thread);
6857 	splx(s);
6858 }
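
/*
 * Illustrative sketch, not part of xnu: the vtimer entry points form a
 * set/poll/clear triple (used, for example, by the BSD itimer code).
 * A hypothetical consumer running on a thread of the target task:
 */
#if 0
	uint32_t usecs = 0;

	task_vtimer_set(task, TASK_VTIMER_USER);   /* snapshot per-thread user time */
	/* ... later, on the timer path ... */
	task_vtimer_update(task, TASK_VTIMER_USER, &usecs);
	/* usecs now holds user-mode microseconds since the last update */
	task_vtimer_clear(task, TASK_VTIMER_USER); /* stop the accounting */
#endif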
6859 
6860 uint64_t
6861 get_task_dispatchqueue_offset(
6862 	task_t          task)
6863 {
6864 	return task->dispatchqueue_offset;
6865 }
6866 
6867 void
6868 task_synchronizer_destroy_all(task_t task)
6869 {
6870 	/*
6871 	 *  Destroy owned semaphores
6872 	 */
6873 	semaphore_destroy_all(task);
6874 }
6875 
6876 /*
6877  * Install default (machine-dependent) initial thread state
6878  * on the task.  Subsequent thread creation will have this initial
6879  * state set on the thread by machine_thread_inherit_taskwide().
6880  * Flavors and structures are exactly the same as those to thread_set_state()
6881  */
6882 kern_return_t
6883 task_set_state(
6884 	task_t task,
6885 	int flavor,
6886 	thread_state_t state,
6887 	mach_msg_type_number_t state_count)
6888 {
6889 	kern_return_t ret;
6890 
6891 	if (task == TASK_NULL) {
6892 		return KERN_INVALID_ARGUMENT;
6893 	}
6894 
6895 	task_lock(task);
6896 
6897 	if (!task->active) {
6898 		task_unlock(task);
6899 		return KERN_FAILURE;
6900 	}
6901 
6902 	ret = machine_task_set_state(task, flavor, state, state_count);
6903 
6904 	task_unlock(task);
6905 	return ret;
6906 }
6907 
6908 /*
6909  * Examine the default (machine-dependent) initial thread state
6910  * on the task, as set by task_set_state().  Flavors and structures
6911  * are exactly the same as those passed to thread_get_state().
6912  */
6913 kern_return_t
6914 task_get_state(
6915 	task_t  task,
6916 	int     flavor,
6917 	thread_state_t state,
6918 	mach_msg_type_number_t *state_count)
6919 {
6920 	kern_return_t ret;
6921 
6922 	if (task == TASK_NULL) {
6923 		return KERN_INVALID_ARGUMENT;
6924 	}
6925 
6926 	task_lock(task);
6927 
6928 	if (!task->active) {
6929 		task_unlock(task);
6930 		return KERN_FAILURE;
6931 	}
6932 
6933 	ret = machine_task_get_state(task, flavor, state, state_count);
6934 
6935 	task_unlock(task);
6936 	return ret;
6937 }
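
/*
 * Illustrative sketch, not part of xnu: the flavor/count conventions for
 * the pair above match thread_set_state()/thread_get_state().
 * ARM_DEBUG_STATE64 is used purely as an example flavor here:
 */
#if 0
	arm_debug_state64_t dbg = { 0 };
	mach_msg_type_number_t count = ARM_DEBUG_STATE64_COUNT;
	kern_return_t kr;

	kr = task_set_state(task, ARM_DEBUG_STATE64, (thread_state_t)&dbg, count);
	/* newly created threads in the task now inherit this initial state */

	kr = task_get_state(task, ARM_DEBUG_STATE64, (thread_state_t)&dbg, &count);
#endif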
6938 
6939 
6940 static kern_return_t __attribute__((noinline, not_tail_called))
6941 PROC_VIOLATED_GUARD__SEND_EXC_GUARD(
6942 	mach_exception_code_t code,
6943 	mach_exception_subcode_t subcode,
6944 	void *reason,
6945 	boolean_t backtrace_only)
6946 {
6947 #ifdef MACH_BSD
6948 	if (1 == proc_selfpid()) {
6949 		return KERN_NOT_SUPPORTED;              // initproc is immune
6950 	}
6951 #endif
6952 	mach_exception_data_type_t codes[EXCEPTION_CODE_MAX] = {
6953 		[0] = code,
6954 		[1] = subcode,
6955 	};
6956 	task_t task = current_task();
6957 	kern_return_t kr;
6958 	void *bsd_info = get_bsdtask_info(task);
6959 
6960 	/* (See jetsam-related comments below) */
6961 
6962 	proc_memstat_skip(bsd_info, TRUE);
6963 	kr = task_enqueue_exception_with_corpse(task, EXC_GUARD, codes, 2, reason, backtrace_only);
6964 	proc_memstat_skip(bsd_info, FALSE);
6965 	return kr;
6966 }
6967 
6968 kern_return_t
6969 task_violated_guard(
6970 	mach_exception_code_t code,
6971 	mach_exception_subcode_t subcode,
6972 	void *reason,
6973 	bool backtrace_only)
6974 {
6975 	return PROC_VIOLATED_GUARD__SEND_EXC_GUARD(code, subcode, reason, backtrace_only);
6976 }
6977 
6978 
6979 #if CONFIG_MEMORYSTATUS
6980 
6981 boolean_t
6982 task_get_memlimit_is_active(task_t task)
6983 {
6984 	assert(task != NULL);
6985 
6986 	if (task->memlimit_is_active == 1) {
6987 		return TRUE;
6988 	} else {
6989 		return FALSE;
6990 	}
6991 }
6992 
6993 void
6994 task_set_memlimit_is_active(task_t task, boolean_t memlimit_is_active)
6995 {
6996 	assert(task != NULL);
6997 
6998 	if (memlimit_is_active) {
6999 		task->memlimit_is_active = 1;
7000 	} else {
7001 		task->memlimit_is_active = 0;
7002 	}
7003 }
7004 
7005 boolean_t
7006 task_get_memlimit_is_fatal(task_t task)
7007 {
7008 	assert(task != NULL);
7009 
7010 	if (task->memlimit_is_fatal == 1) {
7011 		return TRUE;
7012 	} else {
7013 		return FALSE;
7014 	}
7015 }
7016 
7017 void
7018 task_set_memlimit_is_fatal(task_t task, boolean_t memlimit_is_fatal)
7019 {
7020 	assert(task != NULL);
7021 
7022 	if (memlimit_is_fatal) {
7023 		task->memlimit_is_fatal = 1;
7024 	} else {
7025 		task->memlimit_is_fatal = 0;
7026 	}
7027 }
7028 
7029 uint64_t
7030 task_get_dirty_start(task_t task)
7031 {
7032 	return task->memstat_dirty_start;
7033 }
7034 
7035 void
7036 task_set_dirty_start(task_t task, uint64_t start)
7037 {
7038 	task_lock(task);
7039 	task->memstat_dirty_start = start;
7040 	task_unlock(task);
7041 }
7042 
7043 boolean_t
7044 task_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active)
7045 {
7046 	boolean_t triggered = FALSE;
7047 
7048 	assert(task == current_task());
7049 
7050 	/*
7051 	 * Returns TRUE if the task has already triggered an exc_resource exception.
7052 	 */
7053 
7054 	if (memlimit_is_active) {
7055 		triggered = (task->memlimit_active_exc_resource ? TRUE : FALSE);
7056 	} else {
7057 		triggered = (task->memlimit_inactive_exc_resource ? TRUE : FALSE);
7058 	}
7059 
7060 	return triggered;
7061 }
7062 
7063 void
7064 task_mark_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active)
7065 {
7066 	assert(task == current_task());
7067 
7068 	/*
7069 	 * We allow one exc_resource per process per active/inactive limit.
7070 	 * The limit's fatal attribute does not come into play.
7071 	 */
7072 
7073 	if (memlimit_is_active) {
7074 		task->memlimit_active_exc_resource = 1;
7075 	} else {
7076 		task->memlimit_inactive_exc_resource = 1;
7077 	}
7078 }
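
/*
 * Illustrative sketch, not part of xnu: the check/mark pair above gates
 * EXC_RESOURCE delivery to once per active/inactive limit, in the shape
 * used by task_process_crossed_limit_no_diag() below:
 */
#if 0
	if (!task_has_triggered_exc_resource(task, memlimit_is_active)) {
		/* ... deliver the exception exactly once ... */
		task_mark_has_triggered_exc_resource(task, memlimit_is_active);
	}
#endif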
7079 
7080 #define HWM_USERCORE_MINSPACE 250 // free space (in MB) required *after* core file creation
7081 
7082 void __attribute__((noinline))
7083 PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, send_exec_resource_options_t exception_options)
7084 {
7085 	task_t                      task       = current_task();
7086 	int                         pid        = 0;
7087 	const char                  *procname  = "unknown";
7088 	mach_exception_data_type_t  code[EXCEPTION_CODE_MAX];
7089 	boolean_t send_sync_exc_resource = FALSE;
7090 	void *cur_bsd_info = get_bsdtask_info(current_task());
7091 
7092 #ifdef MACH_BSD
7093 	pid = proc_selfpid();
7094 
7095 	if (pid == 1) {
7096 		/*
7097 		 * Cannot have ReportCrash analyzing
7098 		 * a suspended initproc.
7099 		 */
7100 		return;
7101 	}
7102 
7103 	if (cur_bsd_info != NULL) {
7104 		procname = proc_name_address(cur_bsd_info);
7105 		send_sync_exc_resource = proc_send_synchronous_EXC_RESOURCE(cur_bsd_info);
7106 	}
7107 #endif
7108 #if CONFIG_COREDUMP
7109 	if (hwm_user_cores) {
7110 		int                             error;
7111 		uint64_t                starttime, end;
7112 		clock_sec_t             secs = 0;
7113 		uint32_t                microsecs = 0;
7114 
7115 		starttime = mach_absolute_time();
7116 		/*
7117 		 * Trigger a coredump of this process. Don't proceed unless we know we won't
7118 		 * be filling up the disk; and ignore the core size resource limit for this
7119 		 * core file.
7120 		 */
7121 		if ((error = coredump(cur_bsd_info, HWM_USERCORE_MINSPACE, COREDUMP_IGNORE_ULIMIT)) != 0) {
7122 			printf("couldn't take coredump of %s[%d]: %d\n", procname, pid, error);
7123 		}
7124 		/*
7125 		 * coredump() leaves the task suspended.
7126 		 */
7127 		task_resume_internal(current_task());
7128 
7129 		end = mach_absolute_time();
7130 		absolutetime_to_microtime(end - starttime, &secs, &microsecs);
7131 		printf("coredump of %s[%d] taken in %d secs %d microsecs\n",
7132 		    proc_name_address(cur_bsd_info), pid, (int)secs, microsecs);
7133 	}
7134 #endif /* CONFIG_COREDUMP */
7135 
7136 	if (disable_exc_resource) {
7137 		printf("process %s[%d] crossed memory high watermark (%d MB); EXC_RESOURCE "
7138 		    "suppressed by a boot-arg.\n", procname, pid, max_footprint_mb);
7139 		return;
7140 	}
7141 	printf("process %s [%d] crossed memory %s (%d MB); EXC_RESOURCE "
7142 	    "\n", procname, pid, (!(exception_options & EXEC_RESOURCE_DIAGNOSTIC) ? "high watermark" : "diagnostics limit"), max_footprint_mb);
7143 
7144 	/*
7145 	 * A task that has triggered an EXC_RESOURCE, should not be
7146 	 * jetsammed when the device is under memory pressure.  Here
7147 	 * we set the P_MEMSTAT_SKIP flag so that the process
7148 	 * will be skipped if the memorystatus_thread wakes up.
7149 	 *
7150 	 * This is a debugging aid to ensure we can get a corpse before
7151 	 * the jetsam thread kills the process.
7152 	 * Note that proc_memstat_skip is a no-op on release kernels.
7153 	 */
7154 	proc_memstat_skip(cur_bsd_info, TRUE);
7155 
7156 	code[0] = code[1] = 0;
7157 	EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_MEMORY);
7158 	/*
7159 	 * Regardless of whether there was a diag memlimit violation, fatal exceptions are always
7160 	 * reported as high watermark violations. In other words, if both a diag limit and a
7161 	 * watermark are set and the violation is against the watermark, a watermark is reported.
7162 	 */
7163 	if (!(exception_options & EXEC_RESOURCE_FATAL)) {
7164 		EXC_RESOURCE_ENCODE_FLAVOR(code[0], !(exception_options & EXEC_RESOURCE_DIAGNOSTIC)  ? FLAVOR_HIGH_WATERMARK : FLAVOR_DIAG_MEMLIMIT);
7165 	} else {
7166 		EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_HIGH_WATERMARK);
7167 	}
7168 	EXC_RESOURCE_HWM_ENCODE_LIMIT(code[0], max_footprint_mb);
7169 	/*
7170 	 * Do not generate a corpse fork if the violation is a fatal one
7171 	 * or the process wants synchronous EXC_RESOURCE exceptions.
7172 	 */
7173 	if ((exception_options & EXEC_RESOURCE_FATAL) || send_sync_exc_resource || !exc_via_corpse_forking) {
7174 		if (exception_options & EXEC_RESOURCE_FATAL) {
7175 			vm_map_set_corpse_source(task->map);
7176 		}
7177 
7178 		/* Do not send a EXC_RESOURCE if corpse_for_fatal_memkill is set */
7179 		if (send_sync_exc_resource || !corpse_for_fatal_memkill) {
7180 			/*
7181 			 * Use the _internal_ variant so that no user-space
7182 			 * process can resume our task from under us.
7183 			 */
7184 			task_suspend_internal(task);
7185 			exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
7186 			task_resume_internal(task);
7187 		}
7188 	} else {
7189 		if (disable_exc_resource_during_audio && audio_active) {
7190 			printf("process %s[%d] crossed memory high watermark (%d MB); EXC_RESOURCE "
7191 			    "suppressed due to audio playback.\n", procname, pid, max_footprint_mb);
7192 		} else {
7193 			task_enqueue_exception_with_corpse(task, EXC_RESOURCE,
7194 			    code, EXCEPTION_CODE_MAX, NULL, FALSE);
7195 		}
7196 	}
7197 
7198 	/*
7199 	 * After the EXC_RESOURCE has been handled, we must clear the
7200 	 * P_MEMSTAT_SKIP flag so that the process can again be
7201 	 * considered for jetsam if the memorystatus_thread wakes up.
7202 	 */
7203 	proc_memstat_skip(cur_bsd_info, FALSE);         /* clear the flag */
7204 }
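
/*
 * Illustrative sketch, not part of xnu: a handler receiving the
 * EXC_RESOURCE generated above can unpack code[0] with the decode
 * macros that mirror the encode macros used here:
 */
#if 0
	if (EXC_RESOURCE_DECODE_RESOURCE_TYPE(code[0]) == RESOURCE_TYPE_MEMORY &&
	    EXC_RESOURCE_DECODE_FLAVOR(code[0]) == FLAVOR_HIGH_WATERMARK) {
		int limit_mb = EXC_RESOURCE_HWM_DECODE_LIMIT(code[0]);
		/* the task crossed its high watermark of limit_mb MB */
	}
#endif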
7205 /*
7206  * Callback invoked when a task exceeds its physical footprint limit.
7207  */
7208 void
7209 task_footprint_exceeded(int warning, __unused const void *param0, __unused const void *param1)
7210 {
7211 	ledger_amount_t max_footprint = 0;
7212 	ledger_amount_t max_footprint_mb = 0;
7213 #if DEBUG || DEVELOPMENT
7214 	ledger_amount_t diag_threshold_limit_mb = 0;
7215 	ledger_amount_t diag_threshold_limit = 0;
7216 #endif
7217 #if CONFIG_DEFERRED_RECLAIM
7218 	ledger_amount_t current_footprint;
7219 #endif /* CONFIG_DEFERRED_RECLAIM */
7220 	task_t task;
7221 	send_exec_resource_is_warning is_warning = IS_NOT_WARNING;
7222 	boolean_t memlimit_is_active;
7223 	send_exec_resource_is_fatal memlimit_is_fatal;
7224 	send_exec_resource_is_diagnostics is_diag_mem_threshold = IS_NOT_DIAGNOSTICS;
7225 	if (warning == LEDGER_WARNING_DIAG_MEM_THRESHOLD) {
7226 		is_diag_mem_threshold = IS_DIAGNOSTICS;
7227 		is_warning = IS_WARNING;
7228 	} else if (warning == LEDGER_WARNING_DIPPED_BELOW) {
7229 		/*
7230 		 * Task memory limits only provide a warning on the way up.
7231 		 */
7232 		return;
7233 	} else if (warning == LEDGER_WARNING_ROSE_ABOVE) {
7234 		/*
7235 		 * This task is in danger of violating a memory limit;
7236 		 * it has exceeded a percentage level of the limit.
7237 		 */
7238 		is_warning = IS_WARNING;
7239 	} else {
7240 		/*
7241 		 * The task has exceeded the physical footprint limit.
7242 		 * This is not a warning but a true limit violation.
7243 		 */
7244 		is_warning = IS_NOT_WARNING;
7245 	}
7246 
7247 	task = current_task();
7248 
7249 	ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &max_footprint);
7250 #if DEBUG || DEVELOPMENT
7251 	ledger_get_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint, &diag_threshold_limit);
7252 #endif
7253 #if CONFIG_DEFERRED_RECLAIM
7254 	if (task->deferred_reclamation_metadata != NULL) {
7255 		/*
7256 		 * Task is enrolled in deferred reclamation.
7257 		 * Do a reclaim to ensure it's really over its limit.
7258 		 */
7259 		vm_deferred_reclamation_reclaim_from_task_sync(task, UINT64_MAX);
7260 		ledger_get_balance(task->ledger, task_ledgers.phys_footprint, &current_footprint);
7261 		if (current_footprint < max_footprint) {
7262 			return;
7263 		}
7264 	}
7265 #endif /* CONFIG_DEFERRED_RECLAIM */
7266 	max_footprint_mb = max_footprint >> 20;
7267 #if DEBUG || DEVELOPMENT
7268 	diag_threshold_limit_mb = diag_threshold_limit >> 20;
7269 #endif
7270 	memlimit_is_active = task_get_memlimit_is_active(task);
7271 	memlimit_is_fatal = task_get_memlimit_is_fatal(task) == FALSE ? IS_NOT_FATAL : IS_FATAL;
7272 #if DEBUG || DEVELOPMENT
7273 	if (is_diag_mem_threshold == IS_NOT_DIAGNOSTICS) {
7274 		task_process_crossed_limit_no_diag(task, max_footprint_mb, memlimit_is_fatal, memlimit_is_active, is_warning);
7275 	} else {
7276 		task_process_crossed_limit_diag(diag_threshold_limit_mb);
7277 	}
7278 #else
7279 	task_process_crossed_limit_no_diag(task, max_footprint_mb, memlimit_is_fatal, memlimit_is_active, is_warning);
7280 #endif
7281 }
7282 
7283 /*
7284  * Actions to perform when a process has crossed its watermark or committed a fatal memory-limit violation */
7285 static inline void
7286 task_process_crossed_limit_no_diag(task_t task, ledger_amount_t ledger_limit_size, bool memlimit_is_fatal, bool memlimit_is_active, send_exec_resource_is_warning is_warning)
7287 {
7288 	send_exec_resource_options_t exception_options = 0;
7289 	if (memlimit_is_fatal) {
7290 		exception_options |= EXEC_RESOURCE_FATAL;
7291 	}
7292 	/*
7293 	 * If this is an actual violation (not a warning), then generate EXC_RESOURCE exception.
7294 	 * We only generate the exception once per process per memlimit (active/inactive limit).
7295 	 * To enforce this, we monitor state based on the  memlimit's active/inactive attribute
7296 	 * and we disable it by marking that memlimit as exception triggered.
7297 	 */
7298 	if (is_warning == IS_NOT_WARNING && !task_has_triggered_exc_resource(task, memlimit_is_active)) {
7299 		PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND((int)ledger_limit_size, exception_options);
7300 		// If it was not a diag threshold (i.e. it was a memory limit), then we do not want more signalling;
7301 		// however, if it was a diag limit, the user may load a different limit and signal the violation again.
7302 		memorystatus_log_exception((int)ledger_limit_size, memlimit_is_active, memlimit_is_fatal);
7303 		task_mark_has_triggered_exc_resource(task, memlimit_is_active);
7304 	}
7305 	memorystatus_on_ledger_footprint_exceeded(is_warning == IS_NOT_WARNING ? FALSE : TRUE, memlimit_is_active, memlimit_is_fatal);
7306 }
7307 
7308 #if DEBUG || DEVELOPMENT
7309 /**
7310  * Actions to take when a process has crossed the diagnostics limit
7311  */
7312 static inline void
7313 task_process_crossed_limit_diag(ledger_amount_t ledger_limit_size)
7314 {
7315 	/*
7316 	 * If this is an actual violation (not a warning), then generate EXC_RESOURCE exception.
7317 	 * In the case of the diagnostics thresholds, the exception will be signaled only once, but the
7318 	 * inhibit / rearm mechanism is performed at the ledger level.
7319 	 */
7320 	send_exec_resource_options_t exception_options = EXEC_RESOURCE_DIAGNOSTIC;
7321 	PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND((int)ledger_limit_size, exception_options);
7322 	memorystatus_log_diag_threshold_exception((int)ledger_limit_size);
7323 }
7324 #endif
7325 
7326 extern int proc_check_footprint_priv(void);
7327 
7328 kern_return_t
7329 task_set_phys_footprint_limit(
7330 	task_t task,
7331 	int new_limit_mb,
7332 	int *old_limit_mb)
7333 {
7334 	kern_return_t error;
7335 
7336 	boolean_t memlimit_is_active;
7337 	boolean_t memlimit_is_fatal;
7338 
7339 	if ((error = proc_check_footprint_priv())) {
7340 		return KERN_NO_ACCESS;
7341 	}
7342 
7343 	/*
7344 	 * This call should probably be obsoleted.
7345 	 * But for now, we default to current state.
7346 	 */
7347 	memlimit_is_active = task_get_memlimit_is_active(task);
7348 	memlimit_is_fatal = task_get_memlimit_is_fatal(task);
7349 
7350 	return task_set_phys_footprint_limit_internal(task, new_limit_mb, old_limit_mb, memlimit_is_active, memlimit_is_fatal);
7351 }
7352 
7353 /*
7354  * Set the limit of diagnostics memory consumption for a concrete task
7355  */
7356 #if CONFIG_MEMORYSTATUS
7357 #if DEVELOPMENT || DEBUG
7358 kern_return_t
7359 task_set_diag_footprint_limit(
7360 	task_t task,
7361 	uint64_t new_limit_mb,
7362 	uint64_t *old_limit_mb)
7363 {
7364 	kern_return_t error;
7365 
7366 	if ((error = proc_check_footprint_priv())) {
7367 		return KERN_NO_ACCESS;
7368 	}
7369 
7370 	return task_set_diag_footprint_limit_internal(task, new_limit_mb, old_limit_mb);
7371 }
7372 
7373 #endif // DEVELOPMENT || DEBUG
7374 #endif // CONFIG_MEMORYSTATUS
7375 
7376 kern_return_t
7377 task_convert_phys_footprint_limit(
7378 	int limit_mb,
7379 	int *converted_limit_mb)
7380 {
7381 	if (limit_mb == -1) {
7382 		/*
7383 		 * No limit
7384 		 */
7385 		if (max_task_footprint != 0) {
7386 			*converted_limit_mb = (int)(max_task_footprint / 1024 / 1024);         /* bytes to MB */
7387 		} else {
7388 			*converted_limit_mb = (int)(LEDGER_LIMIT_INFINITY >> 20);
7389 		}
7390 	} else {
7391 		/* nothing to convert */
7392 		*converted_limit_mb = limit_mb;
7393 	}
7394 	return KERN_SUCCESS;
7395 }
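
/*
 * Worked examples (illustrative, not part of xnu): with
 * max_task_footprint unset, a limit_mb of -1 converts to
 * LEDGER_LIMIT_INFINITY >> 20; with max_task_footprint set to 2 GB it
 * converts to 2048; any other value passes through unchanged
 * (e.g. 512 -> 512).
 */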
7396 
7397 kern_return_t
7398 task_set_phys_footprint_limit_internal(
7399 	task_t task,
7400 	int new_limit_mb,
7401 	int *old_limit_mb,
7402 	boolean_t memlimit_is_active,
7403 	boolean_t memlimit_is_fatal)
7404 {
7405 	ledger_amount_t old;
7406 	kern_return_t ret;
7407 #if DEVELOPMENT || DEBUG
7408 	diagthreshold_check_return diag_threshold_validity;
7409 #endif
7410 	ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &old);
7411 
7412 	if (ret != KERN_SUCCESS) {
7413 		return ret;
7414 	}
7415 	/**
7416 	 * Maybe we will need to re-enable the diag threshold; let's get the value
7417 	 * and the current status.
7418 	 */
7419 #if DEVELOPMENT || DEBUG
7420 	diag_threshold_validity = task_check_memorythreshold_is_valid( task, new_limit_mb, false);
7421 	/**
7422 	 * If the footprint and diagnostics threshold are going to be the same, let's disable the threshold
7423 	 */
7424 	if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7425 		ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7426 	} else if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7427 		ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7428 	}
7429 #endif
7430 
7431 	/*
7432 	 * Check that limit >> 20 will not give an "unexpected" 32-bit
7433 	 * result. There are, however, implicit assumptions that -1 mb limit
7434 	 * equates to LEDGER_LIMIT_INFINITY.
7435 	 */
7436 	assert(((old & 0xFFF0000000000000LL) == 0) || (old == LEDGER_LIMIT_INFINITY));
7437 
7438 	if (old_limit_mb) {
7439 		*old_limit_mb = (int)(old >> 20);
7440 	}
7441 
7442 	if (new_limit_mb == -1) {
7443 		/*
7444 		 * Caller wishes to remove the limit.
7445 		 */
7446 		ledger_set_limit(task->ledger, task_ledgers.phys_footprint,
7447 		    max_task_footprint ? max_task_footprint : LEDGER_LIMIT_INFINITY,
7448 		    max_task_footprint ? (uint8_t)max_task_footprint_warning_level : 0);
7449 
7450 		task_lock(task);
7451 		task_set_memlimit_is_active(task, memlimit_is_active);
7452 		task_set_memlimit_is_fatal(task, memlimit_is_fatal);
7453 		task_unlock(task);
7454 		/**
7455 		 * If the diagnostics were disabled, and now we have a new limit, we have to re-enable it.
7456 		 */
7457 #if DEVELOPMENT || DEBUG
7458 		if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7459 			ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7460 		} else if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7461 			ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7462 		}
7463 #endif
7464 		return KERN_SUCCESS;
7465 	}
7466 
7467 #ifdef CONFIG_NOMONITORS
7468 	return KERN_SUCCESS;
7469 #endif /* CONFIG_NOMONITORS */
7470 
7471 	task_lock(task);
7472 
7473 	if ((memlimit_is_active == task_get_memlimit_is_active(task)) &&
7474 	    (memlimit_is_fatal == task_get_memlimit_is_fatal(task)) &&
7475 	    (((ledger_amount_t)new_limit_mb << 20) == old)) {
7476 		/*
7477 		 * memlimit state is not changing
7478 		 */
7479 		task_unlock(task);
7480 		return KERN_SUCCESS;
7481 	}
7482 
7483 	task_set_memlimit_is_active(task, memlimit_is_active);
7484 	task_set_memlimit_is_fatal(task, memlimit_is_fatal);
7485 
7486 	ledger_set_limit(task->ledger, task_ledgers.phys_footprint,
7487 	    (ledger_amount_t)new_limit_mb << 20, PHYS_FOOTPRINT_WARNING_LEVEL);
7488 
7489 	if (task == current_task()) {
7490 		ledger_check_new_balance(current_thread(), task->ledger,
7491 		    task_ledgers.phys_footprint);
7492 	}
7493 
7494 	task_unlock(task);
7495 #if DEVELOPMENT || DEBUG
7496 	if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7497 		ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7498 	}
7499 #endif
7500 
7501 	return KERN_SUCCESS;
7502 }
7503 
7504 #if RESETTABLE_DIAG_FOOTPRINT_LIMITS
7505 kern_return_t
7506 task_set_diag_footprint_limit_internal(
7507 	task_t task,
7508 	uint64_t new_limit_bytes,
7509 	uint64_t *old_limit_bytes)
7510 {
7511 	ledger_amount_t old = 0;
7512 	kern_return_t ret = KERN_SUCCESS;
7513 	diagthreshold_check_return diag_threshold_validity;
7514 	ret = ledger_get_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint, &old);
7515 
7516 	if (ret != KERN_SUCCESS) {
7517 		return ret;
7518 	}
7519 	/**
7520 	 * Maybe we will need to re-enable the diag threshold; let's get the value
7521 	 * and the current status.
7522 	 */
7523 	diag_threshold_validity = task_check_memorythreshold_is_valid( task, new_limit_bytes >> 20, true);
7524 	/**
7525 	 * If the footprint and diagnostics threshold are going to be the same, let's disable the threshold
7526 	 */
7527 	if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7528 		ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7529 	}
7530 
7531 	/*
7532 	 * Check that limit >> 20 will not give an "unexpected" 32-bit
7533 	 * result. There are, however, implicit assumptions that -1 mb limit
7534 	 * equates to LEDGER_LIMIT_INFINITY.
7535 	 */
7536 	if (old_limit_bytes) {
7537 		*old_limit_bytes = old;
7538 	}
7539 
7540 	if (new_limit_bytes == -1) {
7541 		/*
7542 		 * Caller wishes to remove the limit.
7543 		 */
7544 		ledger_set_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint,
7545 		    LEDGER_LIMIT_INFINITY);
7546 		/*
7547 	 * If the memory diagnostics flag was disabled, let's enable it again
7548 		 */
7549 		ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7550 		return KERN_SUCCESS;
7551 	}
7552 
7553 #ifdef CONFIG_NOMONITORS
7554 	return KERN_SUCCESS;
7555 #else
7556 
7557 	task_lock(task);
7558 	ledger_set_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint,
7559 	    (ledger_amount_t)new_limit_bytes);
7560 	if (task == current_task()) {
7561 		ledger_check_new_balance(current_thread(), task->ledger,
7562 		    task_ledgers.phys_footprint);
7563 	}
7564 
7565 	task_unlock(task);
7566 	if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7567 		ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7568 	} else if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7569 		ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7570 	}
7571 
7572 	return KERN_SUCCESS;
7573 #endif /* CONFIG_NOMONITORS */
7574 }
7575 
7576 kern_return_t
7577 task_get_diag_footprint_limit_internal(
7578 	task_t task,
7579 	uint64_t *new_limit_bytes,
7580 	bool *threshold_disabled)
7581 {
7582 	ledger_amount_t ledger_limit;
7583 	kern_return_t ret = KERN_SUCCESS;
7584 	if (new_limit_bytes == NULL || threshold_disabled == NULL) {
7585 		return KERN_INVALID_ARGUMENT;
7586 	}
7587 	ret = ledger_get_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint, &ledger_limit);
7588 	if (ledger_limit == LEDGER_LIMIT_INFINITY) {
7589 		ledger_limit = -1;
7590 	}
7591 	if (ret == KERN_SUCCESS) {
7592 		*new_limit_bytes = ledger_limit;
7593 		ret = ledger_is_diag_threshold_enabled(task->ledger, task_ledgers.phys_footprint, threshold_disabled);
7594 	}
7595 	return ret;
7596 }
7597 #endif /* RESETTABLE_DIAG_FOOTPRINT_LIMITS */
7598 
7599 
7600 kern_return_t
7601 task_get_phys_footprint_limit(
7602 	task_t task,
7603 	int *limit_mb)
7604 {
7605 	ledger_amount_t limit;
7606 	kern_return_t ret;
7607 
7608 	ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &limit);
7609 	if (ret != KERN_SUCCESS) {
7610 		return ret;
7611 	}
7612 
7613 	/*
7614 	 * Check that limit >> 20 will not give an "unexpected" signed, 32-bit
7615 	 * result. There are, however, implicit assumptions that -1 mb limit
7616 	 * equates to LEDGER_LIMIT_INFINITY.
7617 	 */
7618 	assert(((limit & 0xFFF0000000000000LL) == 0) || (limit == LEDGER_LIMIT_INFINITY));
7619 	*limit_mb = (int)(limit >> 20);
7620 
7621 	return KERN_SUCCESS;
7622 }
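
/*
 * Illustrative sketch, not part of xnu: adjusting the footprint limit
 * while preserving the previous value for later restoration (assuming
 * the previous limit was finite):
 */
#if 0
	int old_mb = 0;
	kern_return_t kr;

	kr = task_set_phys_footprint_limit_internal(task, 512 /* MB */,
	    &old_mb, TRUE /* active */, FALSE /* non-fatal */);
	/* ... later, restore the previous limit ... */
	kr = task_set_phys_footprint_limit_internal(task, old_mb, NULL,
	    TRUE, FALSE);
#endif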
7623 #else /* CONFIG_MEMORYSTATUS */
7624 kern_return_t
7625 task_set_phys_footprint_limit(
7626 	__unused task_t task,
7627 	__unused int new_limit_mb,
7628 	__unused int *old_limit_mb)
7629 {
7630 	return KERN_FAILURE;
7631 }
7632 
7633 kern_return_t
7634 task_get_phys_footprint_limit(
7635 	__unused task_t task,
7636 	__unused int *limit_mb)
7637 {
7638 	return KERN_FAILURE;
7639 }
7640 #endif /* CONFIG_MEMORYSTATUS */
7641 
7642 security_token_t *
7643 task_get_sec_token(task_t task)
7644 {
7645 	return &task_get_ro(task)->task_tokens.sec_token;
7646 }
7647 
7648 void
7649 task_set_sec_token(task_t task, security_token_t *token)
7650 {
7651 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7652 	    task_tokens.sec_token, token);
7653 }
7654 
7655 audit_token_t *
7656 task_get_audit_token(task_t task)
7657 {
7658 	return &task_get_ro(task)->task_tokens.audit_token;
7659 }
7660 
7661 void
7662 task_set_audit_token(task_t task, audit_token_t *token)
7663 {
7664 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7665 	    task_tokens.audit_token, token);
7666 }
7667 
7668 void
7669 task_set_tokens(task_t task, security_token_t *sec_token, audit_token_t *audit_token)
7670 {
7671 	struct task_token_ro_data tokens;
7672 
7673 	tokens = task_get_ro(task)->task_tokens;
7674 	tokens.sec_token = *sec_token;
7675 	tokens.audit_token = *audit_token;
7676 
7677 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task), task_tokens,
7678 	    &tokens);
7679 }
7680 
7681 boolean_t
7682 task_is_privileged(task_t task)
7683 {
7684 	return task_get_sec_token(task)->val[0] == 0;
7685 }
7686 
7687 #ifdef CONFIG_MACF
7688 uint8_t *
7689 task_get_mach_trap_filter_mask(task_t task)
7690 {
7691 	return task_get_ro(task)->task_filters.mach_trap_filter_mask;
7692 }
7693 
7694 void
7695 task_set_mach_trap_filter_mask(task_t task, uint8_t *mask)
7696 {
7697 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7698 	    task_filters.mach_trap_filter_mask, &mask);
7699 }
7700 
7701 uint8_t *
7702 task_get_mach_kobj_filter_mask(task_t task)
7703 {
7704 	return task_get_ro(task)->task_filters.mach_kobj_filter_mask;
7705 }
7706 
7707 mach_vm_address_t
7708 task_get_all_image_info_addr(task_t task)
7709 {
7710 	return task->all_image_info_addr;
7711 }
7712 
7713 void
7714 task_set_mach_kobj_filter_mask(task_t task, uint8_t *mask)
7715 {
7716 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7717 	    task_filters.mach_kobj_filter_mask, &mask);
7718 }
7719 
7720 #endif /* CONFIG_MACF */
7721 
7722 void
7723 task_set_thread_limit(task_t task, uint16_t thread_limit)
7724 {
7725 	assert(task != kernel_task);
7726 	if (thread_limit <= TASK_MAX_THREAD_LIMIT) {
7727 		task_lock(task);
7728 		task->task_thread_limit = thread_limit;
7729 		task_unlock(task);
7730 	}
7731 }
7732 
7733 #if CONFIG_PROC_RESOURCE_LIMITS
7734 kern_return_t
7735 task_set_port_space_limits(task_t task, uint32_t soft_limit, uint32_t hard_limit)
7736 {
7737 	return ipc_space_set_table_size_limits(task->itk_space, soft_limit, hard_limit);
7738 }
7739 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
7740 
7741 #if XNU_TARGET_OS_OSX
7742 boolean_t
7743 task_has_system_version_compat_enabled(task_t task)
7744 {
7745 	boolean_t enabled = FALSE;
7746 
7747 	task_lock(task);
7748 	enabled = (task->t_flags & TF_SYS_VERSION_COMPAT);
7749 	task_unlock(task);
7750 
7751 	return enabled;
7752 }
7753 
7754 void
7755 task_set_system_version_compat_enabled(task_t task, boolean_t enable_system_version_compat)
7756 {
7757 	assert(task == current_task());
7758 	assert(task != kernel_task);
7759 
7760 	task_lock(task);
7761 	if (enable_system_version_compat) {
7762 		task->t_flags |= TF_SYS_VERSION_COMPAT;
7763 	} else {
7764 		task->t_flags &= ~TF_SYS_VERSION_COMPAT;
7765 	}
7766 	task_unlock(task);
7767 }
7768 #endif /* XNU_TARGET_OS_OSX */
7769 
7770 /*
7771  * We need to export some functions to other components that
7772  * are currently implemented in macros within the osfmk
7773  * component.  Just export them as functions of the same name.
7774  */
7775 boolean_t
7776 is_kerneltask(task_t t)
7777 {
7778 	if (t == kernel_task) {
7779 		return TRUE;
7780 	}
7781 
7782 	return FALSE;
7783 }
7784 
7785 boolean_t
7786 is_corpsefork(task_t t)
7787 {
7788 	return task_is_a_corpse_fork(t);
7789 }
7790 
7791 task_t
7792 current_task_early(void)
7793 {
7794 	if (__improbable(startup_phase < STARTUP_SUB_EARLY_BOOT)) {
7795 		if (current_thread()->t_tro == NULL) {
7796 			return TASK_NULL;
7797 		}
7798 	}
7799 	return get_threadtask(current_thread());
7800 }
7801 
7802 task_t
7803 current_task(void)
7804 {
7805 	return get_threadtask(current_thread());
7806 }
7807 
7808 /* defined in bsd/kern/kern_prot.c */
7809 extern int get_audit_token_pid(audit_token_t *audit_token);
7810 
7811 int
7812 task_pid(task_t task)
7813 {
7814 	if (task) {
7815 		return get_audit_token_pid(task_get_audit_token(task));
7816 	}
7817 	return -1;
7818 }
7819 
7820 #if __has_feature(ptrauth_calls)
7821 /*
7822  * Get the shared region id and jop signing key for the task.
7823  * The function will allocate a kalloc buffer and return
7824  * it to the caller; the caller must free it. This is used
7825  * for getting the information via the task port.
7826  */
7827 char *
7828 task_get_vm_shared_region_id_and_jop_pid(task_t task, uint64_t *jop_pid)
7829 {
7830 	size_t len;
7831 	char *shared_region_id = NULL;
7832 
7833 	task_lock(task);
7834 	if (task->shared_region_id == NULL) {
7835 		task_unlock(task);
7836 		return NULL;
7837 	}
7838 	len = strlen(task->shared_region_id) + 1;
7839 
7840 	/* don't hold task lock while allocating */
7841 	task_unlock(task);
7842 	shared_region_id = kalloc_data(len, Z_WAITOK);
7843 	task_lock(task);
7844 
7845 	if (task->shared_region_id == NULL) {
7846 		task_unlock(task);
7847 		kfree_data(shared_region_id, len);
7848 		return NULL;
7849 	}
7850 	assert(len == strlen(task->shared_region_id) + 1);         /* should never change */
7851 	strlcpy(shared_region_id, task->shared_region_id, len);
7852 	task_unlock(task);
7853 
7854 	/* find key from its auth pager */
7855 	if (jop_pid != NULL) {
7856 		*jop_pid = shared_region_find_key(shared_region_id);
7857 	}
7858 
7859 	return shared_region_id;
7860 }
7861 
7862 /*
7863  * set the shared region id for a task
7864  */
7865 void
7866 task_set_shared_region_id(task_t task, char *id)
7867 {
7868 	char *old_id;
7869 
7870 	task_lock(task);
7871 	old_id = task->shared_region_id;
7872 	task->shared_region_id = id;
7873 	task->shared_region_auth_remapped = FALSE;
7874 	task_unlock(task);
7875 
7876 	/* free any pre-existing shared region id */
7877 	if (old_id != NULL) {
7878 		shared_region_key_dealloc(old_id);
7879 		kfree_data(old_id, strlen(old_id) + 1);
7880 	}
7881 }
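
/*
 * Illustrative sketch, not part of xnu: task_set_shared_region_id()
 * takes ownership of the `id` allocation and frees any previous one,
 * while the getter returns a copy that the caller must free:
 */
#if 0
	uint64_t jop_pid;
	char *id = task_get_vm_shared_region_id_and_jop_pid(task, &jop_pid);

	if (id != NULL) {
		/* ... use id / jop_pid ... */
		kfree_data(id, strlen(id) + 1);
	}
#endif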
7882 #endif /* __has_feature(ptrauth_calls) */
7883 
7884 /*
7885  * This routine finds a thread in a task by its unique id
7886  * Returns a referenced thread or THREAD_NULL if the thread was not found
7887  *
7888  * TODO: This is super inefficient - it's an O(threads in task) list walk!
7889  *       We should make a tid hash, or transition all tid clients to thread ports
7890  *
7891  * Precondition: No locks held (will take task lock)
7892  */
7893 thread_t
7894 task_findtid(task_t task, uint64_t tid)
7895 {
7896 	thread_t self           = current_thread();
7897 	thread_t found_thread   = THREAD_NULL;
7898 	thread_t iter_thread    = THREAD_NULL;
7899 
7900 	/* Short-circuit the lookup if we're looking up ourselves */
7901 	if (tid == self->thread_id || tid == TID_NULL) {
7902 		assert(get_threadtask(self) == task);
7903 
7904 		thread_reference(self);
7905 
7906 		return self;
7907 	}
7908 
7909 	task_lock(task);
7910 
7911 	queue_iterate(&task->threads, iter_thread, thread_t, task_threads) {
7912 		if (iter_thread->thread_id == tid) {
7913 			found_thread = iter_thread;
7914 			thread_reference(found_thread);
7915 			break;
7916 		}
7917 	}
7918 
7919 	task_unlock(task);
7920 
7921 	return found_thread;
7922 }
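
/*
 * Illustrative sketch, not part of xnu: task_findtid() returns a
 * referenced thread, so callers are responsible for dropping that
 * reference:
 */
#if 0
	thread_t thread = task_findtid(task, tid);

	if (thread != THREAD_NULL) {
		/* ... use thread safely: the reference keeps it alive ... */
		thread_deallocate(thread);
	}
#endif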
7923 
7924 int
7925 pid_from_task(task_t task)
7926 {
7927 	int pid = -1;
7928 	void *bsd_info = get_bsdtask_info(task);
7929 
7930 	if (bsd_info) {
7931 		pid = proc_pid(bsd_info);
7932 	} else {
7933 		pid = task_pid(task);
7934 	}
7935 
7936 	return pid;
7937 }
7938 
7939 /*
7940  * Control the CPU usage monitor for a task.
7941  */
7942 kern_return_t
7943 task_cpu_usage_monitor_ctl(task_t task, uint32_t *flags)
7944 {
7945 	int error = KERN_SUCCESS;
7946 
7947 	if (*flags & CPUMON_MAKE_FATAL) {
7948 		task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_FATAL_CPUMON;
7949 	} else {
7950 		error = KERN_INVALID_ARGUMENT;
7951 	}
7952 
7953 	return error;
7954 }
7955 
7956 /*
7957  * Control the wakeups monitor for a task.
7958  */
7959 kern_return_t
7960 task_wakeups_monitor_ctl(task_t task, uint32_t *flags, int32_t *rate_hz)
7961 {
7962 	ledger_t ledger = task->ledger;
7963 
7964 	task_lock(task);
7965 	if (*flags & WAKEMON_GET_PARAMS) {
7966 		ledger_amount_t limit;
7967 		uint64_t                period;
7968 
7969 		ledger_get_limit(ledger, task_ledgers.interrupt_wakeups, &limit);
7970 		ledger_get_period(ledger, task_ledgers.interrupt_wakeups, &period);
7971 
7972 		if (limit != LEDGER_LIMIT_INFINITY) {
7973 			/*
7974 			 * An active limit means the wakeups monitor is enabled.
7975 			 */
7976 			*rate_hz = (int32_t)(limit / (int64_t)(period / NSEC_PER_SEC));
7977 			*flags = WAKEMON_ENABLE;
7978 			if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON) {
7979 				*flags |= WAKEMON_MAKE_FATAL;
7980 			}
7981 		} else {
7982 			*flags = WAKEMON_DISABLE;
7983 			*rate_hz = -1;
7984 		}
7985 
7986 		/*
7987 		 * If WAKEMON_GET_PARAMS is present in flags, all other flags are ignored.
7988 		 */
7989 		task_unlock(task);
7990 		return KERN_SUCCESS;
7991 	}
7992 
7993 	if (*flags & WAKEMON_ENABLE) {
7994 		if (*flags & WAKEMON_SET_DEFAULTS) {
7995 			*rate_hz = task_wakeups_monitor_rate;
7996 		}
7997 
7998 #ifndef CONFIG_NOMONITORS
7999 		if (*flags & WAKEMON_MAKE_FATAL) {
8000 			task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON;
8001 		}
8002 #endif /* CONFIG_NOMONITORS */
8003 
8004 		if (*rate_hz <= 0) {
8005 			task_unlock(task);
8006 			return KERN_INVALID_ARGUMENT;
8007 		}
8008 
8009 #ifndef CONFIG_NOMONITORS
8010 		ledger_set_limit(ledger, task_ledgers.interrupt_wakeups, *rate_hz * task_wakeups_monitor_interval,
8011 		    (uint8_t)task_wakeups_monitor_ustackshots_trigger_pct);
8012 		ledger_set_period(ledger, task_ledgers.interrupt_wakeups, task_wakeups_monitor_interval * NSEC_PER_SEC);
8013 		ledger_enable_callback(ledger, task_ledgers.interrupt_wakeups);
8014 #endif /* CONFIG_NOMONITORS */
8015 	} else if (*flags & WAKEMON_DISABLE) {
8016 		/*
8017 		 * Caller wishes to disable wakeups monitor on the task.
8018 		 *
8019 		 * Disable telemetry if it was triggered by the wakeups monitor, and
8020 		 * remove the limit & callback on the wakeups ledger entry.
8021 		 */
8022 #if CONFIG_TELEMETRY
8023 		telemetry_task_ctl_locked(task, TF_WAKEMON_WARNING, 0);
8024 #endif
8025 		ledger_disable_refill(ledger, task_ledgers.interrupt_wakeups);
8026 		ledger_disable_callback(ledger, task_ledgers.interrupt_wakeups);
8027 	}
8028 
8029 	task_unlock(task);
8030 	return KERN_SUCCESS;
8031 }
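
/*
 * Illustrative sketch, not part of xnu: enabling the wakeups monitor at
 * the default rate, then reading the parameters back:
 */
#if 0
	uint32_t flags = WAKEMON_ENABLE | WAKEMON_SET_DEFAULTS;
	int32_t rate_hz = 0;
	kern_return_t kr;

	kr = task_wakeups_monitor_ctl(task, &flags, &rate_hz);

	flags = WAKEMON_GET_PARAMS;
	kr = task_wakeups_monitor_ctl(task, &flags, &rate_hz);
	/* flags now holds WAKEMON_ENABLE, plus WAKEMON_MAKE_FATAL if set */
#endif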
8032 
8033 void
8034 task_wakeups_rate_exceeded(int warning, __unused const void *param0, __unused const void *param1)
8035 {
8036 	if (warning == LEDGER_WARNING_ROSE_ABOVE) {
8037 #if CONFIG_TELEMETRY
8038 		/*
8039 		 * This task is in danger of violating the wakeups monitor. Enable telemetry on this task
8040 		 * so there are micro-stackshots available if and when EXC_RESOURCE is triggered.
8041 		 */
8042 		telemetry_task_ctl(current_task(), TF_WAKEMON_WARNING, 1);
8043 #endif
8044 		return;
8045 	}
8046 
8047 #if CONFIG_TELEMETRY
8048 	/*
8049 	 * If the balance has dipped below the warning level (LEDGER_WARNING_DIPPED_BELOW) or
8050 	 * exceeded the limit, turn telemetry off for the task.
8051 	 */
8052 	telemetry_task_ctl(current_task(), TF_WAKEMON_WARNING, 0);
8053 #endif
8054 
8055 	if (warning == 0) {
8056 		SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS();
8057 	}
8058 }
8059 
8060 TUNABLE(bool, enable_wakeup_reports, "enable_wakeup_reports", false); /* Enable wakeup reports. */
8061 
8062 void __attribute__((noinline))
8063 SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void)
8064 {
8065 	task_t                      task        = current_task();
8066 	int                         pid         = 0;
8067 	const char                  *procname   = "unknown";
8068 	boolean_t                   fatal;
8069 	kern_return_t               kr;
8070 #ifdef EXC_RESOURCE_MONITORS
8071 	mach_exception_data_type_t  code[EXCEPTION_CODE_MAX];
8072 #endif /* EXC_RESOURCE_MONITORS */
8073 	struct ledger_entry_info    lei;
8074 
8075 #ifdef MACH_BSD
8076 	pid = proc_selfpid();
8077 	if (get_bsdtask_info(task) != NULL) {
8078 		procname = proc_name_address(get_bsdtask_info(current_task()));
8079 	}
8080 #endif
8081 
8082 	ledger_get_entry_info(task->ledger, task_ledgers.interrupt_wakeups, &lei);
8083 
8084 	/*
8085 	 * Disable the exception notification so we don't overwhelm
8086 	 * the listener with an endless stream of redundant exceptions.
8087 	 * TODO: detect whether another thread is already reporting the violation.
8088 	 */
8089 	uint32_t flags = WAKEMON_DISABLE;
8090 	task_wakeups_monitor_ctl(task, &flags, NULL);
8091 
8092 	fatal = task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON;
8093 	trace_resource_violation(RMON_CPUWAKES_VIOLATED, &lei);
8094 	os_log(OS_LOG_DEFAULT, "process %s[%d] caught waking the CPU %llu times "
8095 	    "over ~%llu seconds, averaging %llu wakes / second and "
8096 	    "violating a %slimit of %llu wakes over %llu seconds.\n",
8097 	    procname, pid,
8098 	    lei.lei_balance, lei.lei_last_refill / NSEC_PER_SEC,
8099 	    lei.lei_last_refill == 0 ? 0 :
8100 	    (NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill),
8101 	    fatal ? "FATAL " : "",
8102 	    lei.lei_limit, lei.lei_refill_period / NSEC_PER_SEC);
8103 
8104 	if (enable_wakeup_reports) {
8105 		kr = send_resource_violation(send_cpu_wakes_violation, task, &lei,
8106 		    fatal ? kRNFatalLimitFlag : 0);
8107 		if (kr) {
8108 			printf("send_resource_violation(CPU wakes, ...): error %#x\n", kr);
8109 		}
8110 	}
8111 
8112 #ifdef EXC_RESOURCE_MONITORS
8113 	if (disable_exc_resource) {
8114 		printf("process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
8115 		    "suppressed by a boot-arg\n", procname, pid);
8116 		return;
8117 	}
8118 	if (disable_exc_resource_during_audio && audio_active) {
8119 		os_log(OS_LOG_DEFAULT, "process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
8120 		    "suppressed due to audio playback\n", procname, pid);
8121 		return;
8122 	}
8123 	if (lei.lei_last_refill == 0) {
8124 		os_log(OS_LOG_DEFAULT, "process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
8125 		    "suppressed due to lei.lei_last_refill = 0 \n", procname, pid);
8126 	}
8127 
8128 	code[0] = code[1] = 0;
8129 	EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_WAKEUPS);
8130 	EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_WAKEUPS_MONITOR);
8131 	EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_PERMITTED(code[0],
8132 	    NSEC_PER_SEC * lei.lei_limit / lei.lei_refill_period);
8133 	EXC_RESOURCE_CPUMONITOR_ENCODE_OBSERVATION_INTERVAL(code[0],
8134 	    lei.lei_last_refill);
8135 	EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_OBSERVED(code[1],
8136 	    NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill);
8137 	exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
8138 #endif /* EXC_RESOURCE_MONITORS */
8139 
8140 	if (fatal) {
8141 		task_terminate_internal(task);
8142 	}
8143 }
8144 
8145 static boolean_t
8146 global_update_logical_writes(int64_t io_delta, int64_t *global_write_count)
8147 {
8148 	int64_t old_count, new_count;
8149 	boolean_t needs_telemetry;
8150 
8151 	do {
8152 		new_count = old_count = *global_write_count;
8153 		new_count += io_delta;
8154 		if (new_count >= io_telemetry_limit) {
8155 			new_count = 0;
8156 			needs_telemetry = TRUE;
8157 		} else {
8158 			needs_telemetry = FALSE;
8159 		}
8160 	} while (!OSCompareAndSwap64(old_count, new_count, global_write_count));
8161 	return needs_telemetry;
8162 }
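
/*
 * The compare-and-swap loop above accumulates signed deltas into the global
 * counter and atomically resets it to zero once it crosses
 * io_telemetry_limit, so one caller observes the TRUE return per crossing
 * and arms the telemetry AST.
 */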
8163 
8164 void
8165 task_update_physical_writes(__unused task_t task, __unused task_physical_write_flavor_t flavor, __unused uint64_t io_size, __unused task_balance_flags_t flags)
8166 {
8167 #if CONFIG_PHYS_WRITE_ACCT
8168 	if (!io_size) {
8169 		return;
8170 	}
8171 
8172 	/*
8173 	 * task == NULL means that we have to update kernel_task ledgers
8174 	 */
8175 	if (!task) {
8176 		task = kernel_task;
8177 	}
8178 
8179 	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PHYS_WRITE_ACCT)) | DBG_FUNC_NONE,
8180 	    task_pid(task), flavor, io_size, flags, 0);
8181 	DTRACE_IO4(physical_writes, struct task *, task, task_physical_write_flavor_t, flavor, uint64_t, io_size, task_balance_flags_t, flags);
8182 
8183 	if (flags & TASK_BALANCE_CREDIT) {
8184 		if (flavor == TASK_PHYSICAL_WRITE_METADATA) {
8185 			OSAddAtomic64(io_size, (SInt64 *)&(task->task_fs_metadata_writes));
8186 			ledger_credit_nocheck(task->ledger, task_ledgers.fs_metadata_writes, io_size);
8187 		}
8188 	} else if (flags & TASK_BALANCE_DEBIT) {
8189 		if (flavor == TASK_PHYSICAL_WRITE_METADATA) {
8190 			OSAddAtomic64(-1 * io_size, (SInt64 *)&(task->task_fs_metadata_writes));
8191 			ledger_debit_nocheck(task->ledger, task_ledgers.fs_metadata_writes, io_size);
8192 		}
8193 	}
8194 #endif /* CONFIG_PHYS_WRITE_ACCT */
8195 }
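
/*
 * Illustrative (non-compiled) sketch: a filesystem crediting 4 KiB of
 * metadata writes against the current task (passing task == NULL would
 * account to kernel_task instead, per the code above).
 */
#if 0
task_update_physical_writes(current_task(), TASK_PHYSICAL_WRITE_METADATA,
    4096, TASK_BALANCE_CREDIT);
#endif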
8196 
8197 void
8198 task_update_logical_writes(task_t task, uint32_t io_size, int flags, void *vp)
8199 {
8200 	int64_t io_delta = 0;
8201 	int64_t * global_counter_to_update;
8202 	boolean_t needs_telemetry = FALSE;
8203 	boolean_t is_external_device = FALSE;
8204 	int ledger_to_update = 0;
8205 	struct task_writes_counters * writes_counters_to_update;
8206 
8207 	if ((!task) || (!io_size) || (!vp)) {
8208 		return;
8209 	}
8210 
8211 	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_DATA_WRITE)) | DBG_FUNC_NONE,
8212 	    task_pid(task), io_size, flags, (uintptr_t)VM_KERNEL_ADDRPERM(vp), 0);
8213 	DTRACE_IO4(logical_writes, struct task *, task, uint32_t, io_size, int, flags, vnode *, vp);
8214 
8215 	// Is the drive backing this vnode internal or external to the system?
8216 	if (vnode_isonexternalstorage(vp) == false) {
8217 		global_counter_to_update = &global_logical_writes_count;
8218 		ledger_to_update = task_ledgers.logical_writes;
8219 		writes_counters_to_update = &task->task_writes_counters_internal;
8220 		is_external_device = FALSE;
8221 	} else {
8222 		global_counter_to_update = &global_logical_writes_to_external_count;
8223 		ledger_to_update = task_ledgers.logical_writes_to_external;
8224 		writes_counters_to_update = &task->task_writes_counters_external;
8225 		is_external_device = TRUE;
8226 	}
8227 
8228 	switch (flags) {
8229 	case TASK_WRITE_IMMEDIATE:
8230 		OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_immediate_writes));
8231 		ledger_credit(task->ledger, ledger_to_update, io_size);
8232 		if (!is_external_device) {
8233 			coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
8234 		}
8235 		break;
8236 	case TASK_WRITE_DEFERRED:
8237 		OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_deferred_writes));
8238 		ledger_credit(task->ledger, ledger_to_update, io_size);
8239 		if (!is_external_device) {
8240 			coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
8241 		}
8242 		break;
8243 	case TASK_WRITE_INVALIDATED:
8244 		OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_invalidated_writes));
8245 		ledger_debit(task->ledger, ledger_to_update, io_size);
8246 		if (!is_external_device) {
8247 			coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, FALSE, io_size);
8248 		}
8249 		break;
8250 	case TASK_WRITE_METADATA:
8251 		OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_metadata_writes));
8252 		ledger_credit(task->ledger, ledger_to_update, io_size);
8253 		if (!is_external_device) {
8254 			coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
8255 		}
8256 		break;
8257 	}
8258 
8259 	io_delta = (flags == TASK_WRITE_INVALIDATED) ? ((int64_t)io_size * -1ll) : ((int64_t)io_size);
8260 	if (io_telemetry_limit != 0) {
8261 		/* If io_telemetry_limit is 0, disable global updates and I/O telemetry */
8262 		needs_telemetry = global_update_logical_writes(io_delta, global_counter_to_update);
8263 		if (needs_telemetry && !is_external_device) {
8264 			act_set_io_telemetry_ast(current_thread());
8265 		}
8266 	}
8267 }
8268 
8269 /*
8270  * Control the I/O monitor for a task.
8271  */
8272 kern_return_t
8273 task_io_monitor_ctl(task_t task, uint32_t *flags)
8274 {
8275 	ledger_t ledger = task->ledger;
8276 
8277 	task_lock(task);
8278 	if (*flags & IOMON_ENABLE) {
8279 		/* Configure the physical I/O ledger */
8280 		ledger_set_limit(ledger, task_ledgers.physical_writes, (task_iomon_limit_mb * 1024 * 1024), 0);
8281 		ledger_set_period(ledger, task_ledgers.physical_writes, (task_iomon_interval_secs * NSEC_PER_SEC));
8282 	} else if (*flags & IOMON_DISABLE) {
8283 		/*
8284 		 * Caller wishes to disable I/O monitor on the task.
8285 		 */
8286 		ledger_disable_refill(ledger, task_ledgers.physical_writes);
8287 		ledger_disable_callback(ledger, task_ledgers.physical_writes);
8288 	}
8289 
8290 	task_unlock(task);
8291 	return KERN_SUCCESS;
8292 }
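
/*
 * Illustrative (non-compiled) sketch: enabling the physical-writes I/O
 * monitor on the current task; the limit and refill period come from the
 * task_iomon_limit_mb and task_iomon_interval_secs tunables used above.
 */
#if 0
uint32_t iomon_flags = IOMON_ENABLE;
(void)task_io_monitor_ctl(current_task(), &iomon_flags);
#endif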
8293 
8294 void
8295 task_io_rate_exceeded(int warning, const void *param0, __unused const void *param1)
8296 {
8297 	if (warning == 0) {
8298 		SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO((int)param0);
8299 	}
8300 }
8301 
8302 void __attribute__((noinline))
8303 SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor)
8304 {
8305 	int                             pid = 0;
8306 	task_t                          task = current_task();
8307 #ifdef EXC_RESOURCE_MONITORS
8308 	mach_exception_data_type_t      code[EXCEPTION_CODE_MAX];
8309 #endif /* EXC_RESOURCE_MONITORS */
8310 	struct ledger_entry_info        lei = {};
8311 	kern_return_t                   kr;
8312 
8313 #ifdef MACH_BSD
8314 	pid = proc_selfpid();
8315 #endif
8316 	/*
8317 	 * Get the ledger entry info. We need to do this before disabling the exception
8318 	 * to get correct values for all fields.
8319 	 */
8320 	switch (flavor) {
8321 	case FLAVOR_IO_PHYSICAL_WRITES:
8322 		ledger_get_entry_info(task->ledger, task_ledgers.physical_writes, &lei);
8323 		break;
8324 	}
8325 
8326 
8327 	/*
8328 	 * Disable the exception notification so we don't overwhelm
8329 	 * the listener with an endless stream of redundant exceptions.
8330 	 * TODO: detect whether another thread is already reporting the violation.
8331 	 */
8332 	uint32_t flags = IOMON_DISABLE;
8333 	task_io_monitor_ctl(task, &flags);
8334 
8335 	if (flavor == FLAVOR_IO_LOGICAL_WRITES) {
8336 		trace_resource_violation(RMON_LOGWRITES_VIOLATED, &lei);
8337 	}
8338 	os_log(OS_LOG_DEFAULT, "process [%d] caught causing excessive I/O (flavor: %d). Task I/O: %lld MB. [Limit : %lld MB per %lld secs]\n",
8339 	    pid, flavor, (lei.lei_balance / (1024 * 1024)), (lei.lei_limit / (1024 * 1024)), (lei.lei_refill_period / NSEC_PER_SEC));
8340 
8341 	kr = send_resource_violation(send_disk_writes_violation, task, &lei, kRNFlagsNone);
8342 	if (kr) {
8343 		printf("send_resource_violation(disk_writes, ...): error %#x\n", kr);
8344 	}
8345 
8346 #ifdef EXC_RESOURCE_MONITORS
8347 	code[0] = code[1] = 0;
8348 	EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_IO);
8349 	EXC_RESOURCE_ENCODE_FLAVOR(code[0], flavor);
8350 	EXC_RESOURCE_IO_ENCODE_INTERVAL(code[0], (lei.lei_refill_period / NSEC_PER_SEC));
8351 	EXC_RESOURCE_IO_ENCODE_LIMIT(code[0], (lei.lei_limit / (1024 * 1024)));
8352 	EXC_RESOURCE_IO_ENCODE_OBSERVED(code[1], (lei.lei_balance / (1024 * 1024)));
8353 	exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
8354 #endif /* EXC_RESOURCE_MONITORS */
8355 }
8356 
8357 void
8358 task_port_space_ast(__unused task_t task)
8359 {
8360 	uint32_t current_size, soft_limit, hard_limit;
8361 	assert(task == current_task());
8362 	bool should_notify = ipc_space_check_table_size_limit(task->itk_space,
8363 	    &current_size, &soft_limit, &hard_limit);
8364 	if (should_notify) {
8365 		SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_MACH_PORTS(task, current_size, soft_limit, hard_limit);
8366 	}
8367 }
8368 
8369 #if CONFIG_PROC_RESOURCE_LIMITS
8370 static mach_port_t
8371 task_allocate_fatal_port(void)
8372 {
8373 	mach_port_t task_fatal_port = MACH_PORT_NULL;
8374 	task_id_token_t token;
8375 
8376 	kern_return_t kr = task_create_identity_token(current_task(), &token); /* Takes a reference on the token */
8377 	if (kr) {
8378 		return MACH_PORT_NULL;
8379 	}
8380 	task_fatal_port = ipc_kobject_alloc_port((ipc_kobject_t)token, IKOT_TASK_FATAL,
8381 	    IPC_KOBJECT_ALLOC_NSREQUEST | IPC_KOBJECT_ALLOC_MAKE_SEND);
8382 
8383 	task_id_token_set_port(token, task_fatal_port);
8384 
8385 	return task_fatal_port;
8386 }
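
/*
 * The fatal port wraps a task identity token with a no-senders request:
 * once the receiver of the resource-violation notification drops the last
 * send right, task_fatal_port_no_senders() below resolves the token back
 * to the task and kills it.
 */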
8387 
8388 static void
8389 task_fatal_port_no_senders(ipc_port_t port, __unused mach_port_mscount_t mscount)
8390 {
8391 	task_t task = TASK_NULL;
8392 	kern_return_t kr;
8393 
8394 	task_id_token_t token = ipc_kobject_get_stable(port, IKOT_TASK_FATAL);
8395 
8396 	assert(token != NULL);
8397 	if (token) {
8398 		kr = task_identity_token_get_task_grp(token, &task, TASK_GRP_KERNEL); /* takes a reference on task */
8399 		if (task) {
8400 			task_bsdtask_kill(task);
8401 			task_deallocate(task);
8402 		}
8403 		task_id_token_release(token); /* consumes ref given by notification */
8404 	}
8405 }
8406 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
8407 
8408 void __attribute__((noinline))
8409 SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_MACH_PORTS(task_t task, uint32_t current_size, uint32_t soft_limit, uint32_t hard_limit)
8410 {
8411 	int pid = 0;
8412 	char *procname = (char *) "unknown";
8413 	__unused kern_return_t kr;
8414 	__unused resource_notify_flags_t flags = kRNFlagsNone;
8415 	__unused uint32_t limit;
8416 	__unused mach_port_t task_fatal_port = MACH_PORT_NULL;
8417 	mach_exception_data_type_t      code[EXCEPTION_CODE_MAX];
8418 
8419 	pid = proc_selfpid();
8420 	if (get_bsdtask_info(task) != NULL) {
8421 		procname = proc_name_address(get_bsdtask_info(task));
8422 	}
8423 
8424 	/*
8425 	 * Only kernel_task and launchd are allowed to
8426 	 * have a really large IPC space.
8427 	 */
8428 	if (pid == 0 || pid == 1) {
8429 		return;
8430 	}
8431 
8432 	os_log(OS_LOG_DEFAULT, "process %s[%d] caught allocating too many mach ports. \
8433 	    Num of ports allocated %u; \n", procname, pid, current_size);
8434 
8435 	/* Abort the process if it has hit the system-wide limit for ipc port table size */
8436 	if (!hard_limit && !soft_limit) {
8437 		code[0] = code[1] = 0;
8438 		EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_PORTS);
8439 		EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_PORT_SPACE_FULL);
8440 		EXC_RESOURCE_PORTS_ENCODE_PORTS(code[0], current_size);
8441 
8442 		exit_with_port_space_exception(current_proc(), code[0], code[1]);
8443 
8444 		return;
8445 	}
8446 
8447 #if CONFIG_PROC_RESOURCE_LIMITS
8448 	if (hard_limit > 0) {
8449 		flags |= kRNHardLimitFlag;
8450 		limit = hard_limit;
8451 		task_fatal_port = task_allocate_fatal_port();
8452 		if (!task_fatal_port) {
8453 			os_log(OS_LOG_DEFAULT, "process %s[%d] Unable to create task token ident object", procname, pid);
8454 			task_bsdtask_kill(task);
8455 		}
8456 	} else {
8457 		flags |= kRNSoftLimitFlag;
8458 		limit = soft_limit;
8459 	}
8460 
8461 	kr = send_resource_violation_with_fatal_port(send_port_space_violation, task, (int64_t)current_size, (int64_t)limit, task_fatal_port, flags);
8462 	if (kr) {
8463 		os_log(OS_LOG_DEFAULT, "send_resource_violation(ports, ...): error %#x\n", kr);
8464 	}
8465 	if (task_fatal_port) {
8466 		ipc_port_release_send(task_fatal_port);
8467 	}
8468 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
8469 }
8470 
8471 #if CONFIG_PROC_RESOURCE_LIMITS
8472 void
8473 task_kqworkloop_ast(task_t task, int current_size, int soft_limit, int hard_limit)
8474 {
8475 	assert(task == current_task());
8476 	return SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_KQWORKLOOPS(task, current_size, soft_limit, hard_limit);
8477 }
8478 
8479 void __attribute__((noinline))
8480 SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_KQWORKLOOPS(task_t task, int current_size, int soft_limit, int hard_limit)
8481 {
8482 	int pid = 0;
8483 	char *procname = (char *) "unknown";
8484 #ifdef MACH_BSD
8485 	pid = proc_selfpid();
8486 	if (get_bsdtask_info(task) != NULL) {
8487 		procname = proc_name_address(get_bsdtask_info(task));
8488 	}
8489 #endif
8490 	if (pid == 0 || pid == 1) {
8491 		return;
8492 	}
8493 
8494 	os_log(OS_LOG_DEFAULT, "process %s[%d] caught allocating too many kqworkloops. \
8495 	    Num of kqworkloops allocated %u; \n", procname, pid, current_size);
8496 
8497 	int limit = 0;
8498 	resource_notify_flags_t flags = kRNFlagsNone;
8499 	mach_port_t task_fatal_port = MACH_PORT_NULL;
8500 	if (hard_limit) {
8501 		flags |= kRNHardLimitFlag;
8502 		limit = hard_limit;
8503 
8504 		task_fatal_port = task_allocate_fatal_port();
8505 		if (task_fatal_port == MACH_PORT_NULL) {
8506 			os_log(OS_LOG_DEFAULT, "process %s[%d] Unable to create task token ident object", procname, pid);
8507 			task_bsdtask_kill(task);
8508 		}
8509 	} else {
8510 		flags |= kRNSoftLimitFlag;
8511 		limit = soft_limit;
8512 	}
8513 
8514 	kern_return_t kr;
8515 	kr = send_resource_violation_with_fatal_port(send_kqworkloops_violation, task, (int64_t)current_size, (int64_t)limit, task_fatal_port, flags);
8516 	if (kr) {
8517 		os_log(OS_LOG_DEFAULT, "send_resource_violation_with_fatal_port(kqworkloops, ...): error %#x\n", kr);
8518 	}
8519 	if (task_fatal_port) {
8520 		ipc_port_release_send(task_fatal_port);
8521 	}
8522 }
8523 
8524 
8525 void
8526 task_filedesc_ast(__unused task_t task, __unused int current_size, __unused int soft_limit, __unused int hard_limit)
8527 {
8528 	assert(task == current_task());
8529 	SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task, current_size, soft_limit, hard_limit);
8530 }
8531 
8532 void __attribute__((noinline))
8533 SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task_t task, int current_size, int soft_limit, int hard_limit)
8534 {
8535 	int pid = 0;
8536 	char *procname = (char *) "unknown";
8537 	kern_return_t kr;
8538 	resource_notify_flags_t flags = kRNFlagsNone;
8539 	int limit;
8540 	mach_port_t task_fatal_port = MACH_PORT_NULL;
8541 
8542 #ifdef MACH_BSD
8543 	pid = proc_selfpid();
8544 	if (get_bsdtask_info(task) != NULL) {
8545 		procname = proc_name_address(get_bsdtask_info(task));
8546 	}
8547 #endif
8548 	/*
8549 	 * Only kernel_task and launchd are allowed to
8550 	 * have a really large number of file descriptors.
8551 	 */
8552 	if (pid == 0 || pid == 1) {
8553 		return;
8554 	}
8555 
8556 	os_log(OS_LOG_DEFAULT, "process %s[%d] caught allocating too many file descriptors. \
8557 	    Num of fds allocated %u; \n", procname, pid, current_size);
8558 
8559 	if (hard_limit > 0) {
8560 		flags |= kRNHardLimitFlag;
8561 		limit = hard_limit;
8562 		task_fatal_port = task_allocate_fatal_port();
8563 		if (!task_fatal_port) {
8564 			os_log(OS_LOG_DEFAULT, "process %s[%d] Unable to create task token ident object", procname, pid);
8565 			task_bsdtask_kill(task);
8566 		}
8567 	} else {
8568 		flags |= kRNSoftLimitFlag;
8569 		limit = soft_limit;
8570 	}
8571 
8572 	kr = send_resource_violation_with_fatal_port(send_file_descriptors_violation, task, (int64_t)current_size, (int64_t)limit, task_fatal_port, flags);
8573 	if (kr) {
8574 		os_log(OS_LOG_DEFAULT, "send_resource_violation_with_fatal_port(filedesc, ...): error %#x\n", kr);
8575 	}
8576 	if (task_fatal_port) {
8577 		ipc_port_release_send(task_fatal_port);
8578 	}
8579 }
8580 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
8581 
8582 /* Placeholders for the task set/get voucher interfaces */
8583 kern_return_t
8584 task_get_mach_voucher(
8585 	task_t                  task,
8586 	mach_voucher_selector_t __unused which,
8587 	ipc_voucher_t           *voucher)
8588 {
8589 	if (TASK_NULL == task) {
8590 		return KERN_INVALID_TASK;
8591 	}
8592 
8593 	*voucher = NULL;
8594 	return KERN_SUCCESS;
8595 }
8596 
8597 kern_return_t
8598 task_set_mach_voucher(
8599 	task_t                  task,
8600 	ipc_voucher_t           __unused voucher)
8601 {
8602 	if (TASK_NULL == task) {
8603 		return KERN_INVALID_TASK;
8604 	}
8605 
8606 	return KERN_SUCCESS;
8607 }
8608 
8609 kern_return_t
8610 task_swap_mach_voucher(
8611 	__unused task_t         task,
8612 	__unused ipc_voucher_t  new_voucher,
8613 	ipc_voucher_t          *in_out_old_voucher)
8614 {
8615 	/*
8616 	 * Currently this function is only called from a MIG generated
8617 	 * routine which doesn't release the reference on the voucher
8618 	 * addressed by in_out_old_voucher. To avoid leaking this reference,
8619 	 * a call to release it has been added here.
8620 	 */
8621 	ipc_voucher_release(*in_out_old_voucher);
8622 	OS_ANALYZER_SUPPRESS("81787115") return KERN_NOT_SUPPORTED;
8623 }
8624 
8625 void
8626 task_set_gpu_denied(task_t task, boolean_t denied)
8627 {
8628 	task_lock(task);
8629 
8630 	if (denied) {
8631 		task->t_flags |= TF_GPU_DENIED;
8632 	} else {
8633 		task->t_flags &= ~TF_GPU_DENIED;
8634 	}
8635 
8636 	task_unlock(task);
8637 }
8638 
8639 boolean_t
8640 task_is_gpu_denied(task_t task)
8641 {
8642 	/* We don't need the lock to read this flag */
8643 	return (task->t_flags & TF_GPU_DENIED) ? TRUE : FALSE;
8644 }
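
/*
 * Illustrative (non-compiled) sketch: the GPU-denied flag is a plain
 * set/test pair; only the setter takes the task lock.
 */
#if 0
task_set_gpu_denied(current_task(), TRUE);
assert(task_is_gpu_denied(current_task()));
#endif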
8645 
8646 /*
8647  * Task policy termination uses this path to clear the bit the final time
8648  * during the termination flow, and the TASK_POLICY_TERMINATED bit guarantees
8649  * that it won't be changed again on a terminated task.
8650  */
8651 bool
8652 task_set_game_mode_locked(task_t task, bool enabled)
8653 {
8654 	task_lock_assert_owned(task);
8655 
8656 	if (enabled) {
8657 		assert(proc_get_effective_task_policy(task, TASK_POLICY_TERMINATED) == 0);
8658 	}
8659 
8660 	bool previously_enabled = task_get_game_mode(task);
8661 	bool needs_update = false;
8662 	uint32_t new_count = 0;
8663 
8664 	if (enabled) {
8665 		task->t_flags |= TF_GAME_MODE;
8666 	} else {
8667 		task->t_flags &= ~TF_GAME_MODE;
8668 	}
8669 
8670 	if (enabled && !previously_enabled) {
8671 		if (task_coalition_adjust_game_mode_count(task, 1, &new_count) && (new_count == 1)) {
8672 			needs_update = true;
8673 		}
8674 	} else if (!enabled && previously_enabled) {
8675 		if (task_coalition_adjust_game_mode_count(task, -1, &new_count) && (new_count == 0)) {
8676 			needs_update = true;
8677 		}
8678 	}
8679 
8680 	return needs_update;
8681 }
8682 
8683 void
8684 task_set_game_mode(task_t task, bool enabled)
8685 {
8686 	bool needs_update = false;
8687 
8688 	task_lock(task);
8689 
8690 	/* After termination, further updates are no longer effective */
8691 	if (proc_get_effective_task_policy(task, TASK_POLICY_TERMINATED) == 0) {
8692 		needs_update = task_set_game_mode_locked(task, enabled);
8693 	}
8694 
8695 	task_unlock(task);
8696 
8697 #if CONFIG_THREAD_GROUPS
8698 	if (needs_update) {
8699 		task_coalition_thread_group_game_mode_update(task);
8700 	}
8701 #endif /* CONFIG_THREAD_GROUPS */
8702 }
8703 
8704 bool
8705 task_get_game_mode(task_t task)
8706 {
8707 	/* We don't need the lock to read this flag */
8708 	return task->t_flags & TF_GAME_MODE;
8709 }
8710 
8711 
8712 uint64_t
8713 get_task_memory_region_count(task_t task)
8714 {
8715 	vm_map_t map;
8716 	map = (task == kernel_task) ? kernel_map: task->map;
8717 	return (uint64_t)get_map_nentries(map);
8718 }
8719 
8720 static void
8721 kdebug_trace_dyld_internal(uint32_t base_code,
8722     struct dyld_kernel_image_info *info)
8723 {
8724 	static_assert(sizeof(info->uuid) >= 16);
8725 
8726 #if defined(__LP64__)
8727 	uint64_t *uuid = (uint64_t *)&(info->uuid);
8728 
8729 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8730 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code), uuid[0],
8731 	    uuid[1], info->load_addr,
8732 	    (uint64_t)info->fsid.val[0] | ((uint64_t)info->fsid.val[1] << 32),
8733 	    0);
8734 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8735 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 1),
8736 	    (uint64_t)info->fsobjid.fid_objno |
8737 	    ((uint64_t)info->fsobjid.fid_generation << 32),
8738 	    0, 0, 0, 0);
8739 #else /* defined(__LP64__) */
8740 	uint32_t *uuid = (uint32_t *)&(info->uuid);
8741 
8742 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8743 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 2), uuid[0],
8744 	    uuid[1], uuid[2], uuid[3], 0);
8745 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8746 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 3),
8747 	    (uint32_t)info->load_addr, info->fsid.val[0], info->fsid.val[1],
8748 	    info->fsobjid.fid_objno, 0);
8749 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8750 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 4),
8751 	    info->fsobjid.fid_generation, 0, 0, 0, 0);
8752 #endif /* !defined(__LP64__) */
8753 }
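
/*
 * On LP64, each image is emitted as two tracepoints: the first carries the
 * 16-byte UUID as two 64-bit words, the load address, and both fsid words
 * packed into one 64-bit argument; the second packs fsobjid's object number
 * and generation the same way. The 32-bit path spreads the same fields over
 * three events.
 */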
8754 
8755 static kern_return_t
8756 kdebug_trace_dyld(task_t task, uint32_t base_code,
8757     vm_map_copy_t infos_copy, mach_msg_type_number_t infos_len)
8758 {
8759 	kern_return_t kr;
8760 	dyld_kernel_image_info_array_t infos;
8761 	vm_map_offset_t map_data;
8762 	vm_offset_t data;
8763 
8764 	if (!infos_copy) {
8765 		return KERN_INVALID_ADDRESS;
8766 	}
8767 
8768 	if (!kdebug_enable ||
8769 	    !kdebug_debugid_enabled(KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, 0))) {
8770 		vm_map_copy_discard(infos_copy);
8771 		return KERN_SUCCESS;
8772 	}
8773 
8774 	if (task == NULL || task != current_task()) {
8775 		return KERN_INVALID_TASK;
8776 	}
8777 
8778 	kr = vm_map_copyout(ipc_kernel_map, &map_data, (vm_map_copy_t)infos_copy);
8779 	if (kr != KERN_SUCCESS) {
8780 		return kr;
8781 	}
8782 
8783 	infos = CAST_DOWN(dyld_kernel_image_info_array_t, map_data);
8784 
8785 	for (mach_msg_type_number_t i = 0; i < infos_len; i++) {
8786 		kdebug_trace_dyld_internal(base_code, &(infos[i]));
8787 	}
8788 
8789 	data = CAST_DOWN(vm_offset_t, map_data);
8790 	mach_vm_deallocate(ipc_kernel_map, data, infos_len * sizeof(infos[0]));
8791 	return KERN_SUCCESS;
8792 }
8793 
8794 kern_return_t
8795 task_register_dyld_image_infos(task_t task,
8796     dyld_kernel_image_info_array_t infos_copy,
8797     mach_msg_type_number_t infos_len)
8798 {
8799 	return kdebug_trace_dyld(task, DBG_DYLD_UUID_MAP_A,
8800 	           (vm_map_copy_t)infos_copy, infos_len);
8801 }
8802 
8803 kern_return_t
8804 task_unregister_dyld_image_infos(task_t task,
8805     dyld_kernel_image_info_array_t infos_copy,
8806     mach_msg_type_number_t infos_len)
8807 {
8808 	return kdebug_trace_dyld(task, DBG_DYLD_UUID_UNMAP_A,
8809 	           (vm_map_copy_t)infos_copy, infos_len);
8810 }
8811 
8812 kern_return_t
8813 task_get_dyld_image_infos(__unused task_t task,
8814     __unused dyld_kernel_image_info_array_t * dyld_images,
8815     __unused mach_msg_type_number_t * dyld_imagesCnt)
8816 {
8817 	return KERN_NOT_SUPPORTED;
8818 }
8819 
8820 kern_return_t
8821 task_register_dyld_shared_cache_image_info(task_t task,
8822     dyld_kernel_image_info_t cache_img,
8823     __unused boolean_t no_cache,
8824     __unused boolean_t private_cache)
8825 {
8826 	if (task == NULL || task != current_task()) {
8827 		return KERN_INVALID_TASK;
8828 	}
8829 
8830 	kdebug_trace_dyld_internal(DBG_DYLD_UUID_SHARED_CACHE_A, &cache_img);
8831 	return KERN_SUCCESS;
8832 }
8833 
8834 kern_return_t
8835 task_register_dyld_set_dyld_state(__unused task_t task,
8836     __unused uint8_t dyld_state)
8837 {
8838 	return KERN_NOT_SUPPORTED;
8839 }
8840 
8841 kern_return_t
8842 task_register_dyld_get_process_state(__unused task_t task,
8843     __unused dyld_kernel_process_info_t * dyld_process_state)
8844 {
8845 	return KERN_NOT_SUPPORTED;
8846 }
8847 
8848 kern_return_t
8849 task_inspect(task_inspect_t task_insp, task_inspect_flavor_t flavor,
8850     task_inspect_info_t info_out, mach_msg_type_number_t *size_in_out)
8851 {
8852 #if CONFIG_PERVASIVE_CPI
8853 	task_t task = (task_t)task_insp;
8854 	kern_return_t kr = KERN_SUCCESS;
8855 	mach_msg_type_number_t size;
8856 
8857 	if (task == TASK_NULL) {
8858 		return KERN_INVALID_ARGUMENT;
8859 	}
8860 
8861 	size = *size_in_out;
8862 
8863 	switch (flavor) {
8864 	case TASK_INSPECT_BASIC_COUNTS: {
8865 		struct task_inspect_basic_counts *bc =
8866 		    (struct task_inspect_basic_counts *)info_out;
8867 		struct recount_usage stats = { 0 };
8868 		if (size < TASK_INSPECT_BASIC_COUNTS_COUNT) {
8869 			kr = KERN_INVALID_ARGUMENT;
8870 			break;
8871 		}
8872 
8873 		recount_sum(&recount_task_plan, task->tk_recount.rtk_lifetime, &stats);
8874 		bc->instructions = recount_usage_instructions(&stats);
8875 		bc->cycles = recount_usage_cycles(&stats);
8876 		size = TASK_INSPECT_BASIC_COUNTS_COUNT;
8877 		break;
8878 	}
8879 	default:
8880 		kr = KERN_INVALID_ARGUMENT;
8881 		break;
8882 	}
8883 
8884 	if (kr == KERN_SUCCESS) {
8885 		*size_in_out = size;
8886 	}
8887 	return kr;
8888 #else /* CONFIG_PERVASIVE_CPI */
8889 #pragma unused(task_insp, flavor, info_out, size_in_out)
8890 	return KERN_NOT_SUPPORTED;
8891 #endif /* !CONFIG_PERVASIVE_CPI */
8892 }
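
/*
 * Illustrative (non-compiled) sketch: reading a task's lifetime instruction
 * and cycle counts through the inspect interface above (only meaningful on
 * CONFIG_PERVASIVE_CPI kernels).
 */
#if 0
struct task_inspect_basic_counts counts = { 0 };
mach_msg_type_number_t counts_size = TASK_INSPECT_BASIC_COUNTS_COUNT;
kern_return_t ikr = task_inspect((task_inspect_t)current_task(),
    TASK_INSPECT_BASIC_COUNTS, (task_inspect_info_t)&counts, &counts_size);
#endif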
8893 
8894 #if CONFIG_SECLUDED_MEMORY
8895 int num_tasks_can_use_secluded_mem = 0;
8896 
8897 void
8898 task_set_can_use_secluded_mem(
8899 	task_t          task,
8900 	boolean_t       can_use_secluded_mem)
8901 {
8902 	if (!task->task_could_use_secluded_mem) {
8903 		return;
8904 	}
8905 	task_lock(task);
8906 	task_set_can_use_secluded_mem_locked(task, can_use_secluded_mem);
8907 	task_unlock(task);
8908 }
8909 
8910 void
8911 task_set_can_use_secluded_mem_locked(
8912 	task_t          task,
8913 	boolean_t       can_use_secluded_mem)
8914 {
8915 	assert(task->task_could_use_secluded_mem);
8916 	if (can_use_secluded_mem &&
8917 	    secluded_for_apps &&         /* global boot-arg */
8918 	    !task->task_can_use_secluded_mem) {
8919 		assert(num_tasks_can_use_secluded_mem >= 0);
8920 		OSAddAtomic(+1,
8921 		    (volatile SInt32 *)&num_tasks_can_use_secluded_mem);
8922 		task->task_can_use_secluded_mem = TRUE;
8923 	} else if (!can_use_secluded_mem &&
8924 	    task->task_can_use_secluded_mem) {
8925 		assert(num_tasks_can_use_secluded_mem > 0);
8926 		OSAddAtomic(-1,
8927 		    (volatile SInt32 *)&num_tasks_can_use_secluded_mem);
8928 		task->task_can_use_secluded_mem = FALSE;
8929 	}
8930 }
8931 
8932 void
8933 task_set_could_use_secluded_mem(
8934 	task_t          task,
8935 	boolean_t       could_use_secluded_mem)
8936 {
8937 	task->task_could_use_secluded_mem = !!could_use_secluded_mem;
8938 }
8939 
8940 void
8941 task_set_could_also_use_secluded_mem(
8942 	task_t          task,
8943 	boolean_t       could_also_use_secluded_mem)
8944 {
8945 	task->task_could_also_use_secluded_mem = !!could_also_use_secluded_mem;
8946 }
8947 
8948 boolean_t
8949 task_can_use_secluded_mem(
8950 	task_t          task,
8951 	boolean_t       is_alloc)
8952 {
8953 	if (task->task_can_use_secluded_mem) {
8954 		assert(task->task_could_use_secluded_mem);
8955 		assert(num_tasks_can_use_secluded_mem > 0);
8956 		return TRUE;
8957 	}
8958 	if (task->task_could_also_use_secluded_mem &&
8959 	    num_tasks_can_use_secluded_mem > 0) {
8960 		assert(num_tasks_can_use_secluded_mem > 0);
8961 		return TRUE;
8962 	}
8963 
8964 	/*
8965 	 * If a single task is using more than some large amount of
8966 	 * memory (i.e. secluded_shutoff_trigger) and is approaching
8967 	 * its task limit, allow it to dip into secluded and begin
8968 	 * suppression of rebuilding secluded memory until that task exits.
8969 	 */
8970 	if (is_alloc && secluded_shutoff_trigger != 0) {
8971 		uint64_t phys_used = get_task_phys_footprint(task);
8972 		uint64_t limit = get_task_phys_footprint_limit(task);
8973 		if (phys_used > secluded_shutoff_trigger &&
8974 		    limit > secluded_shutoff_trigger &&
8975 		    phys_used > limit - secluded_shutoff_headroom) {
8976 			start_secluded_suppression(task);
8977 			return TRUE;
8978 		}
8979 	}
8980 
8981 	return FALSE;
8982 }
8983 
8984 boolean_t
8985 task_could_use_secluded_mem(
8986 	task_t  task)
8987 {
8988 	return task->task_could_use_secluded_mem;
8989 }
8990 
8991 boolean_t
8992 task_could_also_use_secluded_mem(
8993 	task_t  task)
8994 {
8995 	return task->task_could_also_use_secluded_mem;
8996 }
8997 #endif /* CONFIG_SECLUDED_MEMORY */
8998 
8999 queue_head_t *
9000 task_io_user_clients(task_t task)
9001 {
9002 	return &task->io_user_clients;
9003 }
9004 
9005 void
9006 task_set_message_app_suspended(task_t task, boolean_t enable)
9007 {
9008 	task->message_app_suspended = enable;
9009 }
9010 
9011 void
9012 task_copy_fields_for_exec(task_t dst_task, task_t src_task)
9013 {
9014 	dst_task->vtimers = src_task->vtimers;
9015 }
9016 
9017 #if DEVELOPMENT || DEBUG
9018 int vm_region_footprint = 0;
9019 #endif /* DEVELOPMENT || DEBUG */
9020 
9021 boolean_t
9022 task_self_region_footprint(void)
9023 {
9024 #if DEVELOPMENT || DEBUG
9025 	if (vm_region_footprint) {
9026 		/* system-wide override */
9027 		return TRUE;
9028 	}
9029 #endif /* DEVELOPMENT || DEBUG */
9030 	return current_task()->task_region_footprint;
9031 }
9032 
9033 void
9034 task_self_region_footprint_set(
9035 	boolean_t newval)
9036 {
9037 	task_t  curtask;
9038 
9039 	curtask = current_task();
9040 	task_lock(curtask);
9041 	if (newval) {
9042 		curtask->task_region_footprint = TRUE;
9043 	} else {
9044 		curtask->task_region_footprint = FALSE;
9045 	}
9046 	task_unlock(curtask);
9047 }
9048 
9049 void
9050 task_set_darkwake_mode(task_t task, boolean_t set_mode)
9051 {
9052 	assert(task);
9053 
9054 	task_lock(task);
9055 
9056 	if (set_mode) {
9057 		task->t_flags |= TF_DARKWAKE_MODE;
9058 	} else {
9059 		task->t_flags &= ~(TF_DARKWAKE_MODE);
9060 	}
9061 
9062 	task_unlock(task);
9063 }
9064 
9065 boolean_t
9066 task_get_darkwake_mode(task_t task)
9067 {
9068 	assert(task);
9069 	return (task->t_flags & TF_DARKWAKE_MODE) != 0;
9070 }
9071 
9072 /*
9073  * Set default behavior for task's control port and EXC_GUARD variants that have
9074  * settable behavior.
9075  *
9076  * Platform binaries typically have one behavior, third parties another -
9077  * but there are special exception we may need to account for.
9078  */
9079 void
9080 task_set_exc_guard_ctrl_port_default(
9081 	task_t task,
9082 	thread_t main_thread,
9083 	const char *name,
9084 	unsigned int namelen,
9085 	boolean_t is_simulated,
9086 	uint32_t platform,
9087 	uint32_t sdk)
9088 {
9089 	task_control_port_options_t opts = TASK_CONTROL_PORT_OPTIONS_NONE;
9090 
9091 	if (task_is_hardened_binary(task)) {
9092 		/* set exc guard default behavior for hardened binaries */
9093 		task->task_exc_guard = (task_exc_guard_default & TASK_EXC_GUARD_ALL);
9094 
9095 		if (1 == task_pid(task)) {
9096 			/* special flags for inittask - deliver every instance as a corpse */
9097 			task->task_exc_guard = _TASK_EXC_GUARD_ALL_CORPSE;
9098 		} else if (task_exc_guard_default & TASK_EXC_GUARD_HONOR_NAMED_DEFAULTS) {
9099 			/* honor by-name default setting overrides */
9100 
9101 			int count = sizeof(task_exc_guard_named_defaults) / sizeof(struct task_exc_guard_named_default);
9102 
9103 			for (int i = 0; i < count; i++) {
9104 				const struct task_exc_guard_named_default *named_default =
9105 				    &task_exc_guard_named_defaults[i];
9106 				if (strncmp(named_default->name, name, namelen) == 0 &&
9107 				    strlen(named_default->name) == namelen) {
9108 					task->task_exc_guard = named_default->behavior;
9109 					break;
9110 				}
9111 			}
9112 		}
9113 
9114 		/* set control port options for 1p code, inherited from parent task by default */
9115 		opts = ipc_control_port_options & ICP_OPTIONS_1P_MASK;
9116 	} else {
9117 		/* set exc guard default behavior for third-party code */
9118 		task->task_exc_guard = ((task_exc_guard_default >> TASK_EXC_GUARD_THIRD_PARTY_DEFAULT_SHIFT) & TASK_EXC_GUARD_ALL);
9119 		/* set control port options for 3p code, inherited from parent task by default */
9120 		opts = (ipc_control_port_options & ICP_OPTIONS_3P_MASK) >> ICP_OPTIONS_3P_SHIFT;
9121 	}
9122 
9123 	if (is_simulated) {
9124 		/* If simulated and built against pre-iOS 15 SDK, disable all EXC_GUARD */
9125 		if ((platform == PLATFORM_IOSSIMULATOR && sdk < 0xf0000) ||
9126 		    (platform == PLATFORM_TVOSSIMULATOR && sdk < 0xf0000) ||
9127 		    (platform == PLATFORM_WATCHOSSIMULATOR && sdk < 0x80000)) {
9128 			task->task_exc_guard = TASK_EXC_GUARD_NONE;
9129 		}
9130 		/* Disable protection for control ports for simulated binaries */
9131 		opts = TASK_CONTROL_PORT_OPTIONS_NONE;
9132 	}
9133 
9134 
9135 	task_set_control_port_options(task, opts);
9136 
9137 	task_set_immovable_pinned(task);
9138 	main_thread_set_immovable_pinned(main_thread);
9139 }
9140 
9141 kern_return_t
9142 task_get_exc_guard_behavior(
9143 	task_t task,
9144 	task_exc_guard_behavior_t *behaviorp)
9145 {
9146 	if (task == TASK_NULL) {
9147 		return KERN_INVALID_TASK;
9148 	}
9149 	*behaviorp = task->task_exc_guard;
9150 	return KERN_SUCCESS;
9151 }
9152 
9153 kern_return_t
9154 task_set_exc_guard_behavior(
9155 	task_t task,
9156 	task_exc_guard_behavior_t new_behavior)
9157 {
9158 	if (task == TASK_NULL) {
9159 		return KERN_INVALID_TASK;
9160 	}
9161 	if (new_behavior & ~TASK_EXC_GUARD_ALL) {
9162 		return KERN_INVALID_VALUE;
9163 	}
9164 
9165 	/* limit setting to that allowed for this config */
9166 	new_behavior = new_behavior & task_exc_guard_config_mask;
9167 
9168 #if !defined (DEBUG) && !defined (DEVELOPMENT)
9169 	/* On release kernels, only allow _upgrading_ exc guard behavior */
9170 	task_exc_guard_behavior_t cur_behavior;
9171 
9172 	os_atomic_rmw_loop(&task->task_exc_guard, cur_behavior, new_behavior, relaxed, {
9173 		if ((cur_behavior & task_exc_guard_no_unset_mask) & ~(new_behavior & task_exc_guard_no_unset_mask)) {
9174 		        os_atomic_rmw_loop_give_up(return KERN_DENIED);
9175 		}
9176 
9177 		if ((new_behavior & task_exc_guard_no_set_mask) & ~(cur_behavior & task_exc_guard_no_set_mask)) {
9178 		        os_atomic_rmw_loop_give_up(return KERN_DENIED);
9179 		}
9180 
9181 		/* no restrictions on CORPSE bit */
9182 	});
9183 #else
9184 	task->task_exc_guard = new_behavior;
9185 #endif
9186 	return KERN_SUCCESS;
9187 }
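
/*
 * Illustrative (non-compiled) sketch: strengthening EXC_GUARD handling for
 * the current task. The TASK_EXC_GUARD_MP_* bit names are assumed from the
 * task_exc_guard flag definitions; on release kernels the rmw loop above
 * only lets callers add, never remove, protections.
 */
#if 0
(void)task_set_exc_guard_behavior(current_task(),
    TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_MP_FATAL);
#endif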
9188 
9189 kern_return_t
9190 task_set_corpse_forking_behavior(task_t task, task_corpse_forking_behavior_t behavior)
9191 {
9192 #if DEVELOPMENT || DEBUG
9193 	if (task == TASK_NULL) {
9194 		return KERN_INVALID_TASK;
9195 	}
9196 
9197 	task_lock(task);
9198 	if (behavior & TASK_CORPSE_FORKING_DISABLED_MEM_DIAG) {
9199 		task->t_flags |= TF_NO_CORPSE_FORKING;
9200 	} else {
9201 		task->t_flags &= ~TF_NO_CORPSE_FORKING;
9202 	}
9203 	task_unlock(task);
9204 
9205 	return KERN_SUCCESS;
9206 #else
9207 	(void)task;
9208 	(void)behavior;
9209 	return KERN_NOT_SUPPORTED;
9210 #endif
9211 }
9212 
9213 boolean_t
9214 task_corpse_forking_disabled(task_t task)
9215 {
9216 	boolean_t disabled = FALSE;
9217 
9218 	task_lock(task);
9219 	disabled = (task->t_flags & TF_NO_CORPSE_FORKING);
9220 	task_unlock(task);
9221 
9222 	return disabled;
9223 }
9224 
9225 #if __arm64__
9226 extern int legacy_footprint_entitlement_mode;
9227 extern void memorystatus_act_on_legacy_footprint_entitlement(struct proc *, boolean_t);
9228 extern void memorystatus_act_on_ios13extended_footprint_entitlement(struct proc *);
9229 
9230 
9231 void
9232 task_set_legacy_footprint(
9233 	task_t task)
9234 {
9235 	task_lock(task);
9236 	task->task_legacy_footprint = TRUE;
9237 	task_unlock(task);
9238 }
9239 
9240 void
9241 task_set_extra_footprint_limit(
9242 	task_t task)
9243 {
9244 	if (task->task_extra_footprint_limit) {
9245 		return;
9246 	}
9247 	task_lock(task);
9248 	if (task->task_extra_footprint_limit) {
9249 		task_unlock(task);
9250 		return;
9251 	}
9252 	task->task_extra_footprint_limit = TRUE;
9253 	task_unlock(task);
9254 	memorystatus_act_on_legacy_footprint_entitlement(get_bsdtask_info(task), TRUE);
9255 }
9256 
9257 void
9258 task_set_ios13extended_footprint_limit(
9259 	task_t task)
9260 {
9261 	if (task->task_ios13extended_footprint_limit) {
9262 		return;
9263 	}
9264 	task_lock(task);
9265 	if (task->task_ios13extended_footprint_limit) {
9266 		task_unlock(task);
9267 		return;
9268 	}
9269 	task->task_ios13extended_footprint_limit = TRUE;
9270 	task_unlock(task);
9271 	memorystatus_act_on_ios13extended_footprint_entitlement(get_bsdtask_info(task));
9272 }
9273 #endif /* __arm64__ */
9274 
9275 static inline ledger_amount_t
9276 task_ledger_get_balance(
9277 	ledger_t        ledger,
9278 	int             ledger_idx)
9279 {
9280 	ledger_amount_t amount;
9281 	amount = 0;
9282 	ledger_get_balance(ledger, ledger_idx, &amount);
9283 	return amount;
9284 }
9285 
9286 /*
9287  * Gather the amount of memory counted in a task's footprint due to
9288  * being in a specific set of ledgers.
9289  */
9290 void
9291 task_ledgers_footprint(
9292 	ledger_t        ledger,
9293 	ledger_amount_t *ledger_resident,
9294 	ledger_amount_t *ledger_compressed)
9295 {
9296 	*ledger_resident = 0;
9297 	*ledger_compressed = 0;
9298 
9299 	/* purgeable non-volatile memory */
9300 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.purgeable_nonvolatile);
9301 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.purgeable_nonvolatile_compressed);
9302 
9303 	/* "default" tagged memory */
9304 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.tagged_footprint);
9305 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.tagged_footprint_compressed);
9306 
9307 	/* "network" currently never counts in the footprint... */
9308 
9309 	/* "media" tagged memory */
9310 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.media_footprint);
9311 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.media_footprint_compressed);
9312 
9313 	/* "graphics" tagged memory */
9314 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.graphics_footprint);
9315 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.graphics_footprint_compressed);
9316 
9317 	/* "neural" tagged memory */
9318 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.neural_footprint);
9319 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.neural_footprint_compressed);
9320 }
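
/*
 * Illustrative (non-compiled) sketch: the footprint contribution from these
 * ledgers is the sum of the two amounts gathered above (assumes a task_t
 * `t` in scope).
 */
#if 0
ledger_amount_t resident = 0, compressed = 0;
task_ledgers_footprint(get_task_ledger(t), &resident, &compressed);
ledger_amount_t footprint_total = resident + compressed;
#endif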
9321 
9322 #if CONFIG_MEMORYSTATUS
9323 /*
9324  * Credit any outstanding task dirty time to the ledger.
9325  * memstat_dirty_start is pushed forward to prevent any possibility of double
9326  * counting, making it safe to call this as often as necessary to ensure that
9327  * anyone reading the ledger gets up-to-date information.
9328  */
9329 void
9330 task_ledger_settle_dirty_time(task_t t)
9331 {
9332 	task_lock(t);
9333 
9334 	uint64_t start = t->memstat_dirty_start;
9335 	if (start) {
9336 		uint64_t now = mach_absolute_time();
9337 
9338 		uint64_t duration;
9339 		absolutetime_to_nanoseconds(now - start, &duration);
9340 
9341 		ledger_t ledger = get_task_ledger(t);
9342 		ledger_credit(ledger, task_ledgers.memorystatus_dirty_time, duration);
9343 
9344 		t->memstat_dirty_start = now;
9345 	}
9346 
9347 	task_unlock(t);
9348 }
9349 #endif /* CONFIG_MEMORYSTATUS */
9350 
9351 void
9352 task_set_memory_ownership_transfer(
9353 	task_t    task,
9354 	boolean_t value)
9355 {
9356 	task_lock(task);
9357 	task->task_can_transfer_memory_ownership = !!value;
9358 	task_unlock(task);
9359 }
9360 
9361 #if DEVELOPMENT || DEBUG
9362 
9363 void
9364 task_set_no_footprint_for_debug(task_t task, boolean_t value)
9365 {
9366 	task_lock(task);
9367 	task->task_no_footprint_for_debug = !!value;
9368 	task_unlock(task);
9369 }
9370 
9371 int
9372 task_get_no_footprint_for_debug(task_t task)
9373 {
9374 	return task->task_no_footprint_for_debug;
9375 }
9376 
9377 #endif /* DEVELOPMENT || DEBUG */
9378 
9379 void
9380 task_copy_vmobjects(task_t task, vm_object_query_t query, size_t len, size_t *num)
9381 {
9382 	vm_object_t find_vmo;
9383 	size_t size = 0;
9384 
9385 	task_objq_lock(task);
9386 	if (query != NULL) {
9387 		queue_iterate(&task->task_objq, find_vmo, vm_object_t, task_objq)
9388 		{
9389 			vm_object_query_t p = &query[size++];
9390 
9391 			/* make sure to not overrun */
9392 			if (size * sizeof(vm_object_query_data_t) > len) {
9393 				--size;
9394 				break;
9395 			}
9396 
9397 			bzero(p, sizeof(*p));
9398 			p->object_id = (vm_object_id_t) VM_KERNEL_ADDRPERM(find_vmo);
9399 			p->virtual_size = find_vmo->internal ? find_vmo->vo_size : 0;
9400 			p->resident_size = find_vmo->resident_page_count * PAGE_SIZE;
9401 			p->wired_size = find_vmo->wired_page_count * PAGE_SIZE;
9402 			p->reusable_size = find_vmo->reusable_page_count * PAGE_SIZE;
9403 			p->vo_no_footprint = find_vmo->vo_no_footprint;
9404 			p->vo_ledger_tag = find_vmo->vo_ledger_tag;
9405 			p->purgable = find_vmo->purgable;
9406 
9407 			if (find_vmo->internal && find_vmo->pager_created && find_vmo->pager != NULL) {
9408 				p->compressed_size = vm_compressor_pager_get_count(find_vmo->pager) * PAGE_SIZE;
9409 			} else {
9410 				p->compressed_size = 0;
9411 			}
9412 		}
9413 	} else {
9414 		size = (size_t)task->task_owned_objects;
9415 	}
9416 	task_objq_unlock(task);
9417 
9418 	*num = size;
9419 }
9420 
9421 void
9422 task_get_owned_vmobjects(task_t task, size_t buffer_size, vmobject_list_output_t buffer, size_t* output_size, size_t* entries)
9423 {
9424 	assert(output_size);
9425 	assert(entries);
9426 
9427 	/* copy the vmobjects and vmobject data out of the task */
9428 	if (buffer_size == 0) {
9429 		task_copy_vmobjects(task, NULL, 0, entries);
9430 		*output_size = (*entries > 0) ? *entries * sizeof(vm_object_query_data_t) + sizeof(*buffer) : 0;
9431 	} else {
9432 		assert(buffer);
9433 		task_copy_vmobjects(task, &buffer->data[0], buffer_size - sizeof(*buffer), entries);
9434 		buffer->entries = (uint64_t)*entries;
9435 		*output_size = *entries * sizeof(vm_object_query_data_t) + sizeof(*buffer);
9436 	}
9437 }
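
/*
 * Callers use the function above in two phases: a buffer_size == 0 probe to
 * learn the required allocation, then a second call with the buffer, as
 * task_store_owned_vmobject_info() below demonstrates.
 */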
9438 
9439 void
9440 task_store_owned_vmobject_info(task_t to_task, task_t from_task)
9441 {
9442 	size_t buffer_size;
9443 	vmobject_list_output_t buffer;
9444 	size_t output_size;
9445 	size_t entries;
9446 
9447 	assert(to_task != from_task);
9448 
9449 	/* get the size, allocate a buffer, and populate it */
9450 	entries = 0;
9451 	output_size = 0;
9452 	task_get_owned_vmobjects(from_task, 0, NULL, &output_size, &entries);
9453 
9454 	if (output_size) {
9455 		buffer_size = output_size;
9456 		buffer = kalloc_data(buffer_size, Z_WAITOK);
9457 
9458 		if (buffer) {
9459 			entries = 0;
9460 			output_size = 0;
9461 
9462 			task_get_owned_vmobjects(from_task, buffer_size, buffer, &output_size, &entries);
9463 
9464 			if (entries) {
9465 				to_task->corpse_vmobject_list = buffer;
9466 				to_task->corpse_vmobject_list_size = buffer_size;
9467 			}
9468 		}
9469 	}
9470 }
9471 
9472 void
9473 task_set_filter_msg_flag(
9474 	task_t task,
9475 	boolean_t flag)
9476 {
9477 	assert(task != TASK_NULL);
9478 
9479 	if (flag) {
9480 		task_ro_flags_set(task, TFRO_FILTER_MSG);
9481 	} else {
9482 		task_ro_flags_clear(task, TFRO_FILTER_MSG);
9483 	}
9484 }
9485 
9486 boolean_t
9487 task_get_filter_msg_flag(
9488 	task_t task)
9489 {
9490 	if (!task) {
9491 		return false;
9492 	}
9493 
9494 	return (task_ro_flags_get(task) & TFRO_FILTER_MSG) ? TRUE : FALSE;
9495 }
9496 bool
9497 task_is_exotic(
9498 	task_t task)
9499 {
9500 	if (task == TASK_NULL) {
9501 		return false;
9502 	}
9503 	return vm_map_is_exotic(get_task_map(task));
9504 }
9505 
9506 bool
9507 task_is_alien(
9508 	task_t task)
9509 {
9510 	if (task == TASK_NULL) {
9511 		return false;
9512 	}
9513 	return vm_map_is_alien(get_task_map(task));
9514 }
9515 
9516 
9517 
9518 #if CONFIG_MACF
9519 /* Set the filter mask for Mach traps. */
9520 void
9521 mac_task_set_mach_filter_mask(task_t task, uint8_t *maskptr)
9522 {
9523 	assert(task);
9524 
9525 	task_set_mach_trap_filter_mask(task, maskptr);
9526 }
9527 
9528 /* Set the filter mask for kobject msgs. */
9529 void
9530 mac_task_set_kobj_filter_mask(task_t task, uint8_t *maskptr)
9531 {
9532 	assert(task);
9533 
9534 	task_set_mach_kobj_filter_mask(task, maskptr);
9535 }
9536 
9537 /* Hook for mach trap/sc filter evaluation policy. */
9538 SECURITY_READ_ONLY_LATE(mac_task_mach_filter_cbfunc_t) mac_task_mach_trap_evaluate = NULL;
9539 
9540 /* Hook for kobj message filter evaluation policy. */
9541 SECURITY_READ_ONLY_LATE(mac_task_kobj_filter_cbfunc_t) mac_task_kobj_msg_evaluate = NULL;
9542 
9543 /* Set the callback hooks for the filtering policy. */
9544 int
9545 mac_task_register_filter_callbacks(
9546 	const mac_task_mach_filter_cbfunc_t mach_cbfunc,
9547 	const mac_task_kobj_filter_cbfunc_t kobj_cbfunc)
9548 {
9549 	if (mach_cbfunc != NULL) {
9550 		if (mac_task_mach_trap_evaluate != NULL) {
9551 			return KERN_FAILURE;
9552 		}
9553 		mac_task_mach_trap_evaluate = mach_cbfunc;
9554 	}
9555 	if (kobj_cbfunc != NULL) {
9556 		if (mac_task_kobj_msg_evaluate != NULL) {
9557 			return KERN_FAILURE;
9558 		}
9559 		mac_task_kobj_msg_evaluate = kobj_cbfunc;
9560 	}
9561 
9562 	return KERN_SUCCESS;
9563 }
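
/*
 * Illustrative (non-compiled) sketch: a MACF policy registering its filter
 * hooks once during initialization; the callback names are hypothetical,
 * and a second registration attempt fails with KERN_FAILURE.
 */
#if 0
if (mac_task_register_filter_callbacks(my_trap_filter_cb,
    my_kobj_filter_cb) != KERN_SUCCESS) {
	/* another policy already registered its hooks */
}
#endif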
9564 #endif /* CONFIG_MACF */
9565 
9566 #if CONFIG_ROSETTA
9567 bool
9568 task_is_translated(task_t task)
9569 {
9570 	extern boolean_t proc_is_translated(struct proc* p);
9571 	return task && proc_is_translated(get_bsdtask_info(task));
9572 }
9573 #endif
9574 
9575 
9576 
9577 #if __has_feature(ptrauth_calls)
9578 /* On FPAC, we want to deliver all PAC violations as fatal exceptions, regardless
9579  * of the enable_pac_exception boot-arg value or any other entitlements.
9580  * The only case where we allow non-fatal PAC exceptions on FPAC is for debugging,
9581  * which requires Developer Mode enabled.
9582  *
9583  * On non-FPAC hardware, we gate the decision behind entitlements and the
9584  * enable_pac_exception boot-arg.
9585  */
9586 extern int gARM_FEAT_FPAC;
9587 /*
9588  * Having the PAC_EXCEPTION_ENTITLEMENT entitlement means we always enforce all
9589  * of the PAC exception hardening: fatal exceptions and signed user state.
9590  */
9591 #define PAC_EXCEPTION_ENTITLEMENT "com.apple.private.pac.exception"
9592 /*
9593  * On non-FPAC hardware, when enable_pac_exception boot-arg is set to true,
9594  * processes can choose to get non-fatal PAC exception delivery by setting
9595  * the SKIP_PAC_EXCEPTION_ENTITLEMENT entitlement.
9596  */
9597 #define SKIP_PAC_EXCEPTION_ENTITLEMENT "com.apple.private.skip.pac.exception"
9598 
9599 void
9600 task_set_pac_exception_fatal_flag(
9601 	task_t task)
9602 {
9603 	assert(task != TASK_NULL);
9604 	bool pac_hardened_task = false;
9605 	uint32_t set_flags = 0;
9606 
9607 	/*
9608 	 * On non-FPAC hardware, we allow gating PAC exceptions behind
9609 	 * SKIP_PAC_EXCEPTION_ENTITLEMENT and the boot-arg.
9610 	 */
9611 	if (!gARM_FEAT_FPAC && enable_pac_exception &&
9612 	    IOTaskHasEntitlement(task, SKIP_PAC_EXCEPTION_ENTITLEMENT)) {
9613 		return;
9614 	}
9615 
9616 	if (IOTaskHasEntitlement(task, PAC_EXCEPTION_ENTITLEMENT) || task_get_hardened_runtime(task)) {
9617 		pac_hardened_task = true;
9618 		set_flags |= TFRO_PAC_ENFORCE_USER_STATE;
9619 	}
9620 
9621 	/* On non-FPAC hardware, gate the fatal property behind entitlements and boot-arg. */
9622 	if (pac_hardened_task ||
9623 	    ((enable_pac_exception || gARM_FEAT_FPAC) && task_get_platform_binary(task))) {
9624 		/* If debugging is configured, do not make PAC exception fatal. */
9625 		if (address_space_debugged(task_get_proc_raw(task)) != KERN_SUCCESS) {
9626 			set_flags |= TFRO_PAC_EXC_FATAL;
9627 		}
9628 	}
9629 
9630 	if (set_flags != 0) {
9631 		task_ro_flags_set(task, set_flags);
9632 	}
9633 }
9634 
9635 bool
9636 task_is_pac_exception_fatal(
9637 	task_t task)
9638 {
9639 	assert(task != TASK_NULL);
9640 	return !!(task_ro_flags_get(task) & TFRO_PAC_EXC_FATAL);
9641 }
9642 #endif /* __has_feature(ptrauth_calls) */
9643 
9644 bool
9645 task_needs_user_signed_thread_state(
9646 	task_t task)
9647 {
9648 	assert(task != TASK_NULL);
9649 	return !!(task_ro_flags_get(task) & TFRO_PAC_ENFORCE_USER_STATE);
9650 }
9651 
9652 void
9653 task_set_tecs(task_t task)
9654 {
9655 	if (task == TASK_NULL) {
9656 		task = current_task();
9657 	}
9658 
9659 	if (!machine_csv(CPUVN_CI)) {
9660 		return;
9661 	}
9662 
9663 	LCK_MTX_ASSERT(&task->lock, LCK_MTX_ASSERT_NOTOWNED);
9664 
9665 	task_lock(task);
9666 
9667 	task->t_flags |= TF_TECS;
9668 
9669 	thread_t thread;
9670 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
9671 		machine_tecs(thread);
9672 	}
9673 	task_unlock(task);
9674 }
9675 
9676 kern_return_t
task_test_sync_upcall(task_t task,ipc_port_t send_port)9677 task_test_sync_upcall(
9678 	task_t     task,
9679 	ipc_port_t send_port)
9680 {
9681 #if DEVELOPMENT || DEBUG
9682 	if (task != current_task() || !IPC_PORT_VALID(send_port)) {
9683 		return KERN_INVALID_ARGUMENT;
9684 	}
9685 
9686 	/* Block on sync kernel upcall on the given send port */
9687 	mach_test_sync_upcall(send_port);
9688 
9689 	ipc_port_release_send(send_port);
9690 	return KERN_SUCCESS;
9691 #else
9692 	(void)task;
9693 	(void)send_port;
9694 	return KERN_NOT_SUPPORTED;
9695 #endif
9696 }
9697 
9698 kern_return_t
task_test_async_upcall_propagation(task_t task,ipc_port_t send_port,int qos,int iotier)9699 task_test_async_upcall_propagation(
9700 	task_t      task,
9701 	ipc_port_t  send_port,
9702 	int         qos,
9703 	int         iotier)
9704 {
9705 #if DEVELOPMENT || DEBUG
9706 	kern_return_t kr;
9707 
9708 	if (task != current_task() || !IPC_PORT_VALID(send_port)) {
9709 		return KERN_INVALID_ARGUMENT;
9710 	}
9711 
9712 	if (qos < THREAD_QOS_DEFAULT || qos > THREAD_QOS_USER_INTERACTIVE ||
9713 	    iotier < THROTTLE_LEVEL_START || iotier > THROTTLE_LEVEL_END) {
9714 		return KERN_INVALID_ARGUMENT;
9715 	}
9716 
9717 	struct thread_attr_for_ipc_propagation attr = {
9718 		.tafip_iotier = iotier,
9719 		.tafip_qos = qos
9720 	};
9721 
9722 	/* Apply propagate attr to port */
9723 	kr = ipc_port_propagate_thread_attr(send_port, attr);
9724 	if (kr != KERN_SUCCESS) {
9725 		return kr;
9726 	}
9727 
9728 	thread_enable_send_importance(current_thread(), TRUE);
9729 
9730 	/* Perform an async kernel upcall on the given send port */
9731 	mach_test_async_upcall(send_port);
9732 	thread_enable_send_importance(current_thread(), FALSE);
9733 
9734 	ipc_port_release_send(send_port);
9735 	return KERN_SUCCESS;
9736 #else
9737 	(void)task;
9738 	(void)send_port;
9739 	(void)qos;
9740 	(void)iotier;
9741 	return KERN_NOT_SUPPORTED;
9742 #endif
9743 }
9744 
9745 #if CONFIG_PROC_RESOURCE_LIMITS
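/*
 * Allocate the task's fatal port and copy a send right out into the
 * current task's IPC space, returning its name; returns 0 (MACH_PORT_NULL)
 * if the port cannot be allocated.
 */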
mach_port_name_t
current_task_get_fatal_port_name(void)
{
	mach_port_t task_fatal_port = MACH_PORT_NULL;
	mach_port_name_t port_name = 0;

	task_fatal_port = task_allocate_fatal_port();

	if (task_fatal_port) {
		ipc_object_copyout(current_space(), ip_to_object(task_fatal_port), MACH_MSG_TYPE_PORT_SEND,
		    IPC_OBJECT_COPYOUT_FLAGS_NONE, NULL, NULL, &port_name);
	}

	return port_name;
}
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

#if defined(__x86_64__)
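/*
 * TF_INSN_COPY_OPTOUT opts a task's threads out of instruction-stream
 * copying (presumably for crash diagnostics); the getter samples the flag
 * under the task lock, and the setter propagates it to every existing
 * thread.
 */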
bool
curtask_get_insn_copy_optout(void)
{
	bool optout;
	task_t cur_task = current_task();

	task_lock(cur_task);
	optout = (cur_task->t_flags & TF_INSN_COPY_OPTOUT) ? true : false;
	task_unlock(cur_task);

	return optout;
}

void
curtask_set_insn_copy_optout(void)
{
	task_t cur_task = current_task();

	task_lock(cur_task);

	cur_task->t_flags |= TF_INSN_COPY_OPTOUT;

	thread_t thread;
	queue_iterate(&cur_task->threads, thread, thread_t, task_threads) {
		machine_thread_set_insn_copy_optout(thread);
	}
	task_unlock(cur_task);
}
#endif /* defined(__x86_64__) */

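/*
 * Return the VM object list cached on a corpse task, along with its
 * recorded size; the caller borrows the task's buffer rather than
 * receiving a copy.
 */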
void
task_get_corpse_vmobject_list(task_t task, vmobject_list_output_t* list, size_t* list_size)
{
	assert(task);
	assert(list_size);

	*list = task->corpse_vmobject_list;
	*list_size = (size_t)task->corpse_vmobject_list_size;
}

__abortlike
static void
panic_proc_ro_task_backref_mismatch(task_t t, proc_ro_t ro)
{
	panic("proc_ro->task backref mismatch: t=%p, ro=%p, "
	    "proc_ro_task(ro)=%p", t, ro, proc_ro_task(ro));
}

proc_ro_t
task_get_ro(task_t t)
{
	proc_ro_t ro = (proc_ro_t)t->bsd_info_ro;

	zone_require_ro(ZONE_ID_PROC_RO, sizeof(struct proc_ro), ro);
	if (__improbable(proc_ro_task(ro) != t)) {
		panic_proc_ro_task_backref_mismatch(t, ro);
	}

	return ro;
}

uint32_t
task_ro_flags_get(task_t task)
{
	return task_get_ro(task)->t_flags_ro;
}

void
task_ro_flags_set(task_t task, uint32_t flags)
{
	zalloc_ro_update_field_atomic(ZONE_ID_PROC_RO, task_get_ro(task),
	    t_flags_ro, ZRO_ATOMIC_OR_32, flags);
}

void
task_ro_flags_clear(task_t task, uint32_t flags)
{
	zalloc_ro_update_field_atomic(ZONE_ID_PROC_RO, task_get_ro(task),
	    t_flags_ro, ZRO_ATOMIC_AND_32, ~flags);
}
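
/*
 * Illustrative sketch (not built): every write to the read-only flag word
 * funnels through zalloc_ro_update_field_atomic(), so a plain store to the
 * page would fault. A hypothetical round trip using only the helpers above:
 */
#if 0
static void
example_ro_flag_round_trip(task_t task)
{
	task_ro_flags_set(task, TFRO_PAC_EXC_FATAL);        /* ZRO_ATOMIC_OR_32 */
	assert(task_ro_flags_get(task) & TFRO_PAC_EXC_FATAL);
	task_ro_flags_clear(task, TFRO_PAC_EXC_FATAL);      /* ZRO_ATOMIC_AND_32 of ~flags */
	assert(!(task_ro_flags_get(task) & TFRO_PAC_EXC_FATAL));
}
#endif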

task_control_port_options_t
task_get_control_port_options(task_t task)
{
	return task_get_ro(task)->task_control_port_options;
}

void
task_set_control_port_options(task_t task, task_control_port_options_t opts)
{
	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
	    task_control_port_options, &opts);
}

/*!
 * @function kdp_task_is_locked
 *
 * @abstract
 * Checks if a task is locked.
 *
 * @discussion
 * NOT SAFE: to be used only by the kernel debugger.
 *
 * @param task task to check
 *
 * @returns TRUE if the task is locked.
 */
boolean_t
kdp_task_is_locked(task_t task)
{
	return kdp_lck_mtx_lock_spin_is_acquired(&task->lock);
}

#if DEBUG || DEVELOPMENT
/**
 * Check if a threshold limit is valid against the actual phys memory
 * limit. If the two are the same, race conditions may arise, so we have
 * to prevent that from happening.
 */
static diagthreshold_check_return
task_check_memorythreshold_is_valid(task_t task, uint64_t new_limit, bool is_diagnostics_value)
{
	int phys_limit_mb;
	kern_return_t ret_value;
	bool threshold_enabled;
	bool dummy;
	ret_value = ledger_is_diag_threshold_enabled(task->ledger, task_ledgers.phys_footprint, &threshold_enabled);
	if (ret_value != KERN_SUCCESS) {
		return ret_value;
	}
	if (is_diagnostics_value == true) {
		ret_value = task_get_phys_footprint_limit(task, &phys_limit_mb);
	} else {
		uint64_t diag_limit;
		ret_value = task_get_diag_footprint_limit_internal(task, &diag_limit, &dummy);
		phys_limit_mb = (int)(diag_limit >> 20);
	}
	if (ret_value != KERN_SUCCESS) {
		return ret_value;
	}
	if (phys_limit_mb == (int)new_limit) {
		if (threshold_enabled == false) {
			return THRESHOLD_IS_SAME_AS_LIMIT_FLAG_DISABLED;
		} else {
			return THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED;
		}
	}
	if (threshold_enabled == false) {
		return THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED;
	} else {
		return THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_ENABLED;
	}
}
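
/*
 * The return value above encodes a 2x2 decision (comparison outcome x
 * whether the diagnostics threshold is currently enabled):
 *
 *   new == existing, disabled -> THRESHOLD_IS_SAME_AS_LIMIT_FLAG_DISABLED
 *   new == existing, enabled  -> THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED
 *   new != existing, disabled -> THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED
 *   new != existing, enabled  -> THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_ENABLED
 */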
#endif

#if CONFIG_EXCLAVES
kern_return_t
task_add_conclave(task_t task, const char *task_conclave_id)
{
	/*
	 * Make this EXCLAVES_BOOT_STAGE_2 until userspace is actually
	 * triggering the EXCLAVESKIT boot stage.
	 */
	kern_return_t kr = exclaves_boot_wait(EXCLAVES_BOOT_STAGE_2);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	return exclaves_conclave_attach(EXCLAVES_DOMAIN_KERNEL, task_conclave_id, task);
}

kern_return_t
task_start_conclave_and_lookup_resources(mach_port_name_t port __unused, bool conclave_start,
    struct exclaves_resource_user *conclave_resource_user, int resource_count)
{
	kern_return_t kr = KERN_FAILURE;
	assert3u(port, ==, MACH_PORT_NULL);
	exclaves_resource_t *conclave = task_get_conclave(current_task());
	if (conclave == NULL) {
		return kr;
	}

	if (conclave_start) {
		kr = exclaves_conclave_launch(conclave);
		if (kr != KERN_SUCCESS) {
			return kr;
		}
		task_set_conclave_taint(current_task());
	}

	kr = exclaves_conclave_lookup_resources(conclave, conclave_resource_user, resource_count);
	return kr;
}

kern_return_t
task_inherit_conclave(task_t old_task, task_t new_task)
{
	if (old_task->conclave == NULL) {
		return KERN_SUCCESS;
	}

	return exclaves_conclave_inherit(old_task->conclave, old_task, new_task);
}

void
task_clear_conclave(task_t task)
{
	if (task->exclave_crash_info) {
		kfree_data(task->exclave_crash_info, CONCLAVE_CRASH_BUFFER_PAGECOUNT * PAGE_SIZE);
		task->exclave_crash_info = NULL;
	}

	if (task->conclave == NULL) {
		return;
	}

	/*
	 * XXX
	 * This should only fail if either the conclave is in an unexpected
	 * state (i.e. not ATTACHED) or if the wrong port is supplied.
	 * We should re-visit this and make sure we guarantee the above
	 * constraints.
	 */
	__assert_only kern_return_t ret =
	    exclaves_conclave_detach(task->conclave, task);
	assert3u(ret, ==, KERN_SUCCESS);
}

void
task_stop_conclave(task_t task, bool gather_crash_bt)
{
	thread_t thread = current_thread();

	if (task->conclave == NULL) {
		return;
	}

	if (task_should_panic_on_exit_due_to_conclave_taint(task)) {
		panic("Conclave tainted task %p terminated\n", task);
	}

	/* Stash the task on the current thread for conclave teardown */
	thread->conclave_stop_task = task;

	__assert_only kern_return_t ret =
	    exclaves_conclave_stop(task->conclave, gather_crash_bt);

	thread->conclave_stop_task = TASK_NULL;

	assert3u(ret, ==, KERN_SUCCESS);
}

kern_return_t
task_stop_conclave_upcall(void)
{
	task_t task = current_task();
	if (task->conclave == NULL) {
		return KERN_INVALID_TASK;
	}

	return exclaves_conclave_stop_upcall(task->conclave, task);
}

kern_return_t
task_suspend_conclave_upcall(uint64_t *scid_list, size_t scid_list_count)
{
	task_t task = current_task();
	thread_t thread;
	int scid_count = 0;
	kern_return_t kr;
	if (task->conclave == NULL) {
		return KERN_INVALID_TASK;
	}

	kr = task_hold_and_wait(task);

	task_lock(task);
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		if (thread->th_exclaves_state & TH_EXCLAVES_RPC) {
			scid_list[scid_count++] = thread->th_exclaves_scheduling_context_id;
			if (scid_count >= scid_list_count) {
				break;
			}
		}
	}

	task_unlock(task);
	return kr;
}

kern_return_t
task_crash_info_conclave_upcall(task_t task, const xnuupcalls_conclavesharedbuffer_s *shared_buf,
    uint32_t length)
{
	if (task->conclave == NULL) {
		return KERN_INVALID_TASK;
	}

	/* Allocate a kernel buffer and copy the shared pages into it */
	int task_crash_info_buffer_size = 0;
	uint8_t *task_crash_info_buffer;

	if (!length) {
		printf("Conclave upcall: task_crash_info_conclave_upcall did not return any page addresses\n");
		return KERN_INVALID_ARGUMENT;
	}

	task_crash_info_buffer_size = CONCLAVE_CRASH_BUFFER_PAGECOUNT * PAGE_SIZE;
	assert3u(task_crash_info_buffer_size, >=, length);

	task_crash_info_buffer = kalloc_data(task_crash_info_buffer_size, Z_WAITOK);
	if (!task_crash_info_buffer) {
		panic("task_crash_info_conclave_upcall: cannot allocate buffer for task_info shared memory");
		return KERN_INVALID_ARGUMENT;
	}

	uint8_t *dst = task_crash_info_buffer;
	uint32_t remaining = length;
	for (size_t i = 0; i < CONCLAVE_CRASH_BUFFER_PAGECOUNT; i++) {
		if (remaining) {
			memcpy(dst, (uint8_t *)phystokv((pmap_paddr_t)shared_buf->physaddr[i]), PAGE_SIZE);
			remaining = (remaining >= PAGE_SIZE) ? remaining - PAGE_SIZE : 0;
			dst += PAGE_SIZE;
		}
	}

	task_lock(task);
	if (task->exclave_crash_info == NULL && task->active) {
		task->exclave_crash_info = task_crash_info_buffer;
		task->exclave_crash_info_length = length;
		task_crash_info_buffer = NULL;
	}
	task_unlock(task);

	if (task_crash_info_buffer) {
		kfree_data(task_crash_info_buffer, task_crash_info_buffer_size);
	}

	return KERN_SUCCESS;
}

exclaves_resource_t *
task_get_conclave(task_t task)
{
	return task->conclave;
}

extern boolean_t IOPMRootDomainGetWillShutdown(void);

TUNABLE(bool, disable_conclave_taint, "disable_conclave_taint", true); /* Do not taint processes when they talk to a conclave, so the system does not panic when they exit. */

static bool
task_should_panic_on_exit_due_to_conclave_taint(task_t task)
{
	/* Check if the boot-arg to disable conclave taint is set */
	if (disable_conclave_taint) {
		return false;
	}

	/* Check if the system is shutting down */
	if (IOPMRootDomainGetWillShutdown()) {
		return false;
	}

	return task_is_conclave_tainted(task);
}

static bool
task_is_conclave_tainted(task_t task)
{
	return (task->t_exclave_state & TES_CONCLAVE_TAINTED) != 0 &&
	       !(task->t_exclave_state & TES_CONCLAVE_UNTAINTABLE);
}

static void
task_set_conclave_taint(task_t task)
{
	os_atomic_or(&task->t_exclave_state, TES_CONCLAVE_TAINTED, relaxed);
}

void
task_set_conclave_untaintable(task_t task)
{
	os_atomic_or(&task->t_exclave_state, TES_CONCLAVE_UNTAINTABLE, relaxed);
}
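
/*
 * Taint lifecycle summary: launching a conclave taints the launching task
 * (task_set_conclave_taint() above); tasks marked untaintable are exempt;
 * on exit, task_stop_conclave() panics only when the task is tainted and
 * taintable, the disable_conclave_taint tunable is off, and the system is
 * not already shutting down.
 */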

void
task_add_conclave_crash_info(task_t task, void *crash_info_ptr)
{
	__block kern_return_t error = KERN_SUCCESS;
	tb_error_t tberr = TB_ERROR_SUCCESS;
	void *crash_info;
	uint32_t crash_info_length = 0;

	if (task->conclave == NULL) {
		return;
	}

	if (task->exclave_crash_info_length == 0 && fake_crash_buffer_length == 0) {
		return;
	}

	error = kcdata_add_container_marker(crash_info_ptr, KCDATA_TYPE_CONTAINER_BEGIN,
	    STACKSHOT_KCCONTAINER_EXCLAVES, 0);
	if (error != KERN_SUCCESS) {
		return;
	}

	if (task->exclave_crash_info_length == 0) {
		crash_info = fake_crash_buffer;
		crash_info_length = fake_crash_buffer_length;
	} else {
		crash_info = task->exclave_crash_info;
		crash_info_length = task->exclave_crash_info_length;
	}

	tberr = stackshot_stackshotresult__unmarshal(crash_info,
	    (uint64_t)crash_info_length, ^(stackshot_stackshotresult_s result){
		error = stackshot_exclaves_process_stackshot(&result, crash_info_ptr);
		if (error != KERN_SUCCESS) {
			printf("stackshot_exclaves_process_result: error processing stackshot result %d\n", error);
		}
	});
	if (tberr != TB_ERROR_SUCCESS) {
		printf("task_conclave_crash: task_add_conclave_crash_info could not unmarshal stackshot data 0x%x\n", tberr);
		error = KERN_FAILURE;
		goto error_exit;
	}

error_exit:
	kcdata_add_container_marker(crash_info_ptr, KCDATA_TYPE_CONTAINER_END,
	    STACKSHOT_KCCONTAINER_EXCLAVES, 0);

	return;
}

#endif /* CONFIG_EXCLAVES */

#pragma mark task utils

/* defined in bsd/kern/kern_proc.c */
extern void proc_name(int pid, char *buf, int size);
extern char *proc_best_name(struct proc *p);

void
task_procname(task_t task, char *buf, int size)
{
	proc_name(task_pid(task), buf, size);
}

void
task_best_name(task_t task, char *buf, size_t size)
{
	char *name = proc_best_name(task_get_proc_raw(task));
	strlcpy(buf, name, size);
}
10222