/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	kern/task.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
 *		David Black
 *
 *	Task management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 * Copyright (c) 2005 SPARTA, Inc.
 */

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_priv.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <mach/mach_vm.h>
#include <mach/semaphore.h>
#include <mach/task_info.h>
#include <mach/task_inspect.h>
#include <mach/task_special_ports.h>
#include <mach/sdt.h>
#include <mach/mach_test_upcall.h>

#include <ipc/ipc_importance.h>
#include <ipc/ipc_types.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_hash.h>
#include <ipc/ipc_init.h>

#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/coalition.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/kern_cdata.h>
#include <kern/processor.h>
#include <kern/recount.h>
#include <kern/sched_prim.h>    /* for thread_wakeup */
#include <kern/ipc_tt.h>
#include <kern/host.h>
#include <kern/clock.h>
#include <kern/timer.h>
#include <kern/assert.h>
#include <kern/affinity.h>
#include <kern/exc_resource.h>
#include <kern/machine.h>
#include <kern/policy_internal.h>
#include <kern/restartable.h>
#include <kern/ipc_kobject.h>

#include <corpses/task_corpse.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif

#if CONFIG_PERVASIVE_CPI
#include <kern/monotonic.h>
#include <machine/monotonic.h>
#endif /* CONFIG_PERVASIVE_CPI */

#include <os/log.h>

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>         /* for kernel_map, ipc_kernel_map */
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_compressor_pager.h>
#include <vm/vm_reclaim_internal.h>

#include <sys/proc_ro.h>
#include <sys/resource.h>
#include <sys/signalvar.h> /* for coredump */
#include <sys/bsdtask_info.h>
#include <sys/kdebug_triage.h>
/*
 * Exported interfaces
 */

#include <mach/task_server.h>
#include <mach/mach_host_server.h>
#include <mach/mach_port_server.h>

#include <vm/vm_shared_region.h>

#include <libkern/OSDebug.h>
#include <libkern/OSAtomic.h>
#include <libkern/section_keywords.h>

#include <mach-o/loader.h>
#include <kdp/kdp_dyld.h>

#include <kern/sfi.h>           /* picks up ledger.h */

#if CONFIG_MACF
#include <security/mac_mach_internal.h>
#endif

#include <IOKit/IOBSD.h>
#include <kdp/processor_core.h>

#include <string.h>

#if KPERF
extern int kpc_force_all_ctrs(task_t, int);
#endif

SECURITY_READ_ONLY_LATE(task_t) kernel_task;

int64_t         next_taskuniqueid = 0;
const size_t task_alignment = _Alignof(struct task);
extern const size_t proc_alignment;
extern size_t proc_struct_size;
extern size_t proc_and_task_size;
size_t task_struct_size;

extern uint32_t ipc_control_port_options;

extern int large_corpse_count;

extern boolean_t proc_send_synchronous_EXC_RESOURCE(void *p);
extern void task_disown_frozen_csegs(task_t owner_task);

static void task_port_no_senders(ipc_port_t, mach_msg_type_number_t);
static void task_port_with_flavor_no_senders(ipc_port_t, mach_msg_type_number_t);
static void task_suspension_no_senders(ipc_port_t, mach_msg_type_number_t);
static inline void task_zone_init(void);


IPC_KOBJECT_DEFINE(IKOT_TASK_NAME);
IPC_KOBJECT_DEFINE(IKOT_TASK_CONTROL,
    .iko_op_no_senders = task_port_no_senders);
IPC_KOBJECT_DEFINE(IKOT_TASK_READ,
    .iko_op_no_senders = task_port_with_flavor_no_senders);
IPC_KOBJECT_DEFINE(IKOT_TASK_INSPECT,
    .iko_op_no_senders = task_port_with_flavor_no_senders);
IPC_KOBJECT_DEFINE(IKOT_TASK_RESUME,
    .iko_op_no_senders = task_suspension_no_senders);

#if CONFIG_PROC_RESOURCE_LIMITS
static void task_fatal_port_no_senders(ipc_port_t, mach_msg_type_number_t);
static mach_port_t task_allocate_fatal_port(void);

IPC_KOBJECT_DEFINE(IKOT_TASK_FATAL,
    .iko_op_stable     = true,
    .iko_op_no_senders = task_fatal_port_no_senders);

extern void task_id_token_set_port(task_id_token_t token, ipc_port_t port);
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

/* Flag set by core audio when audio is playing. Used to stifle EXC_RESOURCE generation when active. */
int audio_active = 0;

/*
 *	structure for tracking zone usage
 *	Used either as a single accumulator per task/thread covering all zones,
 *	or as one record per <task, zone> pair.
 */
typedef struct zinfo_usage_store_t {
	/* These fields may be updated atomically, and so must be 8 byte aligned */
	uint64_t        alloc __attribute__((aligned(8)));              /* allocation counter */
	uint64_t        free __attribute__((aligned(8)));               /* free counter */
} zinfo_usage_store_t;
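
/*
 * Illustrative sketch (editorial, not from the original source): because
 * alloc/free are 8-byte aligned, a counter can be bumped with a 64-bit
 * atomic add, e.g. via <libkern/OSAtomic.h>, which is included above:
 *
 *	zinfo_usage_store_t usage;
 *	OSAddAtomic64((SInt64)size, (SInt64 *)&usage.alloc);	// record an allocation of `size` bytes
 *	OSAddAtomic64((SInt64)size, (SInt64 *)&usage.free);	// record the matching free
 *
 * Net bytes held are then (usage.alloc - usage.free).
 */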

/**
 * Return codes related to diag threshold and memory limit
 */
__options_decl(diagthreshold_check_return, int, {
	THRESHOLD_IS_SAME_AS_LIMIT_FLAG_DISABLED        = 0,
	THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED         = 1,
	THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED    = 2,
	THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_ENABLED     = 3,
});
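
/*
 * Note (editorial, inferred from the values above): these codes act as a
 * two-bit encoding; bit 0 says whether the diag threshold flag is enabled,
 * bit 1 says whether the threshold differs from the limit. For example,
 * THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_ENABLED == (2 | 1) == 3.
 */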

/**
 * Whether the current diag threshold value matches the memory limit
 */
__options_decl(current_, int, {
	THRESHOLD_IS_SAME_AS_LIMIT      = 0,
	THRESHOLD_IS_NOT_SAME_AS_LIMIT  = 1
});

zinfo_usage_store_t tasks_tkm_private;
zinfo_usage_store_t tasks_tkm_shared;

/* A container to accumulate statistics for expired tasks */
expired_task_statistics_t               dead_task_statistics;
LCK_SPIN_DECLARE_ATTR(dead_task_statistics_lock, &task_lck_grp, &task_lck_attr);

ledger_template_t task_ledger_template = NULL;

/* global lock for task_dyld_process_info_notify_{register, deregister, get_trap} */
LCK_GRP_DECLARE(g_dyldinfo_mtx_grp, "g_dyldinfo");
LCK_MTX_DECLARE(g_dyldinfo_mtx, &g_dyldinfo_mtx_grp);

SECURITY_READ_ONLY_LATE(struct _task_ledger_indices) task_ledgers __attribute__((used)) =
{.cpu_time = -1,
 .tkm_private = -1,
 .tkm_shared = -1,
 .phys_mem = -1,
 .wired_mem = -1,
 .internal = -1,
 .iokit_mapped = -1,
 .external = -1,
 .reusable = -1,
 .alternate_accounting = -1,
 .alternate_accounting_compressed = -1,
 .page_table = -1,
 .phys_footprint = -1,
 .internal_compressed = -1,
 .purgeable_volatile = -1,
 .purgeable_nonvolatile = -1,
 .purgeable_volatile_compressed = -1,
 .purgeable_nonvolatile_compressed = -1,
 .tagged_nofootprint = -1,
 .tagged_footprint = -1,
 .tagged_nofootprint_compressed = -1,
 .tagged_footprint_compressed = -1,
 .network_volatile = -1,
 .network_nonvolatile = -1,
 .network_volatile_compressed = -1,
 .network_nonvolatile_compressed = -1,
 .media_nofootprint = -1,
 .media_footprint = -1,
 .media_nofootprint_compressed = -1,
 .media_footprint_compressed = -1,
 .graphics_nofootprint = -1,
 .graphics_footprint = -1,
 .graphics_nofootprint_compressed = -1,
 .graphics_footprint_compressed = -1,
 .neural_nofootprint = -1,
 .neural_footprint = -1,
 .neural_nofootprint_compressed = -1,
 .neural_footprint_compressed = -1,
 .platform_idle_wakeups = -1,
 .interrupt_wakeups = -1,
#if CONFIG_SCHED_SFI
 .sfi_wait_times = { 0 /* initialized at runtime */},
#endif /* CONFIG_SCHED_SFI */
 .cpu_time_billed_to_me = -1,
 .cpu_time_billed_to_others = -1,
 .physical_writes = -1,
 .logical_writes = -1,
 .logical_writes_to_external = -1,
#if DEBUG || DEVELOPMENT
 .pages_grabbed = -1,
 .pages_grabbed_kern = -1,
 .pages_grabbed_iopl = -1,
 .pages_grabbed_upl = -1,
#endif
#if CONFIG_FREEZE
 .frozen_to_swap = -1,
#endif /* CONFIG_FREEZE */
 .energy_billed_to_me = -1,
 .energy_billed_to_others = -1,
#if CONFIG_PHYS_WRITE_ACCT
 .fs_metadata_writes = -1,
#endif /* CONFIG_PHYS_WRITE_ACCT */
#if CONFIG_MEMORYSTATUS
 .memorystatus_dirty_time = -1,
#endif /* CONFIG_MEMORYSTATUS */
 .swapins = -1, };

/* System sleep state */
boolean_t tasks_suspend_state;

__options_decl(send_exec_resource_is_fatal, bool, {
	IS_NOT_FATAL            = false,
	IS_FATAL                = true
});

__options_decl(send_exec_resource_is_diagnostics, bool, {
	IS_NOT_DIAGNOSTICS      = false,
	IS_DIAGNOSTICS          = true
});

__options_decl(send_exec_resource_is_warning, bool, {
	IS_NOT_WARNING          = false,
	IS_WARNING              = true
});

__options_decl(send_exec_resource_options_t, uint8_t, {
	EXEC_RESOURCE_FATAL = 0x01,
	EXEC_RESOURCE_DIAGNOSTIC = 0x02,
	EXEC_RESOURCE_WARNING = 0x04,
});
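
/*
 * Editorial note: send_exec_resource_options_t packs the three single-flag
 * booleans above into one bitmask, so a call site can pass combinations, e.g.
 *
 *	send_exec_resource_options_t opts = EXEC_RESOURCE_DIAGNOSTIC | EXEC_RESOURCE_WARNING;
 *	if (opts & EXEC_RESOURCE_FATAL) { ... }		// not set in this example
 *
 * (Illustrative only; which combinations are meaningful is up to the callers.)
 */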

/**
 * Actions to take when a process has reached the memory limit or the diagnostics threshold limits
 */
static inline void task_process_crossed_limit_no_diag(task_t task, ledger_amount_t ledger_limit_size, bool memlimit_is_fatal, bool memlimit_is_active, send_exec_resource_is_warning is_warning);
#if DEBUG || DEVELOPMENT
static inline void task_process_crossed_limit_diag(ledger_amount_t ledger_limit_size);
#endif
void init_task_ledgers(void);
void task_footprint_exceeded(int warning, __unused const void *param0, __unused const void *param1);
void task_wakeups_rate_exceeded(int warning, __unused const void *param0, __unused const void *param1);
void task_io_rate_exceeded(int warning, const void *param0, __unused const void *param1);
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void);
void __attribute__((noinline)) PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, send_exec_resource_options_t exception_options);
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor);
#if CONFIG_PROC_RESOURCE_LIMITS
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task_t task, int current_size, int soft_limit, int hard_limit);
mach_port_name_t current_task_get_fatal_port_name(void);
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

kern_return_t task_suspend_internal_locked(task_t);
kern_return_t task_suspend_internal(task_t);
kern_return_t task_resume_internal_locked(task_t);
kern_return_t task_resume_internal(task_t);
static kern_return_t task_start_halt_locked(task_t task, boolean_t should_mark_corpse);

extern kern_return_t iokit_task_terminate(task_t task);
extern void          iokit_task_app_suspended_changed(task_t task);

extern kern_return_t exception_deliver(thread_t, exception_type_t, mach_exception_data_t, mach_msg_type_number_t, struct exception_action *, lck_mtx_t *);
extern void bsd_copythreadname(void *dst_uth, void *src_uth);
extern kern_return_t thread_resume(thread_t thread);

extern int exit_with_port_space_exception(void *proc, mach_exception_code_t code, mach_exception_subcode_t subcode);

// Condition to include diag footprints
#define RESETTABLE_DIAG_FOOTPRINT_LIMITS ((DEBUG || DEVELOPMENT) && CONFIG_MEMORYSTATUS)

// Warn tasks when they hit 80% of their memory limit.
#define PHYS_FOOTPRINT_WARNING_LEVEL 80

#define TASK_WAKEUPS_MONITOR_DEFAULT_LIMIT              150 /* wakeups per second */
#define TASK_WAKEUPS_MONITOR_DEFAULT_INTERVAL   300 /* in seconds. */

/*
 * Level (in terms of percentage of the limit) at which the wakeups monitor triggers telemetry.
 *
 * (i.e., when the task's wakeups rate exceeds 70% of the limit, start taking user
 *  stacktraces, aka micro-stackshots)
 */
#define TASK_WAKEUPS_MONITOR_DEFAULT_USTACKSHOTS_TRIGGER        70
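
/*
 * Worked example (editorial): with the defaults above, wakeups are observed
 * over a 300-second window against a limit of 150 wakeups/sec, and
 * micro-stackshot telemetry starts once the observed rate exceeds
 * 70% * 150 = 105 wakeups/sec.
 */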

int task_wakeups_monitor_interval; /* In seconds. Time period over which wakeups rate is observed */
int task_wakeups_monitor_rate;     /* In hz. Maximum allowable wakeups per task before EXC_RESOURCE is sent */

unsigned int task_wakeups_monitor_ustackshots_trigger_pct; /* Percentage. Level at which we start gathering telemetry. */

TUNABLE(bool, disable_exc_resource, "disable_exc_resource", false); /* Global override to suppress EXC_RESOURCE for resource monitor violations. */
TUNABLE(bool, disable_exc_resource_during_audio, "disable_exc_resource_during_audio", true); /* Global override to suppress EXC_RESOURCE while audio is active */

ledger_amount_t max_task_footprint = 0;  /* Per-task limit on physical memory consumption in bytes     */
unsigned int max_task_footprint_warning_level = 0;  /* Per-task limit warning percentage */

/*
 * Configure per-task memory limit.
 * The boot-arg is interpreted as Megabytes,
 * and takes precedence over the device tree.
 * Setting the boot-arg to 0 disables task limits.
 */
TUNABLE_DT_WRITEABLE(int, max_task_footprint_mb, "/defaults", "kern.max_task_pmem", "max_task_pmem", 0, TUNABLE_DT_NONE);
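
/*
 * Usage sketch (editorial, based on the comment above): booting with
 * "max_task_pmem=2048" caps each task's physical footprint at 2048 MB,
 * while "max_task_pmem=0" (the default) leaves tasks unlimited.
 */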

/* I/O Monitor Limits */
#define IOMON_DEFAULT_LIMIT                     (20480ull)      /* MB of logical/physical I/O */
#define IOMON_DEFAULT_INTERVAL                  (86400ull)      /* in seconds */
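/*
 * Editorial note: the defaults above amount to 20480 MB (20 GB) of I/O per
 * 86400-second (24-hour) monitoring window before the I/O monitor trips.
 */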

uint64_t task_iomon_limit_mb;           /* Per-task I/O monitor limit in MBs */
uint64_t task_iomon_interval_secs;      /* Per-task I/O monitor interval in secs */

#define IO_TELEMETRY_DEFAULT_LIMIT              (10ll * 1024ll * 1024ll)
int64_t io_telemetry_limit;                     /* Threshold to take a microstackshot (0 indicates I/O telemetry is turned off) */
int64_t global_logical_writes_count = 0;        /* Global count for logical writes */
int64_t global_logical_writes_to_external_count = 0;        /* Global count for logical writes to external storage */
static boolean_t global_update_logical_writes(int64_t, int64_t*);

#if DEBUG || DEVELOPMENT
static diagthreshold_check_return task_check_memorythreshold_is_valid(task_t task, uint64_t new_limit, bool is_diagnostics_value);
#endif
#define TASK_MAX_THREAD_LIMIT 256

#if MACH_ASSERT
int pmap_ledgers_panic = 1;
int pmap_ledgers_panic_leeway = 3;
#endif /* MACH_ASSERT */

int task_max = CONFIG_TASK_MAX; /* Max number of tasks */

#if CONFIG_COREDUMP
int hwm_user_cores = 0; /* high watermark violations generate user core files */
#endif

#ifdef MACH_BSD
extern uint32_t proc_platform(const struct proc *);
extern uint32_t proc_sdk(struct proc *);
extern void     proc_getexecutableuuid(void *, unsigned char *, unsigned long);
extern int      proc_pid(struct proc *p);
extern int      proc_selfpid(void);
extern struct proc *current_proc(void);
extern char     *proc_name_address(struct proc *p);
extern uint64_t get_dispatchqueue_offset_from_proc(void *);
extern int kevent_proc_copy_uptrs(void *proc, uint64_t *buf, uint32_t bufsize);
extern void workq_proc_suspended(struct proc *p);
extern void workq_proc_resumed(struct proc *p);
extern struct proc *kernproc;

#if CONFIG_MEMORYSTATUS
extern void     proc_memstat_skip(struct proc* p, boolean_t set);
extern void     memorystatus_on_ledger_footprint_exceeded(int warning, bool memlimit_is_active, bool memlimit_is_fatal);
extern void     memorystatus_log_exception(const int max_footprint_mb, bool memlimit_is_active, bool memlimit_is_fatal);
extern void     memorystatus_log_diag_threshold_exception(const int diag_threshold_value);
extern boolean_t memorystatus_allowed_vm_map_fork(task_t task, bool *is_large);
extern uint64_t  memorystatus_available_memory_internal(struct proc *p);

#if DEVELOPMENT || DEBUG
extern void memorystatus_abort_vm_map_fork(task_t);
#endif

#endif /* CONFIG_MEMORYSTATUS */

#endif /* MACH_BSD */

/* Boot-arg that turns on fatal pac exception delivery for all first-party apps */
static TUNABLE(bool, enable_pac_exception, "enable_pac_exception", false);

/*
 * Defaults for controllable EXC_GUARD behaviors
 *
 * Internal builds are fatal by default (except BRIDGE).
 * Create an alternate set of defaults for special processes by name.
 */
struct task_exc_guard_named_default {
	char *name;
	uint32_t behavior;
};
#define _TASK_EXC_GUARD_MP_CORPSE  (TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_MP_CORPSE)
#define _TASK_EXC_GUARD_MP_ONCE    (_TASK_EXC_GUARD_MP_CORPSE | TASK_EXC_GUARD_MP_ONCE)
#define _TASK_EXC_GUARD_MP_FATAL   (TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_MP_FATAL)

#define _TASK_EXC_GUARD_VM_CORPSE  (TASK_EXC_GUARD_VM_DELIVER | TASK_EXC_GUARD_VM_CORPSE)
#define _TASK_EXC_GUARD_VM_ONCE    (_TASK_EXC_GUARD_VM_CORPSE | TASK_EXC_GUARD_VM_ONCE)
#define _TASK_EXC_GUARD_VM_FATAL   (TASK_EXC_GUARD_VM_DELIVER | TASK_EXC_GUARD_VM_FATAL)

#define _TASK_EXC_GUARD_ALL_CORPSE (_TASK_EXC_GUARD_MP_CORPSE | _TASK_EXC_GUARD_VM_CORPSE)
#define _TASK_EXC_GUARD_ALL_ONCE   (_TASK_EXC_GUARD_MP_ONCE | _TASK_EXC_GUARD_VM_ONCE)
#define _TASK_EXC_GUARD_ALL_FATAL  (_TASK_EXC_GUARD_MP_FATAL | _TASK_EXC_GUARD_VM_FATAL)
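
/*
 * Expansion example (editorial, reading the flag names literally): the
 * layering above means, e.g., _TASK_EXC_GUARD_MP_ONCE expands to
 * (TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_MP_CORPSE | TASK_EXC_GUARD_MP_ONCE):
 * deliver the exception, generate a corpse, and do so only once.
 */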

/* cannot turn off FATAL and DELIVER bit if set */
uint32_t task_exc_guard_no_unset_mask = TASK_EXC_GUARD_MP_FATAL | TASK_EXC_GUARD_VM_FATAL |
    TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_VM_DELIVER;
/* cannot turn on ONCE bit if unset */
uint32_t task_exc_guard_no_set_mask = TASK_EXC_GUARD_MP_ONCE | TASK_EXC_GUARD_VM_ONCE;

#if !defined(XNU_TARGET_OS_BRIDGE)

uint32_t task_exc_guard_default = _TASK_EXC_GUARD_ALL_FATAL;
uint32_t task_exc_guard_config_mask = TASK_EXC_GUARD_MP_ALL | TASK_EXC_GUARD_VM_ALL;
/*
 * These "by-process-name" default overrides are intended to be a short-term fix to
 * quickly get over races between changes introducing new EXC_GUARD raising behaviors
 * in some process and a change in default behavior for same. We should ship with
 * these lists empty (by fixing the bugs, or explicitly changing the task's EXC_GUARD
 * exception behavior via task_set_exc_guard_behavior()).
 *
 * XXX Remember to add/remove TASK_EXC_GUARD_HONOR_NAMED_DEFAULTS back to
 * task_exc_guard_default when transitioning this list between empty and
 * non-empty.
 */
static struct task_exc_guard_named_default task_exc_guard_named_defaults[] = {};

#else /* !defined(XNU_TARGET_OS_BRIDGE) */

uint32_t task_exc_guard_default = _TASK_EXC_GUARD_ALL_ONCE;
uint32_t task_exc_guard_config_mask = TASK_EXC_GUARD_MP_ALL | TASK_EXC_GUARD_VM_ALL;
static struct task_exc_guard_named_default task_exc_guard_named_defaults[] = {};

#endif /* !defined(XNU_TARGET_OS_BRIDGE) */

/* Forwards */

static void task_hold_locked(task_t task);
static void task_wait_locked(task_t task, boolean_t until_not_runnable);
static void task_release_locked(task_t task);
extern task_t proc_get_task_raw(void *proc);
extern void task_ref_hold_proc_task_struct(task_t task);
extern void task_release_proc_task_struct(task_t task);

static void task_synchronizer_destroy_all(task_t task);
static os_ref_count_t
task_add_turnstile_watchports_locked(
	task_t                      task,
	struct task_watchports      *watchports,
	struct task_watchport_elem  **previous_elem_array,
	ipc_port_t                  *portwatch_ports,
	uint32_t                    portwatch_count);

static os_ref_count_t
task_remove_turnstile_watchports_locked(
	task_t                 task,
	struct task_watchports *watchports,
	ipc_port_t             *port_freelist);

static struct task_watchports *
task_watchports_alloc_init(
	task_t        task,
	thread_t      thread,
	uint32_t      count);

static void
task_watchports_deallocate(
	struct task_watchports *watchports);

__attribute__((always_inline)) inline void
task_lock(task_t task)
{
	lck_mtx_lock(&(task)->lock);
}

__attribute__((always_inline)) inline void
task_unlock(task_t task)
{
	lck_mtx_unlock(&(task)->lock);
}

void
task_set_64bit(
	task_t task,
	boolean_t is_64bit,
	boolean_t is_64bit_data)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__arm64__)
	thread_t thread;
#endif /* defined(__i386__) || defined(__x86_64__) || defined(__arm64__) */

	task_lock(task);

	/*
	 * Switching to/from 64-bit address spaces
	 */
	if (is_64bit) {
		if (!task_has_64Bit_addr(task)) {
			task_set_64Bit_addr(task);
		}
	} else {
		if (task_has_64Bit_addr(task)) {
			task_clear_64Bit_addr(task);
		}
	}

	/*
	 * Switching to/from 64-bit register state.
	 */
	if (is_64bit_data) {
		if (task_has_64Bit_data(task)) {
			goto out;
		}

		task_set_64Bit_data(task);
	} else {
		if (!task_has_64Bit_data(task)) {
			goto out;
		}

		task_clear_64Bit_data(task);
	}

	/* FIXME: On x86, the thread save state flavor can diverge from the
	 * task's 64-bit feature flag due to the 32-bit/64-bit register save
	 * state dichotomy. Since we can be pre-empted in this interval,
	 * certain routines may observe the thread as being in an inconsistent
	 * state with respect to its task's 64-bitness.
	 */

#if defined(__x86_64__) || defined(__arm64__)
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		thread_mtx_lock(thread);
		machine_thread_switch_addrmode(thread);
		thread_mtx_unlock(thread);
	}
#endif /* defined(__x86_64__) || defined(__arm64__) */

out:
	task_unlock(task);
}

bool
task_get_64bit_addr(task_t task)
{
	return task_has_64Bit_addr(task);
}

bool
task_get_64bit_data(task_t task)
{
	return task_has_64Bit_data(task);
}

void
task_set_platform_binary(
	task_t task,
	boolean_t is_platform)
{
	if (is_platform) {
		task_ro_flags_set(task, TFRO_PLATFORM);
	} else {
		task_ro_flags_clear(task, TFRO_PLATFORM);
	}
}

boolean_t
task_get_platform_binary(task_t task)
{
	return (task_ro_flags_get(task) & TFRO_PLATFORM) != 0;
}

boolean_t
task_is_a_corpse(task_t task)
{
	return (task_ro_flags_get(task) & TFRO_CORPSE) != 0;
}

void
task_set_corpse(task_t task)
{
	return task_ro_flags_set(task, TFRO_CORPSE);
}

void
task_set_immovable_pinned(task_t task)
{
	ipc_task_set_immovable_pinned(task);
}

/*
 * Set or clear the per-task TF_CA_CLIENT_WI flag according to the specified argument.
 * Returns "false" if asked to set a flag that is already set; "true" in all other cases.
 */
bool
task_set_ca_client_wi(
	task_t task,
	boolean_t set_or_clear)
{
	bool ret = true;
	task_lock(task);
	if (set_or_clear) {
		/* Tasks can have only one CA_CLIENT work interval */
		if (task->t_flags & TF_CA_CLIENT_WI) {
			ret = false;
		} else {
			task->t_flags |= TF_CA_CLIENT_WI;
		}
	} else {
		task->t_flags &= ~TF_CA_CLIENT_WI;
	}
	task_unlock(task);
	return ret;
}
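
/*
 * Behavior sketch (editorial): task_set_ca_client_wi(task, TRUE) returns true
 * the first time and false on a second set while the flag is still set;
 * task_set_ca_client_wi(task, FALSE) always returns true.
 */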

/*
 * task_set_dyld_info() is called at most three times.
 * 1) at task struct creation to set addr/size to zero.
 * 2) in mach_loader.c to set the location of the __all_image_info section in the loaded dyld
 * 3) from dyld itself to update the location of all_image_info
 * For security, any calls after that are ignored.  The TF_DYLD_ALL_IMAGE_FINAL bit is used to determine state.
 */
kern_return_t
task_set_dyld_info(
	task_t            task,
	mach_vm_address_t addr,
	mach_vm_size_t    size)
{
	mach_vm_address_t end;
	if (os_add_overflow(addr, size, &end)) {
		return KERN_FAILURE;
	}

	task_lock(task);
	/* don't accept updates if all_image_info_addr is final */
	if ((task->t_flags & TF_DYLD_ALL_IMAGE_FINAL) == 0) {
		bool inputNonZero   = ((addr != 0) || (size != 0));
		bool currentNonZero = ((task->all_image_info_addr != 0) || (task->all_image_info_size != 0));
		task->all_image_info_addr = addr;
		task->all_image_info_size = size;
		/* can only change from a non-zero value to another non-zero once */
		if (inputNonZero && currentNonZero) {
			task->t_flags |= TF_DYLD_ALL_IMAGE_FINAL;
		}
		task_unlock(task);
		return KERN_SUCCESS;
	} else {
		task_unlock(task);
		return KERN_FAILURE;
	}
}
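
/*
 * Call-sequence sketch (editorial, following the comment and code above;
 * A1/S1 etc. are placeholder non-zero values):
 *   task_set_dyld_info(task, 0, 0);     // 1) creation: zeroed, stays non-final
 *   task_set_dyld_info(task, A1, S1);   // 2) mach_loader: first non-zero value
 *   task_set_dyld_info(task, A2, S2);   // 3) dyld: second non-zero value is
 *                                       //    stored and sets TF_DYLD_ALL_IMAGE_FINAL
 *   task_set_dyld_info(task, A3, S3);   // 4) and later: KERN_FAILURE, ignored
 */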

bool
task_donates_own_pages(
	task_t task)
{
	return task->donates_own_pages;
}

void
task_set_mach_header_address(
	task_t task,
	mach_vm_address_t addr)
{
	task_lock(task);
	task->mach_header_vm_address = addr;
	task_unlock(task);
}

void
task_bank_reset(__unused task_t task)
{
	if (task->bank_context != NULL) {
		bank_task_destroy(task);
	}
}

/*
 * NOTE: This should only be called when the P_LINTRANSIT
 *	 flag is set (the proc_trans lock is held) on the
 *	 proc associated with the task.
 */
void
task_bank_init(__unused task_t task)
{
	if (task->bank_context != NULL) {
		panic("Task bank init called with non null bank context for task: %p and bank_context: %p", task, task->bank_context);
	}
	bank_task_initialize(task);
}

void
task_set_did_exec_flag(task_t task)
{
	task->t_procflags |= TPF_DID_EXEC;
}

void
task_clear_exec_copy_flag(task_t task)
{
	task->t_procflags &= ~TPF_EXEC_COPY;
}

event_t
task_get_return_wait_event(task_t task)
{
	return (event_t)&task->returnwait_inheritor;
}

void
task_clear_return_wait(task_t task, uint32_t flags)
{
	if (flags & TCRW_CLEAR_INITIAL_WAIT) {
		thread_wakeup(task_get_return_wait_event(task));
	}

	if (flags & TCRW_CLEAR_FINAL_WAIT) {
		is_write_lock(task->itk_space);

		task->t_returnwaitflags &= ~TRW_LRETURNWAIT;
		task->returnwait_inheritor = NULL;

		if (flags & TCRW_CLEAR_EXEC_COMPLETE) {
			task->t_returnwaitflags &= ~TRW_LEXEC_COMPLETE;
		}

		if (task->t_returnwaitflags & TRW_LRETURNWAITER) {
			struct turnstile *turnstile = turnstile_prepare_hash((uintptr_t) task_get_return_wait_event(task),
			    TURNSTILE_ULOCK);

			waitq_wakeup64_all(&turnstile->ts_waitq,
			    CAST_EVENT64_T(task_get_return_wait_event(task)),
			    THREAD_AWAKENED, WAITQ_UPDATE_INHERITOR);

			turnstile_update_inheritor_complete(turnstile, TURNSTILE_INTERLOCK_HELD);

			turnstile_complete_hash((uintptr_t) task_get_return_wait_event(task), TURNSTILE_ULOCK);
			turnstile_cleanup();
			task->t_returnwaitflags &= ~TRW_LRETURNWAITER;
		}
		is_write_unlock(task->itk_space);
	}
}

void __attribute__((noreturn))
task_wait_to_return(void)
{
	task_t task = current_task();
	uint8_t returnwaitflags;

	is_write_lock(task->itk_space);

	if (task->t_returnwaitflags & TRW_LRETURNWAIT) {
		struct turnstile *turnstile = turnstile_prepare_hash((uintptr_t) task_get_return_wait_event(task),
		    TURNSTILE_ULOCK);

		do {
			task->t_returnwaitflags |= TRW_LRETURNWAITER;
			turnstile_update_inheritor(turnstile, task->returnwait_inheritor,
			    (TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_THREAD));

			waitq_assert_wait64(&turnstile->ts_waitq,
			    CAST_EVENT64_T(task_get_return_wait_event(task)),
			    THREAD_UNINT, TIMEOUT_WAIT_FOREVER);

			is_write_unlock(task->itk_space);

			turnstile_update_inheritor_complete(turnstile, TURNSTILE_INTERLOCK_NOT_HELD);

			thread_block(THREAD_CONTINUE_NULL);

			is_write_lock(task->itk_space);
		} while (task->t_returnwaitflags & TRW_LRETURNWAIT);

		turnstile_complete_hash((uintptr_t) task_get_return_wait_event(task), TURNSTILE_ULOCK);
	}

	returnwaitflags = task->t_returnwaitflags;
	is_write_unlock(task->itk_space);
	turnstile_cleanup();


#if CONFIG_MACF
	/*
	 * Before jumping to userspace and allowing this process
	 * to execute any code, make sure its credentials are cached,
	 * and notify any interested parties.
	 */
	extern void mach_kauth_cred_thread_update(void);

	mach_kauth_cred_thread_update();
	if (returnwaitflags & TRW_LEXEC_COMPLETE) {
		mac_proc_notify_exec_complete(current_proc());
	}
#endif

	thread_bootstrap_return();
}

boolean_t
task_is_exec_copy(task_t task)
{
	return task_is_exec_copy_internal(task);
}

boolean_t
task_did_exec(task_t task)
{
	return task_did_exec_internal(task);
}

boolean_t
task_is_active(task_t task)
{
	return task->active;
}

boolean_t
task_is_halting(task_t task)
{
	return task->halting;
}

void
task_init(void)
{
	if (max_task_footprint_mb != 0) {
#if CONFIG_MEMORYSTATUS
		if (max_task_footprint_mb < 50) {
			printf("Warning: max_task_pmem %d below minimum.\n",
			    max_task_footprint_mb);
			max_task_footprint_mb = 50;
		}
		printf("Limiting task physical memory footprint to %d MB\n",
		    max_task_footprint_mb);

		max_task_footprint = (ledger_amount_t)max_task_footprint_mb * 1024 * 1024;         // Convert MB to bytes

		/*
		 * Configure the per-task memory limit warning level.
		 * This is computed as a percentage.
		 */
		max_task_footprint_warning_level = 0;

		if (max_mem < 0x40000000) {
			/*
			 * On devices with < 1GB of memory:
			 *    -- set warnings to 50MB below the per-task limit.
			 */
			if (max_task_footprint_mb > 50) {
				max_task_footprint_warning_level = ((max_task_footprint_mb - 50) * 100) / max_task_footprint_mb;
			}
		} else {
			/*
			 * On devices with >= 1GB of memory:
			 *    -- set warnings to 100MB below the per-task limit.
			 */
			if (max_task_footprint_mb > 100) {
				max_task_footprint_warning_level = ((max_task_footprint_mb - 100) * 100) / max_task_footprint_mb;
			}
		}

		/*
		 * Never allow warning level to land below the default.
		 */
		if (max_task_footprint_warning_level < PHYS_FOOTPRINT_WARNING_LEVEL) {
			max_task_footprint_warning_level = PHYS_FOOTPRINT_WARNING_LEVEL;
		}
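
		/*
		 * Worked example (editorial): with max_task_pmem=500 on a
		 * >= 1GB device, the level is ((500 - 100) * 100) / 500 = 80,
		 * i.e. the warning fires 100MB below the 500MB limit, which
		 * here coincides with the 80% default floor.
		 */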

		printf("Limiting task physical memory warning to %d%%\n", max_task_footprint_warning_level);

#else
		printf("Warning: max_task_pmem specified, but jetsam not configured; ignoring.\n");
#endif /* CONFIG_MEMORYSTATUS */
	}

#if DEVELOPMENT || DEBUG
	PE_parse_boot_argn("task_exc_guard_default",
	    &task_exc_guard_default,
	    sizeof(task_exc_guard_default));
#endif /* DEVELOPMENT || DEBUG */

#if CONFIG_COREDUMP
	if (!PE_parse_boot_argn("hwm_user_cores", &hwm_user_cores,
	    sizeof(hwm_user_cores))) {
		hwm_user_cores = 0;
	}
#endif

	proc_init_cpumon_params();

	if (!PE_parse_boot_argn("task_wakeups_monitor_rate", &task_wakeups_monitor_rate, sizeof(task_wakeups_monitor_rate))) {
		task_wakeups_monitor_rate = TASK_WAKEUPS_MONITOR_DEFAULT_LIMIT;
	}

	if (!PE_parse_boot_argn("task_wakeups_monitor_interval", &task_wakeups_monitor_interval, sizeof(task_wakeups_monitor_interval))) {
		task_wakeups_monitor_interval = TASK_WAKEUPS_MONITOR_DEFAULT_INTERVAL;
	}

	if (!PE_parse_boot_argn("task_wakeups_monitor_ustackshots_trigger_pct", &task_wakeups_monitor_ustackshots_trigger_pct,
	    sizeof(task_wakeups_monitor_ustackshots_trigger_pct))) {
		task_wakeups_monitor_ustackshots_trigger_pct = TASK_WAKEUPS_MONITOR_DEFAULT_USTACKSHOTS_TRIGGER;
	}

	if (!PE_parse_boot_argn("task_iomon_limit_mb", &task_iomon_limit_mb, sizeof(task_iomon_limit_mb))) {
		task_iomon_limit_mb = IOMON_DEFAULT_LIMIT;
	}

	if (!PE_parse_boot_argn("task_iomon_interval_secs", &task_iomon_interval_secs, sizeof(task_iomon_interval_secs))) {
		task_iomon_interval_secs = IOMON_DEFAULT_INTERVAL;
	}

	if (!PE_parse_boot_argn("io_telemetry_limit", &io_telemetry_limit, sizeof(io_telemetry_limit))) {
		io_telemetry_limit = IO_TELEMETRY_DEFAULT_LIMIT;
	}

/*
 * If we have coalitions, coalition_init() will call init_task_ledgers() as it
 * sets up the ledgers for the default coalition. If we don't have coalitions,
 * then we have to call it now.
 */
#if CONFIG_COALITIONS
	assert(task_ledger_template);
#else /* CONFIG_COALITIONS */
	init_task_ledgers();
#endif /* CONFIG_COALITIONS */

	task_ref_init();
	task_zone_init();

#ifdef __LP64__
	boolean_t is_64bit = TRUE;
#else
	boolean_t is_64bit = FALSE;
#endif

	kernproc = (struct proc *)zalloc_flags(proc_task_zone, Z_WAITOK | Z_ZERO);
	kernel_task = proc_get_task_raw(kernproc);

	/*
	 * Create the kernel task as the first task.
	 */
	if (task_create_internal(TASK_NULL, NULL, NULL, FALSE, is_64bit,
	    is_64bit, TF_NONE, TF_NONE, TPF_NONE, TWF_NONE, kernel_task) != KERN_SUCCESS) {
		panic("task_init");
	}

	ipc_task_enable(kernel_task);

#if defined(HAS_APPLE_PAC)
	kernel_task->rop_pid = ml_default_rop_pid();
	kernel_task->jop_pid = ml_default_jop_pid();
	// kernel_task never runs at EL0, but machine_thread_state_convert_from/to_user() relies on
	// disable_user_jop to be false for kernel threads (e.g. in exception delivery on thread_exception_daemon)
	ml_task_set_disable_user_jop(kernel_task, FALSE);
#endif

	vm_map_deallocate(kernel_task->map);
	kernel_task->map = kernel_map;
}

static inline void
task_zone_init(void)
{
	proc_struct_size = roundup(proc_struct_size, task_alignment);
	task_struct_size = roundup(sizeof(struct task), proc_alignment);
	proc_and_task_size = proc_struct_size + task_struct_size;

	proc_task_zone = zone_create_ext("proc_task", proc_and_task_size,
	    ZC_ZFREE_CLEARMEM | ZC_SEQUESTER, ZONE_ID_PROC_TASK, NULL); /* sequester is needed for proc_rele() */
}
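
/*
 * Layout note (editorial): each proc_task zone element holds a proc followed
 * by its task. Rounding the proc size up to the task alignment (and the task
 * size up to the proc alignment) keeps both substructures properly aligned
 * within the single element, which is presumably what lets helpers such as
 * proc_get_task_raw() (used above) derive one pointer from the other at a
 * fixed offset.
 */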

/*
 * Task ledgers
 * ------------
 *
 * phys_footprint
 *   Physical footprint: This is the sum of:
 *     + (internal - alternate_accounting)
 *     + (internal_compressed - alternate_accounting_compressed)
 *     + iokit_mapped
 *     + purgeable_nonvolatile
 *     + purgeable_nonvolatile_compressed
 *     + page_table
 *
 * internal
 *   The task's anonymous memory, which on iOS is always resident.
 *
 * internal_compressed
 *   Amount of this task's internal memory which is held by the compressor.
 *   Such memory is no longer actually resident for the task [i.e., resident in its pmap],
 *   and could be either decompressed back into memory, or paged out to storage, depending
 *   on our implementation.
 *
 * iokit_mapped
 *   IOKit mappings: The total size of all IOKit mappings in this task [regardless of
 *   clean/dirty or internal/external state].
 *
 * alternate_accounting
 *   The number of internal dirty pages which are part of IOKit mappings. By definition, these pages
 *   are counted in both internal *and* iokit_mapped, so we must subtract them from the total to avoid
 *   double counting.
 *
 * pages_grabbed
 *   pages_grabbed counts all page grabs in a task.  It is also broken out into three subtypes
 *   which track UPL, IOPL and Kernel page grabs.
 */
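
/*
 * Worked example (editorial, applying the formula above): a task with
 * internal = 260MB, internal_compressed = 40MB, iokit_mapped = 20MB,
 * alternate_accounting = 10MB, alternate_accounting_compressed = 0,
 * no purgeable nonvolatile memory, and page_table = 5MB has
 *   phys_footprint = (260 - 10) + (40 - 0) + 20 + 0 + 0 + 5 = 315MB.
 */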
1127 void
init_task_ledgers(void)1128 init_task_ledgers(void)
1129 {
1130 	ledger_template_t t;
1131 
1132 	assert(task_ledger_template == NULL);
1133 	assert(kernel_task == TASK_NULL);
1134 
1135 #if MACH_ASSERT
1136 	PE_parse_boot_argn("pmap_ledgers_panic",
1137 	    &pmap_ledgers_panic,
1138 	    sizeof(pmap_ledgers_panic));
1139 	PE_parse_boot_argn("pmap_ledgers_panic_leeway",
1140 	    &pmap_ledgers_panic_leeway,
1141 	    sizeof(pmap_ledgers_panic_leeway));
1142 #endif /* MACH_ASSERT */
1143 
1144 	if ((t = ledger_template_create("Per-task ledger")) == NULL) {
1145 		panic("couldn't create task ledger template");
1146 	}
1147 
1148 	task_ledgers.cpu_time = ledger_entry_add(t, "cpu_time", "sched", "ns");
1149 	task_ledgers.tkm_private = ledger_entry_add(t, "tkm_private",
1150 	    "physmem", "bytes");
1151 	task_ledgers.tkm_shared = ledger_entry_add(t, "tkm_shared", "physmem",
1152 	    "bytes");
1153 	task_ledgers.phys_mem = ledger_entry_add(t, "phys_mem", "physmem",
1154 	    "bytes");
1155 	task_ledgers.wired_mem = ledger_entry_add(t, "wired_mem", "physmem",
1156 	    "bytes");
1157 	task_ledgers.internal = ledger_entry_add(t, "internal", "physmem",
1158 	    "bytes");
1159 	task_ledgers.iokit_mapped = ledger_entry_add_with_flags(t, "iokit_mapped", "mappings",
1160 	    "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1161 	task_ledgers.alternate_accounting = ledger_entry_add_with_flags(t, "alternate_accounting", "physmem",
1162 	    "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1163 	task_ledgers.alternate_accounting_compressed = ledger_entry_add_with_flags(t, "alternate_accounting_compressed", "physmem",
1164 	    "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1165 	task_ledgers.page_table = ledger_entry_add_with_flags(t, "page_table", "physmem",
1166 	    "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1167 	task_ledgers.phys_footprint = ledger_entry_add(t, "phys_footprint", "physmem",
1168 	    "bytes");
1169 	task_ledgers.internal_compressed = ledger_entry_add(t, "internal_compressed", "physmem",
1170 	    "bytes");
1171 	task_ledgers.reusable = ledger_entry_add(t, "reusable", "physmem", "bytes");
1172 	task_ledgers.external = ledger_entry_add(t, "external", "physmem", "bytes");
1173 	task_ledgers.purgeable_volatile = ledger_entry_add_with_flags(t, "purgeable_volatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1174 	task_ledgers.purgeable_nonvolatile = ledger_entry_add_with_flags(t, "purgeable_nonvolatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1175 	task_ledgers.purgeable_volatile_compressed = ledger_entry_add_with_flags(t, "purgeable_volatile_compress", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1176 	task_ledgers.purgeable_nonvolatile_compressed = ledger_entry_add_with_flags(t, "purgeable_nonvolatile_compress", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1177 #if DEBUG || DEVELOPMENT
1178 	task_ledgers.pages_grabbed = ledger_entry_add_with_flags(t, "pages_grabbed", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1179 	task_ledgers.pages_grabbed_kern = ledger_entry_add_with_flags(t, "pages_grabbed_kern", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1180 	task_ledgers.pages_grabbed_iopl = ledger_entry_add_with_flags(t, "pages_grabbed_iopl", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1181 	task_ledgers.pages_grabbed_upl = ledger_entry_add_with_flags(t, "pages_grabbed_upl", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1182 #endif
1183 	task_ledgers.tagged_nofootprint = ledger_entry_add_with_flags(t, "tagged_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1184 	task_ledgers.tagged_footprint = ledger_entry_add_with_flags(t, "tagged_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1185 	task_ledgers.tagged_nofootprint_compressed = ledger_entry_add_with_flags(t, "tagged_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1186 	task_ledgers.tagged_footprint_compressed = ledger_entry_add_with_flags(t, "tagged_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1187 	task_ledgers.network_volatile = ledger_entry_add_with_flags(t, "network_volatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1188 	task_ledgers.network_nonvolatile = ledger_entry_add_with_flags(t, "network_nonvolatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1189 	task_ledgers.network_volatile_compressed = ledger_entry_add_with_flags(t, "network_volatile_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1190 	task_ledgers.network_nonvolatile_compressed = ledger_entry_add_with_flags(t, "network_nonvolatile_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1191 	task_ledgers.media_nofootprint = ledger_entry_add_with_flags(t, "media_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1192 	task_ledgers.media_footprint = ledger_entry_add_with_flags(t, "media_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1193 	task_ledgers.media_nofootprint_compressed = ledger_entry_add_with_flags(t, "media_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1194 	task_ledgers.media_footprint_compressed = ledger_entry_add_with_flags(t, "media_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1195 	task_ledgers.graphics_nofootprint = ledger_entry_add_with_flags(t, "graphics_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1196 	task_ledgers.graphics_footprint = ledger_entry_add_with_flags(t, "graphics_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1197 	task_ledgers.graphics_nofootprint_compressed = ledger_entry_add_with_flags(t, "graphics_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1198 	task_ledgers.graphics_footprint_compressed = ledger_entry_add_with_flags(t, "graphics_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1199 	task_ledgers.neural_nofootprint = ledger_entry_add_with_flags(t, "neural_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1200 	task_ledgers.neural_footprint = ledger_entry_add_with_flags(t, "neural_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1201 	task_ledgers.neural_nofootprint_compressed = ledger_entry_add_with_flags(t, "neural_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1202 	task_ledgers.neural_footprint_compressed = ledger_entry_add_with_flags(t, "neural_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1203 
1204 #if CONFIG_FREEZE
1205 	task_ledgers.frozen_to_swap = ledger_entry_add(t, "frozen_to_swap", "physmem", "bytes");
1206 #endif /* CONFIG_FREEZE */
1207 
1208 	task_ledgers.platform_idle_wakeups = ledger_entry_add(t, "platform_idle_wakeups", "power",
1209 	    "count");
1210 	task_ledgers.interrupt_wakeups = ledger_entry_add(t, "interrupt_wakeups", "power",
1211 	    "count");
1212 
1213 #if CONFIG_SCHED_SFI
1214 	sfi_class_id_t class_id, ledger_alias;
1215 	for (class_id = SFI_CLASS_UNSPECIFIED; class_id < MAX_SFI_CLASS_ID; class_id++) {
1216 		task_ledgers.sfi_wait_times[class_id] = -1;
1217 	}
1218 
1219 	/* don't account for UNSPECIFIED */
1220 	for (class_id = SFI_CLASS_UNSPECIFIED + 1; class_id < MAX_SFI_CLASS_ID; class_id++) {
1221 		ledger_alias = sfi_get_ledger_alias_for_class(class_id);
1222 		if (ledger_alias != SFI_CLASS_UNSPECIFIED) {
1223 			/* Check to see if alias has been registered yet */
1224 			if (task_ledgers.sfi_wait_times[ledger_alias] != -1) {
1225 				task_ledgers.sfi_wait_times[class_id] = task_ledgers.sfi_wait_times[ledger_alias];
1226 			} else {
1227 				/* Otherwise, initialize it first */
1228 				task_ledgers.sfi_wait_times[class_id] = task_ledgers.sfi_wait_times[ledger_alias] = sfi_ledger_entry_add(t, ledger_alias);
1229 			}
1230 		} else {
1231 			task_ledgers.sfi_wait_times[class_id] = sfi_ledger_entry_add(t, class_id);
1232 		}
1233 
1234 		if (task_ledgers.sfi_wait_times[class_id] < 0) {
1235 			panic("couldn't create entries for task ledger template for SFI class 0x%x", class_id);
1236 		}
1237 	}
1238 
1239 	assert(task_ledgers.sfi_wait_times[MAX_SFI_CLASS_ID - 1] != -1);
1240 #endif /* CONFIG_SCHED_SFI */
1241 
1242 	task_ledgers.cpu_time_billed_to_me = ledger_entry_add(t, "cpu_time_billed_to_me", "sched", "ns");
1243 	task_ledgers.cpu_time_billed_to_others = ledger_entry_add(t, "cpu_time_billed_to_others", "sched", "ns");
1244 	task_ledgers.physical_writes = ledger_entry_add(t, "physical_writes", "res", "bytes");
1245 	task_ledgers.logical_writes = ledger_entry_add(t, "logical_writes", "res", "bytes");
1246 	task_ledgers.logical_writes_to_external = ledger_entry_add(t, "logical_writes_to_external", "res", "bytes");
1247 #if CONFIG_PHYS_WRITE_ACCT
1248 	task_ledgers.fs_metadata_writes = ledger_entry_add(t, "fs_metadata_writes", "res", "bytes");
1249 #endif /* CONFIG_PHYS_WRITE_ACCT */
1250 	task_ledgers.energy_billed_to_me = ledger_entry_add(t, "energy_billed_to_me", "power", "nj");
1251 	task_ledgers.energy_billed_to_others = ledger_entry_add(t, "energy_billed_to_others", "power", "nj");
1252 
1253 #if CONFIG_MEMORYSTATUS
1254 	task_ledgers.memorystatus_dirty_time = ledger_entry_add(t, "memorystatus_dirty_time", "physmem", "ns");
1255 #endif /* CONFIG_MEMORYSTATUS */
1256 
1257 	task_ledgers.swapins = ledger_entry_add_with_flags(t, "swapins", "physmem", "bytes",
1258 	    LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1259 
1260 	if ((task_ledgers.cpu_time < 0) ||
1261 	    (task_ledgers.tkm_private < 0) ||
1262 	    (task_ledgers.tkm_shared < 0) ||
1263 	    (task_ledgers.phys_mem < 0) ||
1264 	    (task_ledgers.wired_mem < 0) ||
1265 	    (task_ledgers.internal < 0) ||
1266 	    (task_ledgers.external < 0) ||
1267 	    (task_ledgers.reusable < 0) ||
1268 	    (task_ledgers.iokit_mapped < 0) ||
1269 	    (task_ledgers.alternate_accounting < 0) ||
1270 	    (task_ledgers.alternate_accounting_compressed < 0) ||
1271 	    (task_ledgers.page_table < 0) ||
1272 	    (task_ledgers.phys_footprint < 0) ||
1273 	    (task_ledgers.internal_compressed < 0) ||
1274 	    (task_ledgers.purgeable_volatile < 0) ||
1275 	    (task_ledgers.purgeable_nonvolatile < 0) ||
1276 	    (task_ledgers.purgeable_volatile_compressed < 0) ||
1277 	    (task_ledgers.purgeable_nonvolatile_compressed < 0) ||
1278 	    (task_ledgers.tagged_nofootprint < 0) ||
1279 	    (task_ledgers.tagged_footprint < 0) ||
1280 	    (task_ledgers.tagged_nofootprint_compressed < 0) ||
1281 	    (task_ledgers.tagged_footprint_compressed < 0) ||
1282 #if CONFIG_FREEZE
1283 	    (task_ledgers.frozen_to_swap < 0) ||
1284 #endif /* CONFIG_FREEZE */
1285 	    (task_ledgers.network_volatile < 0) ||
1286 	    (task_ledgers.network_nonvolatile < 0) ||
1287 	    (task_ledgers.network_volatile_compressed < 0) ||
1288 	    (task_ledgers.network_nonvolatile_compressed < 0) ||
1289 	    (task_ledgers.media_nofootprint < 0) ||
1290 	    (task_ledgers.media_footprint < 0) ||
1291 	    (task_ledgers.media_nofootprint_compressed < 0) ||
1292 	    (task_ledgers.media_footprint_compressed < 0) ||
1293 	    (task_ledgers.graphics_nofootprint < 0) ||
1294 	    (task_ledgers.graphics_footprint < 0) ||
1295 	    (task_ledgers.graphics_nofootprint_compressed < 0) ||
1296 	    (task_ledgers.graphics_footprint_compressed < 0) ||
1297 	    (task_ledgers.neural_nofootprint < 0) ||
1298 	    (task_ledgers.neural_footprint < 0) ||
1299 	    (task_ledgers.neural_nofootprint_compressed < 0) ||
1300 	    (task_ledgers.neural_footprint_compressed < 0) ||
1301 	    (task_ledgers.platform_idle_wakeups < 0) ||
1302 	    (task_ledgers.interrupt_wakeups < 0) ||
1303 	    (task_ledgers.cpu_time_billed_to_me < 0) || (task_ledgers.cpu_time_billed_to_others < 0) ||
1304 	    (task_ledgers.physical_writes < 0) ||
1305 	    (task_ledgers.logical_writes < 0) ||
1306 	    (task_ledgers.logical_writes_to_external < 0) ||
1307 #if CONFIG_PHYS_WRITE_ACCT
1308 	    (task_ledgers.fs_metadata_writes < 0) ||
1309 #endif /* CONFIG_PHYS_WRITE_ACCT */
1310 #if CONFIG_MEMORYSTATUS
1311 	    (task_ledgers.memorystatus_dirty_time < 0) ||
1312 #endif /* CONFIG_MEMORYSTATUS */
1313 	    (task_ledgers.energy_billed_to_me < 0) ||
1314 	    (task_ledgers.energy_billed_to_others < 0) ||
1315 	    (task_ledgers.swapins < 0)
1316 	    ) {
1317 		panic("couldn't create entries for task ledger template");
1318 	}
1319 
1320 	ledger_track_credit_only(t, task_ledgers.phys_footprint);
1321 	ledger_track_credit_only(t, task_ledgers.internal);
1322 	ledger_track_credit_only(t, task_ledgers.external);
1323 	ledger_track_credit_only(t, task_ledgers.reusable);
1324 
1325 	ledger_track_maximum(t, task_ledgers.phys_footprint, 60);
1326 	ledger_track_maximum(t, task_ledgers.phys_mem, 60);
1327 	ledger_track_maximum(t, task_ledgers.internal, 60);
1328 	ledger_track_maximum(t, task_ledgers.internal_compressed, 60);
1329 	ledger_track_maximum(t, task_ledgers.reusable, 60);
1330 	ledger_track_maximum(t, task_ledgers.external, 60);
1331 #if MACH_ASSERT
1332 	if (pmap_ledgers_panic) {
1333 		ledger_panic_on_negative(t, task_ledgers.phys_footprint);
1334 		ledger_panic_on_negative(t, task_ledgers.page_table);
1335 		ledger_panic_on_negative(t, task_ledgers.internal);
1336 		ledger_panic_on_negative(t, task_ledgers.iokit_mapped);
1337 		ledger_panic_on_negative(t, task_ledgers.alternate_accounting);
1338 		ledger_panic_on_negative(t, task_ledgers.alternate_accounting_compressed);
1339 		ledger_panic_on_negative(t, task_ledgers.purgeable_volatile);
1340 		ledger_panic_on_negative(t, task_ledgers.purgeable_nonvolatile);
1341 		ledger_panic_on_negative(t, task_ledgers.purgeable_volatile_compressed);
1342 		ledger_panic_on_negative(t, task_ledgers.purgeable_nonvolatile_compressed);
1343 #if CONFIG_PHYS_WRITE_ACCT
1344 		ledger_panic_on_negative(t, task_ledgers.fs_metadata_writes);
1345 #endif /* CONFIG_PHYS_WRITE_ACCT */
1346 
1347 		ledger_panic_on_negative(t, task_ledgers.tagged_nofootprint);
1348 		ledger_panic_on_negative(t, task_ledgers.tagged_footprint);
1349 		ledger_panic_on_negative(t, task_ledgers.tagged_nofootprint_compressed);
1350 		ledger_panic_on_negative(t, task_ledgers.tagged_footprint_compressed);
1351 		ledger_panic_on_negative(t, task_ledgers.network_volatile);
1352 		ledger_panic_on_negative(t, task_ledgers.network_nonvolatile);
1353 		ledger_panic_on_negative(t, task_ledgers.network_volatile_compressed);
1354 		ledger_panic_on_negative(t, task_ledgers.network_nonvolatile_compressed);
1355 		ledger_panic_on_negative(t, task_ledgers.media_nofootprint);
1356 		ledger_panic_on_negative(t, task_ledgers.media_footprint);
1357 		ledger_panic_on_negative(t, task_ledgers.media_nofootprint_compressed);
1358 		ledger_panic_on_negative(t, task_ledgers.media_footprint_compressed);
1359 		ledger_panic_on_negative(t, task_ledgers.graphics_nofootprint);
1360 		ledger_panic_on_negative(t, task_ledgers.graphics_footprint);
1361 		ledger_panic_on_negative(t, task_ledgers.graphics_nofootprint_compressed);
1362 		ledger_panic_on_negative(t, task_ledgers.graphics_footprint_compressed);
1363 		ledger_panic_on_negative(t, task_ledgers.neural_nofootprint);
1364 		ledger_panic_on_negative(t, task_ledgers.neural_footprint);
1365 		ledger_panic_on_negative(t, task_ledgers.neural_nofootprint_compressed);
1366 		ledger_panic_on_negative(t, task_ledgers.neural_footprint_compressed);
1367 	}
1368 #endif /* MACH_ASSERT */
1369 
1370 #if CONFIG_MEMORYSTATUS
1371 	ledger_set_callback(t, task_ledgers.phys_footprint, task_footprint_exceeded, NULL, NULL);
1372 #endif /* CONFIG_MEMORYSTATUS */
1373 
1374 	ledger_set_callback(t, task_ledgers.interrupt_wakeups,
1375 	    task_wakeups_rate_exceeded, NULL, NULL);
1376 	ledger_set_callback(t, task_ledgers.physical_writes, task_io_rate_exceeded, (void *)FLAVOR_IO_PHYSICAL_WRITES, NULL);
1377 
1378 #if !XNU_MONITOR
1379 	ledger_template_complete(t);
1380 #else /* !XNU_MONITOR */
1381 	ledger_template_complete_secure_alloc(t);
1382 #endif /* XNU_MONITOR */
1383 	task_ledger_template = t;
1384 }
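/*
 * Illustrative sketch (not part of XNU): once the template above is
 * complete, each new task instantiates a ledger from it and per-entry
 * limits can be applied, mirroring what task_create_internal() does
 * below. The 512MB limit and 80% warning level are hypothetical.
 *
 *	ledger_t l = ledger_instantiate(task_ledger_template,
 *	    LEDGER_CREATE_ACTIVE_ENTRIES);
 *	if (l != NULL) {
 *		ledger_set_limit(l, task_ledgers.phys_footprint,
 *		    (ledger_amount_t)512 * 1024 * 1024, 80);
 *		ledger_dereference(l);	// drop the ref when done
 *	}
 */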
1385 
1386 /* Create a task, but leave the task ports disabled */
1387 kern_return_t
1388 task_create_internal(
1389 	task_t             parent_task,            /* Null-able */
1390 	proc_ro_t          proc_ro,
1391 	coalition_t        *parent_coalitions __unused,
1392 	boolean_t          inherit_memory,
1393 	boolean_t          is_64bit,
1394 	boolean_t          is_64bit_data,
1395 	uint32_t           t_flags,
1396 	uint32_t           t_flags_ro,
1397 	uint32_t           t_procflags,
1398 	uint8_t            t_returnwaitflags,
1399 	task_t             child_task)
1400 {
1401 	task_t                  new_task;
1402 	vm_shared_region_t      shared_region;
1403 	ledger_t                ledger = NULL;
1404 	struct task_ro_data     task_ro_data = {};
1405 	uint32_t                parent_t_flags_ro = 0;
1406 
1407 	new_task = child_task;
1408 
1409 	if (task_ref_count_init(new_task) != KERN_SUCCESS) {
1410 		return KERN_RESOURCE_SHORTAGE;
1411 	}
1412 
1413 	/* allocate with active entries */
1414 	assert(task_ledger_template != NULL);
1415 	ledger = ledger_instantiate(task_ledger_template, LEDGER_CREATE_ACTIVE_ENTRIES);
1416 	if (ledger == NULL) {
1417 		task_ref_count_fini(new_task);
1418 		return KERN_RESOURCE_SHORTAGE;
1419 	}
1420 
1421 	counter_alloc(&(new_task->faults));
1422 
1423 #if defined(HAS_APPLE_PAC)
1424 	ml_task_set_rop_pid(new_task, parent_task, inherit_memory);
1425 	ml_task_set_jop_pid(new_task, parent_task, inherit_memory);
1426 	ml_task_set_disable_user_jop(new_task, inherit_memory ? parent_task->disable_user_jop : FALSE);
1427 #endif
1428 
1429 
1430 	new_task->ledger = ledger;
1431 
1432 	/* if inherit_memory is true, parent_task MUST not be NULL */
1433 	if (!(t_flags & TF_CORPSE_FORK) && inherit_memory) {
1434 #if CONFIG_DEFERRED_RECLAIM
1435 		if (parent_task->deferred_reclamation_metadata) {
1436 			/*
1437 			 * Prevent concurrent reclaims while we're forking the parent_task's map,
1438 			 * so that the child's map is in sync with the forked reclamation
1439 			 * metadata.
1440 			 */
1441 			vm_deferred_reclamation_buffer_lock(parent_task->deferred_reclamation_metadata);
1442 		}
1443 #endif /* CONFIG_DEFERRED_RECLAIM */
1444 		new_task->map = vm_map_fork(ledger, parent_task->map, 0);
1445 #if CONFIG_DEFERRED_RECLAIM
1446 		if (parent_task->deferred_reclamation_metadata) {
1447 			new_task->deferred_reclamation_metadata =
1448 			    vm_deferred_reclamation_buffer_fork(new_task, parent_task->deferred_reclamation_metadata);
1449 		}
1450 #endif /* CONFIG_DEFERRED_RECLAIM */
1451 	} else {
1452 		unsigned int pmap_flags = is_64bit ? PMAP_CREATE_64BIT : 0;
1453 		pmap_t pmap = pmap_create_options(ledger, 0, pmap_flags);
1454 		vm_map_t new_map;
1455 
1456 		if (pmap == NULL) {
1457 			counter_free(&new_task->faults);
1458 			ledger_dereference(ledger);
1459 			task_ref_count_fini(new_task);
1460 			return KERN_RESOURCE_SHORTAGE;
1461 		}
1462 		new_map = vm_map_create_options(pmap,
1463 		    (vm_map_offset_t)(VM_MIN_ADDRESS),
1464 		    (vm_map_offset_t)(VM_MAX_ADDRESS),
1465 		    VM_MAP_CREATE_PAGEABLE);
1466 		if (parent_task) {
1467 			vm_map_inherit_limits(new_map, parent_task->map);
1468 		}
1469 		new_task->map = new_map;
1470 	}
1471 
1472 	if (new_task->map == NULL) {
1473 		counter_free(&new_task->faults);
1474 		ledger_dereference(ledger);
1475 		task_ref_count_fini(new_task);
1476 		return KERN_RESOURCE_SHORTAGE;
1477 	}
1478 
1479 #if defined(CONFIG_SCHED_MULTIQ)
1480 	new_task->sched_group = sched_group_create();
1481 #endif
1482 
1483 	lck_mtx_init(&new_task->lock, &task_lck_grp, &task_lck_attr);
1484 	queue_init(&new_task->threads);
1485 	new_task->suspend_count = 0;
1486 	new_task->thread_count = 0;
1487 	new_task->active_thread_count = 0;
1488 	new_task->user_stop_count = 0;
1489 	new_task->legacy_stop_count = 0;
1490 	new_task->active = TRUE;
1491 	new_task->halting = FALSE;
1492 	new_task->priv_flags = 0;
1493 	new_task->t_flags = t_flags;
1494 	task_ro_data.t_flags_ro = t_flags_ro;
1495 	new_task->t_procflags = t_procflags;
1496 	new_task->t_returnwaitflags = t_returnwaitflags;
1497 	new_task->returnwait_inheritor = current_thread();
1498 	new_task->importance = 0;
1499 	new_task->crashed_thread_id = 0;
1500 	new_task->watchports = NULL;
1501 	new_task->t_rr_ranges = NULL;
1502 
1503 	new_task->bank_context = NULL;
1504 
1505 	if (parent_task) {
1506 		parent_t_flags_ro = task_ro_flags_get(parent_task);
1507 	}
1508 
1509 #if __has_feature(ptrauth_calls)
1510 	/* Inherit the pac exception flags from parent if in fork */
1511 	if (parent_task && inherit_memory) {
1512 		task_ro_data.t_flags_ro |= (parent_t_flags_ro & (TFRO_PAC_ENFORCE_USER_STATE |
1513 		    TFRO_PAC_EXC_FATAL));
1514 	}
1515 #endif
1516 
1517 #ifdef MACH_BSD
1518 	new_task->corpse_info = NULL;
1519 #endif /* MACH_BSD */
1520 
1521 	/* The kernel task, which is not created by this function, has unique id 0; start at 1 here. */
1522 	task_set_uniqueid(new_task);
1523 
1524 #if CONFIG_MACF
1525 	set_task_crash_label(new_task, NULL);
1526 
1527 	task_ro_data.task_filters.mach_trap_filter_mask = NULL;
1528 	task_ro_data.task_filters.mach_kobj_filter_mask = NULL;
1529 #endif
1530 
1531 #if CONFIG_MEMORYSTATUS
1532 	if (max_task_footprint != 0) {
1533 		ledger_set_limit(ledger, task_ledgers.phys_footprint, max_task_footprint, PHYS_FOOTPRINT_WARNING_LEVEL);
1534 	}
1535 #endif /* CONFIG_MEMORYSTATUS */
1536 
1537 	if (task_wakeups_monitor_rate != 0) {
1538 		uint32_t flags = WAKEMON_ENABLE | WAKEMON_SET_DEFAULTS;
1539 		int32_t  rate;        // Ignored because of WAKEMON_SET_DEFAULTS
1540 		task_wakeups_monitor_ctl(new_task, &flags, &rate);
1541 	}
1542 
1543 #if CONFIG_IO_ACCOUNTING
1544 	uint32_t flags = IOMON_ENABLE;
1545 	task_io_monitor_ctl(new_task, &flags);
1546 #endif /* CONFIG_IO_ACCOUNTING */
1547 
1548 	machine_task_init(new_task, parent_task, inherit_memory);
1549 
1550 	new_task->task_debug = NULL;
1551 
1552 #if DEVELOPMENT || DEBUG
1553 	new_task->task_unnested = FALSE;
1554 	new_task->task_disconnected_count = 0;
1555 #endif
1556 	queue_init(&new_task->semaphore_list);
1557 	new_task->semaphores_owned = 0;
1558 
1559 	new_task->vtimers = 0;
1560 
1561 	new_task->shared_region = NULL;
1562 
1563 	new_task->affinity_space = NULL;
1564 
1565 	new_task->t_kpc = 0;
1566 
1567 	new_task->pidsuspended = FALSE;
1568 	new_task->frozen = FALSE;
1569 	new_task->changing_freeze_state = FALSE;
1570 	new_task->rusage_cpu_flags = 0;
1571 	new_task->rusage_cpu_percentage = 0;
1572 	new_task->rusage_cpu_interval = 0;
1573 	new_task->rusage_cpu_deadline = 0;
1574 	new_task->rusage_cpu_callt = NULL;
1575 #if MACH_ASSERT
1576 	new_task->suspends_outstanding = 0;
1577 #endif
1578 	recount_task_init(&new_task->tk_recount);
1579 
1580 #if HYPERVISOR
1581 	new_task->hv_task_target = NULL;
1582 #endif /* HYPERVISOR */
1583 
1584 #if CONFIG_TASKWATCH
1585 	queue_init(&new_task->task_watchers);
1586 	new_task->num_taskwatchers  = 0;
1587 	new_task->watchapplying  = 0;
1588 #endif /* CONFIG_TASKWATCH */
1589 
1590 	new_task->mem_notify_reserved = 0;
1591 	new_task->memlimit_attrs_reserved = 0;
1592 
1593 	new_task->requested_policy = default_task_requested_policy;
1594 	new_task->effective_policy = default_task_effective_policy;
1595 
1596 	new_task->task_shared_region_slide = -1;
1597 
1598 	if (parent_task != NULL) {
1599 		task_ro_data.task_tokens.sec_token = *task_get_sec_token(parent_task);
1600 		task_ro_data.task_tokens.audit_token = *task_get_audit_token(parent_task);
1601 
1602 		/* only inherit the option bits, no effect until task_set_immovable_pinned() */
1603 		task_ro_data.task_control_port_options = task_get_control_port_options(parent_task);
1604 
1605 		task_ro_data.t_flags_ro |= parent_t_flags_ro & TFRO_FILTER_MSG;
1606 #if CONFIG_MACF
1607 		if (!(t_flags & TF_CORPSE_FORK)) {
1608 			task_ro_data.task_filters.mach_trap_filter_mask = task_get_mach_trap_filter_mask(parent_task);
1609 			task_ro_data.task_filters.mach_kobj_filter_mask = task_get_mach_kobj_filter_mask(parent_task);
1610 		}
1611 #endif
1612 	} else {
1613 		task_ro_data.task_tokens.sec_token = KERNEL_SECURITY_TOKEN;
1614 		task_ro_data.task_tokens.audit_token = KERNEL_AUDIT_TOKEN;
1615 
1616 		task_ro_data.task_control_port_options = TASK_CONTROL_PORT_OPTIONS_NONE;
1617 	}
1618 
1619 	/* must be set before task_importance_init_from_parent: */
1620 	if (proc_ro != NULL) {
1621 		new_task->bsd_info_ro = proc_ro_ref_task(proc_ro, new_task, &task_ro_data);
1622 	} else {
1623 		new_task->bsd_info_ro = proc_ro_alloc(NULL, NULL, new_task, &task_ro_data);
1624 	}
1625 
1626 	ipc_task_init(new_task, parent_task);
1627 
1628 	task_importance_init_from_parent(new_task, parent_task);
1629 
1630 	new_task->corpse_vmobject_list = NULL;
1631 
1632 	if (parent_task != TASK_NULL) {
1633 		/* inherit the parent's shared region */
1634 		shared_region = vm_shared_region_get(parent_task);
1635 		if (shared_region != NULL) {
1636 			vm_shared_region_set(new_task, shared_region);
1637 		}
1638 
1639 #if __has_feature(ptrauth_calls)
1640 		/* use parent's shared_region_id */
1641 		char *shared_region_id = task_get_vm_shared_region_id_and_jop_pid(parent_task, NULL);
1642 		if (shared_region_id != NULL) {
1643 			shared_region_key_alloc(shared_region_id, FALSE, 0);         /* get a reference */
1644 		}
1645 		task_set_shared_region_id(new_task, shared_region_id);
1646 #endif /* __has_feature(ptrauth_calls) */
1647 
1648 		if (task_has_64Bit_addr(parent_task)) {
1649 			task_set_64Bit_addr(new_task);
1650 		}
1651 
1652 		if (task_has_64Bit_data(parent_task)) {
1653 			task_set_64Bit_data(new_task);
1654 		}
1655 
1656 		new_task->all_image_info_addr = parent_task->all_image_info_addr;
1657 		new_task->all_image_info_size = parent_task->all_image_info_size;
1658 		new_task->mach_header_vm_address = 0;
1659 
1660 		if (inherit_memory && parent_task->affinity_space) {
1661 			task_affinity_create(parent_task, new_task);
1662 		}
1663 
1664 		new_task->pset_hint = parent_task->pset_hint = task_choose_pset(parent_task);
1665 
1666 		new_task->task_exc_guard = parent_task->task_exc_guard;
1667 		if (parent_task->t_flags & TF_NO_SMT) {
1668 			new_task->t_flags |= TF_NO_SMT;
1669 		}
1670 
1671 		if (parent_task->t_flags & TF_USE_PSET_HINT_CLUSTER_TYPE) {
1672 			new_task->t_flags |= TF_USE_PSET_HINT_CLUSTER_TYPE;
1673 		}
1674 
1675 		if (parent_task->t_flags & TF_TECS) {
1676 			new_task->t_flags |= TF_TECS;
1677 		}
1678 
1679 #if defined(__x86_64__)
1680 		if (parent_task->t_flags & TF_INSN_COPY_OPTOUT) {
1681 			new_task->t_flags |= TF_INSN_COPY_OPTOUT;
1682 		}
1683 #endif
1684 		new_task->priority = BASEPRI_DEFAULT;
1685 		new_task->max_priority = MAXPRI_USER;
1686 
1687 		task_policy_create(new_task, parent_task);
1688 	} else {
1689 #ifdef __LP64__
1690 		if (is_64bit) {
1691 			task_set_64Bit_addr(new_task);
1692 		}
1693 #endif
1694 
1695 		if (is_64bit_data) {
1696 			task_set_64Bit_data(new_task);
1697 		}
1698 
1699 		new_task->all_image_info_addr = (mach_vm_address_t)0;
1700 		new_task->all_image_info_size = (mach_vm_size_t)0;
1701 
1702 		new_task->pset_hint = PROCESSOR_SET_NULL;
1703 
1704 		new_task->task_exc_guard = TASK_EXC_GUARD_NONE;
1705 
1706 		if (new_task == kernel_task) {
1707 			new_task->priority = BASEPRI_KERNEL;
1708 			new_task->max_priority = MAXPRI_KERNEL;
1709 		} else {
1710 			new_task->priority = BASEPRI_DEFAULT;
1711 			new_task->max_priority = MAXPRI_USER;
1712 		}
1713 	}
1714 
1715 	bzero(new_task->coalition, sizeof(new_task->coalition));
1716 	for (int i = 0; i < COALITION_NUM_TYPES; i++) {
1717 		queue_chain_init(new_task->task_coalition[i]);
1718 	}
1719 
1720 	/* Allocate I/O Statistics */
1721 	new_task->task_io_stats = kalloc_data(sizeof(struct io_stat_info),
1722 	    Z_WAITOK | Z_ZERO | Z_NOFAIL);
1723 
1724 	bzero(&(new_task->cpu_time_eqos_stats), sizeof(new_task->cpu_time_eqos_stats));
1725 	bzero(&(new_task->cpu_time_rqos_stats), sizeof(new_task->cpu_time_rqos_stats));
1726 
1727 	bzero(&new_task->extmod_statistics, sizeof(new_task->extmod_statistics));
1728 
1729 	counter_alloc(&(new_task->pageins));
1730 	counter_alloc(&(new_task->cow_faults));
1731 	counter_alloc(&(new_task->messages_sent));
1732 	counter_alloc(&(new_task->messages_received));
1733 
1734 	/* Copy resource accounting info from the parent for a corpse-forked task. */
1735 	if (parent_task != NULL && (t_flags & TF_CORPSE_FORK)) {
1736 		task_rollup_accounting_info(new_task, parent_task);
1737 		task_store_owned_vmobject_info(new_task, parent_task);
1738 	} else {
1739 		/* Initialize to zero for standard fork/spawn case */
1740 		new_task->total_runnable_time = 0;
1741 		new_task->syscalls_mach = 0;
1742 		new_task->syscalls_unix = 0;
1743 		new_task->c_switch = 0;
1744 		new_task->p_switch = 0;
1745 		new_task->ps_switch = 0;
1746 		new_task->decompressions = 0;
1747 		new_task->low_mem_notified_warn = 0;
1748 		new_task->low_mem_notified_critical = 0;
1749 		new_task->purged_memory_warn = 0;
1750 		new_task->purged_memory_critical = 0;
1751 		new_task->low_mem_privileged_listener = 0;
1752 		new_task->memlimit_is_active = 0;
1753 		new_task->memlimit_is_fatal = 0;
1754 		new_task->memlimit_active_exc_resource = 0;
1755 		new_task->memlimit_inactive_exc_resource = 0;
1756 		new_task->task_timer_wakeups_bin_1 = 0;
1757 		new_task->task_timer_wakeups_bin_2 = 0;
1758 		new_task->task_gpu_ns = 0;
1759 		new_task->task_writes_counters_internal.task_immediate_writes = 0;
1760 		new_task->task_writes_counters_internal.task_deferred_writes = 0;
1761 		new_task->task_writes_counters_internal.task_invalidated_writes = 0;
1762 		new_task->task_writes_counters_internal.task_metadata_writes = 0;
1763 		new_task->task_writes_counters_external.task_immediate_writes = 0;
1764 		new_task->task_writes_counters_external.task_deferred_writes = 0;
1765 		new_task->task_writes_counters_external.task_invalidated_writes = 0;
1766 		new_task->task_writes_counters_external.task_metadata_writes = 0;
1767 #if CONFIG_PHYS_WRITE_ACCT
1768 		new_task->task_fs_metadata_writes = 0;
1769 #endif /* CONFIG_PHYS_WRITE_ACCT */
1770 	}
1771 
1772 
1773 	new_task->donates_own_pages = FALSE;
1774 #if CONFIG_COALITIONS
1775 	if (!(t_flags & TF_CORPSE_FORK)) {
1776 		/* TODO: there is no graceful failure path here... */
1777 		if (parent_coalitions && parent_coalitions[COALITION_TYPE_RESOURCE]) {
1778 			coalitions_adopt_task(parent_coalitions, new_task);
1779 			if (parent_coalitions[COALITION_TYPE_JETSAM]) {
1780 				new_task->donates_own_pages = coalition_is_swappable(parent_coalitions[COALITION_TYPE_JETSAM]);
1781 			}
1782 		} else if (parent_task && parent_task->coalition[COALITION_TYPE_RESOURCE]) {
1783 			/*
1784 			 * all tasks at least have a resource coalition, so
1785 			 * if the parent has one then inherit all coalitions
1786 			 * the parent is a part of
1787 			 */
1788 			coalitions_adopt_task(parent_task->coalition, new_task);
1789 			if (parent_task->coalition[COALITION_TYPE_JETSAM]) {
1790 				new_task->donates_own_pages = coalition_is_swappable(parent_task->coalition[COALITION_TYPE_JETSAM]);
1791 			}
1792 		} else {
1793 			/* TODO: assert that new_task will be PID 1 (launchd) */
1794 			coalitions_adopt_init_task(new_task);
1795 		}
1796 		/*
1797 		 * on exec, we need to transfer the coalition roles from the
1798 		 * parent task to the exec copy task.
1799 		 */
1800 		if (parent_task && (t_procflags & TPF_EXEC_COPY)) {
1801 			int coal_roles[COALITION_NUM_TYPES];
1802 			task_coalition_roles(parent_task, coal_roles);
1803 			(void)coalitions_set_roles(new_task->coalition, new_task, coal_roles);
1804 		}
1805 	} else {
1806 		coalitions_adopt_corpse_task(new_task);
1807 	}
1808 
1809 	if (new_task->coalition[COALITION_TYPE_RESOURCE] == COALITION_NULL) {
1810 		panic("created task is not a member of a resource coalition");
1811 	}
1812 	task_set_coalition_member(new_task);
1813 #endif /* CONFIG_COALITIONS */
1814 
1815 	new_task->dispatchqueue_offset = 0;
1816 	if (parent_task != NULL) {
1817 		new_task->dispatchqueue_offset = parent_task->dispatchqueue_offset;
1818 	}
1819 
1820 	new_task->task_can_transfer_memory_ownership = FALSE;
1821 	new_task->task_volatile_objects = 0;
1822 	new_task->task_nonvolatile_objects = 0;
1823 	new_task->task_objects_disowning = FALSE;
1824 	new_task->task_objects_disowned = FALSE;
1825 	new_task->task_owned_objects = 0;
1826 	queue_init(&new_task->task_objq);
1827 
1828 #if CONFIG_FREEZE
1829 	queue_init(&new_task->task_frozen_cseg_q);
1830 #endif /* CONFIG_FREEZE */
1831 
1832 	task_objq_lock_init(new_task);
1833 
1834 #if __arm64__
1835 	new_task->task_legacy_footprint = FALSE;
1836 	new_task->task_extra_footprint_limit = FALSE;
1837 	new_task->task_ios13extended_footprint_limit = FALSE;
1838 #endif /* __arm64__ */
1839 	new_task->task_region_footprint = FALSE;
1840 	new_task->task_has_crossed_thread_limit = FALSE;
1841 	new_task->task_thread_limit = 0;
1842 #if CONFIG_SECLUDED_MEMORY
1843 	new_task->task_can_use_secluded_mem = FALSE;
1844 	new_task->task_could_use_secluded_mem = FALSE;
1845 	new_task->task_could_also_use_secluded_mem = FALSE;
1846 	new_task->task_suppressed_secluded = FALSE;
1847 #endif /* CONFIG_SECLUDED_MEMORY */
1848 
1849 	/*
1850 	 * t_flags is set up above. But since we don't
1851 	 * support darkwake mode being set that way
1852 	 * currently, we clear it out here explicitly.
1853 	 */
1854 	new_task->t_flags &= ~(TF_DARKWAKE_MODE);
1855 
1856 	queue_init(&new_task->io_user_clients);
1857 	new_task->loadTag = 0;
1858 
1859 	lck_mtx_lock(&tasks_threads_lock);
1860 	queue_enter(&tasks, new_task, task_t, tasks);
1861 	tasks_count++;
1862 	if (tasks_suspend_state) {
1863 		task_suspend_internal(new_task);
1864 	}
1865 	lck_mtx_unlock(&tasks_threads_lock);
1866 	task_ref_hold_proc_task_struct(new_task);
1867 
1868 	return KERN_SUCCESS;
1869 }
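/*
 * Illustrative sketch (not part of XNU): a fork-style caller would drive
 * task_create_internal() roughly as follows; the flag arguments are
 * zeroed for brevity and the variable names are hypothetical.
 *
 *	kern_return_t kr;
 *
 *	kr = task_create_internal(parent, parent_proc_ro, NULL,
 *	    TRUE,			// inherit_memory: COW-fork the parent map
 *	    task_has_64Bit_addr(parent),
 *	    task_has_64Bit_data(parent),
 *	    0, 0, 0, 0,			// t_flags/t_flags_ro/t_procflags/t_returnwaitflags
 *	    child);
 *	if (kr != KERN_SUCCESS) {
 *		// the only failure mode above is KERN_RESOURCE_SHORTAGE
 *	}
 */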
1870 
1871 /*
1872  *	task_rollup_accounting_info
1873  *
1874  *	Roll up accounting stats. Used to roll up stats
1875  *	for the exec copy task and corpse fork.
1876  */
1877 void
1878 task_rollup_accounting_info(task_t to_task, task_t from_task)
1879 {
1880 	assert(from_task != to_task);
1881 
1882 	recount_task_copy(&to_task->tk_recount, &from_task->tk_recount);
1883 	to_task->total_runnable_time = from_task->total_runnable_time;
1884 	counter_add(&to_task->faults, counter_load(&from_task->faults));
1885 	counter_add(&to_task->pageins, counter_load(&from_task->pageins));
1886 	counter_add(&to_task->cow_faults, counter_load(&from_task->cow_faults));
1887 	counter_add(&to_task->messages_sent, counter_load(&from_task->messages_sent));
1888 	counter_add(&to_task->messages_received, counter_load(&from_task->messages_received));
1889 	to_task->decompressions = from_task->decompressions;
1890 	to_task->syscalls_mach = from_task->syscalls_mach;
1891 	to_task->syscalls_unix = from_task->syscalls_unix;
1892 	to_task->c_switch = from_task->c_switch;
1893 	to_task->p_switch = from_task->p_switch;
1894 	to_task->ps_switch = from_task->ps_switch;
1895 	to_task->extmod_statistics = from_task->extmod_statistics;
1896 	to_task->low_mem_notified_warn = from_task->low_mem_notified_warn;
1897 	to_task->low_mem_notified_critical = from_task->low_mem_notified_critical;
1898 	to_task->purged_memory_warn = from_task->purged_memory_warn;
1899 	to_task->purged_memory_critical = from_task->purged_memory_critical;
1900 	to_task->low_mem_privileged_listener = from_task->low_mem_privileged_listener;
1901 	*to_task->task_io_stats = *from_task->task_io_stats;
1902 	to_task->cpu_time_eqos_stats = from_task->cpu_time_eqos_stats;
1903 	to_task->cpu_time_rqos_stats = from_task->cpu_time_rqos_stats;
1904 	to_task->task_timer_wakeups_bin_1 = from_task->task_timer_wakeups_bin_1;
1905 	to_task->task_timer_wakeups_bin_2 = from_task->task_timer_wakeups_bin_2;
1906 	to_task->task_gpu_ns = from_task->task_gpu_ns;
1907 	to_task->task_writes_counters_internal.task_immediate_writes = from_task->task_writes_counters_internal.task_immediate_writes;
1908 	to_task->task_writes_counters_internal.task_deferred_writes = from_task->task_writes_counters_internal.task_deferred_writes;
1909 	to_task->task_writes_counters_internal.task_invalidated_writes = from_task->task_writes_counters_internal.task_invalidated_writes;
1910 	to_task->task_writes_counters_internal.task_metadata_writes = from_task->task_writes_counters_internal.task_metadata_writes;
1911 	to_task->task_writes_counters_external.task_immediate_writes = from_task->task_writes_counters_external.task_immediate_writes;
1912 	to_task->task_writes_counters_external.task_deferred_writes = from_task->task_writes_counters_external.task_deferred_writes;
1913 	to_task->task_writes_counters_external.task_invalidated_writes = from_task->task_writes_counters_external.task_invalidated_writes;
1914 	to_task->task_writes_counters_external.task_metadata_writes = from_task->task_writes_counters_external.task_metadata_writes;
1915 #if CONFIG_PHYS_WRITE_ACCT
1916 	to_task->task_fs_metadata_writes = from_task->task_fs_metadata_writes;
1917 #endif /* CONFIG_PHYS_WRITE_ACCT */
1918 
1919 #if CONFIG_MEMORYSTATUS
1920 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.memorystatus_dirty_time);
1921 #endif /* CONFIG_MEMORYSTATUS */
1922 
1923 	/* Skip ledger roll up for memory accounting entries */
1924 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time);
1925 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.platform_idle_wakeups);
1926 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.interrupt_wakeups);
1927 #if CONFIG_SCHED_SFI
1928 	for (sfi_class_id_t class_id = SFI_CLASS_UNSPECIFIED; class_id < MAX_SFI_CLASS_ID; class_id++) {
1929 		ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.sfi_wait_times[class_id]);
1930 	}
1931 #endif
1932 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time_billed_to_me);
1933 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time_billed_to_others);
1934 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.physical_writes);
1935 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.logical_writes);
1936 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.energy_billed_to_me);
1937 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.energy_billed_to_others);
1938 }
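/*
 * Illustrative sketch (not part of XNU): ledger_rollup_entry() folds a
 * single entry's credit and debit from the source ledger into the
 * destination, so after the calls above the exec-copy/corpse task
 * reports the same cumulative CPU time as the original. A hypothetical
 * sanity check:
 *
 *	ledger_amount_t credit, debit;
 *
 *	if (!ledger_get_entries(to_task->ledger, task_ledgers.cpu_time,
 *	    &credit, &debit)) {
 *		// the balance now includes from_task's cpu_time as well
 *		printf("cpu_time balance: %lld\n", credit - debit);
 *	}
 */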
1939 
1940 /*
1941  *	task_deallocate_internal:
1942  *
1943  *	Drop a reference on a task.
1944  *	Don't call this directly.
1945  */
1946 extern void task_deallocate_internal(task_t task, os_ref_count_t refs);
1947 void
1948 task_deallocate_internal(
1949 	task_t          task,
1950 	os_ref_count_t  refs)
1951 {
1952 	ledger_amount_t credit, debit, interrupt_wakeups, platform_idle_wakeups;
1953 
1954 	if (task == TASK_NULL) {
1955 		return;
1956 	}
1957 
1958 #if IMPORTANCE_INHERITANCE
1959 	if (refs == 1) {
1960 		/*
1961 		 * If last ref potentially comes from the task's importance,
1962 		 * disconnect it.  But more task refs may be added before
1963 		 * that completes, so wait for the reference to go to zero
1964 		 * naturally (it may happen on a recursive task_deallocate()
1965 		 * from the ipc_importance_disconnect_task() call).
1966 		 */
1967 		if (IIT_NULL != task->task_imp_base) {
1968 			ipc_importance_disconnect_task(task);
1969 		}
1970 		return;
1971 	}
1972 #endif /* IMPORTANCE_INHERITANCE */
1973 
1974 	if (refs > 0) {
1975 		return;
1976 	}
1977 
1978 	/*
1979 	 * The task should be dead at this point. Ensure other resources,
1980 	 * like threads, are gone before we trash the world.
1981 	 */
1982 	assert(queue_empty(&task->threads));
1983 	assert(get_bsdtask_info(task) == NULL);
1984 	assert(!is_active(task->itk_space));
1985 	assert(!task->active);
1986 	assert(task->active_thread_count == 0);
1987 	assert(!task_get_game_mode(task));
1988 
1989 	lck_mtx_lock(&tasks_threads_lock);
1990 	assert(terminated_tasks_count > 0);
1991 	queue_remove(&terminated_tasks, task, task_t, tasks);
1992 	terminated_tasks_count--;
1993 	lck_mtx_unlock(&tasks_threads_lock);
1994 
1995 	/*
1996 	 * remove the reference on bank context
1997 	 */
1998 	task_bank_reset(task);
1999 
2000 	kfree_data(task->task_io_stats, sizeof(struct io_stat_info));
2001 
2002 	/*
2003 	 *	Give the machine dependent code a chance
2004 	 *	to perform cleanup before ripping apart
2005 	 *	the task.
2006 	 */
2007 	machine_task_terminate(task);
2008 
2009 	ipc_task_terminate(task);
2010 
2011 	/* let iokit know */
2012 	iokit_task_terminate(task);
2013 
2014 	/* Unregister task from userspace coredumps on panic */
2015 	kern_unregister_userspace_coredump(task);
2016 
2017 	if (task->affinity_space) {
2018 		task_affinity_deallocate(task);
2019 	}
2020 
2021 #if MACH_ASSERT
2022 	if (task->ledger != NULL &&
2023 	    task->map != NULL &&
2024 	    task->map->pmap != NULL &&
2025 	    task->map->pmap->ledger != NULL) {
2026 		assert(task->ledger == task->map->pmap->ledger);
2027 	}
2028 #endif /* MACH_ASSERT */
2029 
2030 	vm_owned_objects_disown(task);
2031 	assert(task->task_objects_disowned);
2032 	if (task->task_owned_objects != 0) {
2033 		panic("task_deallocate(%p): "
2034 		    "volatile_objects=%d nonvolatile_objects=%d owned=%d\n",
2035 		    task,
2036 		    task->task_volatile_objects,
2037 		    task->task_nonvolatile_objects,
2038 		    task->task_owned_objects);
2039 	}
2040 
2041 #if CONFIG_DEFERRED_RECLAIM
2042 	if (task->deferred_reclamation_metadata != NULL) {
2043 		vm_deferred_reclamation_buffer_deallocate(task->deferred_reclamation_metadata);
2044 		task->deferred_reclamation_metadata = NULL;
2045 	}
2046 #endif /* CONFIG_DEFERRED_RECLAIM */
2047 
2048 	vm_map_deallocate(task->map);
2049 	if (task->is_large_corpse) {
2050 		assert(large_corpse_count > 0);
2051 		OSDecrementAtomic(&large_corpse_count);
2052 		task->is_large_corpse = false;
2053 	}
2054 	is_release(task->itk_space);
2055 	if (task->t_rr_ranges) {
2056 		restartable_ranges_release(task->t_rr_ranges);
2057 	}
2058 
2059 	ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
2060 	    &interrupt_wakeups, &debit);
2061 	ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
2062 	    &platform_idle_wakeups, &debit);
2063 
2064 #if defined(CONFIG_SCHED_MULTIQ)
2065 	sched_group_destroy(task->sched_group);
2066 #endif
2067 
2068 	struct recount_times_mach sum = { 0 };
2069 	struct recount_times_mach p_only = { 0 };
2070 	recount_task_times_perf_only(task, &sum, &p_only);
2071 #if CONFIG_PERVASIVE_ENERGY
2072 	uint64_t energy = recount_task_energy_nj(task);
2073 #endif /* CONFIG_PERVASIVE_ENERGY */
2074 	recount_task_deinit(&task->tk_recount);
2075 
2076 	/* Accumulate statistics for dead tasks */
2077 	lck_spin_lock(&dead_task_statistics_lock);
2078 	dead_task_statistics.total_user_time += sum.rtm_user;
2079 	dead_task_statistics.total_system_time += sum.rtm_system;
2080 
2081 	dead_task_statistics.task_interrupt_wakeups += interrupt_wakeups;
2082 	dead_task_statistics.task_platform_idle_wakeups += platform_idle_wakeups;
2083 
2084 	dead_task_statistics.task_timer_wakeups_bin_1 += task->task_timer_wakeups_bin_1;
2085 	dead_task_statistics.task_timer_wakeups_bin_2 += task->task_timer_wakeups_bin_2;
2086 	dead_task_statistics.total_ptime += p_only.rtm_user + p_only.rtm_system;
2087 	dead_task_statistics.total_pset_switches += task->ps_switch;
2088 	dead_task_statistics.task_gpu_ns += task->task_gpu_ns;
2089 #if CONFIG_PERVASIVE_ENERGY
2090 	dead_task_statistics.task_energy += energy;
2091 #endif /* CONFIG_PERVASIVE_ENERGY */
2092 
2093 	lck_spin_unlock(&dead_task_statistics_lock);
2094 	lck_mtx_destroy(&task->lock, &task_lck_grp);
2095 
2096 	if (!ledger_get_entries(task->ledger, task_ledgers.tkm_private, &credit,
2097 	    &debit)) {
2098 		OSAddAtomic64(credit, (int64_t *)&tasks_tkm_private.alloc);
2099 		OSAddAtomic64(debit, (int64_t *)&tasks_tkm_private.free);
2100 	}
2101 	if (!ledger_get_entries(task->ledger, task_ledgers.tkm_shared, &credit,
2102 	    &debit)) {
2103 		OSAddAtomic64(credit, (int64_t *)&tasks_tkm_shared.alloc);
2104 		OSAddAtomic64(debit, (int64_t *)&tasks_tkm_shared.free);
2105 	}
2106 	ledger_dereference(task->ledger);
2107 
2108 	counter_free(&task->faults);
2109 	counter_free(&task->pageins);
2110 	counter_free(&task->cow_faults);
2111 	counter_free(&task->messages_sent);
2112 	counter_free(&task->messages_received);
2113 
2114 #if CONFIG_COALITIONS
2115 	task_release_coalitions(task);
2116 #endif /* CONFIG_COALITIONS */
2117 
2118 	bzero(task->coalition, sizeof(task->coalition));
2119 
2120 #if MACH_BSD
2121 	/* clean up collected information since last reference to task is gone */
2122 	if (task->corpse_info) {
2123 		void *corpse_info_kernel = kcdata_memory_get_begin_addr(task->corpse_info);
2124 		task_crashinfo_destroy(task->corpse_info);
2125 		task->corpse_info = NULL;
2126 		kfree_data(corpse_info_kernel, CORPSEINFO_ALLOCATION_SIZE);
2127 	}
2128 #endif
2129 
2130 #if CONFIG_MACF
2131 	if (get_task_crash_label(task)) {
2132 		mac_exc_free_label(get_task_crash_label(task));
2133 		set_task_crash_label(task, NULL);
2134 	}
2135 #endif
2136 
2137 	assert(queue_empty(&task->task_objq));
2138 	task_objq_lock_destroy(task);
2139 
2140 	if (task->corpse_vmobject_list) {
2141 		kfree_data(task->corpse_vmobject_list,
2142 		    (vm_size_t)task->corpse_vmobject_list_size);
2143 	}
2144 
2145 	task_ref_count_fini(task);
2146 	proc_ro_erase_task(task->bsd_info_ro);
2147 	task_release_proc_task_struct(task);
2148 }
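/*
 * Illustrative sketch (not part of XNU): nothing calls
 * task_deallocate_internal() directly; callers pair task_reference()
 * with task_deallocate(), and the final release funnels in here via
 * the reference-count machinery.
 *
 *	task_reference(task);	// +1 while we operate on the task
 *	// ... use the task ...
 *	task_deallocate(task);	// -1; the last release tears it down
 */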
2149 
2150 /*
2151  *	task_name_deallocate_mig:
2152  *
2153  *	Drop a reference on a task name.
2154  */
2155 void
2156 task_name_deallocate_mig(
2157 	task_name_t             task_name)
2158 {
2159 	return task_deallocate_grp((task_t)task_name, TASK_GRP_MIG);
2160 }
2161 
2162 /*
2163  *	task_policy_set_deallocate_mig:
2164  *
2165  *	Drop a reference on a task type.
2166  */
2167 void
2168 task_policy_set_deallocate_mig(task_policy_set_t task_policy_set)
2169 {
2170 	return task_deallocate_grp((task_t)task_policy_set, TASK_GRP_MIG);
2171 }
2172 
2173 /*
2174  *	task_policy_get_deallocate_mig:
2175  *
2176  *	Drop a reference on a task type.
2177  */
2178 void
2179 task_policy_get_deallocate_mig(task_policy_get_t task_policy_get)
2180 {
2181 	return task_deallocate_grp((task_t)task_policy_get, TASK_GRP_MIG);
2182 }
2183 
2184 /*
2185  *	task_inspect_deallocate_mig:
2186  *
2187  *	Drop a task inspection reference.
2188  */
2189 void
2190 task_inspect_deallocate_mig(
2191 	task_inspect_t          task_inspect)
2192 {
2193 	return task_deallocate_grp((task_t)task_inspect, TASK_GRP_MIG);
2194 }
2195 
2196 /*
2197  *	task_read_deallocate_mig:
2198  *
2199  *	Drop a reference on task read port.
2200  */
2201 void
2202 task_read_deallocate_mig(
2203 	task_read_t          task_read)
2204 {
2205 	return task_deallocate_grp((task_t)task_read, TASK_GRP_MIG);
2206 }
2207 
2208 /*
2209  *	task_suspension_token_deallocate:
2210  *
2211  *	Drop a reference on a task suspension token.
2212  */
2213 void
2214 task_suspension_token_deallocate(
2215 	task_suspension_token_t         token)
2216 {
2217 	return task_deallocate((task_t)token);
2218 }
2219 
2220 void
2221 task_suspension_token_deallocate_grp(
2222 	task_suspension_token_t         token,
2223 	task_grp_t                      grp)
2224 {
2225 	return task_deallocate_grp((task_t)token, grp);
2226 }
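/*
 * Illustrative note (not part of XNU): the *_deallocate_mig() wrappers
 * above let MIG-generated server stubs release each flavored task type
 * without knowing it is a task_t underneath; they funnel into
 * task_deallocate_grp() with TASK_GRP_MIG (the suspension-token variant
 * takes the group from its caller) so grouped reference accounting
 * stays balanced. A hypothetical use:
 *
 *	task_read_t tr = ...;		// produced by a MIG intran function
 *	task_read_deallocate_mig(tr);	// == task_deallocate_grp((task_t)tr,
 *					//    TASK_GRP_MIG)
 */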
2227 
2228 /*
2229  * task_collect_crash_info:
2230  *
2231  * collect crash info from BSD- and Mach-based data
2232  */
2233 kern_return_t
2234 task_collect_crash_info(
2235 	task_t task,
2236 #ifdef CONFIG_MACF
2237 	struct label *crash_label,
2238 #endif
2239 	int is_corpse_fork)
2240 {
2241 	kern_return_t kr = KERN_SUCCESS;
2242 
2243 	kcdata_descriptor_t crash_data = NULL;
2244 	kcdata_descriptor_t crash_data_release = NULL;
2245 	mach_msg_type_number_t size = CORPSEINFO_ALLOCATION_SIZE;
2246 	mach_vm_offset_t crash_data_ptr = 0;
2247 	void *crash_data_kernel = NULL;
2248 	void *crash_data_kernel_release = NULL;
2249 #if CONFIG_MACF
2250 	struct label *label, *free_label;
2251 #endif
2252 
2253 	if (!corpses_enabled()) {
2254 		return KERN_NOT_SUPPORTED;
2255 	}
2256 
2257 #if CONFIG_MACF
2258 	free_label = label = mac_exc_create_label(NULL);
2259 #endif
2260 
2261 	task_lock(task);
2262 
2263 	assert(is_corpse_fork || get_bsdtask_info(task) != NULL);
2264 	if (task->corpse_info == NULL && (is_corpse_fork || get_bsdtask_info(task) != NULL)) {
2265 #if CONFIG_MACF
2266 		/* Set the crash label, used by the exception delivery mac hook */
2267 		free_label = get_task_crash_label(task);         // Most likely NULL.
2268 		set_task_crash_label(task, label);
2269 		mac_exc_update_task_crash_label(task, crash_label);
2270 #endif
2271 		task_unlock(task);
2272 
2273 		crash_data_kernel = kalloc_data(CORPSEINFO_ALLOCATION_SIZE,
2274 		    Z_WAITOK | Z_ZERO);
2275 		if (crash_data_kernel == NULL) {
2276 			kr = KERN_RESOURCE_SHORTAGE;
2277 			goto out_no_lock;
2278 		}
2279 		crash_data_ptr = (mach_vm_offset_t) crash_data_kernel;
2280 
2281 		/* Do not get a corpse ref for corpse fork */
2282 		crash_data = task_crashinfo_alloc_init((mach_vm_address_t)crash_data_ptr, size,
2283 		    is_corpse_fork ? 0 : CORPSE_CRASHINFO_HAS_REF,
2284 		    KCFLAG_USE_MEMCOPY);
2285 		if (crash_data) {
2286 			task_lock(task);
2287 			crash_data_release = task->corpse_info;
2288 			crash_data_kernel_release = kcdata_memory_get_begin_addr(crash_data_release);
2289 			task->corpse_info = crash_data;
2290 
2291 			task_unlock(task);
2292 			kr = KERN_SUCCESS;
2293 		} else {
2294 			kfree_data(crash_data_kernel,
2295 			    CORPSEINFO_ALLOCATION_SIZE);
2296 			kr = KERN_FAILURE;
2297 		}
2298 
2299 		if (crash_data_release != NULL) {
2300 			task_crashinfo_destroy(crash_data_release);
2301 		}
2302 		kfree_data(crash_data_kernel_release, CORPSEINFO_ALLOCATION_SIZE);
2303 	} else {
2304 		task_unlock(task);
2305 	}
2306 
2307 out_no_lock:
2308 #if CONFIG_MACF
2309 	if (free_label != NULL) {
2310 		mac_exc_free_label(free_label);
2311 	}
2312 #endif
2313 	return kr;
2314 }
2315 
2316 /*
2317  * task_deliver_crash_notification:
2318  *
2319  * Makes outcall to registered host port for a corpse.
2320  */
2321 kern_return_t
2322 task_deliver_crash_notification(
2323 	task_t corpse, /* corpse or corpse fork */
2324 	thread_t thread,
2325 	exception_type_t etype,
2326 	mach_exception_subcode_t subcode)
2327 {
2328 	kcdata_descriptor_t crash_info = corpse->corpse_info;
2329 	thread_t th_iter = NULL;
2330 	kern_return_t kr = KERN_SUCCESS;
2331 	wait_interrupt_t wsave;
2332 	mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
2333 	ipc_port_t corpse_port;
2334 
2335 	if (crash_info == NULL) {
2336 		return KERN_FAILURE;
2337 	}
2338 
2339 	assert(task_is_a_corpse(corpse));
2340 
2341 	task_lock(corpse);
2342 
2343 	/*
2344 	 * Always populate code[0] as the effective exception type for EXC_CORPSE_NOTIFY.
2345 	 * Crash reporters should derive whether it's fatal from the corpse blob.
2346 	 */
2347 	code[0] = etype;
2348 	code[1] = subcode;
2349 
2350 	queue_iterate(&corpse->threads, th_iter, thread_t, task_threads)
2351 	{
2352 		if (th_iter->corpse_dup == FALSE) {
2353 			ipc_thread_reset(th_iter);
2354 		}
2355 	}
2356 	task_unlock(corpse);
2357 
2358 	/* Arm the no-sender notification for taskport */
2359 	task_reference(corpse);
2360 	corpse_port = convert_corpse_to_port_and_nsrequest(corpse);
2361 
2362 	wsave = thread_interrupt_level(THREAD_UNINT);
2363 	kr = exception_triage_thread(EXC_CORPSE_NOTIFY, code, EXCEPTION_CODE_MAX, thread);
2364 	if (kr != KERN_SUCCESS) {
2365 		printf("Failed to send exception EXC_CORPSE_NOTIFY. error code: %d for pid %d\n", kr, task_pid(corpse));
2366 	}
2367 
2368 	(void)thread_interrupt_level(wsave);
2369 
2370 	/*
2371 	 * Drop the send right on corpse port, will fire the
2372 	 * no-sender notification if exception delivery failed.
2373 	 */
2374 	ipc_port_release_send(corpse_port);
2375 	return kr;
2376 }
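/*
 * Illustrative sketch (not part of XNU): a user-space EXC_CORPSE_NOTIFY
 * handler sees the pair populated above as its exception codes; the
 * handler below is a hypothetical stub following the mach_exc.defs
 * server shape.
 *
 *	// codes[0] == effective exception type (etype)
 *	// codes[1] == subcode detail
 *	kern_return_t
 *	catch_mach_exception_raise(mach_port_t exception_port,
 *	    mach_port_t thread, mach_port_t task,
 *	    exception_type_t exception, mach_exception_data_t codes,
 *	    mach_msg_type_number_t code_count)
 *	{
 *		if (exception == EXC_CORPSE_NOTIFY && code_count >= 2) {
 *			// codes[0]/codes[1] carry etype/subcode from above
 *		}
 *		return KERN_SUCCESS;
 *	}
 */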
2377 
2378 /*
2379  *	task_terminate:
2380  *
2381  *	Terminate the specified task.  See comments on thread_terminate
2382  *	(kern/thread.c) about problems with terminating the "current task."
2383  */
2384 
2385 kern_return_t
2386 task_terminate(
2387 	task_t          task)
2388 {
2389 	if (task == TASK_NULL) {
2390 		return KERN_INVALID_ARGUMENT;
2391 	}
2392 
2393 	if (get_bsdtask_info(task)) {
2394 		return KERN_FAILURE;
2395 	}
2396 
2397 	return task_terminate_internal(task);
2398 }
2399 
2400 #if MACH_ASSERT
2401 extern int proc_pid(struct proc *);
2402 extern void proc_name_kdp(struct proc *p, char *buf, int size);
2403 #endif /* MACH_ASSERT */
2404 
2405 #define VM_MAP_PARTIAL_REAP 0x54  /* 0x150 */
2406 static void
2407 __unused task_partial_reap(task_t task, __unused int pid)
2408 {
2409 	unsigned int    reclaimed_resident = 0;
2410 	unsigned int    reclaimed_compressed = 0;
2411 	uint64_t        task_page_count;
2412 
2413 	task_page_count = (get_task_phys_footprint(task) / PAGE_SIZE_64);
2414 
2415 	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_MAP_PARTIAL_REAP) | DBG_FUNC_START),
2416 	    pid, task_page_count, 0, 0, 0);
2417 
2418 	vm_map_partial_reap(task->map, &reclaimed_resident, &reclaimed_compressed);
2419 
2420 	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_MAP_PARTIAL_REAP) | DBG_FUNC_END),
2421 	    pid, reclaimed_resident, reclaimed_compressed, 0, 0);
2422 }
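/*
 * Worked example (annotation, not XNU source): the 0x150 noted next to
 * VM_MAP_PARTIAL_REAP above is its kdebug encoding. kdebug debugids keep
 * the code in bits [15:2] (the low two bits are DBG_FUNC_START/END), so
 * 0x54 << 2 == 0x150, which is what appears in a raw trace for
 * MACHDBG_CODE(DBG_MACH_VM, VM_MAP_PARTIAL_REAP).
 */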
2423 
2424 /*
2425  * task_mark_corpse:
2426  *
2427  * Mark the task as a corpse. Called by the crashing thread.
2428  */
2429 kern_return_t
2430 task_mark_corpse(task_t task)
2431 {
2432 	kern_return_t kr = KERN_SUCCESS;
2433 	thread_t self_thread;
2434 	(void) self_thread;
2435 	wait_interrupt_t wsave;
2436 #if CONFIG_MACF
2437 	struct label *crash_label = NULL;
2438 #endif
2439 
2440 	assert(task != kernel_task);
2441 	assert(task == current_task());
2442 	assert(!task_is_a_corpse(task));
2443 
2444 #if CONFIG_MACF
2445 	crash_label = mac_exc_create_label_for_proc((struct proc*)get_bsdtask_info(task));
2446 #endif
2447 
2448 	kr = task_collect_crash_info(task,
2449 #if CONFIG_MACF
2450 	    crash_label,
2451 #endif
2452 	    FALSE);
2453 	if (kr != KERN_SUCCESS) {
2454 		goto out;
2455 	}
2456 
2457 	self_thread = current_thread();
2458 
2459 	wsave = thread_interrupt_level(THREAD_UNINT);
2460 	task_lock(task);
2461 
2462 	/*
2463 	 * Check if any other thread called task_terminate_internal
2464 	 * and made the task inactive before we could mark it for
2465 	 * corpse pending report. Bail out if the task is inactive.
2466 	 */
2467 	if (!task->active) {
2468 		kcdata_descriptor_t crash_data_release = task->corpse_info;
2469 		void *crash_data_kernel_release = kcdata_memory_get_begin_addr(crash_data_release);
2470 
2471 		task->corpse_info = NULL;
2472 		task_unlock(task);
2473 
2474 		if (crash_data_release != NULL) {
2475 			task_crashinfo_destroy(crash_data_release);
2476 		}
2477 		kfree_data(crash_data_kernel_release, CORPSEINFO_ALLOCATION_SIZE);
2478 		return KERN_TERMINATED;
2479 	}
2480 
2481 	task_set_corpse_pending_report(task);
2482 	task_set_corpse(task);
2483 	task->crashed_thread_id = thread_tid(self_thread);
2484 
2485 	kr = task_start_halt_locked(task, TRUE);
2486 	assert(kr == KERN_SUCCESS);
2487 
2488 	task_set_uniqueid(task);
2489 
2490 	task_unlock(task);
2491 
2492 	/*
2493 	 * ipc_task_reset() moved to last thread_terminate_self(): rdar://75737960.
2494 	 * disable old ports here instead.
2495 	 *
2496 	 * The vm_map and ipc_space must exist until this function returns,
2497 	 * convert_port_to_{map,space}_with_flavor relies on this behavior.
2498 	 */
2499 	ipc_task_disable(task);
2500 
2501 	/* terminate the ipc space */
2502 	ipc_space_terminate(task->itk_space);
2503 
2504 	/* Add it to global corpse task list */
2505 	task_add_to_corpse_task_list(task);
2506 
2507 	thread_terminate_internal(self_thread);
2508 
2509 	(void) thread_interrupt_level(wsave);
2510 	assert(task->halting == TRUE);
2511 
2512 out:
2513 #if CONFIG_MACF
2514 	mac_exc_free_label(crash_label);
2515 #endif
2516 	return kr;
2517 }
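/*
 * Illustrative sketch (not part of XNU): task_mark_corpse() runs on the
 * crashing thread itself, after crash info has been collected and before
 * the corpse notification is delivered; a hypothetical call site:
 *
 *	assert(task == current_task());
 *	if (task_mark_corpse(task) == KERN_SUCCESS) {
 *		// task is halting and the current thread is terminating;
 *		// EXC_CORPSE_NOTIFY delivery proceeds from here
 *	}
 */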
2518 
2519 /*
2520  *	task_set_uniqueid
2521  *
2522  *	Set task uniqueid to systemwide unique 64 bit value
2523  */
2524 void
2525 task_set_uniqueid(task_t task)
2526 {
2527 	task->task_uniqueid = OSIncrementAtomic64(&next_taskuniqueid);
2528 }
2529 
2530 /*
2531  *	task_clear_corpse
2532  *
2533  *	Clears the corpse pending bit on task.
2534  *	Removes inspection bit on the threads.
2535  */
2536 void
2537 task_clear_corpse(task_t task)
2538 {
2539 	thread_t th_iter = NULL;
2540 
2541 	task_lock(task);
2542 	queue_iterate(&task->threads, th_iter, thread_t, task_threads)
2543 	{
2544 		thread_mtx_lock(th_iter);
2545 		th_iter->inspection = FALSE;
2546 		ipc_thread_disable(th_iter);
2547 		thread_mtx_unlock(th_iter);
2548 	}
2549 
2550 	thread_terminate_crashed_threads();
2551 	/* remove the pending corpse report flag */
2552 	task_clear_corpse_pending_report(task);
2553 
2554 	task_unlock(task);
2555 }
2556 
2557 /*
2558  *	task_port_no_senders
2559  *
2560  *	Called whenever the Mach port system detects no-senders on
2561  *	the task port of a corpse.
2562  *	Each notification that comes in should terminate the task (corpse).
2563  */
2564 static void
2565 task_port_no_senders(ipc_port_t port, __unused mach_port_mscount_t mscount)
2566 {
2567 	task_t task = ipc_kobject_get_locked(port, IKOT_TASK_CONTROL);
2568 
2569 	assert(task != TASK_NULL);
2570 	assert(task_is_a_corpse(task));
2571 
2572 	/* Remove the task from global corpse task list */
2573 	task_remove_from_corpse_task_list(task);
2574 
2575 	task_clear_corpse(task);
2576 	task_terminate_internal(task);
2577 }
2578 
2579 /*
2580  *	task_port_with_flavor_no_senders
2581  *
2582  *	Called whenever the Mach port system detects no-senders on
2583  *	the task inspect or read port. These ports are allocated lazily and
2584  *	should be deallocated here when there are no senders remaining.
2585  */
2586 static void
2587 task_port_with_flavor_no_senders(
2588 	ipc_port_t          port,
2589 	mach_port_mscount_t mscount __unused)
2590 {
2591 	task_t task;
2592 	mach_task_flavor_t flavor;
2593 	ipc_kobject_type_t kotype;
2594 
2595 	ip_mq_lock(port);
2596 	if (port->ip_srights > 0) {
2597 		ip_mq_unlock(port);
2598 		return;
2599 	}
2600 	kotype = ip_kotype(port);
2601 	assert((IKOT_TASK_READ == kotype) || (IKOT_TASK_INSPECT == kotype));
2602 	task = ipc_kobject_get_locked(port, kotype);
2603 	if (task != TASK_NULL) {
2604 		task_reference(task);
2605 	}
2606 	ip_mq_unlock(port);
2607 
2608 	if (task == TASK_NULL) {
2609 		/* The task is exiting or disabled; it will eventually deallocate the port */
2610 		return;
2611 	}
2612 
2613 	if (kotype == IKOT_TASK_READ) {
2614 		flavor = TASK_FLAVOR_READ;
2615 	} else {
2616 		flavor = TASK_FLAVOR_INSPECT;
2617 	}
2618 
2619 	itk_lock(task);
2620 	ip_mq_lock(port);
2621 
2622 	/*
2623 	 * If the port is no longer active, then ipc_task_terminate() ran
2624 	 * and destroyed the kobject already. Just deallocate the task
2625 	 * ref we took and go away.
2626 	 *
2627 	 * It is also possible that several nsrequests are in flight,
2628 	 * only one shall NULL-out the port entry, and this is the one
2629 	 * that gets to dealloc the port.
2630 	 *
2631 	 * Check for a stale no-senders notification. A call to any function
2632 	 * that vends out send rights to this port could resurrect it between
2633 	 * this notification being generated and actually being handled here.
2634 	 */
2635 	if (!ip_active(port) ||
2636 	    task->itk_task_ports[flavor] != port ||
2637 	    port->ip_srights > 0) {
2638 		ip_mq_unlock(port);
2639 		itk_unlock(task);
2640 		task_deallocate(task);
2641 		return;
2642 	}
2643 
2644 	assert(task->itk_task_ports[flavor] == port);
2645 	task->itk_task_ports[flavor] = IP_NULL;
2646 	itk_unlock(task);
2647 
2648 	ipc_kobject_dealloc_port_and_unlock(port, 0, kotype);
2649 
2650 	task_deallocate(task);
2651 }
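/*
 * Illustrative timeline (not part of XNU) of the stale-notification race
 * the checks above guard against:
 *
 *	1. The last send right to a read/inspect port dies and a
 *	   no-senders notification is queued.
 *	2. Before it is handled, another caller (e.g. task_get_special_port()
 *	   for the read flavor) vends a fresh send right for the port.
 *	3. This handler then runs, observes ip_srights > 0 (or a replaced
 *	   itk_task_ports[flavor] entry), and must bail out instead of
 *	   deallocating a live port.
 */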
2652 
2653 /*
2654  *	task_wait_till_threads_terminate_locked
2655  *
2656  *	Wait till all the threads in the task are terminated.
2657  *	Might release the task lock and re-acquire it.
2658  */
2659 void
2660 task_wait_till_threads_terminate_locked(task_t task)
2661 {
2662 	/* wait for all the threads in the task to terminate */
2663 	while (task->active_thread_count != 0) {
2664 		assert_wait((event_t)&task->active_thread_count, THREAD_UNINT);
2665 		task_unlock(task);
2666 		thread_block(THREAD_CONTINUE_NULL);
2667 
2668 		task_lock(task);
2669 	}
2670 }
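/*
 * Illustrative sketch (not part of XNU): the loop above is the standard
 * Mach assert_wait/thread_block pattern; the event, lock, and predicate
 * below are hypothetical.
 *
 *	while (!condition) {
 *		assert_wait((event_t)&condition, THREAD_UNINT);
 *		lck_mtx_unlock(&lock);		// never block holding the lock
 *		thread_block(THREAD_CONTINUE_NULL);
 *		lck_mtx_lock(&lock);		// re-check predicate after wakeup
 *	}
 *	// waker side, with the lock held:
 *	//	condition = TRUE;
 *	//	thread_wakeup((event_t)&condition);
 */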
2671 
2672 /*
2673  *	task_duplicate_map_and_threads
2674  *
2675  *	Copy vmmap of source task.
2676  *	Copy active threads from source task to destination task.
2677  *	Source task would be suspended during the copy.
2678  *	The source task is suspended for the duration of the copy.
2679 kern_return_t
2680 task_duplicate_map_and_threads(
2681 	task_t task,
2682 	void *p,
2683 	task_t new_task,
2684 	thread_t *thread_ret,
2685 	uint64_t **udata_buffer,
2686 	int *size,
2687 	int *num_udata,
2688 	bool for_exception)
2689 {
2690 	kern_return_t kr = KERN_SUCCESS;
2691 	int active;
2692 	thread_t thread, self, thread_return = THREAD_NULL;
2693 	thread_t new_thread = THREAD_NULL, first_thread = THREAD_NULL;
2694 	thread_t *thread_array;
2695 	uint32_t active_thread_count = 0, array_count = 0, i;
2696 	vm_map_t oldmap;
2697 	uint64_t *buffer = NULL;
2698 	int buf_size = 0;
2699 	int est_knotes = 0, num_knotes = 0;
2700 
2701 	self = current_thread();
2702 
2703 	/*
2704 	 * Suspend the task to copy thread state; use the internal
2705 	 * variant so that no user-space process can resume
2706 	 * the task from under us.
2707 	 */
2708 	kr = task_suspend_internal(task);
2709 	if (kr != KERN_SUCCESS) {
2710 		return kr;
2711 	}
2712 
2713 	if (task->map->disable_vmentry_reuse == TRUE) {
2714 		/*
2715 		 * Quite likely GuardMalloc (or some debugging tool)
2716 		 * is being used on this task. And it has gone through
2717 		 * its limit. Making a corpse will likely encounter
2718 		 * a lot of VM entries that will need COW.
2719 		 *
2720 		 * Skip it.
2721 		 */
2722 #if DEVELOPMENT || DEBUG
2723 		memorystatus_abort_vm_map_fork(task);
2724 #endif
2725 		ktriage_record(thread_tid(self), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_CORPSE, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_CORPSE_FAIL_LIBGMALLOC), 0 /* arg */);
2726 		task_resume_internal(task);
2727 		return KERN_FAILURE;
2728 	}
2729 
2730 	/* Check with VM if vm_map_fork is allowed for this task */
2731 	bool is_large = false;
2732 	if (memorystatus_allowed_vm_map_fork(task, &is_large)) {
2733 		/* Set up the new task's vmmap: replace its map with a COW fork of the source task's map */
2734 		oldmap = new_task->map;
2735 		new_task->map = vm_map_fork(new_task->ledger,
2736 		    task->map,
2737 		    (VM_MAP_FORK_SHARE_IF_INHERIT_NONE |
2738 		    VM_MAP_FORK_PRESERVE_PURGEABLE |
2739 		    VM_MAP_FORK_CORPSE_FOOTPRINT));
2740 		if (new_task->map) {
2741 			new_task->is_large_corpse = is_large;
2742 			vm_map_deallocate(oldmap);
2743 
2744 			/* copy ledgers that impact the memory footprint */
2745 			vm_map_copy_footprint_ledgers(task, new_task);
2746 
2747 			/* Get all the udata pointers from kqueue */
2748 			est_knotes = kevent_proc_copy_uptrs(p, NULL, 0);
2749 			if (est_knotes > 0) {
2750 				buf_size = (est_knotes + 32) * sizeof(uint64_t);
2751 				buffer = kalloc_data(buf_size, Z_WAITOK);
2752 				num_knotes = kevent_proc_copy_uptrs(p, buffer, buf_size);
2753 				if (num_knotes > est_knotes + 32) {
2754 					num_knotes = est_knotes + 32;
2755 				}
2756 			}
2757 		} else {
2758 			if (is_large) {
2759 				assert(large_corpse_count > 0);
2760 				OSDecrementAtomic(&large_corpse_count);
2761 			}
2762 			new_task->map = oldmap;
2763 #if DEVELOPMENT || DEBUG
2764 			memorystatus_abort_vm_map_fork(task);
2765 #endif
2766 			task_resume_internal(task);
2767 			return KERN_NO_SPACE;
2768 		}
2769 	} else if (!for_exception) {
2770 #if DEVELOPMENT || DEBUG
2771 		memorystatus_abort_vm_map_fork(task);
2772 #endif
2773 		task_resume_internal(task);
2774 		return KERN_NO_SPACE;
2775 	}
2776 
2777 	active_thread_count = task->active_thread_count;
2778 	if (active_thread_count == 0) {
2779 		kfree_data(buffer, buf_size);
2780 		task_resume_internal(task);
2781 		return KERN_FAILURE;
2782 	}
2783 
2784 	thread_array = kalloc_type(thread_t, active_thread_count, Z_WAITOK);
2785 
2786 	/* Collect the active threads under the task lock, then drop the lock before calling thread_create_with_continuation */
2787 	task_lock(task);
2788 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
2789 		/* Skip inactive threads */
2790 		active = thread->active;
2791 		if (!active) {
2792 			continue;
2793 		}
2794 
2795 		if (array_count >= active_thread_count) {
2796 			break;
2797 		}
2798 
2799 		thread_array[array_count++] = thread;
2800 		thread_reference(thread);
2801 	}
2802 	task_unlock(task);
2803 
2804 	for (i = 0; i < array_count; i++) {
2805 		kr = thread_create_with_continuation(new_task, &new_thread, (thread_continue_t)thread_corpse_continue);
2806 		if (kr != KERN_SUCCESS) {
2807 			break;
2808 		}
2809 
2810 		/* Equivalent of current thread in corpse */
2811 		if (thread_array[i] == self) {
2812 			thread_return = new_thread;
2813 			new_task->crashed_thread_id = thread_tid(new_thread);
2814 		} else if (first_thread == NULL) {
2815 			first_thread = new_thread;
2816 		} else {
2817 			/* drop the extra ref returned by thread_create_with_continuation */
2818 			thread_deallocate(new_thread);
2819 		}
2820 
2821 		kr = thread_dup2(thread_array[i], new_thread);
2822 		if (kr != KERN_SUCCESS) {
2823 			thread_mtx_lock(new_thread);
2824 			new_thread->corpse_dup = TRUE;
2825 			thread_mtx_unlock(new_thread);
2826 			continue;
2827 		}
2828 
2829 		/* Copy thread name */
2830 		bsd_copythreadname(get_bsdthread_info(new_thread),
2831 		    get_bsdthread_info(thread_array[i]));
2832 		new_thread->thread_tag = thread_array[i]->thread_tag &
2833 		    ~THREAD_TAG_USER_JOIN;
2834 		thread_copy_resource_info(new_thread, thread_array[i]);
2835 	}
2836 
2837 	/* return the first thread if we couldn't find the equivalent of current */
2838 	if (thread_return == THREAD_NULL) {
2839 		thread_return = first_thread;
2840 	} else if (first_thread != THREAD_NULL) {
2841 		/* drop the extra ref returned by thread_create_with_continuation */
2842 		thread_deallocate(first_thread);
2843 	}
2844 
2845 	task_resume_internal(task);
2846 
2847 	for (i = 0; i < array_count; i++) {
2848 		thread_deallocate(thread_array[i]);
2849 	}
2850 	kfree_type(thread_t, active_thread_count, thread_array);
2851 
2852 	if (kr == KERN_SUCCESS) {
2853 		*thread_ret = thread_return;
2854 		*udata_buffer = buffer;
2855 		*size = buf_size;
2856 		*num_udata = num_knotes;
2857 	} else {
2858 		if (thread_return != THREAD_NULL) {
2859 			thread_deallocate(thread_return);
2860 		}
2861 		kfree_data(buffer, buf_size);
2862 	}
2863 
2864 	return kr;
2865 }
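/*
 * Illustrative sketch (not part of XNU): corpse creation drives this
 * routine roughly as follows; the variable names are hypothetical.
 *
 *	thread_t crashed = THREAD_NULL;
 *	uint64_t *udata = NULL;
 *	int usize = 0, ucount = 0;
 *
 *	kr = task_duplicate_map_and_threads(victim_task, victim_proc,
 *	    corpse_task, &crashed, &udata, &usize, &ucount, FALSE);
 *	if (kr == KERN_SUCCESS) {
 *		// udata holds up to ucount kqueue uptrs for the crash report
 *		kfree_data(udata, usize);
 *	}
 */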
2866 
2867 #if CONFIG_SECLUDED_MEMORY
2868 extern void task_set_can_use_secluded_mem_locked(
2869 	task_t          task,
2870 	boolean_t       can_use_secluded_mem);
2871 #endif /* CONFIG_SECLUDED_MEMORY */
2872 
2873 #if MACH_ASSERT
2874 int debug4k_panic_on_terminate = 0;
2875 #endif /* MACH_ASSERT */
2876 kern_return_t
2877 task_terminate_internal(
2878 	task_t                  task)
2879 {
2880 	thread_t                        thread, self;
2881 	task_t                          self_task;
2882 	boolean_t                       interrupt_save;
2883 	int                             pid = 0;
2884 
2885 	assert(task != kernel_task);
2886 
2887 	self = current_thread();
2888 	self_task = current_task();
2889 
2890 	/*
2891 	 *	Get the task locked and make sure that we are not racing
2892 	 *	with someone else trying to terminate us.
2893 	 */
2894 	if (task == self_task) {
2895 		task_lock(task);
2896 	} else if (task < self_task) {
2897 		task_lock(task);
2898 		task_lock(self_task);
2899 	} else {
2900 		task_lock(self_task);
2901 		task_lock(task);
2902 	}
2903 
2904 #if CONFIG_SECLUDED_MEMORY
2905 	if (task->task_can_use_secluded_mem) {
2906 		task_set_can_use_secluded_mem_locked(task, FALSE);
2907 	}
2908 	task->task_could_use_secluded_mem = FALSE;
2909 	task->task_could_also_use_secluded_mem = FALSE;
2910 
2911 	if (task->task_suppressed_secluded) {
2912 		stop_secluded_suppression(task);
2913 	}
2914 #endif /* CONFIG_SECLUDED_MEMORY */
2915 
2916 	if (!task->active) {
2917 		/*
2918 		 *	Task is already being terminated.
2919 		 *	Just return an error. If we are dying, this will
2920 		 *	just get us to our AST special handler and that
2921 		 *	will get us to finalize the termination of ourselves.
2922 		 */
2923 		task_unlock(task);
2924 		if (self_task != task) {
2925 			task_unlock(self_task);
2926 		}
2927 
2928 		return KERN_FAILURE;
2929 	}
2930 
2931 	if (task_corpse_pending_report(task)) {
2932 		/*
2933 		 *	Task is marked for reporting as corpse.
2934 		 *	Just return an error. This will
2935 		 *	just get us to our AST special handler and that
2936 		 *	will get us to finish the path to death
2937 		 */
2938 		task_unlock(task);
2939 		if (self_task != task) {
2940 			task_unlock(self_task);
2941 		}
2942 
2943 		return KERN_FAILURE;
2944 	}
2945 
2946 	if (self_task != task) {
2947 		task_unlock(self_task);
2948 	}
2949 
2950 	/*
2951 	 * Make sure the current thread does not get aborted out of
2952 	 * the waits inside these operations.
2953 	 */
2954 	interrupt_save = thread_interrupt_level(THREAD_UNINT);
2955 
2956 	/*
2957 	 *	Indicate that we want all the threads to stop executing
2958 	 *	at user space by holding the task (we would have held
2959 	 *	each thread independently in thread_terminate_internal -
2960 	 *	but this way we may be more likely to already find it
2961 	 *	held there).  Mark the task inactive, and prevent
2962 	 *	further task operations via the task port.
2963 	 *
2964 	 *	The vm_map and ipc_space must exist until this function returns,
2965 	 *	convert_port_to_{map,space}_with_flavor relies on this behavior.
2966 	 */
2967 	task_hold_locked(task);
2968 	task->active = FALSE;
2969 	ipc_task_disable(task);
2970 
2971 #if CONFIG_TELEMETRY
2972 	/*
2973 	 * Notify telemetry that this task is going away.
2974 	 */
2975 	telemetry_task_ctl_locked(task, TF_TELEMETRY, 0);
2976 #endif
2977 
2978 	/*
2979 	 *	Terminate each thread in the task.
2980 	 */
2981 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
2982 		thread_terminate_internal(thread);
2983 	}
2984 
2985 #ifdef MACH_BSD
2986 	void *bsd_info = get_bsdtask_info(task);
2987 	if (bsd_info != NULL) {
2988 		pid = proc_pid(bsd_info);
2989 	}
2990 #endif /* MACH_BSD */
2991 
2992 	task_unlock(task);
2993 
2994 	proc_set_task_policy(task, TASK_POLICY_ATTRIBUTE,
2995 	    TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
2996 
2997 	/* Early object reap phase */
2998 
2999 // PR-17045188: Revisit implementation
3000 //        task_partial_reap(task, pid);
3001 
3002 #if CONFIG_TASKWATCH
3003 	/*
3004 	 * remove all task watchers
3005 	 */
3006 	task_removewatchers(task);
3007 
3008 #endif /* CONFIG_TASKWATCH */
3009 
3010 	/*
3011 	 *	Destroy all synchronizers owned by the task.
3012 	 */
3013 	task_synchronizer_destroy_all(task);
3014 
3015 	/*
3016 	 *	Clear the watchport boost on the task.
3017 	 */
3018 	task_remove_turnstile_watchports(task);
3019 
3020 	/*
3021 	 *	Destroy the IPC space, leaving just a reference for it.
3022 	 */
3023 	ipc_space_terminate(task->itk_space);
3024 
3025 #if 00
3026 	/* if some ledgers go negative on tear-down again... */
3027 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
3028 	    task_ledgers.phys_footprint);
3029 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
3030 	    task_ledgers.internal);
3031 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
3032 	    task_ledgers.iokit_mapped);
3033 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
3034 	    task_ledgers.alternate_accounting);
3035 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
3036 	    task_ledgers.alternate_accounting_compressed);
3037 #endif
3038 
3039 #if CONFIG_DEFERRED_RECLAIM
3040 	/*
3041 	 * Remove this task's reclaim buffer from the global queues.
3042 	 */
3043 	if (task->deferred_reclamation_metadata != NULL) {
3044 		vm_deferred_reclamation_buffer_uninstall(task->deferred_reclamation_metadata);
3045 	}
3046 #endif /* CONFIG_DEFERRED_RECLAIM */
3047 
3048 	/*
3049 	 * If the current thread is a member of the task
3050 	 * being terminated, then the last reference to
3051 	 * the task will not be dropped until the thread
3052 	 * is finally reaped.  To avoid incurring the
3053 	 * expense of removing the address space regions
3054 	 * at reap time, we do it explicitly here.
3055 	 */
3056 
3057 #if MACH_ASSERT
3058 	/*
3059 	 * Identify the pmap's process, in case the pmap ledgers drift
3060 	 * and we have to report it.
3061 	 */
3062 	char procname[17];
3063 	void *proc = get_bsdtask_info(task);
3064 	if (proc) {
3065 		pid = proc_pid(proc);
3066 		proc_name_kdp(proc, procname, sizeof(procname));
3067 	} else {
3068 		pid = 0;
3069 		strlcpy(procname, "<unknown>", sizeof(procname));
3070 	}
3071 	pmap_set_process(task->map->pmap, pid, procname);
3072 	if (vm_map_page_shift(task->map) < (int)PAGE_SHIFT) {
3073 		DEBUG4K_LIFE("map %p procname: %s\n", task->map, procname);
3074 		if (debug4k_panic_on_terminate) {
3075 			panic("DEBUG4K: %s:%d %d[%s] map %p", __FUNCTION__, __LINE__, pid, procname, task->map);
3076 		}
3077 	}
3078 #endif /* MACH_ASSERT */
3079 
3080 	vm_map_terminate(task->map);
3081 
3082 	/* release our shared region */
3083 	vm_shared_region_set(task, NULL);
3084 
3085 #if __has_feature(ptrauth_calls)
3086 	task_set_shared_region_id(task, NULL);
3087 #endif /* __has_feature(ptrauth_calls) */
3088 
3089 	lck_mtx_lock(&tasks_threads_lock);
3090 	queue_remove(&tasks, task, task_t, tasks);
3091 	queue_enter(&terminated_tasks, task, task_t, tasks);
3092 	tasks_count--;
3093 	terminated_tasks_count++;
3094 	lck_mtx_unlock(&tasks_threads_lock);
3095 
3096 	/*
3097 	 * We no longer need to guard against being aborted, so restore
3098 	 * the previous interruptible state.
3099 	 */
3100 	thread_interrupt_level(interrupt_save);
3101 
3102 #if KPC
3103 	/* force the task to release all ctrs */
3104 	if (task->t_kpc & TASK_KPC_FORCED_ALL_CTRS) {
3105 		kpc_force_all_ctrs(task, 0);
3106 	}
3107 #endif /* KPC */
3108 
3109 #if CONFIG_COALITIONS
3110 	/*
3111 	 * Leave the coalition for a corpse task or a task that
3112 	 * never had any active threads (e.g. fork or exec failure).
3113 	 * For a task with active threads, the task will be removed
3114 	 * from the coalition by the last terminating thread.
3115 	 */
3116 	if (task->active_thread_count == 0) {
3117 		coalitions_remove_task(task);
3118 	}
3119 #endif
3120 
3121 #if CONFIG_FREEZE
3122 	extern int      vm_compressor_available;
3123 	if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE && vm_compressor_available) {
3124 		task_disown_frozen_csegs(task);
3125 		assert(queue_empty(&task->task_frozen_cseg_q));
3126 	}
3127 #endif /* CONFIG_FREEZE */
3128 
3129 
3130 	/*
3131 	 * Get rid of the task active reference on itself.
3132 	 */
3133 	task_deallocate_grp(task, TASK_GRP_INTERNAL);
3134 
3135 	return KERN_SUCCESS;
3136 }
3137 
3138 void
3139 tasks_system_suspend(boolean_t suspend)
3140 {
3141 	task_t task;
3142 
3143 	KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SUSPEND_USERSPACE) |
3144 	    (suspend ? DBG_FUNC_START : DBG_FUNC_END));
3145 
3146 	lck_mtx_lock(&tasks_threads_lock);
3147 	assert(tasks_suspend_state != suspend);
3148 	tasks_suspend_state = suspend;
3149 	queue_iterate(&tasks, task, task_t, tasks) {
3150 		if (task == kernel_task) {
3151 			continue;
3152 		}
3153 		suspend ? task_suspend_internal(task) : task_resume_internal(task);
3154 	}
3155 	lck_mtx_unlock(&tasks_threads_lock);
3156 }
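/*
 * Illustrative sketch (not part of the original source): a system
 * sleep/wake path would call tasks_system_suspend() as a TRUE/FALSE
 * pair, matching the assert(tasks_suspend_state != suspend) above.
 * The hook name below is hypothetical.
 */
#if 0 /* example only */
static void
example_system_sleep_cycle(void)
{
	tasks_system_suspend(TRUE);     /* stop every user task */
	/* ... platform enters and later leaves its low-power state ... */
	tasks_system_suspend(FALSE);    /* resume them symmetrically */
}
#endif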
3157 
3158 /*
3159  * task_start_halt:
3160  *
3161  *      Shut the current task down (except for the current thread) in
3162  *	preparation for dramatic changes to the task (probably exec).
3163  *	We hold the task and mark all other threads in the task for
3164  *	termination.
3165  */
3166 kern_return_t
3167 task_start_halt(task_t task)
3168 {
3169 	kern_return_t kr = KERN_SUCCESS;
3170 	task_lock(task);
3171 	kr = task_start_halt_locked(task, FALSE);
3172 	task_unlock(task);
3173 	return kr;
3174 }
3175 
3176 static kern_return_t
3177 task_start_halt_locked(task_t task, boolean_t should_mark_corpse)
3178 {
3179 	thread_t thread, self;
3180 	uint64_t dispatchqueue_offset;
3181 
3182 	assert(task != kernel_task);
3183 
3184 	self = current_thread();
3185 
3186 	if (task != get_threadtask(self) && !task_is_a_corpse_fork(task)) {
3187 		return KERN_INVALID_ARGUMENT;
3188 	}
3189 
3190 	if (!should_mark_corpse &&
3191 	    (task->halting || !task->active || !self->active)) {
3192 		/*
3193 		 * The task or the current thread is already being
3194 		 * terminated.  Hurry up and return out of the current kernel
3195 		 * context so that we run our AST special handler to terminate
3196 		 * ourselves.  If should_mark_corpse is set, corpse creation
3197 		 * might have raced with exec; let the corpse creation
3198 		 * continue, and once the current thread reaches the AST, the
3199 		 * thread in exec will be woken up from task_complete_halt.
3200 		 * Exec will fail because the proc was marked for exit.  Once
3201 		 * the thread in exec reaches the AST, it will call proc_exit
3202 		 * and deliver the EXC_CORPSE_NOTIFY.
3203 		 */
3204 		return KERN_FAILURE;
3205 	}
3206 
3207 	/* Thread creation will fail after this point of no return. */
3208 	task->halting = TRUE;
3209 
3210 	/*
3211 	 * Mark all the threads to keep them from starting any more
3212 	 * user-level execution. The thread_terminate_internal code
3213 	 * would do this on a thread by thread basis anyway, but this
3214 	 * gives us a better chance of not having to wait there.
3215 	 */
3216 	task_hold_locked(task);
3217 	dispatchqueue_offset = get_dispatchqueue_offset_from_proc(get_bsdtask_info(task));
3218 
3219 	/*
3220 	 * Terminate all the other threads in the task.
3221 	 */
3222 	queue_iterate(&task->threads, thread, thread_t, task_threads)
3223 	{
3224 		/*
3225 		 * Remove priority throttles so that threads terminate in a timely
3226 		 * manner. This has to be done after task_hold_locked() traps all
3227 		 * threads to the AST, but before threads are marked inactive in
3228 		 * thread_terminate_internal(). Takes the thread mutex lock.
3229 		 *
3230 		 * We need the task_is_a_corpse() check so that we don't accidentally
3231 		 * update policy for tasks that are doing posix_spawn().
3232 		 *
3233 		 * See: thread_policy_update_tasklocked().
3234 		 */
3235 		if (task_is_a_corpse(task)) {
3236 			proc_set_thread_policy(thread, TASK_POLICY_ATTRIBUTE,
3237 			    TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
3238 		}
3239 
3240 		if (should_mark_corpse) {
3241 			thread_mtx_lock(thread);
3242 			thread->inspection = TRUE;
3243 			thread_mtx_unlock(thread);
3244 		}
3245 		if (thread != self) {
3246 			thread_terminate_internal(thread);
3247 		}
3248 	}
3249 	task->dispatchqueue_offset = dispatchqueue_offset;
3250 
3251 	task_release_locked(task);
3252 
3253 	return KERN_SUCCESS;
3254 }
3255 
3256 
3257 /*
3258  * task_complete_halt:
3259  *
3260  *	Complete task halt by waiting for threads to terminate, then clean
3261  *	up task resources (VM, port namespace, etc...) and then let the
3262  *	current thread go in the (practically empty) task context.
3263  *
3264  *	Note: task->halting flag is not cleared in order to avoid creation
3265  *	of new thread in old exec'ed task.
3266  */
3267 void
3268 task_complete_halt(task_t task)
3269 {
3270 	task_lock(task);
3271 	assert(task->halting);
3272 	assert(task == current_task());
3273 
3274 	/*
3275 	 *	Wait for the other threads to get shut down.
3276 	 *      When the last other thread is reaped, we'll be
3277 	 *	woken up.
3278 	 */
3279 	if (task->thread_count > 1) {
3280 		assert_wait((event_t)&task->halting, THREAD_UNINT);
3281 		task_unlock(task);
3282 		thread_block(THREAD_CONTINUE_NULL);
3283 	} else {
3284 		task_unlock(task);
3285 	}
3286 
3287 	/*
3288 	 *	Give the machine dependent code a chance
3289 	 *	to perform cleanup of task-level resources
3290 	 *	associated with the current thread before
3291 	 *	ripping apart the task.
3292 	 */
3293 	machine_task_terminate(task);
3294 
3295 	/*
3296 	 *	Destroy all synchronizers owned by the task.
3297 	 */
3298 	task_synchronizer_destroy_all(task);
3299 
3300 	/*
3301 	 *	Terminate the IPC space.  A long time ago,
3302 	 *	this used to be ipc_space_clean() which would
3303 	 *	keep the space active but hollow it.
3304 	 *
3305 	 *	We really do not need this semantics given
3306 	 *	tasks die with exec now.
3307 	 */
3308 	ipc_space_terminate(task->itk_space);
3309 
3310 	/*
3311 	 * Clean out the address space, as we are going to be
3312 	 * getting a new one.
3313 	 */
3314 	vm_map_terminate(task->map);
3315 
3316 	/*
3317 	 * Kick out any IOKitUser handles to the task. At best they're stale,
3318 	 * at worst someone is racing a SUID exec.
3319 	 */
3320 	iokit_task_terminate(task);
3321 }
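/*
 * Illustrative sketch (not part of the original source): the two-phase
 * halt protocol as an exec-style caller might drive it.  On success,
 * task_start_halt() is the point of no return; task_complete_halt()
 * then waits for the other threads and recycles the VM map and IPC
 * space.  The wrapper name is hypothetical; per the asserts above, it
 * must run on a thread of the task being halted.
 */
#if 0 /* example only */
static kern_return_t
example_halt_for_exec(task_t task)
{
	kern_return_t kr;

	kr = task_start_halt(task);
	if (kr != KERN_SUCCESS) {
		/* task or thread already dying; the AST will finish it */
		return kr;
	}
	task_complete_halt(task);
	return KERN_SUCCESS;
}
#endif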
3322 
3323 #ifdef CONFIG_TASK_SUSPEND_STATS
3324 
3325 static void
3326 _task_mark_suspend_source(task_t task)
3327 {
3328 	int idx;
3329 	task_suspend_stats_t stats;
3330 	task_suspend_source_t source;
3331 	task_lock_assert_owned(task);
3332 	stats = &task->t_suspend_stats;
3333 
3334 	idx = stats->tss_count % TASK_SUSPEND_SOURCES_MAX;
3335 	source = &task->t_suspend_sources[idx];
3336 	bzero(source, sizeof(*source));
3337 
3338 	source->tss_time = mach_absolute_time();
3339 	source->tss_tid = current_thread()->thread_id;
3340 	source->tss_pid = task_pid(current_task());
3341 	task_best_name(current_task(), source->tss_procname, sizeof(source->tss_procname));
3342 
3343 	stats->tss_count++;
3344 }
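/*
 * Illustrative sketch (not part of the original source): because
 * t_suspend_sources is a ring buffer indexed by
 * tss_count % TASK_SUSPEND_SOURCES_MAX, the recorded suspenders can
 * be replayed oldest-first as below.  The visitor function and the
 * integer widths are hypothetical; the task lock must be held.
 */
#if 0 /* example only */
static void
example_visit_suspend_sources(task_t task,
    void (*visit)(task_suspend_source_t source))
{
	uint64_t count = task->t_suspend_stats.tss_count;
	uint64_t first = (count > TASK_SUSPEND_SOURCES_MAX) ?
	    (count - TASK_SUSPEND_SOURCES_MAX) : 0;

	task_lock_assert_owned(task);
	for (uint64_t i = first; i < count; i++) {
		visit(&task->t_suspend_sources[i % TASK_SUSPEND_SOURCES_MAX]);
	}
}
#endif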
3345 
3346 static inline void
3347 _task_mark_suspend_start(task_t task)
3348 {
3349 	task_lock_assert_owned(task);
3350 	task->t_suspend_stats.tss_last_start = mach_absolute_time();
3351 }
3352 
3353 static inline void
3354 _task_mark_suspend_end(task_t task)
3355 {
3356 	task_lock_assert_owned(task);
3357 	task->t_suspend_stats.tss_last_end = mach_absolute_time();
3358 	task->t_suspend_stats.tss_duration += (task->t_suspend_stats.tss_last_end -
3359 	    task->t_suspend_stats.tss_last_start);
3360 }
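/*
 * Illustrative sketch (not part of the original source): tss_duration
 * accumulates mach_absolute_time() deltas, i.e. timebase ticks rather
 * than nanoseconds, so a consumer would convert with the standard
 * in-kernel KPI as below (assuming the field is a plain uint64_t tick
 * count, which is what the accumulation above implies).
 */
#if 0 /* example only */
static uint64_t
example_suspend_duration_ns(task_t task)
{
	uint64_t ns = 0;

	absolutetime_to_nanoseconds(task->t_suspend_stats.tss_duration, &ns);
	return ns;
}
#endif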
3361 
3362 static kern_return_t
3363 _task_get_suspend_stats_locked(task_t task, task_suspend_stats_t stats)
3364 {
3365 	if (task == TASK_NULL || stats == NULL) {
3366 		return KERN_INVALID_ARGUMENT;
3367 	}
3368 	task_lock_assert_owned(task);
3369 	memcpy(stats, &task->t_suspend_stats, sizeof(task->t_suspend_stats));
3370 	return KERN_SUCCESS;
3371 }
3372 
3373 static kern_return_t
3374 _task_get_suspend_sources_locked(task_t task, task_suspend_source_t sources)
3375 {
3376 	if (task == TASK_NULL || sources == NULL) {
3377 		return KERN_INVALID_ARGUMENT;
3378 	}
3379 	task_lock_assert_owned(task);
3380 	memcpy(sources, task->t_suspend_sources,
3381 	    sizeof(struct task_suspend_source_s) * TASK_SUSPEND_SOURCES_MAX);
3382 	return KERN_SUCCESS;
3383 }
3384 
3385 #endif /* CONFIG_TASK_SUSPEND_STATS */
3386 
3387 kern_return_t
3388 task_get_suspend_stats(task_t task, task_suspend_stats_t stats)
3389 {
3390 #ifdef CONFIG_TASK_SUSPEND_STATS
3391 	kern_return_t kr;
3392 	if (task == TASK_NULL || stats == NULL) {
3393 		return KERN_INVALID_ARGUMENT;
3394 	}
3395 	task_lock(task);
3396 	kr = _task_get_suspend_stats_locked(task, stats);
3397 	task_unlock(task);
3398 	return kr;
3399 #else /* CONFIG_TASK_SUSPEND_STATS */
3400 	(void)task;
3401 	(void)stats;
3402 	return KERN_NOT_SUPPORTED;
3403 #endif
3404 }
3405 
3406 kern_return_t
3407 task_get_suspend_stats_kdp(task_t task, task_suspend_stats_t stats)
3408 {
3409 #ifdef CONFIG_TASK_SUSPEND_STATS
3410 	if (task == TASK_NULL || stats == NULL) {
3411 		return KERN_INVALID_ARGUMENT;
3412 	}
3413 	memcpy(stats, &task->t_suspend_stats, sizeof(task->t_suspend_stats));
3414 	return KERN_SUCCESS;
3415 #else /* CONFIG_TASK_SUSPEND_STATS */
3416 #pragma unused(task, stats)
3417 	return KERN_NOT_SUPPORTED;
3418 #endif /* CONFIG_TASK_SUSPEND_STATS */
3419 }
3420 
3421 kern_return_t
3422 task_get_suspend_sources(task_t task, task_suspend_source_array_t sources)
3423 {
3424 #ifdef CONFIG_TASK_SUSPEND_STATS
3425 	kern_return_t kr;
3426 	if (task == TASK_NULL || sources == NULL) {
3427 		return KERN_INVALID_ARGUMENT;
3428 	}
3429 	task_lock(task);
3430 	kr = _task_get_suspend_sources_locked(task, sources);
3431 	task_unlock(task);
3432 	return kr;
3433 #else /* CONFIG_TASK_SUSPEND_STATS */
3434 	(void)task;
3435 	(void)sources;
3436 	return KERN_NOT_SUPPORTED;
3437 #endif
3438 }
3439 
3440 kern_return_t
3441 task_get_suspend_sources_kdp(task_t task, task_suspend_source_array_t sources)
3442 {
3443 #ifdef CONFIG_TASK_SUSPEND_STATS
3444 	if (task == TASK_NULL || sources == NULL) {
3445 		return KERN_INVALID_ARGUMENT;
3446 	}
3447 	memcpy(sources, task->t_suspend_sources,
3448 	    sizeof(struct task_suspend_source_s) * TASK_SUSPEND_SOURCES_MAX);
3449 	return KERN_SUCCESS;
3450 #else /* CONFIG_TASK_SUSPEND_STATS */
3451 #pragma unused(task, sources)
3452 	return KERN_NOT_SUPPORTED;
3453 #endif
3454 }
3455 
3456 /*
3457  *	task_hold_locked:
3458  *
3459  *	Suspend execution of the specified task.
3460  *	This is a recursive-style suspension of the task, a count of
3461  *	suspends is maintained.
3462  *
3463  *	CONDITIONS: the task is locked and active.
3464  */
3465 void
3466 task_hold_locked(
3467 	task_t          task)
3468 {
3469 	thread_t        thread;
3470 	void *bsd_info = get_bsdtask_info(task);
3471 
3472 	assert(task->active);
3473 
3474 	if (task->suspend_count++ > 0) {
3475 		return;
3476 	}
3477 
3478 	if (bsd_info) {
3479 		workq_proc_suspended(bsd_info);
3480 	}
3481 
3482 	/*
3483 	 *	Iterate through all the threads and hold them.
3484 	 */
3485 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
3486 		thread_mtx_lock(thread);
3487 		thread_hold(thread);
3488 		thread_mtx_unlock(thread);
3489 	}
3490 
3491 #ifdef CONFIG_TASK_SUSPEND_STATS
3492 	_task_mark_suspend_start(task);
3493 #endif
3494 }
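/*
 * Illustrative sketch (not part of the original source): suspend_count
 * makes the kernel hold recursive, so nested hold/release pairs
 * compose; only the outermost transition actually stops or restarts
 * the threads.  Assumes the task is locked and active throughout.
 */
#if 0 /* example only */
static void
example_nested_hold(task_t task)
{
	task_hold_locked(task);         /* 0 -> 1: threads get held */
	task_hold_locked(task);         /* 1 -> 2: count bump only */
	task_release_locked(task);      /* 2 -> 1: still held */
	task_release_locked(task);      /* 1 -> 0: threads released */
}
#endif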
3495 
3496 /*
3497  *	task_hold:
3498  *
3499 	 * Same as the internal routine above, except that it must lock
3500 	 * and verify that the task is active.  This differs from task_suspend
3501 	 * in that it places a kernel hold on the task rather than just a
3502 	 * user-level hold.  This keeps users from over-resuming and setting
3503 	 * it running out from under the kernel.
3504  *
3505  *      CONDITIONS: the caller holds a reference on the task
3506  */
3507 kern_return_t
3508 task_hold(
3509 	task_t          task)
3510 {
3511 	if (task == TASK_NULL) {
3512 		return KERN_INVALID_ARGUMENT;
3513 	}
3514 
3515 	task_lock(task);
3516 
3517 	if (!task->active) {
3518 		task_unlock(task);
3519 
3520 		return KERN_FAILURE;
3521 	}
3522 
3523 #ifdef CONFIG_TASK_SUSPEND_STATS
3524 	_task_mark_suspend_source(task);
3525 #endif /* CONFIG_TASK_SUSPEND_STATS */
3526 	task_hold_locked(task);
3527 	task_unlock(task);
3528 
3529 	return KERN_SUCCESS;
3530 }
3531 
3532 kern_return_t
3533 task_wait(
3534 	task_t          task,
3535 	boolean_t       until_not_runnable)
3536 {
3537 	if (task == TASK_NULL) {
3538 		return KERN_INVALID_ARGUMENT;
3539 	}
3540 
3541 	task_lock(task);
3542 
3543 	if (!task->active) {
3544 		task_unlock(task);
3545 
3546 		return KERN_FAILURE;
3547 	}
3548 
3549 	task_wait_locked(task, until_not_runnable);
3550 	task_unlock(task);
3551 
3552 	return KERN_SUCCESS;
3553 }
3554 
3555 /*
3556  *	task_wait_locked:
3557  *
3558  *	Wait for all threads in task to stop.
3559  *
3560  * Conditions:
3561  *	Called with task locked, active, and held.
3562  */
3563 void
3564 task_wait_locked(
3565 	task_t          task,
3566 	boolean_t               until_not_runnable)
3567 {
3568 	thread_t        thread, self;
3569 
3570 	assert(task->active);
3571 	assert(task->suspend_count > 0);
3572 
3573 	self = current_thread();
3574 
3575 	/*
3576 	 *	Iterate through all the threads and wait for them to
3577 	 *	stop.  Do not wait for the current thread if it is within
3578 	 *	the task.
3579 	 */
3580 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
3581 		if (thread != self) {
3582 			thread_wait(thread, until_not_runnable);
3583 		}
3584 	}
3585 }
3586 
3587 boolean_t
3588 task_is_app_suspended(task_t task)
3589 {
3590 	return task->pidsuspended;
3591 }
3592 
3593 /*
3594  *	task_release_locked:
3595  *
3596  *	Release a kernel hold on a task.
3597  *
3598  *      CONDITIONS: the task is locked and active
3599  */
3600 void
3601 task_release_locked(
3602 	task_t          task)
3603 {
3604 	thread_t        thread;
3605 	void *bsd_info = get_bsdtask_info(task);
3606 
3607 	assert(task->active);
3608 	assert(task->suspend_count > 0);
3609 
3610 	if (--task->suspend_count > 0) {
3611 		return;
3612 	}
3613 
3614 	if (bsd_info) {
3615 		workq_proc_resumed(bsd_info);
3616 	}
3617 
3618 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
3619 		thread_mtx_lock(thread);
3620 		thread_release(thread);
3621 		thread_mtx_unlock(thread);
3622 	}
3623 
3624 #if CONFIG_TASK_SUSPEND_STATS
3625 	_task_mark_suspend_end(task);
3626 #endif
3627 }
3628 
3629 /*
3630  *	task_release:
3631  *
3632  *	Same as the internal routine above, except that it must lock
3633  *	and verify that the task is active.
3634  *
3635  *      CONDITIONS: The caller holds a reference to the task
3636  */
3637 kern_return_t
3638 task_release(
3639 	task_t          task)
3640 {
3641 	if (task == TASK_NULL) {
3642 		return KERN_INVALID_ARGUMENT;
3643 	}
3644 
3645 	task_lock(task);
3646 
3647 	if (!task->active) {
3648 		task_unlock(task);
3649 
3650 		return KERN_FAILURE;
3651 	}
3652 
3653 	task_release_locked(task);
3654 	task_unlock(task);
3655 
3656 	return KERN_SUCCESS;
3657 }
3658 
3659 static kern_return_t
3660 task_threads_internal(
3661 	task_t                      task,
3662 	thread_act_array_t         *threads_out,
3663 	mach_msg_type_number_t     *countp,
3664 	mach_thread_flavor_t        flavor)
3665 {
3666 	mach_msg_type_number_t  actual, count, count_needed;
3667 	thread_t               *thread_list;
3668 	thread_t                thread;
3669 	unsigned int            i;
3670 
3671 	count = 0;
3672 	thread_list = NULL;
3673 
3674 	if (task == TASK_NULL) {
3675 		return KERN_INVALID_ARGUMENT;
3676 	}
3677 
3678 	assert(flavor <= THREAD_FLAVOR_INSPECT);
3679 
3680 	for (;;) {
3681 		task_lock(task);
3682 		if (!task->active) {
3683 			task_unlock(task);
3684 
3685 			kfree_type(thread_t, count, thread_list);
3686 			return KERN_FAILURE;
3687 		}
3688 
3689 		count_needed = actual = task->thread_count;
3690 		if (count_needed <= count) {
3691 			break;
3692 		}
3693 
3694 		/* unlock the task and allocate more memory */
3695 		task_unlock(task);
3696 
3697 		kfree_type(thread_t, count, thread_list);
3698 		count = count_needed;
3699 		thread_list = kalloc_type(thread_t, count, Z_WAITOK);
3700 
3701 		if (thread_list == NULL) {
3702 			return KERN_RESOURCE_SHORTAGE;
3703 		}
3704 	}
3705 
3706 	i = 0;
3707 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
3708 		assert(i < actual);
3709 		thread_reference(thread);
3710 		thread_list[i++] = thread;
3711 	}
3712 
3713 	count_needed = actual;
3714 
3715 	/* can unlock task now that we've got the thread refs */
3716 	task_unlock(task);
3717 
3718 	if (actual == 0) {
3719 		/* no threads, so return null pointer and deallocate memory */
3720 
3721 		*threads_out = NULL;
3722 		*countp = 0;
3723 		kfree_type(thread_t, count, thread_list);
3724 	} else {
3725 		/* if we allocated too much, must copy */
3726 		if (count_needed < count) {
3727 			void *newaddr;
3728 
3729 			newaddr = kalloc_type(thread_t, count_needed, Z_WAITOK);
3730 			if (newaddr == NULL) {
3731 				for (i = 0; i < actual; ++i) {
3732 					thread_deallocate(thread_list[i]);
3733 				}
3734 				kfree_type(thread_t, count, thread_list);
3735 				return KERN_RESOURCE_SHORTAGE;
3736 			}
3737 
3738 			bcopy(thread_list, newaddr, count_needed * sizeof(thread_t));
3739 			kfree_type(thread_t, count, thread_list);
3740 			thread_list = (thread_t *)newaddr;
3741 		}
3742 
3743 		*threads_out = thread_list;
3744 		*countp = actual;
3745 
3746 		/* do the conversion that MIG should handle */
3747 
3748 		switch (flavor) {
3749 		case THREAD_FLAVOR_CONTROL:
3750 			if (task == current_task()) {
3751 				for (i = 0; i < actual; ++i) {
3752 					((ipc_port_t *) thread_list)[i] = convert_thread_to_port_pinned(thread_list[i]);
3753 				}
3754 			} else {
3755 				for (i = 0; i < actual; ++i) {
3756 					((ipc_port_t *) thread_list)[i] = convert_thread_to_port(thread_list[i]);
3757 				}
3758 			}
3759 			break;
3760 		case THREAD_FLAVOR_READ:
3761 			for (i = 0; i < actual; ++i) {
3762 				((ipc_port_t *) thread_list)[i] = convert_thread_read_to_port(thread_list[i]);
3763 			}
3764 			break;
3765 		case THREAD_FLAVOR_INSPECT:
3766 			for (i = 0; i < actual; ++i) {
3767 				((ipc_port_t *) thread_list)[i] = convert_thread_inspect_to_port(thread_list[i]);
3768 			}
3769 			break;
3770 		}
3771 	}
3772 
3773 	return KERN_SUCCESS;
3774 }
3775 
3776 kern_return_t
3777 task_threads(
3778 	task_t                      task,
3779 	thread_act_array_t         *threads_out,
3780 	mach_msg_type_number_t     *count)
3781 {
3782 	return task_threads_internal(task, threads_out, count, THREAD_FLAVOR_CONTROL);
3783 }
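/*
 * Illustrative sketch (not part of the original source): the
 * user-space view of this MIG routine.  The caller receives one send
 * right per thread plus an out-of-line array, owns both, and must
 * release both.
 */
#if 0 /* example only: user-space code, not kernel code */
#include <mach/mach.h>

static void
example_list_threads(task_t task)
{
	thread_act_array_t threads;
	mach_msg_type_number_t count;

	if (task_threads(task, &threads, &count) != KERN_SUCCESS) {
		return;
	}
	for (mach_msg_type_number_t i = 0; i < count; i++) {
		mach_port_deallocate(mach_task_self(), threads[i]);
	}
	vm_deallocate(mach_task_self(), (vm_address_t)threads,
	    count * sizeof(threads[0]));
}
#endif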
3784 
3785 
3786 kern_return_t
3787 task_threads_from_user(
3788 	mach_port_t                 port,
3789 	thread_act_array_t         *threads_out,
3790 	mach_msg_type_number_t     *count)
3791 {
3792 	ipc_kobject_type_t kotype;
3793 	kern_return_t kr;
3794 
3795 	task_t task = convert_port_to_task_inspect_no_eval(port);
3796 
3797 	if (task == TASK_NULL) {
3798 		return KERN_INVALID_ARGUMENT;
3799 	}
3800 
3801 	kotype = ip_kotype(port);
3802 
3803 	switch (kotype) {
3804 	case IKOT_TASK_CONTROL:
3805 		kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_CONTROL);
3806 		break;
3807 	case IKOT_TASK_READ:
3808 		kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_READ);
3809 		break;
3810 	case IKOT_TASK_INSPECT:
3811 		kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_INSPECT);
3812 		break;
3813 	default:
3814 		panic("strange kobject type");
3815 		break;
3816 	}
3817 
3818 	task_deallocate(task);
3819 	return kr;
3820 }
3821 
3822 #define TASK_HOLD_NORMAL        0
3823 #define TASK_HOLD_PIDSUSPEND    1
3824 #define TASK_HOLD_LEGACY        2
3825 #define TASK_HOLD_LEGACY_ALL    3
3826 
3827 static kern_return_t
3828 place_task_hold(
3829 	task_t task,
3830 	int mode)
3831 {
3832 	if (!task->active && !task_is_a_corpse(task)) {
3833 		return KERN_FAILURE;
3834 	}
3835 
3836 	/* Return success for corpse task */
3837 	if (task_is_a_corpse(task)) {
3838 		return KERN_SUCCESS;
3839 	}
3840 
3841 	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_IPC, MACH_TASK_SUSPEND),
3842 	    task_pid(task),
3843 	    task->thread_count > 0 ?((thread_t)queue_first(&task->threads))->thread_id : 0,
3844 	    task->user_stop_count, task->user_stop_count + 1);
3845 
3846 #if MACH_ASSERT
3847 	current_task()->suspends_outstanding++;
3848 #endif
3849 
3850 	if (mode == TASK_HOLD_LEGACY) {
3851 		task->legacy_stop_count++;
3852 	}
3853 
3854 #ifdef CONFIG_TASK_SUSPEND_STATS
3855 	_task_mark_suspend_source(task);
3856 #endif /* CONFIG_TASK_SUSPEND_STATS */
3857 
3858 	if (task->user_stop_count++ > 0) {
3859 		/*
3860 		 *	If the stop count was positive, the task is
3861 		 *	already stopped and we can exit.
3862 		 */
3863 		return KERN_SUCCESS;
3864 	}
3865 
3866 	/*
3867 	 * Put a kernel-level hold on the threads in the task (all
3868 	 * user-level task suspensions added together represent a
3869 	 * single kernel-level hold).  We then wait for the threads
3870 	 * to stop executing user code.
3871 	 */
3872 	task_hold_locked(task);
3873 	task_wait_locked(task, FALSE);
3874 
3875 	return KERN_SUCCESS;
3876 }
3877 
3878 static kern_return_t
3879 release_task_hold(
3880 	task_t          task,
3881 	int                     mode)
3882 {
3883 	boolean_t release = FALSE;
3884 
3885 	if (!task->active && !task_is_a_corpse(task)) {
3886 		return KERN_FAILURE;
3887 	}
3888 
3889 	/* Return success for corpse task */
3890 	if (task_is_a_corpse(task)) {
3891 		return KERN_SUCCESS;
3892 	}
3893 
3894 	if (mode == TASK_HOLD_PIDSUSPEND) {
3895 		if (task->pidsuspended == FALSE) {
3896 			return KERN_FAILURE;
3897 		}
3898 		task->pidsuspended = FALSE;
3899 	}
3900 
3901 	if (task->user_stop_count > (task->pidsuspended ? 1 : 0)) {
3902 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3903 		    MACHDBG_CODE(DBG_MACH_IPC, MACH_TASK_RESUME) | DBG_FUNC_NONE,
3904 		    task_pid(task), ((thread_t)queue_first(&task->threads))->thread_id,
3905 		    task->user_stop_count, mode, task->legacy_stop_count);
3906 
3907 #if MACH_ASSERT
3908 		/*
3909 		 * This is obviously not robust; if we suspend one task and then resume a different one,
3910 		 * we'll fly under the radar. This is only meant to catch the common case of a crashed
3911 		 * or buggy suspender.
3912 		 */
3913 		current_task()->suspends_outstanding--;
3914 #endif
3915 
3916 		if (mode == TASK_HOLD_LEGACY_ALL) {
3917 			if (task->legacy_stop_count >= task->user_stop_count) {
3918 				task->user_stop_count = 0;
3919 				release = TRUE;
3920 			} else {
3921 				task->user_stop_count -= task->legacy_stop_count;
3922 			}
3923 			task->legacy_stop_count = 0;
3924 		} else {
3925 			if (mode == TASK_HOLD_LEGACY && task->legacy_stop_count > 0) {
3926 				task->legacy_stop_count--;
3927 			}
3928 			if (--task->user_stop_count == 0) {
3929 				release = TRUE;
3930 			}
3931 		}
3932 	} else {
3933 		return KERN_FAILURE;
3934 	}
3935 
3936 	/*
3937 	 *	Release the task if necessary.
3938 	 */
3939 	if (release) {
3940 		task_release_locked(task);
3941 	}
3942 
3943 	return KERN_SUCCESS;
3944 }
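/*
 * Bookkeeping note: user_stop_count counts every outstanding
 * user-level suspension (legacy task_suspend() calls, task_suspend2()
 * tokens, and a pidsuspend), while legacy_stop_count tracks only the
 * legacy subset, so legacy_stop_count never exceeds user_stop_count.
 * The single kernel-level hold taken by place_task_hold() is dropped
 * by release_task_hold() only when the user-level counts drain away.
 */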
3945 
3946 boolean_t
3947 get_task_suspended(task_t task)
3948 {
3949 	return 0 != task->user_stop_count;
3950 }
3951 
3952 /*
3953  *	task_suspend:
3954  *
3955  *	Implement an (old-fashioned) user-level suspension on a task.
3956  *
3957  *	Because the user isn't expecting to have to manage a suspension
3958  *	token, we'll track it for him in the kernel in the form of a naked
3959  *	send right to the task's resume port.  All such send rights
3960  *	account for a single suspension against the task (unlike task_suspend2()
3961  *	where each caller gets a unique suspension count represented by a
3962  *	unique send-once right).
3963  *
3964  * Conditions:
3965  *      The caller holds a reference to the task
3966  */
3967 kern_return_t
3968 task_suspend(
3969 	task_t          task)
3970 {
3971 	kern_return_t                   kr;
3972 	mach_port_t                     port;
3973 	mach_port_name_t                name;
3974 
3975 	if (task == TASK_NULL || task == kernel_task) {
3976 		return KERN_INVALID_ARGUMENT;
3977 	}
3978 
3979 	/*
3980 	 * place a legacy hold on the task.
3981 	 */
3982 	task_lock(task);
3983 	kr = place_task_hold(task, TASK_HOLD_LEGACY);
3984 	task_unlock(task);
3985 
3986 	if (kr != KERN_SUCCESS) {
3987 		return kr;
3988 	}
3989 
3990 	/*
3991 	 * Claim a send right on the task resume port, and request a no-senders
3992 	 * notification on that port (if none outstanding).
3993 	 */
3994 	itk_lock(task);
3995 	port = task->itk_resume;
3996 	if (port == IP_NULL) {
3997 		port = ipc_kobject_alloc_port(task, IKOT_TASK_RESUME,
3998 		    IPC_KOBJECT_ALLOC_NSREQUEST | IPC_KOBJECT_ALLOC_MAKE_SEND);
3999 		task->itk_resume = port;
4000 	} else {
4001 		(void)ipc_kobject_make_send_nsrequest(port, task, IKOT_TASK_RESUME);
4002 	}
4003 	itk_unlock(task);
4004 
4005 	/*
4006 	 * Copyout the send right into the calling task's IPC space.  It won't know it is there,
4007 	 * but we'll look it up when calling a traditional resume.  Any IPC operations that
4008 	 * deallocate the send right will auto-release the suspension.
4009 	 */
4010 	if (IP_VALID(port)) {
4011 		kr = ipc_object_copyout(current_space(), ip_to_object(port),
4012 		    MACH_MSG_TYPE_MOVE_SEND, IPC_OBJECT_COPYOUT_FLAGS_NONE,
4013 		    NULL, NULL, &name);
4014 	} else {
4015 		kr = KERN_SUCCESS;
4016 	}
4017 	if (kr != KERN_SUCCESS) {
4018 		printf("warning: %s(%d) failed to copyout suspension "
4019 		    "token for pid %d with error: %d\n",
4020 		    proc_name_address(get_bsdtask_info(current_task())),
4021 		    proc_pid(get_bsdtask_info(current_task())),
4022 		    task_pid(task), kr);
4023 	}
4024 
4025 	return kr;
4026 }
4027 
4028 /*
4029  *	task_resume:
4030  *		Release a user hold on a task.
4031  *
4032  * Conditions:
4033  *		The caller holds a reference to the task
4034  */
4035 kern_return_t
4036 task_resume(
4037 	task_t  task)
4038 {
4039 	kern_return_t    kr;
4040 	mach_port_name_t resume_port_name;
4041 	ipc_entry_t              resume_port_entry;
4042 	ipc_space_t              space = current_task()->itk_space;
4043 
4044 	if (task == TASK_NULL || task == kernel_task) {
4045 		return KERN_INVALID_ARGUMENT;
4046 	}
4047 
4048 	/* release a legacy task hold */
4049 	task_lock(task);
4050 	kr = release_task_hold(task, TASK_HOLD_LEGACY);
4051 	task_unlock(task);
4052 
4053 	itk_lock(task); /* for itk_resume */
4054 	is_write_lock(space); /* spin lock */
4055 	if (is_active(space) && IP_VALID(task->itk_resume) &&
4056 	    ipc_hash_lookup(space, ip_to_object(task->itk_resume), &resume_port_name, &resume_port_entry) == TRUE) {
4057 		/*
4058 		 * We found a suspension token in the caller's IPC space. Release a send right to indicate that
4059 		 * we are holding one less legacy hold on the task from this caller.  If the release failed,
4060 		 * go ahead and drop all the rights, as someone either already released our holds or the task
4061 		 * is gone.
4062 		 */
4063 		itk_unlock(task);
4064 		if (kr == KERN_SUCCESS) {
4065 			ipc_right_dealloc(space, resume_port_name, resume_port_entry);
4066 		} else {
4067 			ipc_right_destroy(space, resume_port_name, resume_port_entry, FALSE, 0);
4068 		}
4069 		/* space unlocked */
4070 	} else {
4071 		itk_unlock(task);
4072 		is_write_unlock(space);
4073 		if (kr == KERN_SUCCESS) {
4074 			printf("warning: %s(%d) performed out-of-band resume on pid %d\n",
4075 			    proc_name_address(get_bsdtask_info(current_task())), proc_pid(get_bsdtask_info(current_task())),
4076 			    task_pid(task));
4077 		}
4078 	}
4079 
4080 	return kr;
4081 }
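/*
 * Illustrative sketch (not part of the original source): the legacy
 * pairing as a user-space client sees it.  Each task_suspend() hides
 * one send right to the resume port in the caller's IPC space; each
 * task_resume() looks one up and releases it, so the calls must
 * balance.
 */
#if 0 /* example only: user-space code, not kernel code */
#include <mach/mach.h>

static void
example_legacy_suspend_resume(task_t task)
{
	if (task_suspend(task) == KERN_SUCCESS) {
		/* ... inspect the stopped task here ... */
		(void)task_resume(task);        /* balances the hidden token */
	}
}
#endif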
4082 
4083 /*
4084  * Suspend a task that is already protected by a held lock.
4085  * Making/holding a token/reference/port is the caller's responsibility.
4086  */
4087 kern_return_t
4088 task_suspend_internal_locked(task_t task)
4089 {
4090 	if (task == TASK_NULL || task == kernel_task) {
4091 		return KERN_INVALID_ARGUMENT;
4092 	}
4093 
4094 	return place_task_hold(task, TASK_HOLD_NORMAL);
4095 }
4096 
4097 /*
4098  * Suspend a task.
4099  * Making/holding a token/reference/port is the caller's responsibility.
4100  */
4101 kern_return_t
4102 task_suspend_internal(task_t task)
4103 {
4104 	kern_return_t    kr;
4105 
4106 	if (task == TASK_NULL || task == kernel_task) {
4107 		return KERN_INVALID_ARGUMENT;
4108 	}
4109 
4110 	task_lock(task);
4111 	kr = task_suspend_internal_locked(task);
4112 	task_unlock(task);
4113 	return kr;
4114 }
4115 
4116 /*
4117  * Suspend the target task, and return a suspension token. The token
4118  * represents a reference on the suspended task.
4119  */
4120 static kern_return_t
4121 task_suspend2_grp(
4122 	task_t                  task,
4123 	task_suspension_token_t *suspend_token,
4124 	task_grp_t              grp)
4125 {
4126 	kern_return_t    kr;
4127 
4128 	kr = task_suspend_internal(task);
4129 	if (kr != KERN_SUCCESS) {
4130 		*suspend_token = TASK_NULL;
4131 		return kr;
4132 	}
4133 
4134 	/*
4135 	 * Take a reference on the target task and return that to the caller
4136 	 * as a "suspension token," which can be converted into an SO right to
4137 	 * the now-suspended task's resume port.
4138 	 */
4139 	task_reference_grp(task, grp);
4140 	*suspend_token = task;
4141 
4142 	return KERN_SUCCESS;
4143 }
4144 
4145 kern_return_t
4146 task_suspend2_mig(
4147 	task_t                  task,
4148 	task_suspension_token_t *suspend_token)
4149 {
4150 	return task_suspend2_grp(task, suspend_token, TASK_GRP_MIG);
4151 }
4152 
4153 kern_return_t
4154 task_suspend2_external(
4155 	task_t                  task,
4156 	task_suspension_token_t *suspend_token)
4157 {
4158 	return task_suspend2_grp(task, suspend_token, TASK_GRP_EXTERNAL);
4159 }
4160 
4161 /*
4162  * Resume a task that is already protected by a held lock.
4163  * (reference/token/port management is caller's responsibility).
4164  */
4165 kern_return_t
4166 task_resume_internal_locked(
4167 	task_suspension_token_t         task)
4168 {
4169 	if (task == TASK_NULL || task == kernel_task) {
4170 		return KERN_INVALID_ARGUMENT;
4171 	}
4172 
4173 	return release_task_hold(task, TASK_HOLD_NORMAL);
4174 }
4175 
4176 /*
4177  * Resume a task.
4178  * (reference/token/port management is caller's responsibility).
4179  */
4180 kern_return_t
4181 task_resume_internal(
4182 	task_suspension_token_t         task)
4183 {
4184 	kern_return_t kr;
4185 
4186 	if (task == TASK_NULL || task == kernel_task) {
4187 		return KERN_INVALID_ARGUMENT;
4188 	}
4189 
4190 	task_lock(task);
4191 	kr = task_resume_internal_locked(task);
4192 	task_unlock(task);
4193 	return kr;
4194 }
4195 
4196 /*
4197  * Resume the task using a suspension token. Consumes the token's ref.
4198  */
4199 static kern_return_t
4200 task_resume2_grp(
4201 	task_suspension_token_t         task,
4202 	task_grp_t                      grp)
4203 {
4204 	kern_return_t kr;
4205 
4206 	kr = task_resume_internal(task);
4207 	task_suspension_token_deallocate_grp(task, grp);
4208 
4209 	return kr;
4210 }
4211 
4212 kern_return_t
4213 task_resume2_mig(
4214 	task_suspension_token_t         task)
4215 {
4216 	return task_resume2_grp(task, TASK_GRP_MIG);
4217 }
4218 
4219 kern_return_t
4220 task_resume2_external(
4221 	task_suspension_token_t         task)
4222 {
4223 	return task_resume2_grp(task, TASK_GRP_EXTERNAL);
4224 }
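/*
 * Illustrative sketch (not part of the original source): the
 * token-based API as a user-space client sees it.  Unlike the legacy
 * calls, each suspension is tied to an explicit token reference, so
 * independent clients cannot resume each other by accident.
 */
#if 0 /* example only: user-space code, not kernel code */
#include <mach/mach.h>

static void
example_token_suspend_resume(task_t task)
{
	task_suspension_token_t token = TASK_NULL;

	if (task_suspend2(task, &token) == KERN_SUCCESS) {
		/* ... inspect the stopped task here ... */
		(void)task_resume2(token);      /* consumes the token */
	}
}
#endif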
4225 
4226 static void
4227 task_suspension_no_senders(ipc_port_t port, mach_port_mscount_t mscount)
4228 {
4229 	task_t task = convert_port_to_task_suspension_token(port);
4230 	kern_return_t kr;
4231 
4232 	if (task == TASK_NULL) {
4233 		return;
4234 	}
4235 
4236 	if (task == kernel_task) {
4237 		task_suspension_token_deallocate(task);
4238 		return;
4239 	}
4240 
4241 	task_lock(task);
4242 
4243 	kr = ipc_kobject_nsrequest(port, mscount, NULL);
4244 	if (kr == KERN_FAILURE) {
4245 		/* release all the [remaining] outstanding legacy holds */
4246 		release_task_hold(task, TASK_HOLD_LEGACY_ALL);
4247 	}
4248 
4249 	task_unlock(task);
4250 
4251 	task_suspension_token_deallocate(task);         /* drop token reference */
4252 }
4253 
4254 /*
4255  * Fires when a send-once right made
4256  * by convert_task_suspension_token_to_port() dies.
4257  */
4258 void
4259 task_suspension_send_once(ipc_port_t port)
4260 {
4261 	task_t task = convert_port_to_task_suspension_token(port);
4262 
4263 	if (task == TASK_NULL || task == kernel_task) {
4264 		return; /* nothing to do */
4265 	}
4266 
4267 	/* release the hold held by this specific send-once right */
4268 	task_lock(task);
4269 	release_task_hold(task, TASK_HOLD_NORMAL);
4270 	task_unlock(task);
4271 
4272 	task_suspension_token_deallocate(task);         /* drop token reference */
4273 }
4274 
4275 static kern_return_t
4276 task_pidsuspend_locked(task_t task)
4277 {
4278 	kern_return_t kr;
4279 
4280 	if (task->pidsuspended) {
4281 		kr = KERN_FAILURE;
4282 		goto out;
4283 	}
4284 
4285 	task->pidsuspended = TRUE;
4286 
4287 	kr = place_task_hold(task, TASK_HOLD_PIDSUSPEND);
4288 	if (kr != KERN_SUCCESS) {
4289 		task->pidsuspended = FALSE;
4290 	}
4291 out:
4292 	return kr;
4293 }
4294 
4295 
4296 /*
4297  *	task_pidsuspend:
4298  *
4299  *	Suspends a task by placing a hold on its threads.
4300  *
4301  * Conditions:
4302  *      The caller holds a reference to the task
4303  */
4304 kern_return_t
4305 task_pidsuspend(
4306 	task_t          task)
4307 {
4308 	kern_return_t    kr;
4309 
4310 	if (task == TASK_NULL || task == kernel_task) {
4311 		return KERN_INVALID_ARGUMENT;
4312 	}
4313 
4314 	task_lock(task);
4315 
4316 	kr = task_pidsuspend_locked(task);
4317 
4318 	task_unlock(task);
4319 
4320 	if ((KERN_SUCCESS == kr) && task->message_app_suspended) {
4321 		iokit_task_app_suspended_changed(task);
4322 	}
4323 
4324 	return kr;
4325 }
4326 
4327 /*
4328  *	task_pidresume:
4329  *		Resumes a previously suspended task.
4330  *
4331  * Conditions:
4332  *		The caller holds a reference to the task
4333  */
4334 kern_return_t
4335 task_pidresume(
4336 	task_t  task)
4337 {
4338 	kern_return_t    kr;
4339 
4340 	if (task == TASK_NULL || task == kernel_task) {
4341 		return KERN_INVALID_ARGUMENT;
4342 	}
4343 
4344 	task_lock(task);
4345 
4346 #if CONFIG_FREEZE
4347 
4348 	while (task->changing_freeze_state) {
4349 		assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
4350 		task_unlock(task);
4351 		thread_block(THREAD_CONTINUE_NULL);
4352 
4353 		task_lock(task);
4354 	}
4355 	task->changing_freeze_state = TRUE;
4356 #endif
4357 
4358 	kr = release_task_hold(task, TASK_HOLD_PIDSUSPEND);
4359 
4360 	task_unlock(task);
4361 
4362 	if ((KERN_SUCCESS == kr) && task->message_app_suspended) {
4363 		iokit_task_app_suspended_changed(task);
4364 	}
4365 
4366 #if CONFIG_FREEZE
4367 
4368 	task_lock(task);
4369 
4370 	if (kr == KERN_SUCCESS) {
4371 		task->frozen = FALSE;
4372 	}
4373 	task->changing_freeze_state = FALSE;
4374 	thread_wakeup(&task->changing_freeze_state);
4375 
4376 	task_unlock(task);
4377 #endif
4378 
4379 	return kr;
4380 }
4381 
4382 os_refgrp_decl(static, task_watchports_refgrp, "task_watchports", NULL);
4383 
4384 /*
4385  *	task_add_turnstile_watchports:
4386  *		Setup watchports to boost the main thread of the task.
4387  *
4388  *	Arguments:
4389  *		task: task being spawned
4390  *		thread: main thread of task
4391  *		portwatch_ports: array of watchports
4392  *		portwatch_count: number of watchports
4393  *
4394  *	Conditions:
4395  *		Nothing locked.
4396  */
4397 void
4398 task_add_turnstile_watchports(
4399 	task_t          task,
4400 	thread_t        thread,
4401 	ipc_port_t      *portwatch_ports,
4402 	uint32_t        portwatch_count)
4403 {
4404 	struct task_watchports *watchports = NULL;
4405 	struct task_watchport_elem *previous_elem_array[TASK_MAX_WATCHPORT_COUNT] = {};
4406 	os_ref_count_t refs;
4407 
4408 	/* Check if the task has terminated */
4409 	if (!task->active) {
4410 		return;
4411 	}
4412 
4413 	assert(portwatch_count <= TASK_MAX_WATCHPORT_COUNT);
4414 
4415 	watchports = task_watchports_alloc_init(task, thread, portwatch_count);
4416 
4417 	/* Lock the ipc space */
4418 	is_write_lock(task->itk_space);
4419 
4420 	/* Setup watchports to boost the main thread */
4421 	refs = task_add_turnstile_watchports_locked(task,
4422 	    watchports, previous_elem_array, portwatch_ports,
4423 	    portwatch_count);
4424 
4425 	/* Drop the space lock */
4426 	is_write_unlock(task->itk_space);
4427 
4428 	if (refs == 0) {
4429 		task_watchports_deallocate(watchports);
4430 	}
4431 
4432 	/* Drop the ref on previous_elem_array */
4433 	for (uint32_t i = 0; i < portwatch_count && previous_elem_array[i] != NULL; i++) {
4434 		task_watchport_elem_deallocate(previous_elem_array[i]);
4435 	}
4436 }
4437 
4438 /*
4439  *	task_remove_turnstile_watchports:
4440  *		Clear all turnstile boost on the task from watchports.
4441  *
4442  *	Arguments:
4443  *		task: task being terminated
4444  *
4445  *	Conditions:
4446  *		Nothing locked.
4447  */
4448 void
4449 task_remove_turnstile_watchports(
4450 	task_t          task)
4451 {
4452 	os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4453 	struct task_watchports *watchports = NULL;
4454 	ipc_port_t port_freelist[TASK_MAX_WATCHPORT_COUNT] = {};
4455 	uint32_t portwatch_count;
4456 
4457 	/* Lock the ipc space */
4458 	is_write_lock(task->itk_space);
4459 
4460 	/* Check if a watchport boost exists */
4461 	if (task->watchports == NULL) {
4462 		is_write_unlock(task->itk_space);
4463 		return;
4464 	}
4465 	watchports = task->watchports;
4466 	portwatch_count = watchports->tw_elem_array_count;
4467 
4468 	refs = task_remove_turnstile_watchports_locked(task, watchports,
4469 	    port_freelist);
4470 
4471 	is_write_unlock(task->itk_space);
4472 
4473 	/* Drop all the port references */
4474 	for (uint32_t i = 0; i < portwatch_count && port_freelist[i] != NULL; i++) {
4475 		ip_release(port_freelist[i]);
4476 	}
4477 
4478 	/* Clear the task and thread references for task_watchport */
4479 	if (refs == 0) {
4480 		task_watchports_deallocate(watchports);
4481 	}
4482 }
4483 
4484 /*
4485  *	task_transfer_turnstile_watchports:
4486  *		Transfer all watchport turnstile boost from old task to new task.
4487  *
4488  *	Arguments:
4489  *		old_task: task calling exec
4490  *		new_task: new exec'ed task
4491  *		thread: main thread of new task
4492  *
4493  *	Conditions:
4494  *		Nothing locked.
4495  */
4496 void
4497 task_transfer_turnstile_watchports(
4498 	task_t   old_task,
4499 	task_t   new_task,
4500 	thread_t new_thread)
4501 {
4502 	struct task_watchports *old_watchports = NULL;
4503 	struct task_watchports *new_watchports = NULL;
4504 	os_ref_count_t old_refs = TASK_MAX_WATCHPORT_COUNT;
4505 	os_ref_count_t new_refs = TASK_MAX_WATCHPORT_COUNT;
4506 	uint32_t portwatch_count;
4507 
4508 	if (old_task->watchports == NULL || !new_task->active) {
4509 		return;
4510 	}
4511 
4512 	/* Get the watch port count from the old task */
4513 	is_write_lock(old_task->itk_space);
4514 	if (old_task->watchports == NULL) {
4515 		is_write_unlock(old_task->itk_space);
4516 		return;
4517 	}
4518 
4519 	portwatch_count = old_task->watchports->tw_elem_array_count;
4520 	is_write_unlock(old_task->itk_space);
4521 
4522 	new_watchports = task_watchports_alloc_init(new_task, new_thread, portwatch_count);
4523 
4524 	/* Lock the ipc space for old task */
4525 	is_write_lock(old_task->itk_space);
4526 
4527 	/* Lock the ipc space for new task */
4528 	is_write_lock(new_task->itk_space);
4529 
4530 	/* Check if a watchport boost exists */
4531 	if (old_task->watchports == NULL || !new_task->active) {
4532 		is_write_unlock(new_task->itk_space);
4533 		is_write_unlock(old_task->itk_space);
4534 		(void)task_watchports_release(new_watchports);
4535 		task_watchports_deallocate(new_watchports);
4536 		return;
4537 	}
4538 
4539 	old_watchports = old_task->watchports;
4540 	assert(portwatch_count == old_task->watchports->tw_elem_array_count);
4541 
4542 	/* Setup new task watchports */
4543 	new_task->watchports = new_watchports;
4544 
4545 	for (uint32_t i = 0; i < portwatch_count; i++) {
4546 		ipc_port_t port = old_watchports->tw_elem[i].twe_port;
4547 
4548 		if (port == NULL) {
4549 			task_watchport_elem_clear(&new_watchports->tw_elem[i]);
4550 			continue;
4551 		}
4552 
4553 		/* Lock the port and check if it has the entry */
4554 		ip_mq_lock(port);
4555 
4556 		task_watchport_elem_init(&new_watchports->tw_elem[i], new_task, port);
4557 
4558 		if (ipc_port_replace_watchport_elem_conditional_locked(port,
4559 		    &old_watchports->tw_elem[i], &new_watchports->tw_elem[i]) == KERN_SUCCESS) {
4560 			task_watchport_elem_clear(&old_watchports->tw_elem[i]);
4561 
4562 			task_watchports_retain(new_watchports);
4563 			old_refs = task_watchports_release(old_watchports);
4564 
4565 			/* Check if all ports are cleaned */
4566 			if (old_refs == 0) {
4567 				old_task->watchports = NULL;
4568 			}
4569 		} else {
4570 			task_watchport_elem_clear(&new_watchports->tw_elem[i]);
4571 		}
4572 		/* port unlocked by ipc_port_replace_watchport_elem_conditional_locked */
4573 	}
4574 
4575 	/* Drop the reference on new task_watchports struct returned by task_watchports_alloc_init */
4576 	new_refs = task_watchports_release(new_watchports);
4577 	if (new_refs == 0) {
4578 		new_task->watchports = NULL;
4579 	}
4580 
4581 	is_write_unlock(new_task->itk_space);
4582 	is_write_unlock(old_task->itk_space);
4583 
4584 	/* Clear the task and thread references for old_watchport */
4585 	if (old_refs == 0) {
4586 		task_watchports_deallocate(old_watchports);
4587 	}
4588 
4589 	/* Clear the task and thread references for new_watchport */
4590 	if (new_refs == 0) {
4591 		task_watchports_deallocate(new_watchports);
4592 	}
4593 }
4594 
4595 /*
4596  *	task_add_turnstile_watchports_locked:
4597  *		Setup watchports to boost the main thread of the task.
4598  *
4599  *	Arguments:
4600  *		task: task to boost
4601  *		watchports: watchport structure to be attached to the task
4602  *		previous_elem_array: an array of old watchport_elem to be returned to caller
4603  *		portwatch_ports: array of watchports
4604  *		portwatch_count: number of watchports
4605  *
4606  *	Conditions:
4607  *		ipc space of the task locked.
4608  *		returns array of old watchport_elem in previous_elem_array
4609  */
4610 static os_ref_count_t
4611 task_add_turnstile_watchports_locked(
4612 	task_t                      task,
4613 	struct task_watchports      *watchports,
4614 	struct task_watchport_elem  **previous_elem_array,
4615 	ipc_port_t                  *portwatch_ports,
4616 	uint32_t                    portwatch_count)
4617 {
4618 	os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4619 
4620 	/* Check if the task is still active */
4621 	if (!task->active) {
4622 		refs = task_watchports_release(watchports);
4623 		return refs;
4624 	}
4625 
4626 	assert(task->watchports == NULL);
4627 	task->watchports = watchports;
4628 
4629 	for (uint32_t i = 0, j = 0; i < portwatch_count; i++) {
4630 		ipc_port_t port = portwatch_ports[i];
4631 
4632 		task_watchport_elem_init(&watchports->tw_elem[i], task, port);
4633 		if (port == NULL) {
4634 			task_watchport_elem_clear(&watchports->tw_elem[i]);
4635 			continue;
4636 		}
4637 
4638 		ip_mq_lock(port);
4639 
4640 		/* Check if port is in valid state to be setup as watchport */
4641 		if (ipc_port_add_watchport_elem_locked(port, &watchports->tw_elem[i],
4642 		    &previous_elem_array[j]) != KERN_SUCCESS) {
4643 			task_watchport_elem_clear(&watchports->tw_elem[i]);
4644 			continue;
4645 		}
4646 		/* port unlocked on return */
4647 
4648 		ip_reference(port);
4649 		task_watchports_retain(watchports);
4650 		if (previous_elem_array[j] != NULL) {
4651 			j++;
4652 		}
4653 	}
4654 
4655 	/* Drop the reference on task_watchport struct returned by os_ref_init */
4656 	refs = task_watchports_release(watchports);
4657 	if (refs == 0) {
4658 		task->watchports = NULL;
4659 	}
4660 
4661 	return refs;
4662 }
4663 
4664 /*
4665  *	task_remove_turnstile_watchports_locked:
4666  *		Clear all turnstile boost on the task from watchports.
4667  *
4668  *	Arguments:
4669  *		task: task to remove watchports from
4670  *		watchports: watchports structure for the task
4671  *		port_freelist: array of ports returned with ref to caller
4672  *
4673  *
4674  *	Conditions:
4675  *		ipc space of the task locked.
4676  *		array of ports with refs are returned in port_freelist
4677  */
4678 static os_ref_count_t
4679 task_remove_turnstile_watchports_locked(
4680 	task_t                 task,
4681 	struct task_watchports *watchports,
4682 	ipc_port_t             *port_freelist)
4683 {
4684 	os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4685 
4686 	for (uint32_t i = 0, j = 0; i < watchports->tw_elem_array_count; i++) {
4687 		ipc_port_t port = watchports->tw_elem[i].twe_port;
4688 		if (port == NULL) {
4689 			continue;
4690 		}
4691 
4692 		/* Lock the port and check if it has the entry */
4693 		ip_mq_lock(port);
4694 		if (ipc_port_clear_watchport_elem_internal_conditional_locked(port,
4695 		    &watchports->tw_elem[i]) == KERN_SUCCESS) {
4696 			task_watchport_elem_clear(&watchports->tw_elem[i]);
4697 			port_freelist[j++] = port;
4698 			refs = task_watchports_release(watchports);
4699 
4700 			/* Check if all ports are cleaned */
4701 			if (refs == 0) {
4702 				task->watchports = NULL;
4703 				break;
4704 			}
4705 		}
4706 		/* mqueue and port unlocked by ipc_port_clear_watchport_elem_internal_conditional_locked */
4707 	}
4708 	return refs;
4709 }
4710 
4711 /*
4712  *	task_watchports_alloc_init:
4713  *		Allocate and initialize task watchport struct.
4714  *
4715  *	Conditions:
4716  *		Nothing locked.
4717  */
4718 static struct task_watchports *
4719 task_watchports_alloc_init(
4720 	task_t        task,
4721 	thread_t      thread,
4722 	uint32_t      count)
4723 {
4724 	struct task_watchports *watchports = kalloc_type(struct task_watchports,
4725 	    struct task_watchport_elem, count, Z_WAITOK | Z_ZERO | Z_NOFAIL);
4726 
4727 	task_reference(task);
4728 	thread_reference(thread);
4729 	watchports->tw_task = task;
4730 	watchports->tw_thread = thread;
4731 	watchports->tw_elem_array_count = count;
4732 	os_ref_init(&watchports->tw_refcount, &task_watchports_refgrp);
4733 
4734 	return watchports;
4735 }
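/*
 * Note: the two-type kalloc_type() call above allocates one block
 * holding a struct task_watchports header followed by 'count'
 * struct task_watchport_elem entries (the tw_elem[] trailing array).
 * task_watchports_deallocate() below frees it with the matching
 * two-type kfree_type() and the same element count.
 */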
4736 
4737 /*
4738  *	task_watchports_deallocate:
4739  *		Deallocate task watchport struct.
4740  *
4741  *	Conditions:
4742  *		Nothing locked.
4743  */
4744 static void
4745 task_watchports_deallocate(
4746 	struct task_watchports *watchports)
4747 {
4748 	uint32_t portwatch_count = watchports->tw_elem_array_count;
4749 
4750 	task_deallocate(watchports->tw_task);
4751 	thread_deallocate(watchports->tw_thread);
4752 	kfree_type(struct task_watchports, struct task_watchport_elem,
4753 	    portwatch_count, watchports);
4754 }
4755 
4756 /*
4757  *	task_watchport_elem_deallocate:
4758  *		Deallocate task watchport element and release its ref on task_watchport.
4759  *
4760  *	Conditions:
4761  *		Nothing locked.
4762  */
4763 void
4764 task_watchport_elem_deallocate(
4765 	struct task_watchport_elem *watchport_elem)
4766 {
4767 	os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4768 	task_t task = watchport_elem->twe_task;
4769 	struct task_watchports *watchports = NULL;
4770 	ipc_port_t port = NULL;
4771 
4772 	assert(task != NULL);
4773 
4774 	/* Take the space lock to modify the element */
4775 	is_write_lock(task->itk_space);
4776 
4777 	watchports = task->watchports;
4778 	assert(watchports != NULL);
4779 
4780 	port = watchport_elem->twe_port;
4781 	assert(port != NULL);
4782 
4783 	task_watchport_elem_clear(watchport_elem);
4784 	refs = task_watchports_release(watchports);
4785 
4786 	if (refs == 0) {
4787 		task->watchports = NULL;
4788 	}
4789 
4790 	is_write_unlock(task->itk_space);
4791 
4792 	ip_release(port);
4793 	if (refs == 0) {
4794 		task_watchports_deallocate(watchports);
4795 	}
4796 }
4797 
4798 /*
4799  *	task_has_watchports:
4800  *		Return TRUE if task has watchport boosts.
4801  *
4802  *	Conditions:
4803  *		Nothing locked.
4804  */
4805 boolean_t
4806 task_has_watchports(task_t task)
4807 {
4808 	return task->watchports != NULL;
4809 }
4810 
4811 #if DEVELOPMENT || DEBUG
4812 
4813 extern void IOSleep(int);
4814 
4815 kern_return_t
4816 task_disconnect_page_mappings(task_t task)
4817 {
4818 	int     n;
4819 
4820 	if (task == TASK_NULL || task == kernel_task) {
4821 		return KERN_INVALID_ARGUMENT;
4822 	}
4823 
4824 	/*
4825 	 * This function strips all of the mappings from the pmap for
4826 	 * the specified task, forcing the task to re-fault all of the
4827 	 * pages it is actively using.  This lets us approximate the
4828 	 * task's true working set.  We only engage if at least one of
4829 	 * the threads in the task is runnable, but we want to sweep
4830 	 * repeatedly, at least for a while (the limit of 100 sweeps
4831 	 * is arbitrary and should be revisited as we gain experience),
4832 	 * to get a better view into which areas within a page are
4833 	 * being visited, as opposed to only seeing the first fault
4834 	 * of a page after the task becomes runnable.  In the future
4835 	 * we may try to block until awakened by a thread in this
4836 	 * task being made runnable, but for now we'll periodically
4837 	 * poll from the user-level debug tool driving the sysctl
4838 	 * instead.
4839 	 */
4840 	for (n = 0; n < 100; n++) {
4841 		thread_t        thread;
4842 		boolean_t       runnable;
4843 		boolean_t       do_unnest;
4844 		int             page_count;
4845 
4846 		runnable = FALSE;
4847 		do_unnest = FALSE;
4848 
4849 		task_lock(task);
4850 
4851 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
4852 			if (thread->state & TH_RUN) {
4853 				runnable = TRUE;
4854 				break;
4855 			}
4856 		}
4857 		if (n == 0) {
4858 			task->task_disconnected_count++;
4859 		}
4860 
4861 		if (task->task_unnested == FALSE) {
4862 			if (runnable == TRUE) {
4863 				task->task_unnested = TRUE;
4864 				do_unnest = TRUE;
4865 			}
4866 		}
4867 		task_unlock(task);
4868 
4869 		if (runnable == FALSE) {
4870 			break;
4871 		}
4872 
4873 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_START,
4874 		    task, do_unnest, task->task_disconnected_count, 0, 0);
4875 
4876 		page_count = vm_map_disconnect_page_mappings(task->map, do_unnest);
4877 
4878 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_END,
4879 		    task, page_count, 0, 0, 0);
4880 
4881 		if ((n % 5) == 4) {
4882 			IOSleep(1);
4883 		}
4884 	}
4885 	return KERN_SUCCESS;
4886 }
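
/*
 * Illustrative sketch (not part of the original source): a debug caller
 * sampling a task's working set with the sweep above.  The sysctl
 * plumbing that normally drives this is an assumption and is not shown.
 */
#if 0 /* example only */
static kern_return_t
task_working_set_sample_example(task_t target)
{
	/* rejects TASK_NULL and kernel_task; sweeps until the task idles */
	kern_return_t kr = task_disconnect_page_mappings(target);

	/* ... re-issue periodically and observe which pages re-fault ... */
	return kr;
}
#endif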
4887 
4888 #endif
4889 
4890 
4891 #if CONFIG_FREEZE
4892 
4893 /*
4894  *	task_freeze:
4895  *
4896  *	Freeze a task.
4897  *
4898  * Conditions:
4899  *      The caller holds a reference to the task
4900  */
4901 extern void     vm_wake_compactor_swapper(void);
4902 extern struct freezer_context freezer_context_global;
4903 
4904 kern_return_t
4905 task_freeze(
4906 	task_t    task,
4907 	uint32_t           *purgeable_count,
4908 	uint32_t           *wired_count,
4909 	uint32_t           *clean_count,
4910 	uint32_t           *dirty_count,
4911 	uint32_t           dirty_budget,
4912 	uint32_t           *shared_count,
4913 	int                *freezer_error_code,
4914 	boolean_t          eval_only)
4915 {
4916 	kern_return_t kr = KERN_SUCCESS;
4917 
4918 	if (task == TASK_NULL || task == kernel_task) {
4919 		return KERN_INVALID_ARGUMENT;
4920 	}
4921 
4922 	task_lock(task);
4923 
4924 	while (task->changing_freeze_state) {
4925 		assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
4926 		task_unlock(task);
4927 		thread_block(THREAD_CONTINUE_NULL);
4928 
4929 		task_lock(task);
4930 	}
4931 	if (task->frozen) {
4932 		task_unlock(task);
4933 		return KERN_FAILURE;
4934 	}
4935 	task->changing_freeze_state = TRUE;
4936 
4937 	freezer_context_global.freezer_ctx_task = task;
4938 
4939 	task_unlock(task);
4940 
4941 	kr = vm_map_freeze(task,
4942 	    purgeable_count,
4943 	    wired_count,
4944 	    clean_count,
4945 	    dirty_count,
4946 	    dirty_budget,
4947 	    shared_count,
4948 	    freezer_error_code,
4949 	    eval_only);
4950 
4951 	task_lock(task);
4952 
4953 	if ((kr == KERN_SUCCESS) && (eval_only == FALSE)) {
4954 		task->frozen = TRUE;
4955 
4956 		freezer_context_global.freezer_ctx_task = NULL;
4957 		freezer_context_global.freezer_ctx_uncompressed_pages = 0;
4958 
4959 		if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
4960 			/*
4961 			 * reset the counter tracking the # of swapped compressed pages
4962 			 * because we are now done with this freeze session and task.
4963 			 */
4964 
4965 			*dirty_count = (uint32_t) (freezer_context_global.freezer_ctx_swapped_bytes / PAGE_SIZE_64); /* used to track pageouts */
4966 		}
4967 
4968 		freezer_context_global.freezer_ctx_swapped_bytes = 0;
4969 	}
4970 
4971 	task->changing_freeze_state = FALSE;
4972 	thread_wakeup(&task->changing_freeze_state);
4973 
4974 	task_unlock(task);
4975 
4976 	if (VM_CONFIG_COMPRESSOR_IS_PRESENT &&
4977 	    (kr == KERN_SUCCESS) &&
4978 	    (eval_only == FALSE)) {
4979 		vm_wake_compactor_swapper();
4980 		/*
4981 		 * We do an explicit wakeup of the swapout thread here
4982 		 * because the compact_and_swap routines don't have
4983 		 * knowledge about these kinds of "per-task packed c_segs"
4984 		 * and so will not be evaluating whether we need to do
4985 		 * a wakeup there.
4986 		 */
4987 		thread_wakeup((event_t)&vm_swapout_thread);
4988 	}
4989 
4990 	return kr;
4991 }
4992 
4993 /*
4994  *	task_thaw:
4995  *
4996  *	Thaw a currently frozen task.
4997  *
4998  * Conditions:
4999  *      The caller holds a reference to the task
5000  */
5001 kern_return_t
5002 task_thaw(
5003 	task_t          task)
5004 {
5005 	if (task == TASK_NULL || task == kernel_task) {
5006 		return KERN_INVALID_ARGUMENT;
5007 	}
5008 
5009 	task_lock(task);
5010 
5011 	while (task->changing_freeze_state) {
5012 		assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
5013 		task_unlock(task);
5014 		thread_block(THREAD_CONTINUE_NULL);
5015 
5016 		task_lock(task);
5017 	}
5018 	if (!task->frozen) {
5019 		task_unlock(task);
5020 		return KERN_FAILURE;
5021 	}
5022 	task->frozen = FALSE;
5023 
5024 	task_unlock(task);
5025 
5026 	return KERN_SUCCESS;
5027 }
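
/*
 * Illustrative sketch (not part of the original source): a minimal
 * freeze/thaw round trip as a freezer client might perform it.  The
 * caller is assumed to hold a task reference, per the conditions above.
 */
#if 0 /* example only */
static kern_return_t
task_freeze_thaw_example(task_t task, uint32_t dirty_budget)
{
	uint32_t purgeable, wired, clean, dirty, shared;
	int freezer_error_code = 0;
	kern_return_t kr;

	kr = task_freeze(task, &purgeable, &wired, &clean, &dirty,
	    dirty_budget, &shared, &freezer_error_code,
	    FALSE /* eval_only: actually freeze, don't just estimate */);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* ... the task stays frozen until it is needed again ... */

	return task_thaw(task);
}
#endif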
5028 
5029 void
5030 task_update_frozen_to_swap_acct(task_t task, int64_t amount, freezer_acct_op_t op)
5031 {
5032 	/*
5033 	 * We don't assert that the task lock is held because we call this
5034 	 * routine from the decompression path and we won't be holding the
5035 	 * task lock. However, since we are in the context of the task we are
5036 	 * safe.
5037 	 * In the case of the task_freeze path, we call it from behind the task
5038 	 * lock but we don't need to because we have a reference on the proc
5039 	 * being frozen.
5040 	 */
5041 
5042 	assert(task);
5043 	if (amount == 0) {
5044 		return;
5045 	}
5046 
5047 	if (op == CREDIT_TO_SWAP) {
5048 		ledger_credit_nocheck(task->ledger, task_ledgers.frozen_to_swap, amount);
5049 	} else if (op == DEBIT_FROM_SWAP) {
5050 		ledger_debit_nocheck(task->ledger, task_ledgers.frozen_to_swap, amount);
5051 	} else {
5052 		panic("task_update_frozen_to_swap_acct: Invalid ledger op");
5053 	}
5054 }
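
/*
 * Usage sketch (illustrative): the swapout path is expected to credit
 * bytes as they reach swap and the decompression path to debit them:
 *
 *	task_update_frozen_to_swap_acct(task, c_size, CREDIT_TO_SWAP);
 *	...
 *	task_update_frozen_to_swap_acct(task, c_size, DEBIT_FROM_SWAP);
 *
 * where c_size stands for the compressed byte count (a hypothetical
 * variable name).
 */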
5055 #endif /* CONFIG_FREEZE */
5056 
5057 kern_return_t
5058 task_set_security_tokens(
5059 	task_t           task,
5060 	security_token_t sec_token,
5061 	audit_token_t    audit_token,
5062 	host_priv_t      host_priv)
5063 {
5064 	ipc_port_t       host_port = IP_NULL;
5065 	kern_return_t    kr;
5066 
5067 	if (task == TASK_NULL) {
5068 		return KERN_INVALID_ARGUMENT;
5069 	}
5070 
5071 	task_lock(task);
5072 	task_set_tokens(task, &sec_token, &audit_token);
5073 	task_unlock(task);
5074 
5075 	if (host_priv != HOST_PRIV_NULL) {
5076 		kr = host_get_host_priv_port(host_priv, &host_port);
5077 	} else {
5078 		kr = host_get_host_port(host_priv_self(), &host_port);
5079 	}
5080 	assert(kr == KERN_SUCCESS);
5081 
5082 	kr = task_set_special_port_internal(task, TASK_HOST_PORT, host_port);
5083 	return kr;
5084 }
5085 
5086 kern_return_t
5087 task_send_trace_memory(
5088 	__unused task_t   target_task,
5089 	__unused uint32_t pid,
5090 	__unused uint64_t uniqueid)
5091 {
5092 	return KERN_INVALID_ARGUMENT;
5093 }
5094 
5095 /*
5096  * This routine was added, pretty much exclusively, for registering the
5097  * RPC glue vector for in-kernel short-circuited tasks.  Rather than
5098  * removing it completely, I have only disabled that feature (which was
5099  * the only feature at the time).  It just appears that we are going to
5100  * want to add some user data to tasks in the future (i.e. bsd info,
5101  * task names, etc...), so I left it in the formal task interface.
5102  */
5103 kern_return_t
5104 task_set_info(
5105 	task_t          task,
5106 	task_flavor_t   flavor,
5107 	__unused task_info_t    task_info_in,           /* pointer to IN array */
5108 	__unused mach_msg_type_number_t task_info_count)
5109 {
5110 	if (task == TASK_NULL) {
5111 		return KERN_INVALID_ARGUMENT;
5112 	}
5113 	switch (flavor) {
5114 #if CONFIG_ATM
5115 	case TASK_TRACE_MEMORY_INFO:
5116 		return KERN_NOT_SUPPORTED;
5117 #endif // CONFIG_ATM
5118 	default:
5119 		return KERN_INVALID_ARGUMENT;
5120 	}
5121 }
5122 
5123 static void
5124 _task_fill_times(task_t task, time_value_t *user_time, time_value_t *sys_time)
5125 {
5126 	clock_sec_t sec;
5127 	clock_usec_t usec;
5128 
5129 	struct recount_times_mach times = recount_task_terminated_times(task);
5130 	absolutetime_to_microtime(times.rtm_user, &sec, &usec);
5131 	user_time->seconds = (typeof(user_time->seconds))sec;
5132 	user_time->microseconds = usec;
5133 	absolutetime_to_microtime(times.rtm_system, &sec, &usec);
5134 	sys_time->seconds = (typeof(sys_time->seconds))sec;
5135 	sys_time->microseconds = usec;
5136 }
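
/*
 * Sketch (not part of the original source): the same conversion that
 * _task_fill_times() performs above, for a single Mach-absolute-time
 * interval.
 */
#if 0 /* example only */
static time_value_t
abstime_to_time_value_example(uint64_t abstime)
{
	clock_sec_t sec;
	clock_usec_t usec;
	time_value_t tv;

	absolutetime_to_microtime(abstime, &sec, &usec);
	tv.seconds = (typeof(tv.seconds))sec;
	tv.microseconds = usec;
	return tv;
}
#endif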
5137 
5138 int radar_20146450 = 1;
5139 kern_return_t
5140 task_info(
5141 	task_t                  task,
5142 	task_flavor_t           flavor,
5143 	task_info_t             task_info_out,
5144 	mach_msg_type_number_t  *task_info_count)
5145 {
5146 	kern_return_t error = KERN_SUCCESS;
5147 	mach_msg_type_number_t  original_task_info_count;
5148 	bool is_kernel_task = (task == kernel_task);
5149 
5150 	if (task == TASK_NULL) {
5151 		return KERN_INVALID_ARGUMENT;
5152 	}
5153 
5154 	original_task_info_count = *task_info_count;
5155 	task_lock(task);
5156 
5157 	if (task != current_task() && !task->active) {
5158 		task_unlock(task);
5159 		return KERN_INVALID_ARGUMENT;
5160 	}
5161 
5162 
5163 	switch (flavor) {
5164 	case TASK_BASIC_INFO_32:
5165 	case TASK_BASIC2_INFO_32:
5166 #if defined(__arm64__)
5167 	case TASK_BASIC_INFO_64:
5168 #endif
5169 		{
5170 			task_basic_info_32_t basic_info;
5171 			ledger_amount_t      tmp;
5172 
5173 			if (*task_info_count < TASK_BASIC_INFO_32_COUNT) {
5174 				error = KERN_INVALID_ARGUMENT;
5175 				break;
5176 			}
5177 
5178 			basic_info = (task_basic_info_32_t)task_info_out;
5179 
5180 			basic_info->virtual_size = (typeof(basic_info->virtual_size))
5181 			    vm_map_adjusted_size(is_kernel_task ? kernel_map : task->map);
5182 			if (flavor == TASK_BASIC2_INFO_32) {
5183 				/*
5184 				 * The "BASIC2" flavor gets the maximum resident
5185 				 * size instead of the current resident size...
5186 				 */
5187 				ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, &tmp);
5188 			} else {
5189 				ledger_get_balance(task->ledger, task_ledgers.phys_mem, &tmp);
5190 			}
5191 			basic_info->resident_size = (natural_t) MIN((ledger_amount_t) UINT32_MAX, tmp);
5192 
5193 			_task_fill_times(task, &basic_info->user_time,
5194 			    &basic_info->system_time);
5195 
5196 			basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5197 			basic_info->suspend_count = task->user_stop_count;
5198 
5199 			*task_info_count = TASK_BASIC_INFO_32_COUNT;
5200 			break;
5201 		}
5202 
5203 #if defined(__arm64__)
5204 	case TASK_BASIC_INFO_64_2:
5205 	{
5206 		task_basic_info_64_2_t  basic_info;
5207 
5208 		if (*task_info_count < TASK_BASIC_INFO_64_2_COUNT) {
5209 			error = KERN_INVALID_ARGUMENT;
5210 			break;
5211 		}
5212 
5213 		basic_info = (task_basic_info_64_2_t)task_info_out;
5214 
5215 		basic_info->virtual_size  = vm_map_adjusted_size(is_kernel_task ?
5216 		    kernel_map : task->map);
5217 		ledger_get_balance(task->ledger, task_ledgers.phys_mem,
5218 		    (ledger_amount_t *)&basic_info->resident_size);
5219 		basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5220 		basic_info->suspend_count = task->user_stop_count;
5221 		_task_fill_times(task, &basic_info->user_time,
5222 		    &basic_info->system_time);
5223 
5224 		*task_info_count = TASK_BASIC_INFO_64_2_COUNT;
5225 		break;
5226 	}
5227 
5228 #else /* defined(__arm64__) */
5229 	case TASK_BASIC_INFO_64:
5230 	{
5231 		task_basic_info_64_t basic_info;
5232 
5233 		if (*task_info_count < TASK_BASIC_INFO_64_COUNT) {
5234 			error = KERN_INVALID_ARGUMENT;
5235 			break;
5236 		}
5237 
5238 		basic_info = (task_basic_info_64_t)task_info_out;
5239 
5240 		basic_info->virtual_size = vm_map_adjusted_size(is_kernel_task ?
5241 		    kernel_map : task->map);
5242 		ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *)&basic_info->resident_size);
5243 		basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5244 		basic_info->suspend_count = task->user_stop_count;
5245 		_task_fill_times(task, &basic_info->user_time,
5246 		    &basic_info->system_time);
5247 
5248 		*task_info_count = TASK_BASIC_INFO_64_COUNT;
5249 		break;
5250 	}
5251 #endif /* defined(__arm64__) */
5252 
5253 	case MACH_TASK_BASIC_INFO:
5254 	{
5255 		mach_task_basic_info_t  basic_info;
5256 
5257 		if (*task_info_count < MACH_TASK_BASIC_INFO_COUNT) {
5258 			error = KERN_INVALID_ARGUMENT;
5259 			break;
5260 		}
5261 
5262 		basic_info = (mach_task_basic_info_t)task_info_out;
5263 
5264 		basic_info->virtual_size = vm_map_adjusted_size(is_kernel_task ?
5265 		    kernel_map : task->map);
5266 		ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &basic_info->resident_size);
5267 		ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &basic_info->resident_size_max);
5268 		basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5269 		basic_info->suspend_count = task->user_stop_count;
5270 		_task_fill_times(task, &basic_info->user_time,
5271 		    &basic_info->system_time);
5272 
5273 		*task_info_count = MACH_TASK_BASIC_INFO_COUNT;
5274 		break;
5275 	}
5276 
5277 	case TASK_THREAD_TIMES_INFO:
5278 	{
5279 		task_thread_times_info_t times_info;
5280 		thread_t                 thread;
5281 
5282 		if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
5283 			error = KERN_INVALID_ARGUMENT;
5284 			break;
5285 		}
5286 
5287 		times_info = (task_thread_times_info_t)task_info_out;
5288 		times_info->user_time = (time_value_t){ 0 };
5289 		times_info->system_time = (time_value_t){ 0 };
5290 
5291 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
5292 			if ((thread->options & TH_OPT_IDLE_THREAD) == 0) {
5293 				time_value_t user_time, system_time;
5294 
5295 				thread_read_times(thread, &user_time, &system_time, NULL);
5296 				time_value_add(&times_info->user_time, &user_time);
5297 				time_value_add(&times_info->system_time, &system_time);
5298 			}
5299 		}
5300 
5301 		*task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
5302 		break;
5303 	}
5304 
5305 	case TASK_ABSOLUTETIME_INFO:
5306 	{
5307 		task_absolutetime_info_t        info;
5308 
5309 		if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT) {
5310 			error = KERN_INVALID_ARGUMENT;
5311 			break;
5312 		}
5313 
5314 		info = (task_absolutetime_info_t)task_info_out;
5315 
5316 		struct recount_times_mach term_times =
5317 		    recount_task_terminated_times(task);
5318 		struct recount_times_mach total_times = recount_task_times(task);
5319 
5320 		info->total_user = total_times.rtm_user;
5321 		info->total_system = total_times.rtm_system;
5322 		info->threads_user = total_times.rtm_user - term_times.rtm_user;
5323 		info->threads_system = total_times.rtm_system - term_times.rtm_system;
5324 
5325 		*task_info_count = TASK_ABSOLUTETIME_INFO_COUNT;
5326 		break;
5327 	}
5328 
5329 	case TASK_DYLD_INFO:
5330 	{
5331 		task_dyld_info_t info;
5332 
5333 		/*
5334 		 * We added the format field to TASK_DYLD_INFO output.  For
5335 		 * temporary backward compatibility, accept the fact that
5336 		 * clients may ask for the old version - distinguished by the
5337 		 * size of the expected result structure.
5338 		 */
5339 #define TASK_LEGACY_DYLD_INFO_COUNT \
5340 	        offsetof(struct task_dyld_info, all_image_info_format)/sizeof(natural_t)
5341 
5342 		if (*task_info_count < TASK_LEGACY_DYLD_INFO_COUNT) {
5343 			error = KERN_INVALID_ARGUMENT;
5344 			break;
5345 		}
5346 
5347 		info = (task_dyld_info_t)task_info_out;
5348 		info->all_image_info_addr = task->all_image_info_addr;
5349 		info->all_image_info_size = task->all_image_info_size;
5350 
5351 		/* only set format on output for those expecting it */
5352 		if (*task_info_count >= TASK_DYLD_INFO_COUNT) {
5353 			info->all_image_info_format = task_has_64Bit_addr(task) ?
5354 			    TASK_DYLD_ALL_IMAGE_INFO_64 :
5355 			    TASK_DYLD_ALL_IMAGE_INFO_32;
5356 			*task_info_count = TASK_DYLD_INFO_COUNT;
5357 		} else {
5358 			*task_info_count = TASK_LEGACY_DYLD_INFO_COUNT;
5359 		}
5360 		break;
5361 	}
5362 
5363 	case TASK_EXTMOD_INFO:
5364 	{
5365 		task_extmod_info_t info;
5366 		void *p;
5367 
5368 		if (*task_info_count < TASK_EXTMOD_INFO_COUNT) {
5369 			error = KERN_INVALID_ARGUMENT;
5370 			break;
5371 		}
5372 
5373 		info = (task_extmod_info_t)task_info_out;
5374 
5375 		p = get_bsdtask_info(task);
5376 		if (p) {
5377 			proc_getexecutableuuid(p, info->task_uuid, sizeof(info->task_uuid));
5378 		} else {
5379 			bzero(info->task_uuid, sizeof(info->task_uuid));
5380 		}
5381 		info->extmod_statistics = task->extmod_statistics;
5382 		*task_info_count = TASK_EXTMOD_INFO_COUNT;
5383 
5384 		break;
5385 	}
5386 
5387 	case TASK_KERNELMEMORY_INFO:
5388 	{
5389 		task_kernelmemory_info_t        tkm_info;
5390 		ledger_amount_t                 credit, debit;
5391 
5392 		if (*task_info_count < TASK_KERNELMEMORY_INFO_COUNT) {
5393 			error = KERN_INVALID_ARGUMENT;
5394 			break;
5395 		}
5396 
5397 		tkm_info = (task_kernelmemory_info_t) task_info_out;
5398 		tkm_info->total_palloc = 0;
5399 		tkm_info->total_pfree = 0;
5400 		tkm_info->total_salloc = 0;
5401 		tkm_info->total_sfree = 0;
5402 
5403 		if (task == kernel_task) {
5404 			/*
5405 			 * All shared allocs/frees from other tasks count against
5406 			 * the kernel private memory usage.  If we are looking up
5407 			 * info for the kernel task, gather from everywhere.
5408 			 */
5409 			task_unlock(task);
5410 
5411 			/* start by accounting for all the terminated tasks against the kernel */
5412 			tkm_info->total_palloc = tasks_tkm_private.alloc + tasks_tkm_shared.alloc;
5413 			tkm_info->total_pfree = tasks_tkm_private.free + tasks_tkm_shared.free;
5414 
5415 			/* count all other task/thread shared alloc/free against the kernel */
5416 			lck_mtx_lock(&tasks_threads_lock);
5417 
5418 			/* XXX this really shouldn't be using the function parameter 'task' as a local var! */
5419 			queue_iterate(&tasks, task, task_t, tasks) {
5420 				if (task == kernel_task) {
5421 					if (ledger_get_entries(task->ledger,
5422 					    task_ledgers.tkm_private, &credit,
5423 					    &debit) == KERN_SUCCESS) {
5424 						tkm_info->total_palloc += credit;
5425 						tkm_info->total_pfree += debit;
5426 					}
5427 				}
5428 				if (!ledger_get_entries(task->ledger,
5429 				    task_ledgers.tkm_shared, &credit, &debit)) {
5430 					tkm_info->total_palloc += credit;
5431 					tkm_info->total_pfree += debit;
5432 				}
5433 			}
5434 			lck_mtx_unlock(&tasks_threads_lock);
5435 		} else {
5436 			if (!ledger_get_entries(task->ledger,
5437 			    task_ledgers.tkm_private, &credit, &debit)) {
5438 				tkm_info->total_palloc = credit;
5439 				tkm_info->total_pfree = debit;
5440 			}
5441 			if (!ledger_get_entries(task->ledger,
5442 			    task_ledgers.tkm_shared, &credit, &debit)) {
5443 				tkm_info->total_salloc = credit;
5444 				tkm_info->total_sfree = debit;
5445 			}
5446 			task_unlock(task);
5447 		}
5448 
5449 		*task_info_count = TASK_KERNELMEMORY_INFO_COUNT;
5450 		return KERN_SUCCESS;
5451 	}
5452 
5453 	/* OBSOLETE */
5454 	case TASK_SCHED_FIFO_INFO:
5455 	{
5456 		if (*task_info_count < POLICY_FIFO_BASE_COUNT) {
5457 			error = KERN_INVALID_ARGUMENT;
5458 			break;
5459 		}
5460 
5461 		error = KERN_INVALID_POLICY;
5462 		break;
5463 	}
5464 
5465 	/* OBSOLETE */
5466 	case TASK_SCHED_RR_INFO:
5467 	{
5468 		policy_rr_base_t        rr_base;
5469 		uint32_t quantum_time;
5470 		uint64_t quantum_ns;
5471 
5472 		if (*task_info_count < POLICY_RR_BASE_COUNT) {
5473 			error = KERN_INVALID_ARGUMENT;
5474 			break;
5475 		}
5476 
5477 		rr_base = (policy_rr_base_t) task_info_out;
5478 
5479 		if (task != kernel_task) {
5480 			error = KERN_INVALID_POLICY;
5481 			break;
5482 		}
5483 
5484 		rr_base->base_priority = task->priority;
5485 
5486 		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
5487 		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);
5488 
5489 		rr_base->quantum = (uint32_t)(quantum_ns / 1000 / 1000);
5490 
5491 		*task_info_count = POLICY_RR_BASE_COUNT;
5492 		break;
5493 	}
5494 
5495 	/* OBSOLETE */
5496 	case TASK_SCHED_TIMESHARE_INFO:
5497 	{
5498 		policy_timeshare_base_t ts_base;
5499 
5500 		if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT) {
5501 			error = KERN_INVALID_ARGUMENT;
5502 			break;
5503 		}
5504 
5505 		ts_base = (policy_timeshare_base_t) task_info_out;
5506 
5507 		if (task == kernel_task) {
5508 			error = KERN_INVALID_POLICY;
5509 			break;
5510 		}
5511 
5512 		ts_base->base_priority = task->priority;
5513 
5514 		*task_info_count = POLICY_TIMESHARE_BASE_COUNT;
5515 		break;
5516 	}
5517 
5518 	case TASK_SECURITY_TOKEN:
5519 	{
5520 		security_token_t        *sec_token_p;
5521 
5522 		if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
5523 			error = KERN_INVALID_ARGUMENT;
5524 			break;
5525 		}
5526 
5527 		sec_token_p = (security_token_t *) task_info_out;
5528 
5529 		*sec_token_p = *task_get_sec_token(task);
5530 
5531 		*task_info_count = TASK_SECURITY_TOKEN_COUNT;
5532 		break;
5533 	}
5534 
5535 	case TASK_AUDIT_TOKEN:
5536 	{
5537 		audit_token_t   *audit_token_p;
5538 
5539 		if (*task_info_count < TASK_AUDIT_TOKEN_COUNT) {
5540 			error = KERN_INVALID_ARGUMENT;
5541 			break;
5542 		}
5543 
5544 		audit_token_p = (audit_token_t *) task_info_out;
5545 
5546 		*audit_token_p = *task_get_audit_token(task);
5547 
5548 		*task_info_count = TASK_AUDIT_TOKEN_COUNT;
5549 		break;
5550 	}
5551 
5552 	case TASK_SCHED_INFO:
5553 		error = KERN_INVALID_ARGUMENT;
5554 		break;
5555 
5556 	case TASK_EVENTS_INFO:
5557 	{
5558 		task_events_info_t      events_info;
5559 		thread_t                thread;
5560 		uint64_t                n_syscalls_mach, n_syscalls_unix, n_csw;
5561 
5562 		if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
5563 			error = KERN_INVALID_ARGUMENT;
5564 			break;
5565 		}
5566 
5567 		events_info = (task_events_info_t) task_info_out;
5568 
5569 
5570 		events_info->faults = (int32_t) MIN(counter_load(&task->faults), INT32_MAX);
5571 		events_info->pageins = (int32_t) MIN(counter_load(&task->pageins), INT32_MAX);
5572 		events_info->cow_faults = (int32_t) MIN(counter_load(&task->cow_faults), INT32_MAX);
5573 		events_info->messages_sent = (int32_t) MIN(counter_load(&task->messages_sent), INT32_MAX);
5574 		events_info->messages_received = (int32_t) MIN(counter_load(&task->messages_received), INT32_MAX);
5575 
5576 		n_syscalls_mach = task->syscalls_mach;
5577 		n_syscalls_unix = task->syscalls_unix;
5578 		n_csw = task->c_switch;
5579 
5580 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
5581 			n_csw           += thread->c_switch;
5582 			n_syscalls_mach += thread->syscalls_mach;
5583 			n_syscalls_unix += thread->syscalls_unix;
5584 		}
5585 
5586 		events_info->syscalls_mach = (int32_t) MIN(n_syscalls_mach, INT32_MAX);
5587 		events_info->syscalls_unix = (int32_t) MIN(n_syscalls_unix, INT32_MAX);
5588 		events_info->csw = (int32_t) MIN(n_csw, INT32_MAX);
5589 
5590 		*task_info_count = TASK_EVENTS_INFO_COUNT;
5591 		break;
5592 	}
5593 	case TASK_AFFINITY_TAG_INFO:
5594 	{
5595 		if (*task_info_count < TASK_AFFINITY_TAG_INFO_COUNT) {
5596 			error = KERN_INVALID_ARGUMENT;
5597 			break;
5598 		}
5599 
5600 		error = task_affinity_info(task, task_info_out, task_info_count);
5601 		break;
5602 	}
5603 	case TASK_POWER_INFO:
5604 	{
5605 		if (*task_info_count < TASK_POWER_INFO_COUNT) {
5606 			error = KERN_INVALID_ARGUMENT;
5607 			break;
5608 		}
5609 
5610 		task_power_info_locked(task, (task_power_info_t)task_info_out, NULL, NULL, NULL);
5611 		break;
5612 	}
5613 
5614 	case TASK_POWER_INFO_V2:
5615 	{
5616 		if (*task_info_count < TASK_POWER_INFO_V2_COUNT_OLD) {
5617 			error = KERN_INVALID_ARGUMENT;
5618 			break;
5619 		}
5620 		task_power_info_v2_t tpiv2 = (task_power_info_v2_t) task_info_out;
5621 		task_power_info_locked(task, &tpiv2->cpu_energy, &tpiv2->gpu_energy, tpiv2, NULL);
5622 		break;
5623 	}
5624 
5625 	case TASK_VM_INFO:
5626 	case TASK_VM_INFO_PURGEABLE:
5627 	{
5628 		task_vm_info_t          vm_info;
5629 		vm_map_t                map;
5630 		ledger_amount_t         tmp_amount;
5631 
5632 		struct proc *p;
5633 		uint32_t platform, sdk;
5634 		p = current_proc();
5635 		platform = proc_platform(p);
5636 		sdk = proc_sdk(p);
5637 		if (original_task_info_count > TASK_VM_INFO_COUNT) {
5638 			/*
5639 			 * Some iOS apps pass an incorrect value for
5640 			 * task_info_count, expressed in number of bytes
5641 			 * instead of number of "natural_t" elements, which
5642 			 * can lead to binary compatibility issues (including
5643 			 * stack corruption) when the data structure is
5644 			 * expanded in the future.
5645 			 * Let's make this potential issue visible by
5646 			 * logging about it...
5647 			 */
5648 			printf("%s:%d %d[%s] task_info(flavor=%d) possibly invalid "
5649 			    "task_info_count=%d > TASK_VM_INFO_COUNT=%d platform %d sdk "
5650 			    "%d.%d.%d - please use TASK_VM_INFO_COUNT.\n",
5651 			    __FUNCTION__, __LINE__, proc_pid(p), proc_name_address(p),
5652 			    flavor, original_task_info_count, TASK_VM_INFO_COUNT,
5653 			    platform, (sdk >> 16), ((sdk >> 8) & 0xff), (sdk & 0xff));
5654 			DTRACE_VM4(suspicious_task_vm_info_count,
5655 			    mach_msg_type_number_t, original_task_info_count,
5656 			    mach_msg_type_number_t, TASK_VM_INFO_COUNT,
5657 			    uint32_t, platform,
5658 			    uint32_t, sdk);
5659 		}
5660 #if __arm64__
5661 		if (original_task_info_count > TASK_VM_INFO_REV2_COUNT &&
5662 		    platform == PLATFORM_IOS &&
5663 		    sdk != 0 &&
5664 		    (sdk >> 16) <= 12) {
5665 			/*
5666 			 * Some iOS apps pass an incorrect value for
5667 			 * task_info_count, expressed in number of bytes
5668 			 * instead of number of "natural_t" elements.
5669 			 * For the sake of backwards binary compatibility
5670 			 * for apps built with an iOS12 or older SDK and using
5671 			 * the "rev2" data structure, let's fix task_info_count
5672 			 * for them, to avoid stomping past the actual end
5673 			 * of their buffer.
5674 			 */
5675 #if DEVELOPMENT || DEBUG
5676 			printf("%s:%d %d[%s] rdar://49484582 task_info_count %d -> %d "
5677 			    "platform %d sdk %d.%d.%d\n", __FUNCTION__, __LINE__, proc_pid(p),
5678 			    proc_name_address(p), original_task_info_count,
5679 			    TASK_VM_INFO_REV2_COUNT, platform, (sdk >> 16),
5680 			    ((sdk >> 8) & 0xff), (sdk & 0xff));
5681 #endif /* DEVELOPMENT || DEBUG */
5682 			DTRACE_VM4(workaround_task_vm_info_count,
5683 			    mach_msg_type_number_t, original_task_info_count,
5684 			    mach_msg_type_number_t, TASK_VM_INFO_REV2_COUNT,
5685 			    uint32_t, platform,
5686 			    uint32_t, sdk);
5687 			original_task_info_count = TASK_VM_INFO_REV2_COUNT;
5688 			*task_info_count = original_task_info_count;
5689 		}
5690 		if (original_task_info_count > TASK_VM_INFO_REV5_COUNT &&
5691 		    platform == PLATFORM_IOS &&
5692 		    sdk != 0 &&
5693 		    (sdk >> 16) <= 15) {
5694 			/*
5695 			 * Some iOS apps pass an incorrect value for
5696 			 * task_info_count, expressed in number of bytes
5697 			 * instead of number of "natural_t" elements.
5698 			 */
5699 			printf("%s:%d %d[%s] task_info_count=%d > TASK_VM_INFO_COUNT=%d "
5700 			    "platform %d sdk %d.%d.%d\n", __FUNCTION__, __LINE__, proc_pid(p),
5701 			    proc_name_address(p), original_task_info_count,
5702 			    TASK_VM_INFO_REV5_COUNT, platform, (sdk >> 16),
5703 			    ((sdk >> 8) & 0xff), (sdk & 0xff));
5704 			DTRACE_VM4(workaround_task_vm_info_count,
5705 			    mach_msg_type_number_t, original_task_info_count,
5706 			    mach_msg_type_number_t, TASK_VM_INFO_REV5_COUNT,
5707 			    uint32_t, platform,
5708 			    uint32_t, sdk);
5709 #if DEVELOPMENT || DEBUG
5710 			/*
5711 			 * For the sake of internal builds livability,
5712 			 * work around this user-space bug by capping the
5713 			 * buffer's size to what it was with the iOS15 SDK.
5714 			 */
5715 			original_task_info_count = TASK_VM_INFO_REV5_COUNT;
5716 			*task_info_count = original_task_info_count;
5717 #endif /* DEVELOPMENT || DEBUG */
5718 		}
5719 #endif /* __arm64__ */
5720 
5721 		if (*task_info_count < TASK_VM_INFO_REV0_COUNT) {
5722 			error = KERN_INVALID_ARGUMENT;
5723 			break;
5724 		}
5725 
5726 		vm_info = (task_vm_info_t)task_info_out;
5727 
5728 		/*
5729 		 * Do not hold both the task and map locks,
5730 		 * so convert the task lock into a map reference,
5731 		 * drop the task lock, then lock the map.
5732 		 */
5733 		if (is_kernel_task) {
5734 			map = kernel_map;
5735 			task_unlock(task);
5736 			/* no lock, no reference */
5737 		} else {
5738 			map = task->map;
5739 			vm_map_reference(map);
5740 			task_unlock(task);
5741 			vm_map_lock_read(map);
5742 		}
5743 
5744 		vm_info->virtual_size = (typeof(vm_info->virtual_size))vm_map_adjusted_size(map);
5745 		vm_info->region_count = map->hdr.nentries;
5746 		vm_info->page_size = vm_map_page_size(map);
5747 
5748 		ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &vm_info->resident_size);
5749 		ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &vm_info->resident_size_peak);
5750 
5751 		vm_info->device = 0;
5752 		vm_info->device_peak = 0;
5753 		ledger_get_balance(task->ledger, task_ledgers.external, (ledger_amount_t *) &vm_info->external);
5754 		ledger_get_lifetime_max(task->ledger, task_ledgers.external, (ledger_amount_t *) &vm_info->external_peak);
5755 		ledger_get_balance(task->ledger, task_ledgers.internal, (ledger_amount_t *) &vm_info->internal);
5756 		ledger_get_lifetime_max(task->ledger, task_ledgers.internal, (ledger_amount_t *) &vm_info->internal_peak);
5757 		ledger_get_balance(task->ledger, task_ledgers.reusable, (ledger_amount_t *) &vm_info->reusable);
5758 		ledger_get_lifetime_max(task->ledger, task_ledgers.reusable, (ledger_amount_t *) &vm_info->reusable_peak);
5759 		ledger_get_balance(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t*) &vm_info->compressed);
5760 		ledger_get_lifetime_max(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t*) &vm_info->compressed_peak);
5761 		ledger_get_entries(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t*) &vm_info->compressed_lifetime, &tmp_amount);
5762 
5763 		vm_info->purgeable_volatile_pmap = 0;
5764 		vm_info->purgeable_volatile_resident = 0;
5765 		vm_info->purgeable_volatile_virtual = 0;
5766 		if (is_kernel_task) {
5767 			/*
5768 			 * We do not maintain the detailed stats for the
5769 			 * kernel_pmap, so just count everything as
5770 			 * "internal"...
5771 			 */
5772 			vm_info->internal = vm_info->resident_size;
5773 			/*
5774 			 * ... but since the memory held by the VM compressor
5775 			 * in the kernel address space ought to be attributed
5776 			 * to user-space tasks, we subtract it from "internal"
5777 			 * to give memory reporting tools a more accurate idea
5778 			 * of what the kernel itself is actually using, instead
5779 			 * of making it look like the kernel is leaking memory
5780 			 * when the system is under memory pressure.
5781 			 */
5782 			vm_info->internal -= (VM_PAGE_COMPRESSOR_COUNT *
5783 			    PAGE_SIZE);
5784 		} else {
5785 			mach_vm_size_t  volatile_virtual_size;
5786 			mach_vm_size_t  volatile_resident_size;
5787 			mach_vm_size_t  volatile_compressed_size;
5788 			mach_vm_size_t  volatile_pmap_size;
5789 			mach_vm_size_t  volatile_compressed_pmap_size;
5790 			kern_return_t   kr;
5791 
5792 			if (flavor == TASK_VM_INFO_PURGEABLE) {
5793 				kr = vm_map_query_volatile(
5794 					map,
5795 					&volatile_virtual_size,
5796 					&volatile_resident_size,
5797 					&volatile_compressed_size,
5798 					&volatile_pmap_size,
5799 					&volatile_compressed_pmap_size);
5800 				if (kr == KERN_SUCCESS) {
5801 					vm_info->purgeable_volatile_pmap =
5802 					    volatile_pmap_size;
5803 					if (radar_20146450) {
5804 						vm_info->compressed -=
5805 						    volatile_compressed_pmap_size;
5806 					}
5807 					vm_info->purgeable_volatile_resident =
5808 					    volatile_resident_size;
5809 					vm_info->purgeable_volatile_virtual =
5810 					    volatile_virtual_size;
5811 				}
5812 			}
5813 		}
5814 		*task_info_count = TASK_VM_INFO_REV0_COUNT;
5815 
5816 		if (original_task_info_count >= TASK_VM_INFO_REV2_COUNT) {
5817 			/* must be captured while we still have the map lock */
5818 			vm_info->min_address = map->min_offset;
5819 			vm_info->max_address = map->max_offset;
5820 		}
5821 
5822 		/*
5823 		 * Done with vm map things, can drop the map lock and reference,
5824 		 * and take the task lock back.
5825 		 *
5826 		 * Re-validate that the task didn't die on us.
5827 		 */
5828 		if (!is_kernel_task) {
5829 			vm_map_unlock_read(map);
5830 			vm_map_deallocate(map);
5831 		}
5832 		map = VM_MAP_NULL;
5833 
5834 		task_lock(task);
5835 
5836 		if ((task != current_task()) && (!task->active)) {
5837 			error = KERN_INVALID_ARGUMENT;
5838 			break;
5839 		}
5840 
5841 		if (original_task_info_count >= TASK_VM_INFO_REV1_COUNT) {
5842 			vm_info->phys_footprint =
5843 			    (mach_vm_size_t) get_task_phys_footprint(task);
5844 			*task_info_count = TASK_VM_INFO_REV1_COUNT;
5845 		}
5846 		if (original_task_info_count >= TASK_VM_INFO_REV2_COUNT) {
5847 			/* data was captured above */
5848 			*task_info_count = TASK_VM_INFO_REV2_COUNT;
5849 		}
5850 
5851 		if (original_task_info_count >= TASK_VM_INFO_REV3_COUNT) {
5852 			ledger_get_lifetime_max(task->ledger,
5853 			    task_ledgers.phys_footprint,
5854 			    &vm_info->ledger_phys_footprint_peak);
5855 			ledger_get_balance(task->ledger,
5856 			    task_ledgers.purgeable_nonvolatile,
5857 			    &vm_info->ledger_purgeable_nonvolatile);
5858 			ledger_get_balance(task->ledger,
5859 			    task_ledgers.purgeable_nonvolatile_compressed,
5860 			    &vm_info->ledger_purgeable_novolatile_compressed);
5861 			ledger_get_balance(task->ledger,
5862 			    task_ledgers.purgeable_volatile,
5863 			    &vm_info->ledger_purgeable_volatile);
5864 			ledger_get_balance(task->ledger,
5865 			    task_ledgers.purgeable_volatile_compressed,
5866 			    &vm_info->ledger_purgeable_volatile_compressed);
5867 			ledger_get_balance(task->ledger,
5868 			    task_ledgers.network_nonvolatile,
5869 			    &vm_info->ledger_tag_network_nonvolatile);
5870 			ledger_get_balance(task->ledger,
5871 			    task_ledgers.network_nonvolatile_compressed,
5872 			    &vm_info->ledger_tag_network_nonvolatile_compressed);
5873 			ledger_get_balance(task->ledger,
5874 			    task_ledgers.network_volatile,
5875 			    &vm_info->ledger_tag_network_volatile);
5876 			ledger_get_balance(task->ledger,
5877 			    task_ledgers.network_volatile_compressed,
5878 			    &vm_info->ledger_tag_network_volatile_compressed);
5879 			ledger_get_balance(task->ledger,
5880 			    task_ledgers.media_footprint,
5881 			    &vm_info->ledger_tag_media_footprint);
5882 			ledger_get_balance(task->ledger,
5883 			    task_ledgers.media_footprint_compressed,
5884 			    &vm_info->ledger_tag_media_footprint_compressed);
5885 			ledger_get_balance(task->ledger,
5886 			    task_ledgers.media_nofootprint,
5887 			    &vm_info->ledger_tag_media_nofootprint);
5888 			ledger_get_balance(task->ledger,
5889 			    task_ledgers.media_nofootprint_compressed,
5890 			    &vm_info->ledger_tag_media_nofootprint_compressed);
5891 			ledger_get_balance(task->ledger,
5892 			    task_ledgers.graphics_footprint,
5893 			    &vm_info->ledger_tag_graphics_footprint);
5894 			ledger_get_balance(task->ledger,
5895 			    task_ledgers.graphics_footprint_compressed,
5896 			    &vm_info->ledger_tag_graphics_footprint_compressed);
5897 			ledger_get_balance(task->ledger,
5898 			    task_ledgers.graphics_nofootprint,
5899 			    &vm_info->ledger_tag_graphics_nofootprint);
5900 			ledger_get_balance(task->ledger,
5901 			    task_ledgers.graphics_nofootprint_compressed,
5902 			    &vm_info->ledger_tag_graphics_nofootprint_compressed);
5903 			ledger_get_balance(task->ledger,
5904 			    task_ledgers.neural_footprint,
5905 			    &vm_info->ledger_tag_neural_footprint);
5906 			ledger_get_balance(task->ledger,
5907 			    task_ledgers.neural_footprint_compressed,
5908 			    &vm_info->ledger_tag_neural_footprint_compressed);
5909 			ledger_get_balance(task->ledger,
5910 			    task_ledgers.neural_nofootprint,
5911 			    &vm_info->ledger_tag_neural_nofootprint);
5912 			ledger_get_balance(task->ledger,
5913 			    task_ledgers.neural_nofootprint_compressed,
5914 			    &vm_info->ledger_tag_neural_nofootprint_compressed);
5915 			*task_info_count = TASK_VM_INFO_REV3_COUNT;
5916 		}
5917 		if (original_task_info_count >= TASK_VM_INFO_REV4_COUNT) {
5918 			if (get_bsdtask_info(task)) {
5919 				vm_info->limit_bytes_remaining =
5920 				    memorystatus_available_memory_internal(get_bsdtask_info(task));
5921 			} else {
5922 				vm_info->limit_bytes_remaining = 0;
5923 			}
5924 			*task_info_count = TASK_VM_INFO_REV4_COUNT;
5925 		}
5926 		if (original_task_info_count >= TASK_VM_INFO_REV5_COUNT) {
5927 			thread_t thread;
5928 			uint64_t total = task->decompressions;
5929 			queue_iterate(&task->threads, thread, thread_t, task_threads) {
5930 				total += thread->decompressions;
5931 			}
5932 			vm_info->decompressions = (int32_t) MIN(total, INT32_MAX);
5933 			*task_info_count = TASK_VM_INFO_REV5_COUNT;
5934 		}
5935 		if (original_task_info_count >= TASK_VM_INFO_REV6_COUNT) {
5936 			ledger_get_balance(task->ledger, task_ledgers.swapins,
5937 			    &vm_info->ledger_swapins);
5938 			*task_info_count = TASK_VM_INFO_REV6_COUNT;
5939 		}
5940 
5941 		break;
5942 	}
5943 
5944 	case TASK_WAIT_STATE_INFO:
5945 	{
5946 		/*
5947 		 * Deprecated flavor. Currently allowing some results until all users
5948 		 * stop calling it. The results may not be accurate.
5949 		 */
5950 		task_wait_state_info_t  wait_state_info;
5951 		uint64_t total_sfi_ledger_val = 0;
5952 
5953 		if (*task_info_count < TASK_WAIT_STATE_INFO_COUNT) {
5954 			error = KERN_INVALID_ARGUMENT;
5955 			break;
5956 		}
5957 
5958 		wait_state_info = (task_wait_state_info_t) task_info_out;
5959 
5960 		wait_state_info->total_wait_state_time = 0;
5961 		bzero(wait_state_info->_reserved, sizeof(wait_state_info->_reserved));
5962 
5963 #if CONFIG_SCHED_SFI
5964 		int i, prev_lentry = -1;
5965 		int64_t  val_credit, val_debit;
5966 
5967 		for (i = 0; i < MAX_SFI_CLASS_ID; i++) {
5968 			val_credit = 0;
5969 			/*
5970 			 * checking with prev_lentry != entry ensures adjacent classes
5971 			 * which share the same ledger do not add wait times twice.
5972 			 * Note: Use ledger() call to get data for each individual sfi class.
5973 			 */
5974 			if (prev_lentry != task_ledgers.sfi_wait_times[i] &&
5975 			    KERN_SUCCESS == ledger_get_entries(task->ledger,
5976 			    task_ledgers.sfi_wait_times[i], &val_credit, &val_debit)) {
5977 				total_sfi_ledger_val += val_credit;
5978 			}
5979 			prev_lentry = task_ledgers.sfi_wait_times[i];
5980 		}
5981 
5982 #endif /* CONFIG_SCHED_SFI */
5983 		wait_state_info->total_wait_sfi_state_time = total_sfi_ledger_val;
5984 		*task_info_count = TASK_WAIT_STATE_INFO_COUNT;
5985 
5986 		break;
5987 	}
5988 	case TASK_VM_INFO_PURGEABLE_ACCOUNT:
5989 	{
5990 #if DEVELOPMENT || DEBUG
5991 		pvm_account_info_t      acnt_info;
5992 
5993 		if (*task_info_count < PVM_ACCOUNT_INFO_COUNT) {
5994 			error = KERN_INVALID_ARGUMENT;
5995 			break;
5996 		}
5997 
5998 		if (task_info_out == NULL) {
5999 			error = KERN_INVALID_ARGUMENT;
6000 			break;
6001 		}
6002 
6003 		acnt_info = (pvm_account_info_t) task_info_out;
6004 
6005 		error = vm_purgeable_account(task, acnt_info);
6006 
6007 		*task_info_count = PVM_ACCOUNT_INFO_COUNT;
6008 
6009 		break;
6010 #else /* DEVELOPMENT || DEBUG */
6011 		error = KERN_NOT_SUPPORTED;
6012 		break;
6013 #endif /* DEVELOPMENT || DEBUG */
6014 	}
6015 	case TASK_FLAGS_INFO:
6016 	{
6017 		task_flags_info_t               flags_info;
6018 
6019 		if (*task_info_count < TASK_FLAGS_INFO_COUNT) {
6020 			error = KERN_INVALID_ARGUMENT;
6021 			break;
6022 		}
6023 
6024 		flags_info = (task_flags_info_t)task_info_out;
6025 
6026 		/* only publish the 64-bit flag of the task */
6027 		flags_info->flags = task->t_flags & (TF_64B_ADDR | TF_64B_DATA);
6028 
6029 		*task_info_count = TASK_FLAGS_INFO_COUNT;
6030 		break;
6031 	}
6032 
6033 	case TASK_DEBUG_INFO_INTERNAL:
6034 	{
6035 #if DEVELOPMENT || DEBUG
6036 		task_debug_info_internal_t dbg_info;
6037 		ipc_space_t space = task->itk_space;
6038 		if (*task_info_count < TASK_DEBUG_INFO_INTERNAL_COUNT) {
6039 			error = KERN_NOT_SUPPORTED;
6040 			break;
6041 		}
6042 
6043 		if (task_info_out == NULL) {
6044 			error = KERN_INVALID_ARGUMENT;
6045 			break;
6046 		}
6047 		dbg_info = (task_debug_info_internal_t) task_info_out;
6048 		dbg_info->ipc_space_size = 0;
6049 
6050 		if (space) {
6051 			smr_ipc_enter();
6052 			ipc_entry_table_t table = smr_entered_load(&space->is_table);
6053 			if (table) {
6054 				dbg_info->ipc_space_size =
6055 				    ipc_entry_table_count(table);
6056 			}
6057 			smr_ipc_leave();
6058 		}
6059 
6060 		dbg_info->suspend_count = task->suspend_count;
6061 
6062 		error = KERN_SUCCESS;
6063 		*task_info_count = TASK_DEBUG_INFO_INTERNAL_COUNT;
6064 		break;
6065 #else /* DEVELOPMENT || DEBUG */
6066 		error = KERN_NOT_SUPPORTED;
6067 		break;
6068 #endif /* DEVELOPMENT || DEBUG */
6069 	}
6070 	case TASK_SUSPEND_STATS_INFO:
6071 	{
6072 #if CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG)
6073 		if (*task_info_count < TASK_SUSPEND_STATS_INFO_COUNT || task_info_out == NULL) {
6074 			error = KERN_INVALID_ARGUMENT;
6075 			break;
6076 		}
6077 		error = _task_get_suspend_stats_locked(task, (task_suspend_stats_t)task_info_out);
6078 		*task_info_count = TASK_SUSPEND_STATS_INFO_COUNT;
6079 		break;
6080 #else /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6081 		error = KERN_NOT_SUPPORTED;
6082 		break;
6083 #endif /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6084 	}
6085 	case TASK_SUSPEND_SOURCES_INFO:
6086 	{
6087 #if CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG)
6088 		if (*task_info_count < TASK_SUSPEND_SOURCES_INFO_COUNT || task_info_out == NULL) {
6089 			error = KERN_INVALID_ARGUMENT;
6090 			break;
6091 		}
6092 		error = _task_get_suspend_sources_locked(task, (task_suspend_source_t)task_info_out);
6093 		*task_info_count = TASK_SUSPEND_SOURCES_INFO_COUNT;
6094 		break;
6095 #else /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6096 		error = KERN_NOT_SUPPORTED;
6097 		break;
6098 #endif /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6099 	}
6100 	default:
6101 		error = KERN_INVALID_ARGUMENT;
6102 	}
6103 
6104 	task_unlock(task);
6105 	return error;
6106 }
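
/*
 * Illustrative user-space caller (not part of this kernel file): the
 * count argument is in/out, so callers pass the capacity they allocated
 * and read back how many natural_t elements were filled in.
 */
#if 0 /* user-space example, builds against <mach/mach.h> */
static kern_return_t
query_basic_info_example(void)
{
	mach_task_basic_info_data_t info;
	mach_msg_type_number_t count = MACH_TASK_BASIC_INFO_COUNT;

	return task_info(mach_task_self(), MACH_TASK_BASIC_INFO,
	           (task_info_t)&info, &count);
}
#endif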
6107 
6108 /*
6109  * task_info_from_user
6110  *
6111  * When calling task_info from user space,
6112  * this function is executed on the MIG server side
6113  * instead of calling directly into task_info.
6114  * This makes it possible to perform additional security
6115  * checks on task_port.
6116  *
6117  * In the case of TASK_DYLD_INFO, we require the more
6118  * privileged task_read_port, not the less-privileged task_name_port.
6119  *
6120  */
6121 kern_return_t
6122 task_info_from_user(
6123 	mach_port_t             task_port,
6124 	task_flavor_t           flavor,
6125 	task_info_t             task_info_out,
6126 	mach_msg_type_number_t  *task_info_count)
6127 {
6128 	task_t task;
6129 	kern_return_t ret;
6130 
6131 	if (flavor == TASK_DYLD_INFO) {
6132 		task = convert_port_to_task_read(task_port);
6133 	} else {
6134 		task = convert_port_to_task_name(task_port);
6135 	}
6136 
6137 	ret = task_info(task, flavor, task_info_out, task_info_count);
6138 
6139 	task_deallocate(task);
6140 
6141 	return ret;
6142 }
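
/*
 * Illustrative user-space caller (not part of this kernel file):
 * TASK_DYLD_INFO is routed through task_info_from_user() and needs at
 * least a read port; a task's own control port qualifies.
 */
#if 0 /* user-space example, builds against <mach/mach.h> */
static kern_return_t
query_dyld_info_example(void)
{
	task_dyld_info_data_t dyld_info;
	mach_msg_type_number_t count = TASK_DYLD_INFO_COUNT;

	return task_info(mach_task_self(), TASK_DYLD_INFO,
	           (task_info_t)&dyld_info, &count);
}
#endif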
6143 
6144 /*
6145  * Routine: task_dyld_process_info_update_helper
6146  *
6147  * Release send rights in release_ports.
6148  *
6149  * If no active ports are found in the task's dyld notifier array, unset the magic value
6150  * in user space to indicate so.
6151  *
6152  * Condition:
6153  *      task's itk_lock is locked, and is unlocked upon return.
6154  *      Global g_dyldinfo_mtx is locked, and is unlocked upon return.
6155  */
6156 void
6157 task_dyld_process_info_update_helper(
6158 	task_t                  task,
6159 	size_t                  active_count,
6160 	vm_map_address_t        magic_addr,    /* a userspace address */
6161 	ipc_port_t             *release_ports,
6162 	size_t                  release_count)
6163 {
6164 	void *notifiers_ptr = NULL;
6165 
6166 	assert(release_count <= DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT);
6167 
6168 	if (active_count == 0) {
6169 		assert(task->itk_dyld_notify != NULL);
6170 		notifiers_ptr = task->itk_dyld_notify;
6171 		task->itk_dyld_notify = NULL;
6172 		itk_unlock(task);
6173 
6174 		kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
6175 		(void)copyoutmap_atomic32(task->map, MACH_PORT_NULL, magic_addr); /* unset magic */
6176 	} else {
6177 		itk_unlock(task);
6178 		(void)copyoutmap_atomic32(task->map, (mach_port_name_t)DYLD_PROCESS_INFO_NOTIFY_MAGIC,
6179 		    magic_addr);     /* reset magic */
6180 	}
6181 
6182 	lck_mtx_unlock(&g_dyldinfo_mtx);
6183 
6184 	for (size_t i = 0; i < release_count; i++) {
6185 		ipc_port_release_send(release_ports[i]);
6186 	}
6187 }
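
/*
 * Illustrative sketch (not part of the original source): the locking
 * contract of the helper above.  Both locks are taken by the caller and
 * are dropped inside the helper before it returns.
 */
#if 0 /* example only */
static void
task_dyld_update_call_pattern_example(task_t task, vm_map_address_t magic_addr)
{
	ipc_port_t release_ports[DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT];
	size_t release_count = 0, active_count = 0;

	lck_mtx_lock(&g_dyldinfo_mtx);
	itk_lock(task);

	/*
	 * ... scan task->itk_dyld_notify here, moving dead rights into
	 * release_ports (bumping release_count) and counting the live
	 * slots into active_count ...
	 */

	task_dyld_process_info_update_helper(task, active_count, magic_addr,
	    release_ports, release_count);
	/* itk_lock and g_dyldinfo_mtx are unlocked at this point */
}
#endif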
6188 
6189 /*
6190  * Routine: task_dyld_process_info_notify_register
6191  *
6192  * Insert a send right to target task's itk_dyld_notify array. Allocate kernel
6193  * memory for the array if it's the first port to be registered. Also clean up
6194  * any dead rights found in the array.
6195  *
6196  * Consumes sright if returns KERN_SUCCESS, otherwise MIG will destroy it.
6197  *
6198  * Args:
6199  *     task:   Target task for the registration.
6200  *     sright: A send right.
6201  *
6202  * Returns:
6203  *     KERN_SUCCESS: Registration succeeded.
6204  *     KERN_INVALID_TASK: task is invalid.
6205  *     KERN_INVALID_RIGHT: sright is invalid.
6206  *     KERN_DENIED: Security policy denied this call.
6207  *     KERN_RESOURCE_SHORTAGE: Kernel memory allocation failed.
6208  *     KERN_NO_SPACE: No available notifier port slot left for this task.
6209  *     KERN_RIGHT_EXISTS: The notifier port is already registered and active.
6210  *
6211  *     Other error code see task_info().
6212  *
6213  * See Also:
6214  *     task_dyld_process_info_notify_get_trap() in mach_kernelrpc.c
6215  */
6216 kern_return_t
6217 task_dyld_process_info_notify_register(
6218 	task_t                  task,
6219 	ipc_port_t              sright)
6220 {
6221 	struct task_dyld_info dyld_info;
6222 	mach_msg_type_number_t info_count = TASK_DYLD_INFO_COUNT;
6223 	ipc_port_t release_ports[DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT];
6224 	uint32_t release_count = 0, active_count = 0;
6225 	mach_vm_address_t ports_addr; /* a user space address */
6226 	kern_return_t kr;
6227 	boolean_t right_exists = false;
6228 	ipc_port_t *notifiers_ptr = NULL;
6229 	ipc_port_t *portp;
6230 
6231 	if (task == TASK_NULL || task == kernel_task) {
6232 		return KERN_INVALID_TASK;
6233 	}
6234 
6235 	if (!IP_VALID(sright)) {
6236 		return KERN_INVALID_RIGHT;
6237 	}
6238 
6239 #if CONFIG_MACF
6240 	if (mac_task_check_dyld_process_info_notify_register()) {
6241 		return KERN_DENIED;
6242 	}
6243 #endif
6244 
6245 	kr = task_info(task, TASK_DYLD_INFO, (task_info_t)&dyld_info, &info_count);
6246 	if (kr) {
6247 		return kr;
6248 	}
6249 
6250 	if (dyld_info.all_image_info_format == TASK_DYLD_ALL_IMAGE_INFO_32) {
6251 		ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6252 		    offsetof(struct user32_dyld_all_image_infos, notifyMachPorts));
6253 	} else {
6254 		ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6255 		    offsetof(struct user64_dyld_all_image_infos, notifyMachPorts));
6256 	}
6257 
6258 	if (task->itk_dyld_notify == NULL) {
6259 		notifiers_ptr = kalloc_type(ipc_port_t,
6260 		    DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT,
6261 		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
6262 	}
6263 
6264 	lck_mtx_lock(&g_dyldinfo_mtx);
6265 	itk_lock(task);
6266 
6267 	if (task->itk_dyld_notify == NULL) {
6268 		task->itk_dyld_notify = notifiers_ptr;
6269 		notifiers_ptr = NULL;
6270 	}
6271 
6272 	assert(task->itk_dyld_notify != NULL);
6273 	/* First pass: clear dead names and check for duplicate registration */
6274 	for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
6275 		portp = &task->itk_dyld_notify[slot];
6276 		if (*portp != IPC_PORT_NULL && !ip_active(*portp)) {
6277 			release_ports[release_count++] = *portp;
6278 			*portp = IPC_PORT_NULL;
6279 		} else if (*portp == sright) {
6280 			/* the port is already registered and is active */
6281 			right_exists = true;
6282 		}
6283 
6284 		if (*portp != IPC_PORT_NULL) {
6285 			active_count++;
6286 		}
6287 	}
6288 
6289 	if (right_exists) {
6290 		/* skip second pass */
6291 		kr = KERN_RIGHT_EXISTS;
6292 		goto out;
6293 	}
6294 
6295 	/* Second pass: register the port */
6296 	kr = KERN_NO_SPACE;
6297 	for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
6298 		portp = &task->itk_dyld_notify[slot];
6299 		if (*portp == IPC_PORT_NULL) {
6300 			*portp = sright;
6301 			active_count++;
6302 			kr = KERN_SUCCESS;
6303 			break;
6304 		}
6305 	}
6306 
6307 out:
6308 	assert(active_count > 0);
6309 
6310 	task_dyld_process_info_update_helper(task, active_count,
6311 	    (vm_map_address_t)ports_addr, release_ports, release_count);
6312 	/* itk_lock, g_dyldinfo_mtx are unlocked upon return */
6313 
6314 	kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
6315 
6316 	return kr;
6317 }
6318 
6319 /*
6320  * Routine: task_dyld_process_info_notify_deregister
6321  *
6322  * Remove a send right in target task's itk_dyld_notify array matching the receive
6323  * right name passed in. Deallocate kernel memory for the array if it's the last port to
6324  * be deregistered, or all ports have died. Also clean up any dead rights found in the array.
6325  *
6326  * Does not consume any reference.
6327  *
6328  * Args:
6329  *     task: Target task for the deregistration.
6330  *     rcv_name: The name denoting the receive right in caller's space.
6331  *
6332  * Returns:
6333  *     KERN_SUCCESS: A matching entry was found and deregistration succeeded.
6334  *     KERN_INVALID_TASK: task is invalid.
6335  *     KERN_INVALID_NAME: name is invalid.
6336  *     KERN_DENIED: Security policy denied this call.
6337  *     KERN_FAILURE: A matching entry is not found.
6338  *     KERN_INVALID_RIGHT: The name passed in does not represent a valid rcv right.
6339  *
6340  *     Other error code see task_info().
6341  *
6342  * See Also:
6343  *     task_dyld_process_info_notify_get_trap() in mach_kernelrpc.c
6344  */
6345 kern_return_t
6346 task_dyld_process_info_notify_deregister(
6347 	task_t                  task,
6348 	mach_port_name_t        rcv_name)
6349 {
6350 	struct task_dyld_info dyld_info;
6351 	mach_msg_type_number_t info_count = TASK_DYLD_INFO_COUNT;
6352 	ipc_port_t release_ports[DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT];
6353 	uint32_t release_count = 0, active_count = 0;
6354 	boolean_t port_found = false;
6355 	mach_vm_address_t ports_addr; /* a user space address */
6356 	ipc_port_t sright;
6357 	kern_return_t kr;
6358 	ipc_port_t *portp;
6359 
6360 	if (task == TASK_NULL || task == kernel_task) {
6361 		return KERN_INVALID_TASK;
6362 	}
6363 
6364 	if (!MACH_PORT_VALID(rcv_name)) {
6365 		return KERN_INVALID_NAME;
6366 	}
6367 
6368 #if CONFIG_MACF
6369 	if (mac_task_check_dyld_process_info_notify_register()) {
6370 		return KERN_DENIED;
6371 	}
6372 #endif
6373 
6374 	kr = task_info(task, TASK_DYLD_INFO, (task_info_t)&dyld_info, &info_count);
6375 	if (kr) {
6376 		return kr;
6377 	}
6378 
6379 	if (dyld_info.all_image_info_format == TASK_DYLD_ALL_IMAGE_INFO_32) {
6380 		ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6381 		    offsetof(struct user32_dyld_all_image_infos, notifyMachPorts));
6382 	} else {
6383 		ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6384 		    offsetof(struct user64_dyld_all_image_infos, notifyMachPorts));
6385 	}
6386 
6387 	kr = ipc_port_translate_receive(current_space(), rcv_name, &sright); /* does not produce port ref */
6388 	if (kr) {
6389 		return KERN_INVALID_RIGHT;
6390 	}
6391 
6392 	ip_reference(sright);
6393 	ip_mq_unlock(sright);
6394 
6395 	assert(sright != IPC_PORT_NULL);
6396 
6397 	lck_mtx_lock(&g_dyldinfo_mtx);
6398 	itk_lock(task);
6399 
6400 	if (task->itk_dyld_notify == NULL) {
6401 		itk_unlock(task);
6402 		lck_mtx_unlock(&g_dyldinfo_mtx);
6403 		ip_release(sright);
6404 		return KERN_FAILURE;
6405 	}
6406 
6407 	for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
6408 		portp = &task->itk_dyld_notify[slot];
6409 		if (*portp == sright) {
6410 			release_ports[release_count++] = *portp;
6411 			*portp = IPC_PORT_NULL;
6412 			port_found = true;
6413 		} else if ((*portp != IPC_PORT_NULL && !ip_active(*portp))) {
6414 			release_ports[release_count++] = *portp;
6415 			*portp = IPC_PORT_NULL;
6416 		}
6417 
6418 		if (*portp != IPC_PORT_NULL) {
6419 			active_count++;
6420 		}
6421 	}
6422 
6423 	task_dyld_process_info_update_helper(task, active_count,
6424 	    (vm_map_address_t)ports_addr, release_ports, release_count);
6425 	/* itk_lock, g_dyldinfo_mtx are unlocked upon return */
6426 
6427 	ip_release(sright);
6428 
6429 	return port_found ? KERN_SUCCESS : KERN_FAILURE;
6430 }
6431 
6432 /*
6433  *	task_power_info
6434  *
6435  *	Returns power stats for the task.
6436  *	Note: Called with task locked.
6437  */
6438 void
6439 task_power_info_locked(
6440 	task_t                        task,
6441 	task_power_info_t             info,
6442 	gpu_energy_data_t             ginfo,
6443 	task_power_info_v2_t          infov2,
6444 	struct task_power_info_extra *extra_info)
6445 {
6446 	thread_t                thread;
6447 	ledger_amount_t         tmp;
6448 
6449 	uint64_t                runnable_time_sum = 0;
6450 
6451 	task_lock_assert_owned(task);
6452 
6453 	ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
6454 	    (ledger_amount_t *)&info->task_interrupt_wakeups, &tmp);
6455 	ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
6456 	    (ledger_amount_t *)&info->task_platform_idle_wakeups, &tmp);
6457 
6458 	info->task_timer_wakeups_bin_1 = task->task_timer_wakeups_bin_1;
6459 	info->task_timer_wakeups_bin_2 = task->task_timer_wakeups_bin_2;
6460 
6461 	struct recount_usage usage = { 0 };
6462 	struct recount_usage usage_perf = { 0 };
6463 	recount_task_usage_perf_only(task, &usage, &usage_perf);
6464 
6465 	info->total_user = usage.ru_user_time_mach;
6466 	info->total_system = usage.ru_system_time_mach;
6467 	runnable_time_sum = task->total_runnable_time;
6468 
6469 	if (ginfo) {
6470 		ginfo->task_gpu_utilisation = task->task_gpu_ns;
6471 	}
6472 
6473 	if (infov2) {
6474 		infov2->task_ptime = usage_perf.ru_system_time_mach +
6475 		    usage_perf.ru_user_time_mach;
6476 		infov2->task_pset_switches = task->ps_switch;
6477 #if CONFIG_PERVASIVE_ENERGY
6478 		infov2->task_energy = usage.ru_energy_nj;
6479 #endif /* CONFIG_PERVASIVE_ENERGY */
6480 	}
6481 
6482 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
6483 		spl_t x;
6484 
6485 		if (thread->options & TH_OPT_IDLE_THREAD) {
6486 			continue;
6487 		}
6488 
6489 		x = splsched();
6490 		thread_lock(thread);
6491 
6492 		info->task_timer_wakeups_bin_1 += thread->thread_timer_wakeups_bin_1;
6493 		info->task_timer_wakeups_bin_2 += thread->thread_timer_wakeups_bin_2;
6494 
6495 		if (infov2) {
6496 			infov2->task_pset_switches += thread->ps_switch;
6497 		}
6498 
6499 		runnable_time_sum += timer_grab(&thread->runnable_timer);
6500 
6501 		if (ginfo) {
6502 			ginfo->task_gpu_utilisation += ml_gpu_stat(thread);
6503 		}
6504 		thread_unlock(thread);
6505 		splx(x);
6506 	}
6507 
6508 	if (extra_info) {
6509 		extra_info->runnable_time = runnable_time_sum;
6510 #if CONFIG_PERVASIVE_CPI
6511 		extra_info->cycles = usage.ru_cycles;
6512 		extra_info->instructions = usage.ru_instructions;
6513 		extra_info->pcycles = usage_perf.ru_cycles;
6514 		extra_info->pinstructions = usage_perf.ru_instructions;
6515 		extra_info->user_ptime = usage_perf.ru_user_time_mach;
6516 		extra_info->system_ptime = usage_perf.ru_system_time_mach;
6517 #endif // CONFIG_PERVASIVE_CPI
6518 #if CONFIG_PERVASIVE_ENERGY
6519 		extra_info->energy = usage.ru_energy_nj;
6520 		extra_info->penergy = usage_perf.ru_energy_nj;
6521 #endif // CONFIG_PERVASIVE_ENERGY
6522 	}
6523 }
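
/*
 * Illustrative sketch (editor's addition, not from the original source):
 * it shows the locking discipline task_power_info_locked() requires, i.e.
 * the caller holds the task lock across the call.  The function name is
 * hypothetical and task_power_info_data_t is assumed to be the struct
 * behind task_power_info_t.  Guarded out so it does not affect the build.
 */
#if 0 /* example only */
static void
example_collect_power_info(task_t task)
{
	task_power_info_data_t info = { 0 };

	task_lock(task);
	/* GPU, v2, and extra info are optional and skipped here. */
	task_power_info_locked(task, &info, NULL, NULL, NULL);
	task_unlock(task);
}
#endif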
6524 
6525 /*
6526  *	task_gpu_utilisation
6527  *
6528  *	Returns the total GPU time used by all the threads of the task
6529  *	(both dead and alive).
6530  */
6531 uint64_t
6532 task_gpu_utilisation(
6533 	task_t  task)
6534 {
6535 	uint64_t gpu_time = 0;
6536 #if defined(__x86_64__)
6537 	thread_t thread;
6538 
6539 	task_lock(task);
6540 	gpu_time += task->task_gpu_ns;
6541 
6542 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
6543 		spl_t x;
6544 		x = splsched();
6545 		thread_lock(thread);
6546 		gpu_time += ml_gpu_stat(thread);
6547 		thread_unlock(thread);
6548 		splx(x);
6549 	}
6550 
6551 	task_unlock(task);
6552 #else /* defined(__x86_64__) */
6553 	/* silence compiler warning */
6554 	(void)task;
6555 #endif /* defined(__x86_64__) */
6556 	return gpu_time;
6557 }
6558 
6559 /* This function updates the cpu time in the arrays for each
6560  * effective and requested QoS class
6561  */
6562 void
6563 task_update_cpu_time_qos_stats(
6564 	task_t  task,
6565 	uint64_t *eqos_stats,
6566 	uint64_t *rqos_stats)
6567 {
6568 	if (!eqos_stats && !rqos_stats) {
6569 		return;
6570 	}
6571 
6572 	task_lock(task);
6573 	thread_t thread;
6574 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
6575 		if (thread->options & TH_OPT_IDLE_THREAD) {
6576 			continue;
6577 		}
6578 
6579 		thread_update_qos_cpu_time(thread);
6580 	}
6581 
6582 	if (eqos_stats) {
6583 		eqos_stats[THREAD_QOS_DEFAULT] += task->cpu_time_eqos_stats.cpu_time_qos_default;
6584 		eqos_stats[THREAD_QOS_MAINTENANCE] += task->cpu_time_eqos_stats.cpu_time_qos_maintenance;
6585 		eqos_stats[THREAD_QOS_BACKGROUND] += task->cpu_time_eqos_stats.cpu_time_qos_background;
6586 		eqos_stats[THREAD_QOS_UTILITY] += task->cpu_time_eqos_stats.cpu_time_qos_utility;
6587 		eqos_stats[THREAD_QOS_LEGACY] += task->cpu_time_eqos_stats.cpu_time_qos_legacy;
6588 		eqos_stats[THREAD_QOS_USER_INITIATED] += task->cpu_time_eqos_stats.cpu_time_qos_user_initiated;
6589 		eqos_stats[THREAD_QOS_USER_INTERACTIVE] += task->cpu_time_eqos_stats.cpu_time_qos_user_interactive;
6590 	}
6591 
6592 	if (rqos_stats) {
6593 		rqos_stats[THREAD_QOS_DEFAULT] += task->cpu_time_rqos_stats.cpu_time_qos_default;
6594 		rqos_stats[THREAD_QOS_MAINTENANCE] += task->cpu_time_rqos_stats.cpu_time_qos_maintenance;
6595 		rqos_stats[THREAD_QOS_BACKGROUND] += task->cpu_time_rqos_stats.cpu_time_qos_background;
6596 		rqos_stats[THREAD_QOS_UTILITY] += task->cpu_time_rqos_stats.cpu_time_qos_utility;
6597 		rqos_stats[THREAD_QOS_LEGACY] += task->cpu_time_rqos_stats.cpu_time_qos_legacy;
6598 		rqos_stats[THREAD_QOS_USER_INITIATED] += task->cpu_time_rqos_stats.cpu_time_qos_user_initiated;
6599 		rqos_stats[THREAD_QOS_USER_INTERACTIVE] += task->cpu_time_rqos_stats.cpu_time_qos_user_interactive;
6600 	}
6601 
6602 	task_unlock(task);
6603 }
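
/*
 * Illustrative sketch (editor's addition, not from the original source):
 * callers supply arrays indexed by QoS class.  THREAD_QOS_LAST is assumed
 * here as the array bound, matching the QoS indices used above; the
 * function name is hypothetical.  Guarded out of the build.
 */
#if 0 /* example only */
static void
example_collect_qos_time(task_t task)
{
	uint64_t eqos[THREAD_QOS_LAST] = { 0 };
	uint64_t rqos[THREAD_QOS_LAST] = { 0 };

	task_update_cpu_time_qos_stats(task, eqos, rqos);
	/* eqos[THREAD_QOS_UTILITY] now holds effective utility-class CPU time. */
}
#endif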
6604 
6605 kern_return_t
6606 task_purgable_info(
6607 	task_t                  task,
6608 	task_purgable_info_t    *stats)
6609 {
6610 	if (task == TASK_NULL || stats == NULL) {
6611 		return KERN_INVALID_ARGUMENT;
6612 	}
6613 	/* Take task reference */
6614 	task_reference(task);
6615 	vm_purgeable_stats((vm_purgeable_info_t)stats, task);
6616 	/* Drop task reference */
6617 	task_deallocate(task);
6618 	return KERN_SUCCESS;
6619 }
6620 
6621 void
6622 task_vtimer_set(
6623 	task_t          task,
6624 	integer_t       which)
6625 {
6626 	thread_t        thread;
6627 	spl_t           x;
6628 
6629 	task_lock(task);
6630 
6631 	task->vtimers |= which;
6632 
6633 	switch (which) {
6634 	case TASK_VTIMER_USER:
6635 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
6636 			x = splsched();
6637 			thread_lock(thread);
6638 			struct recount_times_mach times = recount_thread_times(thread);
6639 			thread->vtimer_user_save = times.rtm_user;
6640 			thread_unlock(thread);
6641 			splx(x);
6642 		}
6643 		break;
6644 
6645 	case TASK_VTIMER_PROF:
6646 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
6647 			x = splsched();
6648 			thread_lock(thread);
6649 			thread->vtimer_prof_save = recount_thread_time_mach(thread);
6650 			thread_unlock(thread);
6651 			splx(x);
6652 		}
6653 		break;
6654 
6655 	case TASK_VTIMER_RLIM:
6656 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
6657 			x = splsched();
6658 			thread_lock(thread);
6659 			thread->vtimer_rlim_save = recount_thread_time_mach(thread);
6660 			thread_unlock(thread);
6661 			splx(x);
6662 		}
6663 		break;
6664 	}
6665 
6666 	task_unlock(task);
6667 }
6668 
6669 void
6670 task_vtimer_clear(
6671 	task_t          task,
6672 	integer_t       which)
6673 {
6674 	task_lock(task);
6675 
6676 	task->vtimers &= ~which;
6677 
6678 	task_unlock(task);
6679 }
6680 
6681 void
6682 task_vtimer_update(
6683 	__unused
6684 	task_t          task,
6685 	integer_t       which,
6686 	uint32_t        *microsecs)
6687 {
6688 	thread_t        thread = current_thread();
6689 	uint32_t        tdelt = 0;
6690 	clock_sec_t     secs = 0;
6691 	uint64_t        tsum;
6692 
6693 	assert(task == current_task());
6694 
6695 	spl_t s = splsched();
6696 	thread_lock(thread);
6697 
6698 	if ((task->vtimers & which) != (uint32_t)which) {
6699 		thread_unlock(thread);
6700 		splx(s);
6701 		return;
6702 	}
6703 
6704 	switch (which) {
6705 	case TASK_VTIMER_USER:;
6706 		struct recount_times_mach times = recount_thread_times(thread);
6707 		tsum = times.rtm_user;
6708 		tdelt = (uint32_t)(tsum - thread->vtimer_user_save);
6709 		thread->vtimer_user_save = tsum;
6710 		absolutetime_to_microtime(tdelt, &secs, microsecs);
6711 		break;
6712 
6713 	case TASK_VTIMER_PROF:
6714 		tsum = recount_current_thread_time_mach();
6715 		tdelt = (uint32_t)(tsum - thread->vtimer_prof_save);
6716 		absolutetime_to_microtime(tdelt, &secs, microsecs);
6717 		/* if the time delta is smaller than a usec, ignore */
6718 		if (*microsecs != 0) {
6719 			thread->vtimer_prof_save = tsum;
6720 		}
6721 		break;
6722 
6723 	case TASK_VTIMER_RLIM:
6724 		tsum = recount_current_thread_time_mach();
6725 		tdelt = (uint32_t)(tsum - thread->vtimer_rlim_save);
6726 		thread->vtimer_rlim_save = tsum;
6727 		absolutetime_to_microtime(tdelt, &secs, microsecs);
6728 		break;
6729 	}
6730 
6731 	thread_unlock(thread);
6732 	splx(s);
6733 }
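
/*
 * Illustrative sketch (editor's addition, not from the original source):
 * the three vtimer calls are used together.  task_vtimer_set() arms a
 * timer class for every thread in the task, task_vtimer_update() reports
 * the elapsed microseconds on the calling thread, and task_vtimer_clear()
 * disarms it.  The function name is hypothetical; guarded out of the build.
 */
#if 0 /* example only */
static void
example_vtimer_cycle(void)
{
	task_t          task  = current_task();
	uint32_t        usecs = 0;

	task_vtimer_set(task, TASK_VTIMER_USER);
	/* ... later, from a thread of the same task ... */
	task_vtimer_update(task, TASK_VTIMER_USER, &usecs);
	task_vtimer_clear(task, TASK_VTIMER_USER);
}
#endif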
6734 
6735 uint64_t
6736 get_task_dispatchqueue_offset(
6737 	task_t          task)
6738 {
6739 	return task->dispatchqueue_offset;
6740 }
6741 
6742 void
6743 task_synchronizer_destroy_all(task_t task)
6744 {
6745 	/*
6746 	 *  Destroy owned semaphores
6747 	 */
6748 	semaphore_destroy_all(task);
6749 }
6750 
6751 /*
6752  * Install default (machine-dependent) initial thread state
6753  * on the task.  Subsequent thread creation will have this initial
6754  * state set on the thread by machine_thread_inherit_taskwide().
6755  * Flavors and structures are exactly the same as those to thread_set_state()
6756  */
6757 kern_return_t
6758 task_set_state(
6759 	task_t task,
6760 	int flavor,
6761 	thread_state_t state,
6762 	mach_msg_type_number_t state_count)
6763 {
6764 	kern_return_t ret;
6765 
6766 	if (task == TASK_NULL) {
6767 		return KERN_INVALID_ARGUMENT;
6768 	}
6769 
6770 	task_lock(task);
6771 
6772 	if (!task->active) {
6773 		task_unlock(task);
6774 		return KERN_FAILURE;
6775 	}
6776 
6777 	ret = machine_task_set_state(task, flavor, state, state_count);
6778 
6779 	task_unlock(task);
6780 	return ret;
6781 }
6782 
6783 /*
6784  * Examine the default (machine-dependent) initial thread state
6785  * on the task, as set by task_set_state().  Flavors and structures
6786  * are exactly the same as those passed to thread_get_state().
6787  */
6788 kern_return_t
6789 task_get_state(
6790 	task_t  task,
6791 	int     flavor,
6792 	thread_state_t state,
6793 	mach_msg_type_number_t *state_count)
6794 {
6795 	kern_return_t ret;
6796 
6797 	if (task == TASK_NULL) {
6798 		return KERN_INVALID_ARGUMENT;
6799 	}
6800 
6801 	task_lock(task);
6802 
6803 	if (!task->active) {
6804 		task_unlock(task);
6805 		return KERN_FAILURE;
6806 	}
6807 
6808 	ret = machine_task_get_state(task, flavor, state, state_count);
6809 
6810 	task_unlock(task);
6811 	return ret;
6812 }
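
/*
 * Illustrative sketch (editor's addition, not from the original source):
 * task_set_state()/task_get_state() take the same flavors and buffers as
 * thread_set_state()/thread_get_state().  ARM_THREAD_STATE64 is used only
 * as an assumed, architecture-specific example flavor, and the function
 * name is hypothetical.  Guarded out of the build.
 */
#if 0 /* example only */
static kern_return_t
example_state_roundtrip(task_t task, thread_state_t state,
    mach_msg_type_number_t count)
{
	kern_return_t kr;

	kr = task_set_state(task, ARM_THREAD_STATE64, state, count);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	return task_get_state(task, ARM_THREAD_STATE64, state, &count);
}
#endif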
6813 
6814 
6815 static kern_return_t __attribute__((noinline, not_tail_called))
6816 PROC_VIOLATED_GUARD__SEND_EXC_GUARD(
6817 	mach_exception_code_t code,
6818 	mach_exception_subcode_t subcode,
6819 	void *reason,
6820 	boolean_t backtrace_only)
6821 {
6822 #ifdef MACH_BSD
6823 	if (1 == proc_selfpid()) {
6824 		return KERN_NOT_SUPPORTED;              // initproc is immune
6825 	}
6826 #endif
6827 	mach_exception_data_type_t codes[EXCEPTION_CODE_MAX] = {
6828 		[0] = code,
6829 		[1] = subcode,
6830 	};
6831 	task_t task = current_task();
6832 	kern_return_t kr;
6833 	void *bsd_info = get_bsdtask_info(task);
6834 
6835 	/* (See jetsam-related comments below) */
6836 
6837 	proc_memstat_skip(bsd_info, TRUE);
6838 	kr = task_enqueue_exception_with_corpse(task, EXC_GUARD, codes, 2, reason, backtrace_only);
6839 	proc_memstat_skip(bsd_info, FALSE);
6840 	return kr;
6841 }
6842 
6843 kern_return_t
6844 task_violated_guard(
6845 	mach_exception_code_t code,
6846 	mach_exception_subcode_t subcode,
6847 	void *reason,
6848 	bool backtrace_only)
6849 {
6850 	return PROC_VIOLATED_GUARD__SEND_EXC_GUARD(code, subcode, reason, backtrace_only);
6851 }
6852 
6853 
6854 #if CONFIG_MEMORYSTATUS
6855 
6856 boolean_t
6857 task_get_memlimit_is_active(task_t task)
6858 {
6859 	assert(task != NULL);
6860 
6861 	if (task->memlimit_is_active == 1) {
6862 		return TRUE;
6863 	} else {
6864 		return FALSE;
6865 	}
6866 }
6867 
6868 void
6869 task_set_memlimit_is_active(task_t task, boolean_t memlimit_is_active)
6870 {
6871 	assert(task != NULL);
6872 
6873 	if (memlimit_is_active) {
6874 		task->memlimit_is_active = 1;
6875 	} else {
6876 		task->memlimit_is_active = 0;
6877 	}
6878 }
6879 
6880 boolean_t
6881 task_get_memlimit_is_fatal(task_t task)
6882 {
6883 	assert(task != NULL);
6884 
6885 	if (task->memlimit_is_fatal == 1) {
6886 		return TRUE;
6887 	} else {
6888 		return FALSE;
6889 	}
6890 }
6891 
6892 void
6893 task_set_memlimit_is_fatal(task_t task, boolean_t memlimit_is_fatal)
6894 {
6895 	assert(task != NULL);
6896 
6897 	if (memlimit_is_fatal) {
6898 		task->memlimit_is_fatal = 1;
6899 	} else {
6900 		task->memlimit_is_fatal = 0;
6901 	}
6902 }
6903 
6904 uint64_t
6905 task_get_dirty_start(task_t task)
6906 {
6907 	return task->memstat_dirty_start;
6908 }
6909 
6910 void
6911 task_set_dirty_start(task_t task, uint64_t start)
6912 {
6913 	task_lock(task);
6914 	task->memstat_dirty_start = start;
6915 	task_unlock(task);
6916 }
6917 
6918 boolean_t
6919 task_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active)
6920 {
6921 	boolean_t triggered = FALSE;
6922 
6923 	assert(task == current_task());
6924 
6925 	/*
6926 	 * Returns TRUE if the task has already triggered an exc_resource exception.
6927 	 */
6928 
6929 	if (memlimit_is_active) {
6930 		triggered = (task->memlimit_active_exc_resource ? TRUE : FALSE);
6931 	} else {
6932 		triggered = (task->memlimit_inactive_exc_resource ? TRUE : FALSE);
6933 	}
6934 
6935 	return triggered;
6936 }
6937 
6938 void
6939 task_mark_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active)
6940 {
6941 	assert(task == current_task());
6942 
6943 	/*
6944 	 * We allow one exc_resource per process per active/inactive limit.
6945 	 * The limit's fatal attribute does not come into play.
6946 	 */
6947 
6948 	if (memlimit_is_active) {
6949 		task->memlimit_active_exc_resource = 1;
6950 	} else {
6951 		task->memlimit_inactive_exc_resource = 1;
6952 	}
6953 }
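
/*
 * Illustrative sketch (editor's addition, not from the original source):
 * the two routines above form a once-per-limit latch, mirroring their use
 * in task_process_crossed_limit_no_diag() below: check the latch, raise
 * the exception, then mark it so the same limit cannot fire twice.  The
 * function name is hypothetical; guarded out of the build.
 */
#if 0 /* example only */
static void
example_exc_resource_once(task_t task, boolean_t memlimit_is_active)
{
	if (!task_has_triggered_exc_resource(task, memlimit_is_active)) {
		/* ... raise EXC_RESOURCE for this limit ... */
		task_mark_has_triggered_exc_resource(task, memlimit_is_active);
	}
}
#endif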
6954 
6955 #define HWM_USERCORE_MINSPACE 250 // free space (in MB) required *after* core file creation
6956 
6957 void __attribute__((noinline))
6958 PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, send_exec_resource_options_t exception_options)
6959 {
6960 	task_t                      task        = current_task();
6961 	int                         pid         = 0;
6962 	const char                  *procname   = "unknown";
6963 	mach_exception_data_type_t      code[EXCEPTION_CODE_MAX];
6964 	boolean_t send_sync_exc_resource = FALSE;
6965 	void *cur_bsd_info = get_bsdtask_info(current_task());
6966 
6967 #ifdef MACH_BSD
6968 	pid = proc_selfpid();
6969 
6970 	if (pid == 1) {
6971 		/*
6972 		 * Cannot have ReportCrash analyzing
6973 		 * a suspended initproc.
6974 		 */
6975 		return;
6976 	}
6977 
6978 	if (cur_bsd_info != NULL) {
6979 		procname = proc_name_address(cur_bsd_info);
6980 		send_sync_exc_resource = proc_send_synchronous_EXC_RESOURCE(cur_bsd_info);
6981 	}
6982 #endif
6983 #if CONFIG_COREDUMP
6984 	if (hwm_user_cores) {
6985 		int                             error;
6986 		uint64_t                starttime, end;
6987 		clock_sec_t             secs = 0;
6988 		uint32_t                microsecs = 0;
6989 
6990 		starttime = mach_absolute_time();
6991 		/*
6992 		 * Trigger a coredump of this process. Don't proceed unless we know we won't
6993 		 * be filling up the disk, and ignore the core size resource limit for this
6994 		 * core file.
6995 		 */
6996 		if ((error = coredump(cur_bsd_info, HWM_USERCORE_MINSPACE, COREDUMP_IGNORE_ULIMIT)) != 0) {
6997 			printf("couldn't take coredump of %s[%d]: %d\n", procname, pid, error);
6998 		}
6999 		/*
7000 		 * coredump() leaves the task suspended.
7001 		 */
7002 		task_resume_internal(current_task());
7003 
7004 		end = mach_absolute_time();
7005 		absolutetime_to_microtime(end - starttime, &secs, &microsecs);
7006 		printf("coredump of %s[%d] taken in %d secs %d microsecs\n",
7007 		    proc_name_address(cur_bsd_info), pid, (int)secs, microsecs);
7008 	}
7009 #endif /* CONFIG_COREDUMP */
7010 
7011 	if (disable_exc_resource) {
7012 		printf("process %s[%d] crossed memory high watermark (%d MB); EXC_RESOURCE "
7013 		    "suppressed by a boot-arg.\n", procname, pid, max_footprint_mb);
7014 		return;
7015 	}
7016 	printf("process %s[%d] crossed memory %s (%d MB); sending EXC_RESOURCE.\n",
7017 	    procname, pid, (!(exception_options & EXEC_RESOURCE_DIAGNOSTIC) ? "high watermark" : "diagnostics limit"), max_footprint_mb);
7018 
7019 	/*
7020 	 * A task that has triggered an EXC_RESOURCE, should not be
7021 	 * jetsammed when the device is under memory pressure.  Here
7022 	 * we set the P_MEMSTAT_SKIP flag so that the process
7023 	 * will be skipped if the memorystatus_thread wakes up.
7024 	 *
7025 	 * This is a debugging aid to ensure we can get a corpse before
7026 	 * the jetsam thread kills the process.
7027 	 * Note that proc_memstat_skip is a no-op on release kernels.
7028 	 */
7029 	proc_memstat_skip(cur_bsd_info, TRUE);
7030 
7031 	code[0] = code[1] = 0;
7032 	EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_MEMORY);
7033 	/*
7034 	 * Regardless of whether there was a diag memlimit violation, fatal exceptions are always
7035 	 * reported as high watermarks. In other words, if both a diag limit and a watermark are
7036 	 * set and the violation is for the watermark, a watermark is what gets reported.
7037 	 */
7038 	if (!(exception_options & EXEC_RESOURCE_FATAL)) {
7039 		EXC_RESOURCE_ENCODE_FLAVOR(code[0], !(exception_options & EXEC_RESOURCE_DIAGNOSTIC)  ? FLAVOR_HIGH_WATERMARK : FLAVOR_DIAG_MEMLIMIT);
7040 	} else {
7041 		EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_HIGH_WATERMARK );
7042 	}
7043 	EXC_RESOURCE_HWM_ENCODE_LIMIT(code[0], max_footprint_mb);
7044 	/*
7045 	 * Do not generate a corpse fork if the violation is a fatal one
7046 	 * or the process wants synchronous EXC_RESOURCE exceptions.
7047 	 */
7048 	if ((exception_options & EXEC_RESOURCE_FATAL) || send_sync_exc_resource || !exc_via_corpse_forking) {
7049 		/* Do not send an EXC_RESOURCE if corpse_for_fatal_memkill is set */
7050 		if (send_sync_exc_resource || !corpse_for_fatal_memkill) {
7051 			/*
7052 			 * Use the _internal_ variant so that no user-space
7053 			 * process can resume our task from under us.
7054 			 */
7055 			task_suspend_internal(task);
7056 			exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
7057 			task_resume_internal(task);
7058 		}
7059 	} else {
7060 		if (disable_exc_resource_during_audio && audio_active) {
7061 			printf("process %s[%d] crossed memory high watermark (%d MB); EXC_RESOURCE "
7062 			    "suppressed due to audio playback.\n", procname, pid, max_footprint_mb);
7063 		} else {
7064 			task_enqueue_exception_with_corpse(task, EXC_RESOURCE,
7065 			    code, EXCEPTION_CODE_MAX, NULL, FALSE);
7066 		}
7067 	}
7068 
7069 	/*
7070 	 * After the EXC_RESOURCE has been handled, we must clear the
7071 	 * P_MEMSTAT_SKIP flag so that the process can again be
7072 	 * considered for jetsam if the memorystatus_thread wakes up.
7073 	 */
7074 	proc_memstat_skip(cur_bsd_info, FALSE);         /* clear the flag */
7075 }
7076 /*
7077  * Callback invoked when a task exceeds its physical footprint limit.
7078  */
7079 void
7080 task_footprint_exceeded(int warning, __unused const void *param0, __unused const void *param1)
7081 {
7082 	ledger_amount_t max_footprint = 0;
7083 	ledger_amount_t max_footprint_mb = 0;
7084 #if DEBUG || DEVELOPMENT
7085 	ledger_amount_t diag_threshold_limit_mb = 0;
7086 	ledger_amount_t diag_threshold_limit = 0;
7087 #endif
7088 #if CONFIG_DEFERRED_RECLAIM
7089 	ledger_amount_t current_footprint;
7090 #endif /* CONFIG_DEFERRED_RECLAIM */
7091 	task_t task;
7092 	send_exec_resource_is_warning is_warning = IS_NOT_WARNING;
7093 	boolean_t memlimit_is_active;
7094 	send_exec_resource_is_fatal memlimit_is_fatal;
7095 	send_exec_resource_is_diagnostics is_diag_mem_threshold = IS_NOT_DIAGNOSTICS;
7096 	if (warning == LEDGER_WARNING_DIAG_MEM_THRESHOLD) {
7097 		is_diag_mem_threshold = IS_DIAGNOSTICS;
7098 		is_warning = IS_WARNING;
7099 	} else if (warning == LEDGER_WARNING_DIPPED_BELOW) {
7100 		/*
7101 		 * Task memory limits only provide a warning on the way up.
7102 		 */
7103 		return;
7104 	} else if (warning == LEDGER_WARNING_ROSE_ABOVE) {
7105 		/*
7106 		 * This task is in danger of violating a memory limit;
7107 		 * it has exceeded a percentage level of the limit.
7108 		 */
7109 		is_warning = IS_WARNING;
7110 	} else {
7111 		/*
7112 		 * The task has exceeded the physical footprint limit.
7113 		 * This is not a warning but a true limit violation.
7114 		 */
7115 		is_warning = IS_NOT_WARNING;
7116 	}
7117 
7118 	task = current_task();
7119 
7120 	ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &max_footprint);
7121 #if DEBUG || DEVELOPMENT
7122 	ledger_get_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint, &diag_threshold_limit);
7123 #endif
7124 #if CONFIG_DEFERRED_RECLAIM
7125 	if (task->deferred_reclamation_metadata != NULL) {
7126 		/*
7127 		 * Task is enrolled in deferred reclamation.
7128 		 * Do a reclaim to ensure it's really over its limit.
7129 		 */
7130 		vm_deferred_reclamation_reclaim_from_task_sync(task, UINT64_MAX);
7131 		ledger_get_balance(task->ledger, task_ledgers.phys_footprint, &current_footprint);
7132 		if (current_footprint < max_footprint) {
7133 			return;
7134 		}
7135 	}
7136 #endif /* CONFIG_DEFERRED_RECLAIM */
7137 	max_footprint_mb = max_footprint >> 20;
7138 #if DEBUG || DEVELOPMENT
7139 	diag_threshold_limit_mb = diag_threshold_limit >> 20;
7140 #endif
7141 	memlimit_is_active = task_get_memlimit_is_active(task);
7142 	memlimit_is_fatal = task_get_memlimit_is_fatal(task) == FALSE ? IS_NOT_FATAL : IS_FATAL;
7143 #if DEBUG || DEVELOPMENT
7144 	if (is_diag_mem_threshold == IS_NOT_DIAGNOSTICS) {
7145 		task_process_crossed_limit_no_diag(task, max_footprint_mb, memlimit_is_fatal, memlimit_is_active, is_warning);
7146 	} else {
7147 		task_process_crossed_limit_diag(diag_threshold_limit_mb);
7148 	}
7149 #else
7150 	task_process_crossed_limit_no_diag(task, max_footprint_mb, memlimit_is_fatal, memlimit_is_active, is_warning);
7151 #endif
7152 }
7153 
7154 /*
7155  * Actions to perform when a process has crossed a watermark or hit a fatal limit. */
7156 static inline void
7157 task_process_crossed_limit_no_diag(task_t task, ledger_amount_t ledger_limit_size, bool memlimit_is_fatal, bool memlimit_is_active, send_exec_resource_is_warning is_warning)
7158 {
7159 	send_exec_resource_options_t exception_options = 0;
7160 	if (memlimit_is_fatal) {
7161 		exception_options |= EXEC_RESOURCE_FATAL;
7162 	}
7163 	/*
7164 	 * If this is an actual violation (not a warning), then generate EXC_RESOURCE exception.
7165 	 * We only generate the exception once per process per memlimit (active/inactive limit).
7166 	 * To enforce this, we monitor state based on the memlimit's active/inactive attribute
7167 	 * and we disable it by marking that memlimit as exception triggered.
7168 	 */
7169 	if (is_warning == IS_NOT_WARNING && !task_has_triggered_exc_resource(task, memlimit_is_active)) {
7170 		PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND((int)ledger_limit_size, exception_options);
7171 		// If it was not a diag threshold (i.e. it was a memory limit), we do not want any more signalling;
7172 		// however, if it was a diag limit, the user may load a different limit and signal the violation again.
7173 		memorystatus_log_exception((int)ledger_limit_size, memlimit_is_active, memlimit_is_fatal);
7174 		task_mark_has_triggered_exc_resource(task, memlimit_is_active);
7175 	}
7176 	memorystatus_on_ledger_footprint_exceeded(is_warning == IS_NOT_WARNING ? FALSE : TRUE, memlimit_is_active, memlimit_is_fatal);
7177 }
7178 
7179 #if DEBUG || DEVELOPMENT
7180 /**
7181  * Actions to take when a process has crossed the diagnostics limit
7182  */
7183 static inline void
7184 task_process_crossed_limit_diag(ledger_amount_t ledger_limit_size)
7185 {
7186 	/*
7187 	 * If this is an actual violation (not a warning), then generate EXC_RESOURCE exception.
7188 	 * In the case of the diagnostics thresholds, the exception will be signaled only once, but the
7189 	 * inhibit / rearm mechanism is performed at the ledger level.
7190 	 */
7191 	send_exec_resource_options_t exception_options = EXEC_RESOURCE_DIAGNOSTIC;
7192 	PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND((int)ledger_limit_size, exception_options);
7193 	memorystatus_log_diag_threshold_exception((int)ledger_limit_size);
7194 }
7195 #endif
7196 
7197 extern int proc_check_footprint_priv(void);
7198 
7199 kern_return_t
7200 task_set_phys_footprint_limit(
7201 	task_t task,
7202 	int new_limit_mb,
7203 	int *old_limit_mb)
7204 {
7205 	kern_return_t error;
7206 
7207 	boolean_t memlimit_is_active;
7208 	boolean_t memlimit_is_fatal;
7209 
7210 	if ((error = proc_check_footprint_priv())) {
7211 		return KERN_NO_ACCESS;
7212 	}
7213 
7214 	/*
7215 	 * This call should probably be obsoleted.
7216 	 * But for now, we default to current state.
7217 	 */
7218 	memlimit_is_active = task_get_memlimit_is_active(task);
7219 	memlimit_is_fatal = task_get_memlimit_is_fatal(task);
7220 
7221 	return task_set_phys_footprint_limit_internal(task, new_limit_mb, old_limit_mb, memlimit_is_active, memlimit_is_fatal);
7222 }
7223 
7224 /*
7225  * Set the limit of diagnostics memory consumption for a concrete task
7226  */
7227 #if CONFIG_MEMORYSTATUS
7228 #if DEVELOPMENT || DEBUG
7229 kern_return_t
7230 task_set_diag_footprint_limit(
7231 	task_t task,
7232 	uint64_t new_limit_mb,
7233 	uint64_t *old_limit_mb)
7234 {
7235 	kern_return_t error;
7236 
7237 	if ((error = proc_check_footprint_priv())) {
7238 		return KERN_NO_ACCESS;
7239 	}
7240 
7241 	return task_set_diag_footprint_limit_internal(task, new_limit_mb, old_limit_mb);
7242 }
7243 
7244 #endif // DEVELOPMENT || DEBUG
7245 #endif // CONFIG_MEMORYSTATUS
7246 
7247 kern_return_t
7248 task_convert_phys_footprint_limit(
7249 	int limit_mb,
7250 	int *converted_limit_mb)
7251 {
7252 	if (limit_mb == -1) {
7253 		/*
7254 		 * No limit
7255 		 */
7256 		if (max_task_footprint != 0) {
7257 			*converted_limit_mb = (int)(max_task_footprint / 1024 / 1024);         /* bytes to MB */
7258 		} else {
7259 			*converted_limit_mb = (int)(LEDGER_LIMIT_INFINITY >> 20);
7260 		}
7261 	} else {
7262 		/* nothing to convert */
7263 		*converted_limit_mb = limit_mb;
7264 	}
7265 	return KERN_SUCCESS;
7266 }
7267 
7268 
7269 kern_return_t
7270 task_set_phys_footprint_limit_internal(
7271 	task_t task,
7272 	int new_limit_mb,
7273 	int *old_limit_mb,
7274 	boolean_t memlimit_is_active,
7275 	boolean_t memlimit_is_fatal)
7276 {
7277 	ledger_amount_t old;
7278 	kern_return_t ret;
7279 #if DEVELOPMENT || DEBUG
7280 	diagthreshold_check_return diag_threshold_validity;
7281 #endif
7282 	ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &old);
7283 
7284 	if (ret != KERN_SUCCESS) {
7285 		return ret;
7286 	}
7287 	/**
7288 	 * We may need to re-enable the diag threshold, so get its value
7289 	 * and its current status.
7290 	 */
7291 #if DEVELOPMENT || DEBUG
7292 	diag_threshold_validity = task_check_memorythreshold_is_valid( task, new_limit_mb, false);
7293 	/**
7294 	 * If the footprint limit and the diagnostics threshold are going to be the same, disable the threshold.
7295 	 */
7296 	if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7297 		ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7298 	} else if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7299 		ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7300 	}
7301 #endif
7302 
7303 	/*
7304 	 * Check that limit >> 20 will not give an "unexpected" 32-bit
7305 	 * result. There are, however, implicit assumptions that -1 mb limit
7306 	 * equates to LEDGER_LIMIT_INFINITY.
7307 	 */
7308 	assert(((old & 0xFFF0000000000000LL) == 0) || (old == LEDGER_LIMIT_INFINITY));
7309 
7310 	if (old_limit_mb) {
7311 		*old_limit_mb = (int)(old >> 20);
7312 	}
7313 
7314 	if (new_limit_mb == -1) {
7315 		/*
7316 		 * Caller wishes to remove the limit.
7317 		 */
7318 		ledger_set_limit(task->ledger, task_ledgers.phys_footprint,
7319 		    max_task_footprint ? max_task_footprint : LEDGER_LIMIT_INFINITY,
7320 		    max_task_footprint ? (uint8_t)max_task_footprint_warning_level : 0);
7321 
7322 		task_lock(task);
7323 		task_set_memlimit_is_active(task, memlimit_is_active);
7324 		task_set_memlimit_is_fatal(task, memlimit_is_fatal);
7325 		task_unlock(task);
7326 		/**
7327 		 * If the diagnostics threshold was disabled and we now have a new limit, we have to re-enable it.
7328 		 */
7329 #if DEVELOPMENT || DEBUG
7330 		if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7331 			ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7332 		} else if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7333 			ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7334 		}
7335 #endif
7336 		return KERN_SUCCESS;
7337 	}
7338 
7339 #ifdef CONFIG_NOMONITORS
7340 	return KERN_SUCCESS;
7341 #endif /* CONFIG_NOMONITORS */
7342 
7343 	task_lock(task);
7344 
7345 	if ((memlimit_is_active == task_get_memlimit_is_active(task)) &&
7346 	    (memlimit_is_fatal == task_get_memlimit_is_fatal(task)) &&
7347 	    (((ledger_amount_t)new_limit_mb << 20) == old)) {
7348 		/*
7349 		 * memlimit state is not changing
7350 		 */
7351 		task_unlock(task);
7352 		return KERN_SUCCESS;
7353 	}
7354 
7355 	task_set_memlimit_is_active(task, memlimit_is_active);
7356 	task_set_memlimit_is_fatal(task, memlimit_is_fatal);
7357 
7358 	ledger_set_limit(task->ledger, task_ledgers.phys_footprint,
7359 	    (ledger_amount_t)new_limit_mb << 20, PHYS_FOOTPRINT_WARNING_LEVEL);
7360 
7361 	if (task == current_task()) {
7362 		ledger_check_new_balance(current_thread(), task->ledger,
7363 		    task_ledgers.phys_footprint);
7364 	}
7365 
7366 	task_unlock(task);
7367 #if DEVELOPMENT || DEBUG
7368 	if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7369 		ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7370 	}
7371 #endif
7372 
7373 	return KERN_SUCCESS;
7374 }
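
/*
 * Illustrative sketch (editor's addition, not from the original source):
 * a new limit is given in MB and the previous one is reported through
 * old_limit_mb; passing -1 removes the limit, falling back to the
 * system-wide max_task_footprint if one is configured.  The function
 * name is hypothetical; guarded out of the build.
 */
#if 0 /* example only */
static void
example_footprint_limit(task_t task)
{
	int old_limit_mb = 0;

	/* Set a 512 MB, active, non-fatal limit. */
	task_set_phys_footprint_limit_internal(task, 512, &old_limit_mb,
	    TRUE, FALSE);

	/* Remove the limit again. */
	task_set_phys_footprint_limit_internal(task, -1, NULL, TRUE, FALSE);
}
#endif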
7375 
7376 #if RESETTABLE_DIAG_FOOTPRINT_LIMITS
7377 kern_return_t
7378 task_set_diag_footprint_limit_internal(
7379 	task_t task,
7380 	uint64_t new_limit_bytes,
7381 	uint64_t *old_limit_bytes)
7382 {
7383 	ledger_amount_t old = 0;
7384 	kern_return_t ret = KERN_SUCCESS;
7385 	diagthreshold_check_return diag_threshold_validity;
7386 	ret = ledger_get_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint, &old);
7387 
7388 	if (ret != KERN_SUCCESS) {
7389 		return ret;
7390 	}
7391 	/**
7392 	 * We may need to re-enable the diag threshold, so get its value
7393 	 * and its current status.
7394 	 */
7395 	diag_threshold_validity = task_check_memorythreshold_is_valid( task, new_limit_bytes >> 20, true);
7396 	/**
7397 	 * If the footprint limit and the diagnostics threshold are going to be the same, disable the threshold.
7398 	 */
7399 	if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7400 		ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7401 	}
7402 
7403 	/*
7404 	 * Check that limit >> 20 will not give an "unexpected" 32-bit
7405 	 * result. There are, however, implicit assumptions that -1 mb limit
7406 	 * equates to LEDGER_LIMIT_INFINITY.
7407 	 */
7408 	if (old_limit_bytes) {
7409 		*old_limit_bytes = old;
7410 	}
7411 
7412 	if (new_limit_bytes == -1) {
7413 		/*
7414 		 * Caller wishes to remove the limit.
7415 		 */
7416 		ledger_set_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint,
7417 		    LEDGER_LIMIT_INFINITY);
7418 		/*
7419 		 * If the memory diagnostics flag was disabled, enable it again.
7420 		 */
7421 		ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7422 		return KERN_SUCCESS;
7423 	}
7424 
7425 #ifdef CONFIG_NOMONITORS
7426 	return KERN_SUCCESS;
7427 #else
7428 
7429 	task_lock(task);
7430 	ledger_set_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint,
7431 	    (ledger_amount_t)new_limit_bytes );
7432 	if (task == current_task()) {
7433 		ledger_check_new_balance(current_thread(), task->ledger,
7434 		    task_ledgers.phys_footprint);
7435 	}
7436 
7437 	task_unlock(task);
7438 	if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7439 		ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7440 	} else if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7441 		ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7442 	}
7443 
7444 	return KERN_SUCCESS;
7445 #endif /* CONFIG_NOMONITORS */
7446 }
7447 
7448 kern_return_t
7449 task_get_diag_footprint_limit_internal(
7450 	task_t task,
7451 	uint64_t *new_limit_bytes,
7452 	bool *threshold_disabled)
7453 {
7454 	ledger_amount_t ledger_limit;
7455 	kern_return_t ret = KERN_SUCCESS;
7456 	if (new_limit_bytes == NULL || threshold_disabled == NULL) {
7457 		return KERN_INVALID_ARGUMENT;
7458 	}
7459 	ret = ledger_get_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint, &ledger_limit);
7460 	if (ledger_limit == LEDGER_LIMIT_INFINITY) {
7461 		ledger_limit = -1;
7462 	}
7463 	if (ret == KERN_SUCCESS) {
7464 		*new_limit_bytes = ledger_limit;
7465 		ret = ledger_is_diag_threshold_enabled(task->ledger, task_ledgers.phys_footprint, threshold_disabled);
7466 	}
7467 	return ret;
7468 }
7469 #endif /* RESETTABLE_DIAG_FOOTPRINT_LIMITS */
7470 
7471 
7472 kern_return_t
7473 task_get_phys_footprint_limit(
7474 	task_t task,
7475 	int *limit_mb)
7476 {
7477 	ledger_amount_t limit;
7478 	kern_return_t ret;
7479 
7480 	ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &limit);
7481 	if (ret != KERN_SUCCESS) {
7482 		return ret;
7483 	}
7484 
7485 	/*
7486 	 * Check that limit >> 20 will not give an "unexpected" signed, 32-bit
7487 	 * result. There are, however, implicit assumptions that -1 mb limit
7488 	 * equates to LEDGER_LIMIT_INFINITY.
7489 	 */
7490 	assert(((limit & 0xFFF0000000000000LL) == 0) || (limit == LEDGER_LIMIT_INFINITY));
7491 	*limit_mb = (int)(limit >> 20);
7492 
7493 	return KERN_SUCCESS;
7494 }
7495 #else /* CONFIG_MEMORYSTATUS */
7496 kern_return_t
7497 task_set_phys_footprint_limit(
7498 	__unused task_t task,
7499 	__unused int new_limit_mb,
7500 	__unused int *old_limit_mb)
7501 {
7502 	return KERN_FAILURE;
7503 }
7504 
7505 kern_return_t
7506 task_get_phys_footprint_limit(
7507 	__unused task_t task,
7508 	__unused int *limit_mb)
7509 {
7510 	return KERN_FAILURE;
7511 }
7512 #endif /* CONFIG_MEMORYSTATUS */
7513 
7514 security_token_t *
7515 task_get_sec_token(task_t task)
7516 {
7517 	return &task_get_ro(task)->task_tokens.sec_token;
7518 }
7519 
7520 void
7521 task_set_sec_token(task_t task, security_token_t *token)
7522 {
7523 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7524 	    task_tokens.sec_token, token);
7525 }
7526 
7527 audit_token_t *
7528 task_get_audit_token(task_t task)
7529 {
7530 	return &task_get_ro(task)->task_tokens.audit_token;
7531 }
7532 
7533 void
7534 task_set_audit_token(task_t task, audit_token_t *token)
7535 {
7536 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7537 	    task_tokens.audit_token, token);
7538 }
7539 
7540 void
7541 task_set_tokens(task_t task, security_token_t *sec_token, audit_token_t *audit_token)
7542 {
7543 	struct task_token_ro_data tokens;
7544 
7545 	tokens = task_get_ro(task)->task_tokens;
7546 	tokens.sec_token = *sec_token;
7547 	tokens.audit_token = *audit_token;
7548 
7549 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task), task_tokens,
7550 	    &tokens);
7551 }
7552 
7553 boolean_t
7554 task_is_privileged(task_t task)
7555 {
7556 	return task_get_sec_token(task)->val[0] == 0;
7557 }
7558 
7559 #ifdef CONFIG_MACF
7560 uint8_t *
7561 task_get_mach_trap_filter_mask(task_t task)
7562 {
7563 	return task_get_ro(task)->task_filters.mach_trap_filter_mask;
7564 }
7565 
7566 void
7567 task_set_mach_trap_filter_mask(task_t task, uint8_t *mask)
7568 {
7569 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7570 	    task_filters.mach_trap_filter_mask, &mask);
7571 }
7572 
7573 uint8_t *
7574 task_get_mach_kobj_filter_mask(task_t task)
7575 {
7576 	return task_get_ro(task)->task_filters.mach_kobj_filter_mask;
7577 }
7578 
7579 mach_vm_address_t
7580 task_get_all_image_info_addr(task_t task)
7581 {
7582 	return task->all_image_info_addr;
7583 }
7584 
7585 void
7586 task_set_mach_kobj_filter_mask(task_t task, uint8_t *mask)
7587 {
7588 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7589 	    task_filters.mach_kobj_filter_mask, &mask);
7590 }
7591 
7592 #endif /* CONFIG_MACF */
7593 
7594 void
7595 task_set_thread_limit(task_t task, uint16_t thread_limit)
7596 {
7597 	assert(task != kernel_task);
7598 	if (thread_limit <= TASK_MAX_THREAD_LIMIT) {
7599 		task_lock(task);
7600 		task->task_thread_limit = thread_limit;
7601 		task_unlock(task);
7602 	}
7603 }
7604 
7605 #if CONFIG_PROC_RESOURCE_LIMITS
7606 kern_return_t
7607 task_set_port_space_limits(task_t task, uint32_t soft_limit, uint32_t hard_limit)
7608 {
7609 	return ipc_space_set_table_size_limits(task->itk_space, soft_limit, hard_limit);
7610 }
7611 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
7612 
7613 #if XNU_TARGET_OS_OSX
7614 boolean_t
7615 task_has_system_version_compat_enabled(task_t task)
7616 {
7617 	boolean_t enabled = FALSE;
7618 
7619 	task_lock(task);
7620 	enabled = (task->t_flags & TF_SYS_VERSION_COMPAT);
7621 	task_unlock(task);
7622 
7623 	return enabled;
7624 }
7625 
7626 void
7627 task_set_system_version_compat_enabled(task_t task, boolean_t enable_system_version_compat)
7628 {
7629 	assert(task == current_task());
7630 	assert(task != kernel_task);
7631 
7632 	task_lock(task);
7633 	if (enable_system_version_compat) {
7634 		task->t_flags |= TF_SYS_VERSION_COMPAT;
7635 	} else {
7636 		task->t_flags &= ~TF_SYS_VERSION_COMPAT;
7637 	}
7638 	task_unlock(task);
7639 }
7640 #endif /* XNU_TARGET_OS_OSX */
7641 
7642 /*
7643  * We need to export some functions to other components that
7644  * are currently implemented in macros within the osfmk
7645  * component.  Just export them as functions of the same name.
7646  */
7647 boolean_t
7648 is_kerneltask(task_t t)
7649 {
7650 	if (t == kernel_task) {
7651 		return TRUE;
7652 	}
7653 
7654 	return FALSE;
7655 }
7656 
7657 boolean_t
7658 is_corpsefork(task_t t)
7659 {
7660 	return task_is_a_corpse_fork(t);
7661 }
7662 
7663 task_t
7664 current_task_early(void)
7665 {
7666 	if (__improbable(startup_phase < STARTUP_SUB_EARLY_BOOT)) {
7667 		if (current_thread()->t_tro == NULL) {
7668 			return TASK_NULL;
7669 		}
7670 	}
7671 	return get_threadtask(current_thread());
7672 }
7673 
7674 task_t
7675 current_task(void)
7676 {
7677 	return get_threadtask(current_thread());
7678 }
7679 
7680 /* defined in bsd/kern/kern_prot.c */
7681 extern int get_audit_token_pid(audit_token_t *audit_token);
7682 
7683 int
7684 task_pid(task_t task)
7685 {
7686 	if (task) {
7687 		return get_audit_token_pid(task_get_audit_token(task));
7688 	}
7689 	return -1;
7690 }
7691 
7692 #if __has_feature(ptrauth_calls)
7693 /*
7694  * Get the shared region id and jop signing key for the task.
7695  * The function will allocate a kalloc buffer and return
7696  * it to the caller; the caller needs to free it. This is used
7697  * for getting the information via task port.
7698  */
7699 char *
7700 task_get_vm_shared_region_id_and_jop_pid(task_t task, uint64_t *jop_pid)
7701 {
7702 	size_t len;
7703 	char *shared_region_id = NULL;
7704 
7705 	task_lock(task);
7706 	if (task->shared_region_id == NULL) {
7707 		task_unlock(task);
7708 		return NULL;
7709 	}
7710 	len = strlen(task->shared_region_id) + 1;
7711 
7712 	/* don't hold task lock while allocating */
7713 	task_unlock(task);
7714 	shared_region_id = kalloc_data(len, Z_WAITOK);
7715 	task_lock(task);
7716 
7717 	if (task->shared_region_id == NULL) {
7718 		task_unlock(task);
7719 		kfree_data(shared_region_id, len);
7720 		return NULL;
7721 	}
7722 	assert(len == strlen(task->shared_region_id) + 1);         /* should never change */
7723 	strlcpy(shared_region_id, task->shared_region_id, len);
7724 	task_unlock(task);
7725 
7726 	/* find key from its auth pager */
7727 	if (jop_pid != NULL) {
7728 		*jop_pid = shared_region_find_key(shared_region_id);
7729 	}
7730 
7731 	return shared_region_id;
7732 }
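
/*
 * Illustrative sketch (editor's addition, not from the original source):
 * the returned buffer is kalloc'd on behalf of the caller, who must free
 * it with kfree_data() sized to the string plus its NUL terminator.  The
 * function name is hypothetical; guarded out of the build.
 */
#if 0 /* example only */
static void
example_shared_region_id(task_t task)
{
	uint64_t jop_pid = 0;
	char *id = task_get_vm_shared_region_id_and_jop_pid(task, &jop_pid);

	if (id != NULL) {
		/* ... use id and jop_pid ... */
		kfree_data(id, strlen(id) + 1);
	}
}
#endif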
7733 
7734 /*
7735  * set the shared region id for a task
7736  */
7737 void
7738 task_set_shared_region_id(task_t task, char *id)
7739 {
7740 	char *old_id;
7741 
7742 	task_lock(task);
7743 	old_id = task->shared_region_id;
7744 	task->shared_region_id = id;
7745 	task->shared_region_auth_remapped = FALSE;
7746 	task_unlock(task);
7747 
7748 	/* free any pre-existing shared region id */
7749 	if (old_id != NULL) {
7750 		shared_region_key_dealloc(old_id);
7751 		kfree_data(old_id, strlen(old_id) + 1);
7752 	}
7753 }
7754 #endif /* __has_feature(ptrauth_calls) */
7755 
7756 /*
7757  * This routine finds a thread in a task by its unique id
7758  * Returns a referenced thread or THREAD_NULL if the thread was not found
7759  *
7760  * TODO: This is super inefficient - it's an O(threads in task) list walk!
7761  *       We should make a tid hash, or transition all tid clients to thread ports
7762  *
7763  * Precondition: No locks held (will take task lock)
7764  */
7765 thread_t
7766 task_findtid(task_t task, uint64_t tid)
7767 {
7768 	thread_t self           = current_thread();
7769 	thread_t found_thread   = THREAD_NULL;
7770 	thread_t iter_thread    = THREAD_NULL;
7771 
7772 	/* Short-circuit the lookup if we're looking up ourselves */
7773 	if (tid == self->thread_id || tid == TID_NULL) {
7774 		assert(get_threadtask(self) == task);
7775 
7776 		thread_reference(self);
7777 
7778 		return self;
7779 	}
7780 
7781 	task_lock(task);
7782 
7783 	queue_iterate(&task->threads, iter_thread, thread_t, task_threads) {
7784 		if (iter_thread->thread_id == tid) {
7785 			found_thread = iter_thread;
7786 			thread_reference(found_thread);
7787 			break;
7788 		}
7789 	}
7790 
7791 	task_unlock(task);
7792 
7793 	return found_thread;
7794 }
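
/*
 * Illustrative sketch (editor's addition, not from the original source):
 * task_findtid() returns a referenced thread; the caller owns that
 * reference and must drop it with thread_deallocate() when done.  The
 * function name is hypothetical; guarded out of the build.
 */
#if 0 /* example only */
static void
example_lookup_tid(task_t task, uint64_t tid)
{
	thread_t thread = task_findtid(task, tid);

	if (thread != THREAD_NULL) {
		/* ... operate on the thread ... */
		thread_deallocate(thread);
	}
}
#endif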
7795 
7796 int
7797 pid_from_task(task_t task)
7798 {
7799 	int pid = -1;
7800 	void *bsd_info = get_bsdtask_info(task);
7801 
7802 	if (bsd_info) {
7803 		pid = proc_pid(bsd_info);
7804 	} else {
7805 		pid = task_pid(task);
7806 	}
7807 
7808 	return pid;
7809 }
7810 
7811 /*
7812  * Control the CPU usage monitor for a task.
7813  */
7814 kern_return_t
7815 task_cpu_usage_monitor_ctl(task_t task, uint32_t *flags)
7816 {
7817 	int error = KERN_SUCCESS;
7818 
7819 	if (*flags & CPUMON_MAKE_FATAL) {
7820 		task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_FATAL_CPUMON;
7821 	} else {
7822 		error = KERN_INVALID_ARGUMENT;
7823 	}
7824 
7825 	return error;
7826 }
7827 
7828 /*
7829  * Control the wakeups monitor for a task.
7830  */
7831 kern_return_t
7832 task_wakeups_monitor_ctl(task_t task, uint32_t *flags, int32_t *rate_hz)
7833 {
7834 	ledger_t ledger = task->ledger;
7835 
7836 	task_lock(task);
7837 	if (*flags & WAKEMON_GET_PARAMS) {
7838 		ledger_amount_t limit;
7839 		uint64_t                period;
7840 
7841 		ledger_get_limit(ledger, task_ledgers.interrupt_wakeups, &limit);
7842 		ledger_get_period(ledger, task_ledgers.interrupt_wakeups, &period);
7843 
7844 		if (limit != LEDGER_LIMIT_INFINITY) {
7845 			/*
7846 			 * An active limit means the wakeups monitor is enabled.
7847 			 */
7848 			*rate_hz = (int32_t)(limit / (int64_t)(period / NSEC_PER_SEC));
7849 			*flags = WAKEMON_ENABLE;
7850 			if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON) {
7851 				*flags |= WAKEMON_MAKE_FATAL;
7852 			}
7853 		} else {
7854 			*flags = WAKEMON_DISABLE;
7855 			*rate_hz = -1;
7856 		}
7857 
7858 		/*
7859 		 * If WAKEMON_GET_PARAMS is present in flags, all other flags are ignored.
7860 		 */
7861 		task_unlock(task);
7862 		return KERN_SUCCESS;
7863 	}
7864 
7865 	if (*flags & WAKEMON_ENABLE) {
7866 		if (*flags & WAKEMON_SET_DEFAULTS) {
7867 			*rate_hz = task_wakeups_monitor_rate;
7868 		}
7869 
7870 #ifndef CONFIG_NOMONITORS
7871 		if (*flags & WAKEMON_MAKE_FATAL) {
7872 			task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON;
7873 		}
7874 #endif /* CONFIG_NOMONITORS */
7875 
7876 		if (*rate_hz <= 0) {
7877 			task_unlock(task);
7878 			return KERN_INVALID_ARGUMENT;
7879 		}
7880 
7881 #ifndef CONFIG_NOMONITORS
7882 		ledger_set_limit(ledger, task_ledgers.interrupt_wakeups, *rate_hz * task_wakeups_monitor_interval,
7883 		    (uint8_t)task_wakeups_monitor_ustackshots_trigger_pct);
7884 		ledger_set_period(ledger, task_ledgers.interrupt_wakeups, task_wakeups_monitor_interval * NSEC_PER_SEC);
7885 		ledger_enable_callback(ledger, task_ledgers.interrupt_wakeups);
7886 #endif /* CONFIG_NOMONITORS */
7887 	} else if (*flags & WAKEMON_DISABLE) {
7888 		/*
7889 		 * Caller wishes to disable wakeups monitor on the task.
7890 		 *
7891 		 * Disable telemetry if it was triggered by the wakeups monitor, and
7892 		 * remove the limit & callback on the wakeups ledger entry.
7893 		 */
7894 #if CONFIG_TELEMETRY
7895 		telemetry_task_ctl_locked(task, TF_WAKEMON_WARNING, 0);
7896 #endif
7897 		ledger_disable_refill(ledger, task_ledgers.interrupt_wakeups);
7898 		ledger_disable_callback(ledger, task_ledgers.interrupt_wakeups);
7899 	}
7900 
7901 	task_unlock(task);
7902 	return KERN_SUCCESS;
7903 }
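
/*
 * Illustrative sketch (editor's addition, not from the original source):
 * the flags word is both input and output.  Enable the monitor with the
 * default rate, then read the effective parameters back with
 * WAKEMON_GET_PARAMS, which ignores all other flags as noted above.  The
 * function name is hypothetical; guarded out of the build.
 */
#if 0 /* example only */
static void
example_wakemon(task_t task)
{
	uint32_t flags;
	int32_t rate_hz = 0;

	flags = WAKEMON_ENABLE | WAKEMON_SET_DEFAULTS;
	task_wakeups_monitor_ctl(task, &flags, &rate_hz);

	flags = WAKEMON_GET_PARAMS;
	task_wakeups_monitor_ctl(task, &flags, &rate_hz);
	/* flags is now WAKEMON_ENABLE, possibly with WAKEMON_MAKE_FATAL set. */
}
#endif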
7904 
7905 void
7906 task_wakeups_rate_exceeded(int warning, __unused const void *param0, __unused const void *param1)
7907 {
7908 	if (warning == LEDGER_WARNING_ROSE_ABOVE) {
7909 #if CONFIG_TELEMETRY
7910 		/*
7911 		 * This task is in danger of violating the wakeups monitor. Enable telemetry on this task
7912 		 * so there are micro-stackshots available if and when EXC_RESOURCE is triggered.
7913 		 */
7914 		telemetry_task_ctl(current_task(), TF_WAKEMON_WARNING, 1);
7915 #endif
7916 		return;
7917 	}
7918 
7919 #if CONFIG_TELEMETRY
7920 	/*
7921 	 * If the balance has dipped below the warning level (LEDGER_WARNING_DIPPED_BELOW) or
7922 	 * exceeded the limit, turn telemetry off for the task.
7923 	 */
7924 	telemetry_task_ctl(current_task(), TF_WAKEMON_WARNING, 0);
7925 #endif
7926 
7927 	if (warning == 0) {
7928 		SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS();
7929 	}
7930 }
7931 
7932 void __attribute__((noinline))
7933 SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void)
7934 {
7935 	task_t                      task        = current_task();
7936 	int                         pid         = 0;
7937 	const char                  *procname   = "unknown";
7938 	boolean_t                   fatal;
7939 	kern_return_t               kr;
7940 #ifdef EXC_RESOURCE_MONITORS
7941 	mach_exception_data_type_t  code[EXCEPTION_CODE_MAX];
7942 #endif /* EXC_RESOURCE_MONITORS */
7943 	struct ledger_entry_info    lei;
7944 
7945 #ifdef MACH_BSD
7946 	pid = proc_selfpid();
7947 	if (get_bsdtask_info(task) != NULL) {
7948 		procname = proc_name_address(get_bsdtask_info(current_task()));
7949 	}
7950 #endif
7951 
7952 	ledger_get_entry_info(task->ledger, task_ledgers.interrupt_wakeups, &lei);
7953 
7954 	/*
7955 	 * Disable the exception notification so we don't overwhelm
7956 	 * the listener with an endless stream of redundant exceptions.
7957 	 * TODO: detect whether another thread is already reporting the violation.
7958 	 */
7959 	uint32_t flags = WAKEMON_DISABLE;
7960 	task_wakeups_monitor_ctl(task, &flags, NULL);
7961 
7962 	fatal = task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON;
7963 	trace_resource_violation(RMON_CPUWAKES_VIOLATED, &lei);
7964 	os_log(OS_LOG_DEFAULT, "process %s[%d] caught waking the CPU %llu times "
7965 	    "over ~%llu seconds, averaging %llu wakes / second and "
7966 	    "violating a %slimit of %llu wakes over %llu seconds.\n",
7967 	    procname, pid,
7968 	    lei.lei_balance, lei.lei_last_refill / NSEC_PER_SEC,
7969 	    lei.lei_last_refill == 0 ? 0 :
7970 	    (NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill),
7971 	    fatal ? "FATAL " : "",
7972 	    lei.lei_limit, lei.lei_refill_period / NSEC_PER_SEC);
7973 
7974 	kr = send_resource_violation(send_cpu_wakes_violation, task, &lei,
7975 	    fatal ? kRNFatalLimitFlag : 0);
7976 	if (kr) {
7977 		printf("send_resource_violation(CPU wakes, ...): error %#x\n", kr);
7978 	}
7979 
7980 #ifdef EXC_RESOURCE_MONITORS
7981 	if (disable_exc_resource) {
7982 		printf("process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
7983 		    "suppressed by a boot-arg\n", procname, pid);
7984 		return;
7985 	}
7986 	if (disable_exc_resource_during_audio && audio_active) {
7987 		os_log(OS_LOG_DEFAULT, "process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
7988 		    "suppressed due to audio playback\n", procname, pid);
7989 		return;
7990 	}
7991 	if (lei.lei_last_refill == 0) {
7992 		os_log(OS_LOG_DEFAULT, "process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
7993 		    "suppressed due to lei.lei_last_refill = 0 \n", procname, pid);
7994 	}
7995 
7996 	code[0] = code[1] = 0;
7997 	EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_WAKEUPS);
7998 	EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_WAKEUPS_MONITOR);
7999 	EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_PERMITTED(code[0],
8000 	    NSEC_PER_SEC * lei.lei_limit / lei.lei_refill_period);
8001 	EXC_RESOURCE_CPUMONITOR_ENCODE_OBSERVATION_INTERVAL(code[0],
8002 	    lei.lei_last_refill);
8003 	EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_OBSERVED(code[1],
8004 	    NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill);
8005 	exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
8006 #endif /* EXC_RESOURCE_MONITORS */
8007 
8008 	if (fatal) {
8009 		task_terminate_internal(task);
8010 	}
8011 }
8012 
8013 static boolean_t
8014 global_update_logical_writes(int64_t io_delta, int64_t *global_write_count)
8015 {
8016 	int64_t old_count, new_count;
8017 	boolean_t needs_telemetry;
8018 
8019 	do {
8020 		new_count = old_count = *global_write_count;
8021 		new_count += io_delta;
8022 		if (new_count >= io_telemetry_limit) {
8023 			new_count = 0;
8024 			needs_telemetry = TRUE;
8025 		} else {
8026 			needs_telemetry = FALSE;
8027 		}
8028 	} while (!OSCompareAndSwap64(old_count, new_count, global_write_count));
8029 	return needs_telemetry;
8030 }
8031 
8032 void
8033 task_update_physical_writes(__unused task_t task, __unused task_physical_write_flavor_t flavor, __unused uint64_t io_size, __unused task_balance_flags_t flags)
8034 {
8035 #if CONFIG_PHYS_WRITE_ACCT
8036 	if (!io_size) {
8037 		return;
8038 	}
8039 
8040 	/*
8041 	 * task == NULL means that we have to update kernel_task ledgers
8042 	 */
8043 	if (!task) {
8044 		task = kernel_task;
8045 	}
8046 
8047 	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PHYS_WRITE_ACCT)) | DBG_FUNC_NONE,
8048 	    task_pid(task), flavor, io_size, flags, 0);
8049 	DTRACE_IO4(physical_writes, struct task *, task, task_physical_write_flavor_t, flavor, uint64_t, io_size, task_balance_flags_t, flags);
8050 
8051 	if (flags & TASK_BALANCE_CREDIT) {
8052 		if (flavor == TASK_PHYSICAL_WRITE_METADATA) {
8053 			OSAddAtomic64(io_size, (SInt64 *)&(task->task_fs_metadata_writes));
8054 			ledger_credit_nocheck(task->ledger, task_ledgers.fs_metadata_writes, io_size);
8055 		}
8056 	} else if (flags & TASK_BALANCE_DEBIT) {
8057 		if (flavor == TASK_PHYSICAL_WRITE_METADATA) {
8058 			OSAddAtomic64(-1 * io_size, (SInt64 *)&(task->task_fs_metadata_writes));
8059 			ledger_debit_nocheck(task->ledger, task_ledgers.fs_metadata_writes, io_size);
8060 		}
8061 	}
8062 #endif /* CONFIG_PHYS_WRITE_ACCT */
8063 }
8064 
8065 void
8066 task_update_logical_writes(task_t task, uint32_t io_size, int flags, void *vp)
8067 {
8068 	int64_t io_delta = 0;
8069 	int64_t * global_counter_to_update;
8070 	boolean_t needs_telemetry = FALSE;
8071 	boolean_t is_external_device = FALSE;
8072 	int ledger_to_update = 0;
8073 	struct task_writes_counters * writes_counters_to_update;
8074 
8075 	if ((!task) || (!io_size) || (!vp)) {
8076 		return;
8077 	}
8078 
8079 	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_DATA_WRITE)) | DBG_FUNC_NONE,
8080 	    task_pid(task), io_size, flags, (uintptr_t)VM_KERNEL_ADDRPERM(vp), 0);
8081 	DTRACE_IO4(logical_writes, struct task *, task, uint32_t, io_size, int, flags, vnode *, vp);
8082 
8083 	// Is the drive backing this vnode internal or external to the system?
8084 	if (vnode_isonexternalstorage(vp) == false) {
8085 		global_counter_to_update = &global_logical_writes_count;
8086 		ledger_to_update = task_ledgers.logical_writes;
8087 		writes_counters_to_update = &task->task_writes_counters_internal;
8088 		is_external_device = FALSE;
8089 	} else {
8090 		global_counter_to_update = &global_logical_writes_to_external_count;
8091 		ledger_to_update = task_ledgers.logical_writes_to_external;
8092 		writes_counters_to_update = &task->task_writes_counters_external;
8093 		is_external_device = TRUE;
8094 	}
8095 
8096 	switch (flags) {
8097 	case TASK_WRITE_IMMEDIATE:
8098 		OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_immediate_writes));
8099 		ledger_credit(task->ledger, ledger_to_update, io_size);
8100 		if (!is_external_device) {
8101 			coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
8102 		}
8103 		break;
8104 	case TASK_WRITE_DEFERRED:
8105 		OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_deferred_writes));
8106 		ledger_credit(task->ledger, ledger_to_update, io_size);
8107 		if (!is_external_device) {
8108 			coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
8109 		}
8110 		break;
8111 	case TASK_WRITE_INVALIDATED:
8112 		OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_invalidated_writes));
8113 		ledger_debit(task->ledger, ledger_to_update, io_size);
8114 		if (!is_external_device) {
8115 			coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, FALSE, io_size);
8116 		}
8117 		break;
8118 	case TASK_WRITE_METADATA:
8119 		OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_metadata_writes));
8120 		ledger_credit(task->ledger, ledger_to_update, io_size);
8121 		if (!is_external_device) {
8122 			coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
8123 		}
8124 		break;
8125 	}
8126 
8127 	io_delta = (flags == TASK_WRITE_INVALIDATED) ? ((int64_t)io_size * -1ll) : ((int64_t)io_size);
8128 	if (io_telemetry_limit != 0) {
8129 		/* If io_telemetry_limit is 0, disable global updates and I/O telemetry */
8130 		needs_telemetry = global_update_logical_writes(io_delta, global_counter_to_update);
8131 		if (needs_telemetry && !is_external_device) {
8132 			act_set_io_telemetry_ast(current_thread());
8133 		}
8134 	}
8135 }
8136 
8137 /*
8138  * Control the I/O monitor for a task.
8139  */
8140 kern_return_t
8141 task_io_monitor_ctl(task_t task, uint32_t *flags)
8142 {
8143 	ledger_t ledger = task->ledger;
8144 
8145 	task_lock(task);
8146 	if (*flags & IOMON_ENABLE) {
8147 		/* Configure the physical I/O ledger */
8148 		ledger_set_limit(ledger, task_ledgers.physical_writes, (task_iomon_limit_mb * 1024 * 1024), 0);
8149 		ledger_set_period(ledger, task_ledgers.physical_writes, (task_iomon_interval_secs * NSEC_PER_SEC));
8150 	} else if (*flags & IOMON_DISABLE) {
8151 		/*
8152 		 * Caller wishes to disable I/O monitor on the task.
8153 		 */
8154 		ledger_disable_refill(ledger, task_ledgers.physical_writes);
8155 		ledger_disable_callback(ledger, task_ledgers.physical_writes);
8156 	}
8157 
8158 	task_unlock(task);
8159 	return KERN_SUCCESS;
8160 }
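/*
 * Illustrative sketch (not part of the source): callers only choose a
 * direction; the limit (task_iomon_limit_mb MB) and refill period
 * (task_iomon_interval_secs seconds) come from global tunables:
 *
 *	uint32_t flags = IOMON_ENABLE;	// or IOMON_DISABLE
 *	task_io_monitor_ctl(task, &flags);
 *
 * IOMON_DISABLE tears down both the refill and the ledger callback, which
 * is how SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO() below
 * silences further notifications after the first violation.
 */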
8161 
8162 void
8163 task_io_rate_exceeded(int warning, const void *param0, __unused const void *param1)
8164 {
8165 	if (warning == 0) {
8166 		SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO((int)param0);
8167 	}
8168 }
8169 
8170 void __attribute__((noinline))
8171 SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor)
8172 {
8173 	int                             pid = 0;
8174 	task_t                          task = current_task();
8175 #ifdef EXC_RESOURCE_MONITORS
8176 	mach_exception_data_type_t      code[EXCEPTION_CODE_MAX];
8177 #endif /* EXC_RESOURCE_MONITORS */
8178 	struct ledger_entry_info        lei = {};
8179 	kern_return_t                   kr;
8180 
8181 #ifdef MACH_BSD
8182 	pid = proc_selfpid();
8183 #endif
8184 	/*
8185 	 * Get the ledger entry info. We need to do this before disabling the exception
8186 	 * to get correct values for all fields.
8187 	 */
8188 	switch (flavor) {
8189 	case FLAVOR_IO_PHYSICAL_WRITES:
8190 		ledger_get_entry_info(task->ledger, task_ledgers.physical_writes, &lei);
8191 		break;
8192 	}
8193 
8194 
8195 	/*
8196 	 * Disable the exception notification so we don't overwhelm
8197 	 * the listener with an endless stream of redundant exceptions.
8198 	 * TODO: detect whether another thread is already reporting the violation.
8199 	 */
8200 	uint32_t flags = IOMON_DISABLE;
8201 	task_io_monitor_ctl(task, &flags);
8202 
8203 	if (flavor == FLAVOR_IO_LOGICAL_WRITES) {
8204 		trace_resource_violation(RMON_LOGWRITES_VIOLATED, &lei);
8205 	}
8206 	os_log(OS_LOG_DEFAULT, "process [%d] caught causing excessive I/O (flavor: %d). Task I/O: %lld MB. [Limit : %lld MB per %lld secs]\n",
8207 	    pid, flavor, (lei.lei_balance / (1024 * 1024)), (lei.lei_limit / (1024 * 1024)), (lei.lei_refill_period / NSEC_PER_SEC));
8208 
8209 	kr = send_resource_violation(send_disk_writes_violation, task, &lei, kRNFlagsNone);
8210 	if (kr) {
8211 		printf("send_resource_violation(disk_writes, ...): error %#x\n", kr);
8212 	}
8213 
8214 #ifdef EXC_RESOURCE_MONITORS
8215 	code[0] = code[1] = 0;
8216 	EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_IO);
8217 	EXC_RESOURCE_ENCODE_FLAVOR(code[0], flavor);
8218 	EXC_RESOURCE_IO_ENCODE_INTERVAL(code[0], (lei.lei_refill_period / NSEC_PER_SEC));
8219 	EXC_RESOURCE_IO_ENCODE_LIMIT(code[0], (lei.lei_limit / (1024 * 1024)));
8220 	EXC_RESOURCE_IO_ENCODE_OBSERVED(code[1], (lei.lei_balance / (1024 * 1024)));
8221 	exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
8222 #endif /* EXC_RESOURCE_MONITORS */
8223 }
8224 
8225 void
8226 task_port_space_ast(__unused task_t task)
8227 {
8228 	uint32_t current_size, soft_limit, hard_limit;
8229 	assert(task == current_task());
8230 	kern_return_t ret = ipc_space_get_table_size_and_limits(task->itk_space,
8231 	    &current_size, &soft_limit, &hard_limit);
8232 	if (ret == KERN_SUCCESS) {
8233 		SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_MACH_PORTS(task, current_size, soft_limit, hard_limit);
8234 	}
8235 }
8236 
8237 #if CONFIG_PROC_RESOURCE_LIMITS
8238 static mach_port_t
8239 task_allocate_fatal_port(void)
8240 {
8241 	mach_port_t task_fatal_port = MACH_PORT_NULL;
8242 	task_id_token_t token;
8243 
8244 	kern_return_t kr = task_create_identity_token(current_task(), &token); /* Takes a reference on the token */
8245 	if (kr) {
8246 		return MACH_PORT_NULL;
8247 	}
8248 	task_fatal_port = ipc_kobject_alloc_port((ipc_kobject_t)token, IKOT_TASK_FATAL,
8249 	    IPC_KOBJECT_ALLOC_NSREQUEST | IPC_KOBJECT_ALLOC_MAKE_SEND);
8250 
8251 	task_id_token_set_port(token, task_fatal_port);
8252 
8253 	return task_fatal_port;
8254 }
8255 
8256 static void
8257 task_fatal_port_no_senders(ipc_port_t port, __unused mach_port_mscount_t mscount)
8258 {
8259 	task_t task = TASK_NULL;
8260 	kern_return_t kr;
8261 
8262 	task_id_token_t token = ipc_kobject_get_stable(port, IKOT_TASK_FATAL);
8263 
8264 	assert(token != NULL);
8265 	if (token) {
8266 		kr = task_identity_token_get_task_grp(token, &task, TASK_GRP_KERNEL); /* takes a reference on task */
8267 		if (task) {
8268 			task_bsdtask_kill(task);
8269 			task_deallocate(task);
8270 		}
8271 		task_id_token_release(token); /* consumes ref given by notification */
8272 	}
8273 }
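/*
 * Illustrative sketch (not part of the source) of the fatal-port lifecycle:
 * task_allocate_fatal_port() wraps a task identity token in a kobject port,
 * the send right is handed to the resource-violation listener, and once all
 * send rights are gone the no-senders notification above kills the task:
 *
 *	mach_port_t port = task_allocate_fatal_port();
 *	if (port != MACH_PORT_NULL) {
 *		send_resource_violation_with_fatal_port(send_port_space_violation,
 *		    task, (int64_t)current_size, (int64_t)limit, port, flags);
 *		ipc_port_release_send(port);	// listener's right now defers the kill
 *	}
 */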
8274 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
8275 
8276 void __attribute__((noinline))
8277 SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_MACH_PORTS(task_t task, uint32_t current_size, uint32_t soft_limit, uint32_t hard_limit)
8278 {
8279 	int pid = 0;
8280 	char *procname = (char *) "unknown";
8281 	__unused kern_return_t kr;
8282 	__unused resource_notify_flags_t flags = kRNFlagsNone;
8283 	__unused uint32_t limit;
8284 	__unused mach_port_t task_fatal_port = MACH_PORT_NULL;
8285 	mach_exception_data_type_t      code[EXCEPTION_CODE_MAX];
8286 
8287 #ifdef MACH_BSD
8288 	pid = proc_selfpid();
8289 	if (get_bsdtask_info(task) != NULL) {
8290 		procname = proc_name_address(get_bsdtask_info(task));
8291 	}
8292 #endif
8293 	/*
8294 	 * Only kernel_task and launchd are allowed to
8295 	 * have a really large IPC space.
8296 	 */
8297 	if (pid == 0 || pid == 1) {
8298 		return;
8299 	}
8300 
8301 	os_log(OS_LOG_DEFAULT, "process %s[%d] caught allocating too many mach ports. \
8302 	    Num of ports allocated %u; \n", procname, pid, current_size);
8303 
8304 	/* Abort the process if it has hit the system-wide limit for ipc port table size */
8305 	if (!hard_limit && !soft_limit) {
8306 		code[0] = code[1] = 0;
8307 		EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_PORTS);
8308 		EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_PORT_SPACE_FULL);
8309 		EXC_RESOURCE_PORTS_ENCODE_PORTS(code[0], current_size);
8310 
8311 		exit_with_port_space_exception(current_proc(), code[0], code[1]);
8312 
8313 		return;
8314 	}
8315 
8316 #if CONFIG_PROC_RESOURCE_LIMITS
8317 	if (hard_limit > 0) {
8318 		flags |= kRNHardLimitFlag;
8319 		limit = hard_limit;
8320 		task_fatal_port = task_allocate_fatal_port();
8321 		if (!task_fatal_port) {
8322 			os_log(OS_LOG_DEFAULT, "process %s[%d] Unable to create task token ident object", procname, pid);
8323 			task_bsdtask_kill(task);
8324 		}
8325 	} else {
8326 		flags |= kRNSoftLimitFlag;
8327 		limit = soft_limit;
8328 	}
8329 
8330 	kr = send_resource_violation_with_fatal_port(send_port_space_violation, task, (int64_t)current_size, (int64_t)limit, task_fatal_port, flags);
8331 	if (kr) {
8332 		os_log(OS_LOG_DEFAULT, "send_resource_violation(ports, ...): error %#x\n", kr);
8333 	}
8334 	if (task_fatal_port) {
8335 		ipc_port_release_send(task_fatal_port);
8336 	}
8337 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
8338 }
8339 
8340 void
8341 task_filedesc_ast(__unused task_t task, __unused int current_size, __unused int soft_limit, __unused int hard_limit)
8342 {
8343 #if CONFIG_PROC_RESOURCE_LIMITS
8344 	assert(task == current_task());
8345 	SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task, current_size, soft_limit, hard_limit);
8346 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
8347 }
8348 
8349 #if CONFIG_PROC_RESOURCE_LIMITS
8350 void __attribute__((noinline))
8351 SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task_t task, int current_size, int soft_limit, int hard_limit)
8352 {
8353 	int pid = 0;
8354 	char *procname = (char *) "unknown";
8355 	kern_return_t kr;
8356 	resource_notify_flags_t flags = kRNFlagsNone;
8357 	int limit;
8358 	mach_port_t task_fatal_port = MACH_PORT_NULL;
8359 
8360 #ifdef MACH_BSD
8361 	pid = proc_selfpid();
8362 	if (get_bsdtask_info(task) != NULL) {
8363 		procname = proc_name_address(get_bsdtask_info(task));
8364 	}
8365 #endif
8366 	/*
8367 	 * Only kernel_task and launchd are allowed to
8368 	 * have a really large number of file descriptors.
8369 	 */
8370 	if (pid == 0 || pid == 1) {
8371 		return;
8372 	}
8373 
8374 	os_log(OS_LOG_DEFAULT, "process %s[%d] caught allocating too many file descriptors. \
8375 	    Num of fds allocated %d; \n", procname, pid, current_size);
8376 
8377 	if (hard_limit > 0) {
8378 		flags |= kRNHardLimitFlag;
8379 		limit = hard_limit;
8380 		task_fatal_port = task_allocate_fatal_port();
8381 		if (!task_fatal_port) {
8382 			os_log(OS_LOG_DEFAULT, "process %s[%d] Unable to create task token ident object", procname, pid);
8383 			task_bsdtask_kill(task);
8384 		}
8385 	} else {
8386 		flags |= kRNSoftLimitFlag;
8387 		limit = soft_limit;
8388 	}
8389 
8390 	kr = send_resource_violation_with_fatal_port(send_file_descriptors_violation, task, (int64_t)current_size, (int64_t)limit, task_fatal_port, flags);
8391 	if (kr) {
8392 		os_log(OS_LOG_DEFAULT, "send_resource_violation_with_fatal_port(filedesc, ...): error %#x\n", kr);
8393 	}
8394 	if (task_fatal_port) {
8395 		ipc_port_release_send(task_fatal_port);
8396 	}
8397 }
8398 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
8399 
8400 /* Placeholders for the task set/get voucher interfaces */
8401 kern_return_t
8402 task_get_mach_voucher(
8403 	task_t                  task,
8404 	mach_voucher_selector_t __unused which,
8405 	ipc_voucher_t           *voucher)
8406 {
8407 	if (TASK_NULL == task) {
8408 		return KERN_INVALID_TASK;
8409 	}
8410 
8411 	*voucher = NULL;
8412 	return KERN_SUCCESS;
8413 }
8414 
8415 kern_return_t
8416 task_set_mach_voucher(
8417 	task_t                  task,
8418 	ipc_voucher_t           __unused voucher)
8419 {
8420 	if (TASK_NULL == task) {
8421 		return KERN_INVALID_TASK;
8422 	}
8423 
8424 	return KERN_SUCCESS;
8425 }
8426 
8427 kern_return_t
8428 task_swap_mach_voucher(
8429 	__unused task_t         task,
8430 	__unused ipc_voucher_t  new_voucher,
8431 	ipc_voucher_t          *in_out_old_voucher)
8432 {
8433 	/*
8434 	 * Currently this function is only called from a MIG generated
8435 	 * routine which doesn't release the reference on the voucher
8436 	 * addressed by in_out_old_voucher. To avoid leaking this reference,
8437 	 * a call to release it has been added here.
8438 	 */
8439 	ipc_voucher_release(*in_out_old_voucher);
8440 	OS_ANALYZER_SUPPRESS("81787115") return KERN_NOT_SUPPORTED;
8441 }
8442 
8443 void
8444 task_set_gpu_denied(task_t task, boolean_t denied)
8445 {
8446 	task_lock(task);
8447 
8448 	if (denied) {
8449 		task->t_flags |= TF_GPU_DENIED;
8450 	} else {
8451 		task->t_flags &= ~TF_GPU_DENIED;
8452 	}
8453 
8454 	task_unlock(task);
8455 }
8456 
8457 boolean_t
8458 task_is_gpu_denied(task_t task)
8459 {
8460 	/* We don't need the lock to read this flag */
8461 	return (task->t_flags & TF_GPU_DENIED) ? TRUE : FALSE;
8462 }
8463 
8464 /*
8465  * Task policy termination uses this path to clear the bit the final time
8466  * during the termination flow, and the TASK_POLICY_TERMINATED bit guarantees
8467  * that it won't be changed again on a terminated task.
8468  */
8469 bool
8470 task_set_game_mode_locked(task_t task, bool enabled)
8471 {
8472 	task_lock_assert_owned(task);
8473 
8474 	if (enabled) {
8475 		assert(proc_get_effective_task_policy(task, TASK_POLICY_TERMINATED) == 0);
8476 	}
8477 
8478 	bool previously_enabled = task_get_game_mode(task);
8479 	bool needs_update = false;
8480 	uint32_t new_count = 0;
8481 
8482 	if (enabled) {
8483 		task->t_flags |= TF_GAME_MODE;
8484 	} else {
8485 		task->t_flags &= ~TF_GAME_MODE;
8486 	}
8487 
8488 	if (enabled && !previously_enabled) {
8489 		if (task_coalition_adjust_game_mode_count(task, 1, &new_count) && (new_count == 1)) {
8490 			needs_update = true;
8491 		}
8492 	} else if (!enabled && previously_enabled) {
8493 		if (task_coalition_adjust_game_mode_count(task, -1, &new_count) && (new_count == 0)) {
8494 			needs_update = true;
8495 		}
8496 	}
8497 
8498 	return needs_update;
8499 }
8500 
8501 void
8502 task_set_game_mode(task_t task, bool enabled)
8503 {
8504 	bool needs_update = false;
8505 
8506 	task_lock(task);
8507 
8508 	/* After termination, further updates are no longer effective */
8509 	if (proc_get_effective_task_policy(task, TASK_POLICY_TERMINATED) == 0) {
8510 		needs_update = task_set_game_mode_locked(task, enabled);
8511 	}
8512 
8513 	task_unlock(task);
8514 
8515 #if CONFIG_THREAD_GROUPS
8516 	if (needs_update) {
8517 		task_coalition_thread_group_game_mode_update(task);
8518 	}
8519 #endif /* CONFIG_THREAD_GROUPS */
8520 }
8521 
8522 bool
8523 task_get_game_mode(task_t task)
8524 {
8525 	/* We don't need the lock to read this flag */
8526 	return task->t_flags & TF_GAME_MODE;
8527 }
8528 
8529 
8530 uint64_t
8531 get_task_memory_region_count(task_t task)
8532 {
8533 	vm_map_t map;
8534 	map = (task == kernel_task) ? kernel_map: task->map;
8535 	return (uint64_t)get_map_nentries(map);
8536 }
8537 
8538 static void
8539 kdebug_trace_dyld_internal(uint32_t base_code,
8540     struct dyld_kernel_image_info *info)
8541 {
8542 	static_assert(sizeof(info->uuid) >= 16);
8543 
8544 #if defined(__LP64__)
8545 	uint64_t *uuid = (uint64_t *)&(info->uuid);
8546 
8547 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8548 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code), uuid[0],
8549 	    uuid[1], info->load_addr,
8550 	    (uint64_t)info->fsid.val[0] | ((uint64_t)info->fsid.val[1] << 32),
8551 	    0);
8552 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8553 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 1),
8554 	    (uint64_t)info->fsobjid.fid_objno |
8555 	    ((uint64_t)info->fsobjid.fid_generation << 32),
8556 	    0, 0, 0, 0);
8557 #else /* defined(__LP64__) */
8558 	uint32_t *uuid = (uint32_t *)&(info->uuid);
8559 
8560 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8561 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 2), uuid[0],
8562 	    uuid[1], uuid[2], uuid[3], 0);
8563 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8564 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 3),
8565 	    (uint32_t)info->load_addr, info->fsid.val[0], info->fsid.val[1],
8566 	    info->fsobjid.fid_objno, 0);
8567 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8568 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 4),
8569 	    info->fsobjid.fid_generation, 0, 0, 0, 0);
8570 #endif /* !defined(__LP64__) */
8571 }
8572 
8573 static kern_return_t
8574 kdebug_trace_dyld(task_t task, uint32_t base_code,
8575     vm_map_copy_t infos_copy, mach_msg_type_number_t infos_len)
8576 {
8577 	kern_return_t kr;
8578 	dyld_kernel_image_info_array_t infos;
8579 	vm_map_offset_t map_data;
8580 	vm_offset_t data;
8581 
8582 	if (!infos_copy) {
8583 		return KERN_INVALID_ADDRESS;
8584 	}
8585 
8586 	if (!kdebug_enable ||
8587 	    !kdebug_debugid_enabled(KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, 0))) {
8588 		vm_map_copy_discard(infos_copy);
8589 		return KERN_SUCCESS;
8590 	}
8591 
8592 	if (task == NULL || task != current_task()) {
8593 		return KERN_INVALID_TASK;
8594 	}
8595 
8596 	kr = vm_map_copyout(ipc_kernel_map, &map_data, (vm_map_copy_t)infos_copy);
8597 	if (kr != KERN_SUCCESS) {
8598 		return kr;
8599 	}
8600 
8601 	infos = CAST_DOWN(dyld_kernel_image_info_array_t, map_data);
8602 
8603 	for (mach_msg_type_number_t i = 0; i < infos_len; i++) {
8604 		kdebug_trace_dyld_internal(base_code, &(infos[i]));
8605 	}
8606 
8607 	data = CAST_DOWN(vm_offset_t, map_data);
8608 	mach_vm_deallocate(ipc_kernel_map, data, infos_len * sizeof(infos[0]));
8609 	return KERN_SUCCESS;
8610 }
8611 
8612 kern_return_t
8613 task_register_dyld_image_infos(task_t task,
8614     dyld_kernel_image_info_array_t infos_copy,
8615     mach_msg_type_number_t infos_len)
8616 {
8617 	return kdebug_trace_dyld(task, DBG_DYLD_UUID_MAP_A,
8618 	           (vm_map_copy_t)infos_copy, infos_len);
8619 }
8620 
8621 kern_return_t
8622 task_unregister_dyld_image_infos(task_t task,
8623     dyld_kernel_image_info_array_t infos_copy,
8624     mach_msg_type_number_t infos_len)
8625 {
8626 	return kdebug_trace_dyld(task, DBG_DYLD_UUID_UNMAP_A,
8627 	           (vm_map_copy_t)infos_copy, infos_len);
8628 }
8629 
8630 kern_return_t
8631 task_get_dyld_image_infos(__unused task_t task,
8632     __unused dyld_kernel_image_info_array_t * dyld_images,
8633     __unused mach_msg_type_number_t * dyld_imagesCnt)
8634 {
8635 	return KERN_NOT_SUPPORTED;
8636 }
8637 
8638 kern_return_t
8639 task_register_dyld_shared_cache_image_info(task_t task,
8640     dyld_kernel_image_info_t cache_img,
8641     __unused boolean_t no_cache,
8642     __unused boolean_t private_cache)
8643 {
8644 	if (task == NULL || task != current_task()) {
8645 		return KERN_INVALID_TASK;
8646 	}
8647 
8648 	kdebug_trace_dyld_internal(DBG_DYLD_UUID_SHARED_CACHE_A, &cache_img);
8649 	return KERN_SUCCESS;
8650 }
8651 
8652 kern_return_t
8653 task_register_dyld_set_dyld_state(__unused task_t task,
8654     __unused uint8_t dyld_state)
8655 {
8656 	return KERN_NOT_SUPPORTED;
8657 }
8658 
8659 kern_return_t
8660 task_register_dyld_get_process_state(__unused task_t task,
8661     __unused dyld_kernel_process_info_t * dyld_process_state)
8662 {
8663 	return KERN_NOT_SUPPORTED;
8664 }
8665 
8666 kern_return_t
8667 task_inspect(task_inspect_t task_insp, task_inspect_flavor_t flavor,
8668     task_inspect_info_t info_out, mach_msg_type_number_t *size_in_out)
8669 {
8670 #if CONFIG_PERVASIVE_CPI
8671 	task_t task = (task_t)task_insp;
8672 	kern_return_t kr = KERN_SUCCESS;
8673 	mach_msg_type_number_t size;
8674 
8675 	if (task == TASK_NULL) {
8676 		return KERN_INVALID_ARGUMENT;
8677 	}
8678 
8679 	size = *size_in_out;
8680 
8681 	switch (flavor) {
8682 	case TASK_INSPECT_BASIC_COUNTS: {
8683 		struct task_inspect_basic_counts *bc =
8684 		    (struct task_inspect_basic_counts *)info_out;
8685 		struct recount_usage stats = { 0 };
8686 		if (size < TASK_INSPECT_BASIC_COUNTS_COUNT) {
8687 			kr = KERN_INVALID_ARGUMENT;
8688 			break;
8689 		}
8690 
8691 		recount_sum(&recount_task_plan, task->tk_recount.rtk_lifetime, &stats);
8692 		bc->instructions = stats.ru_instructions;
8693 		bc->cycles = stats.ru_cycles;
8694 		size = TASK_INSPECT_BASIC_COUNTS_COUNT;
8695 		break;
8696 	}
8697 	default:
8698 		kr = KERN_INVALID_ARGUMENT;
8699 		break;
8700 	}
8701 
8702 	if (kr == KERN_SUCCESS) {
8703 		*size_in_out = size;
8704 	}
8705 	return kr;
8706 #else /* CONFIG_PERVASIVE_CPI */
8707 #pragma unused(task_insp, flavor, info_out, size_in_out)
8708 	return KERN_NOT_SUPPORTED;
8709 #endif /* !CONFIG_PERVASIVE_CPI */
8710 }
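/*
 * Illustrative sketch (not part of the source): reading a task's lifetime
 * instruction and cycle counts on a CONFIG_PERVASIVE_CPI kernel:
 *
 *	struct task_inspect_basic_counts bc;
 *	mach_msg_type_number_t cnt = TASK_INSPECT_BASIC_COUNTS_COUNT;
 *	if (task_inspect(task, TASK_INSPECT_BASIC_COUNTS,
 *	    (task_inspect_info_t)&bc, &cnt) == KERN_SUCCESS) {
 *		// bc.instructions and bc.cycles hold the recount totals
 *	}
 */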
8711 
8712 #if CONFIG_SECLUDED_MEMORY
8713 int num_tasks_can_use_secluded_mem = 0;
8714 
8715 void
8716 task_set_can_use_secluded_mem(
8717 	task_t          task,
8718 	boolean_t       can_use_secluded_mem)
8719 {
8720 	if (!task->task_could_use_secluded_mem) {
8721 		return;
8722 	}
8723 	task_lock(task);
8724 	task_set_can_use_secluded_mem_locked(task, can_use_secluded_mem);
8725 	task_unlock(task);
8726 }
8727 
8728 void
8729 task_set_can_use_secluded_mem_locked(
8730 	task_t          task,
8731 	boolean_t       can_use_secluded_mem)
8732 {
8733 	assert(task->task_could_use_secluded_mem);
8734 	if (can_use_secluded_mem &&
8735 	    secluded_for_apps &&         /* global boot-arg */
8736 	    !task->task_can_use_secluded_mem) {
8737 		assert(num_tasks_can_use_secluded_mem >= 0);
8738 		OSAddAtomic(+1,
8739 		    (volatile SInt32 *)&num_tasks_can_use_secluded_mem);
8740 		task->task_can_use_secluded_mem = TRUE;
8741 	} else if (!can_use_secluded_mem &&
8742 	    task->task_can_use_secluded_mem) {
8743 		assert(num_tasks_can_use_secluded_mem > 0);
8744 		OSAddAtomic(-1,
8745 		    (volatile SInt32 *)&num_tasks_can_use_secluded_mem);
8746 		task->task_can_use_secluded_mem = FALSE;
8747 	}
8748 }
8749 
8750 void
8751 task_set_could_use_secluded_mem(
8752 	task_t          task,
8753 	boolean_t       could_use_secluded_mem)
8754 {
8755 	task->task_could_use_secluded_mem = !!could_use_secluded_mem;
8756 }
8757 
8758 void
8759 task_set_could_also_use_secluded_mem(
8760 	task_t          task,
8761 	boolean_t       could_also_use_secluded_mem)
8762 {
8763 	task->task_could_also_use_secluded_mem = !!could_also_use_secluded_mem;
8764 }
8765 
8766 boolean_t
8767 task_can_use_secluded_mem(
8768 	task_t          task,
8769 	boolean_t       is_alloc)
8770 {
8771 	if (task->task_can_use_secluded_mem) {
8772 		assert(task->task_could_use_secluded_mem);
8773 		assert(num_tasks_can_use_secluded_mem > 0);
8774 		return TRUE;
8775 	}
8776 	if (task->task_could_also_use_secluded_mem &&
8777 	    num_tasks_can_use_secluded_mem > 0) {
8778 		assert(num_tasks_can_use_secluded_mem > 0);
8779 		return TRUE;
8780 	}
8781 
8782 	/*
8783 	 * If a single task is using more than some large amount of
8784 	 * memory (i.e. secluded_shutoff_trigger) and is approaching
8785 	 * its task limit, allow it to dip into secluded and begin
8786 	 * suppression of rebuilding secluded memory until that task exits.
8787 	 */
8788 	if (is_alloc && secluded_shutoff_trigger != 0) {
8789 		uint64_t phys_used = get_task_phys_footprint(task);
8790 		uint64_t limit = get_task_phys_footprint_limit(task);
8791 		if (phys_used > secluded_shutoff_trigger &&
8792 		    limit > secluded_shutoff_trigger &&
8793 		    phys_used > limit - secluded_shutoff_headroom) {
8794 			start_secluded_suppression(task);
8795 			return TRUE;
8796 		}
8797 	}
8798 
8799 	return FALSE;
8800 }
8801 
8802 boolean_t
8803 task_could_use_secluded_mem(
8804 	task_t  task)
8805 {
8806 	return task->task_could_use_secluded_mem;
8807 }
8808 
8809 boolean_t
8810 task_could_also_use_secluded_mem(
8811 	task_t  task)
8812 {
8813 	return task->task_could_also_use_secluded_mem;
8814 }
8815 #endif /* CONFIG_SECLUDED_MEMORY */
8816 
8817 queue_head_t *
8818 task_io_user_clients(task_t task)
8819 {
8820 	return &task->io_user_clients;
8821 }
8822 
8823 void
8824 task_set_message_app_suspended(task_t task, boolean_t enable)
8825 {
8826 	task->message_app_suspended = enable;
8827 }
8828 
8829 void
8830 task_copy_fields_for_exec(task_t dst_task, task_t src_task)
8831 {
8832 	dst_task->vtimers = src_task->vtimers;
8833 }
8834 
8835 #if DEVELOPMENT || DEBUG
8836 int vm_region_footprint = 0;
8837 #endif /* DEVELOPMENT || DEBUG */
8838 
8839 boolean_t
8840 task_self_region_footprint(void)
8841 {
8842 #if DEVELOPMENT || DEBUG
8843 	if (vm_region_footprint) {
8844 		/* system-wide override */
8845 		return TRUE;
8846 	}
8847 #endif /* DEVELOPMENT || DEBUG */
8848 	return current_task()->task_region_footprint;
8849 }
8850 
8851 void
8852 task_self_region_footprint_set(
8853 	boolean_t newval)
8854 {
8855 	task_t  curtask;
8856 
8857 	curtask = current_task();
8858 	task_lock(curtask);
8859 	if (newval) {
8860 		curtask->task_region_footprint = TRUE;
8861 	} else {
8862 		curtask->task_region_footprint = FALSE;
8863 	}
8864 	task_unlock(curtask);
8865 }
8866 
8867 void
8868 task_set_darkwake_mode(task_t task, boolean_t set_mode)
8869 {
8870 	assert(task);
8871 
8872 	task_lock(task);
8873 
8874 	if (set_mode) {
8875 		task->t_flags |= TF_DARKWAKE_MODE;
8876 	} else {
8877 		task->t_flags &= ~(TF_DARKWAKE_MODE);
8878 	}
8879 
8880 	task_unlock(task);
8881 }
8882 
8883 boolean_t
8884 task_get_darkwake_mode(task_t task)
8885 {
8886 	assert(task);
8887 	return (task->t_flags & TF_DARKWAKE_MODE) != 0;
8888 }
8889 
8890 /*
8891  * Set default behavior for task's control port and EXC_GUARD variants that have
8892  * settable behavior.
8893  *
8894  * Platform binaries typically have one behavior, third parties another -
8895  * but there are special exception we may need to account for.
8896  */
8897 void
8898 task_set_exc_guard_ctrl_port_default(
8899 	task_t task,
8900 	thread_t main_thread,
8901 	const char *name,
8902 	unsigned int namelen,
8903 	boolean_t is_simulated,
8904 	uint32_t platform,
8905 	uint32_t sdk)
8906 {
8907 	task_control_port_options_t opts = TASK_CONTROL_PORT_OPTIONS_NONE;
8908 
8909 	if (task_get_platform_binary(task)) {
8910 		/* set exc guard default behavior for first-party code */
8911 		task->task_exc_guard = (task_exc_guard_default & TASK_EXC_GUARD_ALL);
8912 
8913 		if (1 == task_pid(task)) {
8914 			/* special flags for inittask - deliver every instance as a corpse */
8915 			task->task_exc_guard = _TASK_EXC_GUARD_ALL_CORPSE;
8916 		} else if (task_exc_guard_default & TASK_EXC_GUARD_HONOR_NAMED_DEFAULTS) {
8917 			/* honor by-name default setting overrides */
8918 
8919 			int count = sizeof(task_exc_guard_named_defaults) / sizeof(struct task_exc_guard_named_default);
8920 
8921 			for (int i = 0; i < count; i++) {
8922 				const struct task_exc_guard_named_default *named_default =
8923 				    &task_exc_guard_named_defaults[i];
8924 				if (strncmp(named_default->name, name, namelen) == 0 &&
8925 				    strlen(named_default->name) == namelen) {
8926 					task->task_exc_guard = named_default->behavior;
8927 					break;
8928 				}
8929 			}
8930 		}
8931 
8932 		/* set control port options for 1p code, inherited from parent task by default */
8933 		opts = ipc_control_port_options & ICP_OPTIONS_1P_MASK;
8934 	} else {
8935 		/* set exc guard default behavior for third-party code */
8936 		task->task_exc_guard = ((task_exc_guard_default >> TASK_EXC_GUARD_THIRD_PARTY_DEFAULT_SHIFT) & TASK_EXC_GUARD_ALL);
8937 		/* set control port options for 3p code, inherited from parent task by default */
8938 		opts = (ipc_control_port_options & ICP_OPTIONS_3P_MASK) >> ICP_OPTIONS_3P_SHIFT;
8939 	}
8940 
8941 	if (is_simulated) {
8942 		/* If simulated and built against pre-iOS 15 SDK, disable all EXC_GUARD */
8943 		if ((platform == PLATFORM_IOSSIMULATOR && sdk < 0xf0000) ||
8944 		    (platform == PLATFORM_TVOSSIMULATOR && sdk < 0xf0000) ||
8945 		    (platform == PLATFORM_WATCHOSSIMULATOR && sdk < 0x80000)) {
8946 			task->task_exc_guard = TASK_EXC_GUARD_NONE;
8947 		}
8948 		/* Disable protection for control ports for simulated binaries */
8949 		opts = TASK_CONTROL_PORT_OPTIONS_NONE;
8950 	}
8951 
8952 
8953 	task_set_control_port_options(task, opts);
8954 
8955 	task_set_immovable_pinned(task);
8956 	main_thread_set_immovable_pinned(main_thread);
8957 }
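/*
 * Illustrative sketch (not part of the source; the exact declaration is an
 * assumption): the by-name override table consulted above pairs a process
 * name with a behavior mask, so a hypothetical entry would look like:
 *
 *	static const struct task_exc_guard_named_default task_exc_guard_named_defaults[] = {
 *		{ .name = "example_daemon", .behavior = TASK_EXC_GUARD_NONE },	// hypothetical
 *	};
 *
 * A match requires both the strncmp() prefix test and an exact length
 * match, so "example_daemon" does not also capture "example_daemon2".
 */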
8958 
8959 kern_return_t
8960 task_get_exc_guard_behavior(
8961 	task_t task,
8962 	task_exc_guard_behavior_t *behaviorp)
8963 {
8964 	if (task == TASK_NULL) {
8965 		return KERN_INVALID_TASK;
8966 	}
8967 	*behaviorp = task->task_exc_guard;
8968 	return KERN_SUCCESS;
8969 }
8970 
8971 kern_return_t
8972 task_set_exc_guard_behavior(
8973 	task_t task,
8974 	task_exc_guard_behavior_t new_behavior)
8975 {
8976 	if (task == TASK_NULL) {
8977 		return KERN_INVALID_TASK;
8978 	}
8979 	if (new_behavior & ~TASK_EXC_GUARD_ALL) {
8980 		return KERN_INVALID_VALUE;
8981 	}
8982 
8983 	/* limit setting to that allowed for this config */
8984 	new_behavior = new_behavior & task_exc_guard_config_mask;
8985 
8986 #if !defined (DEBUG) && !defined (DEVELOPMENT)
8987 	/* On release kernels, only allow _upgrading_ exc guard behavior */
8988 	task_exc_guard_behavior_t cur_behavior;
8989 
8990 	os_atomic_rmw_loop(&task->task_exc_guard, cur_behavior, new_behavior, relaxed, {
8991 		if ((cur_behavior & task_exc_guard_no_unset_mask) & ~(new_behavior & task_exc_guard_no_unset_mask)) {
8992 		        os_atomic_rmw_loop_give_up(return KERN_DENIED);
8993 		}
8994 
8995 		if ((new_behavior & task_exc_guard_no_set_mask) & ~(cur_behavior & task_exc_guard_no_set_mask)) {
8996 		        os_atomic_rmw_loop_give_up(return KERN_DENIED);
8997 		}
8998 
8999 		/* no restrictions on CORPSE bit */
9000 	});
9001 #else
9002 	task->task_exc_guard = new_behavior;
9003 #endif
9004 	return KERN_SUCCESS;
9005 }
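/*
 * Illustrative sketch (not part of the source): on RELEASE kernels the
 * atomic loop above makes guard behavior effectively one-way. Assuming a
 * delivery bit sits in task_exc_guard_no_unset_mask, clearing it later is
 * refused:
 *
 *	task_set_exc_guard_behavior(task, some_deliver_bit);	// KERN_SUCCESS
 *	task_set_exc_guard_behavior(task, TASK_EXC_GUARD_NONE);	// KERN_DENIED
 *
 * some_deliver_bit is a placeholder, not a real flag name; only the corpse
 * bit is noted above as unrestricted.
 */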
9006 
9007 kern_return_t
9008 task_set_corpse_forking_behavior(task_t task, task_corpse_forking_behavior_t behavior)
9009 {
9010 #if DEVELOPMENT || DEBUG
9011 	if (task == TASK_NULL) {
9012 		return KERN_INVALID_TASK;
9013 	}
9014 
9015 	task_lock(task);
9016 	if (behavior & TASK_CORPSE_FORKING_DISABLED_MEM_DIAG) {
9017 		task->t_flags |= TF_NO_CORPSE_FORKING;
9018 	} else {
9019 		task->t_flags &= ~TF_NO_CORPSE_FORKING;
9020 	}
9021 	task_unlock(task);
9022 
9023 	return KERN_SUCCESS;
9024 #else
9025 	(void)task;
9026 	(void)behavior;
9027 	return KERN_NOT_SUPPORTED;
9028 #endif
9029 }
9030 
9031 boolean_t
9032 task_corpse_forking_disabled(task_t task)
9033 {
9034 	boolean_t disabled = FALSE;
9035 
9036 	task_lock(task);
9037 	disabled = (task->t_flags & TF_NO_CORPSE_FORKING);
9038 	task_unlock(task);
9039 
9040 	return disabled;
9041 }
9042 
9043 #if __arm64__
9044 extern int legacy_footprint_entitlement_mode;
9045 extern void memorystatus_act_on_legacy_footprint_entitlement(struct proc *, boolean_t);
9046 extern void memorystatus_act_on_ios13extended_footprint_entitlement(struct proc *);
9047 
9048 
9049 void
9050 task_set_legacy_footprint(
9051 	task_t task)
9052 {
9053 	task_lock(task);
9054 	task->task_legacy_footprint = TRUE;
9055 	task_unlock(task);
9056 }
9057 
9058 void
9059 task_set_extra_footprint_limit(
9060 	task_t task)
9061 {
9062 	if (task->task_extra_footprint_limit) {
9063 		return;
9064 	}
9065 	task_lock(task);
9066 	if (task->task_extra_footprint_limit) {
9067 		task_unlock(task);
9068 		return;
9069 	}
9070 	task->task_extra_footprint_limit = TRUE;
9071 	task_unlock(task);
9072 	memorystatus_act_on_legacy_footprint_entitlement(get_bsdtask_info(task), TRUE);
9073 }
9074 
9075 void
9076 task_set_ios13extended_footprint_limit(
9077 	task_t task)
9078 {
9079 	if (task->task_ios13extended_footprint_limit) {
9080 		return;
9081 	}
9082 	task_lock(task);
9083 	if (task->task_ios13extended_footprint_limit) {
9084 		task_unlock(task);
9085 		return;
9086 	}
9087 	task->task_ios13extended_footprint_limit = TRUE;
9088 	task_unlock(task);
9089 	memorystatus_act_on_ios13extended_footprint_entitlement(get_bsdtask_info(task));
9090 }
9091 #endif /* __arm64__ */
9092 
9093 static inline ledger_amount_t
9094 task_ledger_get_balance(
9095 	ledger_t        ledger,
9096 	int             ledger_idx)
9097 {
9098 	ledger_amount_t amount;
9099 	amount = 0;
9100 	ledger_get_balance(ledger, ledger_idx, &amount);
9101 	return amount;
9102 }
9103 
9104 /*
9105  * Gather the amount of memory counted in a task's footprint due to
9106  * being in a specific set of ledgers.
9107  */
9108 void
9109 task_ledgers_footprint(
9110 	ledger_t        ledger,
9111 	ledger_amount_t *ledger_resident,
9112 	ledger_amount_t *ledger_compressed)
9113 {
9114 	*ledger_resident = 0;
9115 	*ledger_compressed = 0;
9116 
9117 	/* purgeable non-volatile memory */
9118 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.purgeable_nonvolatile);
9119 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.purgeable_nonvolatile_compressed);
9120 
9121 	/* "default" tagged memory */
9122 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.tagged_footprint);
9123 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.tagged_footprint_compressed);
9124 
9125 	/* "network" currently never counts in the footprint... */
9126 
9127 	/* "media" tagged memory */
9128 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.media_footprint);
9129 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.media_footprint_compressed);
9130 
9131 	/* "graphics" tagged memory */
9132 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.graphics_footprint);
9133 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.graphics_footprint_compressed);
9134 
9135 	/* "neural" tagged memory */
9136 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.neural_footprint);
9137 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.neural_footprint_compressed);
9138 }
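/*
 * Illustrative sketch (not part of the source): summing a task's
 * footprint contributions from these ledgers:
 *
 *	ledger_amount_t resident, compressed;
 *	task_ledgers_footprint(task->ledger, &resident, &compressed);
 *	// resident + compressed = bytes these ledgers add to the footprint
 */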
9139 
9140 #if CONFIG_MEMORYSTATUS
9141 /*
9142  * Credit any outstanding task dirty time to the ledger.
9143  * memstat_dirty_start is pushed forward to prevent any possibility of double
9144  * counting, making it safe to call this as often as necessary to ensure that
9145  * anyone reading the ledger gets up-to-date information.
9146  */
9147 void
9148 task_ledger_settle_dirty_time(task_t t)
9149 {
9150 	task_lock(t);
9151 
9152 	uint64_t start = t->memstat_dirty_start;
9153 	if (start) {
9154 		uint64_t now = mach_absolute_time();
9155 
9156 		uint64_t duration;
9157 		absolutetime_to_nanoseconds(now - start, &duration);
9158 
9159 		ledger_t ledger = get_task_ledger(t);
9160 		ledger_credit(ledger, task_ledgers.memorystatus_dirty_time, duration);
9161 
9162 		t->memstat_dirty_start = now;
9163 	}
9164 
9165 	task_unlock(t);
9166 }
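/*
 * Illustrative sketch (not part of the source): readers settle first so
 * the balance includes the interval that is still accruing:
 *
 *	ledger_amount_t dirty_ns = 0;
 *	task_ledger_settle_dirty_time(t);
 *	ledger_get_balance(get_task_ledger(t),
 *	    task_ledgers.memorystatus_dirty_time, &dirty_ns);
 */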
9167 #endif /* CONFIG_MEMORYSTATUS */
9168 
9169 void
9170 task_set_memory_ownership_transfer(
9171 	task_t    task,
9172 	boolean_t value)
9173 {
9174 	task_lock(task);
9175 	task->task_can_transfer_memory_ownership = !!value;
9176 	task_unlock(task);
9177 }
9178 
9179 #if DEVELOPMENT || DEBUG
9180 
9181 void
9182 task_set_no_footprint_for_debug(task_t task, boolean_t value)
9183 {
9184 	task_lock(task);
9185 	task->task_no_footprint_for_debug = !!value;
9186 	task_unlock(task);
9187 }
9188 
9189 int
9190 task_get_no_footprint_for_debug(task_t task)
9191 {
9192 	return task->task_no_footprint_for_debug;
9193 }
9194 
9195 #endif /* DEVELOPMENT || DEBUG */
9196 
9197 void
9198 task_copy_vmobjects(task_t task, vm_object_query_t query, size_t len, size_t *num)
9199 {
9200 	vm_object_t find_vmo;
9201 	size_t size = 0;
9202 
9203 	task_objq_lock(task);
9204 	if (query != NULL) {
9205 		queue_iterate(&task->task_objq, find_vmo, vm_object_t, task_objq)
9206 		{
9207 			vm_object_query_t p = &query[size++];
9208 
9209 			/* make sure to not overrun */
9210 			if (size * sizeof(vm_object_query_data_t) > len) {
9211 				--size;
9212 				break;
9213 			}
9214 
9215 			bzero(p, sizeof(*p));
9216 			p->object_id = (vm_object_id_t) VM_KERNEL_ADDRPERM(find_vmo);
9217 			p->virtual_size = find_vmo->internal ? find_vmo->vo_size : 0;
9218 			p->resident_size = find_vmo->resident_page_count * PAGE_SIZE;
9219 			p->wired_size = find_vmo->wired_page_count * PAGE_SIZE;
9220 			p->reusable_size = find_vmo->reusable_page_count * PAGE_SIZE;
9221 			p->vo_no_footprint = find_vmo->vo_no_footprint;
9222 			p->vo_ledger_tag = find_vmo->vo_ledger_tag;
9223 			p->purgable = find_vmo->purgable;
9224 
9225 			if (find_vmo->internal && find_vmo->pager_created && find_vmo->pager != NULL) {
9226 				p->compressed_size = vm_compressor_pager_get_count(find_vmo->pager) * PAGE_SIZE;
9227 			} else {
9228 				p->compressed_size = 0;
9229 			}
9230 		}
9231 	} else {
9232 		size = (size_t)task->task_owned_objects;
9233 	}
9234 	task_objq_unlock(task);
9235 
9236 	*num = size;
9237 }
9238 
9239 void
9240 task_get_owned_vmobjects(task_t task, size_t buffer_size, vmobject_list_output_t buffer, size_t* output_size, size_t* entries)
9241 {
9242 	assert(output_size);
9243 	assert(entries);
9244 
9245 	/* copy the vmobjects and vmobject data out of the task */
9246 	if (buffer_size == 0) {
9247 		task_copy_vmobjects(task, NULL, 0, entries);
9248 		*output_size = (*entries > 0) ? *entries * sizeof(vm_object_query_data_t) + sizeof(*buffer) : 0;
9249 	} else {
9250 		assert(buffer);
9251 		task_copy_vmobjects(task, &buffer->data[0], buffer_size - sizeof(*buffer), entries);
9252 		buffer->entries = (uint64_t)*entries;
9253 		*output_size = *entries * sizeof(vm_object_query_data_t) + sizeof(*buffer);
9254 	}
9255 }
9256 
9257 void
9258 task_store_owned_vmobject_info(task_t to_task, task_t from_task)
9259 {
9260 	size_t buffer_size;
9261 	vmobject_list_output_t buffer;
9262 	size_t output_size;
9263 	size_t entries;
9264 
9265 	assert(to_task != from_task);
9266 
9267 	/* get the size, allocate a buffer, and populate it */
9268 	entries = 0;
9269 	output_size = 0;
9270 	task_get_owned_vmobjects(from_task, 0, NULL, &output_size, &entries);
9271 
9272 	if (output_size) {
9273 		buffer_size = output_size;
9274 		buffer = kalloc_data(buffer_size, Z_WAITOK);
9275 
9276 		if (buffer) {
9277 			entries = 0;
9278 			output_size = 0;
9279 
9280 			task_get_owned_vmobjects(from_task, buffer_size, buffer, &output_size, &entries);
9281 
9282 			if (entries) {
9283 				to_task->corpse_vmobject_list = buffer;
9284 				to_task->corpse_vmobject_list_size = buffer_size;
9285 			}
9286 		}
9287 	}
9288 }
9289 
9290 void
9291 task_set_filter_msg_flag(
9292 	task_t task,
9293 	boolean_t flag)
9294 {
9295 	assert(task != TASK_NULL);
9296 
9297 	if (flag) {
9298 		task_ro_flags_set(task, TFRO_FILTER_MSG);
9299 	} else {
9300 		task_ro_flags_clear(task, TFRO_FILTER_MSG);
9301 	}
9302 }
9303 
9304 boolean_t
9305 task_get_filter_msg_flag(
9306 	task_t task)
9307 {
9308 	if (!task) {
9309 		return false;
9310 	}
9311 
9312 	return (task_ro_flags_get(task) & TFRO_FILTER_MSG) ? TRUE : FALSE;
9313 }
9314 bool
9315 task_is_exotic(
9316 	task_t task)
9317 {
9318 	if (task == TASK_NULL) {
9319 		return false;
9320 	}
9321 	return vm_map_is_exotic(get_task_map(task));
9322 }
9323 
9324 bool
9325 task_is_alien(
9326 	task_t task)
9327 {
9328 	if (task == TASK_NULL) {
9329 		return false;
9330 	}
9331 	return vm_map_is_alien(get_task_map(task));
9332 }
9333 
9334 
9335 
9336 #if CONFIG_MACF
9337 /* Set the filter mask for Mach traps. */
9338 void
9339 mac_task_set_mach_filter_mask(task_t task, uint8_t *maskptr)
9340 {
9341 	assert(task);
9342 
9343 	task_set_mach_trap_filter_mask(task, maskptr);
9344 }
9345 
9346 /* Set the filter mask for kobject msgs. */
9347 void
9348 mac_task_set_kobj_filter_mask(task_t task, uint8_t *maskptr)
9349 {
9350 	assert(task);
9351 
9352 	task_set_mach_kobj_filter_mask(task, maskptr);
9353 }
9354 
9355 /* Hook for mach trap/sc filter evaluation policy. */
9356 SECURITY_READ_ONLY_LATE(mac_task_mach_filter_cbfunc_t) mac_task_mach_trap_evaluate = NULL;
9357 
9358 /* Hook for kobj message filter evaluation policy. */
9359 SECURITY_READ_ONLY_LATE(mac_task_kobj_filter_cbfunc_t) mac_task_kobj_msg_evaluate = NULL;
9360 
9361 /* Set the callback hooks for the filtering policy. */
9362 int
9363 mac_task_register_filter_callbacks(
9364 	const mac_task_mach_filter_cbfunc_t mach_cbfunc,
9365 	const mac_task_kobj_filter_cbfunc_t kobj_cbfunc)
9366 {
9367 	if (mach_cbfunc != NULL) {
9368 		if (mac_task_mach_trap_evaluate != NULL) {
9369 			return KERN_FAILURE;
9370 		}
9371 		mac_task_mach_trap_evaluate = mach_cbfunc;
9372 	}
9373 	if (kobj_cbfunc != NULL) {
9374 		if (mac_task_kobj_msg_evaluate != NULL) {
9375 			return KERN_FAILURE;
9376 		}
9377 		mac_task_kobj_msg_evaluate = kobj_cbfunc;
9378 	}
9379 
9380 	return KERN_SUCCESS;
9381 }
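/*
 * Illustrative sketch (not part of the source): a filtering policy
 * registers its hooks once during boot; since the hook pointers are
 * SECURITY_READ_ONLY_LATE and may only transition from NULL, a second
 * registration of either hook fails:
 *
 *	// my_trap_cb and my_kobj_cb are hypothetical callbacks of the
 *	// mac_task_mach_filter_cbfunc_t / mac_task_kobj_filter_cbfunc_t types
 *	if (mac_task_register_filter_callbacks(my_trap_cb, my_kobj_cb) != KERN_SUCCESS) {
 *		// a policy already claimed one of the hooks
 *	}
 */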
9382 #endif /* CONFIG_MACF */
9383 
9384 #if CONFIG_ROSETTA
9385 bool
9386 task_is_translated(task_t task)
9387 {
9388 	extern boolean_t proc_is_translated(struct proc* p);
9389 	return task && proc_is_translated(get_bsdtask_info(task));
9390 }
9391 #endif
9392 
9393 
9394 #if __has_feature(ptrauth_calls)
9395 /* All pac violations will be delivered as fatal exceptions irrespective of
9396  * the enable_pac_exception boot-arg value.
9397  */
9398 #define PAC_EXCEPTION_ENTITLEMENT "com.apple.private.pac.exception"
9399 /*
9400  * When enable_pac_exception boot-arg is set to true, processes
9401  * can choose to get non-fatal pac exception delivery by setting
9402  * this entitlement.
9403  */
9404 #define SKIP_PAC_EXCEPTION_ENTITLEMENT "com.apple.private.skip.pac.exception"
9405 
9406 void
9407 task_set_pac_exception_fatal_flag(
9408 	task_t task)
9409 {
9410 	assert(task != TASK_NULL);
9411 	bool pac_entitlement = false;
9412 	uint32_t set_flags = 0;
9413 
9414 	if (enable_pac_exception && IOTaskHasEntitlement(task, SKIP_PAC_EXCEPTION_ENTITLEMENT)) {
9415 		return;
9416 	}
9417 
9418 	if (IOTaskHasEntitlement(task, PAC_EXCEPTION_ENTITLEMENT)) {
9419 		pac_entitlement = true;
9420 	}
9421 
9422 	if (pac_entitlement) {
9423 		set_flags |= TFRO_PAC_ENFORCE_USER_STATE;
9424 	}
9425 	if (pac_entitlement || (enable_pac_exception && task_get_platform_binary(task))) {
9426 		set_flags |= TFRO_PAC_EXC_FATAL;
9427 	}
9428 	if (set_flags != 0) {
9429 		task_ro_flags_set(task, set_flags);
9430 	}
9431 }
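/*
 * Summary of the selection above (illustrative, not part of the source):
 *
 *	state                                          RO flags set
 *	SKIP entitlement && enable_pac_exception       none (early return)
 *	PAC_EXCEPTION_ENTITLEMENT                      TFRO_PAC_ENFORCE_USER_STATE | TFRO_PAC_EXC_FATAL
 *	enable_pac_exception && platform binary        TFRO_PAC_EXC_FATAL
 */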
9432 
9433 bool
9434 task_is_pac_exception_fatal(
9435 	task_t task)
9436 {
9437 	assert(task != TASK_NULL);
9438 	return !!(task_ro_flags_get(task) & TFRO_PAC_EXC_FATAL);
9439 }
9440 #endif /* __has_feature(ptrauth_calls) */
9441 
9442 bool
9443 task_needs_user_signed_thread_state(
9444 	task_t task)
9445 {
9446 	assert(task != TASK_NULL);
9447 	return !!(task_ro_flags_get(task) & TFRO_PAC_ENFORCE_USER_STATE);
9448 }
9449 
9450 void
9451 task_set_tecs(task_t task)
9452 {
9453 	if (task == TASK_NULL) {
9454 		task = current_task();
9455 	}
9456 
9457 	if (!machine_csv(CPUVN_CI)) {
9458 		return;
9459 	}
9460 
9461 	LCK_MTX_ASSERT(&task->lock, LCK_MTX_ASSERT_NOTOWNED);
9462 
9463 	task_lock(task);
9464 
9465 	task->t_flags |= TF_TECS;
9466 
9467 	thread_t thread;
9468 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
9469 		machine_tecs(thread);
9470 	}
9471 	task_unlock(task);
9472 }
9473 
9474 kern_return_t
9475 task_test_sync_upcall(
9476 	task_t     task,
9477 	ipc_port_t send_port)
9478 {
9479 #if DEVELOPMENT || DEBUG
9480 	if (task != current_task() || !IPC_PORT_VALID(send_port)) {
9481 		return KERN_INVALID_ARGUMENT;
9482 	}
9483 
9484 	/* Block on sync kernel upcall on the given send port */
9485 	mach_test_sync_upcall(send_port);
9486 
9487 	ipc_port_release_send(send_port);
9488 	return KERN_SUCCESS;
9489 #else
9490 	(void)task;
9491 	(void)send_port;
9492 	return KERN_NOT_SUPPORTED;
9493 #endif
9494 }
9495 
9496 kern_return_t
9497 task_test_async_upcall_propagation(
9498 	task_t      task,
9499 	ipc_port_t  send_port,
9500 	int         qos,
9501 	int         iotier)
9502 {
9503 #if DEVELOPMENT || DEBUG
9504 	kern_return_t kr;
9505 
9506 	if (task != current_task() || !IPC_PORT_VALID(send_port)) {
9507 		return KERN_INVALID_ARGUMENT;
9508 	}
9509 
9510 	if (qos < THREAD_QOS_DEFAULT || qos > THREAD_QOS_USER_INTERACTIVE ||
9511 	    iotier < THROTTLE_LEVEL_START || iotier > THROTTLE_LEVEL_END) {
9512 		return KERN_INVALID_ARGUMENT;
9513 	}
9514 
9515 	struct thread_attr_for_ipc_propagation attr = {
9516 		.tafip_iotier = iotier,
9517 		.tafip_qos = qos
9518 	};
9519 
9520 	/* Apply propagate attr to port */
9521 	kr = ipc_port_propagate_thread_attr(send_port, attr);
9522 	if (kr != KERN_SUCCESS) {
9523 		return kr;
9524 	}
9525 
9526 	thread_enable_send_importance(current_thread(), TRUE);
9527 
9528 	/* Perform an async kernel upcall on the given send port */
9529 	mach_test_async_upcall(send_port);
9530 	thread_enable_send_importance(current_thread(), FALSE);
9531 
9532 	ipc_port_release_send(send_port);
9533 	return KERN_SUCCESS;
9534 #else
9535 	(void)task;
9536 	(void)send_port;
9537 	(void)qos;
9538 	(void)iotier;
9539 	return KERN_NOT_SUPPORTED;
9540 #endif
9541 }
9542 
9543 #if CONFIG_PROC_RESOURCE_LIMITS
9544 mach_port_name_t
9545 current_task_get_fatal_port_name(void)
9546 {
9547 	mach_port_t task_fatal_port = MACH_PORT_NULL;
9548 	mach_port_name_t port_name = 0;
9549 
9550 	task_fatal_port = task_allocate_fatal_port();
9551 
9552 	if (task_fatal_port) {
9553 		ipc_object_copyout(current_space(), ip_to_object(task_fatal_port), MACH_MSG_TYPE_PORT_SEND,
9554 		    IPC_OBJECT_COPYOUT_FLAGS_NONE, NULL, NULL, &port_name);
9555 	}
9556 
9557 	return port_name;
9558 }
9559 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
9560 
9561 #if defined(__x86_64__)
9562 bool
9563 curtask_get_insn_copy_optout(void)
9564 {
9565 	bool optout;
9566 	task_t cur_task = current_task();
9567 
9568 	task_lock(cur_task);
9569 	optout = (cur_task->t_flags & TF_INSN_COPY_OPTOUT) ? true : false;
9570 	task_unlock(cur_task);
9571 
9572 	return optout;
9573 }
9574 
9575 void
9576 curtask_set_insn_copy_optout(void)
9577 {
9578 	task_t cur_task = current_task();
9579 
9580 	task_lock(cur_task);
9581 
9582 	cur_task->t_flags |= TF_INSN_COPY_OPTOUT;
9583 
9584 	thread_t thread;
9585 	queue_iterate(&cur_task->threads, thread, thread_t, task_threads) {
9586 		machine_thread_set_insn_copy_optout(thread);
9587 	}
9588 	task_unlock(cur_task);
9589 }
9590 #endif /* defined(__x86_64__) */
9591 
9592 void
9593 task_get_corpse_vmobject_list(task_t task, vmobject_list_output_t* list, size_t* list_size)
9594 {
9595 	assert(task);
9596 	assert(list_size);
9597 
9598 	*list = task->corpse_vmobject_list;
9599 	*list_size = (size_t)task->corpse_vmobject_list_size;
9600 }
9601 
9602 __abortlike
9603 static void
9604 panic_proc_ro_task_backref_mismatch(task_t t, proc_ro_t ro)
9605 {
9606 	panic("proc_ro->task backref mismatch: t=%p, ro=%p, "
9607 	    "proc_ro_task(ro)=%p", t, ro, proc_ro_task(ro));
9608 }
9609 
9610 proc_ro_t
9611 task_get_ro(task_t t)
9612 {
9613 	proc_ro_t ro = (proc_ro_t)t->bsd_info_ro;
9614 
9615 	zone_require_ro(ZONE_ID_PROC_RO, sizeof(struct proc_ro), ro);
9616 	if (__improbable(proc_ro_task(ro) != t)) {
9617 		panic_proc_ro_task_backref_mismatch(t, ro);
9618 	}
9619 
9620 	return ro;
9621 }
9622 
9623 uint32_t
9624 task_ro_flags_get(task_t task)
9625 {
9626 	return task_get_ro(task)->t_flags_ro;
9627 }
9628 
9629 void
9630 task_ro_flags_set(task_t task, uint32_t flags)
9631 {
9632 	zalloc_ro_update_field_atomic(ZONE_ID_PROC_RO, task_get_ro(task),
9633 	    t_flags_ro, ZRO_ATOMIC_OR_32, flags);
9634 }
9635 
9636 void
9637 task_ro_flags_clear(task_t task, uint32_t flags)
9638 {
9639 	zalloc_ro_update_field_atomic(ZONE_ID_PROC_RO, task_get_ro(task),
9640 	    t_flags_ro, ZRO_ATOMIC_AND_32, ~flags);
9641 }
9642 
9643 task_control_port_options_t
9644 task_get_control_port_options(task_t task)
9645 {
9646 	return task_get_ro(task)->task_control_port_options;
9647 }
9648 
9649 void
9650 task_set_control_port_options(task_t task, task_control_port_options_t opts)
9651 {
9652 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
9653 	    task_control_port_options, &opts);
9654 }
9655 
9656 /*!
9657  * @function kdp_task_is_locked
9658  *
9659  * @abstract
9660  * Checks if task is locked.
9661  *
9662  * @discussion
9663  * NOT SAFE: To be used only by kernel debugger.
9664  *
9665  * @param task task to check
9666  *
9667  * @returns TRUE if the task is locked.
9668  */
9669 boolean_t
9670 kdp_task_is_locked(task_t task)
9671 {
9672 	return kdp_lck_mtx_lock_spin_is_acquired(&task->lock);
9673 }
9674 
9675 #if DEBUG || DEVELOPMENT
9676 /**
9677  *
9678  * Check if a threshold limit is valid based on the actual phys memory
9679  * limit. If they are the same, race conditions may arise, so we have
9680  * to prevent that from happening.
9681  */
9682 static diagthreshold_check_return
9683 task_check_memorythreshold_is_valid(task_t task, uint64_t new_limit, bool is_diagnostics_value)
9684 {
9685 	int phys_limit_mb;
9686 	kern_return_t ret_value;
9687 	bool threshold_enabled;
9688 	bool dummy;
9689 	ret_value = ledger_is_diag_threshold_enabled(task->ledger, task_ledgers.phys_footprint, &threshold_enabled);
9690 	if (ret_value != KERN_SUCCESS) {
9691 		return ret_value;
9692 	}
9693 	if (is_diagnostics_value == true) {
9694 		ret_value = task_get_phys_footprint_limit(task, &phys_limit_mb);
9695 	} else {
9696 		uint64_t diag_limit;
9697 		ret_value = task_get_diag_footprint_limit_internal(task, &diag_limit, &dummy);
9698 		phys_limit_mb = (int)(diag_limit >> 20);
9699 	}
9700 	if (ret_value != KERN_SUCCESS) {
9701 		return ret_value;
9702 	}
9703 	if (phys_limit_mb == (int)new_limit) {
9704 		if (threshold_enabled == false) {
9705 			return THRESHOLD_IS_SAME_AS_LIMIT_FLAG_DISABLED;
9706 		} else {
9707 			return THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED;
9708 		}
9709 	}
9710 	if (threshold_enabled == false) {
9711 		return THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED;
9712 	} else {
9713 		return THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_ENABLED;
9714 	}
9715 }
9716 #endif
9717 
9718 
9719 #pragma mark task utils
9720 
9721 /* defined in bsd/kern/kern_proc.c */
9722 extern void proc_name(int pid, char *buf, int size);
9723 extern char *proc_best_name(struct proc *p);
9724 
9725 void
9726 task_procname(task_t task, char *buf, int size)
9727 {
9728 	proc_name(task_pid(task), buf, size);
9729 }
9730 
9731 void
9732 task_best_name(task_t task, char *buf, size_t size)
9733 {
9734 	char *name = proc_best_name(task_get_proc_raw(task));
9735 	strlcpy(buf, name, size);
9736 }
9737