xref: /xnu-10002.41.9/osfmk/kern/task.c (revision 699cd48037512bf4380799317ca44ca453c82f57)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_FREE_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  *	File:	kern/task.c
58  *	Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
59  *		David Black
60  *
61  *	Task management primitives implementation.
62  */
63 /*
64  * Copyright (c) 1993 The University of Utah and
65  * the Computer Systems Laboratory (CSL).  All rights reserved.
66  *
67  * Permission to use, copy, modify and distribute this software and its
68  * documentation is hereby granted, provided that both the copyright
69  * notice and this permission notice appear in all copies of the
70  * software, derivative works or modified versions, and any portions
71  * thereof, and that both notices appear in supporting documentation.
72  *
73  * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
74  * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
75  * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
76  *
77  * CSL requests users of this software to return to [email protected] any
78  * improvements that they make and grant CSL redistribution rights.
79  *
80  */
81 /*
82  * NOTICE: This file was modified by McAfee Research in 2004 to introduce
83  * support for mandatory and extensible security protections.  This notice
84  * is included in support of clause 2.2 (b) of the Apple Public License,
85  * Version 2.0.
86  * Copyright (c) 2005 SPARTA, Inc.
87  */
88 
89 #include <mach/mach_types.h>
90 #include <mach/boolean.h>
91 #include <mach/host_priv.h>
92 #include <mach/machine/vm_types.h>
93 #include <mach/vm_param.h>
94 #include <mach/mach_vm.h>
95 #include <mach/semaphore.h>
96 #include <mach/task_info.h>
97 #include <mach/task_inspect.h>
98 #include <mach/task_special_ports.h>
99 #include <mach/sdt.h>
100 #include <mach/mach_test_upcall.h>
101 
102 #include <ipc/ipc_importance.h>
103 #include <ipc/ipc_types.h>
104 #include <ipc/ipc_space.h>
105 #include <ipc/ipc_entry.h>
106 #include <ipc/ipc_hash.h>
107 #include <ipc/ipc_init.h>
108 
109 #include <kern/kern_types.h>
110 #include <kern/mach_param.h>
111 #include <kern/misc_protos.h>
112 #include <kern/task.h>
113 #include <kern/thread.h>
114 #include <kern/coalition.h>
115 #include <kern/zalloc.h>
116 #include <kern/kalloc.h>
117 #include <kern/kern_cdata.h>
118 #include <kern/processor.h>
119 #include <kern/recount.h>
120 #include <kern/sched_prim.h>    /* for thread_wakeup */
121 #include <kern/ipc_tt.h>
122 #include <kern/host.h>
123 #include <kern/clock.h>
124 #include <kern/timer.h>
125 #include <kern/assert.h>
126 #include <kern/affinity.h>
127 #include <kern/exc_resource.h>
128 #include <kern/machine.h>
129 #include <kern/policy_internal.h>
130 #include <kern/restartable.h>
131 #include <kern/ipc_kobject.h>
132 
133 #include <corpses/task_corpse.h>
134 #if CONFIG_TELEMETRY
135 #include <kern/telemetry.h>
136 #endif
137 
138 #if CONFIG_PERVASIVE_CPI
139 #include <kern/monotonic.h>
140 #include <machine/monotonic.h>
141 #endif /* CONFIG_PERVASIVE_CPI */
142 
143 #include <os/log.h>
144 
145 #include <vm/pmap.h>
146 #include <vm/vm_map.h>
147 #include <vm/vm_kern.h>         /* for kernel_map, ipc_kernel_map */
148 #include <vm/vm_pageout.h>
149 #include <vm/vm_protos.h>
150 #include <vm/vm_purgeable_internal.h>
151 #include <vm/vm_compressor_pager.h>
152 #include <vm/vm_reclaim_internal.h>
153 
154 #include <sys/proc_ro.h>
155 #include <sys/resource.h>
156 #include <sys/signalvar.h> /* for coredump */
157 #include <sys/bsdtask_info.h>
158 #include <sys/kdebug_triage.h>
159 /*
160  * Exported interfaces
161  */
162 
163 #include <mach/task_server.h>
164 #include <mach/mach_host_server.h>
165 #include <mach/mach_port_server.h>
166 
167 #include <vm/vm_shared_region.h>
168 
169 #include <libkern/OSDebug.h>
170 #include <libkern/OSAtomic.h>
171 #include <libkern/section_keywords.h>
172 
173 #include <mach-o/loader.h>
174 #include <kdp/kdp_dyld.h>
175 
176 #include <kern/sfi.h>           /* picks up ledger.h */
177 
178 #if CONFIG_MACF
179 #include <security/mac_mach_internal.h>
180 #endif
181 
182 #include <IOKit/IOBSD.h>
183 #include <kdp/processor_core.h>
184 
185 #include <string.h>
186 
187 #if KPERF
188 extern int kpc_force_all_ctrs(task_t, int);
189 #endif
190 
191 SECURITY_READ_ONLY_LATE(task_t) kernel_task;
192 
193 int64_t         next_taskuniqueid = 0;
194 const size_t task_alignment = _Alignof(struct task);
195 extern const size_t proc_alignment;
196 extern size_t proc_struct_size;
197 extern size_t proc_and_task_size;
198 size_t task_struct_size;
199 
200 extern uint32_t ipc_control_port_options;
201 
202 extern int large_corpse_count;
203 
204 extern boolean_t proc_send_synchronous_EXC_RESOURCE(void *p);
205 extern void task_disown_frozen_csegs(task_t owner_task);
206 
207 static void task_port_no_senders(ipc_port_t, mach_msg_type_number_t);
208 static void task_port_with_flavor_no_senders(ipc_port_t, mach_msg_type_number_t);
209 static void task_suspension_no_senders(ipc_port_t, mach_msg_type_number_t);
210 static inline void task_zone_init(void);
211 
212 
213 IPC_KOBJECT_DEFINE(IKOT_TASK_NAME);
214 IPC_KOBJECT_DEFINE(IKOT_TASK_CONTROL,
215     .iko_op_no_senders = task_port_no_senders);
216 IPC_KOBJECT_DEFINE(IKOT_TASK_READ,
217     .iko_op_no_senders = task_port_with_flavor_no_senders);
218 IPC_KOBJECT_DEFINE(IKOT_TASK_INSPECT,
219     .iko_op_no_senders = task_port_with_flavor_no_senders);
220 IPC_KOBJECT_DEFINE(IKOT_TASK_RESUME,
221     .iko_op_no_senders = task_suspension_no_senders);
222 
223 #if CONFIG_PROC_RESOURCE_LIMITS
224 static void task_fatal_port_no_senders(ipc_port_t, mach_msg_type_number_t);
225 static mach_port_t task_allocate_fatal_port(void);
226 
227 IPC_KOBJECT_DEFINE(IKOT_TASK_FATAL,
228     .iko_op_stable     = true,
229     .iko_op_no_senders = task_fatal_port_no_senders);
230 
231 extern void task_id_token_set_port(task_id_token_t token, ipc_port_t port);
232 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
233 
234 /* Flag set by core audio when audio is playing. Used to stifle EXC_RESOURCE generation when active. */
235 int audio_active = 0;
236 
237 /*
238  *	structure for tracking zone usage
239  *	Used either one per task/thread for all zones or <per-task,per-zone>.
240  */
241 typedef struct zinfo_usage_store_t {
242 	/* These fields may be updated atomically, and so must be 8 byte aligned */
243 	uint64_t        alloc __attribute__((aligned(8)));              /* allocation counter */
244 	uint64_t        free __attribute__((aligned(8)));               /* free counter */
245 } zinfo_usage_store_t;
246 
247 /**
248  * Return codes related to diag threshold and memory limit
249  */
250 __options_decl(diagthreshold_check_return, int, {
251 	THRESHOLD_IS_SAME_AS_LIMIT_FLAG_DISABLED        = 0,
252 	THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED         = 1,
253 	THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED    = 2,
254 	THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_ENABLED     = 3,
255 });
256 
257 /**
258  * Return codes related to diag threshold and memory limit
259  */
260 __options_decl(current_, int, {
261 	THRESHOLD_IS_SAME_AS_LIMIT      = 0,
262 	THRESHOLD_IS_NOT_SAME_AS_LIMIT  = 1
263 });
264 
265 zinfo_usage_store_t tasks_tkm_private;
266 zinfo_usage_store_t tasks_tkm_shared;
267 
268 /* A container to accumulate statistics for expired tasks */
269 expired_task_statistics_t               dead_task_statistics;
270 LCK_SPIN_DECLARE_ATTR(dead_task_statistics_lock, &task_lck_grp, &task_lck_attr);
271 
272 ledger_template_t task_ledger_template = NULL;
273 
274 /* global lock for task_dyld_process_info_notify_{register, deregister, get_trap} */
275 LCK_GRP_DECLARE(g_dyldinfo_mtx_grp, "g_dyldinfo");
276 LCK_MTX_DECLARE(g_dyldinfo_mtx, &g_dyldinfo_mtx_grp);
277 
278 SECURITY_READ_ONLY_LATE(struct _task_ledger_indices) task_ledgers __attribute__((used)) =
279 {.cpu_time = -1,
280  .tkm_private = -1,
281  .tkm_shared = -1,
282  .phys_mem = -1,
283  .wired_mem = -1,
284  .internal = -1,
285  .iokit_mapped = -1,
286  .external = -1,
287  .reusable = -1,
288  .alternate_accounting = -1,
289  .alternate_accounting_compressed = -1,
290  .page_table = -1,
291  .phys_footprint = -1,
292  .internal_compressed = -1,
293  .purgeable_volatile = -1,
294  .purgeable_nonvolatile = -1,
295  .purgeable_volatile_compressed = -1,
296  .purgeable_nonvolatile_compressed = -1,
297  .tagged_nofootprint = -1,
298  .tagged_footprint = -1,
299  .tagged_nofootprint_compressed = -1,
300  .tagged_footprint_compressed = -1,
301  .network_volatile = -1,
302  .network_nonvolatile = -1,
303  .network_volatile_compressed = -1,
304  .network_nonvolatile_compressed = -1,
305  .media_nofootprint = -1,
306  .media_footprint = -1,
307  .media_nofootprint_compressed = -1,
308  .media_footprint_compressed = -1,
309  .graphics_nofootprint = -1,
310  .graphics_footprint = -1,
311  .graphics_nofootprint_compressed = -1,
312  .graphics_footprint_compressed = -1,
313  .neural_nofootprint = -1,
314  .neural_footprint = -1,
315  .neural_nofootprint_compressed = -1,
316  .neural_footprint_compressed = -1,
317  .platform_idle_wakeups = -1,
318  .interrupt_wakeups = -1,
319 #if CONFIG_SCHED_SFI
320  .sfi_wait_times = { 0 /* initialized at runtime */},
321 #endif /* CONFIG_SCHED_SFI */
322  .cpu_time_billed_to_me = -1,
323  .cpu_time_billed_to_others = -1,
324  .physical_writes = -1,
325  .logical_writes = -1,
326  .logical_writes_to_external = -1,
327 #if DEBUG || DEVELOPMENT
328  .pages_grabbed = -1,
329  .pages_grabbed_kern = -1,
330  .pages_grabbed_iopl = -1,
331  .pages_grabbed_upl = -1,
332 #endif
333 #if CONFIG_FREEZE
334  .frozen_to_swap = -1,
335 #endif /* CONFIG_FREEZE */
336  .energy_billed_to_me = -1,
337  .energy_billed_to_others = -1,
338 #if CONFIG_PHYS_WRITE_ACCT
339  .fs_metadata_writes = -1,
340 #endif /* CONFIG_PHYS_WRITE_ACCT */
341 #if CONFIG_MEMORYSTATUS
342  .memorystatus_dirty_time = -1,
343 #endif /* CONFIG_MEMORYSTATUS */
344  .swapins = -1, };
345 
346 /* System sleep state */
347 boolean_t tasks_suspend_state;
348 
349 __options_decl(send_exec_resource_is_fatal, bool, {
350 	IS_NOT_FATAL            = false,
351 	IS_FATAL                = true
352 });
353 
354 __options_decl(send_exec_resource_is_diagnostics, bool, {
355 	IS_NOT_DIAGNOSTICS      = false,
356 	IS_DIAGNOSTICS          = true
357 });
358 
359 __options_decl(send_exec_resource_is_warning, bool, {
360 	IS_NOT_WARNING          = false,
361 	IS_WARNING              = true
362 });
363 
364 __options_decl(send_exec_resource_options_t, uint8_t, {
365 	EXEC_RESOURCE_FATAL = 0x01,
366 	EXEC_RESOURCE_DIAGNOSTIC = 0x02,
367 	EXEC_RESOURCE_WARNING = 0x04,
368 });
369 
370 /**
371  * Actions to take when a process has reached the memory limit or the diagnostics threshold limits
372  */
373 static inline void task_process_crossed_limit_no_diag(task_t task, ledger_amount_t ledger_limit_size, bool memlimit_is_fatal, bool memlimit_is_active, send_exec_resource_is_warning is_warning);
374 #if DEBUG || DEVELOPMENT
375 static inline void task_process_crossed_limit_diag(ledger_amount_t ledger_limit_size);
376 #endif
377 void init_task_ledgers(void);
378 void task_footprint_exceeded(int warning, __unused const void *param0, __unused const void *param1);
379 void task_wakeups_rate_exceeded(int warning, __unused const void *param0, __unused const void *param1);
380 void task_io_rate_exceeded(int warning, const void *param0, __unused const void *param1);
381 void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void);
382 void __attribute__((noinline)) PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, send_exec_resource_options_t exception_options);
383 void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor);
384 #if CONFIG_PROC_RESOURCE_LIMITS
385 void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task_t task, int current_size, int soft_limit, int hard_limit);
386 mach_port_name_t current_task_get_fatal_port_name(void);
387 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
388 
389 kern_return_t task_suspend_internal_locked(task_t);
390 kern_return_t task_suspend_internal(task_t);
391 kern_return_t task_resume_internal_locked(task_t);
392 kern_return_t task_resume_internal(task_t);
393 static kern_return_t task_start_halt_locked(task_t task, boolean_t should_mark_corpse);
394 
395 extern kern_return_t iokit_task_terminate(task_t task);
396 extern void          iokit_task_app_suspended_changed(task_t task);
397 
398 extern kern_return_t exception_deliver(thread_t, exception_type_t, mach_exception_data_t, mach_msg_type_number_t, struct exception_action *, lck_mtx_t *);
399 extern void bsd_copythreadname(void *dst_uth, void *src_uth);
400 extern kern_return_t thread_resume(thread_t thread);
401 
402 extern int exit_with_port_space_exception(void *proc, mach_exception_code_t code, mach_exception_subcode_t subcode);
403 
404 // Condition to include diag footprints
405 #define RESETTABLE_DIAG_FOOTPRINT_LIMITS ((DEBUG || DEVELOPMENT) && CONFIG_MEMORYSTATUS)
406 
407 // Warn tasks when they hit 80% of their memory limit.
408 #define PHYS_FOOTPRINT_WARNING_LEVEL 80
409 
410 #define TASK_WAKEUPS_MONITOR_DEFAULT_LIMIT              150 /* wakeups per second */
411 #define TASK_WAKEUPS_MONITOR_DEFAULT_INTERVAL   300 /* in seconds. */
412 
413 /*
414  * Level (in terms of percentage of the limit) at which the wakeups monitor triggers telemetry.
415  *
416  * (ie when the task's wakeups rate exceeds 70% of the limit, start taking user
417  *  stacktraces, aka micro-stackshots)
418  */
419 #define TASK_WAKEUPS_MONITOR_DEFAULT_USTACKSHOTS_TRIGGER        70
420 
421 int task_wakeups_monitor_interval; /* In seconds. Time period over which wakeups rate is observed */
422 int task_wakeups_monitor_rate;     /* In hz. Maximum allowable wakeups per task before EXC_RESOURCE is sent */
423 
424 unsigned int task_wakeups_monitor_ustackshots_trigger_pct; /* Percentage. Level at which we start gathering telemetry. */
425 
426 TUNABLE(bool, disable_exc_resource, "disable_exc_resource", false); /* Global override to suppress EXC_RESOURCE for resource monitor violations. */
427 TUNABLE(bool, disable_exc_resource_during_audio, "disable_exc_resource_during_audio", true); /* Global override to suppress EXC_RESOURCE while audio is active */
428 
429 ledger_amount_t max_task_footprint = 0;  /* Per-task limit on physical memory consumption in bytes     */
430 unsigned int max_task_footprint_warning_level = 0;  /* Per-task limit warning percentage */
431 
432 /*
433  * Configure per-task memory limit.
434  * The boot-arg is interpreted as Megabytes,
435  * and takes precedence over the device tree.
436  * Setting the boot-arg to 0 disables task limits.
437  */
438 TUNABLE_DT_WRITEABLE(int, max_task_footprint_mb, "/defaults", "kern.max_task_pmem", "max_task_pmem", 0, TUNABLE_DT_NONE);
439 
440 /* I/O Monitor Limits */
441 #define IOMON_DEFAULT_LIMIT                     (20480ull)      /* MB of logical/physical I/O */
442 #define IOMON_DEFAULT_INTERVAL                  (86400ull)      /* in seconds */
443 
444 uint64_t task_iomon_limit_mb;           /* Per-task I/O monitor limit in MBs */
445 uint64_t task_iomon_interval_secs;      /* Per-task I/O monitor interval in secs */
446 
447 #define IO_TELEMETRY_DEFAULT_LIMIT              (10ll * 1024ll * 1024ll)
448 int64_t io_telemetry_limit;                     /* Threshold to take a microstackshot (0 indicated I/O telemetry is turned off) */
449 int64_t global_logical_writes_count = 0;        /* Global count for logical writes */
450 int64_t global_logical_writes_to_external_count = 0;        /* Global count for logical writes to external storage*/
451 static boolean_t global_update_logical_writes(int64_t, int64_t*);
452 
453 #if DEBUG || DEVELOPMENT
454 static diagthreshold_check_return task_check_memorythreshold_is_valid(task_t task, uint64_t new_limit, bool is_diagnostics_value);
455 #endif
456 #define TASK_MAX_THREAD_LIMIT 256
457 
458 #if MACH_ASSERT
459 int pmap_ledgers_panic = 1;
460 int pmap_ledgers_panic_leeway = 3;
461 #endif /* MACH_ASSERT */
462 
463 int task_max = CONFIG_TASK_MAX; /* Max number of tasks */
464 
465 #if CONFIG_COREDUMP
466 int hwm_user_cores = 0; /* high watermark violations generate user core files */
467 #endif
468 
469 #ifdef MACH_BSD
470 extern uint32_t proc_platform(const struct proc *);
471 extern uint32_t proc_sdk(struct proc *);
472 extern void     proc_getexecutableuuid(void *, unsigned char *, unsigned long);
473 extern int      proc_pid(struct proc *p);
474 extern int      proc_selfpid(void);
475 extern struct proc *current_proc(void);
476 extern char     *proc_name_address(struct proc *p);
477 extern uint64_t get_dispatchqueue_offset_from_proc(void *);
478 extern int kevent_proc_copy_uptrs(void *proc, uint64_t *buf, uint32_t bufsize);
479 extern void workq_proc_suspended(struct proc *p);
480 extern void workq_proc_resumed(struct proc *p);
481 extern struct proc *kernproc;
482 
483 #if CONFIG_MEMORYSTATUS
484 extern void     proc_memstat_skip(struct proc* p, boolean_t set);
485 extern void     memorystatus_on_ledger_footprint_exceeded(int warning, bool memlimit_is_active, bool memlimit_is_fatal);
486 extern void     memorystatus_log_exception(const int max_footprint_mb, bool memlimit_is_active, bool memlimit_is_fatal);
487 extern void     memorystatus_log_diag_threshold_exception(const int diag_threshold_value);
488 extern boolean_t memorystatus_allowed_vm_map_fork(task_t task, bool *is_large);
489 extern uint64_t  memorystatus_available_memory_internal(struct proc *p);
490 
491 #if DEVELOPMENT || DEBUG
492 extern void memorystatus_abort_vm_map_fork(task_t);
493 #endif
494 
495 #endif /* CONFIG_MEMORYSTATUS */
496 
497 #endif /* MACH_BSD */
498 
499 /* Boot-arg that turns on fatal pac exception delivery for all first-party apps */
500 static TUNABLE(bool, enable_pac_exception, "enable_pac_exception", false);
501 
502 /*
503  * Defaults for controllable EXC_GUARD behaviors
504  *
505  * Internal builds are fatal by default (except BRIDGE).
506  * Create an alternate set of defaults for special processes by name.
507  */
508 struct task_exc_guard_named_default {
509 	char *name;
510 	uint32_t behavior;
511 };
512 #define _TASK_EXC_GUARD_MP_CORPSE  (TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_MP_CORPSE)
513 #define _TASK_EXC_GUARD_MP_ONCE    (_TASK_EXC_GUARD_MP_CORPSE | TASK_EXC_GUARD_MP_ONCE)
514 #define _TASK_EXC_GUARD_MP_FATAL   (TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_MP_FATAL)
515 
516 #define _TASK_EXC_GUARD_VM_CORPSE  (TASK_EXC_GUARD_VM_DELIVER | TASK_EXC_GUARD_VM_ONCE)
517 #define _TASK_EXC_GUARD_VM_ONCE    (_TASK_EXC_GUARD_VM_CORPSE | TASK_EXC_GUARD_VM_ONCE)
518 #define _TASK_EXC_GUARD_VM_FATAL   (TASK_EXC_GUARD_VM_DELIVER | TASK_EXC_GUARD_VM_FATAL)
519 
520 #define _TASK_EXC_GUARD_ALL_CORPSE (_TASK_EXC_GUARD_MP_CORPSE | _TASK_EXC_GUARD_VM_CORPSE)
521 #define _TASK_EXC_GUARD_ALL_ONCE   (_TASK_EXC_GUARD_MP_ONCE | _TASK_EXC_GUARD_VM_ONCE)
522 #define _TASK_EXC_GUARD_ALL_FATAL  (_TASK_EXC_GUARD_MP_FATAL | _TASK_EXC_GUARD_VM_FATAL)
523 
524 /* cannot turn off FATAL and DELIVER bit if set */
525 uint32_t task_exc_guard_no_unset_mask = TASK_EXC_GUARD_MP_FATAL | TASK_EXC_GUARD_VM_FATAL |
526     TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_VM_DELIVER;
527 /* cannot turn on ONCE bit if unset */
528 uint32_t task_exc_guard_no_set_mask = TASK_EXC_GUARD_MP_ONCE | TASK_EXC_GUARD_VM_ONCE;
529 
530 #if !defined(XNU_TARGET_OS_BRIDGE)
531 
532 uint32_t task_exc_guard_default = _TASK_EXC_GUARD_ALL_FATAL;
533 uint32_t task_exc_guard_config_mask = TASK_EXC_GUARD_MP_ALL | TASK_EXC_GUARD_VM_ALL;
534 /*
535  * These "by-process-name" default overrides are intended to be a short-term fix to
536  * quickly get over races between changes introducing new EXC_GUARD raising behaviors
537  * in some process and a change in default behavior for same. We should ship with
538  * these lists empty (by fixing the bugs, or explicitly changing the task's EXC_GUARD
539  * exception behavior via task_set_exc_guard_behavior()).
540  *
541  * XXX Remember to add/remove TASK_EXC_GUARD_HONOR_NAMED_DEFAULTS back to
542  * task_exc_guard_default when transitioning this list between empty and
543  * non-empty.
544  */
545 static struct task_exc_guard_named_default task_exc_guard_named_defaults[] = {};
546 
547 #else /* !defined(XNU_TARGET_OS_BRIDGE) */
548 
549 uint32_t task_exc_guard_default = _TASK_EXC_GUARD_ALL_ONCE;
550 uint32_t task_exc_guard_config_mask = TASK_EXC_GUARD_MP_ALL | TASK_EXC_GUARD_VM_ALL;
551 static struct task_exc_guard_named_default task_exc_guard_named_defaults[] = {};
552 
553 #endif /* !defined(XNU_TARGET_OS_BRIDGE) */
554 
555 /* Forwards */
556 
557 static void task_hold_locked(task_t task);
558 static void task_wait_locked(task_t task, boolean_t until_not_runnable);
559 static void task_release_locked(task_t task);
560 extern task_t proc_get_task_raw(void *proc);
561 extern void task_ref_hold_proc_task_struct(task_t task);
562 extern void task_release_proc_task_struct(task_t task);
563 
564 static void task_synchronizer_destroy_all(task_t task);
565 static os_ref_count_t
566 task_add_turnstile_watchports_locked(
567 	task_t                      task,
568 	struct task_watchports      *watchports,
569 	struct task_watchport_elem  **previous_elem_array,
570 	ipc_port_t                  *portwatch_ports,
571 	uint32_t                    portwatch_count);
572 
573 static os_ref_count_t
574 task_remove_turnstile_watchports_locked(
575 	task_t                 task,
576 	struct task_watchports *watchports,
577 	ipc_port_t             *port_freelist);
578 
579 static struct task_watchports *
580 task_watchports_alloc_init(
581 	task_t        task,
582 	thread_t      thread,
583 	uint32_t      count);
584 
585 static void
586 task_watchports_deallocate(
587 	struct task_watchports *watchports);
588 
589 __attribute__((always_inline)) inline void
task_lock(task_t task)590 task_lock(task_t task)
591 {
592 	lck_mtx_lock(&(task)->lock);
593 }
594 
595 __attribute__((always_inline)) inline void
task_unlock(task_t task)596 task_unlock(task_t task)
597 {
598 	lck_mtx_unlock(&(task)->lock);
599 }
600 
601 void
task_set_64bit(task_t task,boolean_t is_64bit,boolean_t is_64bit_data)602 task_set_64bit(
603 	task_t task,
604 	boolean_t is_64bit,
605 	boolean_t is_64bit_data)
606 {
607 #if defined(__i386__) || defined(__x86_64__) || defined(__arm64__)
608 	thread_t thread;
609 #endif /* defined(__i386__) || defined(__x86_64__) || defined(__arm64__) */
610 
611 	task_lock(task);
612 
613 	/*
614 	 * Switching to/from 64-bit address spaces
615 	 */
616 	if (is_64bit) {
617 		if (!task_has_64Bit_addr(task)) {
618 			task_set_64Bit_addr(task);
619 		}
620 	} else {
621 		if (task_has_64Bit_addr(task)) {
622 			task_clear_64Bit_addr(task);
623 		}
624 	}
625 
626 	/*
627 	 * Switching to/from 64-bit register state.
628 	 */
629 	if (is_64bit_data) {
630 		if (task_has_64Bit_data(task)) {
631 			goto out;
632 		}
633 
634 		task_set_64Bit_data(task);
635 	} else {
636 		if (!task_has_64Bit_data(task)) {
637 			goto out;
638 		}
639 
640 		task_clear_64Bit_data(task);
641 	}
642 
643 	/* FIXME: On x86, the thread save state flavor can diverge from the
644 	 * task's 64-bit feature flag due to the 32-bit/64-bit register save
645 	 * state dichotomy. Since we can be pre-empted in this interval,
646 	 * certain routines may observe the thread as being in an inconsistent
647 	 * state with respect to its task's 64-bitness.
648 	 */
649 
650 #if defined(__x86_64__) || defined(__arm64__)
651 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
652 		thread_mtx_lock(thread);
653 		machine_thread_switch_addrmode(thread);
654 		thread_mtx_unlock(thread);
655 	}
656 #endif /* defined(__x86_64__) || defined(__arm64__) */
657 
658 out:
659 	task_unlock(task);
660 }
661 
662 bool
task_get_64bit_addr(task_t task)663 task_get_64bit_addr(task_t task)
664 {
665 	return task_has_64Bit_addr(task);
666 }
667 
668 bool
task_get_64bit_data(task_t task)669 task_get_64bit_data(task_t task)
670 {
671 	return task_has_64Bit_data(task);
672 }
673 
674 void
task_set_platform_binary(task_t task,boolean_t is_platform)675 task_set_platform_binary(
676 	task_t task,
677 	boolean_t is_platform)
678 {
679 	if (is_platform) {
680 		task_ro_flags_set(task, TFRO_PLATFORM);
681 	} else {
682 		task_ro_flags_clear(task, TFRO_PLATFORM);
683 	}
684 }
685 
686 boolean_t
task_get_platform_binary(task_t task)687 task_get_platform_binary(task_t task)
688 {
689 	return (task_ro_flags_get(task) & TFRO_PLATFORM) != 0;
690 }
691 
692 boolean_t
task_is_a_corpse(task_t task)693 task_is_a_corpse(task_t task)
694 {
695 	return (task_ro_flags_get(task) & TFRO_CORPSE) != 0;
696 }
697 
698 void
task_set_corpse(task_t task)699 task_set_corpse(task_t task)
700 {
701 	return task_ro_flags_set(task, TFRO_CORPSE);
702 }
703 
704 void
task_set_immovable_pinned(task_t task)705 task_set_immovable_pinned(task_t task)
706 {
707 	ipc_task_set_immovable_pinned(task);
708 }
709 
710 /*
711  * Set or clear per-task TF_CA_CLIENT_WI flag according to specified argument.
712  * Returns "false" if flag is already set, and "true" in other cases.
713  */
714 bool
task_set_ca_client_wi(task_t task,boolean_t set_or_clear)715 task_set_ca_client_wi(
716 	task_t task,
717 	boolean_t set_or_clear)
718 {
719 	bool ret = true;
720 	task_lock(task);
721 	if (set_or_clear) {
722 		/* Tasks can have only one CA_CLIENT work interval */
723 		if (task->t_flags & TF_CA_CLIENT_WI) {
724 			ret = false;
725 		} else {
726 			task->t_flags |= TF_CA_CLIENT_WI;
727 		}
728 	} else {
729 		task->t_flags &= ~TF_CA_CLIENT_WI;
730 	}
731 	task_unlock(task);
732 	return ret;
733 }
734 
735 /*
736  * task_set_dyld_info() is called at most three times.
737  * 1) at task struct creation to set addr/size to zero.
738  * 2) in mach_loader.c to set location of __all_image_info section in loaded dyld
739  * 3) is from dyld itself to update location of all_image_info
740  * For security any calls after that are ignored.  The TF_DYLD_ALL_IMAGE_SET bit is used to determine state.
741  */
742 kern_return_t
task_set_dyld_info(task_t task,mach_vm_address_t addr,mach_vm_size_t size)743 task_set_dyld_info(
744 	task_t            task,
745 	mach_vm_address_t addr,
746 	mach_vm_size_t    size)
747 {
748 	mach_vm_address_t end;
749 	if (os_add_overflow(addr, size, &end)) {
750 		return KERN_FAILURE;
751 	}
752 
753 	task_lock(task);
754 	/* don't accept updates if all_image_info_addr is final */
755 	if ((task->t_flags & TF_DYLD_ALL_IMAGE_FINAL) == 0) {
756 		bool inputNonZero   = ((addr != 0) || (size != 0));
757 		bool currentNonZero = ((task->all_image_info_addr != 0) || (task->all_image_info_size != 0));
758 		task->all_image_info_addr = addr;
759 		task->all_image_info_size = size;
760 		/* can only change from a non-zero value to another non-zero once */
761 		if (inputNonZero && currentNonZero) {
762 			task->t_flags |= TF_DYLD_ALL_IMAGE_FINAL;
763 		}
764 		task_unlock(task);
765 		return KERN_SUCCESS;
766 	} else {
767 		task_unlock(task);
768 		return KERN_FAILURE;
769 	}
770 }
771 
772 bool
task_donates_own_pages(task_t task)773 task_donates_own_pages(
774 	task_t task)
775 {
776 	return task->donates_own_pages;
777 }
778 
779 void
task_set_mach_header_address(task_t task,mach_vm_address_t addr)780 task_set_mach_header_address(
781 	task_t task,
782 	mach_vm_address_t addr)
783 {
784 	task_lock(task);
785 	task->mach_header_vm_address = addr;
786 	task_unlock(task);
787 }
788 
789 void
task_bank_reset(__unused task_t task)790 task_bank_reset(__unused task_t task)
791 {
792 	if (task->bank_context != NULL) {
793 		bank_task_destroy(task);
794 	}
795 }
796 
797 /*
798  * NOTE: This should only be called when the P_LINTRANSIT
799  *	 flag is set (the proc_trans lock is held) on the
800  *	 proc associated with the task.
801  */
802 void
task_bank_init(__unused task_t task)803 task_bank_init(__unused task_t task)
804 {
805 	if (task->bank_context != NULL) {
806 		panic("Task bank init called with non null bank context for task: %p and bank_context: %p", task, task->bank_context);
807 	}
808 	bank_task_initialize(task);
809 }
810 
811 void
task_set_did_exec_flag(task_t task)812 task_set_did_exec_flag(task_t task)
813 {
814 	task->t_procflags |= TPF_DID_EXEC;
815 }
816 
817 void
task_clear_exec_copy_flag(task_t task)818 task_clear_exec_copy_flag(task_t task)
819 {
820 	task->t_procflags &= ~TPF_EXEC_COPY;
821 }
822 
823 event_t
task_get_return_wait_event(task_t task)824 task_get_return_wait_event(task_t task)
825 {
826 	return (event_t)&task->returnwait_inheritor;
827 }
828 
829 void
task_clear_return_wait(task_t task,uint32_t flags)830 task_clear_return_wait(task_t task, uint32_t flags)
831 {
832 	if (flags & TCRW_CLEAR_INITIAL_WAIT) {
833 		thread_wakeup(task_get_return_wait_event(task));
834 	}
835 
836 	if (flags & TCRW_CLEAR_FINAL_WAIT) {
837 		is_write_lock(task->itk_space);
838 
839 		task->t_returnwaitflags &= ~TRW_LRETURNWAIT;
840 		task->returnwait_inheritor = NULL;
841 
842 		if (flags & TCRW_CLEAR_EXEC_COMPLETE) {
843 			task->t_returnwaitflags &= ~TRW_LEXEC_COMPLETE;
844 		}
845 
846 		if (task->t_returnwaitflags & TRW_LRETURNWAITER) {
847 			struct turnstile *turnstile = turnstile_prepare_hash((uintptr_t) task_get_return_wait_event(task),
848 			    TURNSTILE_ULOCK);
849 
850 			waitq_wakeup64_all(&turnstile->ts_waitq,
851 			    CAST_EVENT64_T(task_get_return_wait_event(task)),
852 			    THREAD_AWAKENED, WAITQ_UPDATE_INHERITOR);
853 
854 			turnstile_update_inheritor_complete(turnstile, TURNSTILE_INTERLOCK_HELD);
855 
856 			turnstile_complete_hash((uintptr_t) task_get_return_wait_event(task), TURNSTILE_ULOCK);
857 			turnstile_cleanup();
858 			task->t_returnwaitflags &= ~TRW_LRETURNWAITER;
859 		}
860 		is_write_unlock(task->itk_space);
861 	}
862 }
863 
864 void __attribute__((noreturn))
task_wait_to_return(void)865 task_wait_to_return(void)
866 {
867 	task_t task = current_task();
868 	uint8_t returnwaitflags;
869 
870 	is_write_lock(task->itk_space);
871 
872 	if (task->t_returnwaitflags & TRW_LRETURNWAIT) {
873 		struct turnstile *turnstile = turnstile_prepare_hash((uintptr_t) task_get_return_wait_event(task),
874 		    TURNSTILE_ULOCK);
875 
876 		do {
877 			task->t_returnwaitflags |= TRW_LRETURNWAITER;
878 			turnstile_update_inheritor(turnstile, task->returnwait_inheritor,
879 			    (TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_THREAD));
880 
881 			waitq_assert_wait64(&turnstile->ts_waitq,
882 			    CAST_EVENT64_T(task_get_return_wait_event(task)),
883 			    THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
884 
885 			is_write_unlock(task->itk_space);
886 
887 			turnstile_update_inheritor_complete(turnstile, TURNSTILE_INTERLOCK_NOT_HELD);
888 
889 			thread_block(THREAD_CONTINUE_NULL);
890 
891 			is_write_lock(task->itk_space);
892 		} while (task->t_returnwaitflags & TRW_LRETURNWAIT);
893 
894 		turnstile_complete_hash((uintptr_t) task_get_return_wait_event(task), TURNSTILE_ULOCK);
895 	}
896 
897 	returnwaitflags = task->t_returnwaitflags;
898 	is_write_unlock(task->itk_space);
899 	turnstile_cleanup();
900 
901 
902 #if CONFIG_MACF
903 	/*
904 	 * Before jumping to userspace and allowing this process
905 	 * to execute any code, make sure its credentials are cached,
906 	 * and notify any interested parties.
907 	 */
908 	extern void mach_kauth_cred_thread_update(void);
909 
910 	mach_kauth_cred_thread_update();
911 	if (returnwaitflags & TRW_LEXEC_COMPLETE) {
912 		mac_proc_notify_exec_complete(current_proc());
913 	}
914 #endif
915 
916 	thread_bootstrap_return();
917 }
918 
919 boolean_t
task_is_exec_copy(task_t task)920 task_is_exec_copy(task_t task)
921 {
922 	return task_is_exec_copy_internal(task);
923 }
924 
925 boolean_t
task_did_exec(task_t task)926 task_did_exec(task_t task)
927 {
928 	return task_did_exec_internal(task);
929 }
930 
931 boolean_t
task_is_active(task_t task)932 task_is_active(task_t task)
933 {
934 	return task->active;
935 }
936 
937 boolean_t
task_is_halting(task_t task)938 task_is_halting(task_t task)
939 {
940 	return task->halting;
941 }
942 
943 void
task_init(void)944 task_init(void)
945 {
946 	if (max_task_footprint_mb != 0) {
947 #if CONFIG_MEMORYSTATUS
948 		if (max_task_footprint_mb < 50) {
949 			printf("Warning: max_task_pmem %d below minimum.\n",
950 			    max_task_footprint_mb);
951 			max_task_footprint_mb = 50;
952 		}
953 		printf("Limiting task physical memory footprint to %d MB\n",
954 		    max_task_footprint_mb);
955 
956 		max_task_footprint = (ledger_amount_t)max_task_footprint_mb * 1024 * 1024;         // Convert MB to bytes
957 
958 		/*
959 		 * Configure the per-task memory limit warning level.
960 		 * This is computed as a percentage.
961 		 */
962 		max_task_footprint_warning_level = 0;
963 
964 		if (max_mem < 0x40000000) {
965 			/*
966 			 * On devices with < 1GB of memory:
967 			 *    -- set warnings to 50MB below the per-task limit.
968 			 */
969 			if (max_task_footprint_mb > 50) {
970 				max_task_footprint_warning_level = ((max_task_footprint_mb - 50) * 100) / max_task_footprint_mb;
971 			}
972 		} else {
973 			/*
974 			 * On devices with >= 1GB of memory:
975 			 *    -- set warnings to 100MB below the per-task limit.
976 			 */
977 			if (max_task_footprint_mb > 100) {
978 				max_task_footprint_warning_level = ((max_task_footprint_mb - 100) * 100) / max_task_footprint_mb;
979 			}
980 		}
981 
982 		/*
983 		 * Never allow warning level to land below the default.
984 		 */
985 		if (max_task_footprint_warning_level < PHYS_FOOTPRINT_WARNING_LEVEL) {
986 			max_task_footprint_warning_level = PHYS_FOOTPRINT_WARNING_LEVEL;
987 		}
988 
989 		printf("Limiting task physical memory warning to %d%%\n", max_task_footprint_warning_level);
990 
991 #else
992 		printf("Warning: max_task_pmem specified, but jetsam not configured; ignoring.\n");
993 #endif /* CONFIG_MEMORYSTATUS */
994 	}
995 
996 #if DEVELOPMENT || DEBUG
997 	PE_parse_boot_argn("task_exc_guard_default",
998 	    &task_exc_guard_default,
999 	    sizeof(task_exc_guard_default));
1000 #endif /* DEVELOPMENT || DEBUG */
1001 
1002 #if CONFIG_COREDUMP
1003 	if (!PE_parse_boot_argn("hwm_user_cores", &hwm_user_cores,
1004 	    sizeof(hwm_user_cores))) {
1005 		hwm_user_cores = 0;
1006 	}
1007 #endif
1008 
1009 	proc_init_cpumon_params();
1010 
1011 	if (!PE_parse_boot_argn("task_wakeups_monitor_rate", &task_wakeups_monitor_rate, sizeof(task_wakeups_monitor_rate))) {
1012 		task_wakeups_monitor_rate = TASK_WAKEUPS_MONITOR_DEFAULT_LIMIT;
1013 	}
1014 
1015 	if (!PE_parse_boot_argn("task_wakeups_monitor_interval", &task_wakeups_monitor_interval, sizeof(task_wakeups_monitor_interval))) {
1016 		task_wakeups_monitor_interval = TASK_WAKEUPS_MONITOR_DEFAULT_INTERVAL;
1017 	}
1018 
1019 	if (!PE_parse_boot_argn("task_wakeups_monitor_ustackshots_trigger_pct", &task_wakeups_monitor_ustackshots_trigger_pct,
1020 	    sizeof(task_wakeups_monitor_ustackshots_trigger_pct))) {
1021 		task_wakeups_monitor_ustackshots_trigger_pct = TASK_WAKEUPS_MONITOR_DEFAULT_USTACKSHOTS_TRIGGER;
1022 	}
1023 
1024 	if (!PE_parse_boot_argn("task_iomon_limit_mb", &task_iomon_limit_mb, sizeof(task_iomon_limit_mb))) {
1025 		task_iomon_limit_mb = IOMON_DEFAULT_LIMIT;
1026 	}
1027 
1028 	if (!PE_parse_boot_argn("task_iomon_interval_secs", &task_iomon_interval_secs, sizeof(task_iomon_interval_secs))) {
1029 		task_iomon_interval_secs = IOMON_DEFAULT_INTERVAL;
1030 	}
1031 
1032 	if (!PE_parse_boot_argn("io_telemetry_limit", &io_telemetry_limit, sizeof(io_telemetry_limit))) {
1033 		io_telemetry_limit = IO_TELEMETRY_DEFAULT_LIMIT;
1034 	}
1035 
1036 /*
1037  * If we have coalitions, coalition_init() will call init_task_ledgers() as it
1038  * sets up the ledgers for the default coalition. If we don't have coalitions,
1039  * then we have to call it now.
1040  */
1041 #if CONFIG_COALITIONS
1042 	assert(task_ledger_template);
1043 #else /* CONFIG_COALITIONS */
1044 	init_task_ledgers();
1045 #endif /* CONFIG_COALITIONS */
1046 
1047 	task_ref_init();
1048 	task_zone_init();
1049 
1050 #ifdef __LP64__
1051 	boolean_t is_64bit = TRUE;
1052 #else
1053 	boolean_t is_64bit = FALSE;
1054 #endif
1055 
1056 	kernproc = (struct proc *)zalloc_flags(proc_task_zone, Z_WAITOK | Z_ZERO);
1057 	kernel_task = proc_get_task_raw(kernproc);
1058 
1059 	/*
1060 	 * Create the kernel task as the first task.
1061 	 */
1062 	if (task_create_internal(TASK_NULL, NULL, NULL, FALSE, is_64bit,
1063 	    is_64bit, TF_NONE, TF_NONE, TPF_NONE, TWF_NONE, kernel_task) != KERN_SUCCESS) {
1064 		panic("task_init");
1065 	}
1066 
1067 	ipc_task_enable(kernel_task);
1068 
1069 #if defined(HAS_APPLE_PAC)
1070 	kernel_task->rop_pid = ml_default_rop_pid();
1071 	kernel_task->jop_pid = ml_default_jop_pid();
1072 	// kernel_task never runs at EL0, but machine_thread_state_convert_from/to_user() relies on
1073 	// disable_user_jop to be false for kernel threads (e.g. in exception delivery on thread_exception_daemon)
1074 	ml_task_set_disable_user_jop(kernel_task, FALSE);
1075 #endif
1076 
1077 	vm_map_deallocate(kernel_task->map);
1078 	kernel_task->map = kernel_map;
1079 }
1080 
1081 static inline void
task_zone_init(void)1082 task_zone_init(void)
1083 {
1084 	proc_struct_size = roundup(proc_struct_size, task_alignment);
1085 	task_struct_size = roundup(sizeof(struct task), proc_alignment);
1086 	proc_and_task_size = proc_struct_size + task_struct_size;
1087 
1088 	proc_task_zone = zone_create_ext("proc_task", proc_and_task_size,
1089 	    ZC_ZFREE_CLEARMEM | ZC_SEQUESTER, ZONE_ID_PROC_TASK, NULL); /* sequester is needed for proc_rele() */
1090 }
1091 
1092 /*
1093  * Task ledgers
1094  * ------------
1095  *
1096  * phys_footprint
1097  *   Physical footprint: This is the sum of:
1098  *     + (internal - alternate_accounting)
1099  *     + (internal_compressed - alternate_accounting_compressed)
1100  *     + iokit_mapped
1101  *     + purgeable_nonvolatile
1102  *     + purgeable_nonvolatile_compressed
1103  *     + page_table
1104  *
1105  * internal
1106  *   The task's anonymous memory, which on iOS is always resident.
1107  *
1108  * internal_compressed
1109  *   Amount of this task's internal memory which is held by the compressor.
1110  *   Such memory is no longer actually resident for the task [i.e., resident in its pmap],
1111  *   and could be either decompressed back into memory, or paged out to storage, depending
1112  *   on our implementation.
1113  *
1114  * iokit_mapped
1115  *   IOKit mappings: The total size of all IOKit mappings in this task, regardless of
1116  *    clean/dirty or internal/external state].
1117  *
1118  * alternate_accounting
1119  *   The number of internal dirty pages which are part of IOKit mappings. By definition, these pages
1120  *   are counted in both internal *and* iokit_mapped, so we must subtract them from the total to avoid
1121  *   double counting.
1122  *
1123  * pages_grabbed
1124  *   pages_grabbed counts all page grabs in a task.  It is also broken out into three subtypes
1125  *   which track UPL, IOPL and Kernel page grabs.
1126  */
1127 void
init_task_ledgers(void)1128 init_task_ledgers(void)
1129 {
1130 	ledger_template_t t;
1131 
1132 	assert(task_ledger_template == NULL);
1133 	assert(kernel_task == TASK_NULL);
1134 
1135 #if MACH_ASSERT
1136 	PE_parse_boot_argn("pmap_ledgers_panic",
1137 	    &pmap_ledgers_panic,
1138 	    sizeof(pmap_ledgers_panic));
1139 	PE_parse_boot_argn("pmap_ledgers_panic_leeway",
1140 	    &pmap_ledgers_panic_leeway,
1141 	    sizeof(pmap_ledgers_panic_leeway));
1142 #endif /* MACH_ASSERT */
1143 
1144 	if ((t = ledger_template_create("Per-task ledger")) == NULL) {
1145 		panic("couldn't create task ledger template");
1146 	}
1147 
1148 	task_ledgers.cpu_time = ledger_entry_add(t, "cpu_time", "sched", "ns");
1149 	task_ledgers.tkm_private = ledger_entry_add(t, "tkm_private",
1150 	    "physmem", "bytes");
1151 	task_ledgers.tkm_shared = ledger_entry_add(t, "tkm_shared", "physmem",
1152 	    "bytes");
1153 	task_ledgers.phys_mem = ledger_entry_add(t, "phys_mem", "physmem",
1154 	    "bytes");
1155 	task_ledgers.wired_mem = ledger_entry_add(t, "wired_mem", "physmem",
1156 	    "bytes");
1157 	task_ledgers.internal = ledger_entry_add(t, "internal", "physmem",
1158 	    "bytes");
1159 	task_ledgers.iokit_mapped = ledger_entry_add_with_flags(t, "iokit_mapped", "mappings",
1160 	    "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1161 	task_ledgers.alternate_accounting = ledger_entry_add_with_flags(t, "alternate_accounting", "physmem",
1162 	    "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1163 	task_ledgers.alternate_accounting_compressed = ledger_entry_add_with_flags(t, "alternate_accounting_compressed", "physmem",
1164 	    "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1165 	task_ledgers.page_table = ledger_entry_add_with_flags(t, "page_table", "physmem",
1166 	    "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1167 	task_ledgers.phys_footprint = ledger_entry_add(t, "phys_footprint", "physmem",
1168 	    "bytes");
1169 	task_ledgers.internal_compressed = ledger_entry_add(t, "internal_compressed", "physmem",
1170 	    "bytes");
1171 	task_ledgers.reusable = ledger_entry_add(t, "reusable", "physmem", "bytes");
1172 	task_ledgers.external = ledger_entry_add(t, "external", "physmem", "bytes");
1173 	task_ledgers.purgeable_volatile = ledger_entry_add_with_flags(t, "purgeable_volatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1174 	task_ledgers.purgeable_nonvolatile = ledger_entry_add_with_flags(t, "purgeable_nonvolatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1175 	task_ledgers.purgeable_volatile_compressed = ledger_entry_add_with_flags(t, "purgeable_volatile_compress", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1176 	task_ledgers.purgeable_nonvolatile_compressed = ledger_entry_add_with_flags(t, "purgeable_nonvolatile_compress", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1177 #if DEBUG || DEVELOPMENT
1178 	task_ledgers.pages_grabbed = ledger_entry_add_with_flags(t, "pages_grabbed", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1179 	task_ledgers.pages_grabbed_kern = ledger_entry_add_with_flags(t, "pages_grabbed_kern", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1180 	task_ledgers.pages_grabbed_iopl = ledger_entry_add_with_flags(t, "pages_grabbed_iopl", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1181 	task_ledgers.pages_grabbed_upl = ledger_entry_add_with_flags(t, "pages_grabbed_upl", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1182 #endif
1183 	task_ledgers.tagged_nofootprint = ledger_entry_add_with_flags(t, "tagged_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1184 	task_ledgers.tagged_footprint = ledger_entry_add_with_flags(t, "tagged_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1185 	task_ledgers.tagged_nofootprint_compressed = ledger_entry_add_with_flags(t, "tagged_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1186 	task_ledgers.tagged_footprint_compressed = ledger_entry_add_with_flags(t, "tagged_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1187 	task_ledgers.network_volatile = ledger_entry_add_with_flags(t, "network_volatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1188 	task_ledgers.network_nonvolatile = ledger_entry_add_with_flags(t, "network_nonvolatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1189 	task_ledgers.network_volatile_compressed = ledger_entry_add_with_flags(t, "network_volatile_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1190 	task_ledgers.network_nonvolatile_compressed = ledger_entry_add_with_flags(t, "network_nonvolatile_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1191 	task_ledgers.media_nofootprint = ledger_entry_add_with_flags(t, "media_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1192 	task_ledgers.media_footprint = ledger_entry_add_with_flags(t, "media_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1193 	task_ledgers.media_nofootprint_compressed = ledger_entry_add_with_flags(t, "media_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1194 	task_ledgers.media_footprint_compressed = ledger_entry_add_with_flags(t, "media_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1195 	task_ledgers.graphics_nofootprint = ledger_entry_add_with_flags(t, "graphics_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1196 	task_ledgers.graphics_footprint = ledger_entry_add_with_flags(t, "graphics_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1197 	task_ledgers.graphics_nofootprint_compressed = ledger_entry_add_with_flags(t, "graphics_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1198 	task_ledgers.graphics_footprint_compressed = ledger_entry_add_with_flags(t, "graphics_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1199 	task_ledgers.neural_nofootprint = ledger_entry_add_with_flags(t, "neural_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1200 	task_ledgers.neural_footprint = ledger_entry_add_with_flags(t, "neural_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1201 	task_ledgers.neural_nofootprint_compressed = ledger_entry_add_with_flags(t, "neural_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1202 	task_ledgers.neural_footprint_compressed = ledger_entry_add_with_flags(t, "neural_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1203 
1204 #if CONFIG_FREEZE
1205 	task_ledgers.frozen_to_swap = ledger_entry_add(t, "frozen_to_swap", "physmem", "bytes");
1206 #endif /* CONFIG_FREEZE */
1207 
1208 	task_ledgers.platform_idle_wakeups = ledger_entry_add(t, "platform_idle_wakeups", "power",
1209 	    "count");
1210 	task_ledgers.interrupt_wakeups = ledger_entry_add(t, "interrupt_wakeups", "power",
1211 	    "count");
1212 
1213 #if CONFIG_SCHED_SFI
1214 	sfi_class_id_t class_id, ledger_alias;
1215 	for (class_id = SFI_CLASS_UNSPECIFIED; class_id < MAX_SFI_CLASS_ID; class_id++) {
1216 		task_ledgers.sfi_wait_times[class_id] = -1;
1217 	}
1218 
1219 	/* don't account for UNSPECIFIED */
1220 	for (class_id = SFI_CLASS_UNSPECIFIED + 1; class_id < MAX_SFI_CLASS_ID; class_id++) {
1221 		ledger_alias = sfi_get_ledger_alias_for_class(class_id);
1222 		if (ledger_alias != SFI_CLASS_UNSPECIFIED) {
1223 			/* Check to see if alias has been registered yet */
1224 			if (task_ledgers.sfi_wait_times[ledger_alias] != -1) {
1225 				task_ledgers.sfi_wait_times[class_id] = task_ledgers.sfi_wait_times[ledger_alias];
1226 			} else {
1227 				/* Otherwise, initialize it first */
1228 				task_ledgers.sfi_wait_times[class_id] = task_ledgers.sfi_wait_times[ledger_alias] = sfi_ledger_entry_add(t, ledger_alias);
1229 			}
1230 		} else {
1231 			task_ledgers.sfi_wait_times[class_id] = sfi_ledger_entry_add(t, class_id);
1232 		}
1233 
1234 		if (task_ledgers.sfi_wait_times[class_id] < 0) {
1235 			panic("couldn't create entries for task ledger template for SFI class 0x%x", class_id);
1236 		}
1237 	}
1238 
1239 	assert(task_ledgers.sfi_wait_times[MAX_SFI_CLASS_ID - 1] != -1);
1240 #endif /* CONFIG_SCHED_SFI */
1241 
1242 	task_ledgers.cpu_time_billed_to_me = ledger_entry_add(t, "cpu_time_billed_to_me", "sched", "ns");
1243 	task_ledgers.cpu_time_billed_to_others = ledger_entry_add(t, "cpu_time_billed_to_others", "sched", "ns");
1244 	task_ledgers.physical_writes = ledger_entry_add(t, "physical_writes", "res", "bytes");
1245 	task_ledgers.logical_writes = ledger_entry_add(t, "logical_writes", "res", "bytes");
1246 	task_ledgers.logical_writes_to_external = ledger_entry_add(t, "logical_writes_to_external", "res", "bytes");
1247 #if CONFIG_PHYS_WRITE_ACCT
1248 	task_ledgers.fs_metadata_writes = ledger_entry_add(t, "fs_metadata_writes", "res", "bytes");
1249 #endif /* CONFIG_PHYS_WRITE_ACCT */
1250 	task_ledgers.energy_billed_to_me = ledger_entry_add(t, "energy_billed_to_me", "power", "nj");
1251 	task_ledgers.energy_billed_to_others = ledger_entry_add(t, "energy_billed_to_others", "power", "nj");
1252 
1253 #if CONFIG_MEMORYSTATUS
1254 	task_ledgers.memorystatus_dirty_time = ledger_entry_add(t, "memorystatus_dirty_time", "physmem", "ns");
1255 #endif /* CONFIG_MEMORYSTATUS */
1256 
1257 	task_ledgers.swapins = ledger_entry_add_with_flags(t, "swapins", "physmem", "bytes",
1258 	    LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1259 
1260 	if ((task_ledgers.cpu_time < 0) ||
1261 	    (task_ledgers.tkm_private < 0) ||
1262 	    (task_ledgers.tkm_shared < 0) ||
1263 	    (task_ledgers.phys_mem < 0) ||
1264 	    (task_ledgers.wired_mem < 0) ||
1265 	    (task_ledgers.internal < 0) ||
1266 	    (task_ledgers.external < 0) ||
1267 	    (task_ledgers.reusable < 0) ||
1268 	    (task_ledgers.iokit_mapped < 0) ||
1269 	    (task_ledgers.alternate_accounting < 0) ||
1270 	    (task_ledgers.alternate_accounting_compressed < 0) ||
1271 	    (task_ledgers.page_table < 0) ||
1272 	    (task_ledgers.phys_footprint < 0) ||
1273 	    (task_ledgers.internal_compressed < 0) ||
1274 	    (task_ledgers.purgeable_volatile < 0) ||
1275 	    (task_ledgers.purgeable_nonvolatile < 0) ||
1276 	    (task_ledgers.purgeable_volatile_compressed < 0) ||
1277 	    (task_ledgers.purgeable_nonvolatile_compressed < 0) ||
1278 	    (task_ledgers.tagged_nofootprint < 0) ||
1279 	    (task_ledgers.tagged_footprint < 0) ||
1280 	    (task_ledgers.tagged_nofootprint_compressed < 0) ||
1281 	    (task_ledgers.tagged_footprint_compressed < 0) ||
1282 #if CONFIG_FREEZE
1283 	    (task_ledgers.frozen_to_swap < 0) ||
1284 #endif /* CONFIG_FREEZE */
1285 	    (task_ledgers.network_volatile < 0) ||
1286 	    (task_ledgers.network_nonvolatile < 0) ||
1287 	    (task_ledgers.network_volatile_compressed < 0) ||
1288 	    (task_ledgers.network_nonvolatile_compressed < 0) ||
1289 	    (task_ledgers.media_nofootprint < 0) ||
1290 	    (task_ledgers.media_footprint < 0) ||
1291 	    (task_ledgers.media_nofootprint_compressed < 0) ||
1292 	    (task_ledgers.media_footprint_compressed < 0) ||
1293 	    (task_ledgers.graphics_nofootprint < 0) ||
1294 	    (task_ledgers.graphics_footprint < 0) ||
1295 	    (task_ledgers.graphics_nofootprint_compressed < 0) ||
1296 	    (task_ledgers.graphics_footprint_compressed < 0) ||
1297 	    (task_ledgers.neural_nofootprint < 0) ||
1298 	    (task_ledgers.neural_footprint < 0) ||
1299 	    (task_ledgers.neural_nofootprint_compressed < 0) ||
1300 	    (task_ledgers.neural_footprint_compressed < 0) ||
1301 	    (task_ledgers.platform_idle_wakeups < 0) ||
1302 	    (task_ledgers.interrupt_wakeups < 0) ||
1303 	    (task_ledgers.cpu_time_billed_to_me < 0) || (task_ledgers.cpu_time_billed_to_others < 0) ||
1304 	    (task_ledgers.physical_writes < 0) ||
1305 	    (task_ledgers.logical_writes < 0) ||
1306 	    (task_ledgers.logical_writes_to_external < 0) ||
1307 #if CONFIG_PHYS_WRITE_ACCT
1308 	    (task_ledgers.fs_metadata_writes < 0) ||
1309 #endif /* CONFIG_PHYS_WRITE_ACCT */
1310 #if CONFIG_MEMORYSTATUS
1311 	    (task_ledgers.memorystatus_dirty_time < 0) ||
1312 #endif /* CONFIG_MEMORYSTATUS */
1313 	    (task_ledgers.energy_billed_to_me < 0) ||
1314 	    (task_ledgers.energy_billed_to_others < 0) ||
1315 	    (task_ledgers.swapins < 0)
1316 	    ) {
1317 		panic("couldn't create entries for task ledger template");
1318 	}
1319 
1320 	ledger_track_credit_only(t, task_ledgers.phys_footprint);
1321 	ledger_track_credit_only(t, task_ledgers.internal);
1322 	ledger_track_credit_only(t, task_ledgers.external);
1323 	ledger_track_credit_only(t, task_ledgers.reusable);
1324 
1325 	ledger_track_maximum(t, task_ledgers.phys_footprint, 60);
1326 	ledger_track_maximum(t, task_ledgers.phys_mem, 60);
1327 	ledger_track_maximum(t, task_ledgers.internal, 60);
1328 	ledger_track_maximum(t, task_ledgers.internal_compressed, 60);
1329 	ledger_track_maximum(t, task_ledgers.reusable, 60);
1330 	ledger_track_maximum(t, task_ledgers.external, 60);
1331 #if MACH_ASSERT
1332 	if (pmap_ledgers_panic) {
1333 		ledger_panic_on_negative(t, task_ledgers.phys_footprint);
1334 		ledger_panic_on_negative(t, task_ledgers.page_table);
1335 		ledger_panic_on_negative(t, task_ledgers.internal);
1336 		ledger_panic_on_negative(t, task_ledgers.iokit_mapped);
1337 		ledger_panic_on_negative(t, task_ledgers.alternate_accounting);
1338 		ledger_panic_on_negative(t, task_ledgers.alternate_accounting_compressed);
1339 		ledger_panic_on_negative(t, task_ledgers.purgeable_volatile);
1340 		ledger_panic_on_negative(t, task_ledgers.purgeable_nonvolatile);
1341 		ledger_panic_on_negative(t, task_ledgers.purgeable_volatile_compressed);
1342 		ledger_panic_on_negative(t, task_ledgers.purgeable_nonvolatile_compressed);
1343 #if CONFIG_PHYS_WRITE_ACCT
1344 		ledger_panic_on_negative(t, task_ledgers.fs_metadata_writes);
1345 #endif /* CONFIG_PHYS_WRITE_ACCT */
1346 
1347 		ledger_panic_on_negative(t, task_ledgers.tagged_nofootprint);
1348 		ledger_panic_on_negative(t, task_ledgers.tagged_footprint);
1349 		ledger_panic_on_negative(t, task_ledgers.tagged_nofootprint_compressed);
1350 		ledger_panic_on_negative(t, task_ledgers.tagged_footprint_compressed);
1351 		ledger_panic_on_negative(t, task_ledgers.network_volatile);
1352 		ledger_panic_on_negative(t, task_ledgers.network_nonvolatile);
1353 		ledger_panic_on_negative(t, task_ledgers.network_volatile_compressed);
1354 		ledger_panic_on_negative(t, task_ledgers.network_nonvolatile_compressed);
1355 		ledger_panic_on_negative(t, task_ledgers.media_nofootprint);
1356 		ledger_panic_on_negative(t, task_ledgers.media_footprint);
1357 		ledger_panic_on_negative(t, task_ledgers.media_nofootprint_compressed);
1358 		ledger_panic_on_negative(t, task_ledgers.media_footprint_compressed);
1359 		ledger_panic_on_negative(t, task_ledgers.graphics_nofootprint);
1360 		ledger_panic_on_negative(t, task_ledgers.graphics_footprint);
1361 		ledger_panic_on_negative(t, task_ledgers.graphics_nofootprint_compressed);
1362 		ledger_panic_on_negative(t, task_ledgers.graphics_footprint_compressed);
1363 		ledger_panic_on_negative(t, task_ledgers.neural_nofootprint);
1364 		ledger_panic_on_negative(t, task_ledgers.neural_footprint);
1365 		ledger_panic_on_negative(t, task_ledgers.neural_nofootprint_compressed);
1366 		ledger_panic_on_negative(t, task_ledgers.neural_footprint_compressed);
1367 	}
1368 #endif /* MACH_ASSERT */
1369 
1370 #if CONFIG_MEMORYSTATUS
1371 	ledger_set_callback(t, task_ledgers.phys_footprint, task_footprint_exceeded, NULL, NULL);
1372 #endif /* CONFIG_MEMORYSTATUS */
1373 
1374 	ledger_set_callback(t, task_ledgers.interrupt_wakeups,
1375 	    task_wakeups_rate_exceeded, NULL, NULL);
1376 	ledger_set_callback(t, task_ledgers.physical_writes, task_io_rate_exceeded, (void *)FLAVOR_IO_PHYSICAL_WRITES, NULL);
1377 
1378 #if !XNU_MONITOR
1379 	ledger_template_complete(t);
1380 #else /* !XNU_MONITOR */
1381 	ledger_template_complete_secure_alloc(t);
1382 #endif /* XNU_MONITOR */
1383 	task_ledger_template = t;
1384 }
1385 
1386 /* Create a task, but leave the task ports disabled */
1387 kern_return_t
task_create_internal(task_t parent_task,proc_ro_t proc_ro,coalition_t * parent_coalitions __unused,boolean_t inherit_memory,boolean_t is_64bit,boolean_t is_64bit_data,uint32_t t_flags,uint32_t t_flags_ro,uint32_t t_procflags,uint8_t t_returnwaitflags,task_t child_task)1388 task_create_internal(
1389 	task_t             parent_task,            /* Null-able */
1390 	proc_ro_t          proc_ro,
1391 	coalition_t        *parent_coalitions __unused,
1392 	boolean_t          inherit_memory,
1393 	boolean_t          is_64bit,
1394 	boolean_t          is_64bit_data,
1395 	uint32_t           t_flags,
1396 	uint32_t           t_flags_ro,
1397 	uint32_t           t_procflags,
1398 	uint8_t            t_returnwaitflags,
1399 	task_t             child_task)
1400 {
1401 	task_t                  new_task;
1402 	vm_shared_region_t      shared_region;
1403 	ledger_t                ledger = NULL;
1404 	struct task_ro_data     task_ro_data = {};
1405 	uint32_t                parent_t_flags_ro = 0;
1406 
1407 	new_task = child_task;
1408 
1409 	if (task_ref_count_init(new_task) != KERN_SUCCESS) {
1410 		return KERN_RESOURCE_SHORTAGE;
1411 	}
1412 
1413 	/* allocate with active entries */
1414 	assert(task_ledger_template != NULL);
1415 	ledger = ledger_instantiate(task_ledger_template, LEDGER_CREATE_ACTIVE_ENTRIES);
1416 	if (ledger == NULL) {
1417 		task_ref_count_fini(new_task);
1418 		return KERN_RESOURCE_SHORTAGE;
1419 	}
1420 
1421 	counter_alloc(&(new_task->faults));
1422 
1423 #if defined(HAS_APPLE_PAC)
1424 	ml_task_set_rop_pid(new_task, parent_task, inherit_memory);
1425 	ml_task_set_jop_pid(new_task, parent_task, inherit_memory);
1426 	ml_task_set_disable_user_jop(new_task, inherit_memory ? parent_task->disable_user_jop : FALSE);
1427 #endif
1428 
1429 
1430 	new_task->ledger = ledger;
1431 
1432 	/* if inherit_memory is true, parent_task MUST not be NULL */
1433 	if (!(t_flags & TF_CORPSE_FORK) && inherit_memory) {
1434 #if CONFIG_DEFERRED_RECLAIM
1435 		if (parent_task->deferred_reclamation_metadata) {
1436 			/*
1437 			 * Prevent concurrent reclaims while we're forking the parent_task's map,
1438 			 * so that the child's map is in sync with the forked reclamation
1439 			 * metadata.
1440 			 */
1441 			vm_deferred_reclamation_buffer_lock(parent_task->deferred_reclamation_metadata);
1442 		}
1443 #endif /* CONFIG_DEFERRED_RECLAIM */
1444 		new_task->map = vm_map_fork(ledger, parent_task->map, 0);
1445 #if CONFIG_DEFERRED_RECLAIM
1446 		if (parent_task->deferred_reclamation_metadata) {
1447 			new_task->deferred_reclamation_metadata =
1448 			    vm_deferred_reclamation_buffer_fork(new_task, parent_task->deferred_reclamation_metadata);
1449 		}
1450 #endif /* CONFIG_DEFERRED_RECLAIM */
1451 	} else {
1452 		unsigned int pmap_flags = is_64bit ? PMAP_CREATE_64BIT : 0;
1453 		pmap_t pmap = pmap_create_options(ledger, 0, pmap_flags);
1454 		vm_map_t new_map;
1455 
1456 		if (pmap == NULL) {
1457 			counter_free(&new_task->faults);
1458 			ledger_dereference(ledger);
1459 			task_ref_count_fini(new_task);
1460 			return KERN_RESOURCE_SHORTAGE;
1461 		}
1462 		new_map = vm_map_create_options(pmap,
1463 		    (vm_map_offset_t)(VM_MIN_ADDRESS),
1464 		    (vm_map_offset_t)(VM_MAX_ADDRESS),
1465 		    VM_MAP_CREATE_PAGEABLE);
1466 		if (parent_task) {
1467 			vm_map_inherit_limits(new_map, parent_task->map);
1468 		}
1469 		new_task->map = new_map;
1470 	}
1471 
1472 	if (new_task->map == NULL) {
1473 		counter_free(&new_task->faults);
1474 		ledger_dereference(ledger);
1475 		task_ref_count_fini(new_task);
1476 		return KERN_RESOURCE_SHORTAGE;
1477 	}
1478 
1479 #if defined(CONFIG_SCHED_MULTIQ)
1480 	new_task->sched_group = sched_group_create();
1481 #endif
1482 
1483 	lck_mtx_init(&new_task->lock, &task_lck_grp, &task_lck_attr);
1484 	queue_init(&new_task->threads);
1485 	new_task->suspend_count = 0;
1486 	new_task->thread_count = 0;
1487 	new_task->active_thread_count = 0;
1488 	new_task->user_stop_count = 0;
1489 	new_task->legacy_stop_count = 0;
1490 	new_task->active = TRUE;
1491 	new_task->halting = FALSE;
1492 	new_task->priv_flags = 0;
1493 	new_task->t_flags = t_flags;
1494 	task_ro_data.t_flags_ro = t_flags_ro;
1495 	new_task->t_procflags = t_procflags;
1496 	new_task->t_returnwaitflags = t_returnwaitflags;
1497 	new_task->returnwait_inheritor = current_thread();
1498 	new_task->importance = 0;
1499 	new_task->crashed_thread_id = 0;
1500 	new_task->watchports = NULL;
1501 	new_task->t_rr_ranges = NULL;
1502 
1503 	new_task->bank_context = NULL;
1504 
1505 	if (parent_task) {
1506 		parent_t_flags_ro = task_ro_flags_get(parent_task);
1507 	}
1508 
1509 #if __has_feature(ptrauth_calls)
1510 	/* Inherit the pac exception flags from parent if in fork */
1511 	if (parent_task && inherit_memory) {
1512 		task_ro_data.t_flags_ro |= (parent_t_flags_ro & (TFRO_PAC_ENFORCE_USER_STATE |
1513 		    TFRO_PAC_EXC_FATAL));
1514 	}
1515 #endif
1516 
1517 #ifdef MACH_BSD
1518 	new_task->corpse_info = NULL;
1519 #endif /* MACH_BSD */
1520 
1521 	/* kern_task not created by this function has unique id 0, start with 1 here. */
1522 	task_set_uniqueid(new_task);
1523 
1524 #if CONFIG_MACF
1525 	set_task_crash_label(new_task, NULL);
1526 
1527 	task_ro_data.task_filters.mach_trap_filter_mask = NULL;
1528 	task_ro_data.task_filters.mach_kobj_filter_mask = NULL;
1529 #endif
1530 
1531 #if CONFIG_MEMORYSTATUS
1532 	if (max_task_footprint != 0) {
1533 		ledger_set_limit(ledger, task_ledgers.phys_footprint, max_task_footprint, PHYS_FOOTPRINT_WARNING_LEVEL);
1534 	}
1535 #endif /* CONFIG_MEMORYSTATUS */
1536 
1537 	if (task_wakeups_monitor_rate != 0) {
1538 		uint32_t flags = WAKEMON_ENABLE | WAKEMON_SET_DEFAULTS;
1539 		int32_t  rate;        // Ignored because of WAKEMON_SET_DEFAULTS
1540 		task_wakeups_monitor_ctl(new_task, &flags, &rate);
1541 	}
1542 
1543 #if CONFIG_IO_ACCOUNTING
1544 	uint32_t flags = IOMON_ENABLE;
1545 	task_io_monitor_ctl(new_task, &flags);
1546 #endif /* CONFIG_IO_ACCOUNTING */
1547 
1548 	machine_task_init(new_task, parent_task, inherit_memory);
1549 
1550 	new_task->task_debug = NULL;
1551 
1552 #if DEVELOPMENT || DEBUG
1553 	new_task->task_unnested = FALSE;
1554 	new_task->task_disconnected_count = 0;
1555 #endif
1556 	queue_init(&new_task->semaphore_list);
1557 	new_task->semaphores_owned = 0;
1558 
1559 	new_task->vtimers = 0;
1560 
1561 	new_task->shared_region = NULL;
1562 
1563 	new_task->affinity_space = NULL;
1564 
1565 	new_task->t_kpc = 0;
1566 
1567 	new_task->pidsuspended = FALSE;
1568 	new_task->frozen = FALSE;
1569 	new_task->changing_freeze_state = FALSE;
1570 	new_task->rusage_cpu_flags = 0;
1571 	new_task->rusage_cpu_percentage = 0;
1572 	new_task->rusage_cpu_interval = 0;
1573 	new_task->rusage_cpu_deadline = 0;
1574 	new_task->rusage_cpu_callt = NULL;
1575 #if MACH_ASSERT
1576 	new_task->suspends_outstanding = 0;
1577 #endif
1578 	recount_task_init(&new_task->tk_recount);
1579 
1580 #if HYPERVISOR
1581 	new_task->hv_task_target = NULL;
1582 #endif /* HYPERVISOR */
1583 
1584 #if CONFIG_TASKWATCH
1585 	queue_init(&new_task->task_watchers);
1586 	new_task->num_taskwatchers  = 0;
1587 	new_task->watchapplying  = 0;
1588 #endif /* CONFIG_TASKWATCH */
1589 
1590 	new_task->mem_notify_reserved = 0;
1591 	new_task->memlimit_attrs_reserved = 0;
1592 
1593 	new_task->requested_policy = default_task_requested_policy;
1594 	new_task->effective_policy = default_task_effective_policy;
1595 
1596 	new_task->task_shared_region_slide = -1;
1597 
1598 	if (parent_task != NULL) {
1599 		task_ro_data.task_tokens.sec_token = *task_get_sec_token(parent_task);
1600 		task_ro_data.task_tokens.audit_token = *task_get_audit_token(parent_task);
1601 
1602 		/* only inherit the option bits, no effect until task_set_immovable_pinned() */
1603 		task_ro_data.task_control_port_options = task_get_control_port_options(parent_task);
1604 
1605 		task_ro_data.t_flags_ro |= parent_t_flags_ro & TFRO_FILTER_MSG;
1606 #if CONFIG_MACF
1607 		if (!(t_flags & TF_CORPSE_FORK)) {
1608 			task_ro_data.task_filters.mach_trap_filter_mask = task_get_mach_trap_filter_mask(parent_task);
1609 			task_ro_data.task_filters.mach_kobj_filter_mask = task_get_mach_kobj_filter_mask(parent_task);
1610 		}
1611 #endif
1612 	} else {
1613 		task_ro_data.task_tokens.sec_token = KERNEL_SECURITY_TOKEN;
1614 		task_ro_data.task_tokens.audit_token = KERNEL_AUDIT_TOKEN;
1615 
1616 		task_ro_data.task_control_port_options = TASK_CONTROL_PORT_OPTIONS_NONE;
1617 	}
1618 
1619 	/* must set before task_importance_init_from_parent: */
1620 	if (proc_ro != NULL) {
1621 		new_task->bsd_info_ro = proc_ro_ref_task(proc_ro, new_task, &task_ro_data);
1622 	} else {
1623 		new_task->bsd_info_ro = proc_ro_alloc(NULL, NULL, new_task, &task_ro_data);
1624 	}
1625 
1626 	ipc_task_init(new_task, parent_task);
1627 
1628 	task_importance_init_from_parent(new_task, parent_task);
1629 
1630 	new_task->corpse_vmobject_list = NULL;
1631 
1632 	if (parent_task != TASK_NULL) {
1633 		/* inherit the parent's shared region */
1634 		shared_region = vm_shared_region_get(parent_task);
1635 		if (shared_region != NULL) {
1636 			vm_shared_region_set(new_task, shared_region);
1637 		}
1638 
1639 #if __has_feature(ptrauth_calls)
1640 		/* use parent's shared_region_id */
1641 		char *shared_region_id = task_get_vm_shared_region_id_and_jop_pid(parent_task, NULL);
1642 		if (shared_region_id != NULL) {
1643 			shared_region_key_alloc(shared_region_id, FALSE, 0);         /* get a reference */
1644 		}
1645 		task_set_shared_region_id(new_task, shared_region_id);
1646 #endif /* __has_feature(ptrauth_calls) */
1647 
1648 		if (task_has_64Bit_addr(parent_task)) {
1649 			task_set_64Bit_addr(new_task);
1650 		}
1651 
1652 		if (task_has_64Bit_data(parent_task)) {
1653 			task_set_64Bit_data(new_task);
1654 		}
1655 
1656 		new_task->all_image_info_addr = parent_task->all_image_info_addr;
1657 		new_task->all_image_info_size = parent_task->all_image_info_size;
1658 		new_task->mach_header_vm_address = 0;
1659 
1660 		if (inherit_memory && parent_task->affinity_space) {
1661 			task_affinity_create(parent_task, new_task);
1662 		}
1663 
1664 		new_task->pset_hint = parent_task->pset_hint = task_choose_pset(parent_task);
1665 
1666 		new_task->task_exc_guard = parent_task->task_exc_guard;
1667 		if (parent_task->t_flags & TF_NO_SMT) {
1668 			new_task->t_flags |= TF_NO_SMT;
1669 		}
1670 
1671 		if (parent_task->t_flags & TF_USE_PSET_HINT_CLUSTER_TYPE) {
1672 			new_task->t_flags |= TF_USE_PSET_HINT_CLUSTER_TYPE;
1673 		}
1674 
1675 		if (parent_task->t_flags & TF_TECS) {
1676 			new_task->t_flags |= TF_TECS;
1677 		}
1678 
1679 #if defined(__x86_64__)
1680 		if (parent_task->t_flags & TF_INSN_COPY_OPTOUT) {
1681 			new_task->t_flags |= TF_INSN_COPY_OPTOUT;
1682 		}
1683 #endif
1684 		new_task->priority = BASEPRI_DEFAULT;
1685 		new_task->max_priority = MAXPRI_USER;
1686 
1687 		task_policy_create(new_task, parent_task);
1688 	} else {
1689 #ifdef __LP64__
1690 		if (is_64bit) {
1691 			task_set_64Bit_addr(new_task);
1692 		}
1693 #endif
1694 
1695 		if (is_64bit_data) {
1696 			task_set_64Bit_data(new_task);
1697 		}
1698 
1699 		new_task->all_image_info_addr = (mach_vm_address_t)0;
1700 		new_task->all_image_info_size = (mach_vm_size_t)0;
1701 
1702 		new_task->pset_hint = PROCESSOR_SET_NULL;
1703 
1704 		new_task->task_exc_guard = TASK_EXC_GUARD_NONE;
1705 
1706 		if (new_task == kernel_task) {
1707 			new_task->priority = BASEPRI_KERNEL;
1708 			new_task->max_priority = MAXPRI_KERNEL;
1709 		} else {
1710 			new_task->priority = BASEPRI_DEFAULT;
1711 			new_task->max_priority = MAXPRI_USER;
1712 		}
1713 	}
1714 
1715 	bzero(new_task->coalition, sizeof(new_task->coalition));
1716 	for (int i = 0; i < COALITION_NUM_TYPES; i++) {
1717 		queue_chain_init(new_task->task_coalition[i]);
1718 	}
1719 
1720 	/* Allocate I/O Statistics */
1721 	new_task->task_io_stats = kalloc_data(sizeof(struct io_stat_info),
1722 	    Z_WAITOK | Z_ZERO | Z_NOFAIL);
1723 
1724 	bzero(&(new_task->cpu_time_eqos_stats), sizeof(new_task->cpu_time_eqos_stats));
1725 	bzero(&(new_task->cpu_time_rqos_stats), sizeof(new_task->cpu_time_rqos_stats));
1726 
1727 	bzero(&new_task->extmod_statistics, sizeof(new_task->extmod_statistics));
1728 
1729 	counter_alloc(&(new_task->pageins));
1730 	counter_alloc(&(new_task->cow_faults));
1731 	counter_alloc(&(new_task->messages_sent));
1732 	counter_alloc(&(new_task->messages_received));
1733 
1734 	/* Copy resource acc. info from Parent for Corpe Forked task. */
1735 	if (parent_task != NULL && (t_flags & TF_CORPSE_FORK)) {
1736 		task_rollup_accounting_info(new_task, parent_task);
1737 		task_store_owned_vmobject_info(new_task, parent_task);
1738 	} else {
1739 		/* Initialize to zero for standard fork/spawn case */
1740 		new_task->total_runnable_time = 0;
1741 		new_task->syscalls_mach = 0;
1742 		new_task->syscalls_unix = 0;
1743 		new_task->c_switch = 0;
1744 		new_task->p_switch = 0;
1745 		new_task->ps_switch = 0;
1746 		new_task->decompressions = 0;
1747 		new_task->low_mem_notified_warn = 0;
1748 		new_task->low_mem_notified_critical = 0;
1749 		new_task->purged_memory_warn = 0;
1750 		new_task->purged_memory_critical = 0;
1751 		new_task->low_mem_privileged_listener = 0;
1752 		new_task->memlimit_is_active = 0;
1753 		new_task->memlimit_is_fatal = 0;
1754 		new_task->memlimit_active_exc_resource = 0;
1755 		new_task->memlimit_inactive_exc_resource = 0;
1756 		new_task->task_timer_wakeups_bin_1 = 0;
1757 		new_task->task_timer_wakeups_bin_2 = 0;
1758 		new_task->task_gpu_ns = 0;
1759 		new_task->task_writes_counters_internal.task_immediate_writes = 0;
1760 		new_task->task_writes_counters_internal.task_deferred_writes = 0;
1761 		new_task->task_writes_counters_internal.task_invalidated_writes = 0;
1762 		new_task->task_writes_counters_internal.task_metadata_writes = 0;
1763 		new_task->task_writes_counters_external.task_immediate_writes = 0;
1764 		new_task->task_writes_counters_external.task_deferred_writes = 0;
1765 		new_task->task_writes_counters_external.task_invalidated_writes = 0;
1766 		new_task->task_writes_counters_external.task_metadata_writes = 0;
1767 #if CONFIG_PHYS_WRITE_ACCT
1768 		new_task->task_fs_metadata_writes = 0;
1769 #endif /* CONFIG_PHYS_WRITE_ACCT */
1770 	}
1771 
1772 
1773 	new_task->donates_own_pages = FALSE;
1774 #if CONFIG_COALITIONS
1775 	if (!(t_flags & TF_CORPSE_FORK)) {
1776 		/* TODO: there is no graceful failure path here... */
1777 		if (parent_coalitions && parent_coalitions[COALITION_TYPE_RESOURCE]) {
1778 			coalitions_adopt_task(parent_coalitions, new_task);
1779 			if (parent_coalitions[COALITION_TYPE_JETSAM]) {
1780 				new_task->donates_own_pages = coalition_is_swappable(parent_coalitions[COALITION_TYPE_JETSAM]);
1781 			}
1782 		} else if (parent_task && parent_task->coalition[COALITION_TYPE_RESOURCE]) {
1783 			/*
1784 			 * all tasks at least have a resource coalition, so
1785 			 * if the parent has one then inherit all coalitions
1786 			 * the parent is a part of
1787 			 */
1788 			coalitions_adopt_task(parent_task->coalition, new_task);
1789 			if (parent_task->coalition[COALITION_TYPE_JETSAM]) {
1790 				new_task->donates_own_pages = coalition_is_swappable(parent_task->coalition[COALITION_TYPE_JETSAM]);
1791 			}
1792 		} else {
1793 			/* TODO: assert that new_task will be PID 1 (launchd) */
1794 			coalitions_adopt_init_task(new_task);
1795 		}
1796 		/*
1797 		 * on exec, we need to transfer the coalition roles from the
1798 		 * parent task to the exec copy task.
1799 		 */
1800 		if (parent_task && (t_procflags & TPF_EXEC_COPY)) {
1801 			int coal_roles[COALITION_NUM_TYPES];
1802 			task_coalition_roles(parent_task, coal_roles);
1803 			(void)coalitions_set_roles(new_task->coalition, new_task, coal_roles);
1804 		}
1805 	} else {
1806 		coalitions_adopt_corpse_task(new_task);
1807 	}
1808 
1809 	if (new_task->coalition[COALITION_TYPE_RESOURCE] == COALITION_NULL) {
1810 		panic("created task is not a member of a resource coalition");
1811 	}
1812 	task_set_coalition_member(new_task);
1813 #endif /* CONFIG_COALITIONS */
1814 
1815 	new_task->dispatchqueue_offset = 0;
1816 	if (parent_task != NULL) {
1817 		new_task->dispatchqueue_offset = parent_task->dispatchqueue_offset;
1818 	}
1819 
1820 	new_task->task_can_transfer_memory_ownership = FALSE;
1821 	new_task->task_volatile_objects = 0;
1822 	new_task->task_nonvolatile_objects = 0;
1823 	new_task->task_objects_disowning = FALSE;
1824 	new_task->task_objects_disowned = FALSE;
1825 	new_task->task_owned_objects = 0;
1826 	queue_init(&new_task->task_objq);
1827 
1828 #if CONFIG_FREEZE
1829 	queue_init(&new_task->task_frozen_cseg_q);
1830 #endif /* CONFIG_FREEZE */
1831 
1832 	task_objq_lock_init(new_task);
1833 
1834 #if __arm64__
1835 	new_task->task_legacy_footprint = FALSE;
1836 	new_task->task_extra_footprint_limit = FALSE;
1837 	new_task->task_ios13extended_footprint_limit = FALSE;
1838 #endif /* __arm64__ */
1839 	new_task->task_region_footprint = FALSE;
1840 	new_task->task_has_crossed_thread_limit = FALSE;
1841 	new_task->task_thread_limit = 0;
1842 #if CONFIG_SECLUDED_MEMORY
1843 	new_task->task_can_use_secluded_mem = FALSE;
1844 	new_task->task_could_use_secluded_mem = FALSE;
1845 	new_task->task_could_also_use_secluded_mem = FALSE;
1846 	new_task->task_suppressed_secluded = FALSE;
1847 #endif /* CONFIG_SECLUDED_MEMORY */
1848 
1849 	/*
1850 	 * t_flags is set up above. But since we don't
1851 	 * support darkwake mode being set that way
1852 	 * currently, we clear it out here explicitly.
1853 	 */
1854 	new_task->t_flags &= ~(TF_DARKWAKE_MODE);
1855 
1856 	queue_init(&new_task->io_user_clients);
1857 	new_task->loadTag = 0;
1858 
1859 	lck_mtx_lock(&tasks_threads_lock);
1860 	queue_enter(&tasks, new_task, task_t, tasks);
1861 	tasks_count++;
1862 	if (tasks_suspend_state) {
1863 		task_suspend_internal(new_task);
1864 	}
1865 	lck_mtx_unlock(&tasks_threads_lock);
1866 	task_ref_hold_proc_task_struct(new_task);
1867 
1868 	return KERN_SUCCESS;
1869 }
1870 
1871 /*
1872  *	task_rollup_accounting_info
1873  *
1874  *	Roll up accounting stats. Used to rollup stats
1875  *	for exec copy task and corpse fork.
1876  */
1877 void
task_rollup_accounting_info(task_t to_task,task_t from_task)1878 task_rollup_accounting_info(task_t to_task, task_t from_task)
1879 {
1880 	assert(from_task != to_task);
1881 
1882 	recount_task_copy(&to_task->tk_recount, &from_task->tk_recount);
1883 	to_task->total_runnable_time = from_task->total_runnable_time;
1884 	counter_add(&to_task->faults, counter_load(&from_task->faults));
1885 	counter_add(&to_task->pageins, counter_load(&from_task->pageins));
1886 	counter_add(&to_task->cow_faults, counter_load(&from_task->cow_faults));
1887 	counter_add(&to_task->messages_sent, counter_load(&from_task->messages_sent));
1888 	counter_add(&to_task->messages_received, counter_load(&from_task->messages_received));
1889 	to_task->decompressions = from_task->decompressions;
1890 	to_task->syscalls_mach = from_task->syscalls_mach;
1891 	to_task->syscalls_unix = from_task->syscalls_unix;
1892 	to_task->c_switch = from_task->c_switch;
1893 	to_task->p_switch = from_task->p_switch;
1894 	to_task->ps_switch = from_task->ps_switch;
1895 	to_task->extmod_statistics = from_task->extmod_statistics;
1896 	to_task->low_mem_notified_warn = from_task->low_mem_notified_warn;
1897 	to_task->low_mem_notified_critical = from_task->low_mem_notified_critical;
1898 	to_task->purged_memory_warn = from_task->purged_memory_warn;
1899 	to_task->purged_memory_critical = from_task->purged_memory_critical;
1900 	to_task->low_mem_privileged_listener = from_task->low_mem_privileged_listener;
1901 	*to_task->task_io_stats = *from_task->task_io_stats;
1902 	to_task->cpu_time_eqos_stats = from_task->cpu_time_eqos_stats;
1903 	to_task->cpu_time_rqos_stats = from_task->cpu_time_rqos_stats;
1904 	to_task->task_timer_wakeups_bin_1 = from_task->task_timer_wakeups_bin_1;
1905 	to_task->task_timer_wakeups_bin_2 = from_task->task_timer_wakeups_bin_2;
1906 	to_task->task_gpu_ns = from_task->task_gpu_ns;
1907 	to_task->task_writes_counters_internal.task_immediate_writes = from_task->task_writes_counters_internal.task_immediate_writes;
1908 	to_task->task_writes_counters_internal.task_deferred_writes = from_task->task_writes_counters_internal.task_deferred_writes;
1909 	to_task->task_writes_counters_internal.task_invalidated_writes = from_task->task_writes_counters_internal.task_invalidated_writes;
1910 	to_task->task_writes_counters_internal.task_metadata_writes = from_task->task_writes_counters_internal.task_metadata_writes;
1911 	to_task->task_writes_counters_external.task_immediate_writes = from_task->task_writes_counters_external.task_immediate_writes;
1912 	to_task->task_writes_counters_external.task_deferred_writes = from_task->task_writes_counters_external.task_deferred_writes;
1913 	to_task->task_writes_counters_external.task_invalidated_writes = from_task->task_writes_counters_external.task_invalidated_writes;
1914 	to_task->task_writes_counters_external.task_metadata_writes = from_task->task_writes_counters_external.task_metadata_writes;
1915 #if CONFIG_PHYS_WRITE_ACCT
1916 	to_task->task_fs_metadata_writes = from_task->task_fs_metadata_writes;
1917 #endif /* CONFIG_PHYS_WRITE_ACCT */
1918 
1919 #if CONFIG_MEMORYSTATUS
1920 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.memorystatus_dirty_time);
1921 #endif /* CONFIG_MEMORYSTATUS */
1922 
1923 	/* Skip ledger roll up for memory accounting entries */
1924 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time);
1925 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.platform_idle_wakeups);
1926 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.interrupt_wakeups);
1927 #if CONFIG_SCHED_SFI
1928 	for (sfi_class_id_t class_id = SFI_CLASS_UNSPECIFIED; class_id < MAX_SFI_CLASS_ID; class_id++) {
1929 		ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.sfi_wait_times[class_id]);
1930 	}
1931 #endif
1932 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time_billed_to_me);
1933 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time_billed_to_others);
1934 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.physical_writes);
1935 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.logical_writes);
1936 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.energy_billed_to_me);
1937 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.energy_billed_to_others);
1938 }
1939 
1940 /*
1941  *	task_deallocate_internal:
1942  *
1943  *	Drop a reference on a task.
1944  *	Don't call this directly.
1945  */
1946 extern void task_deallocate_internal(task_t task, os_ref_count_t refs);
1947 void
task_deallocate_internal(task_t task,os_ref_count_t refs)1948 task_deallocate_internal(
1949 	task_t          task,
1950 	os_ref_count_t  refs)
1951 {
1952 	ledger_amount_t credit, debit, interrupt_wakeups, platform_idle_wakeups;
1953 
1954 	if (task == TASK_NULL) {
1955 		return;
1956 	}
1957 
1958 #if IMPORTANCE_INHERITANCE
1959 	if (refs == 1) {
1960 		/*
1961 		 * If last ref potentially comes from the task's importance,
1962 		 * disconnect it.  But more task refs may be added before
1963 		 * that completes, so wait for the reference to go to zero
1964 		 * naturally (it may happen on a recursive task_deallocate()
1965 		 * from the ipc_importance_disconnect_task() call).
1966 		 */
1967 		if (IIT_NULL != task->task_imp_base) {
1968 			ipc_importance_disconnect_task(task);
1969 		}
1970 		return;
1971 	}
1972 #endif /* IMPORTANCE_INHERITANCE */
1973 
1974 	if (refs > 0) {
1975 		return;
1976 	}
1977 
1978 	/*
1979 	 * The task should be dead at this point. Ensure other resources
1980 	 * like threads, are gone before we trash the world.
1981 	 */
1982 	assert(queue_empty(&task->threads));
1983 	assert(get_bsdtask_info(task) == NULL);
1984 	assert(!is_active(task->itk_space));
1985 	assert(!task->active);
1986 	assert(task->active_thread_count == 0);
1987 	assert(!task_get_game_mode(task));
1988 
1989 	lck_mtx_lock(&tasks_threads_lock);
1990 	assert(terminated_tasks_count > 0);
1991 	queue_remove(&terminated_tasks, task, task_t, tasks);
1992 	terminated_tasks_count--;
1993 	lck_mtx_unlock(&tasks_threads_lock);
1994 
1995 	/*
1996 	 * remove the reference on bank context
1997 	 */
1998 	task_bank_reset(task);
1999 
2000 	kfree_data(task->task_io_stats, sizeof(struct io_stat_info));
2001 
2002 	/*
2003 	 *	Give the machine dependent code a chance
2004 	 *	to perform cleanup before ripping apart
2005 	 *	the task.
2006 	 */
2007 	machine_task_terminate(task);
2008 
2009 	ipc_task_terminate(task);
2010 
2011 	/* let iokit know */
2012 	iokit_task_terminate(task);
2013 
2014 	/* Unregister task from userspace coredumps on panic */
2015 	kern_unregister_userspace_coredump(task);
2016 
2017 	if (task->affinity_space) {
2018 		task_affinity_deallocate(task);
2019 	}
2020 
2021 #if MACH_ASSERT
2022 	if (task->ledger != NULL &&
2023 	    task->map != NULL &&
2024 	    task->map->pmap != NULL &&
2025 	    task->map->pmap->ledger != NULL) {
2026 		assert(task->ledger == task->map->pmap->ledger);
2027 	}
2028 #endif /* MACH_ASSERT */
2029 
2030 	vm_owned_objects_disown(task);
2031 	assert(task->task_objects_disowned);
2032 	if (task->task_owned_objects != 0) {
2033 		panic("task_deallocate(%p): "
2034 		    "volatile_objects=%d nonvolatile_objects=%d owned=%d\n",
2035 		    task,
2036 		    task->task_volatile_objects,
2037 		    task->task_nonvolatile_objects,
2038 		    task->task_owned_objects);
2039 	}
2040 
2041 #if CONFIG_DEFERRED_RECLAIM
2042 	if (task->deferred_reclamation_metadata != NULL) {
2043 		vm_deferred_reclamation_buffer_deallocate(task->deferred_reclamation_metadata);
2044 		task->deferred_reclamation_metadata = NULL;
2045 	}
2046 #endif /* CONFIG_DEFERRED_RECLAIM */
2047 
2048 	vm_map_deallocate(task->map);
2049 	if (task->is_large_corpse) {
2050 		assert(large_corpse_count > 0);
2051 		OSDecrementAtomic(&large_corpse_count);
2052 		task->is_large_corpse = false;
2053 	}
2054 	is_release(task->itk_space);
2055 	if (task->t_rr_ranges) {
2056 		restartable_ranges_release(task->t_rr_ranges);
2057 	}
2058 
2059 	ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
2060 	    &interrupt_wakeups, &debit);
2061 	ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
2062 	    &platform_idle_wakeups, &debit);
2063 
2064 #if defined(CONFIG_SCHED_MULTIQ)
2065 	sched_group_destroy(task->sched_group);
2066 #endif
2067 
2068 	struct recount_times_mach sum = { 0 };
2069 	struct recount_times_mach p_only = { 0 };
2070 	recount_task_times_perf_only(task, &sum, &p_only);
2071 #if CONFIG_PERVASIVE_ENERGY
2072 	uint64_t energy = recount_task_energy_nj(task);
2073 #endif /* CONFIG_PERVASIVE_ENERGY */
2074 	recount_task_deinit(&task->tk_recount);
2075 
2076 	/* Accumulate statistics for dead tasks */
2077 	lck_spin_lock(&dead_task_statistics_lock);
2078 	dead_task_statistics.total_user_time += sum.rtm_user;
2079 	dead_task_statistics.total_system_time += sum.rtm_system;
2080 
2081 	dead_task_statistics.task_interrupt_wakeups += interrupt_wakeups;
2082 	dead_task_statistics.task_platform_idle_wakeups += platform_idle_wakeups;
2083 
2084 	dead_task_statistics.task_timer_wakeups_bin_1 += task->task_timer_wakeups_bin_1;
2085 	dead_task_statistics.task_timer_wakeups_bin_2 += task->task_timer_wakeups_bin_2;
2086 	dead_task_statistics.total_ptime += p_only.rtm_user + p_only.rtm_system;
2087 	dead_task_statistics.total_pset_switches += task->ps_switch;
2088 	dead_task_statistics.task_gpu_ns += task->task_gpu_ns;
2089 #if CONFIG_PERVASIVE_ENERGY
2090 	dead_task_statistics.task_energy += energy;
2091 #endif /* CONFIG_PERVASIVE_ENERGY */
2092 
2093 	lck_spin_unlock(&dead_task_statistics_lock);
2094 	lck_mtx_destroy(&task->lock, &task_lck_grp);
2095 
2096 	if (!ledger_get_entries(task->ledger, task_ledgers.tkm_private, &credit,
2097 	    &debit)) {
2098 		OSAddAtomic64(credit, (int64_t *)&tasks_tkm_private.alloc);
2099 		OSAddAtomic64(debit, (int64_t *)&tasks_tkm_private.free);
2100 	}
2101 	if (!ledger_get_entries(task->ledger, task_ledgers.tkm_shared, &credit,
2102 	    &debit)) {
2103 		OSAddAtomic64(credit, (int64_t *)&tasks_tkm_shared.alloc);
2104 		OSAddAtomic64(debit, (int64_t *)&tasks_tkm_shared.free);
2105 	}
2106 	ledger_dereference(task->ledger);
2107 
2108 	counter_free(&task->faults);
2109 	counter_free(&task->pageins);
2110 	counter_free(&task->cow_faults);
2111 	counter_free(&task->messages_sent);
2112 	counter_free(&task->messages_received);
2113 
2114 #if CONFIG_COALITIONS
2115 	task_release_coalitions(task);
2116 #endif /* CONFIG_COALITIONS */
2117 
2118 	bzero(task->coalition, sizeof(task->coalition));
2119 
2120 #if MACH_BSD
2121 	/* clean up collected information since last reference to task is gone */
2122 	if (task->corpse_info) {
2123 		void *corpse_info_kernel = kcdata_memory_get_begin_addr(task->corpse_info);
2124 		task_crashinfo_destroy(task->corpse_info);
2125 		task->corpse_info = NULL;
2126 		kfree_data(corpse_info_kernel, CORPSEINFO_ALLOCATION_SIZE);
2127 	}
2128 #endif
2129 
2130 #if CONFIG_MACF
2131 	if (get_task_crash_label(task)) {
2132 		mac_exc_free_label(get_task_crash_label(task));
2133 		set_task_crash_label(task, NULL);
2134 	}
2135 #endif
2136 
2137 	assert(queue_empty(&task->task_objq));
2138 	task_objq_lock_destroy(task);
2139 
2140 	if (task->corpse_vmobject_list) {
2141 		kfree_data(task->corpse_vmobject_list,
2142 		    (vm_size_t)task->corpse_vmobject_list_size);
2143 	}
2144 
2145 	task_ref_count_fini(task);
2146 	proc_ro_erase_task(task->bsd_info_ro);
2147 	task_release_proc_task_struct(task);
2148 }
2149 
2150 /*
2151  *	task_name_deallocate_mig:
2152  *
2153  *	Drop a reference on a task name.
2154  */
2155 void
task_name_deallocate_mig(task_name_t task_name)2156 task_name_deallocate_mig(
2157 	task_name_t             task_name)
2158 {
2159 	return task_deallocate_grp((task_t)task_name, TASK_GRP_MIG);
2160 }
2161 
2162 /*
2163  *	task_policy_set_deallocate_mig:
2164  *
2165  *	Drop a reference on a task type.
2166  */
2167 void
task_policy_set_deallocate_mig(task_policy_set_t task_policy_set)2168 task_policy_set_deallocate_mig(task_policy_set_t task_policy_set)
2169 {
2170 	return task_deallocate_grp((task_t)task_policy_set, TASK_GRP_MIG);
2171 }
2172 
2173 /*
2174  *	task_policy_get_deallocate_mig:
2175  *
2176  *	Drop a reference on a task type.
2177  */
2178 void
task_policy_get_deallocate_mig(task_policy_get_t task_policy_get)2179 task_policy_get_deallocate_mig(task_policy_get_t task_policy_get)
2180 {
2181 	return task_deallocate_grp((task_t)task_policy_get, TASK_GRP_MIG);
2182 }
2183 
2184 /*
2185  *	task_inspect_deallocate_mig:
2186  *
2187  *	Drop a task inspection reference.
2188  */
2189 void
task_inspect_deallocate_mig(task_inspect_t task_inspect)2190 task_inspect_deallocate_mig(
2191 	task_inspect_t          task_inspect)
2192 {
2193 	return task_deallocate_grp((task_t)task_inspect, TASK_GRP_MIG);
2194 }
2195 
2196 /*
2197  *	task_read_deallocate_mig:
2198  *
2199  *	Drop a reference on task read port.
2200  */
2201 void
task_read_deallocate_mig(task_read_t task_read)2202 task_read_deallocate_mig(
2203 	task_read_t          task_read)
2204 {
2205 	return task_deallocate_grp((task_t)task_read, TASK_GRP_MIG);
2206 }
2207 
2208 /*
2209  *	task_suspension_token_deallocate:
2210  *
2211  *	Drop a reference on a task suspension token.
2212  */
2213 void
task_suspension_token_deallocate(task_suspension_token_t token)2214 task_suspension_token_deallocate(
2215 	task_suspension_token_t         token)
2216 {
2217 	return task_deallocate((task_t)token);
2218 }
2219 
2220 void
task_suspension_token_deallocate_grp(task_suspension_token_t token,task_grp_t grp)2221 task_suspension_token_deallocate_grp(
2222 	task_suspension_token_t         token,
2223 	task_grp_t                      grp)
2224 {
2225 	return task_deallocate_grp((task_t)token, grp);
2226 }
2227 
2228 /*
2229  * task_collect_crash_info:
2230  *
2231  * collect crash info from bsd and mach based data
2232  */
2233 kern_return_t
task_collect_crash_info(task_t task,struct label * crash_label,int is_corpse_fork)2234 task_collect_crash_info(
2235 	task_t task,
2236 #ifdef CONFIG_MACF
2237 	struct label *crash_label,
2238 #endif
2239 	int is_corpse_fork)
2240 {
2241 	kern_return_t kr = KERN_SUCCESS;
2242 
2243 	kcdata_descriptor_t crash_data = NULL;
2244 	kcdata_descriptor_t crash_data_release = NULL;
2245 	mach_msg_type_number_t size = CORPSEINFO_ALLOCATION_SIZE;
2246 	mach_vm_offset_t crash_data_ptr = 0;
2247 	void *crash_data_kernel = NULL;
2248 	void *crash_data_kernel_release = NULL;
2249 #if CONFIG_MACF
2250 	struct label *label, *free_label;
2251 #endif
2252 
2253 	if (!corpses_enabled()) {
2254 		return KERN_NOT_SUPPORTED;
2255 	}
2256 
2257 #if CONFIG_MACF
2258 	free_label = label = mac_exc_create_label(NULL);
2259 #endif
2260 
2261 	task_lock(task);
2262 
2263 	assert(is_corpse_fork || get_bsdtask_info(task) != NULL);
2264 	if (task->corpse_info == NULL && (is_corpse_fork || get_bsdtask_info(task) != NULL)) {
2265 #if CONFIG_MACF
2266 		/* Set the crash label, used by the exception delivery mac hook */
2267 		free_label = get_task_crash_label(task);         // Most likely NULL.
2268 		set_task_crash_label(task, label);
2269 		mac_exc_update_task_crash_label(task, crash_label);
2270 #endif
2271 		task_unlock(task);
2272 
2273 		crash_data_kernel = kalloc_data(CORPSEINFO_ALLOCATION_SIZE,
2274 		    Z_WAITOK | Z_ZERO);
2275 		if (crash_data_kernel == NULL) {
2276 			kr = KERN_RESOURCE_SHORTAGE;
2277 			goto out_no_lock;
2278 		}
2279 		crash_data_ptr = (mach_vm_offset_t) crash_data_kernel;
2280 
2281 		/* Do not get a corpse ref for corpse fork */
2282 		crash_data = task_crashinfo_alloc_init((mach_vm_address_t)crash_data_ptr, size,
2283 		    is_corpse_fork ? 0 : CORPSE_CRASHINFO_HAS_REF,
2284 		    KCFLAG_USE_MEMCOPY);
2285 		if (crash_data) {
2286 			task_lock(task);
2287 			crash_data_release = task->corpse_info;
2288 			crash_data_kernel_release = kcdata_memory_get_begin_addr(crash_data_release);
2289 			task->corpse_info = crash_data;
2290 
2291 			task_unlock(task);
2292 			kr = KERN_SUCCESS;
2293 		} else {
2294 			kfree_data(crash_data_kernel,
2295 			    CORPSEINFO_ALLOCATION_SIZE);
2296 			kr = KERN_FAILURE;
2297 		}
2298 
2299 		if (crash_data_release != NULL) {
2300 			task_crashinfo_destroy(crash_data_release);
2301 		}
2302 		kfree_data(crash_data_kernel_release, CORPSEINFO_ALLOCATION_SIZE);
2303 	} else {
2304 		task_unlock(task);
2305 	}
2306 
2307 out_no_lock:
2308 #if CONFIG_MACF
2309 	if (free_label != NULL) {
2310 		mac_exc_free_label(free_label);
2311 	}
2312 #endif
2313 	return kr;
2314 }
2315 
2316 /*
2317  * task_deliver_crash_notification:
2318  *
2319  * Makes outcall to registered host port for a corpse.
2320  */
2321 kern_return_t
task_deliver_crash_notification(task_t corpse,thread_t thread,exception_type_t etype,mach_exception_subcode_t subcode)2322 task_deliver_crash_notification(
2323 	task_t corpse, /* corpse or corpse fork */
2324 	thread_t thread,
2325 	exception_type_t etype,
2326 	mach_exception_subcode_t subcode)
2327 {
2328 	kcdata_descriptor_t crash_info = corpse->corpse_info;
2329 	thread_t th_iter = NULL;
2330 	kern_return_t kr = KERN_SUCCESS;
2331 	wait_interrupt_t wsave;
2332 	mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
2333 	ipc_port_t corpse_port;
2334 
2335 	if (crash_info == NULL) {
2336 		return KERN_FAILURE;
2337 	}
2338 
2339 	assert(task_is_a_corpse(corpse));
2340 
2341 	task_lock(corpse);
2342 
2343 	/*
2344 	 * Always populate code[0] as the effective exception type for EXC_CORPSE_NOTIFY.
2345 	 * Crash reporters should derive whether it's fatal from corpse blob.
2346 	 */
2347 	code[0] = etype;
2348 	code[1] = subcode;
2349 
2350 	queue_iterate(&corpse->threads, th_iter, thread_t, task_threads)
2351 	{
2352 		if (th_iter->corpse_dup == FALSE) {
2353 			ipc_thread_reset(th_iter);
2354 		}
2355 	}
2356 	task_unlock(corpse);
2357 
2358 	/* Arm the no-sender notification for taskport */
2359 	task_reference(corpse);
2360 	corpse_port = convert_corpse_to_port_and_nsrequest(corpse);
2361 
2362 	wsave = thread_interrupt_level(THREAD_UNINT);
2363 	kr = exception_triage_thread(EXC_CORPSE_NOTIFY, code, EXCEPTION_CODE_MAX, thread);
2364 	if (kr != KERN_SUCCESS) {
2365 		printf("Failed to send exception EXC_CORPSE_NOTIFY. error code: %d for pid %d\n", kr, task_pid(corpse));
2366 	}
2367 
2368 	(void)thread_interrupt_level(wsave);
2369 
2370 	/*
2371 	 * Drop the send right on corpse port, will fire the
2372 	 * no-sender notification if exception deliver failed.
2373 	 */
2374 	ipc_port_release_send(corpse_port);
2375 	return kr;
2376 }
2377 
2378 /*
2379  *	task_terminate:
2380  *
2381  *	Terminate the specified task.  See comments on thread_terminate
2382  *	(kern/thread.c) about problems with terminating the "current task."
2383  */
2384 
2385 kern_return_t
task_terminate(task_t task)2386 task_terminate(
2387 	task_t          task)
2388 {
2389 	if (task == TASK_NULL) {
2390 		return KERN_INVALID_ARGUMENT;
2391 	}
2392 
2393 	if (get_bsdtask_info(task)) {
2394 		return KERN_FAILURE;
2395 	}
2396 
2397 	return task_terminate_internal(task);
2398 }
2399 
2400 #if MACH_ASSERT
2401 extern int proc_pid(struct proc *);
2402 extern void proc_name_kdp(struct proc *p, char *buf, int size);
2403 #endif /* MACH_ASSERT */
2404 
2405 #define VM_MAP_PARTIAL_REAP 0x54  /* 0x150 */
2406 static void
task_partial_reap(task_t task,__unused int pid)2407 __unused task_partial_reap(task_t task, __unused int pid)
2408 {
2409 	unsigned int    reclaimed_resident = 0;
2410 	unsigned int    reclaimed_compressed = 0;
2411 	uint64_t        task_page_count;
2412 
2413 	task_page_count = (get_task_phys_footprint(task) / PAGE_SIZE_64);
2414 
2415 	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_MAP_PARTIAL_REAP) | DBG_FUNC_START),
2416 	    pid, task_page_count, 0, 0, 0);
2417 
2418 	vm_map_partial_reap(task->map, &reclaimed_resident, &reclaimed_compressed);
2419 
2420 	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_MAP_PARTIAL_REAP) | DBG_FUNC_END),
2421 	    pid, reclaimed_resident, reclaimed_compressed, 0, 0);
2422 }
2423 
2424 /*
2425  * task_mark_corpse:
2426  *
2427  * Mark the task as a corpse. Called by crashing thread.
2428  */
2429 kern_return_t
task_mark_corpse(task_t task)2430 task_mark_corpse(task_t task)
2431 {
2432 	kern_return_t kr = KERN_SUCCESS;
2433 	thread_t self_thread;
2434 	(void) self_thread;
2435 	wait_interrupt_t wsave;
2436 #if CONFIG_MACF
2437 	struct label *crash_label = NULL;
2438 #endif
2439 
2440 	assert(task != kernel_task);
2441 	assert(task == current_task());
2442 	assert(!task_is_a_corpse(task));
2443 
2444 #if CONFIG_MACF
2445 	crash_label = mac_exc_create_label_for_proc((struct proc*)get_bsdtask_info(task));
2446 #endif
2447 
2448 	kr = task_collect_crash_info(task,
2449 #if CONFIG_MACF
2450 	    crash_label,
2451 #endif
2452 	    FALSE);
2453 	if (kr != KERN_SUCCESS) {
2454 		goto out;
2455 	}
2456 
2457 	self_thread = current_thread();
2458 
2459 	wsave = thread_interrupt_level(THREAD_UNINT);
2460 	task_lock(task);
2461 
2462 	/*
2463 	 * Check if any other thread called task_terminate_internal
2464 	 * and made the task inactive before we could mark it for
2465 	 * corpse pending report. Bail out if the task is inactive.
2466 	 */
2467 	if (!task->active) {
2468 		kcdata_descriptor_t crash_data_release = task->corpse_info;;
2469 		void *crash_data_kernel_release = kcdata_memory_get_begin_addr(crash_data_release);;
2470 
2471 		task->corpse_info = NULL;
2472 		task_unlock(task);
2473 
2474 		if (crash_data_release != NULL) {
2475 			task_crashinfo_destroy(crash_data_release);
2476 		}
2477 		kfree_data(crash_data_kernel_release, CORPSEINFO_ALLOCATION_SIZE);
2478 		return KERN_TERMINATED;
2479 	}
2480 
2481 	task_set_corpse_pending_report(task);
2482 	task_set_corpse(task);
2483 	task->crashed_thread_id = thread_tid(self_thread);
2484 
2485 	kr = task_start_halt_locked(task, TRUE);
2486 	assert(kr == KERN_SUCCESS);
2487 
2488 	task_set_uniqueid(task);
2489 
2490 	task_unlock(task);
2491 
2492 	/*
2493 	 * ipc_task_reset() moved to last thread_terminate_self(): rdar://75737960.
2494 	 * disable old ports here instead.
2495 	 *
2496 	 * The vm_map and ipc_space must exist until this function returns,
2497 	 * convert_port_to_{map,space}_with_flavor relies on this behavior.
2498 	 */
2499 	ipc_task_disable(task);
2500 
2501 	/* terminate the ipc space */
2502 	ipc_space_terminate(task->itk_space);
2503 
2504 	/* Add it to global corpse task list */
2505 	task_add_to_corpse_task_list(task);
2506 
2507 	thread_terminate_internal(self_thread);
2508 
2509 	(void) thread_interrupt_level(wsave);
2510 	assert(task->halting == TRUE);
2511 
2512 out:
2513 #if CONFIG_MACF
2514 	mac_exc_free_label(crash_label);
2515 #endif
2516 	return kr;
2517 }
2518 
2519 /*
2520  *	task_set_uniqueid
2521  *
2522  *	Set task uniqueid to systemwide unique 64 bit value
2523  */
2524 void
task_set_uniqueid(task_t task)2525 task_set_uniqueid(task_t task)
2526 {
2527 	task->task_uniqueid = OSIncrementAtomic64(&next_taskuniqueid);
2528 }
2529 
2530 /*
2531  *	task_clear_corpse
2532  *
2533  *	Clears the corpse pending bit on task.
2534  *	Removes inspection bit on the threads.
2535  */
2536 void
task_clear_corpse(task_t task)2537 task_clear_corpse(task_t task)
2538 {
2539 	thread_t th_iter = NULL;
2540 
2541 	task_lock(task);
2542 	queue_iterate(&task->threads, th_iter, thread_t, task_threads)
2543 	{
2544 		thread_mtx_lock(th_iter);
2545 		th_iter->inspection = FALSE;
2546 		ipc_thread_disable(th_iter);
2547 		thread_mtx_unlock(th_iter);
2548 	}
2549 
2550 	thread_terminate_crashed_threads();
2551 	/* remove the pending corpse report flag */
2552 	task_clear_corpse_pending_report(task);
2553 
2554 	task_unlock(task);
2555 }
2556 
2557 /*
2558  *	task_port_no_senders
2559  *
2560  *	Called whenever the Mach port system detects no-senders on
2561  *	the task port of a corpse.
2562  *	Each notification that comes in should terminate the task (corpse).
2563  */
2564 static void
task_port_no_senders(ipc_port_t port,__unused mach_port_mscount_t mscount)2565 task_port_no_senders(ipc_port_t port, __unused mach_port_mscount_t mscount)
2566 {
2567 	task_t task = ipc_kobject_get_locked(port, IKOT_TASK_CONTROL);
2568 
2569 	assert(task != TASK_NULL);
2570 	assert(task_is_a_corpse(task));
2571 
2572 	/* Remove the task from global corpse task list */
2573 	task_remove_from_corpse_task_list(task);
2574 
2575 	task_clear_corpse(task);
2576 	vm_map_unset_corpse_source(task->map);
2577 	task_terminate_internal(task);
2578 }
2579 
2580 /*
2581  *	task_port_with_flavor_no_senders
2582  *
2583  *	Called whenever the Mach port system detects no-senders on
2584  *	the task inspect or read port. These ports are allocated lazily and
2585  *	should be deallocated here when there are no senders remaining.
2586  */
2587 static void
task_port_with_flavor_no_senders(ipc_port_t port,mach_port_mscount_t mscount __unused)2588 task_port_with_flavor_no_senders(
2589 	ipc_port_t          port,
2590 	mach_port_mscount_t mscount __unused)
2591 {
2592 	task_t task;
2593 	mach_task_flavor_t flavor;
2594 	ipc_kobject_type_t kotype;
2595 
2596 	ip_mq_lock(port);
2597 	if (port->ip_srights > 0) {
2598 		ip_mq_unlock(port);
2599 		return;
2600 	}
2601 	kotype = ip_kotype(port);
2602 	assert((IKOT_TASK_READ == kotype) || (IKOT_TASK_INSPECT == kotype));
2603 	task = ipc_kobject_get_locked(port, kotype);
2604 	if (task != TASK_NULL) {
2605 		task_reference(task);
2606 	}
2607 	ip_mq_unlock(port);
2608 
2609 	if (task == TASK_NULL) {
2610 		/* The task is exiting or disabled; it will eventually deallocate the port */
2611 		return;
2612 	}
2613 
2614 	if (kotype == IKOT_TASK_READ) {
2615 		flavor = TASK_FLAVOR_READ;
2616 	} else {
2617 		flavor = TASK_FLAVOR_INSPECT;
2618 	}
2619 
2620 	itk_lock(task);
2621 	ip_mq_lock(port);
2622 
2623 	/*
2624 	 * If the port is no longer active, then ipc_task_terminate() ran
2625 	 * and destroyed the kobject already. Just deallocate the task
2626 	 * ref we took and go away.
2627 	 *
2628 	 * It is also possible that several nsrequests are in flight,
2629 	 * only one shall NULL-out the port entry, and this is the one
2630 	 * that gets to dealloc the port.
2631 	 *
2632 	 * Check for a stale no-senders notification. A call to any function
2633 	 * that vends out send rights to this port could resurrect it between
2634 	 * this notification being generated and actually being handled here.
2635 	 */
2636 	if (!ip_active(port) ||
2637 	    task->itk_task_ports[flavor] != port ||
2638 	    port->ip_srights > 0) {
2639 		ip_mq_unlock(port);
2640 		itk_unlock(task);
2641 		task_deallocate(task);
2642 		return;
2643 	}
2644 
2645 	assert(task->itk_task_ports[flavor] == port);
2646 	task->itk_task_ports[flavor] = IP_NULL;
2647 	itk_unlock(task);
2648 
2649 	ipc_kobject_dealloc_port_and_unlock(port, 0, kotype);
2650 
2651 	task_deallocate(task);
2652 }
2653 
2654 /*
2655  *	task_wait_till_threads_terminate_locked
2656  *
2657  *	Wait till all the threads in the task are terminated.
2658  *	Might release the task lock and re-acquire it.
2659  */
2660 void
task_wait_till_threads_terminate_locked(task_t task)2661 task_wait_till_threads_terminate_locked(task_t task)
2662 {
2663 	/* wait for all the threads in the task to terminate */
2664 	while (task->active_thread_count != 0) {
2665 		assert_wait((event_t)&task->active_thread_count, THREAD_UNINT);
2666 		task_unlock(task);
2667 		thread_block(THREAD_CONTINUE_NULL);
2668 
2669 		task_lock(task);
2670 	}
2671 }
2672 
2673 /*
2674  *	task_duplicate_map_and_threads
2675  *
2676  *	Copy vmmap of source task.
2677  *	Copy active threads from source task to destination task.
2678  *	Source task would be suspended during the copy.
2679  */
2680 kern_return_t
task_duplicate_map_and_threads(task_t task,void * p,task_t new_task,thread_t * thread_ret,uint64_t ** udata_buffer,int * size,int * num_udata,bool for_exception)2681 task_duplicate_map_and_threads(
2682 	task_t task,
2683 	void *p,
2684 	task_t new_task,
2685 	thread_t *thread_ret,
2686 	uint64_t **udata_buffer,
2687 	int *size,
2688 	int *num_udata,
2689 	bool for_exception)
2690 {
2691 	kern_return_t kr = KERN_SUCCESS;
2692 	int active;
2693 	thread_t thread, self, thread_return = THREAD_NULL;
2694 	thread_t new_thread = THREAD_NULL, first_thread = THREAD_NULL;
2695 	thread_t *thread_array;
2696 	uint32_t active_thread_count = 0, array_count = 0, i;
2697 	vm_map_t oldmap;
2698 	uint64_t *buffer = NULL;
2699 	int buf_size = 0;
2700 	int est_knotes = 0, num_knotes = 0;
2701 
2702 	self = current_thread();
2703 
2704 	/*
2705 	 * Suspend the task to copy thread state, use the internal
2706 	 * variant so that no user-space process can resume
2707 	 * the task from under us
2708 	 */
2709 	kr = task_suspend_internal(task);
2710 	if (kr != KERN_SUCCESS) {
2711 		return kr;
2712 	}
2713 
2714 	if (task->map->disable_vmentry_reuse == TRUE) {
2715 		/*
2716 		 * Quite likely GuardMalloc (or some debugging tool)
2717 		 * is being used on this task. And it has gone through
2718 		 * its limit. Making a corpse will likely encounter
2719 		 * a lot of VM entries that will need COW.
2720 		 *
2721 		 * Skip it.
2722 		 */
2723 #if DEVELOPMENT || DEBUG
2724 		memorystatus_abort_vm_map_fork(task);
2725 #endif
2726 		ktriage_record(thread_tid(self), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_CORPSE, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_CORPSE_FAIL_LIBGMALLOC), 0 /* arg */);
2727 		task_resume_internal(task);
2728 		return KERN_FAILURE;
2729 	}
2730 
2731 	/* Check with VM if vm_map_fork is allowed for this task */
2732 	bool is_large = false;
2733 	if (memorystatus_allowed_vm_map_fork(task, &is_large)) {
2734 		/* Setup new task's vmmap, switch from parent task's map to it COW map */
2735 		oldmap = new_task->map;
2736 		new_task->map = vm_map_fork(new_task->ledger,
2737 		    task->map,
2738 		    (VM_MAP_FORK_SHARE_IF_INHERIT_NONE |
2739 		    VM_MAP_FORK_PRESERVE_PURGEABLE |
2740 		    VM_MAP_FORK_CORPSE_FOOTPRINT));
2741 		if (new_task->map) {
2742 			new_task->is_large_corpse = is_large;
2743 			vm_map_deallocate(oldmap);
2744 
2745 			/* copy ledgers that impact the memory footprint */
2746 			vm_map_copy_footprint_ledgers(task, new_task);
2747 
2748 			/* Get all the udata pointers from kqueue */
2749 			est_knotes = kevent_proc_copy_uptrs(p, NULL, 0);
2750 			if (est_knotes > 0) {
2751 				buf_size = (est_knotes + 32) * sizeof(uint64_t);
2752 				buffer = kalloc_data(buf_size, Z_WAITOK);
2753 				num_knotes = kevent_proc_copy_uptrs(p, buffer, buf_size);
2754 				if (num_knotes > est_knotes + 32) {
2755 					num_knotes = est_knotes + 32;
2756 				}
2757 			}
2758 		} else {
2759 			if (is_large) {
2760 				assert(large_corpse_count > 0);
2761 				OSDecrementAtomic(&large_corpse_count);
2762 			}
2763 			new_task->map = oldmap;
2764 #if DEVELOPMENT || DEBUG
2765 			memorystatus_abort_vm_map_fork(task);
2766 #endif
2767 			task_resume_internal(task);
2768 			return KERN_NO_SPACE;
2769 		}
2770 	} else if (!for_exception) {
2771 #if DEVELOPMENT || DEBUG
2772 		memorystatus_abort_vm_map_fork(task);
2773 #endif
2774 		task_resume_internal(task);
2775 		return KERN_NO_SPACE;
2776 	}
2777 
2778 	active_thread_count = task->active_thread_count;
2779 	if (active_thread_count == 0) {
2780 		kfree_data(buffer, buf_size);
2781 		task_resume_internal(task);
2782 		return KERN_FAILURE;
2783 	}
2784 
2785 	thread_array = kalloc_type(thread_t, active_thread_count, Z_WAITOK);
2786 
2787 	/* Iterate all the threads and drop the task lock before calling thread_create_with_continuation */
2788 	task_lock(task);
2789 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
2790 		/* Skip inactive threads */
2791 		active = thread->active;
2792 		if (!active) {
2793 			continue;
2794 		}
2795 
2796 		if (array_count >= active_thread_count) {
2797 			break;
2798 		}
2799 
2800 		thread_array[array_count++] = thread;
2801 		thread_reference(thread);
2802 	}
2803 	task_unlock(task);
2804 
2805 	for (i = 0; i < array_count; i++) {
2806 		kr = thread_create_with_continuation(new_task, &new_thread, (thread_continue_t)thread_corpse_continue);
2807 		if (kr != KERN_SUCCESS) {
2808 			break;
2809 		}
2810 
2811 		/* Equivalent of current thread in corpse */
2812 		if (thread_array[i] == self) {
2813 			thread_return = new_thread;
2814 			new_task->crashed_thread_id = thread_tid(new_thread);
2815 		} else if (first_thread == NULL) {
2816 			first_thread = new_thread;
2817 		} else {
2818 			/* drop the extra ref returned by thread_create_with_continuation */
2819 			thread_deallocate(new_thread);
2820 		}
2821 
2822 		kr = thread_dup2(thread_array[i], new_thread);
2823 		if (kr != KERN_SUCCESS) {
2824 			thread_mtx_lock(new_thread);
2825 			new_thread->corpse_dup = TRUE;
2826 			thread_mtx_unlock(new_thread);
2827 			continue;
2828 		}
2829 
2830 		/* Copy thread name */
2831 		bsd_copythreadname(get_bsdthread_info(new_thread),
2832 		    get_bsdthread_info(thread_array[i]));
2833 		new_thread->thread_tag = thread_array[i]->thread_tag &
2834 		    ~THREAD_TAG_USER_JOIN;
2835 		thread_copy_resource_info(new_thread, thread_array[i]);
2836 	}
2837 
2838 	/* return the first thread if we couldn't find the equivalent of current */
2839 	if (thread_return == THREAD_NULL) {
2840 		thread_return = first_thread;
2841 	} else if (first_thread != THREAD_NULL) {
2842 		/* drop the extra ref returned by thread_create_with_continuation */
2843 		thread_deallocate(first_thread);
2844 	}
2845 
2846 	task_resume_internal(task);
2847 
2848 	for (i = 0; i < array_count; i++) {
2849 		thread_deallocate(thread_array[i]);
2850 	}
2851 	kfree_type(thread_t, active_thread_count, thread_array);
2852 
2853 	if (kr == KERN_SUCCESS) {
2854 		*thread_ret = thread_return;
2855 		*udata_buffer = buffer;
2856 		*size = buf_size;
2857 		*num_udata = num_knotes;
2858 	} else {
2859 		if (thread_return != THREAD_NULL) {
2860 			thread_deallocate(thread_return);
2861 		}
2862 		kfree_data(buffer, buf_size);
2863 	}
2864 
2865 	return kr;
2866 }
2867 
2868 #if CONFIG_SECLUDED_MEMORY
2869 extern void task_set_can_use_secluded_mem_locked(
2870 	task_t          task,
2871 	boolean_t       can_use_secluded_mem);
2872 #endif /* CONFIG_SECLUDED_MEMORY */
2873 
2874 #if MACH_ASSERT
2875 int debug4k_panic_on_terminate = 0;
2876 #endif /* MACH_ASSERT */
2877 kern_return_t
task_terminate_internal(task_t task)2878 task_terminate_internal(
2879 	task_t                  task)
2880 {
2881 	thread_t                        thread, self;
2882 	task_t                          self_task;
2883 	boolean_t                       interrupt_save;
2884 	int                             pid = 0;
2885 
2886 	assert(task != kernel_task);
2887 
2888 	self = current_thread();
2889 	self_task = current_task();
2890 
2891 	/*
2892 	 *	Get the task locked and make sure that we are not racing
2893 	 *	with someone else trying to terminate us.
2894 	 */
2895 	if (task == self_task) {
2896 		task_lock(task);
2897 	} else if (task < self_task) {
2898 		task_lock(task);
2899 		task_lock(self_task);
2900 	} else {
2901 		task_lock(self_task);
2902 		task_lock(task);
2903 	}
2904 
2905 #if CONFIG_SECLUDED_MEMORY
2906 	if (task->task_can_use_secluded_mem) {
2907 		task_set_can_use_secluded_mem_locked(task, FALSE);
2908 	}
2909 	task->task_could_use_secluded_mem = FALSE;
2910 	task->task_could_also_use_secluded_mem = FALSE;
2911 
2912 	if (task->task_suppressed_secluded) {
2913 		stop_secluded_suppression(task);
2914 	}
2915 #endif /* CONFIG_SECLUDED_MEMORY */
2916 
2917 	if (!task->active) {
2918 		/*
2919 		 *	Task is already being terminated.
2920 		 *	Just return an error. If we are dying, this will
2921 		 *	just get us to our AST special handler and that
2922 		 *	will get us to finalize the termination of ourselves.
2923 		 */
2924 		task_unlock(task);
2925 		if (self_task != task) {
2926 			task_unlock(self_task);
2927 		}
2928 
2929 		return KERN_FAILURE;
2930 	}
2931 
2932 	if (task_corpse_pending_report(task)) {
2933 		/*
2934 		 *	Task is marked for reporting as corpse.
2935 		 *	Just return an error. This will
2936 		 *	just get us to our AST special handler and that
2937 		 *	will get us to finish the path to death
2938 		 */
2939 		task_unlock(task);
2940 		if (self_task != task) {
2941 			task_unlock(self_task);
2942 		}
2943 
2944 		return KERN_FAILURE;
2945 	}
2946 
2947 	if (self_task != task) {
2948 		task_unlock(self_task);
2949 	}
2950 
2951 	/*
2952 	 * Make sure the current thread does not get aborted out of
2953 	 * the waits inside these operations.
2954 	 */
2955 	interrupt_save = thread_interrupt_level(THREAD_UNINT);
2956 
2957 	/*
2958 	 *	Indicate that we want all the threads to stop executing
2959 	 *	at user space by holding the task (we would have held
2960 	 *	each thread independently in thread_terminate_internal -
2961 	 *	but this way we may be more likely to already find it
2962 	 *	held there).  Mark the task inactive, and prevent
2963 	 *	further task operations via the task port.
2964 	 *
2965 	 *	The vm_map and ipc_space must exist until this function returns,
2966 	 *	convert_port_to_{map,space}_with_flavor relies on this behavior.
2967 	 */
2968 	task_hold_locked(task);
2969 	task->active = FALSE;
2970 	ipc_task_disable(task);
2971 
2972 #if CONFIG_TELEMETRY
2973 	/*
2974 	 * Notify telemetry that this task is going away.
2975 	 */
2976 	telemetry_task_ctl_locked(task, TF_TELEMETRY, 0);
2977 #endif
2978 
2979 	/*
2980 	 *	Terminate each thread in the task.
2981 	 */
2982 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
2983 		thread_terminate_internal(thread);
2984 	}
2985 
2986 #ifdef MACH_BSD
2987 	void *bsd_info = get_bsdtask_info(task);
2988 	if (bsd_info != NULL) {
2989 		pid = proc_pid(bsd_info);
2990 	}
2991 #endif /* MACH_BSD */
2992 
2993 	task_unlock(task);
2994 
2995 	proc_set_task_policy(task, TASK_POLICY_ATTRIBUTE,
2996 	    TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
2997 
2998 	/* Early object reap phase */
2999 
3000 // PR-17045188: Revisit implementation
3001 //        task_partial_reap(task, pid);
3002 
3003 #if CONFIG_TASKWATCH
3004 	/*
3005 	 * remove all task watchers
3006 	 */
3007 	task_removewatchers(task);
3008 
3009 #endif /* CONFIG_TASKWATCH */
3010 
3011 	/*
3012 	 *	Destroy all synchronizers owned by the task.
3013 	 */
3014 	task_synchronizer_destroy_all(task);
3015 
3016 	/*
3017 	 *	Clear the watchport boost on the task.
3018 	 */
3019 	task_remove_turnstile_watchports(task);
3020 
3021 	/*
3022 	 *	Destroy the IPC space, leaving just a reference for it.
3023 	 */
3024 	ipc_space_terminate(task->itk_space);
3025 
3026 #if 00
3027 	/* if some ledgers go negative on tear-down again... */
3028 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
3029 	    task_ledgers.phys_footprint);
3030 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
3031 	    task_ledgers.internal);
3032 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
3033 	    task_ledgers.iokit_mapped);
3034 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
3035 	    task_ledgers.alternate_accounting);
3036 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
3037 	    task_ledgers.alternate_accounting_compressed);
3038 #endif
3039 
3040 #if CONFIG_DEFERRED_RECLAIM
3041 	/*
3042 	 * Remove this tasks reclaim buffer from global queues.
3043 	 */
3044 	if (task->deferred_reclamation_metadata != NULL) {
3045 		vm_deferred_reclamation_buffer_uninstall(task->deferred_reclamation_metadata);
3046 	}
3047 #endif /* CONFIG_DEFERRED_RECLAIM */
3048 
3049 	/*
3050 	 * If the current thread is a member of the task
3051 	 * being terminated, then the last reference to
3052 	 * the task will not be dropped until the thread
3053 	 * is finally reaped.  To avoid incurring the
3054 	 * expense of removing the address space regions
3055 	 * at reap time, we do it explictly here.
3056 	 */
3057 
3058 #if MACH_ASSERT
3059 	/*
3060 	 * Identify the pmap's process, in case the pmap ledgers drift
3061 	 * and we have to report it.
3062 	 */
3063 	char procname[17];
3064 	void *proc = get_bsdtask_info(task);
3065 	if (proc) {
3066 		pid = proc_pid(proc);
3067 		proc_name_kdp(proc, procname, sizeof(procname));
3068 	} else {
3069 		pid = 0;
3070 		strlcpy(procname, "<unknown>", sizeof(procname));
3071 	}
3072 	pmap_set_process(task->map->pmap, pid, procname);
3073 	if (vm_map_page_shift(task->map) < (int)PAGE_SHIFT) {
3074 		DEBUG4K_LIFE("map %p procname: %s\n", task->map, procname);
3075 		if (debug4k_panic_on_terminate) {
3076 			panic("DEBUG4K: %s:%d %d[%s] map %p", __FUNCTION__, __LINE__, pid, procname, task->map);
3077 		}
3078 	}
3079 #endif /* MACH_ASSERT */
3080 
3081 	vm_map_terminate(task->map);
3082 
3083 	/* release our shared region */
3084 	vm_shared_region_set(task, NULL);
3085 
3086 #if __has_feature(ptrauth_calls)
3087 	task_set_shared_region_id(task, NULL);
3088 #endif /* __has_feature(ptrauth_calls) */
3089 
3090 	lck_mtx_lock(&tasks_threads_lock);
3091 	queue_remove(&tasks, task, task_t, tasks);
3092 	queue_enter(&terminated_tasks, task, task_t, tasks);
3093 	tasks_count--;
3094 	terminated_tasks_count++;
3095 	lck_mtx_unlock(&tasks_threads_lock);
3096 
3097 	/*
3098 	 * We no longer need to guard against being aborted, so restore
3099 	 * the previous interruptible state.
3100 	 */
3101 	thread_interrupt_level(interrupt_save);
3102 
3103 #if KPC
3104 	/* force the task to release all ctrs */
3105 	if (task->t_kpc & TASK_KPC_FORCED_ALL_CTRS) {
3106 		kpc_force_all_ctrs(task, 0);
3107 	}
3108 #endif /* KPC */
3109 
3110 #if CONFIG_COALITIONS
3111 	/*
3112 	 * Leave the coalition for corpse task or task that
3113 	 * never had any active threads (e.g. fork, exec failure).
3114 	 * For task with active threads, the task will be removed
3115 	 * from coalition by last terminating thread.
3116 	 */
3117 	if (task->active_thread_count == 0) {
3118 		coalitions_remove_task(task);
3119 	}
3120 #endif
3121 
3122 #if CONFIG_FREEZE
3123 	extern int      vm_compressor_available;
3124 	if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE && vm_compressor_available) {
3125 		task_disown_frozen_csegs(task);
3126 		assert(queue_empty(&task->task_frozen_cseg_q));
3127 	}
3128 #endif /* CONFIG_FREEZE */
3129 
3130 
3131 	/*
3132 	 * Get rid of the task active reference on itself.
3133 	 */
3134 	task_deallocate_grp(task, TASK_GRP_INTERNAL);
3135 
3136 	return KERN_SUCCESS;
3137 }
3138 
3139 void
tasks_system_suspend(boolean_t suspend)3140 tasks_system_suspend(boolean_t suspend)
3141 {
3142 	task_t task;
3143 
3144 	KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SUSPEND_USERSPACE) |
3145 	    (suspend ? DBG_FUNC_START : DBG_FUNC_END));
3146 
3147 	lck_mtx_lock(&tasks_threads_lock);
3148 	assert(tasks_suspend_state != suspend);
3149 	tasks_suspend_state = suspend;
3150 	queue_iterate(&tasks, task, task_t, tasks) {
3151 		if (task == kernel_task) {
3152 			continue;
3153 		}
3154 		suspend ? task_suspend_internal(task) : task_resume_internal(task);
3155 	}
3156 	lck_mtx_unlock(&tasks_threads_lock);
3157 }
3158 
3159 /*
3160  * task_start_halt:
3161  *
3162  *      Shut the current task down (except for the current thread) in
3163  *	preparation for dramatic changes to the task (probably exec).
3164  *	We hold the task and mark all other threads in the task for
3165  *	termination.
3166  */
3167 kern_return_t
task_start_halt(task_t task)3168 task_start_halt(task_t task)
3169 {
3170 	kern_return_t kr = KERN_SUCCESS;
3171 	task_lock(task);
3172 	kr = task_start_halt_locked(task, FALSE);
3173 	task_unlock(task);
3174 	return kr;
3175 }
3176 
3177 static kern_return_t
task_start_halt_locked(task_t task,boolean_t should_mark_corpse)3178 task_start_halt_locked(task_t task, boolean_t should_mark_corpse)
3179 {
3180 	thread_t thread, self;
3181 	uint64_t dispatchqueue_offset;
3182 
3183 	assert(task != kernel_task);
3184 
3185 	self = current_thread();
3186 
3187 	if (task != get_threadtask(self) && !task_is_a_corpse_fork(task)) {
3188 		return KERN_INVALID_ARGUMENT;
3189 	}
3190 
3191 	if (!should_mark_corpse &&
3192 	    (task->halting || !task->active || !self->active)) {
3193 		/*
3194 		 * Task or current thread is already being terminated.
3195 		 * Hurry up and return out of the current kernel context
3196 		 * so that we run our AST special handler to terminate
3197 		 * ourselves. If should_mark_corpse is set, the corpse
3198 		 * creation might have raced with exec, let the corpse
3199 		 * creation continue, once the current thread reaches AST
3200 		 * thread in exec will be woken up from task_complete_halt.
3201 		 * Exec will fail cause the proc was marked for exit.
3202 		 * Once the thread in exec reaches AST, it will call proc_exit
3203 		 * and deliver the EXC_CORPSE_NOTIFY.
3204 		 */
3205 		return KERN_FAILURE;
3206 	}
3207 
3208 	/* Thread creation will fail after this point of no return. */
3209 	task->halting = TRUE;
3210 
3211 	/*
3212 	 * Mark all the threads to keep them from starting any more
3213 	 * user-level execution. The thread_terminate_internal code
3214 	 * would do this on a thread by thread basis anyway, but this
3215 	 * gives us a better chance of not having to wait there.
3216 	 */
3217 	task_hold_locked(task);
3218 	dispatchqueue_offset = get_dispatchqueue_offset_from_proc(get_bsdtask_info(task));
3219 
3220 	/*
3221 	 * Terminate all the other threads in the task.
3222 	 */
3223 	queue_iterate(&task->threads, thread, thread_t, task_threads)
3224 	{
3225 		/*
3226 		 * Remove priority throttles for threads to terminate timely. This has
3227 		 * to be done after task_hold_locked() traps all threads to AST, but before
3228 		 * threads are marked inactive in thread_terminate_internal(). Takes thread
3229 		 * mutex lock.
3230 		 *
3231 		 * We need task_is_a_corpse() check so that we don't accidently update policy
3232 		 * for tasks that are doing posix_spawn().
3233 		 *
3234 		 * See: thread_policy_update_tasklocked().
3235 		 */
3236 		if (task_is_a_corpse(task)) {
3237 			proc_set_thread_policy(thread, TASK_POLICY_ATTRIBUTE,
3238 			    TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
3239 		}
3240 
3241 		if (should_mark_corpse) {
3242 			thread_mtx_lock(thread);
3243 			thread->inspection = TRUE;
3244 			thread_mtx_unlock(thread);
3245 		}
3246 		if (thread != self) {
3247 			thread_terminate_internal(thread);
3248 		}
3249 	}
3250 	task->dispatchqueue_offset = dispatchqueue_offset;
3251 
3252 	task_release_locked(task);
3253 
3254 	return KERN_SUCCESS;
3255 }
3256 
3257 
3258 /*
3259  * task_complete_halt:
3260  *
3261  *	Complete task halt by waiting for threads to terminate, then clean
3262  *	up task resources (VM, port namespace, etc...) and then let the
3263  *	current thread go in the (practically empty) task context.
3264  *
3265  *	Note: task->halting flag is not cleared in order to avoid creation
3266  *	of new thread in old exec'ed task.
3267  */
3268 void
task_complete_halt(task_t task)3269 task_complete_halt(task_t task)
3270 {
3271 	task_lock(task);
3272 	assert(task->halting);
3273 	assert(task == current_task());
3274 
3275 	/*
3276 	 *	Wait for the other threads to get shut down.
3277 	 *      When the last other thread is reaped, we'll be
3278 	 *	woken up.
3279 	 */
3280 	if (task->thread_count > 1) {
3281 		assert_wait((event_t)&task->halting, THREAD_UNINT);
3282 		task_unlock(task);
3283 		thread_block(THREAD_CONTINUE_NULL);
3284 	} else {
3285 		task_unlock(task);
3286 	}
3287 
3288 #if CONFIG_DEFERRED_RECLAIM
3289 	if (task->deferred_reclamation_metadata) {
3290 		vm_deferred_reclamation_buffer_uninstall(
3291 			task->deferred_reclamation_metadata);
3292 		vm_deferred_reclamation_buffer_deallocate(
3293 			task->deferred_reclamation_metadata);
3294 		task->deferred_reclamation_metadata = NULL;
3295 	}
3296 #endif /* CONFIG_DEFERRED_RECLAIM */
3297 
3298 	/*
3299 	 *	Give the machine dependent code a chance
3300 	 *	to perform cleanup of task-level resources
3301 	 *	associated with the current thread before
3302 	 *	ripping apart the task.
3303 	 */
3304 	machine_task_terminate(task);
3305 
3306 	/*
3307 	 *	Destroy all synchronizers owned by the task.
3308 	 */
3309 	task_synchronizer_destroy_all(task);
3310 
3311 	/*
3312 	 *	Terminate the IPC space.  A long time ago,
3313 	 *	this used to be ipc_space_clean() which would
3314 	 *	keep the space active but hollow it.
3315 	 *
3316 	 *	We really do not need this semantics given
3317 	 *	tasks die with exec now.
3318 	 */
3319 	ipc_space_terminate(task->itk_space);
3320 
3321 	/*
3322 	 * Clean out the address space, as we are going to be
3323 	 * getting a new one.
3324 	 */
3325 	vm_map_terminate(task->map);
3326 
3327 	/*
3328 	 * Kick out any IOKitUser handles to the task. At best they're stale,
3329 	 * at worst someone is racing a SUID exec.
3330 	 */
3331 	iokit_task_terminate(task);
3332 }
3333 
3334 #ifdef CONFIG_TASK_SUSPEND_STATS
3335 
3336 static void
_task_mark_suspend_source(task_t task)3337 _task_mark_suspend_source(task_t task)
3338 {
3339 	int idx;
3340 	task_suspend_stats_t stats;
3341 	task_suspend_source_t source;
3342 	task_lock_assert_owned(task);
3343 	stats = &task->t_suspend_stats;
3344 
3345 	idx = stats->tss_count % TASK_SUSPEND_SOURCES_MAX;
3346 	source = &task->t_suspend_sources[idx];
3347 	bzero(source, sizeof(*source));
3348 
3349 	source->tss_time = mach_absolute_time();
3350 	source->tss_tid = current_thread()->thread_id;
3351 	source->tss_pid = task_pid(current_task());
3352 	task_best_name(current_task(), source->tss_procname, sizeof(source->tss_procname));
3353 
3354 	stats->tss_count++;
3355 }
3356 
3357 static inline void
_task_mark_suspend_start(task_t task)3358 _task_mark_suspend_start(task_t task)
3359 {
3360 	task_lock_assert_owned(task);
3361 	task->t_suspend_stats.tss_last_start = mach_absolute_time();
3362 }
3363 
3364 static inline void
_task_mark_suspend_end(task_t task)3365 _task_mark_suspend_end(task_t task)
3366 {
3367 	task_lock_assert_owned(task);
3368 	task->t_suspend_stats.tss_last_end = mach_absolute_time();
3369 	task->t_suspend_stats.tss_duration += (task->t_suspend_stats.tss_last_end -
3370 	    task->t_suspend_stats.tss_last_start);
3371 }
3372 
3373 static kern_return_t
_task_get_suspend_stats_locked(task_t task,task_suspend_stats_t stats)3374 _task_get_suspend_stats_locked(task_t task, task_suspend_stats_t stats)
3375 {
3376 	if (task == TASK_NULL || stats == NULL) {
3377 		return KERN_INVALID_ARGUMENT;
3378 	}
3379 	task_lock_assert_owned(task);
3380 	memcpy(stats, &task->t_suspend_stats, sizeof(task->t_suspend_stats));
3381 	return KERN_SUCCESS;
3382 }
3383 
3384 static kern_return_t
_task_get_suspend_sources_locked(task_t task,task_suspend_source_t sources)3385 _task_get_suspend_sources_locked(task_t task, task_suspend_source_t sources)
3386 {
3387 	if (task == TASK_NULL || sources == NULL) {
3388 		return KERN_INVALID_ARGUMENT;
3389 	}
3390 	task_lock_assert_owned(task);
3391 	memcpy(sources, task->t_suspend_sources,
3392 	    sizeof(struct task_suspend_source_s) * TASK_SUSPEND_SOURCES_MAX);
3393 	return KERN_SUCCESS;
3394 }
3395 
3396 #endif /* CONFIG_TASK_SUSPEND_STATS */
3397 
3398 kern_return_t
task_get_suspend_stats(task_t task,task_suspend_stats_t stats)3399 task_get_suspend_stats(task_t task, task_suspend_stats_t stats)
3400 {
3401 #ifdef CONFIG_TASK_SUSPEND_STATS
3402 	kern_return_t kr;
3403 	if (task == TASK_NULL || stats == NULL) {
3404 		return KERN_INVALID_ARGUMENT;
3405 	}
3406 	task_lock(task);
3407 	kr = _task_get_suspend_stats_locked(task, stats);
3408 	task_unlock(task);
3409 	return kr;
3410 #else /* CONFIG_TASK_SUSPEND_STATS */
3411 	(void)task;
3412 	(void)stats;
3413 	return KERN_NOT_SUPPORTED;
3414 #endif
3415 }
3416 
3417 kern_return_t
task_get_suspend_stats_kdp(task_t task,task_suspend_stats_t stats)3418 task_get_suspend_stats_kdp(task_t task, task_suspend_stats_t stats)
3419 {
3420 #ifdef CONFIG_TASK_SUSPEND_STATS
3421 	if (task == TASK_NULL || stats == NULL) {
3422 		return KERN_INVALID_ARGUMENT;
3423 	}
3424 	memcpy(stats, &task->t_suspend_stats, sizeof(task->t_suspend_stats));
3425 	return KERN_SUCCESS;
3426 #else /* CONFIG_TASK_SUSPEND_STATS */
3427 #pragma unused(task, stats)
3428 	return KERN_NOT_SUPPORTED;
3429 #endif /* CONFIG_TASK_SUSPEND_STATS */
3430 }
3431 
3432 kern_return_t
task_get_suspend_sources(task_t task,task_suspend_source_array_t sources)3433 task_get_suspend_sources(task_t task, task_suspend_source_array_t sources)
3434 {
3435 #ifdef CONFIG_TASK_SUSPEND_STATS
3436 	kern_return_t kr;
3437 	if (task == TASK_NULL || sources == NULL) {
3438 		return KERN_INVALID_ARGUMENT;
3439 	}
3440 	task_lock(task);
3441 	kr = _task_get_suspend_sources_locked(task, sources);
3442 	task_unlock(task);
3443 	return kr;
3444 #else /* CONFIG_TASK_SUSPEND_STATS */
3445 	(void)task;
3446 	(void)sources;
3447 	return KERN_NOT_SUPPORTED;
3448 #endif
3449 }
3450 
3451 kern_return_t
task_get_suspend_sources_kdp(task_t task,task_suspend_source_array_t sources)3452 task_get_suspend_sources_kdp(task_t task, task_suspend_source_array_t sources)
3453 {
3454 #ifdef CONFIG_TASK_SUSPEND_STATS
3455 	if (task == TASK_NULL || sources == NULL) {
3456 		return KERN_INVALID_ARGUMENT;
3457 	}
3458 	memcpy(sources, task->t_suspend_sources,
3459 	    sizeof(struct task_suspend_source_s) * TASK_SUSPEND_SOURCES_MAX);
3460 	return KERN_SUCCESS;
3461 #else /* CONFIG_TASK_SUSPEND_STATS */
3462 #pragma unused(task, sources)
3463 	return KERN_NOT_SUPPORTED;
3464 #endif
3465 }
3466 
3467 /*
3468  *	task_hold_locked:
3469  *
3470  *	Suspend execution of the specified task.
3471  *	This is a recursive-style suspension of the task, a count of
3472  *	suspends is maintained.
3473  *
3474  *	CONDITIONS: the task is locked and active.
3475  */
3476 void
task_hold_locked(task_t task)3477 task_hold_locked(
3478 	task_t          task)
3479 {
3480 	thread_t        thread;
3481 	void *bsd_info = get_bsdtask_info(task);
3482 
3483 	assert(task->active);
3484 
3485 	if (task->suspend_count++ > 0) {
3486 		return;
3487 	}
3488 
3489 	if (bsd_info) {
3490 		workq_proc_suspended(bsd_info);
3491 	}
3492 
3493 	/*
3494 	 *	Iterate through all the threads and hold them.
3495 	 */
3496 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
3497 		thread_mtx_lock(thread);
3498 		thread_hold(thread);
3499 		thread_mtx_unlock(thread);
3500 	}
3501 
3502 #ifdef CONFIG_TASK_SUSPEND_STATS
3503 	_task_mark_suspend_start(task);
3504 #endif
3505 }
3506 
3507 /*
3508  *	task_hold:
3509  *
3510  *	Same as the internal routine above, except that is must lock
3511  *	and verify that the task is active.  This differs from task_suspend
3512  *	in that it places a kernel hold on the task rather than just a
3513  *	user-level hold.  This keeps users from over resuming and setting
3514  *	it running out from under the kernel.
3515  *
3516  *      CONDITIONS: the caller holds a reference on the task
3517  */
3518 kern_return_t
task_hold(task_t task)3519 task_hold(
3520 	task_t          task)
3521 {
3522 	if (task == TASK_NULL) {
3523 		return KERN_INVALID_ARGUMENT;
3524 	}
3525 
3526 	task_lock(task);
3527 
3528 	if (!task->active) {
3529 		task_unlock(task);
3530 
3531 		return KERN_FAILURE;
3532 	}
3533 
3534 #ifdef CONFIG_TASK_SUSPEND_STATS
3535 	_task_mark_suspend_source(task);
3536 #endif /* CONFIG_TASK_SUSPEND_STATS */
3537 	task_hold_locked(task);
3538 	task_unlock(task);
3539 
3540 	return KERN_SUCCESS;
3541 }
3542 
3543 kern_return_t
task_wait(task_t task,boolean_t until_not_runnable)3544 task_wait(
3545 	task_t          task,
3546 	boolean_t       until_not_runnable)
3547 {
3548 	if (task == TASK_NULL) {
3549 		return KERN_INVALID_ARGUMENT;
3550 	}
3551 
3552 	task_lock(task);
3553 
3554 	if (!task->active) {
3555 		task_unlock(task);
3556 
3557 		return KERN_FAILURE;
3558 	}
3559 
3560 	task_wait_locked(task, until_not_runnable);
3561 	task_unlock(task);
3562 
3563 	return KERN_SUCCESS;
3564 }
3565 
3566 /*
3567  *	task_wait_locked:
3568  *
3569  *	Wait for all threads in task to stop.
3570  *
3571  * Conditions:
3572  *	Called with task locked, active, and held.
3573  */
3574 void
task_wait_locked(task_t task,boolean_t until_not_runnable)3575 task_wait_locked(
3576 	task_t          task,
3577 	boolean_t               until_not_runnable)
3578 {
3579 	thread_t        thread, self;
3580 
3581 	assert(task->active);
3582 	assert(task->suspend_count > 0);
3583 
3584 	self = current_thread();
3585 
3586 	/*
3587 	 *	Iterate through all the threads and wait for them to
3588 	 *	stop.  Do not wait for the current thread if it is within
3589 	 *	the task.
3590 	 */
3591 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
3592 		if (thread != self) {
3593 			thread_wait(thread, until_not_runnable);
3594 		}
3595 	}
3596 }
3597 
3598 boolean_t
task_is_app_suspended(task_t task)3599 task_is_app_suspended(task_t task)
3600 {
3601 	return task->pidsuspended;
3602 }
3603 
3604 /*
3605  *	task_release_locked:
3606  *
3607  *	Release a kernel hold on a task.
3608  *
3609  *      CONDITIONS: the task is locked and active
3610  */
3611 void
task_release_locked(task_t task)3612 task_release_locked(
3613 	task_t          task)
3614 {
3615 	thread_t        thread;
3616 	void *bsd_info = get_bsdtask_info(task);
3617 
3618 	assert(task->active);
3619 	assert(task->suspend_count > 0);
3620 
3621 	if (--task->suspend_count > 0) {
3622 		return;
3623 	}
3624 
3625 	if (bsd_info) {
3626 		workq_proc_resumed(bsd_info);
3627 	}
3628 
3629 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
3630 		thread_mtx_lock(thread);
3631 		thread_release(thread);
3632 		thread_mtx_unlock(thread);
3633 	}
3634 
3635 #if CONFIG_TASK_SUSPEND_STATS
3636 	_task_mark_suspend_end(task);
3637 #endif
3638 }
3639 
3640 /*
3641  *	task_release:
3642  *
3643  *	Same as the internal routine above, except that it must lock
3644  *	and verify that the task is active.
3645  *
3646  *      CONDITIONS: The caller holds a reference to the task
3647  */
3648 kern_return_t
task_release(task_t task)3649 task_release(
3650 	task_t          task)
3651 {
3652 	if (task == TASK_NULL) {
3653 		return KERN_INVALID_ARGUMENT;
3654 	}
3655 
3656 	task_lock(task);
3657 
3658 	if (!task->active) {
3659 		task_unlock(task);
3660 
3661 		return KERN_FAILURE;
3662 	}
3663 
3664 	task_release_locked(task);
3665 	task_unlock(task);
3666 
3667 	return KERN_SUCCESS;
3668 }
3669 
3670 static kern_return_t
task_threads_internal(task_t task,thread_act_array_t * threads_out,mach_msg_type_number_t * countp,mach_thread_flavor_t flavor)3671 task_threads_internal(
3672 	task_t                      task,
3673 	thread_act_array_t         *threads_out,
3674 	mach_msg_type_number_t     *countp,
3675 	mach_thread_flavor_t        flavor)
3676 {
3677 	mach_msg_type_number_t  actual, count, count_needed;
3678 	thread_t               *thread_list;
3679 	thread_t                thread;
3680 	unsigned int            i;
3681 
3682 	count = 0;
3683 	thread_list = NULL;
3684 
3685 	if (task == TASK_NULL) {
3686 		return KERN_INVALID_ARGUMENT;
3687 	}
3688 
3689 	assert(flavor <= THREAD_FLAVOR_INSPECT);
3690 
3691 	for (;;) {
3692 		task_lock(task);
3693 		if (!task->active) {
3694 			task_unlock(task);
3695 
3696 			kfree_type(thread_t, count, thread_list);
3697 			return KERN_FAILURE;
3698 		}
3699 
3700 		count_needed = actual = task->thread_count;
3701 		if (count_needed <= count) {
3702 			break;
3703 		}
3704 
3705 		/* unlock the task and allocate more memory */
3706 		task_unlock(task);
3707 
3708 		kfree_type(thread_t, count, thread_list);
3709 		count = count_needed;
3710 		thread_list = kalloc_type(thread_t, count, Z_WAITOK);
3711 
3712 		if (thread_list == NULL) {
3713 			return KERN_RESOURCE_SHORTAGE;
3714 		}
3715 	}
3716 
3717 	i = 0;
3718 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
3719 		assert(i < actual);
3720 		thread_reference(thread);
3721 		thread_list[i++] = thread;
3722 	}
3723 
3724 	count_needed = actual;
3725 
3726 	/* can unlock task now that we've got the thread refs */
3727 	task_unlock(task);
3728 
3729 	if (actual == 0) {
3730 		/* no threads, so return null pointer and deallocate memory */
3731 
3732 		*threads_out = NULL;
3733 		*countp = 0;
3734 		kfree_type(thread_t, count, thread_list);
3735 	} else {
3736 		/* if we allocated too much, must copy */
3737 		if (count_needed < count) {
3738 			void *newaddr;
3739 
3740 			newaddr = kalloc_type(thread_t, count_needed, Z_WAITOK);
3741 			if (newaddr == NULL) {
3742 				for (i = 0; i < actual; ++i) {
3743 					thread_deallocate(thread_list[i]);
3744 				}
3745 				kfree_type(thread_t, count, thread_list);
3746 				return KERN_RESOURCE_SHORTAGE;
3747 			}
3748 
3749 			bcopy(thread_list, newaddr, count_needed * sizeof(thread_t));
3750 			kfree_type(thread_t, count, thread_list);
3751 			thread_list = (thread_t *)newaddr;
3752 		}
3753 
3754 		*threads_out = thread_list;
3755 		*countp = actual;
3756 
3757 		/* do the conversion that Mig should handle */
3758 
3759 		switch (flavor) {
3760 		case THREAD_FLAVOR_CONTROL:
3761 			if (task == current_task()) {
3762 				for (i = 0; i < actual; ++i) {
3763 					((ipc_port_t *) thread_list)[i] = convert_thread_to_port_pinned(thread_list[i]);
3764 				}
3765 			} else {
3766 				for (i = 0; i < actual; ++i) {
3767 					((ipc_port_t *) thread_list)[i] = convert_thread_to_port(thread_list[i]);
3768 				}
3769 			}
3770 			break;
3771 		case THREAD_FLAVOR_READ:
3772 			for (i = 0; i < actual; ++i) {
3773 				((ipc_port_t *) thread_list)[i] = convert_thread_read_to_port(thread_list[i]);
3774 			}
3775 			break;
3776 		case THREAD_FLAVOR_INSPECT:
3777 			for (i = 0; i < actual; ++i) {
3778 				((ipc_port_t *) thread_list)[i] = convert_thread_inspect_to_port(thread_list[i]);
3779 			}
3780 			break;
3781 		}
3782 	}
3783 
3784 	return KERN_SUCCESS;
3785 }
3786 
3787 kern_return_t
task_threads(task_t task,thread_act_array_t * threads_out,mach_msg_type_number_t * count)3788 task_threads(
3789 	task_t                      task,
3790 	thread_act_array_t         *threads_out,
3791 	mach_msg_type_number_t     *count)
3792 {
3793 	return task_threads_internal(task, threads_out, count, THREAD_FLAVOR_CONTROL);
3794 }
3795 
3796 
3797 kern_return_t
task_threads_from_user(mach_port_t port,thread_act_array_t * threads_out,mach_msg_type_number_t * count)3798 task_threads_from_user(
3799 	mach_port_t                 port,
3800 	thread_act_array_t         *threads_out,
3801 	mach_msg_type_number_t     *count)
3802 {
3803 	ipc_kobject_type_t kotype;
3804 	kern_return_t kr;
3805 
3806 	task_t task = convert_port_to_task_inspect_no_eval(port);
3807 
3808 	if (task == TASK_NULL) {
3809 		return KERN_INVALID_ARGUMENT;
3810 	}
3811 
3812 	kotype = ip_kotype(port);
3813 
3814 	switch (kotype) {
3815 	case IKOT_TASK_CONTROL:
3816 		kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_CONTROL);
3817 		break;
3818 	case IKOT_TASK_READ:
3819 		kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_READ);
3820 		break;
3821 	case IKOT_TASK_INSPECT:
3822 		kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_INSPECT);
3823 		break;
3824 	default:
3825 		panic("strange kobject type");
3826 		break;
3827 	}
3828 
3829 	task_deallocate(task);
3830 	return kr;
3831 }
3832 
3833 #define TASK_HOLD_NORMAL        0
3834 #define TASK_HOLD_PIDSUSPEND    1
3835 #define TASK_HOLD_LEGACY        2
3836 #define TASK_HOLD_LEGACY_ALL    3
3837 
3838 static kern_return_t
place_task_hold(task_t task,int mode)3839 place_task_hold(
3840 	task_t task,
3841 	int mode)
3842 {
3843 	if (!task->active && !task_is_a_corpse(task)) {
3844 		return KERN_FAILURE;
3845 	}
3846 
3847 	/* Return success for corpse task */
3848 	if (task_is_a_corpse(task)) {
3849 		return KERN_SUCCESS;
3850 	}
3851 
3852 	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_IPC, MACH_TASK_SUSPEND),
3853 	    task_pid(task),
3854 	    task->thread_count > 0 ?((thread_t)queue_first(&task->threads))->thread_id : 0,
3855 	    task->user_stop_count, task->user_stop_count + 1);
3856 
3857 #if MACH_ASSERT
3858 	current_task()->suspends_outstanding++;
3859 #endif
3860 
3861 	if (mode == TASK_HOLD_LEGACY) {
3862 		task->legacy_stop_count++;
3863 	}
3864 
3865 #ifdef CONFIG_TASK_SUSPEND_STATS
3866 	_task_mark_suspend_source(task);
3867 #endif /* CONFIG_TASK_SUSPEND_STATS */
3868 
3869 	if (task->user_stop_count++ > 0) {
3870 		/*
3871 		 *	If the stop count was positive, the task is
3872 		 *	already stopped and we can exit.
3873 		 */
3874 		return KERN_SUCCESS;
3875 	}
3876 
3877 	/*
3878 	 * Put a kernel-level hold on the threads in the task (all
3879 	 * user-level task suspensions added together represent a
3880 	 * single kernel-level hold).  We then wait for the threads
3881 	 * to stop executing user code.
3882 	 */
3883 	task_hold_locked(task);
3884 	task_wait_locked(task, FALSE);
3885 
3886 	return KERN_SUCCESS;
3887 }
3888 
3889 static kern_return_t
release_task_hold(task_t task,int mode)3890 release_task_hold(
3891 	task_t          task,
3892 	int                     mode)
3893 {
3894 	boolean_t release = FALSE;
3895 
3896 	if (!task->active && !task_is_a_corpse(task)) {
3897 		return KERN_FAILURE;
3898 	}
3899 
3900 	/* Return success for corpse task */
3901 	if (task_is_a_corpse(task)) {
3902 		return KERN_SUCCESS;
3903 	}
3904 
3905 	if (mode == TASK_HOLD_PIDSUSPEND) {
3906 		if (task->pidsuspended == FALSE) {
3907 			return KERN_FAILURE;
3908 		}
3909 		task->pidsuspended = FALSE;
3910 	}
3911 
3912 	if (task->user_stop_count > (task->pidsuspended ? 1 : 0)) {
3913 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3914 		    MACHDBG_CODE(DBG_MACH_IPC, MACH_TASK_RESUME) | DBG_FUNC_NONE,
3915 		    task_pid(task), ((thread_t)queue_first(&task->threads))->thread_id,
3916 		    task->user_stop_count, mode, task->legacy_stop_count);
3917 
3918 #if MACH_ASSERT
3919 		/*
3920 		 * This is obviously not robust; if we suspend one task and then resume a different one,
3921 		 * we'll fly under the radar. This is only meant to catch the common case of a crashed
3922 		 * or buggy suspender.
3923 		 */
3924 		current_task()->suspends_outstanding--;
3925 #endif
3926 
3927 		if (mode == TASK_HOLD_LEGACY_ALL) {
3928 			if (task->legacy_stop_count >= task->user_stop_count) {
3929 				task->user_stop_count = 0;
3930 				release = TRUE;
3931 			} else {
3932 				task->user_stop_count -= task->legacy_stop_count;
3933 			}
3934 			task->legacy_stop_count = 0;
3935 		} else {
3936 			if (mode == TASK_HOLD_LEGACY && task->legacy_stop_count > 0) {
3937 				task->legacy_stop_count--;
3938 			}
3939 			if (--task->user_stop_count == 0) {
3940 				release = TRUE;
3941 			}
3942 		}
3943 	} else {
3944 		return KERN_FAILURE;
3945 	}
3946 
3947 	/*
3948 	 *	Release the task if necessary.
3949 	 */
3950 	if (release) {
3951 		task_release_locked(task);
3952 	}
3953 
3954 	return KERN_SUCCESS;
3955 }
3956 
3957 boolean_t
get_task_suspended(task_t task)3958 get_task_suspended(task_t task)
3959 {
3960 	return 0 != task->user_stop_count;
3961 }
3962 
3963 /*
3964  *	task_suspend:
3965  *
3966  *	Implement an (old-fashioned) user-level suspension on a task.
3967  *
3968  *	Because the user isn't expecting to have to manage a suspension
3969  *	token, we'll track it for him in the kernel in the form of a naked
3970  *	send right to the task's resume port.  All such send rights
3971  *	account for a single suspension against the task (unlike task_suspend2()
3972  *	where each caller gets a unique suspension count represented by a
3973  *	unique send-once right).
3974  *
3975  * Conditions:
3976  *      The caller holds a reference to the task
3977  */
3978 kern_return_t
task_suspend(task_t task)3979 task_suspend(
3980 	task_t          task)
3981 {
3982 	kern_return_t                   kr;
3983 	mach_port_t                     port;
3984 	mach_port_name_t                name;
3985 
3986 	if (task == TASK_NULL || task == kernel_task) {
3987 		return KERN_INVALID_ARGUMENT;
3988 	}
3989 
3990 	/*
3991 	 * place a legacy hold on the task.
3992 	 */
3993 	task_lock(task);
3994 	kr = place_task_hold(task, TASK_HOLD_LEGACY);
3995 	task_unlock(task);
3996 
3997 	if (kr != KERN_SUCCESS) {
3998 		return kr;
3999 	}
4000 
4001 	/*
4002 	 * Claim a send right on the task resume port, and request a no-senders
4003 	 * notification on that port (if none outstanding).
4004 	 */
4005 	itk_lock(task);
4006 	port = task->itk_resume;
4007 	if (port == IP_NULL) {
4008 		port = ipc_kobject_alloc_port(task, IKOT_TASK_RESUME,
4009 		    IPC_KOBJECT_ALLOC_NSREQUEST | IPC_KOBJECT_ALLOC_MAKE_SEND);
4010 		task->itk_resume = port;
4011 	} else {
4012 		(void)ipc_kobject_make_send_nsrequest(port, task, IKOT_TASK_RESUME);
4013 	}
4014 	itk_unlock(task);
4015 
4016 	/*
4017 	 * Copyout the send right into the calling task's IPC space.  It won't know it is there,
4018 	 * but we'll look it up when calling a traditional resume.  Any IPC operations that
4019 	 * deallocate the send right will auto-release the suspension.
4020 	 */
4021 	if (IP_VALID(port)) {
4022 		kr = ipc_object_copyout(current_space(), ip_to_object(port),
4023 		    MACH_MSG_TYPE_MOVE_SEND, IPC_OBJECT_COPYOUT_FLAGS_NONE,
4024 		    NULL, NULL, &name);
4025 	} else {
4026 		kr = KERN_SUCCESS;
4027 	}
4028 	if (kr != KERN_SUCCESS) {
4029 		printf("warning: %s(%d) failed to copyout suspension "
4030 		    "token for pid %d with error: %d\n",
4031 		    proc_name_address(get_bsdtask_info(current_task())),
4032 		    proc_pid(get_bsdtask_info(current_task())),
4033 		    task_pid(task), kr);
4034 	}
4035 
4036 	return kr;
4037 }
4038 
4039 /*
4040  *	task_resume:
4041  *		Release a user hold on a task.
4042  *
4043  * Conditions:
4044  *		The caller holds a reference to the task
4045  */
4046 kern_return_t
task_resume(task_t task)4047 task_resume(
4048 	task_t  task)
4049 {
4050 	kern_return_t    kr;
4051 	mach_port_name_t resume_port_name;
4052 	ipc_entry_t              resume_port_entry;
4053 	ipc_space_t              space = current_task()->itk_space;
4054 
4055 	if (task == TASK_NULL || task == kernel_task) {
4056 		return KERN_INVALID_ARGUMENT;
4057 	}
4058 
4059 	/* release a legacy task hold */
4060 	task_lock(task);
4061 	kr = release_task_hold(task, TASK_HOLD_LEGACY);
4062 	task_unlock(task);
4063 
4064 	itk_lock(task); /* for itk_resume */
4065 	is_write_lock(space); /* spin lock */
4066 	if (is_active(space) && IP_VALID(task->itk_resume) &&
4067 	    ipc_hash_lookup(space, ip_to_object(task->itk_resume), &resume_port_name, &resume_port_entry) == TRUE) {
4068 		/*
4069 		 * We found a suspension token in the caller's IPC space. Release a send right to indicate that
4070 		 * we are holding one less legacy hold on the task from this caller.  If the release failed,
4071 		 * go ahead and drop all the rights, as someone either already released our holds or the task
4072 		 * is gone.
4073 		 */
4074 		itk_unlock(task);
4075 		if (kr == KERN_SUCCESS) {
4076 			ipc_right_dealloc(space, resume_port_name, resume_port_entry);
4077 		} else {
4078 			ipc_right_destroy(space, resume_port_name, resume_port_entry, FALSE, 0);
4079 		}
4080 		/* space unlocked */
4081 	} else {
4082 		itk_unlock(task);
4083 		is_write_unlock(space);
4084 		if (kr == KERN_SUCCESS) {
4085 			printf("warning: %s(%d) performed out-of-band resume on pid %d\n",
4086 			    proc_name_address(get_bsdtask_info(current_task())), proc_pid(get_bsdtask_info(current_task())),
4087 			    task_pid(task));
4088 		}
4089 	}
4090 
4091 	return kr;
4092 }
4093 
4094 /*
4095  * Suspend a task that is already protected by a held lock.
4096  * Making/holding a token/reference/port is the caller's responsibility.
4097  */
4098 kern_return_t
task_suspend_internal_locked(task_t task)4099 task_suspend_internal_locked(task_t task)
4100 {
4101 	if (task == TASK_NULL || task == kernel_task) {
4102 		return KERN_INVALID_ARGUMENT;
4103 	}
4104 
4105 	return place_task_hold(task, TASK_HOLD_NORMAL);
4106 }
4107 
4108 /*
4109  * Suspend a task.
4110  * Making/holding a token/reference/port is the caller's responsibility.
4111  */
4112 kern_return_t
task_suspend_internal(task_t task)4113 task_suspend_internal(task_t task)
4114 {
4115 	kern_return_t    kr;
4116 
4117 	if (task == TASK_NULL || task == kernel_task) {
4118 		return KERN_INVALID_ARGUMENT;
4119 	}
4120 
4121 	task_lock(task);
4122 	kr = task_suspend_internal_locked(task);
4123 	task_unlock(task);
4124 	return kr;
4125 }
4126 
4127 /*
4128  * Suspend the target task, and return a suspension token. The token
4129  * represents a reference on the suspended task.
4130  */
4131 static kern_return_t
task_suspend2_grp(task_t task,task_suspension_token_t * suspend_token,task_grp_t grp)4132 task_suspend2_grp(
4133 	task_t                  task,
4134 	task_suspension_token_t *suspend_token,
4135 	task_grp_t              grp)
4136 {
4137 	kern_return_t    kr;
4138 
4139 	kr = task_suspend_internal(task);
4140 	if (kr != KERN_SUCCESS) {
4141 		*suspend_token = TASK_NULL;
4142 		return kr;
4143 	}
4144 
4145 	/*
4146 	 * Take a reference on the target task and return that to the caller
4147 	 * as a "suspension token," which can be converted into an SO right to
4148 	 * the now-suspended task's resume port.
4149 	 */
4150 	task_reference_grp(task, grp);
4151 	*suspend_token = task;
4152 
4153 	return KERN_SUCCESS;
4154 }
4155 
4156 kern_return_t
task_suspend2_mig(task_t task,task_suspension_token_t * suspend_token)4157 task_suspend2_mig(
4158 	task_t                  task,
4159 	task_suspension_token_t *suspend_token)
4160 {
4161 	return task_suspend2_grp(task, suspend_token, TASK_GRP_MIG);
4162 }
4163 
4164 kern_return_t
task_suspend2_external(task_t task,task_suspension_token_t * suspend_token)4165 task_suspend2_external(
4166 	task_t                  task,
4167 	task_suspension_token_t *suspend_token)
4168 {
4169 	return task_suspend2_grp(task, suspend_token, TASK_GRP_EXTERNAL);
4170 }
4171 
4172 /*
4173  * Resume a task that is already protected by a held lock.
4174  * (reference/token/port management is caller's responsibility).
4175  */
4176 kern_return_t
task_resume_internal_locked(task_suspension_token_t task)4177 task_resume_internal_locked(
4178 	task_suspension_token_t         task)
4179 {
4180 	if (task == TASK_NULL || task == kernel_task) {
4181 		return KERN_INVALID_ARGUMENT;
4182 	}
4183 
4184 	return release_task_hold(task, TASK_HOLD_NORMAL);
4185 }
4186 
4187 /*
4188  * Resume a task.
4189  * (reference/token/port management is caller's responsibility).
4190  */
4191 kern_return_t
task_resume_internal(task_suspension_token_t task)4192 task_resume_internal(
4193 	task_suspension_token_t         task)
4194 {
4195 	kern_return_t kr;
4196 
4197 	if (task == TASK_NULL || task == kernel_task) {
4198 		return KERN_INVALID_ARGUMENT;
4199 	}
4200 
4201 	task_lock(task);
4202 	kr = task_resume_internal_locked(task);
4203 	task_unlock(task);
4204 	return kr;
4205 }
4206 
4207 /*
4208  * Resume the task using a suspension token. Consumes the token's ref.
4209  */
4210 static kern_return_t
task_resume2_grp(task_suspension_token_t task,task_grp_t grp)4211 task_resume2_grp(
4212 	task_suspension_token_t         task,
4213 	task_grp_t                      grp)
4214 {
4215 	kern_return_t kr;
4216 
4217 	kr = task_resume_internal(task);
4218 	task_suspension_token_deallocate_grp(task, grp);
4219 
4220 	return kr;
4221 }
4222 
4223 kern_return_t
task_resume2_mig(task_suspension_token_t task)4224 task_resume2_mig(
4225 	task_suspension_token_t         task)
4226 {
4227 	return task_resume2_grp(task, TASK_GRP_MIG);
4228 }
4229 
4230 kern_return_t
task_resume2_external(task_suspension_token_t task)4231 task_resume2_external(
4232 	task_suspension_token_t         task)
4233 {
4234 	return task_resume2_grp(task, TASK_GRP_EXTERNAL);
4235 }
4236 
4237 static void
task_suspension_no_senders(ipc_port_t port,mach_port_mscount_t mscount)4238 task_suspension_no_senders(ipc_port_t port, mach_port_mscount_t mscount)
4239 {
4240 	task_t task = convert_port_to_task_suspension_token(port);
4241 	kern_return_t kr;
4242 
4243 	if (task == TASK_NULL) {
4244 		return;
4245 	}
4246 
4247 	if (task == kernel_task) {
4248 		task_suspension_token_deallocate(task);
4249 		return;
4250 	}
4251 
4252 	task_lock(task);
4253 
4254 	kr = ipc_kobject_nsrequest(port, mscount, NULL);
4255 	if (kr == KERN_FAILURE) {
4256 		/* release all the [remaining] outstanding legacy holds */
4257 		release_task_hold(task, TASK_HOLD_LEGACY_ALL);
4258 	}
4259 
4260 	task_unlock(task);
4261 
4262 	task_suspension_token_deallocate(task);         /* drop token reference */
4263 }
4264 
4265 /*
4266  * Fires when a send once made
4267  * by convert_task_suspension_token_to_port() dies.
4268  */
4269 void
task_suspension_send_once(ipc_port_t port)4270 task_suspension_send_once(ipc_port_t port)
4271 {
4272 	task_t task = convert_port_to_task_suspension_token(port);
4273 
4274 	if (task == TASK_NULL || task == kernel_task) {
4275 		return; /* nothing to do */
4276 	}
4277 
4278 	/* release the hold held by this specific send-once right */
4279 	task_lock(task);
4280 	release_task_hold(task, TASK_HOLD_NORMAL);
4281 	task_unlock(task);
4282 
4283 	task_suspension_token_deallocate(task);         /* drop token reference */
4284 }
4285 
4286 static kern_return_t
task_pidsuspend_locked(task_t task)4287 task_pidsuspend_locked(task_t task)
4288 {
4289 	kern_return_t kr;
4290 
4291 	if (task->pidsuspended) {
4292 		kr = KERN_FAILURE;
4293 		goto out;
4294 	}
4295 
4296 	task->pidsuspended = TRUE;
4297 
4298 	kr = place_task_hold(task, TASK_HOLD_PIDSUSPEND);
4299 	if (kr != KERN_SUCCESS) {
4300 		task->pidsuspended = FALSE;
4301 	}
4302 out:
4303 	return kr;
4304 }
4305 
4306 
4307 /*
4308  *	task_pidsuspend:
4309  *
4310  *	Suspends a task by placing a hold on its threads.
4311  *
4312  * Conditions:
4313  *      The caller holds a reference to the task
4314  */
4315 kern_return_t
task_pidsuspend(task_t task)4316 task_pidsuspend(
4317 	task_t          task)
4318 {
4319 	kern_return_t    kr;
4320 
4321 	if (task == TASK_NULL || task == kernel_task) {
4322 		return KERN_INVALID_ARGUMENT;
4323 	}
4324 
4325 	task_lock(task);
4326 
4327 	kr = task_pidsuspend_locked(task);
4328 
4329 	task_unlock(task);
4330 
4331 	if ((KERN_SUCCESS == kr) && task->message_app_suspended) {
4332 		iokit_task_app_suspended_changed(task);
4333 	}
4334 
4335 	return kr;
4336 }
4337 
4338 /*
4339  *	task_pidresume:
4340  *		Resumes a previously suspended task.
4341  *
4342  * Conditions:
4343  *		The caller holds a reference to the task
4344  */
4345 kern_return_t
task_pidresume(task_t task)4346 task_pidresume(
4347 	task_t  task)
4348 {
4349 	kern_return_t    kr;
4350 
4351 	if (task == TASK_NULL || task == kernel_task) {
4352 		return KERN_INVALID_ARGUMENT;
4353 	}
4354 
4355 	task_lock(task);
4356 
4357 #if CONFIG_FREEZE
4358 
4359 	while (task->changing_freeze_state) {
4360 		assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
4361 		task_unlock(task);
4362 		thread_block(THREAD_CONTINUE_NULL);
4363 
4364 		task_lock(task);
4365 	}
4366 	task->changing_freeze_state = TRUE;
4367 #endif
4368 
4369 	kr = release_task_hold(task, TASK_HOLD_PIDSUSPEND);
4370 
4371 	task_unlock(task);
4372 
4373 	if ((KERN_SUCCESS == kr) && task->message_app_suspended) {
4374 		iokit_task_app_suspended_changed(task);
4375 	}
4376 
4377 #if CONFIG_FREEZE
4378 
4379 	task_lock(task);
4380 
4381 	if (kr == KERN_SUCCESS) {
4382 		task->frozen = FALSE;
4383 	}
4384 	task->changing_freeze_state = FALSE;
4385 	thread_wakeup(&task->changing_freeze_state);
4386 
4387 	task_unlock(task);
4388 #endif
4389 
4390 	return kr;
4391 }
4392 
4393 os_refgrp_decl(static, task_watchports_refgrp, "task_watchports", NULL);
4394 
4395 /*
4396  *	task_add_turnstile_watchports:
4397  *		Setup watchports to boost the main thread of the task.
4398  *
4399  *	Arguments:
4400  *		task: task being spawned
4401  *		thread: main thread of task
4402  *		portwatch_ports: array of watchports
4403  *		portwatch_count: number of watchports
4404  *
4405  *	Conditions:
4406  *		Nothing locked.
4407  */
4408 void
task_add_turnstile_watchports(task_t task,thread_t thread,ipc_port_t * portwatch_ports,uint32_t portwatch_count)4409 task_add_turnstile_watchports(
4410 	task_t          task,
4411 	thread_t        thread,
4412 	ipc_port_t      *portwatch_ports,
4413 	uint32_t        portwatch_count)
4414 {
4415 	struct task_watchports *watchports = NULL;
4416 	struct task_watchport_elem *previous_elem_array[TASK_MAX_WATCHPORT_COUNT] = {};
4417 	os_ref_count_t refs;
4418 
4419 	/* Check if the task has terminated */
4420 	if (!task->active) {
4421 		return;
4422 	}
4423 
4424 	assert(portwatch_count <= TASK_MAX_WATCHPORT_COUNT);
4425 
4426 	watchports = task_watchports_alloc_init(task, thread, portwatch_count);
4427 
4428 	/* Lock the ipc space */
4429 	is_write_lock(task->itk_space);
4430 
4431 	/* Setup watchports to boost the main thread */
4432 	refs = task_add_turnstile_watchports_locked(task,
4433 	    watchports, previous_elem_array, portwatch_ports,
4434 	    portwatch_count);
4435 
4436 	/* Drop the space lock */
4437 	is_write_unlock(task->itk_space);
4438 
4439 	if (refs == 0) {
4440 		task_watchports_deallocate(watchports);
4441 	}
4442 
4443 	/* Drop the ref on previous_elem_array */
4444 	for (uint32_t i = 0; i < portwatch_count && previous_elem_array[i] != NULL; i++) {
4445 		task_watchport_elem_deallocate(previous_elem_array[i]);
4446 	}
4447 }
4448 
4449 /*
4450  *	task_remove_turnstile_watchports:
4451  *		Clear all turnstile boost on the task from watchports.
4452  *
4453  *	Arguments:
4454  *		task: task being terminated
4455  *
4456  *	Conditions:
4457  *		Nothing locked.
4458  */
4459 void
task_remove_turnstile_watchports(task_t task)4460 task_remove_turnstile_watchports(
4461 	task_t          task)
4462 {
4463 	os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4464 	struct task_watchports *watchports = NULL;
4465 	ipc_port_t port_freelist[TASK_MAX_WATCHPORT_COUNT] = {};
4466 	uint32_t portwatch_count;
4467 
4468 	/* Lock the ipc space */
4469 	is_write_lock(task->itk_space);
4470 
4471 	/* Check if watchport boost exist */
4472 	if (task->watchports == NULL) {
4473 		is_write_unlock(task->itk_space);
4474 		return;
4475 	}
4476 	watchports = task->watchports;
4477 	portwatch_count = watchports->tw_elem_array_count;
4478 
4479 	refs = task_remove_turnstile_watchports_locked(task, watchports,
4480 	    port_freelist);
4481 
4482 	is_write_unlock(task->itk_space);
4483 
4484 	/* Drop all the port references */
4485 	for (uint32_t i = 0; i < portwatch_count && port_freelist[i] != NULL; i++) {
4486 		ip_release(port_freelist[i]);
4487 	}
4488 
4489 	/* Clear the task and thread references for task_watchport */
4490 	if (refs == 0) {
4491 		task_watchports_deallocate(watchports);
4492 	}
4493 }
4494 
4495 /*
4496  *	task_transfer_turnstile_watchports:
4497  *		Transfer all watchport turnstile boost from old task to new task.
4498  *
4499  *	Arguments:
4500  *		old_task: task calling exec
4501  *		new_task: new exec'ed task
4502  *		thread: main thread of new task
4503  *
4504  *	Conditions:
4505  *		Nothing locked.
4506  */
4507 void
task_transfer_turnstile_watchports(task_t old_task,task_t new_task,thread_t new_thread)4508 task_transfer_turnstile_watchports(
4509 	task_t   old_task,
4510 	task_t   new_task,
4511 	thread_t new_thread)
4512 {
4513 	struct task_watchports *old_watchports = NULL;
4514 	struct task_watchports *new_watchports = NULL;
4515 	os_ref_count_t old_refs = TASK_MAX_WATCHPORT_COUNT;
4516 	os_ref_count_t new_refs = TASK_MAX_WATCHPORT_COUNT;
4517 	uint32_t portwatch_count;
4518 
4519 	if (old_task->watchports == NULL || !new_task->active) {
4520 		return;
4521 	}
4522 
4523 	/* Get the watch port count from the old task */
4524 	is_write_lock(old_task->itk_space);
4525 	if (old_task->watchports == NULL) {
4526 		is_write_unlock(old_task->itk_space);
4527 		return;
4528 	}
4529 
4530 	portwatch_count = old_task->watchports->tw_elem_array_count;
4531 	is_write_unlock(old_task->itk_space);
4532 
4533 	new_watchports = task_watchports_alloc_init(new_task, new_thread, portwatch_count);
4534 
4535 	/* Lock the ipc space for old task */
4536 	is_write_lock(old_task->itk_space);
4537 
4538 	/* Lock the ipc space for new task */
4539 	is_write_lock(new_task->itk_space);
4540 
4541 	/* Check if watchport boost exist */
4542 	if (old_task->watchports == NULL || !new_task->active) {
4543 		is_write_unlock(new_task->itk_space);
4544 		is_write_unlock(old_task->itk_space);
4545 		(void)task_watchports_release(new_watchports);
4546 		task_watchports_deallocate(new_watchports);
4547 		return;
4548 	}
4549 
4550 	old_watchports = old_task->watchports;
4551 	assert(portwatch_count == old_task->watchports->tw_elem_array_count);
4552 
4553 	/* Setup new task watchports */
4554 	new_task->watchports = new_watchports;
4555 
4556 	for (uint32_t i = 0; i < portwatch_count; i++) {
4557 		ipc_port_t port = old_watchports->tw_elem[i].twe_port;
4558 
4559 		if (port == NULL) {
4560 			task_watchport_elem_clear(&new_watchports->tw_elem[i]);
4561 			continue;
4562 		}
4563 
4564 		/* Lock the port and check if it has the entry */
4565 		ip_mq_lock(port);
4566 
4567 		task_watchport_elem_init(&new_watchports->tw_elem[i], new_task, port);
4568 
4569 		if (ipc_port_replace_watchport_elem_conditional_locked(port,
4570 		    &old_watchports->tw_elem[i], &new_watchports->tw_elem[i]) == KERN_SUCCESS) {
4571 			task_watchport_elem_clear(&old_watchports->tw_elem[i]);
4572 
4573 			task_watchports_retain(new_watchports);
4574 			old_refs = task_watchports_release(old_watchports);
4575 
4576 			/* Check if all ports are cleaned */
4577 			if (old_refs == 0) {
4578 				old_task->watchports = NULL;
4579 			}
4580 		} else {
4581 			task_watchport_elem_clear(&new_watchports->tw_elem[i]);
4582 		}
4583 		/* port unlocked by ipc_port_replace_watchport_elem_conditional_locked */
4584 	}
4585 
4586 	/* Drop the reference on new task_watchports struct returned by task_watchports_alloc_init */
4587 	new_refs = task_watchports_release(new_watchports);
4588 	if (new_refs == 0) {
4589 		new_task->watchports = NULL;
4590 	}
4591 
4592 	is_write_unlock(new_task->itk_space);
4593 	is_write_unlock(old_task->itk_space);
4594 
4595 	/* Clear the task and thread references for old_watchport */
4596 	if (old_refs == 0) {
4597 		task_watchports_deallocate(old_watchports);
4598 	}
4599 
4600 	/* Clear the task and thread references for new_watchport */
4601 	if (new_refs == 0) {
4602 		task_watchports_deallocate(new_watchports);
4603 	}
4604 }
4605 
4606 /*
4607  *	task_add_turnstile_watchports_locked:
4608  *		Setup watchports to boost the main thread of the task.
4609  *
4610  *	Arguments:
4611  *		task: task to boost
4612  *		watchports: watchport structure to be attached to the task
4613  *		previous_elem_array: an array of old watchport_elem to be returned to caller
4614  *		portwatch_ports: array of watchports
4615  *		portwatch_count: number of watchports
4616  *
4617  *	Conditions:
4618  *		ipc space of the task locked.
4619  *		returns array of old watchport_elem in previous_elem_array
4620  */
4621 static os_ref_count_t
task_add_turnstile_watchports_locked(task_t task,struct task_watchports * watchports,struct task_watchport_elem ** previous_elem_array,ipc_port_t * portwatch_ports,uint32_t portwatch_count)4622 task_add_turnstile_watchports_locked(
4623 	task_t                      task,
4624 	struct task_watchports      *watchports,
4625 	struct task_watchport_elem  **previous_elem_array,
4626 	ipc_port_t                  *portwatch_ports,
4627 	uint32_t                    portwatch_count)
4628 {
4629 	os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4630 
4631 	/* Check if the task is still active */
4632 	if (!task->active) {
4633 		refs = task_watchports_release(watchports);
4634 		return refs;
4635 	}
4636 
4637 	assert(task->watchports == NULL);
4638 	task->watchports = watchports;
4639 
4640 	for (uint32_t i = 0, j = 0; i < portwatch_count; i++) {
4641 		ipc_port_t port = portwatch_ports[i];
4642 
4643 		task_watchport_elem_init(&watchports->tw_elem[i], task, port);
4644 		if (port == NULL) {
4645 			task_watchport_elem_clear(&watchports->tw_elem[i]);
4646 			continue;
4647 		}
4648 
4649 		ip_mq_lock(port);
4650 
4651 		/* Check if port is in valid state to be setup as watchport */
4652 		if (ipc_port_add_watchport_elem_locked(port, &watchports->tw_elem[i],
4653 		    &previous_elem_array[j]) != KERN_SUCCESS) {
4654 			task_watchport_elem_clear(&watchports->tw_elem[i]);
4655 			continue;
4656 		}
4657 		/* port unlocked on return */
4658 
4659 		ip_reference(port);
4660 		task_watchports_retain(watchports);
4661 		if (previous_elem_array[j] != NULL) {
4662 			j++;
4663 		}
4664 	}
4665 
4666 	/* Drop the reference on task_watchport struct returned by os_ref_init */
4667 	refs = task_watchports_release(watchports);
4668 	if (refs == 0) {
4669 		task->watchports = NULL;
4670 	}
4671 
4672 	return refs;
4673 }
4674 
4675 /*
4676  *	task_remove_turnstile_watchports_locked:
4677  *		Clear all turnstile boost on the task from watchports.
4678  *
4679  *	Arguments:
4680  *		task: task to remove watchports from
4681  *		watchports: watchports structure for the task
4682  *		port_freelist: array of ports returned with ref to caller
4683  *
4684  *
4685  *	Conditions:
4686  *		ipc space of the task locked.
4687  *		array of ports with refs are returned in port_freelist
4688  */
4689 static os_ref_count_t
task_remove_turnstile_watchports_locked(task_t task,struct task_watchports * watchports,ipc_port_t * port_freelist)4690 task_remove_turnstile_watchports_locked(
4691 	task_t                 task,
4692 	struct task_watchports *watchports,
4693 	ipc_port_t             *port_freelist)
4694 {
4695 	os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4696 
4697 	for (uint32_t i = 0, j = 0; i < watchports->tw_elem_array_count; i++) {
4698 		ipc_port_t port = watchports->tw_elem[i].twe_port;
4699 		if (port == NULL) {
4700 			continue;
4701 		}
4702 
4703 		/* Lock the port and check if it has the entry */
4704 		ip_mq_lock(port);
4705 		if (ipc_port_clear_watchport_elem_internal_conditional_locked(port,
4706 		    &watchports->tw_elem[i]) == KERN_SUCCESS) {
4707 			task_watchport_elem_clear(&watchports->tw_elem[i]);
4708 			port_freelist[j++] = port;
4709 			refs = task_watchports_release(watchports);
4710 
4711 			/* Check if all ports are cleaned */
4712 			if (refs == 0) {
4713 				task->watchports = NULL;
4714 				break;
4715 			}
4716 		}
4717 		/* mqueue and port unlocked by ipc_port_clear_watchport_elem_internal_conditional_locked */
4718 	}
4719 	return refs;
4720 }
4721 
4722 /*
4723  *	task_watchports_alloc_init:
4724  *		Allocate and initialize task watchport struct.
4725  *
4726  *	Conditions:
4727  *		Nothing locked.
4728  */
4729 static struct task_watchports *
task_watchports_alloc_init(task_t task,thread_t thread,uint32_t count)4730 task_watchports_alloc_init(
4731 	task_t        task,
4732 	thread_t      thread,
4733 	uint32_t      count)
4734 {
4735 	struct task_watchports *watchports = kalloc_type(struct task_watchports,
4736 	    struct task_watchport_elem, count, Z_WAITOK | Z_ZERO | Z_NOFAIL);
4737 
4738 	task_reference(task);
4739 	thread_reference(thread);
4740 	watchports->tw_task = task;
4741 	watchports->tw_thread = thread;
4742 	watchports->tw_elem_array_count = count;
4743 	os_ref_init(&watchports->tw_refcount, &task_watchports_refgrp);
4744 
4745 	return watchports;
4746 }
4747 
4748 /*
4749  *	task_watchports_deallocate:
4750  *		Deallocate task watchport struct.
4751  *
4752  *	Conditions:
4753  *		Nothing locked.
4754  */
4755 static void
task_watchports_deallocate(struct task_watchports * watchports)4756 task_watchports_deallocate(
4757 	struct task_watchports *watchports)
4758 {
4759 	uint32_t portwatch_count = watchports->tw_elem_array_count;
4760 
4761 	task_deallocate(watchports->tw_task);
4762 	thread_deallocate(watchports->tw_thread);
4763 	kfree_type(struct task_watchports, struct task_watchport_elem,
4764 	    portwatch_count, watchports);
4765 }
4766 
4767 /*
4768  *	task_watchport_elem_deallocate:
4769  *		Deallocate task watchport element and release its ref on task_watchport.
4770  *
4771  *	Conditions:
4772  *		Nothing locked.
4773  */
4774 void
task_watchport_elem_deallocate(struct task_watchport_elem * watchport_elem)4775 task_watchport_elem_deallocate(
4776 	struct task_watchport_elem *watchport_elem)
4777 {
4778 	os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4779 	task_t task = watchport_elem->twe_task;
4780 	struct task_watchports *watchports = NULL;
4781 	ipc_port_t port = NULL;
4782 
4783 	assert(task != NULL);
4784 
4785 	/* Take the space lock to modify the elememt */
4786 	is_write_lock(task->itk_space);
4787 
4788 	watchports = task->watchports;
4789 	assert(watchports != NULL);
4790 
4791 	port = watchport_elem->twe_port;
4792 	assert(port != NULL);
4793 
4794 	task_watchport_elem_clear(watchport_elem);
4795 	refs = task_watchports_release(watchports);
4796 
4797 	if (refs == 0) {
4798 		task->watchports = NULL;
4799 	}
4800 
4801 	is_write_unlock(task->itk_space);
4802 
4803 	ip_release(port);
4804 	if (refs == 0) {
4805 		task_watchports_deallocate(watchports);
4806 	}
4807 }
4808 
4809 /*
4810  *	task_has_watchports:
4811  *		Return TRUE if task has watchport boosts.
4812  *
4813  *	Conditions:
4814  *		Nothing locked.
4815  */
4816 boolean_t
task_has_watchports(task_t task)4817 task_has_watchports(task_t task)
4818 {
4819 	return task->watchports != NULL;
4820 }
4821 
4822 #if DEVELOPMENT || DEBUG
4823 
4824 extern void IOSleep(int);
4825 
4826 kern_return_t
task_disconnect_page_mappings(task_t task)4827 task_disconnect_page_mappings(task_t task)
4828 {
4829 	int     n;
4830 
4831 	if (task == TASK_NULL || task == kernel_task) {
4832 		return KERN_INVALID_ARGUMENT;
4833 	}
4834 
4835 	/*
4836 	 * this function is used to strip all of the mappings from
4837 	 * the pmap for the specified task to force the task to
4838 	 * re-fault all of the pages it is actively using... this
4839 	 * allows us to approximate the true working set of the
4840 	 * specified task.  We only engage if at least 1 of the
4841 	 * threads in the task is runnable, but we want to continuously
4842 	 * sweep (at least for a while - I've arbitrarily set the limit at
4843 	 * 100 sweeps to be re-looked at as we gain experience) to get a better
4844 	 * view into what areas within a page are being visited (as opposed to only
4845 	 * seeing the first fault of a page after the task becomes
4846 	 * runnable)...  in the future I may
4847 	 * try to block until awakened by a thread in this task
4848 	 * being made runnable, but for now we'll periodically poll from the
4849 	 * user level debug tool driving the sysctl
4850 	 */
4851 	for (n = 0; n < 100; n++) {
4852 		thread_t        thread;
4853 		boolean_t       runnable;
4854 		boolean_t       do_unnest;
4855 		int             page_count;
4856 
4857 		runnable = FALSE;
4858 		do_unnest = FALSE;
4859 
4860 		task_lock(task);
4861 
4862 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
4863 			if (thread->state & TH_RUN) {
4864 				runnable = TRUE;
4865 				break;
4866 			}
4867 		}
4868 		if (n == 0) {
4869 			task->task_disconnected_count++;
4870 		}
4871 
4872 		if (task->task_unnested == FALSE) {
4873 			if (runnable == TRUE) {
4874 				task->task_unnested = TRUE;
4875 				do_unnest = TRUE;
4876 			}
4877 		}
4878 		task_unlock(task);
4879 
4880 		if (runnable == FALSE) {
4881 			break;
4882 		}
4883 
4884 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_START,
4885 		    task, do_unnest, task->task_disconnected_count, 0, 0);
4886 
4887 		page_count = vm_map_disconnect_page_mappings(task->map, do_unnest);
4888 
4889 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_END,
4890 		    task, page_count, 0, 0, 0);
4891 
4892 		if ((n % 5) == 4) {
4893 			IOSleep(1);
4894 		}
4895 	}
4896 	return KERN_SUCCESS;
4897 }
4898 
4899 #endif
4900 
4901 
4902 #if CONFIG_FREEZE
4903 
4904 /*
4905  *	task_freeze:
4906  *
4907  *	Freeze a task.
4908  *
4909  * Conditions:
4910  *      The caller holds a reference to the task
4911  */
4912 extern void     vm_wake_compactor_swapper(void);
4913 extern struct freezer_context freezer_context_global;
4914 
4915 kern_return_t
task_freeze(task_t task,uint32_t * purgeable_count,uint32_t * wired_count,uint32_t * clean_count,uint32_t * dirty_count,uint32_t dirty_budget,uint32_t * shared_count,int * freezer_error_code,boolean_t eval_only)4916 task_freeze(
4917 	task_t    task,
4918 	uint32_t           *purgeable_count,
4919 	uint32_t           *wired_count,
4920 	uint32_t           *clean_count,
4921 	uint32_t           *dirty_count,
4922 	uint32_t           dirty_budget,
4923 	uint32_t           *shared_count,
4924 	int                *freezer_error_code,
4925 	boolean_t          eval_only)
4926 {
4927 	kern_return_t kr = KERN_SUCCESS;
4928 
4929 	if (task == TASK_NULL || task == kernel_task) {
4930 		return KERN_INVALID_ARGUMENT;
4931 	}
4932 
4933 	task_lock(task);
4934 
4935 	while (task->changing_freeze_state) {
4936 		assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
4937 		task_unlock(task);
4938 		thread_block(THREAD_CONTINUE_NULL);
4939 
4940 		task_lock(task);
4941 	}
4942 	if (task->frozen) {
4943 		task_unlock(task);
4944 		return KERN_FAILURE;
4945 	}
4946 	task->changing_freeze_state = TRUE;
4947 
4948 	freezer_context_global.freezer_ctx_task = task;
4949 
4950 	task_unlock(task);
4951 
4952 	kr = vm_map_freeze(task,
4953 	    purgeable_count,
4954 	    wired_count,
4955 	    clean_count,
4956 	    dirty_count,
4957 	    dirty_budget,
4958 	    shared_count,
4959 	    freezer_error_code,
4960 	    eval_only);
4961 
4962 	task_lock(task);
4963 
4964 	if ((kr == KERN_SUCCESS) && (eval_only == FALSE)) {
4965 		task->frozen = TRUE;
4966 
4967 		freezer_context_global.freezer_ctx_task = NULL;
4968 		freezer_context_global.freezer_ctx_uncompressed_pages = 0;
4969 
4970 		if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
4971 			/*
4972 			 * reset the counter tracking the # of swapped compressed pages
4973 			 * because we are now done with this freeze session and task.
4974 			 */
4975 
4976 			*dirty_count = (uint32_t) (freezer_context_global.freezer_ctx_swapped_bytes / PAGE_SIZE_64);         /*used to track pageouts*/
4977 		}
4978 
4979 		freezer_context_global.freezer_ctx_swapped_bytes = 0;
4980 	}
4981 
4982 	task->changing_freeze_state = FALSE;
4983 	thread_wakeup(&task->changing_freeze_state);
4984 
4985 	task_unlock(task);
4986 
4987 	if (VM_CONFIG_COMPRESSOR_IS_PRESENT &&
4988 	    (kr == KERN_SUCCESS) &&
4989 	    (eval_only == FALSE)) {
4990 		vm_wake_compactor_swapper();
4991 		/*
4992 		 * We do an explicit wakeup of the swapout thread here
4993 		 * because the compact_and_swap routines don't have
4994 		 * knowledge about these kind of "per-task packed c_segs"
4995 		 * and so will not be evaluating whether we need to do
4996 		 * a wakeup there.
4997 		 */
4998 		thread_wakeup((event_t)&vm_swapout_thread);
4999 	}
5000 
5001 	return kr;
5002 }
5003 
5004 /*
5005  *	task_thaw:
5006  *
5007  *	Thaw a currently frozen task.
5008  *
5009  * Conditions:
5010  *      The caller holds a reference to the task
5011  */
5012 kern_return_t
task_thaw(task_t task)5013 task_thaw(
5014 	task_t          task)
5015 {
5016 	if (task == TASK_NULL || task == kernel_task) {
5017 		return KERN_INVALID_ARGUMENT;
5018 	}
5019 
5020 	task_lock(task);
5021 
5022 	while (task->changing_freeze_state) {
5023 		assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
5024 		task_unlock(task);
5025 		thread_block(THREAD_CONTINUE_NULL);
5026 
5027 		task_lock(task);
5028 	}
5029 	if (!task->frozen) {
5030 		task_unlock(task);
5031 		return KERN_FAILURE;
5032 	}
5033 	task->frozen = FALSE;
5034 
5035 	task_unlock(task);
5036 
5037 	return KERN_SUCCESS;
5038 }
5039 
5040 void
task_update_frozen_to_swap_acct(task_t task,int64_t amount,freezer_acct_op_t op)5041 task_update_frozen_to_swap_acct(task_t task, int64_t amount, freezer_acct_op_t op)
5042 {
5043 	/*
5044 	 * We don't assert that the task lock is held because we call this
5045 	 * routine from the decompression path and we won't be holding the
5046 	 * task lock. However, since we are in the context of the task we are
5047 	 * safe.
5048 	 * In the case of the task_freeze path, we call it from behind the task
5049 	 * lock but we don't need to because we have a reference on the proc
5050 	 * being frozen.
5051 	 */
5052 
5053 	assert(task);
5054 	if (amount == 0) {
5055 		return;
5056 	}
5057 
5058 	if (op == CREDIT_TO_SWAP) {
5059 		ledger_credit_nocheck(task->ledger, task_ledgers.frozen_to_swap, amount);
5060 	} else if (op == DEBIT_FROM_SWAP) {
5061 		ledger_debit_nocheck(task->ledger, task_ledgers.frozen_to_swap, amount);
5062 	} else {
5063 		panic("task_update_frozen_to_swap_acct: Invalid ledger op");
5064 	}
5065 }
5066 #endif /* CONFIG_FREEZE */
5067 
5068 kern_return_t
task_set_security_tokens(task_t task,security_token_t sec_token,audit_token_t audit_token,host_priv_t host_priv)5069 task_set_security_tokens(
5070 	task_t           task,
5071 	security_token_t sec_token,
5072 	audit_token_t    audit_token,
5073 	host_priv_t      host_priv)
5074 {
5075 	ipc_port_t       host_port = IP_NULL;
5076 	kern_return_t    kr;
5077 
5078 	if (task == TASK_NULL) {
5079 		return KERN_INVALID_ARGUMENT;
5080 	}
5081 
5082 	task_lock(task);
5083 	task_set_tokens(task, &sec_token, &audit_token);
5084 	task_unlock(task);
5085 
5086 	if (host_priv != HOST_PRIV_NULL) {
5087 		kr = host_get_host_priv_port(host_priv, &host_port);
5088 	} else {
5089 		kr = host_get_host_port(host_priv_self(), &host_port);
5090 	}
5091 	assert(kr == KERN_SUCCESS);
5092 
5093 	kr = task_set_special_port_internal(task, TASK_HOST_PORT, host_port);
5094 	return kr;
5095 }
5096 
5097 kern_return_t
task_send_trace_memory(__unused task_t target_task,__unused uint32_t pid,__unused uint64_t uniqueid)5098 task_send_trace_memory(
5099 	__unused task_t   target_task,
5100 	__unused uint32_t pid,
5101 	__unused uint64_t uniqueid)
5102 {
5103 	return KERN_INVALID_ARGUMENT;
5104 }
5105 
5106 /*
5107  * This routine was added, pretty much exclusively, for registering the
5108  * RPC glue vector for in-kernel short circuited tasks.  Rather than
5109  * removing it completely, I have only disabled that feature (which was
5110  * the only feature at the time).  It just appears that we are going to
5111  * want to add some user data to tasks in the future (i.e. bsd info,
5112  * task names, etc...), so I left it in the formal task interface.
5113  */
5114 kern_return_t
task_set_info(task_t task,task_flavor_t flavor,__unused task_info_t task_info_in,__unused mach_msg_type_number_t task_info_count)5115 task_set_info(
5116 	task_t          task,
5117 	task_flavor_t   flavor,
5118 	__unused task_info_t    task_info_in,           /* pointer to IN array */
5119 	__unused mach_msg_type_number_t task_info_count)
5120 {
5121 	if (task == TASK_NULL) {
5122 		return KERN_INVALID_ARGUMENT;
5123 	}
5124 	switch (flavor) {
5125 #if CONFIG_ATM
5126 	case TASK_TRACE_MEMORY_INFO:
5127 		return KERN_NOT_SUPPORTED;
5128 #endif // CONFIG_ATM
5129 	default:
5130 		return KERN_INVALID_ARGUMENT;
5131 	}
5132 }
5133 
5134 static void
_task_fill_times(task_t task,time_value_t * user_time,time_value_t * sys_time)5135 _task_fill_times(task_t task, time_value_t *user_time, time_value_t *sys_time)
5136 {
5137 	clock_sec_t sec;
5138 	clock_usec_t usec;
5139 
5140 	struct recount_times_mach times = recount_task_terminated_times(task);
5141 	absolutetime_to_microtime(times.rtm_user, &sec, &usec);
5142 	user_time->seconds = (typeof(user_time->seconds))sec;
5143 	user_time->microseconds = usec;
5144 	absolutetime_to_microtime(times.rtm_system, &sec, &usec);
5145 	sys_time->seconds = (typeof(sys_time->seconds))sec;
5146 	sys_time->microseconds = usec;
5147 }
5148 
5149 int radar_20146450 = 1;
5150 kern_return_t
task_info(task_t task,task_flavor_t flavor,task_info_t task_info_out,mach_msg_type_number_t * task_info_count)5151 task_info(
5152 	task_t                  task,
5153 	task_flavor_t           flavor,
5154 	task_info_t             task_info_out,
5155 	mach_msg_type_number_t  *task_info_count)
5156 {
5157 	kern_return_t error = KERN_SUCCESS;
5158 	mach_msg_type_number_t  original_task_info_count;
5159 	bool is_kernel_task = (task == kernel_task);
5160 
5161 	if (task == TASK_NULL) {
5162 		return KERN_INVALID_ARGUMENT;
5163 	}
5164 
5165 	original_task_info_count = *task_info_count;
5166 	task_lock(task);
5167 
5168 	if (task != current_task() && !task->active) {
5169 		task_unlock(task);
5170 		return KERN_INVALID_ARGUMENT;
5171 	}
5172 
5173 
5174 	switch (flavor) {
5175 	case TASK_BASIC_INFO_32:
5176 	case TASK_BASIC2_INFO_32:
5177 #if defined(__arm64__)
5178 	case TASK_BASIC_INFO_64:
5179 #endif
5180 		{
5181 			task_basic_info_32_t basic_info;
5182 			ledger_amount_t      tmp;
5183 
5184 			if (*task_info_count < TASK_BASIC_INFO_32_COUNT) {
5185 				error = KERN_INVALID_ARGUMENT;
5186 				break;
5187 			}
5188 
5189 			basic_info = (task_basic_info_32_t)task_info_out;
5190 
5191 			basic_info->virtual_size = (typeof(basic_info->virtual_size))
5192 			    vm_map_adjusted_size(is_kernel_task ? kernel_map : task->map);
5193 			if (flavor == TASK_BASIC2_INFO_32) {
5194 				/*
5195 				 * The "BASIC2" flavor gets the maximum resident
5196 				 * size instead of the current resident size...
5197 				 */
5198 				ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, &tmp);
5199 			} else {
5200 				ledger_get_balance(task->ledger, task_ledgers.phys_mem, &tmp);
5201 			}
5202 			basic_info->resident_size = (natural_t) MIN((ledger_amount_t) UINT32_MAX, tmp);
5203 
5204 			_task_fill_times(task, &basic_info->user_time,
5205 			    &basic_info->system_time);
5206 
5207 			basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5208 			basic_info->suspend_count = task->user_stop_count;
5209 
5210 			*task_info_count = TASK_BASIC_INFO_32_COUNT;
5211 			break;
5212 		}
5213 
5214 #if defined(__arm64__)
5215 	case TASK_BASIC_INFO_64_2:
5216 	{
5217 		task_basic_info_64_2_t  basic_info;
5218 
5219 		if (*task_info_count < TASK_BASIC_INFO_64_2_COUNT) {
5220 			error = KERN_INVALID_ARGUMENT;
5221 			break;
5222 		}
5223 
5224 		basic_info = (task_basic_info_64_2_t)task_info_out;
5225 
5226 		basic_info->virtual_size  = vm_map_adjusted_size(is_kernel_task ?
5227 		    kernel_map : task->map);
5228 		ledger_get_balance(task->ledger, task_ledgers.phys_mem,
5229 		    (ledger_amount_t *)&basic_info->resident_size);
5230 		basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5231 		basic_info->suspend_count = task->user_stop_count;
5232 		_task_fill_times(task, &basic_info->user_time,
5233 		    &basic_info->system_time);
5234 
5235 		*task_info_count = TASK_BASIC_INFO_64_2_COUNT;
5236 		break;
5237 	}
5238 
5239 #else /* defined(__arm64__) */
5240 	case TASK_BASIC_INFO_64:
5241 	{
5242 		task_basic_info_64_t basic_info;
5243 
5244 		if (*task_info_count < TASK_BASIC_INFO_64_COUNT) {
5245 			error = KERN_INVALID_ARGUMENT;
5246 			break;
5247 		}
5248 
5249 		basic_info = (task_basic_info_64_t)task_info_out;
5250 
5251 		basic_info->virtual_size = vm_map_adjusted_size(is_kernel_task ?
5252 		    kernel_map : task->map);
5253 		ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *)&basic_info->resident_size);
5254 		basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5255 		basic_info->suspend_count = task->user_stop_count;
5256 		_task_fill_times(task, &basic_info->user_time,
5257 		    &basic_info->system_time);
5258 
5259 		*task_info_count = TASK_BASIC_INFO_64_COUNT;
5260 		break;
5261 	}
5262 #endif /* defined(__arm64__) */
5263 
5264 	case MACH_TASK_BASIC_INFO:
5265 	{
5266 		mach_task_basic_info_t  basic_info;
5267 
5268 		if (*task_info_count < MACH_TASK_BASIC_INFO_COUNT) {
5269 			error = KERN_INVALID_ARGUMENT;
5270 			break;
5271 		}
5272 
5273 		basic_info = (mach_task_basic_info_t)task_info_out;
5274 
5275 		basic_info->virtual_size = vm_map_adjusted_size(is_kernel_task ?
5276 		    kernel_map : task->map);
5277 		ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &basic_info->resident_size);
5278 		ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &basic_info->resident_size_max);
5279 		basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5280 		basic_info->suspend_count = task->user_stop_count;
5281 		_task_fill_times(task, &basic_info->user_time,
5282 		    &basic_info->system_time);
5283 
5284 		*task_info_count = MACH_TASK_BASIC_INFO_COUNT;
5285 		break;
5286 	}
5287 
5288 	case TASK_THREAD_TIMES_INFO:
5289 	{
5290 		task_thread_times_info_t times_info;
5291 		thread_t                 thread;
5292 
5293 		if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
5294 			error = KERN_INVALID_ARGUMENT;
5295 			break;
5296 		}
5297 
5298 		times_info = (task_thread_times_info_t)task_info_out;
5299 		times_info->user_time = (time_value_t){ 0 };
5300 		times_info->system_time = (time_value_t){ 0 };
5301 
5302 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
5303 			if ((thread->options & TH_OPT_IDLE_THREAD) == 0) {
5304 				time_value_t user_time, system_time;
5305 
5306 				thread_read_times(thread, &user_time, &system_time, NULL);
5307 				time_value_add(&times_info->user_time, &user_time);
5308 				time_value_add(&times_info->system_time, &system_time);
5309 			}
5310 		}
5311 
5312 		*task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
5313 		break;
5314 	}
5315 
5316 	case TASK_ABSOLUTETIME_INFO:
5317 	{
5318 		task_absolutetime_info_t        info;
5319 
5320 		if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT) {
5321 			error = KERN_INVALID_ARGUMENT;
5322 			break;
5323 		}
5324 
5325 		info = (task_absolutetime_info_t)task_info_out;
5326 
5327 		struct recount_times_mach term_times =
5328 		    recount_task_terminated_times(task);
5329 		struct recount_times_mach total_times = recount_task_times(task);
5330 
5331 		info->total_user = total_times.rtm_user;
5332 		info->total_system = total_times.rtm_system;
5333 		info->threads_user = total_times.rtm_user - term_times.rtm_user;
5334 		info->threads_system += total_times.rtm_system - term_times.rtm_system;
5335 
5336 		*task_info_count = TASK_ABSOLUTETIME_INFO_COUNT;
5337 		break;
5338 	}
5339 
5340 	case TASK_DYLD_INFO:
5341 	{
5342 		task_dyld_info_t info;
5343 
5344 		/*
5345 		 * We added the format field to TASK_DYLD_INFO output.  For
5346 		 * temporary backward compatibility, accept the fact that
5347 		 * clients may ask for the old version - distinquished by the
5348 		 * size of the expected result structure.
5349 		 */
5350 #define TASK_LEGACY_DYLD_INFO_COUNT \
5351 	        offsetof(struct task_dyld_info, all_image_info_format)/sizeof(natural_t)
5352 
5353 		if (*task_info_count < TASK_LEGACY_DYLD_INFO_COUNT) {
5354 			error = KERN_INVALID_ARGUMENT;
5355 			break;
5356 		}
5357 
5358 		info = (task_dyld_info_t)task_info_out;
5359 		info->all_image_info_addr = task->all_image_info_addr;
5360 		info->all_image_info_size = task->all_image_info_size;
5361 
5362 		/* only set format on output for those expecting it */
5363 		if (*task_info_count >= TASK_DYLD_INFO_COUNT) {
5364 			info->all_image_info_format = task_has_64Bit_addr(task) ?
5365 			    TASK_DYLD_ALL_IMAGE_INFO_64 :
5366 			    TASK_DYLD_ALL_IMAGE_INFO_32;
5367 			*task_info_count = TASK_DYLD_INFO_COUNT;
5368 		} else {
5369 			*task_info_count = TASK_LEGACY_DYLD_INFO_COUNT;
5370 		}
5371 		break;
5372 	}
5373 
5374 	case TASK_EXTMOD_INFO:
5375 	{
5376 		task_extmod_info_t info;
5377 		void *p;
5378 
5379 		if (*task_info_count < TASK_EXTMOD_INFO_COUNT) {
5380 			error = KERN_INVALID_ARGUMENT;
5381 			break;
5382 		}
5383 
5384 		info = (task_extmod_info_t)task_info_out;
5385 
5386 		p = get_bsdtask_info(task);
5387 		if (p) {
5388 			proc_getexecutableuuid(p, info->task_uuid, sizeof(info->task_uuid));
5389 		} else {
5390 			bzero(info->task_uuid, sizeof(info->task_uuid));
5391 		}
5392 		info->extmod_statistics = task->extmod_statistics;
5393 		*task_info_count = TASK_EXTMOD_INFO_COUNT;
5394 
5395 		break;
5396 	}
5397 
5398 	case TASK_KERNELMEMORY_INFO:
5399 	{
5400 		task_kernelmemory_info_t        tkm_info;
5401 		ledger_amount_t                 credit, debit;
5402 
5403 		if (*task_info_count < TASK_KERNELMEMORY_INFO_COUNT) {
5404 			error = KERN_INVALID_ARGUMENT;
5405 			break;
5406 		}
5407 
5408 		tkm_info = (task_kernelmemory_info_t) task_info_out;
5409 		tkm_info->total_palloc = 0;
5410 		tkm_info->total_pfree = 0;
5411 		tkm_info->total_salloc = 0;
5412 		tkm_info->total_sfree = 0;
5413 
5414 		if (task == kernel_task) {
5415 			/*
5416 			 * All shared allocs/frees from other tasks count against
5417 			 * the kernel private memory usage.  If we are looking up
5418 			 * info for the kernel task, gather from everywhere.
5419 			 */
5420 			task_unlock(task);
5421 
5422 			/* start by accounting for all the terminated tasks against the kernel */
5423 			tkm_info->total_palloc = tasks_tkm_private.alloc + tasks_tkm_shared.alloc;
5424 			tkm_info->total_pfree = tasks_tkm_private.free + tasks_tkm_shared.free;
5425 
5426 			/* count all other task/thread shared alloc/free against the kernel */
5427 			lck_mtx_lock(&tasks_threads_lock);
5428 
5429 			/* XXX this really shouldn't be using the function parameter 'task' as a local var! */
5430 			queue_iterate(&tasks, task, task_t, tasks) {
5431 				if (task == kernel_task) {
5432 					if (ledger_get_entries(task->ledger,
5433 					    task_ledgers.tkm_private, &credit,
5434 					    &debit) == KERN_SUCCESS) {
5435 						tkm_info->total_palloc += credit;
5436 						tkm_info->total_pfree += debit;
5437 					}
5438 				}
5439 				if (!ledger_get_entries(task->ledger,
5440 				    task_ledgers.tkm_shared, &credit, &debit)) {
5441 					tkm_info->total_palloc += credit;
5442 					tkm_info->total_pfree += debit;
5443 				}
5444 			}
5445 			lck_mtx_unlock(&tasks_threads_lock);
5446 		} else {
5447 			if (!ledger_get_entries(task->ledger,
5448 			    task_ledgers.tkm_private, &credit, &debit)) {
5449 				tkm_info->total_palloc = credit;
5450 				tkm_info->total_pfree = debit;
5451 			}
5452 			if (!ledger_get_entries(task->ledger,
5453 			    task_ledgers.tkm_shared, &credit, &debit)) {
5454 				tkm_info->total_salloc = credit;
5455 				tkm_info->total_sfree = debit;
5456 			}
5457 			task_unlock(task);
5458 		}
5459 
5460 		*task_info_count = TASK_KERNELMEMORY_INFO_COUNT;
5461 		return KERN_SUCCESS;
5462 	}
5463 
5464 	/* OBSOLETE */
5465 	case TASK_SCHED_FIFO_INFO:
5466 	{
5467 		if (*task_info_count < POLICY_FIFO_BASE_COUNT) {
5468 			error = KERN_INVALID_ARGUMENT;
5469 			break;
5470 		}
5471 
5472 		error = KERN_INVALID_POLICY;
5473 		break;
5474 	}
5475 
5476 	/* OBSOLETE */
5477 	case TASK_SCHED_RR_INFO:
5478 	{
5479 		policy_rr_base_t        rr_base;
5480 		uint32_t quantum_time;
5481 		uint64_t quantum_ns;
5482 
5483 		if (*task_info_count < POLICY_RR_BASE_COUNT) {
5484 			error = KERN_INVALID_ARGUMENT;
5485 			break;
5486 		}
5487 
5488 		rr_base = (policy_rr_base_t) task_info_out;
5489 
5490 		if (task != kernel_task) {
5491 			error = KERN_INVALID_POLICY;
5492 			break;
5493 		}
5494 
5495 		rr_base->base_priority = task->priority;
5496 
5497 		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
5498 		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);
5499 
5500 		rr_base->quantum = (uint32_t)(quantum_ns / 1000 / 1000);
5501 
5502 		*task_info_count = POLICY_RR_BASE_COUNT;
5503 		break;
5504 	}
5505 
5506 	/* OBSOLETE */
5507 	case TASK_SCHED_TIMESHARE_INFO:
5508 	{
5509 		policy_timeshare_base_t ts_base;
5510 
5511 		if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT) {
5512 			error = KERN_INVALID_ARGUMENT;
5513 			break;
5514 		}
5515 
5516 		ts_base = (policy_timeshare_base_t) task_info_out;
5517 
5518 		if (task == kernel_task) {
5519 			error = KERN_INVALID_POLICY;
5520 			break;
5521 		}
5522 
5523 		ts_base->base_priority = task->priority;
5524 
5525 		*task_info_count = POLICY_TIMESHARE_BASE_COUNT;
5526 		break;
5527 	}
5528 
5529 	case TASK_SECURITY_TOKEN:
5530 	{
5531 		security_token_t        *sec_token_p;
5532 
5533 		if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
5534 			error = KERN_INVALID_ARGUMENT;
5535 			break;
5536 		}
5537 
5538 		sec_token_p = (security_token_t *) task_info_out;
5539 
5540 		*sec_token_p = *task_get_sec_token(task);
5541 
5542 		*task_info_count = TASK_SECURITY_TOKEN_COUNT;
5543 		break;
5544 	}
5545 
5546 	case TASK_AUDIT_TOKEN:
5547 	{
5548 		audit_token_t   *audit_token_p;
5549 
5550 		if (*task_info_count < TASK_AUDIT_TOKEN_COUNT) {
5551 			error = KERN_INVALID_ARGUMENT;
5552 			break;
5553 		}
5554 
5555 		audit_token_p = (audit_token_t *) task_info_out;
5556 
5557 		*audit_token_p = *task_get_audit_token(task);
5558 
5559 		*task_info_count = TASK_AUDIT_TOKEN_COUNT;
5560 		break;
5561 	}
5562 
5563 	case TASK_SCHED_INFO:
5564 		error = KERN_INVALID_ARGUMENT;
5565 		break;
5566 
5567 	case TASK_EVENTS_INFO:
5568 	{
5569 		task_events_info_t      events_info;
5570 		thread_t                thread;
5571 		uint64_t                n_syscalls_mach, n_syscalls_unix, n_csw;
5572 
5573 		if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
5574 			error = KERN_INVALID_ARGUMENT;
5575 			break;
5576 		}
5577 
5578 		events_info = (task_events_info_t) task_info_out;
5579 
5580 
5581 		events_info->faults = (int32_t) MIN(counter_load(&task->faults), INT32_MAX);
5582 		events_info->pageins = (int32_t) MIN(counter_load(&task->pageins), INT32_MAX);
5583 		events_info->cow_faults = (int32_t) MIN(counter_load(&task->cow_faults), INT32_MAX);
5584 		events_info->messages_sent = (int32_t) MIN(counter_load(&task->messages_sent), INT32_MAX);
5585 		events_info->messages_received = (int32_t) MIN(counter_load(&task->messages_received), INT32_MAX);
5586 
5587 		n_syscalls_mach = task->syscalls_mach;
5588 		n_syscalls_unix = task->syscalls_unix;
5589 		n_csw = task->c_switch;
5590 
5591 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
5592 			n_csw           += thread->c_switch;
5593 			n_syscalls_mach += thread->syscalls_mach;
5594 			n_syscalls_unix += thread->syscalls_unix;
5595 		}
5596 
5597 		events_info->syscalls_mach = (int32_t) MIN(n_syscalls_mach, INT32_MAX);
5598 		events_info->syscalls_unix = (int32_t) MIN(n_syscalls_unix, INT32_MAX);
5599 		events_info->csw = (int32_t) MIN(n_csw, INT32_MAX);
5600 
5601 		*task_info_count = TASK_EVENTS_INFO_COUNT;
5602 		break;
5603 	}
5604 	case TASK_AFFINITY_TAG_INFO:
5605 	{
5606 		if (*task_info_count < TASK_AFFINITY_TAG_INFO_COUNT) {
5607 			error = KERN_INVALID_ARGUMENT;
5608 			break;
5609 		}
5610 
5611 		error = task_affinity_info(task, task_info_out, task_info_count);
5612 		break;
5613 	}
5614 	case TASK_POWER_INFO:
5615 	{
5616 		if (*task_info_count < TASK_POWER_INFO_COUNT) {
5617 			error = KERN_INVALID_ARGUMENT;
5618 			break;
5619 		}
5620 
5621 		task_power_info_locked(task, (task_power_info_t)task_info_out, NULL, NULL, NULL);
5622 		break;
5623 	}
5624 
5625 	case TASK_POWER_INFO_V2:
5626 	{
5627 		if (*task_info_count < TASK_POWER_INFO_V2_COUNT_OLD) {
5628 			error = KERN_INVALID_ARGUMENT;
5629 			break;
5630 		}
5631 		task_power_info_v2_t tpiv2 = (task_power_info_v2_t) task_info_out;
5632 		task_power_info_locked(task, &tpiv2->cpu_energy, &tpiv2->gpu_energy, tpiv2, NULL);
5633 		break;
5634 	}
5635 
5636 	case TASK_VM_INFO:
5637 	case TASK_VM_INFO_PURGEABLE:
5638 	{
5639 		task_vm_info_t          vm_info;
5640 		vm_map_t                map;
5641 		ledger_amount_t         tmp_amount;
5642 
5643 		struct proc *p;
5644 		uint32_t platform, sdk;
5645 		p = current_proc();
5646 		platform = proc_platform(p);
5647 		sdk = proc_sdk(p);
5648 		if (original_task_info_count > TASK_VM_INFO_COUNT) {
5649 			/*
5650 			 * Some iOS apps pass an incorrect value for
5651 			 * task_info_count, expressed in number of bytes
5652 			 * instead of number of "natural_t" elements, which
5653 			 * can lead to binary compatibility issues (including
5654 			 * stack corruption) when the data structure is
5655 			 * expanded in the future.
5656 			 * Let's make this potential issue visible by
5657 			 * logging about it...
5658 			 */
5659 			printf("%s:%d %d[%s] task_info(flavor=%d) possibly invalid "
5660 			    "task_info_count=%d > TASK_VM_INFO_COUNT=%d platform %d sdk "
5661 			    "%d.%d.%d - please use TASK_VM_INFO_COUNT.\n",
5662 			    __FUNCTION__, __LINE__, proc_pid(p), proc_name_address(p),
5663 			    flavor, original_task_info_count, TASK_VM_INFO_COUNT,
5664 			    platform, (sdk >> 16), ((sdk >> 8) & 0xff), (sdk & 0xff));
5665 			DTRACE_VM4(suspicious_task_vm_info_count,
5666 			    mach_msg_type_number_t, original_task_info_count,
5667 			    mach_msg_type_number_t, TASK_VM_INFO_COUNT,
5668 			    uint32_t, platform,
5669 			    uint32_t, sdk);
5670 		}
5671 #if __arm64__
5672 		if (original_task_info_count > TASK_VM_INFO_REV2_COUNT &&
5673 		    platform == PLATFORM_IOS &&
5674 		    sdk != 0 &&
5675 		    (sdk >> 16) <= 12) {
5676 			/*
5677 			 * Some iOS apps pass an incorrect value for
5678 			 * task_info_count, expressed in number of bytes
5679 			 * instead of number of "natural_t" elements.
5680 			 * For the sake of backwards binary compatibility
5681 			 * for apps built with an iOS12 or older SDK and using
5682 			 * the "rev2" data structure, let's fix task_info_count
5683 			 * for them, to avoid stomping past the actual end
5684 			 * of their buffer.
5685 			 */
5686 #if DEVELOPMENT || DEBUG
5687 			printf("%s:%d %d[%s] rdar://49484582 task_info_count %d -> %d "
5688 			    "platform %d sdk %d.%d.%d\n", __FUNCTION__, __LINE__, proc_pid(p),
5689 			    proc_name_address(p), original_task_info_count,
5690 			    TASK_VM_INFO_REV2_COUNT, platform, (sdk >> 16),
5691 			    ((sdk >> 8) & 0xff), (sdk & 0xff));
5692 #endif /* DEVELOPMENT || DEBUG */
5693 			DTRACE_VM4(workaround_task_vm_info_count,
5694 			    mach_msg_type_number_t, original_task_info_count,
5695 			    mach_msg_type_number_t, TASK_VM_INFO_REV2_COUNT,
5696 			    uint32_t, platform,
5697 			    uint32_t, sdk);
5698 			original_task_info_count = TASK_VM_INFO_REV2_COUNT;
5699 			*task_info_count = original_task_info_count;
5700 		}
5701 		if (original_task_info_count > TASK_VM_INFO_REV5_COUNT &&
5702 		    platform == PLATFORM_IOS &&
5703 		    sdk != 0 &&
5704 		    (sdk >> 16) <= 15) {
5705 			/*
5706 			 * Some iOS apps pass an incorrect value for
5707 			 * task_info_count, expressed in number of bytes
5708 			 * instead of number of "natural_t" elements.
5709 			 */
5710 			printf("%s:%d %d[%s] task_info_count=%d > TASK_VM_INFO_COUNT=%d "
5711 			    "platform %d sdk %d.%d.%d\n", __FUNCTION__, __LINE__, proc_pid(p),
5712 			    proc_name_address(p), original_task_info_count,
5713 			    TASK_VM_INFO_REV5_COUNT, platform, (sdk >> 16),
5714 			    ((sdk >> 8) & 0xff), (sdk & 0xff));
5715 			DTRACE_VM4(workaround_task_vm_info_count,
5716 			    mach_msg_type_number_t, original_task_info_count,
5717 			    mach_msg_type_number_t, TASK_VM_INFO_REV5_COUNT,
5718 			    uint32_t, platform,
5719 			    uint32_t, sdk);
5720 #if DEVELOPMENT || DEBUG
5721 			/*
5722 			 * For the sake of internal builds livability,
5723 			 * work around this user-space bug by capping the
5724 			 * buffer's size to what it was with the iOS15 SDK.
5725 			 */
5726 			original_task_info_count = TASK_VM_INFO_REV5_COUNT;
5727 			*task_info_count = original_task_info_count;
5728 #endif /* DEVELOPMENT || DEBUG */
5729 		}
5730 #endif /* __arm64__ */
5731 
5732 		if (*task_info_count < TASK_VM_INFO_REV0_COUNT) {
5733 			error = KERN_INVALID_ARGUMENT;
5734 			break;
5735 		}
5736 
5737 		vm_info = (task_vm_info_t)task_info_out;
5738 
5739 		/*
5740 		 * Do not hold both the task and map locks,
5741 		 * so convert the task lock into a map reference,
5742 		 * drop the task lock, then lock the map.
5743 		 */
5744 		if (is_kernel_task) {
5745 			map = kernel_map;
5746 			task_unlock(task);
5747 			/* no lock, no reference */
5748 		} else {
5749 			map = task->map;
5750 			vm_map_reference(map);
5751 			task_unlock(task);
5752 			vm_map_lock_read(map);
5753 		}
5754 
5755 		vm_info->virtual_size = (typeof(vm_info->virtual_size))vm_map_adjusted_size(map);
5756 		vm_info->region_count = map->hdr.nentries;
5757 		vm_info->page_size = vm_map_page_size(map);
5758 
5759 		ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &vm_info->resident_size);
5760 		ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &vm_info->resident_size_peak);
5761 
5762 		vm_info->device = 0;
5763 		vm_info->device_peak = 0;
5764 		ledger_get_balance(task->ledger, task_ledgers.external, (ledger_amount_t *) &vm_info->external);
5765 		ledger_get_lifetime_max(task->ledger, task_ledgers.external, (ledger_amount_t *) &vm_info->external_peak);
5766 		ledger_get_balance(task->ledger, task_ledgers.internal, (ledger_amount_t *) &vm_info->internal);
5767 		ledger_get_lifetime_max(task->ledger, task_ledgers.internal, (ledger_amount_t *) &vm_info->internal_peak);
5768 		ledger_get_balance(task->ledger, task_ledgers.reusable, (ledger_amount_t *) &vm_info->reusable);
5769 		ledger_get_lifetime_max(task->ledger, task_ledgers.reusable, (ledger_amount_t *) &vm_info->reusable_peak);
5770 		ledger_get_balance(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t*) &vm_info->compressed);
5771 		ledger_get_lifetime_max(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t*) &vm_info->compressed_peak);
5772 		ledger_get_entries(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t*) &vm_info->compressed_lifetime, &tmp_amount);
5773 
5774 		vm_info->purgeable_volatile_pmap = 0;
5775 		vm_info->purgeable_volatile_resident = 0;
5776 		vm_info->purgeable_volatile_virtual = 0;
5777 		if (is_kernel_task) {
5778 			/*
5779 			 * We do not maintain the detailed stats for the
5780 			 * kernel_pmap, so just count everything as
5781 			 * "internal"...
5782 			 */
5783 			vm_info->internal = vm_info->resident_size;
5784 			/*
5785 			 * ... but since the memory held by the VM compressor
5786 			 * in the kernel address space ought to be attributed
5787 			 * to user-space tasks, we subtract it from "internal"
5788 			 * to give memory reporting tools a more accurate idea
5789 			 * of what the kernel itself is actually using, instead
5790 			 * of making it look like the kernel is leaking memory
5791 			 * when the system is under memory pressure.
5792 			 */
5793 			vm_info->internal -= (VM_PAGE_COMPRESSOR_COUNT *
5794 			    PAGE_SIZE);
5795 		} else {
5796 			mach_vm_size_t  volatile_virtual_size;
5797 			mach_vm_size_t  volatile_resident_size;
5798 			mach_vm_size_t  volatile_compressed_size;
5799 			mach_vm_size_t  volatile_pmap_size;
5800 			mach_vm_size_t  volatile_compressed_pmap_size;
5801 			kern_return_t   kr;
5802 
5803 			if (flavor == TASK_VM_INFO_PURGEABLE) {
5804 				kr = vm_map_query_volatile(
5805 					map,
5806 					&volatile_virtual_size,
5807 					&volatile_resident_size,
5808 					&volatile_compressed_size,
5809 					&volatile_pmap_size,
5810 					&volatile_compressed_pmap_size);
5811 				if (kr == KERN_SUCCESS) {
5812 					vm_info->purgeable_volatile_pmap =
5813 					    volatile_pmap_size;
5814 					if (radar_20146450) {
5815 						vm_info->compressed -=
5816 						    volatile_compressed_pmap_size;
5817 					}
5818 					vm_info->purgeable_volatile_resident =
5819 					    volatile_resident_size;
5820 					vm_info->purgeable_volatile_virtual =
5821 					    volatile_virtual_size;
5822 				}
5823 			}
5824 		}
5825 		*task_info_count = TASK_VM_INFO_REV0_COUNT;
5826 
5827 		if (original_task_info_count >= TASK_VM_INFO_REV2_COUNT) {
5828 			/* must be captured while we still have the map lock */
5829 			vm_info->min_address = map->min_offset;
5830 			vm_info->max_address = map->max_offset;
5831 		}
5832 
5833 		/*
5834 		 * Done with vm map things, can drop the map lock and reference,
5835 		 * and take the task lock back.
5836 		 *
5837 		 * Re-validate that the task didn't die on us.
5838 		 */
5839 		if (!is_kernel_task) {
5840 			vm_map_unlock_read(map);
5841 			vm_map_deallocate(map);
5842 		}
5843 		map = VM_MAP_NULL;
5844 
5845 		task_lock(task);
5846 
5847 		if ((task != current_task()) && (!task->active)) {
5848 			error = KERN_INVALID_ARGUMENT;
5849 			break;
5850 		}
5851 
5852 		if (original_task_info_count >= TASK_VM_INFO_REV1_COUNT) {
5853 			vm_info->phys_footprint =
5854 			    (mach_vm_size_t) get_task_phys_footprint(task);
5855 			*task_info_count = TASK_VM_INFO_REV1_COUNT;
5856 		}
5857 		if (original_task_info_count >= TASK_VM_INFO_REV2_COUNT) {
5858 			/* data was captured above */
5859 			*task_info_count = TASK_VM_INFO_REV2_COUNT;
5860 		}
5861 
5862 		if (original_task_info_count >= TASK_VM_INFO_REV3_COUNT) {
5863 			ledger_get_lifetime_max(task->ledger,
5864 			    task_ledgers.phys_footprint,
5865 			    &vm_info->ledger_phys_footprint_peak);
5866 			ledger_get_balance(task->ledger,
5867 			    task_ledgers.purgeable_nonvolatile,
5868 			    &vm_info->ledger_purgeable_nonvolatile);
5869 			ledger_get_balance(task->ledger,
5870 			    task_ledgers.purgeable_nonvolatile_compressed,
5871 			    &vm_info->ledger_purgeable_novolatile_compressed);
5872 			ledger_get_balance(task->ledger,
5873 			    task_ledgers.purgeable_volatile,
5874 			    &vm_info->ledger_purgeable_volatile);
5875 			ledger_get_balance(task->ledger,
5876 			    task_ledgers.purgeable_volatile_compressed,
5877 			    &vm_info->ledger_purgeable_volatile_compressed);
5878 			ledger_get_balance(task->ledger,
5879 			    task_ledgers.network_nonvolatile,
5880 			    &vm_info->ledger_tag_network_nonvolatile);
5881 			ledger_get_balance(task->ledger,
5882 			    task_ledgers.network_nonvolatile_compressed,
5883 			    &vm_info->ledger_tag_network_nonvolatile_compressed);
5884 			ledger_get_balance(task->ledger,
5885 			    task_ledgers.network_volatile,
5886 			    &vm_info->ledger_tag_network_volatile);
5887 			ledger_get_balance(task->ledger,
5888 			    task_ledgers.network_volatile_compressed,
5889 			    &vm_info->ledger_tag_network_volatile_compressed);
5890 			ledger_get_balance(task->ledger,
5891 			    task_ledgers.media_footprint,
5892 			    &vm_info->ledger_tag_media_footprint);
5893 			ledger_get_balance(task->ledger,
5894 			    task_ledgers.media_footprint_compressed,
5895 			    &vm_info->ledger_tag_media_footprint_compressed);
5896 			ledger_get_balance(task->ledger,
5897 			    task_ledgers.media_nofootprint,
5898 			    &vm_info->ledger_tag_media_nofootprint);
5899 			ledger_get_balance(task->ledger,
5900 			    task_ledgers.media_nofootprint_compressed,
5901 			    &vm_info->ledger_tag_media_nofootprint_compressed);
5902 			ledger_get_balance(task->ledger,
5903 			    task_ledgers.graphics_footprint,
5904 			    &vm_info->ledger_tag_graphics_footprint);
5905 			ledger_get_balance(task->ledger,
5906 			    task_ledgers.graphics_footprint_compressed,
5907 			    &vm_info->ledger_tag_graphics_footprint_compressed);
5908 			ledger_get_balance(task->ledger,
5909 			    task_ledgers.graphics_nofootprint,
5910 			    &vm_info->ledger_tag_graphics_nofootprint);
5911 			ledger_get_balance(task->ledger,
5912 			    task_ledgers.graphics_nofootprint_compressed,
5913 			    &vm_info->ledger_tag_graphics_nofootprint_compressed);
5914 			ledger_get_balance(task->ledger,
5915 			    task_ledgers.neural_footprint,
5916 			    &vm_info->ledger_tag_neural_footprint);
5917 			ledger_get_balance(task->ledger,
5918 			    task_ledgers.neural_footprint_compressed,
5919 			    &vm_info->ledger_tag_neural_footprint_compressed);
5920 			ledger_get_balance(task->ledger,
5921 			    task_ledgers.neural_nofootprint,
5922 			    &vm_info->ledger_tag_neural_nofootprint);
5923 			ledger_get_balance(task->ledger,
5924 			    task_ledgers.neural_nofootprint_compressed,
5925 			    &vm_info->ledger_tag_neural_nofootprint_compressed);
5926 			*task_info_count = TASK_VM_INFO_REV3_COUNT;
5927 		}
5928 		if (original_task_info_count >= TASK_VM_INFO_REV4_COUNT) {
5929 			if (get_bsdtask_info(task)) {
5930 				vm_info->limit_bytes_remaining =
5931 				    memorystatus_available_memory_internal(get_bsdtask_info(task));
5932 			} else {
5933 				vm_info->limit_bytes_remaining = 0;
5934 			}
5935 			*task_info_count = TASK_VM_INFO_REV4_COUNT;
5936 		}
5937 		if (original_task_info_count >= TASK_VM_INFO_REV5_COUNT) {
5938 			thread_t thread;
5939 			uint64_t total = task->decompressions;
5940 			queue_iterate(&task->threads, thread, thread_t, task_threads) {
5941 				total += thread->decompressions;
5942 			}
5943 			vm_info->decompressions = (int32_t) MIN(total, INT32_MAX);
5944 			*task_info_count = TASK_VM_INFO_REV5_COUNT;
5945 		}
5946 		if (original_task_info_count >= TASK_VM_INFO_REV6_COUNT) {
5947 			ledger_get_balance(task->ledger, task_ledgers.swapins,
5948 			    &vm_info->ledger_swapins);
5949 			*task_info_count = TASK_VM_INFO_REV6_COUNT;
5950 		}
5951 
5952 		break;
5953 	}
5954 
5955 	case TASK_WAIT_STATE_INFO:
5956 	{
5957 		/*
5958 		 * Deprecated flavor. Currently allowing some results until all users
5959 		 * stop calling it. The results may not be accurate.
5960 		 */
5961 		task_wait_state_info_t  wait_state_info;
5962 		uint64_t total_sfi_ledger_val = 0;
5963 
5964 		if (*task_info_count < TASK_WAIT_STATE_INFO_COUNT) {
5965 			error = KERN_INVALID_ARGUMENT;
5966 			break;
5967 		}
5968 
5969 		wait_state_info = (task_wait_state_info_t) task_info_out;
5970 
5971 		wait_state_info->total_wait_state_time = 0;
5972 		bzero(wait_state_info->_reserved, sizeof(wait_state_info->_reserved));
5973 
5974 #if CONFIG_SCHED_SFI
5975 		int i, prev_lentry = -1;
5976 		int64_t  val_credit, val_debit;
5977 
5978 		for (i = 0; i < MAX_SFI_CLASS_ID; i++) {
5979 			val_credit = 0;
5980 			/*
5981 			 * checking with prev_lentry != entry ensures adjacent classes
5982 			 * which share the same ledger do not add wait times twice.
5983 			 * Note: Use ledger() call to get data for each individual sfi class.
5984 			 */
5985 			if (prev_lentry != task_ledgers.sfi_wait_times[i] &&
5986 			    KERN_SUCCESS == ledger_get_entries(task->ledger,
5987 			    task_ledgers.sfi_wait_times[i], &val_credit, &val_debit)) {
5988 				total_sfi_ledger_val += val_credit;
5989 			}
5990 			prev_lentry = task_ledgers.sfi_wait_times[i];
5991 		}
5992 
5993 #endif /* CONFIG_SCHED_SFI */
5994 		wait_state_info->total_wait_sfi_state_time = total_sfi_ledger_val;
5995 		*task_info_count = TASK_WAIT_STATE_INFO_COUNT;
5996 
5997 		break;
5998 	}
5999 	case TASK_VM_INFO_PURGEABLE_ACCOUNT:
6000 	{
6001 #if DEVELOPMENT || DEBUG
6002 		pvm_account_info_t      acnt_info;
6003 
6004 		if (*task_info_count < PVM_ACCOUNT_INFO_COUNT) {
6005 			error = KERN_INVALID_ARGUMENT;
6006 			break;
6007 		}
6008 
6009 		if (task_info_out == NULL) {
6010 			error = KERN_INVALID_ARGUMENT;
6011 			break;
6012 		}
6013 
6014 		acnt_info = (pvm_account_info_t) task_info_out;
6015 
6016 		error = vm_purgeable_account(task, acnt_info);
6017 
6018 		*task_info_count = PVM_ACCOUNT_INFO_COUNT;
6019 
6020 		break;
6021 #else /* DEVELOPMENT || DEBUG */
6022 		error = KERN_NOT_SUPPORTED;
6023 		break;
6024 #endif /* DEVELOPMENT || DEBUG */
6025 	}
6026 	case TASK_FLAGS_INFO:
6027 	{
6028 		task_flags_info_t               flags_info;
6029 
6030 		if (*task_info_count < TASK_FLAGS_INFO_COUNT) {
6031 			error = KERN_INVALID_ARGUMENT;
6032 			break;
6033 		}
6034 
6035 		flags_info = (task_flags_info_t)task_info_out;
6036 
6037 		/* only publish the 64-bit flag of the task */
6038 		flags_info->flags = task->t_flags & (TF_64B_ADDR | TF_64B_DATA);
6039 
6040 		*task_info_count = TASK_FLAGS_INFO_COUNT;
6041 		break;
6042 	}
6043 
6044 	case TASK_DEBUG_INFO_INTERNAL:
6045 	{
6046 #if DEVELOPMENT || DEBUG
6047 		task_debug_info_internal_t dbg_info;
6048 		ipc_space_t space = task->itk_space;
6049 		if (*task_info_count < TASK_DEBUG_INFO_INTERNAL_COUNT) {
6050 			error = KERN_NOT_SUPPORTED;
6051 			break;
6052 		}
6053 
6054 		if (task_info_out == NULL) {
6055 			error = KERN_INVALID_ARGUMENT;
6056 			break;
6057 		}
6058 		dbg_info = (task_debug_info_internal_t) task_info_out;
6059 		dbg_info->ipc_space_size = 0;
6060 
6061 		if (space) {
6062 			smr_ipc_enter();
6063 			ipc_entry_table_t table = smr_entered_load(&space->is_table);
6064 			if (table) {
6065 				dbg_info->ipc_space_size =
6066 				    ipc_entry_table_count(table);
6067 			}
6068 			smr_ipc_leave();
6069 		}
6070 
6071 		dbg_info->suspend_count = task->suspend_count;
6072 
6073 		error = KERN_SUCCESS;
6074 		*task_info_count = TASK_DEBUG_INFO_INTERNAL_COUNT;
6075 		break;
6076 #else /* DEVELOPMENT || DEBUG */
6077 		error = KERN_NOT_SUPPORTED;
6078 		break;
6079 #endif /* DEVELOPMENT || DEBUG */
6080 	}
6081 	case TASK_SUSPEND_STATS_INFO:
6082 	{
6083 #if CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG)
6084 		if (*task_info_count < TASK_SUSPEND_STATS_INFO_COUNT || task_info_out == NULL) {
6085 			error = KERN_INVALID_ARGUMENT;
6086 			break;
6087 		}
6088 		error = _task_get_suspend_stats_locked(task, (task_suspend_stats_t)task_info_out);
6089 		*task_info_count = TASK_SUSPEND_STATS_INFO_COUNT;
6090 		break;
6091 #else /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6092 		error = KERN_NOT_SUPPORTED;
6093 		break;
6094 #endif /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6095 	}
6096 	case TASK_SUSPEND_SOURCES_INFO:
6097 	{
6098 #if CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG)
6099 		if (*task_info_count < TASK_SUSPEND_SOURCES_INFO_COUNT || task_info_out == NULL) {
6100 			error = KERN_INVALID_ARGUMENT;
6101 			break;
6102 		}
6103 		error = _task_get_suspend_sources_locked(task, (task_suspend_source_t)task_info_out);
6104 		*task_info_count = TASK_SUSPEND_SOURCES_INFO_COUNT;
6105 		break;
6106 #else /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6107 		error = KERN_NOT_SUPPORTED;
6108 		break;
6109 #endif /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6110 	}
6111 	default:
6112 		error = KERN_INVALID_ARGUMENT;
6113 	}
6114 
6115 	task_unlock(task);
6116 	return error;
6117 }
6118 
6119 /*
6120  * task_info_from_user
6121  *
6122  * When calling task_info from user space,
6123  * this function will be executed as mig server side
6124  * instead of calling directly into task_info.
6125  * This gives the possibility to perform more security
6126  * checks on task_port.
6127  *
6128  * In the case of TASK_DYLD_INFO, we require the more
6129  * privileged task_read_port not the less-privileged task_name_port.
6130  *
6131  */
6132 kern_return_t
task_info_from_user(mach_port_t task_port,task_flavor_t flavor,task_info_t task_info_out,mach_msg_type_number_t * task_info_count)6133 task_info_from_user(
6134 	mach_port_t             task_port,
6135 	task_flavor_t           flavor,
6136 	task_info_t             task_info_out,
6137 	mach_msg_type_number_t  *task_info_count)
6138 {
6139 	task_t task;
6140 	kern_return_t ret;
6141 
6142 	if (flavor == TASK_DYLD_INFO) {
6143 		task = convert_port_to_task_read(task_port);
6144 	} else {
6145 		task = convert_port_to_task_name(task_port);
6146 	}
6147 
6148 	ret = task_info(task, flavor, task_info_out, task_info_count);
6149 
6150 	task_deallocate(task);
6151 
6152 	return ret;
6153 }
6154 
6155 /*
6156  * Routine: task_dyld_process_info_update_helper
6157  *
6158  * Release send rights in release_ports.
6159  *
6160  * If no active ports found in task's dyld notifier array, unset the magic value
6161  * in user space to indicate so.
6162  *
6163  * Condition:
6164  *      task's itk_lock is locked, and is unlocked upon return.
6165  *      Global g_dyldinfo_mtx is locked, and is unlocked upon return.
6166  */
6167 void
task_dyld_process_info_update_helper(task_t task,size_t active_count,vm_map_address_t magic_addr,ipc_port_t * release_ports,size_t release_count)6168 task_dyld_process_info_update_helper(
6169 	task_t                  task,
6170 	size_t                  active_count,
6171 	vm_map_address_t        magic_addr,    /* a userspace address */
6172 	ipc_port_t             *release_ports,
6173 	size_t                  release_count)
6174 {
6175 	void *notifiers_ptr = NULL;
6176 
6177 	assert(release_count <= DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT);
6178 
6179 	if (active_count == 0) {
6180 		assert(task->itk_dyld_notify != NULL);
6181 		notifiers_ptr = task->itk_dyld_notify;
6182 		task->itk_dyld_notify = NULL;
6183 		itk_unlock(task);
6184 
6185 		kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
6186 		(void)copyoutmap_atomic32(task->map, MACH_PORT_NULL, magic_addr); /* unset magic */
6187 	} else {
6188 		itk_unlock(task);
6189 		(void)copyoutmap_atomic32(task->map, (mach_port_name_t)DYLD_PROCESS_INFO_NOTIFY_MAGIC,
6190 		    magic_addr);     /* reset magic */
6191 	}
6192 
6193 	lck_mtx_unlock(&g_dyldinfo_mtx);
6194 
6195 	for (size_t i = 0; i < release_count; i++) {
6196 		ipc_port_release_send(release_ports[i]);
6197 	}
6198 }
6199 
6200 /*
6201  * Routine: task_dyld_process_info_notify_register
6202  *
6203  * Insert a send right to target task's itk_dyld_notify array. Allocate kernel
6204  * memory for the array if it's the first port to be registered. Also cleanup
6205  * any dead rights found in the array.
6206  *
6207  * Consumes sright if returns KERN_SUCCESS, otherwise MIG will destroy it.
6208  *
6209  * Args:
6210  *     task:   Target task for the registration.
6211  *     sright: A send right.
6212  *
6213  * Returns:
6214  *     KERN_SUCCESS: Registration succeeded.
6215  *     KERN_INVALID_TASK: task is invalid.
6216  *     KERN_INVALID_RIGHT: sright is invalid.
6217  *     KERN_DENIED: Security policy denied this call.
6218  *     KERN_RESOURCE_SHORTAGE: Kernel memory allocation failed.
6219  *     KERN_NO_SPACE: No available notifier port slot left for this task.
6220  *     KERN_RIGHT_EXISTS: The notifier port is already registered and active.
6221  *
6222  *     Other error code see task_info().
6223  *
6224  * See Also:
6225  *     task_dyld_process_info_notify_get_trap() in mach_kernelrpc.c
6226  */
6227 kern_return_t
task_dyld_process_info_notify_register(task_t task,ipc_port_t sright)6228 task_dyld_process_info_notify_register(
6229 	task_t                  task,
6230 	ipc_port_t              sright)
6231 {
6232 	struct task_dyld_info dyld_info;
6233 	mach_msg_type_number_t info_count = TASK_DYLD_INFO_COUNT;
6234 	ipc_port_t release_ports[DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT];
6235 	uint32_t release_count = 0, active_count = 0;
6236 	mach_vm_address_t ports_addr; /* a user space address */
6237 	kern_return_t kr;
6238 	boolean_t right_exists = false;
6239 	ipc_port_t *notifiers_ptr = NULL;
6240 	ipc_port_t *portp;
6241 
6242 	if (task == TASK_NULL || task == kernel_task) {
6243 		return KERN_INVALID_TASK;
6244 	}
6245 
6246 	if (!IP_VALID(sright)) {
6247 		return KERN_INVALID_RIGHT;
6248 	}
6249 
6250 #if CONFIG_MACF
6251 	if (mac_task_check_dyld_process_info_notify_register()) {
6252 		return KERN_DENIED;
6253 	}
6254 #endif
6255 
6256 	kr = task_info(task, TASK_DYLD_INFO, (task_info_t)&dyld_info, &info_count);
6257 	if (kr) {
6258 		return kr;
6259 	}
6260 
6261 	if (dyld_info.all_image_info_format == TASK_DYLD_ALL_IMAGE_INFO_32) {
6262 		ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6263 		    offsetof(struct user32_dyld_all_image_infos, notifyMachPorts));
6264 	} else {
6265 		ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6266 		    offsetof(struct user64_dyld_all_image_infos, notifyMachPorts));
6267 	}
6268 
6269 	if (task->itk_dyld_notify == NULL) {
6270 		notifiers_ptr = kalloc_type(ipc_port_t,
6271 		    DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT,
6272 		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
6273 	}
6274 
6275 	lck_mtx_lock(&g_dyldinfo_mtx);
6276 	itk_lock(task);
6277 
6278 	if (task->itk_dyld_notify == NULL) {
6279 		task->itk_dyld_notify = notifiers_ptr;
6280 		notifiers_ptr = NULL;
6281 	}
6282 
6283 	assert(task->itk_dyld_notify != NULL);
6284 	/* First pass: clear dead names and check for duplicate registration */
6285 	for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
6286 		portp = &task->itk_dyld_notify[slot];
6287 		if (*portp != IPC_PORT_NULL && !ip_active(*portp)) {
6288 			release_ports[release_count++] = *portp;
6289 			*portp = IPC_PORT_NULL;
6290 		} else if (*portp == sright) {
6291 			/* the port is already registered and is active */
6292 			right_exists = true;
6293 		}
6294 
6295 		if (*portp != IPC_PORT_NULL) {
6296 			active_count++;
6297 		}
6298 	}
6299 
6300 	if (right_exists) {
6301 		/* skip second pass */
6302 		kr = KERN_RIGHT_EXISTS;
6303 		goto out;
6304 	}
6305 
6306 	/* Second pass: register the port */
6307 	kr = KERN_NO_SPACE;
6308 	for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
6309 		portp = &task->itk_dyld_notify[slot];
6310 		if (*portp == IPC_PORT_NULL) {
6311 			*portp = sright;
6312 			active_count++;
6313 			kr = KERN_SUCCESS;
6314 			break;
6315 		}
6316 	}
6317 
6318 out:
6319 	assert(active_count > 0);
6320 
6321 	task_dyld_process_info_update_helper(task, active_count,
6322 	    (vm_map_address_t)ports_addr, release_ports, release_count);
6323 	/* itk_lock, g_dyldinfo_mtx are unlocked upon return */
6324 
6325 	kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
6326 
6327 	return kr;
6328 }
6329 
6330 /*
6331  * Routine: task_dyld_process_info_notify_deregister
6332  *
6333  * Remove a send right in target task's itk_dyld_notify array matching the receive
6334  * right name passed in. Deallocate kernel memory for the array if it's the last port to
6335  * be deregistered, or all ports have died. Also cleanup any dead rights found in the array.
6336  *
6337  * Does not consume any reference.
6338  *
6339  * Args:
6340  *     task: Target task for the deregistration.
6341  *     rcv_name: The name denoting the receive right in caller's space.
6342  *
6343  * Returns:
6344  *     KERN_SUCCESS: A matching entry found and degistration succeeded.
6345  *     KERN_INVALID_TASK: task is invalid.
6346  *     KERN_INVALID_NAME: name is invalid.
6347  *     KERN_DENIED: Security policy denied this call.
6348  *     KERN_FAILURE: A matching entry is not found.
6349  *     KERN_INVALID_RIGHT: The name passed in does not represent a valid rcv right.
6350  *
6351  *     Other error code see task_info().
6352  *
6353  * See Also:
6354  *     task_dyld_process_info_notify_get_trap() in mach_kernelrpc.c
6355  */
6356 kern_return_t
task_dyld_process_info_notify_deregister(task_t task,mach_port_name_t rcv_name)6357 task_dyld_process_info_notify_deregister(
6358 	task_t                  task,
6359 	mach_port_name_t        rcv_name)
6360 {
6361 	struct task_dyld_info dyld_info;
6362 	mach_msg_type_number_t info_count = TASK_DYLD_INFO_COUNT;
6363 	ipc_port_t release_ports[DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT];
6364 	uint32_t release_count = 0, active_count = 0;
6365 	boolean_t port_found = false;
6366 	mach_vm_address_t ports_addr; /* a user space address */
6367 	ipc_port_t sright;
6368 	kern_return_t kr;
6369 	ipc_port_t *portp;
6370 
6371 	if (task == TASK_NULL || task == kernel_task) {
6372 		return KERN_INVALID_TASK;
6373 	}
6374 
6375 	if (!MACH_PORT_VALID(rcv_name)) {
6376 		return KERN_INVALID_NAME;
6377 	}
6378 
6379 #if CONFIG_MACF
6380 	if (mac_task_check_dyld_process_info_notify_register()) {
6381 		return KERN_DENIED;
6382 	}
6383 #endif
6384 
6385 	kr = task_info(task, TASK_DYLD_INFO, (task_info_t)&dyld_info, &info_count);
6386 	if (kr) {
6387 		return kr;
6388 	}
6389 
6390 	if (dyld_info.all_image_info_format == TASK_DYLD_ALL_IMAGE_INFO_32) {
6391 		ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6392 		    offsetof(struct user32_dyld_all_image_infos, notifyMachPorts));
6393 	} else {
6394 		ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6395 		    offsetof(struct user64_dyld_all_image_infos, notifyMachPorts));
6396 	}
6397 
6398 	kr = ipc_port_translate_receive(current_space(), rcv_name, &sright); /* does not produce port ref */
6399 	if (kr) {
6400 		return KERN_INVALID_RIGHT;
6401 	}
6402 
6403 	ip_reference(sright);
6404 	ip_mq_unlock(sright);
6405 
6406 	assert(sright != IPC_PORT_NULL);
6407 
6408 	lck_mtx_lock(&g_dyldinfo_mtx);
6409 	itk_lock(task);
6410 
6411 	if (task->itk_dyld_notify == NULL) {
6412 		itk_unlock(task);
6413 		lck_mtx_unlock(&g_dyldinfo_mtx);
6414 		ip_release(sright);
6415 		return KERN_FAILURE;
6416 	}
6417 
6418 	for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
6419 		portp = &task->itk_dyld_notify[slot];
6420 		if (*portp == sright) {
6421 			release_ports[release_count++] = *portp;
6422 			*portp = IPC_PORT_NULL;
6423 			port_found = true;
6424 		} else if ((*portp != IPC_PORT_NULL && !ip_active(*portp))) {
6425 			release_ports[release_count++] = *portp;
6426 			*portp = IPC_PORT_NULL;
6427 		}
6428 
6429 		if (*portp != IPC_PORT_NULL) {
6430 			active_count++;
6431 		}
6432 	}
6433 
6434 	task_dyld_process_info_update_helper(task, active_count,
6435 	    (vm_map_address_t)ports_addr, release_ports, release_count);
6436 	/* itk_lock, g_dyldinfo_mtx are unlocked upon return */
6437 
6438 	ip_release(sright);
6439 
6440 	return port_found ? KERN_SUCCESS : KERN_FAILURE;
6441 }
6442 
6443 /*
6444  *	task_power_info
6445  *
6446  *	Returns power stats for the task.
6447  *	Note: Called with task locked.
6448  */
6449 void
task_power_info_locked(task_t task,task_power_info_t info,gpu_energy_data_t ginfo,task_power_info_v2_t infov2,struct task_power_info_extra * extra_info)6450 task_power_info_locked(
6451 	task_t                        task,
6452 	task_power_info_t             info,
6453 	gpu_energy_data_t             ginfo,
6454 	task_power_info_v2_t          infov2,
6455 	struct task_power_info_extra *extra_info)
6456 {
6457 	thread_t                thread;
6458 	ledger_amount_t         tmp;
6459 
6460 	uint64_t                runnable_time_sum = 0;
6461 
6462 	task_lock_assert_owned(task);
6463 
6464 	ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
6465 	    (ledger_amount_t *)&info->task_interrupt_wakeups, &tmp);
6466 	ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
6467 	    (ledger_amount_t *)&info->task_platform_idle_wakeups, &tmp);
6468 
6469 	info->task_timer_wakeups_bin_1 = task->task_timer_wakeups_bin_1;
6470 	info->task_timer_wakeups_bin_2 = task->task_timer_wakeups_bin_2;
6471 
6472 	struct recount_usage usage = { 0 };
6473 	struct recount_usage usage_perf = { 0 };
6474 	recount_task_usage_perf_only(task, &usage, &usage_perf);
6475 
6476 	info->total_user = usage.ru_user_time_mach;
6477 	info->total_system = usage.ru_system_time_mach;
6478 	runnable_time_sum = task->total_runnable_time;
6479 
6480 	if (ginfo) {
6481 		ginfo->task_gpu_utilisation = task->task_gpu_ns;
6482 	}
6483 
6484 	if (infov2) {
6485 		infov2->task_ptime = usage_perf.ru_system_time_mach +
6486 		    usage_perf.ru_user_time_mach;
6487 		infov2->task_pset_switches = task->ps_switch;
6488 #if CONFIG_PERVASIVE_ENERGY
6489 		infov2->task_energy = usage.ru_energy_nj;
6490 #endif /* CONFIG_PERVASIVE_ENERGY */
6491 	}
6492 
6493 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
6494 		spl_t x;
6495 
6496 		if (thread->options & TH_OPT_IDLE_THREAD) {
6497 			continue;
6498 		}
6499 
6500 		x = splsched();
6501 		thread_lock(thread);
6502 
6503 		info->task_timer_wakeups_bin_1 += thread->thread_timer_wakeups_bin_1;
6504 		info->task_timer_wakeups_bin_2 += thread->thread_timer_wakeups_bin_2;
6505 
6506 		if (infov2) {
6507 			infov2->task_pset_switches += thread->ps_switch;
6508 		}
6509 
6510 		runnable_time_sum += timer_grab(&thread->runnable_timer);
6511 
6512 		if (ginfo) {
6513 			ginfo->task_gpu_utilisation += ml_gpu_stat(thread);
6514 		}
6515 		thread_unlock(thread);
6516 		splx(x);
6517 	}
6518 
6519 	if (extra_info) {
6520 		extra_info->runnable_time = runnable_time_sum;
6521 #if CONFIG_PERVASIVE_CPI
6522 		extra_info->cycles = usage.ru_cycles;
6523 		extra_info->instructions = usage.ru_instructions;
6524 		extra_info->pcycles = usage_perf.ru_cycles;
6525 		extra_info->pinstructions = usage_perf.ru_instructions;
6526 		extra_info->user_ptime = usage_perf.ru_user_time_mach;
6527 		extra_info->system_ptime = usage_perf.ru_system_time_mach;
6528 #endif // CONFIG_PERVASIVE_CPI
6529 #if CONFIG_PERVASIVE_ENERGY
6530 		extra_info->energy = usage.ru_energy_nj;
6531 		extra_info->penergy = usage_perf.ru_energy_nj;
6532 #endif // CONFIG_PERVASIVE_ENERGY
6533 	}
6534 }
6535 
6536 /*
6537  *	task_gpu_utilisation
6538  *
6539  *	Returns the total gpu time used by the all the threads of the task
6540  *  (both dead and alive)
6541  */
6542 uint64_t
task_gpu_utilisation(task_t task)6543 task_gpu_utilisation(
6544 	task_t  task)
6545 {
6546 	uint64_t gpu_time = 0;
6547 #if defined(__x86_64__)
6548 	thread_t thread;
6549 
6550 	task_lock(task);
6551 	gpu_time += task->task_gpu_ns;
6552 
6553 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
6554 		spl_t x;
6555 		x = splsched();
6556 		thread_lock(thread);
6557 		gpu_time += ml_gpu_stat(thread);
6558 		thread_unlock(thread);
6559 		splx(x);
6560 	}
6561 
6562 	task_unlock(task);
6563 #else /* defined(__x86_64__) */
6564 	/* silence compiler warning */
6565 	(void)task;
6566 #endif /* defined(__x86_64__) */
6567 	return gpu_time;
6568 }
6569 
6570 /* This function updates the cpu time in the arrays for each
6571  * effective and requested QoS class
6572  */
6573 void
task_update_cpu_time_qos_stats(task_t task,uint64_t * eqos_stats,uint64_t * rqos_stats)6574 task_update_cpu_time_qos_stats(
6575 	task_t  task,
6576 	uint64_t *eqos_stats,
6577 	uint64_t *rqos_stats)
6578 {
6579 	if (!eqos_stats && !rqos_stats) {
6580 		return;
6581 	}
6582 
6583 	task_lock(task);
6584 	thread_t thread;
6585 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
6586 		if (thread->options & TH_OPT_IDLE_THREAD) {
6587 			continue;
6588 		}
6589 
6590 		thread_update_qos_cpu_time(thread);
6591 	}
6592 
6593 	if (eqos_stats) {
6594 		eqos_stats[THREAD_QOS_DEFAULT] += task->cpu_time_eqos_stats.cpu_time_qos_default;
6595 		eqos_stats[THREAD_QOS_MAINTENANCE] += task->cpu_time_eqos_stats.cpu_time_qos_maintenance;
6596 		eqos_stats[THREAD_QOS_BACKGROUND] += task->cpu_time_eqos_stats.cpu_time_qos_background;
6597 		eqos_stats[THREAD_QOS_UTILITY] += task->cpu_time_eqos_stats.cpu_time_qos_utility;
6598 		eqos_stats[THREAD_QOS_LEGACY] += task->cpu_time_eqos_stats.cpu_time_qos_legacy;
6599 		eqos_stats[THREAD_QOS_USER_INITIATED] += task->cpu_time_eqos_stats.cpu_time_qos_user_initiated;
6600 		eqos_stats[THREAD_QOS_USER_INTERACTIVE] += task->cpu_time_eqos_stats.cpu_time_qos_user_interactive;
6601 	}
6602 
6603 	if (rqos_stats) {
6604 		rqos_stats[THREAD_QOS_DEFAULT] += task->cpu_time_rqos_stats.cpu_time_qos_default;
6605 		rqos_stats[THREAD_QOS_MAINTENANCE] += task->cpu_time_rqos_stats.cpu_time_qos_maintenance;
6606 		rqos_stats[THREAD_QOS_BACKGROUND] += task->cpu_time_rqos_stats.cpu_time_qos_background;
6607 		rqos_stats[THREAD_QOS_UTILITY] += task->cpu_time_rqos_stats.cpu_time_qos_utility;
6608 		rqos_stats[THREAD_QOS_LEGACY] += task->cpu_time_rqos_stats.cpu_time_qos_legacy;
6609 		rqos_stats[THREAD_QOS_USER_INITIATED] += task->cpu_time_rqos_stats.cpu_time_qos_user_initiated;
6610 		rqos_stats[THREAD_QOS_USER_INTERACTIVE] += task->cpu_time_rqos_stats.cpu_time_qos_user_interactive;
6611 	}
6612 
6613 	task_unlock(task);
6614 }
6615 
6616 kern_return_t
task_purgable_info(task_t task,task_purgable_info_t * stats)6617 task_purgable_info(
6618 	task_t                  task,
6619 	task_purgable_info_t    *stats)
6620 {
6621 	if (task == TASK_NULL || stats == NULL) {
6622 		return KERN_INVALID_ARGUMENT;
6623 	}
6624 	/* Take task reference */
6625 	task_reference(task);
6626 	vm_purgeable_stats((vm_purgeable_info_t)stats, task);
6627 	/* Drop task reference */
6628 	task_deallocate(task);
6629 	return KERN_SUCCESS;
6630 }
6631 
6632 void
task_vtimer_set(task_t task,integer_t which)6633 task_vtimer_set(
6634 	task_t          task,
6635 	integer_t       which)
6636 {
6637 	thread_t        thread;
6638 	spl_t           x;
6639 
6640 	task_lock(task);
6641 
6642 	task->vtimers |= which;
6643 
6644 	switch (which) {
6645 	case TASK_VTIMER_USER:
6646 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
6647 			x = splsched();
6648 			thread_lock(thread);
6649 			struct recount_times_mach times = recount_thread_times(thread);
6650 			thread->vtimer_user_save = times.rtm_user;
6651 			thread_unlock(thread);
6652 			splx(x);
6653 		}
6654 		break;
6655 
6656 	case TASK_VTIMER_PROF:
6657 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
6658 			x = splsched();
6659 			thread_lock(thread);
6660 			thread->vtimer_prof_save = recount_thread_time_mach(thread);
6661 			thread_unlock(thread);
6662 			splx(x);
6663 		}
6664 		break;
6665 
6666 	case TASK_VTIMER_RLIM:
6667 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
6668 			x = splsched();
6669 			thread_lock(thread);
6670 			thread->vtimer_rlim_save = recount_thread_time_mach(thread);
6671 			thread_unlock(thread);
6672 			splx(x);
6673 		}
6674 		break;
6675 	}
6676 
6677 	task_unlock(task);
6678 }
6679 
6680 void
task_vtimer_clear(task_t task,integer_t which)6681 task_vtimer_clear(
6682 	task_t          task,
6683 	integer_t       which)
6684 {
6685 	task_lock(task);
6686 
6687 	task->vtimers &= ~which;
6688 
6689 	task_unlock(task);
6690 }
6691 
6692 void
task_vtimer_update(__unused task_t task,integer_t which,uint32_t * microsecs)6693 task_vtimer_update(
6694 	__unused
6695 	task_t          task,
6696 	integer_t       which,
6697 	uint32_t        *microsecs)
6698 {
6699 	thread_t        thread = current_thread();
6700 	uint32_t        tdelt = 0;
6701 	clock_sec_t     secs = 0;
6702 	uint64_t        tsum;
6703 
6704 	assert(task == current_task());
6705 
6706 	spl_t s = splsched();
6707 	thread_lock(thread);
6708 
6709 	if ((task->vtimers & which) != (uint32_t)which) {
6710 		thread_unlock(thread);
6711 		splx(s);
6712 		return;
6713 	}
6714 
6715 	switch (which) {
6716 	case TASK_VTIMER_USER:;
6717 		struct recount_times_mach times = recount_thread_times(thread);
6718 		tsum = times.rtm_user;
6719 		tdelt = (uint32_t)(tsum - thread->vtimer_user_save);
6720 		thread->vtimer_user_save = tsum;
6721 		absolutetime_to_microtime(tdelt, &secs, microsecs);
6722 		break;
6723 
6724 	case TASK_VTIMER_PROF:
6725 		tsum = recount_current_thread_time_mach();
6726 		tdelt = (uint32_t)(tsum - thread->vtimer_prof_save);
6727 		absolutetime_to_microtime(tdelt, &secs, microsecs);
6728 		/* if the time delta is smaller than a usec, ignore */
6729 		if (*microsecs != 0) {
6730 			thread->vtimer_prof_save = tsum;
6731 		}
6732 		break;
6733 
6734 	case TASK_VTIMER_RLIM:
6735 		tsum = recount_current_thread_time_mach();
6736 		tdelt = (uint32_t)(tsum - thread->vtimer_rlim_save);
6737 		thread->vtimer_rlim_save = tsum;
6738 		absolutetime_to_microtime(tdelt, &secs, microsecs);
6739 		break;
6740 	}
6741 
6742 	thread_unlock(thread);
6743 	splx(s);
6744 }
6745 
6746 uint64_t
get_task_dispatchqueue_offset(task_t task)6747 get_task_dispatchqueue_offset(
6748 	task_t          task)
6749 {
6750 	return task->dispatchqueue_offset;
6751 }
6752 
6753 void
task_synchronizer_destroy_all(task_t task)6754 task_synchronizer_destroy_all(task_t task)
6755 {
6756 	/*
6757 	 *  Destroy owned semaphores
6758 	 */
6759 	semaphore_destroy_all(task);
6760 }
6761 
6762 /*
6763  * Install default (machine-dependent) initial thread state
6764  * on the task.  Subsequent thread creation will have this initial
6765  * state set on the thread by machine_thread_inherit_taskwide().
6766  * Flavors and structures are exactly the same as those to thread_set_state()
6767  */
6768 kern_return_t
task_set_state(task_t task,int flavor,thread_state_t state,mach_msg_type_number_t state_count)6769 task_set_state(
6770 	task_t task,
6771 	int flavor,
6772 	thread_state_t state,
6773 	mach_msg_type_number_t state_count)
6774 {
6775 	kern_return_t ret;
6776 
6777 	if (task == TASK_NULL) {
6778 		return KERN_INVALID_ARGUMENT;
6779 	}
6780 
6781 	task_lock(task);
6782 
6783 	if (!task->active) {
6784 		task_unlock(task);
6785 		return KERN_FAILURE;
6786 	}
6787 
6788 	ret = machine_task_set_state(task, flavor, state, state_count);
6789 
6790 	task_unlock(task);
6791 	return ret;
6792 }
6793 
6794 /*
6795  * Examine the default (machine-dependent) initial thread state
6796  * on the task, as set by task_set_state().  Flavors and structures
6797  * are exactly the same as those passed to thread_get_state().
6798  */
6799 kern_return_t
task_get_state(task_t task,int flavor,thread_state_t state,mach_msg_type_number_t * state_count)6800 task_get_state(
6801 	task_t  task,
6802 	int     flavor,
6803 	thread_state_t state,
6804 	mach_msg_type_number_t *state_count)
6805 {
6806 	kern_return_t ret;
6807 
6808 	if (task == TASK_NULL) {
6809 		return KERN_INVALID_ARGUMENT;
6810 	}
6811 
6812 	task_lock(task);
6813 
6814 	if (!task->active) {
6815 		task_unlock(task);
6816 		return KERN_FAILURE;
6817 	}
6818 
6819 	ret = machine_task_get_state(task, flavor, state, state_count);
6820 
6821 	task_unlock(task);
6822 	return ret;
6823 }
6824 
6825 
6826 static kern_return_t __attribute__((noinline, not_tail_called))
PROC_VIOLATED_GUARD__SEND_EXC_GUARD(mach_exception_code_t code,mach_exception_subcode_t subcode,void * reason,boolean_t backtrace_only)6827 PROC_VIOLATED_GUARD__SEND_EXC_GUARD(
6828 	mach_exception_code_t code,
6829 	mach_exception_subcode_t subcode,
6830 	void *reason,
6831 	boolean_t backtrace_only)
6832 {
6833 #ifdef MACH_BSD
6834 	if (1 == proc_selfpid()) {
6835 		return KERN_NOT_SUPPORTED;              // initproc is immune
6836 	}
6837 #endif
6838 	mach_exception_data_type_t codes[EXCEPTION_CODE_MAX] = {
6839 		[0] = code,
6840 		[1] = subcode,
6841 	};
6842 	task_t task = current_task();
6843 	kern_return_t kr;
6844 	void *bsd_info = get_bsdtask_info(task);
6845 
6846 	/* (See jetsam-related comments below) */
6847 
6848 	proc_memstat_skip(bsd_info, TRUE);
6849 	kr = task_enqueue_exception_with_corpse(task, EXC_GUARD, codes, 2, reason, backtrace_only);
6850 	proc_memstat_skip(bsd_info, FALSE);
6851 	return kr;
6852 }
6853 
6854 kern_return_t
task_violated_guard(mach_exception_code_t code,mach_exception_subcode_t subcode,void * reason,bool backtrace_only)6855 task_violated_guard(
6856 	mach_exception_code_t code,
6857 	mach_exception_subcode_t subcode,
6858 	void *reason,
6859 	bool backtrace_only)
6860 {
6861 	return PROC_VIOLATED_GUARD__SEND_EXC_GUARD(code, subcode, reason, backtrace_only);
6862 }
6863 
6864 
6865 #if CONFIG_MEMORYSTATUS
6866 
6867 boolean_t
task_get_memlimit_is_active(task_t task)6868 task_get_memlimit_is_active(task_t task)
6869 {
6870 	assert(task != NULL);
6871 
6872 	if (task->memlimit_is_active == 1) {
6873 		return TRUE;
6874 	} else {
6875 		return FALSE;
6876 	}
6877 }
6878 
6879 void
task_set_memlimit_is_active(task_t task,boolean_t memlimit_is_active)6880 task_set_memlimit_is_active(task_t task, boolean_t memlimit_is_active)
6881 {
6882 	assert(task != NULL);
6883 
6884 	if (memlimit_is_active) {
6885 		task->memlimit_is_active = 1;
6886 	} else {
6887 		task->memlimit_is_active = 0;
6888 	}
6889 }
6890 
6891 boolean_t
task_get_memlimit_is_fatal(task_t task)6892 task_get_memlimit_is_fatal(task_t task)
6893 {
6894 	assert(task != NULL);
6895 
6896 	if (task->memlimit_is_fatal == 1) {
6897 		return TRUE;
6898 	} else {
6899 		return FALSE;
6900 	}
6901 }
6902 
6903 void
task_set_memlimit_is_fatal(task_t task,boolean_t memlimit_is_fatal)6904 task_set_memlimit_is_fatal(task_t task, boolean_t memlimit_is_fatal)
6905 {
6906 	assert(task != NULL);
6907 
6908 	if (memlimit_is_fatal) {
6909 		task->memlimit_is_fatal = 1;
6910 	} else {
6911 		task->memlimit_is_fatal = 0;
6912 	}
6913 }
6914 
6915 uint64_t
task_get_dirty_start(task_t task)6916 task_get_dirty_start(task_t task)
6917 {
6918 	return task->memstat_dirty_start;
6919 }
6920 
6921 void
task_set_dirty_start(task_t task,uint64_t start)6922 task_set_dirty_start(task_t task, uint64_t start)
6923 {
6924 	task_lock(task);
6925 	task->memstat_dirty_start = start;
6926 	task_unlock(task);
6927 }
6928 
6929 boolean_t
task_has_triggered_exc_resource(task_t task,boolean_t memlimit_is_active)6930 task_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active)
6931 {
6932 	boolean_t triggered = FALSE;
6933 
6934 	assert(task == current_task());
6935 
6936 	/*
6937 	 * Returns true, if task has already triggered an exc_resource exception.
6938 	 */
6939 
6940 	if (memlimit_is_active) {
6941 		triggered = (task->memlimit_active_exc_resource ? TRUE : FALSE);
6942 	} else {
6943 		triggered = (task->memlimit_inactive_exc_resource ? TRUE : FALSE);
6944 	}
6945 
6946 	return triggered;
6947 }
6948 
6949 void
task_mark_has_triggered_exc_resource(task_t task,boolean_t memlimit_is_active)6950 task_mark_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active)
6951 {
6952 	assert(task == current_task());
6953 
6954 	/*
6955 	 * We allow one exc_resource per process per active/inactive limit.
6956 	 * The limit's fatal attribute does not come into play.
6957 	 */
6958 
6959 	if (memlimit_is_active) {
6960 		task->memlimit_active_exc_resource = 1;
6961 	} else {
6962 		task->memlimit_inactive_exc_resource = 1;
6963 	}
6964 }
6965 
6966 #define HWM_USERCORE_MINSPACE 250 // free space (in MB) required *after* core file creation
6967 
6968 void __attribute__((noinline))
PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb,send_exec_resource_options_t exception_options)6969 PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, send_exec_resource_options_t exception_options)
6970 {
6971 	task_t                                          task            = current_task();
6972 	int                                                     pid         = 0;
6973 	const char                                      *procname       = "unknown";
6974 	mach_exception_data_type_t      code[EXCEPTION_CODE_MAX];
6975 	boolean_t send_sync_exc_resource = FALSE;
6976 	void *cur_bsd_info = get_bsdtask_info(current_task());
6977 
6978 #ifdef MACH_BSD
6979 	pid = proc_selfpid();
6980 
6981 	if (pid == 1) {
6982 		/*
6983 		 * Cannot have ReportCrash analyzing
6984 		 * a suspended initproc.
6985 		 */
6986 		return;
6987 	}
6988 
6989 	if (cur_bsd_info != NULL) {
6990 		procname = proc_name_address(cur_bsd_info);
6991 		send_sync_exc_resource = proc_send_synchronous_EXC_RESOURCE(cur_bsd_info);
6992 	}
6993 #endif
6994 #if CONFIG_COREDUMP
6995 	if (hwm_user_cores) {
6996 		int                             error;
6997 		uint64_t                starttime, end;
6998 		clock_sec_t             secs = 0;
6999 		uint32_t                microsecs = 0;
7000 
7001 		starttime = mach_absolute_time();
7002 		/*
7003 		 * Trigger a coredump of this process. Don't proceed unless we know we won't
7004 		 * be filling up the disk; and ignore the core size resource limit for this
7005 		 * core file.
7006 		 */
7007 		if ((error = coredump(cur_bsd_info, HWM_USERCORE_MINSPACE, COREDUMP_IGNORE_ULIMIT)) != 0) {
7008 			printf("couldn't take coredump of %s[%d]: %d\n", procname, pid, error);
7009 		}
7010 		/*
7011 		 * coredump() leaves the task suspended.
7012 		 */
7013 		task_resume_internal(current_task());
7014 
7015 		end = mach_absolute_time();
7016 		absolutetime_to_microtime(end - starttime, &secs, &microsecs);
7017 		printf("coredump of %s[%d] taken in %d secs %d microsecs\n",
7018 		    proc_name_address(cur_bsd_info), pid, (int)secs, microsecs);
7019 	}
7020 #endif /* CONFIG_COREDUMP */
7021 
7022 	if (disable_exc_resource) {
7023 		printf("process %s[%d] crossed memory high watermark (%d MB); EXC_RESOURCE "
7024 		    "suppressed by a boot-arg.\n", procname, pid, max_footprint_mb);
7025 		return;
7026 	}
7027 	printf("process %s [%d] crossed memory %s (%d MB); EXC_RESOURCE "
7028 	    "\n", procname, pid, (!(exception_options & EXEC_RESOURCE_DIAGNOSTIC) ? "high watermark" : "diagnostics limit"), max_footprint_mb);
7029 
7030 	/*
7031 	 * A task that has triggered an EXC_RESOURCE, should not be
7032 	 * jetsammed when the device is under memory pressure.  Here
7033 	 * we set the P_MEMSTAT_SKIP flag so that the process
7034 	 * will be skipped if the memorystatus_thread wakes up.
7035 	 *
7036 	 * This is a debugging aid to ensure we can get a corpse before
7037 	 * the jetsam thread kills the process.
7038 	 * Note that proc_memstat_skip is a no-op on release kernels.
7039 	 */
7040 	proc_memstat_skip(cur_bsd_info, TRUE);
7041 
7042 	code[0] = code[1] = 0;
7043 	EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_MEMORY);
7044 	/*
7045 	 * Regardless if there was a diag memlimit violation, fatal exceptions shall be notified always
7046 	 * as high level watermaks. In another words, if there was a diag limit and a watermark, and the
7047 	 * violation if for limit watermark, a watermark shall be reported.
7048 	 */
7049 	if (!(exception_options & EXEC_RESOURCE_FATAL)) {
7050 		EXC_RESOURCE_ENCODE_FLAVOR(code[0], !(exception_options & EXEC_RESOURCE_DIAGNOSTIC)  ? FLAVOR_HIGH_WATERMARK : FLAVOR_DIAG_MEMLIMIT);
7051 	} else {
7052 		EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_HIGH_WATERMARK );
7053 	}
7054 	EXC_RESOURCE_HWM_ENCODE_LIMIT(code[0], max_footprint_mb);
7055 	/*
7056 	 * Do not generate a corpse fork if the violation is a fatal one
7057 	 * or the process wants synchronous EXC_RESOURCE exceptions.
7058 	 */
7059 	if ((exception_options & EXEC_RESOURCE_FATAL) || send_sync_exc_resource || !exc_via_corpse_forking) {
7060 		if (exception_options & EXEC_RESOURCE_FATAL) {
7061 			vm_map_set_corpse_source(task->map);
7062 		}
7063 
7064 		/* Do not send a EXC_RESOURCE if corpse_for_fatal_memkill is set */
7065 		if (send_sync_exc_resource || !corpse_for_fatal_memkill) {
7066 			/*
7067 			 * Use the _internal_ variant so that no user-space
7068 			 * process can resume our task from under us.
7069 			 */
7070 			task_suspend_internal(task);
7071 			exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
7072 			task_resume_internal(task);
7073 		}
7074 	} else {
7075 		if (disable_exc_resource_during_audio && audio_active) {
7076 			printf("process %s[%d] crossed memory high watermark (%d MB); EXC_RESOURCE "
7077 			    "suppressed due to audio playback.\n", procname, pid, max_footprint_mb);
7078 		} else {
7079 			task_enqueue_exception_with_corpse(task, EXC_RESOURCE,
7080 			    code, EXCEPTION_CODE_MAX, NULL, FALSE);
7081 		}
7082 	}
7083 
7084 	/*
7085 	 * After the EXC_RESOURCE has been handled, we must clear the
7086 	 * P_MEMSTAT_SKIP flag so that the process can again be
7087 	 * considered for jetsam if the memorystatus_thread wakes up.
7088 	 */
7089 	proc_memstat_skip(cur_bsd_info, FALSE);         /* clear the flag */
7090 }
7091 /*
7092  * Callback invoked when a task exceeds its physical footprint limit.
7093  */
7094 void
task_footprint_exceeded(int warning,__unused const void * param0,__unused const void * param1)7095 task_footprint_exceeded(int warning, __unused const void *param0, __unused const void *param1)
7096 {
7097 	ledger_amount_t max_footprint = 0;
7098 	ledger_amount_t max_footprint_mb = 0;
7099 #if DEBUG || DEVELOPMENT
7100 	ledger_amount_t diag_threshold_limit_mb = 0;
7101 	ledger_amount_t diag_threshold_limit = 0;
7102 #endif
7103 #if CONFIG_DEFERRED_RECLAIM
7104 	ledger_amount_t current_footprint;
7105 #endif /* CONFIG_DEFERRED_RECLAIM */
7106 	task_t task;
7107 	send_exec_resource_is_warning is_warning = IS_NOT_WARNING;
7108 	boolean_t memlimit_is_active;
7109 	send_exec_resource_is_fatal memlimit_is_fatal;
7110 	send_exec_resource_is_diagnostics is_diag_mem_threshold = IS_NOT_DIAGNOSTICS;
7111 	if (warning == LEDGER_WARNING_DIAG_MEM_THRESHOLD) {
7112 		is_diag_mem_threshold = IS_DIAGNOSTICS;
7113 		is_warning = IS_WARNING;
7114 	} else if (warning == LEDGER_WARNING_DIPPED_BELOW) {
7115 		/*
7116 		 * Task memory limits only provide a warning on the way up.
7117 		 */
7118 		return;
7119 	} else if (warning == LEDGER_WARNING_ROSE_ABOVE) {
7120 		/*
7121 		 * This task is in danger of violating a memory limit,
7122 		 * It has exceeded a percentage level of the limit.
7123 		 */
7124 		is_warning = IS_WARNING;
7125 	} else {
7126 		/*
7127 		 * The task has exceeded the physical footprint limit.
7128 		 * This is not a warning but a true limit violation.
7129 		 */
7130 		is_warning = IS_NOT_WARNING;
7131 	}
7132 
7133 	task = current_task();
7134 
7135 	ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &max_footprint);
7136 #if DEBUG || DEVELOPMENT
7137 	ledger_get_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint, &diag_threshold_limit);
7138 #endif
7139 #if CONFIG_DEFERRED_RECLAIM
7140 	if (task->deferred_reclamation_metadata != NULL) {
7141 		/*
7142 		 * Task is enrolled in deferred reclamation.
7143 		 * Do a reclaim to ensure it's really over its limit.
7144 		 */
7145 		vm_deferred_reclamation_reclaim_from_task_sync(task, UINT64_MAX);
7146 		ledger_get_balance(task->ledger, task_ledgers.phys_footprint, &current_footprint);
7147 		if (current_footprint < max_footprint) {
7148 			return;
7149 		}
7150 	}
7151 #endif /* CONFIG_DEFERRED_RECLAIM */
7152 	max_footprint_mb = max_footprint >> 20;
7153 #if DEBUG || DEVELOPMENT
7154 	diag_threshold_limit_mb = diag_threshold_limit >> 20;
7155 #endif
7156 	memlimit_is_active = task_get_memlimit_is_active(task);
7157 	memlimit_is_fatal = task_get_memlimit_is_fatal(task) == FALSE ? IS_NOT_FATAL : IS_FATAL;
7158 #if DEBUG || DEVELOPMENT
7159 	if (is_diag_mem_threshold == IS_NOT_DIAGNOSTICS) {
7160 		task_process_crossed_limit_no_diag(task, max_footprint_mb, memlimit_is_fatal, memlimit_is_active, is_warning);
7161 	} else {
7162 		task_process_crossed_limit_diag(diag_threshold_limit_mb);
7163 	}
7164 #else
7165 	task_process_crossed_limit_no_diag(task, max_footprint_mb, memlimit_is_fatal, memlimit_is_active, is_warning);
7166 #endif
7167 }
7168 
7169 /*
7170  * Actions to perfrom when a process has crossed watermark or is a fatal consumption */
7171 static inline void
task_process_crossed_limit_no_diag(task_t task,ledger_amount_t ledger_limit_size,bool memlimit_is_fatal,bool memlimit_is_active,send_exec_resource_is_warning is_warning)7172 task_process_crossed_limit_no_diag(task_t task, ledger_amount_t ledger_limit_size, bool memlimit_is_fatal, bool memlimit_is_active, send_exec_resource_is_warning is_warning)
7173 {
7174 	send_exec_resource_options_t exception_options = 0;
7175 	if (memlimit_is_fatal) {
7176 		exception_options |= EXEC_RESOURCE_FATAL;
7177 	}
7178 	/*
7179 	 * If this is an actual violation (not a warning), then generate EXC_RESOURCE exception.
7180 	 * We only generate the exception once per process per memlimit (active/inactive limit).
7181 	 * To enforce this, we monitor state based on the  memlimit's active/inactive attribute
7182 	 * and we disable it by marking that memlimit as exception triggered.
7183 	 */
7184 	if (is_warning == IS_NOT_WARNING && !task_has_triggered_exc_resource(task, memlimit_is_active)) {
7185 		PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND((int)ledger_limit_size, exception_options);
7186 		// If it was not a diag threshold (if was a memory limit), then we do not want more signalling,
7187 		// however, if was a diag limit, the user may reload a different limit and signal again the violation
7188 		memorystatus_log_exception((int)ledger_limit_size, memlimit_is_active, memlimit_is_fatal);
7189 		task_mark_has_triggered_exc_resource(task, memlimit_is_active);
7190 	}
7191 	memorystatus_on_ledger_footprint_exceeded(is_warning == IS_NOT_WARNING ? FALSE : TRUE, memlimit_is_active, memlimit_is_fatal);
7192 }
7193 
7194 #if DEBUG || DEVELOPMENT
7195 /**
7196  * Actions to take when a process has crossed the diagnostics limit
7197  */
7198 static inline void
task_process_crossed_limit_diag(ledger_amount_t ledger_limit_size)7199 task_process_crossed_limit_diag(ledger_amount_t ledger_limit_size)
7200 {
7201 	/*
7202 	 * If this is an actual violation (not a warning), then generate EXC_RESOURCE exception.
7203 	 * In the case of the diagnostics thresholds, the exception will be signaled only once, but the
7204 	 * inhibit / rearm mechanism if performed at ledger level.
7205 	 */
7206 	send_exec_resource_options_t exception_options = EXEC_RESOURCE_DIAGNOSTIC;
7207 	PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND((int)ledger_limit_size, exception_options);
7208 	memorystatus_log_diag_threshold_exception((int)ledger_limit_size);
7209 }
7210 #endif
7211 
7212 extern int proc_check_footprint_priv(void);
7213 
7214 kern_return_t
task_set_phys_footprint_limit(task_t task,int new_limit_mb,int * old_limit_mb)7215 task_set_phys_footprint_limit(
7216 	task_t task,
7217 	int new_limit_mb,
7218 	int *old_limit_mb)
7219 {
7220 	kern_return_t error;
7221 
7222 	boolean_t memlimit_is_active;
7223 	boolean_t memlimit_is_fatal;
7224 
7225 	if ((error = proc_check_footprint_priv())) {
7226 		return KERN_NO_ACCESS;
7227 	}
7228 
7229 	/*
7230 	 * This call should probably be obsoleted.
7231 	 * But for now, we default to current state.
7232 	 */
7233 	memlimit_is_active = task_get_memlimit_is_active(task);
7234 	memlimit_is_fatal = task_get_memlimit_is_fatal(task);
7235 
7236 	return task_set_phys_footprint_limit_internal(task, new_limit_mb, old_limit_mb, memlimit_is_active, memlimit_is_fatal);
7237 }
7238 
7239 /*
7240  * Set the limit of diagnostics memory consumption for a concrete task
7241  */
7242 #if CONFIG_MEMORYSTATUS
7243 #if DEVELOPMENT || DEBUG
7244 kern_return_t
task_set_diag_footprint_limit(task_t task,uint64_t new_limit_mb,uint64_t * old_limit_mb)7245 task_set_diag_footprint_limit(
7246 	task_t task,
7247 	uint64_t new_limit_mb,
7248 	uint64_t *old_limit_mb)
7249 {
7250 	kern_return_t error;
7251 
7252 	if ((error = proc_check_footprint_priv())) {
7253 		return KERN_NO_ACCESS;
7254 	}
7255 
7256 	return task_set_diag_footprint_limit_internal(task, new_limit_mb, old_limit_mb);
7257 }
7258 
7259 #endif // DEVELOPMENT || DEBUG
7260 #endif // CONFIG_MEMORYSTATUS
7261 
7262 kern_return_t
task_convert_phys_footprint_limit(int limit_mb,int * converted_limit_mb)7263 task_convert_phys_footprint_limit(
7264 	int limit_mb,
7265 	int *converted_limit_mb)
7266 {
7267 	if (limit_mb == -1) {
7268 		/*
7269 		 * No limit
7270 		 */
7271 		if (max_task_footprint != 0) {
7272 			*converted_limit_mb = (int)(max_task_footprint / 1024 / 1024);         /* bytes to MB */
7273 		} else {
7274 			*converted_limit_mb = (int)(LEDGER_LIMIT_INFINITY >> 20);
7275 		}
7276 	} else {
7277 		/* nothing to convert */
7278 		*converted_limit_mb = limit_mb;
7279 	}
7280 	return KERN_SUCCESS;
7281 }
7282 
7283 kern_return_t
task_set_phys_footprint_limit_internal(task_t task,int new_limit_mb,int * old_limit_mb,boolean_t memlimit_is_active,boolean_t memlimit_is_fatal)7284 task_set_phys_footprint_limit_internal(
7285 	task_t task,
7286 	int new_limit_mb,
7287 	int *old_limit_mb,
7288 	boolean_t memlimit_is_active,
7289 	boolean_t memlimit_is_fatal)
7290 {
7291 	ledger_amount_t old;
7292 	kern_return_t ret;
7293 #if DEVELOPMENT || DEBUG
7294 	diagthreshold_check_return diag_threshold_validity;
7295 #endif
7296 	ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &old);
7297 
7298 	if (ret != KERN_SUCCESS) {
7299 		return ret;
7300 	}
7301 	/**
7302 	 * Maybe we will need to re-enable the diag threshold, lets get the value
7303 	 * and the current status
7304 	 */
7305 #if DEVELOPMENT || DEBUG
7306 	diag_threshold_validity = task_check_memorythreshold_is_valid( task, new_limit_mb, false);
7307 	/**
7308 	 * If the footprint and diagnostics threshold are going to be same, lets disable the threshold
7309 	 */
7310 	if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7311 		ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7312 	} else if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7313 		ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7314 	}
7315 #endif
7316 
7317 	/*
7318 	 * Check that limit >> 20 will not give an "unexpected" 32-bit
7319 	 * result. There are, however, implicit assumptions that -1 mb limit
7320 	 * equates to LEDGER_LIMIT_INFINITY.
7321 	 */
7322 	assert(((old & 0xFFF0000000000000LL) == 0) || (old == LEDGER_LIMIT_INFINITY));
7323 
7324 	if (old_limit_mb) {
7325 		*old_limit_mb = (int)(old >> 20);
7326 	}
7327 
7328 	if (new_limit_mb == -1) {
7329 		/*
7330 		 * Caller wishes to remove the limit.
7331 		 */
7332 		ledger_set_limit(task->ledger, task_ledgers.phys_footprint,
7333 		    max_task_footprint ? max_task_footprint : LEDGER_LIMIT_INFINITY,
7334 		    max_task_footprint ? (uint8_t)max_task_footprint_warning_level : 0);
7335 
7336 		task_lock(task);
7337 		task_set_memlimit_is_active(task, memlimit_is_active);
7338 		task_set_memlimit_is_fatal(task, memlimit_is_fatal);
7339 		task_unlock(task);
7340 		/**
7341 		 * If the diagnostics were disabled, and now we have a new limit, we have to re-enable it.
7342 		 */
7343 #if DEVELOPMENT || DEBUG
7344 		if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7345 			ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7346 		} else if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7347 			ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7348 		}
7349 	#endif
7350 		return KERN_SUCCESS;
7351 	}
7352 
7353 #ifdef CONFIG_NOMONITORS
7354 	return KERN_SUCCESS;
7355 #endif /* CONFIG_NOMONITORS */
7356 
7357 	task_lock(task);
7358 
7359 	if ((memlimit_is_active == task_get_memlimit_is_active(task)) &&
7360 	    (memlimit_is_fatal == task_get_memlimit_is_fatal(task)) &&
7361 	    (((ledger_amount_t)new_limit_mb << 20) == old)) {
7362 		/*
7363 		 * memlimit state is not changing
7364 		 */
7365 		task_unlock(task);
7366 		return KERN_SUCCESS;
7367 	}
7368 
7369 	task_set_memlimit_is_active(task, memlimit_is_active);
7370 	task_set_memlimit_is_fatal(task, memlimit_is_fatal);
7371 
7372 	ledger_set_limit(task->ledger, task_ledgers.phys_footprint,
7373 	    (ledger_amount_t)new_limit_mb << 20, PHYS_FOOTPRINT_WARNING_LEVEL);
7374 
7375 	if (task == current_task()) {
7376 		ledger_check_new_balance(current_thread(), task->ledger,
7377 		    task_ledgers.phys_footprint);
7378 	}
7379 
7380 	task_unlock(task);
7381 #if DEVELOPMENT || DEBUG
7382 	if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7383 		ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7384 	}
7385 	#endif
7386 
7387 	return KERN_SUCCESS;
7388 }
7389 
7390 #if RESETTABLE_DIAG_FOOTPRINT_LIMITS
7391 kern_return_t
task_set_diag_footprint_limit_internal(task_t task,uint64_t new_limit_bytes,uint64_t * old_limit_bytes)7392 task_set_diag_footprint_limit_internal(
7393 	task_t task,
7394 	uint64_t new_limit_bytes,
7395 	uint64_t *old_limit_bytes)
7396 {
7397 	ledger_amount_t old = 0;
7398 	kern_return_t ret = KERN_SUCCESS;
7399 	diagthreshold_check_return diag_threshold_validity;
7400 	ret = ledger_get_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint, &old);
7401 
7402 	if (ret != KERN_SUCCESS) {
7403 		return ret;
7404 	}
7405 	/**
7406 	 * Maybe we will need to re-enable the diag threshold, lets get the value
7407 	 * and the current status
7408 	 */
7409 	diag_threshold_validity = task_check_memorythreshold_is_valid( task, new_limit_bytes >> 20, true);
7410 	/**
7411 	 * If the footprint and diagnostics threshold are going to be same, lets disable the threshold
7412 	 */
7413 	if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7414 		ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7415 	}
7416 
7417 	/*
7418 	 * Check that limit >> 20 will not give an "unexpected" 32-bit
7419 	 * result. There are, however, implicit assumptions that -1 mb limit
7420 	 * equates to LEDGER_LIMIT_INFINITY.
7421 	 */
7422 	if (old_limit_bytes) {
7423 		*old_limit_bytes = old;
7424 	}
7425 
7426 	if (new_limit_bytes == -1) {
7427 		/*
7428 		 * Caller wishes to remove the limit.
7429 		 */
7430 		ledger_set_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint,
7431 		    LEDGER_LIMIT_INFINITY);
7432 		/*
7433 		 * If the memory diagnostics flag was disabled, lets enable it again
7434 		 */
7435 		ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7436 		return KERN_SUCCESS;
7437 	}
7438 
7439 #ifdef CONFIG_NOMONITORS
7440 	return KERN_SUCCESS;
7441 #else
7442 
7443 	task_lock(task);
7444 	ledger_set_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint,
7445 	    (ledger_amount_t)new_limit_bytes );
7446 	if (task == current_task()) {
7447 		ledger_check_new_balance(current_thread(), task->ledger,
7448 		    task_ledgers.phys_footprint);
7449 	}
7450 
7451 	task_unlock(task);
7452 	if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7453 		ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7454 	} else if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7455 		ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7456 	}
7457 
7458 	return KERN_SUCCESS;
7459 #endif /* CONFIG_NOMONITORS */
7460 }
7461 
7462 kern_return_t
task_get_diag_footprint_limit_internal(task_t task,uint64_t * new_limit_bytes,bool * threshold_disabled)7463 task_get_diag_footprint_limit_internal(
7464 	task_t task,
7465 	uint64_t *new_limit_bytes,
7466 	bool *threshold_disabled)
7467 {
7468 	ledger_amount_t ledger_limit;
7469 	kern_return_t ret = KERN_SUCCESS;
7470 	if (new_limit_bytes == NULL || threshold_disabled == NULL) {
7471 		return KERN_INVALID_ARGUMENT;
7472 	}
7473 	ret = ledger_get_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint, &ledger_limit);
7474 	if (ledger_limit == LEDGER_LIMIT_INFINITY) {
7475 		ledger_limit = -1;
7476 	}
7477 	if (ret == KERN_SUCCESS) {
7478 		*new_limit_bytes = ledger_limit;
7479 		ret = ledger_is_diag_threshold_enabled(task->ledger, task_ledgers.phys_footprint, threshold_disabled);
7480 	}
7481 	return ret;
7482 }
7483 #endif /* RESETTABLE_DIAG_FOOTPRINT_LIMITS */
7484 
7485 
7486 kern_return_t
task_get_phys_footprint_limit(task_t task,int * limit_mb)7487 task_get_phys_footprint_limit(
7488 	task_t task,
7489 	int *limit_mb)
7490 {
7491 	ledger_amount_t limit;
7492 	kern_return_t ret;
7493 
7494 	ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &limit);
7495 	if (ret != KERN_SUCCESS) {
7496 		return ret;
7497 	}
7498 
7499 	/*
7500 	 * Check that limit >> 20 will not give an "unexpected" signed, 32-bit
7501 	 * result. There are, however, implicit assumptions that -1 mb limit
7502 	 * equates to LEDGER_LIMIT_INFINITY.
7503 	 */
7504 	assert(((limit & 0xFFF0000000000000LL) == 0) || (limit == LEDGER_LIMIT_INFINITY));
7505 	*limit_mb = (int)(limit >> 20);
7506 
7507 	return KERN_SUCCESS;
7508 }
7509 #else /* CONFIG_MEMORYSTATUS */
7510 kern_return_t
task_set_phys_footprint_limit(__unused task_t task,__unused int new_limit_mb,__unused int * old_limit_mb)7511 task_set_phys_footprint_limit(
7512 	__unused task_t task,
7513 	__unused int new_limit_mb,
7514 	__unused int *old_limit_mb)
7515 {
7516 	return KERN_FAILURE;
7517 }
7518 
7519 kern_return_t
task_get_phys_footprint_limit(__unused task_t task,__unused int * limit_mb)7520 task_get_phys_footprint_limit(
7521 	__unused task_t task,
7522 	__unused int *limit_mb)
7523 {
7524 	return KERN_FAILURE;
7525 }
7526 #endif /* CONFIG_MEMORYSTATUS */
7527 
7528 security_token_t *
task_get_sec_token(task_t task)7529 task_get_sec_token(task_t task)
7530 {
7531 	return &task_get_ro(task)->task_tokens.sec_token;
7532 }
7533 
7534 void
task_set_sec_token(task_t task,security_token_t * token)7535 task_set_sec_token(task_t task, security_token_t *token)
7536 {
7537 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7538 	    task_tokens.sec_token, token);
7539 }
7540 
7541 audit_token_t *
task_get_audit_token(task_t task)7542 task_get_audit_token(task_t task)
7543 {
7544 	return &task_get_ro(task)->task_tokens.audit_token;
7545 }
7546 
7547 void
task_set_audit_token(task_t task,audit_token_t * token)7548 task_set_audit_token(task_t task, audit_token_t *token)
7549 {
7550 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7551 	    task_tokens.audit_token, token);
7552 }
7553 
7554 void
task_set_tokens(task_t task,security_token_t * sec_token,audit_token_t * audit_token)7555 task_set_tokens(task_t task, security_token_t *sec_token, audit_token_t *audit_token)
7556 {
7557 	struct task_token_ro_data tokens;
7558 
7559 	tokens = task_get_ro(task)->task_tokens;
7560 	tokens.sec_token = *sec_token;
7561 	tokens.audit_token = *audit_token;
7562 
7563 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task), task_tokens,
7564 	    &tokens);
7565 }
7566 
7567 boolean_t
task_is_privileged(task_t task)7568 task_is_privileged(task_t task)
7569 {
7570 	return task_get_sec_token(task)->val[0] == 0;
7571 }
7572 
7573 #ifdef CONFIG_MACF
7574 uint8_t *
task_get_mach_trap_filter_mask(task_t task)7575 task_get_mach_trap_filter_mask(task_t task)
7576 {
7577 	return task_get_ro(task)->task_filters.mach_trap_filter_mask;
7578 }
7579 
7580 void
task_set_mach_trap_filter_mask(task_t task,uint8_t * mask)7581 task_set_mach_trap_filter_mask(task_t task, uint8_t *mask)
7582 {
7583 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7584 	    task_filters.mach_trap_filter_mask, &mask);
7585 }
7586 
7587 uint8_t *
task_get_mach_kobj_filter_mask(task_t task)7588 task_get_mach_kobj_filter_mask(task_t task)
7589 {
7590 	return task_get_ro(task)->task_filters.mach_kobj_filter_mask;
7591 }
7592 
7593 mach_vm_address_t
task_get_all_image_info_addr(task_t task)7594 task_get_all_image_info_addr(task_t task)
7595 {
7596 	return task->all_image_info_addr;
7597 }
7598 
7599 void
task_set_mach_kobj_filter_mask(task_t task,uint8_t * mask)7600 task_set_mach_kobj_filter_mask(task_t task, uint8_t *mask)
7601 {
7602 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7603 	    task_filters.mach_kobj_filter_mask, &mask);
7604 }
7605 
7606 #endif /* CONFIG_MACF */
7607 
7608 void
task_set_thread_limit(task_t task,uint16_t thread_limit)7609 task_set_thread_limit(task_t task, uint16_t thread_limit)
7610 {
7611 	assert(task != kernel_task);
7612 	if (thread_limit <= TASK_MAX_THREAD_LIMIT) {
7613 		task_lock(task);
7614 		task->task_thread_limit = thread_limit;
7615 		task_unlock(task);
7616 	}
7617 }
7618 
7619 #if CONFIG_PROC_RESOURCE_LIMITS
7620 kern_return_t
task_set_port_space_limits(task_t task,uint32_t soft_limit,uint32_t hard_limit)7621 task_set_port_space_limits(task_t task, uint32_t soft_limit, uint32_t hard_limit)
7622 {
7623 	return ipc_space_set_table_size_limits(task->itk_space, soft_limit, hard_limit);
7624 }
7625 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
7626 
7627 #if XNU_TARGET_OS_OSX
7628 boolean_t
task_has_system_version_compat_enabled(task_t task)7629 task_has_system_version_compat_enabled(task_t task)
7630 {
7631 	boolean_t enabled = FALSE;
7632 
7633 	task_lock(task);
7634 	enabled = (task->t_flags & TF_SYS_VERSION_COMPAT);
7635 	task_unlock(task);
7636 
7637 	return enabled;
7638 }
7639 
7640 void
task_set_system_version_compat_enabled(task_t task,boolean_t enable_system_version_compat)7641 task_set_system_version_compat_enabled(task_t task, boolean_t enable_system_version_compat)
7642 {
7643 	assert(task == current_task());
7644 	assert(task != kernel_task);
7645 
7646 	task_lock(task);
7647 	if (enable_system_version_compat) {
7648 		task->t_flags |= TF_SYS_VERSION_COMPAT;
7649 	} else {
7650 		task->t_flags &= ~TF_SYS_VERSION_COMPAT;
7651 	}
7652 	task_unlock(task);
7653 }
7654 #endif /* XNU_TARGET_OS_OSX */
7655 
7656 /*
7657  * We need to export some functions to other components that
7658  * are currently implemented in macros within the osfmk
7659  * component.  Just export them as functions of the same name.
7660  */
7661 boolean_t
is_kerneltask(task_t t)7662 is_kerneltask(task_t t)
7663 {
7664 	if (t == kernel_task) {
7665 		return TRUE;
7666 	}
7667 
7668 	return FALSE;
7669 }
7670 
7671 boolean_t
is_corpsefork(task_t t)7672 is_corpsefork(task_t t)
7673 {
7674 	return task_is_a_corpse_fork(t);
7675 }
7676 
7677 task_t
current_task_early(void)7678 current_task_early(void)
7679 {
7680 	if (__improbable(startup_phase < STARTUP_SUB_EARLY_BOOT)) {
7681 		if (current_thread()->t_tro == NULL) {
7682 			return TASK_NULL;
7683 		}
7684 	}
7685 	return get_threadtask(current_thread());
7686 }
7687 
7688 task_t
current_task(void)7689 current_task(void)
7690 {
7691 	return get_threadtask(current_thread());
7692 }
7693 
7694 /* defined in bsd/kern/kern_prot.c */
7695 extern int get_audit_token_pid(audit_token_t *audit_token);
7696 
7697 int
task_pid(task_t task)7698 task_pid(task_t task)
7699 {
7700 	if (task) {
7701 		return get_audit_token_pid(task_get_audit_token(task));
7702 	}
7703 	return -1;
7704 }
7705 
7706 #if __has_feature(ptrauth_calls)
7707 /*
7708  * Get the shared region id and jop signing key for the task.
7709  * The function will allocate a kalloc buffer and return
7710  * it to caller, the caller needs to free it. This is used
7711  * for getting the information via task port.
7712  */
7713 char *
task_get_vm_shared_region_id_and_jop_pid(task_t task,uint64_t * jop_pid)7714 task_get_vm_shared_region_id_and_jop_pid(task_t task, uint64_t *jop_pid)
7715 {
7716 	size_t len;
7717 	char *shared_region_id = NULL;
7718 
7719 	task_lock(task);
7720 	if (task->shared_region_id == NULL) {
7721 		task_unlock(task);
7722 		return NULL;
7723 	}
7724 	len = strlen(task->shared_region_id) + 1;
7725 
7726 	/* don't hold task lock while allocating */
7727 	task_unlock(task);
7728 	shared_region_id = kalloc_data(len, Z_WAITOK);
7729 	task_lock(task);
7730 
7731 	if (task->shared_region_id == NULL) {
7732 		task_unlock(task);
7733 		kfree_data(shared_region_id, len);
7734 		return NULL;
7735 	}
7736 	assert(len == strlen(task->shared_region_id) + 1);         /* should never change */
7737 	strlcpy(shared_region_id, task->shared_region_id, len);
7738 	task_unlock(task);
7739 
7740 	/* find key from its auth pager */
7741 	if (jop_pid != NULL) {
7742 		*jop_pid = shared_region_find_key(shared_region_id);
7743 	}
7744 
7745 	return shared_region_id;
7746 }
7747 
7748 /*
7749  * set the shared region id for a task
7750  */
7751 void
task_set_shared_region_id(task_t task,char * id)7752 task_set_shared_region_id(task_t task, char *id)
7753 {
7754 	char *old_id;
7755 
7756 	task_lock(task);
7757 	old_id = task->shared_region_id;
7758 	task->shared_region_id = id;
7759 	task->shared_region_auth_remapped = FALSE;
7760 	task_unlock(task);
7761 
7762 	/* free any pre-existing shared region id */
7763 	if (old_id != NULL) {
7764 		shared_region_key_dealloc(old_id);
7765 		kfree_data(old_id, strlen(old_id) + 1);
7766 	}
7767 }
7768 #endif /* __has_feature(ptrauth_calls) */
7769 
7770 /*
7771  * This routine finds a thread in a task by its unique id
7772  * Returns a referenced thread or THREAD_NULL if the thread was not found
7773  *
7774  * TODO: This is super inefficient - it's an O(threads in task) list walk!
7775  *       We should make a tid hash, or transition all tid clients to thread ports
7776  *
7777  * Precondition: No locks held (will take task lock)
7778  */
7779 thread_t
task_findtid(task_t task,uint64_t tid)7780 task_findtid(task_t task, uint64_t tid)
7781 {
7782 	thread_t self           = current_thread();
7783 	thread_t found_thread   = THREAD_NULL;
7784 	thread_t iter_thread    = THREAD_NULL;
7785 
7786 	/* Short-circuit the lookup if we're looking up ourselves */
7787 	if (tid == self->thread_id || tid == TID_NULL) {
7788 		assert(get_threadtask(self) == task);
7789 
7790 		thread_reference(self);
7791 
7792 		return self;
7793 	}
7794 
7795 	task_lock(task);
7796 
7797 	queue_iterate(&task->threads, iter_thread, thread_t, task_threads) {
7798 		if (iter_thread->thread_id == tid) {
7799 			found_thread = iter_thread;
7800 			thread_reference(found_thread);
7801 			break;
7802 		}
7803 	}
7804 
7805 	task_unlock(task);
7806 
7807 	return found_thread;
7808 }
7809 
7810 int
pid_from_task(task_t task)7811 pid_from_task(task_t task)
7812 {
7813 	int pid = -1;
7814 	void *bsd_info = get_bsdtask_info(task);
7815 
7816 	if (bsd_info) {
7817 		pid = proc_pid(bsd_info);
7818 	} else {
7819 		pid = task_pid(task);
7820 	}
7821 
7822 	return pid;
7823 }
7824 
7825 /*
7826  * Control the CPU usage monitor for a task.
7827  */
7828 kern_return_t
task_cpu_usage_monitor_ctl(task_t task,uint32_t * flags)7829 task_cpu_usage_monitor_ctl(task_t task, uint32_t *flags)
7830 {
7831 	int error = KERN_SUCCESS;
7832 
7833 	if (*flags & CPUMON_MAKE_FATAL) {
7834 		task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_FATAL_CPUMON;
7835 	} else {
7836 		error = KERN_INVALID_ARGUMENT;
7837 	}
7838 
7839 	return error;
7840 }
7841 
7842 /*
7843  * Control the wakeups monitor for a task.
7844  */
7845 kern_return_t
task_wakeups_monitor_ctl(task_t task,uint32_t * flags,int32_t * rate_hz)7846 task_wakeups_monitor_ctl(task_t task, uint32_t *flags, int32_t *rate_hz)
7847 {
7848 	ledger_t ledger = task->ledger;
7849 
7850 	task_lock(task);
7851 	if (*flags & WAKEMON_GET_PARAMS) {
7852 		ledger_amount_t limit;
7853 		uint64_t                period;
7854 
7855 		ledger_get_limit(ledger, task_ledgers.interrupt_wakeups, &limit);
7856 		ledger_get_period(ledger, task_ledgers.interrupt_wakeups, &period);
7857 
7858 		if (limit != LEDGER_LIMIT_INFINITY) {
7859 			/*
7860 			 * An active limit means the wakeups monitor is enabled.
7861 			 */
7862 			*rate_hz = (int32_t)(limit / (int64_t)(period / NSEC_PER_SEC));
7863 			*flags = WAKEMON_ENABLE;
7864 			if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON) {
7865 				*flags |= WAKEMON_MAKE_FATAL;
7866 			}
7867 		} else {
7868 			*flags = WAKEMON_DISABLE;
7869 			*rate_hz = -1;
7870 		}
7871 
7872 		/*
7873 		 * If WAKEMON_GET_PARAMS is present in flags, all other flags are ignored.
7874 		 */
7875 		task_unlock(task);
7876 		return KERN_SUCCESS;
7877 	}
7878 
7879 	if (*flags & WAKEMON_ENABLE) {
7880 		if (*flags & WAKEMON_SET_DEFAULTS) {
7881 			*rate_hz = task_wakeups_monitor_rate;
7882 		}
7883 
7884 #ifndef CONFIG_NOMONITORS
7885 		if (*flags & WAKEMON_MAKE_FATAL) {
7886 			task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON;
7887 		}
7888 #endif /* CONFIG_NOMONITORS */
7889 
7890 		if (*rate_hz <= 0) {
7891 			task_unlock(task);
7892 			return KERN_INVALID_ARGUMENT;
7893 		}
7894 
7895 #ifndef CONFIG_NOMONITORS
7896 		ledger_set_limit(ledger, task_ledgers.interrupt_wakeups, *rate_hz * task_wakeups_monitor_interval,
7897 		    (uint8_t)task_wakeups_monitor_ustackshots_trigger_pct);
7898 		ledger_set_period(ledger, task_ledgers.interrupt_wakeups, task_wakeups_monitor_interval * NSEC_PER_SEC);
7899 		ledger_enable_callback(ledger, task_ledgers.interrupt_wakeups);
7900 #endif /* CONFIG_NOMONITORS */
7901 	} else if (*flags & WAKEMON_DISABLE) {
7902 		/*
7903 		 * Caller wishes to disable wakeups monitor on the task.
7904 		 *
7905 		 * Disable telemetry if it was triggered by the wakeups monitor, and
7906 		 * remove the limit & callback on the wakeups ledger entry.
7907 		 */
7908 #if CONFIG_TELEMETRY
7909 		telemetry_task_ctl_locked(task, TF_WAKEMON_WARNING, 0);
7910 #endif
7911 		ledger_disable_refill(ledger, task_ledgers.interrupt_wakeups);
7912 		ledger_disable_callback(ledger, task_ledgers.interrupt_wakeups);
7913 	}
7914 
7915 	task_unlock(task);
7916 	return KERN_SUCCESS;
7917 }
7918 
7919 void
task_wakeups_rate_exceeded(int warning,__unused const void * param0,__unused const void * param1)7920 task_wakeups_rate_exceeded(int warning, __unused const void *param0, __unused const void *param1)
7921 {
7922 	if (warning == LEDGER_WARNING_ROSE_ABOVE) {
7923 #if CONFIG_TELEMETRY
7924 		/*
7925 		 * This task is in danger of violating the wakeups monitor. Enable telemetry on this task
7926 		 * so there are micro-stackshots available if and when EXC_RESOURCE is triggered.
7927 		 */
7928 		telemetry_task_ctl(current_task(), TF_WAKEMON_WARNING, 1);
7929 #endif
7930 		return;
7931 	}
7932 
7933 #if CONFIG_TELEMETRY
7934 	/*
7935 	 * If the balance has dipped below the warning level (LEDGER_WARNING_DIPPED_BELOW) or
7936 	 * exceeded the limit, turn telemetry off for the task.
7937 	 */
7938 	telemetry_task_ctl(current_task(), TF_WAKEMON_WARNING, 0);
7939 #endif
7940 
7941 	if (warning == 0) {
7942 		SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS();
7943 	}
7944 }
7945 
7946 void __attribute__((noinline))
SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void)7947 SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void)
7948 {
7949 	task_t                      task        = current_task();
7950 	int                         pid         = 0;
7951 	const char                  *procname   = "unknown";
7952 	boolean_t                   fatal;
7953 	kern_return_t               kr;
7954 #ifdef EXC_RESOURCE_MONITORS
7955 	mach_exception_data_type_t  code[EXCEPTION_CODE_MAX];
7956 #endif /* EXC_RESOURCE_MONITORS */
7957 	struct ledger_entry_info    lei;
7958 
7959 #ifdef MACH_BSD
7960 	pid = proc_selfpid();
7961 	if (get_bsdtask_info(task) != NULL) {
7962 		procname = proc_name_address(get_bsdtask_info(current_task()));
7963 	}
7964 #endif
7965 
7966 	ledger_get_entry_info(task->ledger, task_ledgers.interrupt_wakeups, &lei);
7967 
7968 	/*
7969 	 * Disable the exception notification so we don't overwhelm
7970 	 * the listener with an endless stream of redundant exceptions.
7971 	 * TODO: detect whether another thread is already reporting the violation.
7972 	 */
7973 	uint32_t flags = WAKEMON_DISABLE;
7974 	task_wakeups_monitor_ctl(task, &flags, NULL);
7975 
7976 	fatal = task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON;
7977 	trace_resource_violation(RMON_CPUWAKES_VIOLATED, &lei);
7978 	os_log(OS_LOG_DEFAULT, "process %s[%d] caught waking the CPU %llu times "
7979 	    "over ~%llu seconds, averaging %llu wakes / second and "
7980 	    "violating a %slimit of %llu wakes over %llu seconds.\n",
7981 	    procname, pid,
7982 	    lei.lei_balance, lei.lei_last_refill / NSEC_PER_SEC,
7983 	    lei.lei_last_refill == 0 ? 0 :
7984 	    (NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill),
7985 	    fatal ? "FATAL " : "",
7986 	    lei.lei_limit, lei.lei_refill_period / NSEC_PER_SEC);
7987 
7988 	kr = send_resource_violation(send_cpu_wakes_violation, task, &lei,
7989 	    fatal ? kRNFatalLimitFlag : 0);
7990 	if (kr) {
7991 		printf("send_resource_violation(CPU wakes, ...): error %#x\n", kr);
7992 	}
7993 
7994 #ifdef EXC_RESOURCE_MONITORS
7995 	if (disable_exc_resource) {
7996 		printf("process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
7997 		    "suppressed by a boot-arg\n", procname, pid);
7998 		return;
7999 	}
8000 	if (disable_exc_resource_during_audio && audio_active) {
8001 		os_log(OS_LOG_DEFAULT, "process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
8002 		    "suppressed due to audio playback\n", procname, pid);
8003 		return;
8004 	}
8005 	if (lei.lei_last_refill == 0) {
8006 		os_log(OS_LOG_DEFAULT, "process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
8007 		    "suppressed due to lei.lei_last_refill = 0 \n", procname, pid);
8008 	}
8009 
8010 	code[0] = code[1] = 0;
8011 	EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_WAKEUPS);
8012 	EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_WAKEUPS_MONITOR);
8013 	EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_PERMITTED(code[0],
8014 	    NSEC_PER_SEC * lei.lei_limit / lei.lei_refill_period);
8015 	EXC_RESOURCE_CPUMONITOR_ENCODE_OBSERVATION_INTERVAL(code[0],
8016 	    lei.lei_last_refill);
8017 	EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_OBSERVED(code[1],
8018 	    NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill);
8019 	exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
8020 #endif /* EXC_RESOURCE_MONITORS */
8021 
8022 	if (fatal) {
8023 		task_terminate_internal(task);
8024 	}
8025 }
8026 
8027 static boolean_t
global_update_logical_writes(int64_t io_delta,int64_t * global_write_count)8028 global_update_logical_writes(int64_t io_delta, int64_t *global_write_count)
8029 {
8030 	int64_t old_count, new_count;
8031 	boolean_t needs_telemetry;
8032 
8033 	do {
8034 		new_count = old_count = *global_write_count;
8035 		new_count += io_delta;
8036 		if (new_count >= io_telemetry_limit) {
8037 			new_count = 0;
8038 			needs_telemetry = TRUE;
8039 		} else {
8040 			needs_telemetry = FALSE;
8041 		}
8042 	} while (!OSCompareAndSwap64(old_count, new_count, global_write_count));
8043 	return needs_telemetry;
8044 }
8045 
8046 void
task_update_physical_writes(__unused task_t task,__unused task_physical_write_flavor_t flavor,__unused uint64_t io_size,__unused task_balance_flags_t flags)8047 task_update_physical_writes(__unused task_t task, __unused task_physical_write_flavor_t flavor, __unused uint64_t io_size, __unused task_balance_flags_t flags)
8048 {
8049 #if CONFIG_PHYS_WRITE_ACCT
8050 	if (!io_size) {
8051 		return;
8052 	}
8053 
8054 	/*
8055 	 * task == NULL means that we have to update kernel_task ledgers
8056 	 */
8057 	if (!task) {
8058 		task = kernel_task;
8059 	}
8060 
8061 	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PHYS_WRITE_ACCT)) | DBG_FUNC_NONE,
8062 	    task_pid(task), flavor, io_size, flags, 0);
8063 	DTRACE_IO4(physical_writes, struct task *, task, task_physical_write_flavor_t, flavor, uint64_t, io_size, task_balance_flags_t, flags);
8064 
8065 	if (flags & TASK_BALANCE_CREDIT) {
8066 		if (flavor == TASK_PHYSICAL_WRITE_METADATA) {
8067 			OSAddAtomic64(io_size, (SInt64 *)&(task->task_fs_metadata_writes));
8068 			ledger_credit_nocheck(task->ledger, task_ledgers.fs_metadata_writes, io_size);
8069 		}
8070 	} else if (flags & TASK_BALANCE_DEBIT) {
8071 		if (flavor == TASK_PHYSICAL_WRITE_METADATA) {
8072 			OSAddAtomic64(-1 * io_size, (SInt64 *)&(task->task_fs_metadata_writes));
8073 			ledger_debit_nocheck(task->ledger, task_ledgers.fs_metadata_writes, io_size);
8074 		}
8075 	}
8076 #endif /* CONFIG_PHYS_WRITE_ACCT */
8077 }
8078 
8079 void
task_update_logical_writes(task_t task,uint32_t io_size,int flags,void * vp)8080 task_update_logical_writes(task_t task, uint32_t io_size, int flags, void *vp)
8081 {
8082 	int64_t io_delta = 0;
8083 	int64_t * global_counter_to_update;
8084 	boolean_t needs_telemetry = FALSE;
8085 	boolean_t is_external_device = FALSE;
8086 	int ledger_to_update = 0;
8087 	struct task_writes_counters * writes_counters_to_update;
8088 
8089 	if ((!task) || (!io_size) || (!vp)) {
8090 		return;
8091 	}
8092 
8093 	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_DATA_WRITE)) | DBG_FUNC_NONE,
8094 	    task_pid(task), io_size, flags, (uintptr_t)VM_KERNEL_ADDRPERM(vp), 0);
8095 	DTRACE_IO4(logical_writes, struct task *, task, uint32_t, io_size, int, flags, vnode *, vp);
8096 
8097 	// Is the drive backing this vnode internal or external to the system?
8098 	if (vnode_isonexternalstorage(vp) == false) {
8099 		global_counter_to_update = &global_logical_writes_count;
8100 		ledger_to_update = task_ledgers.logical_writes;
8101 		writes_counters_to_update = &task->task_writes_counters_internal;
8102 		is_external_device = FALSE;
8103 	} else {
8104 		global_counter_to_update = &global_logical_writes_to_external_count;
8105 		ledger_to_update = task_ledgers.logical_writes_to_external;
8106 		writes_counters_to_update = &task->task_writes_counters_external;
8107 		is_external_device = TRUE;
8108 	}
8109 
8110 	switch (flags) {
8111 	case TASK_WRITE_IMMEDIATE:
8112 		OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_immediate_writes));
8113 		ledger_credit(task->ledger, ledger_to_update, io_size);
8114 		if (!is_external_device) {
8115 			coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
8116 		}
8117 		break;
8118 	case TASK_WRITE_DEFERRED:
8119 		OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_deferred_writes));
8120 		ledger_credit(task->ledger, ledger_to_update, io_size);
8121 		if (!is_external_device) {
8122 			coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
8123 		}
8124 		break;
8125 	case TASK_WRITE_INVALIDATED:
8126 		OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_invalidated_writes));
8127 		ledger_debit(task->ledger, ledger_to_update, io_size);
8128 		if (!is_external_device) {
8129 			coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, FALSE, io_size);
8130 		}
8131 		break;
8132 	case TASK_WRITE_METADATA:
8133 		OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_metadata_writes));
8134 		ledger_credit(task->ledger, ledger_to_update, io_size);
8135 		if (!is_external_device) {
8136 			coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
8137 		}
8138 		break;
8139 	}
8140 
8141 	io_delta = (flags == TASK_WRITE_INVALIDATED) ? ((int64_t)io_size * -1ll) : ((int64_t)io_size);
8142 	if (io_telemetry_limit != 0) {
8143 		/* If io_telemetry_limit is 0, disable global updates and I/O telemetry */
8144 		needs_telemetry = global_update_logical_writes(io_delta, global_counter_to_update);
8145 		if (needs_telemetry && !is_external_device) {
8146 			act_set_io_telemetry_ast(current_thread());
8147 		}
8148 	}
8149 }
8150 
8151 /*
8152  * Control the I/O monitor for a task.
8153  */
8154 kern_return_t
task_io_monitor_ctl(task_t task,uint32_t * flags)8155 task_io_monitor_ctl(task_t task, uint32_t *flags)
8156 {
8157 	ledger_t ledger = task->ledger;
8158 
8159 	task_lock(task);
8160 	if (*flags & IOMON_ENABLE) {
8161 		/* Configure the physical I/O ledger */
8162 		ledger_set_limit(ledger, task_ledgers.physical_writes, (task_iomon_limit_mb * 1024 * 1024), 0);
8163 		ledger_set_period(ledger, task_ledgers.physical_writes, (task_iomon_interval_secs * NSEC_PER_SEC));
8164 	} else if (*flags & IOMON_DISABLE) {
8165 		/*
8166 		 * Caller wishes to disable I/O monitor on the task.
8167 		 */
8168 		ledger_disable_refill(ledger, task_ledgers.physical_writes);
8169 		ledger_disable_callback(ledger, task_ledgers.physical_writes);
8170 	}
8171 
8172 	task_unlock(task);
8173 	return KERN_SUCCESS;
8174 }
8175 
8176 void
task_io_rate_exceeded(int warning,const void * param0,__unused const void * param1)8177 task_io_rate_exceeded(int warning, const void *param0, __unused const void *param1)
8178 {
8179 	if (warning == 0) {
8180 		SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO((int)param0);
8181 	}
8182 }
8183 
8184 void __attribute__((noinline))
SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor)8185 SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor)
8186 {
8187 	int                             pid = 0;
8188 	task_t                          task = current_task();
8189 #ifdef EXC_RESOURCE_MONITORS
8190 	mach_exception_data_type_t      code[EXCEPTION_CODE_MAX];
8191 #endif /* EXC_RESOURCE_MONITORS */
8192 	struct ledger_entry_info        lei = {};
8193 	kern_return_t                   kr;
8194 
8195 #ifdef MACH_BSD
8196 	pid = proc_selfpid();
8197 #endif
8198 	/*
8199 	 * Get the ledger entry info. We need to do this before disabling the exception
8200 	 * to get correct values for all fields.
8201 	 */
8202 	switch (flavor) {
8203 	case FLAVOR_IO_PHYSICAL_WRITES:
8204 		ledger_get_entry_info(task->ledger, task_ledgers.physical_writes, &lei);
8205 		break;
8206 	}
8207 
8208 
8209 	/*
8210 	 * Disable the exception notification so we don't overwhelm
8211 	 * the listener with an endless stream of redundant exceptions.
8212 	 * TODO: detect whether another thread is already reporting the violation.
8213 	 */
8214 	uint32_t flags = IOMON_DISABLE;
8215 	task_io_monitor_ctl(task, &flags);
8216 
8217 	if (flavor == FLAVOR_IO_LOGICAL_WRITES) {
8218 		trace_resource_violation(RMON_LOGWRITES_VIOLATED, &lei);
8219 	}
8220 	os_log(OS_LOG_DEFAULT, "process [%d] caught causing excessive I/O (flavor: %d). Task I/O: %lld MB. [Limit : %lld MB per %lld secs]\n",
8221 	    pid, flavor, (lei.lei_balance / (1024 * 1024)), (lei.lei_limit / (1024 * 1024)), (lei.lei_refill_period / NSEC_PER_SEC));
8222 
8223 	kr = send_resource_violation(send_disk_writes_violation, task, &lei, kRNFlagsNone);
8224 	if (kr) {
8225 		printf("send_resource_violation(disk_writes, ...): error %#x\n", kr);
8226 	}
8227 
8228 #ifdef EXC_RESOURCE_MONITORS
8229 	code[0] = code[1] = 0;
8230 	EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_IO);
8231 	EXC_RESOURCE_ENCODE_FLAVOR(code[0], flavor);
8232 	EXC_RESOURCE_IO_ENCODE_INTERVAL(code[0], (lei.lei_refill_period / NSEC_PER_SEC));
8233 	EXC_RESOURCE_IO_ENCODE_LIMIT(code[0], (lei.lei_limit / (1024 * 1024)));
8234 	EXC_RESOURCE_IO_ENCODE_OBSERVED(code[1], (lei.lei_balance / (1024 * 1024)));
8235 	exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
8236 #endif /* EXC_RESOURCE_MONITORS */
8237 }
8238 
8239 void
task_port_space_ast(__unused task_t task)8240 task_port_space_ast(__unused task_t task)
8241 {
8242 	uint32_t current_size, soft_limit, hard_limit;
8243 	assert(task == current_task());
8244 	kern_return_t ret = ipc_space_get_table_size_and_limits(task->itk_space,
8245 	    &current_size, &soft_limit, &hard_limit);
8246 	if (ret == KERN_SUCCESS) {
8247 		SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_MACH_PORTS(task, current_size, soft_limit, hard_limit);
8248 	}
8249 }
8250 
8251 #if CONFIG_PROC_RESOURCE_LIMITS
8252 static mach_port_t
task_allocate_fatal_port(void)8253 task_allocate_fatal_port(void)
8254 {
8255 	mach_port_t task_fatal_port = MACH_PORT_NULL;
8256 	task_id_token_t token;
8257 
8258 	kern_return_t kr = task_create_identity_token(current_task(), &token); /* Takes a reference on the token */
8259 	if (kr) {
8260 		return MACH_PORT_NULL;
8261 	}
8262 	task_fatal_port = ipc_kobject_alloc_port((ipc_kobject_t)token, IKOT_TASK_FATAL,
8263 	    IPC_KOBJECT_ALLOC_NSREQUEST | IPC_KOBJECT_ALLOC_MAKE_SEND);
8264 
8265 	task_id_token_set_port(token, task_fatal_port);
8266 
8267 	return task_fatal_port;
8268 }
8269 
8270 static void
task_fatal_port_no_senders(ipc_port_t port,__unused mach_port_mscount_t mscount)8271 task_fatal_port_no_senders(ipc_port_t port, __unused mach_port_mscount_t mscount)
8272 {
8273 	task_t task = TASK_NULL;
8274 	kern_return_t kr;
8275 
8276 	task_id_token_t token = ipc_kobject_get_stable(port, IKOT_TASK_FATAL);
8277 
8278 	assert(token != NULL);
8279 	if (token) {
8280 		kr = task_identity_token_get_task_grp(token, &task, TASK_GRP_KERNEL); /* takes a reference on task */
8281 		if (task) {
8282 			task_bsdtask_kill(task);
8283 			task_deallocate(task);
8284 		}
8285 		task_id_token_release(token); /* consumes ref given by notification */
8286 	}
8287 }
8288 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
8289 
8290 void __attribute__((noinline))
SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_MACH_PORTS(task_t task,uint32_t current_size,uint32_t soft_limit,uint32_t hard_limit)8291 SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_MACH_PORTS(task_t task, uint32_t current_size, uint32_t soft_limit, uint32_t hard_limit)
8292 {
8293 	int pid = 0;
8294 	char *procname = (char *) "unknown";
8295 	__unused kern_return_t kr;
8296 	__unused resource_notify_flags_t flags = kRNFlagsNone;
8297 	__unused uint32_t limit;
8298 	__unused mach_port_t task_fatal_port = MACH_PORT_NULL;
8299 	mach_exception_data_type_t      code[EXCEPTION_CODE_MAX];
8300 
8301 #ifdef MACH_BSD
8302 	pid = proc_selfpid();
8303 	if (get_bsdtask_info(task) != NULL) {
8304 		procname = proc_name_address(get_bsdtask_info(task));
8305 	}
8306 #endif
8307 	/*
8308 	 * Only kernel_task and launchd may be allowed to
8309 	 * have really large ipc space.
8310 	 */
8311 	if (pid == 0 || pid == 1) {
8312 		return;
8313 	}
8314 
8315 	os_log(OS_LOG_DEFAULT, "process %s[%d] caught allocating too many mach ports. \
8316 	    Num of ports allocated %u; \n", procname, pid, current_size);
8317 
8318 	/* Abort the process if it has hit the system-wide limit for ipc port table size */
8319 	if (!hard_limit && !soft_limit) {
8320 		code[0] = code[1] = 0;
8321 		EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_PORTS);
8322 		EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_PORT_SPACE_FULL);
8323 		EXC_RESOURCE_PORTS_ENCODE_PORTS(code[0], current_size);
8324 
8325 		exit_with_port_space_exception(current_proc(), code[0], code[1]);
8326 
8327 		return;
8328 	}
8329 
8330 #if CONFIG_PROC_RESOURCE_LIMITS
8331 	if (hard_limit > 0) {
8332 		flags |= kRNHardLimitFlag;
8333 		limit = hard_limit;
8334 		task_fatal_port = task_allocate_fatal_port();
8335 		if (!task_fatal_port) {
8336 			os_log(OS_LOG_DEFAULT, "process %s[%d] Unable to create task token ident object", procname, pid);
8337 			task_bsdtask_kill(task);
8338 		}
8339 	} else {
8340 		flags |= kRNSoftLimitFlag;
8341 		limit = soft_limit;
8342 	}
8343 
8344 	kr = send_resource_violation_with_fatal_port(send_port_space_violation, task, (int64_t)current_size, (int64_t)limit, task_fatal_port, flags);
8345 	if (kr) {
8346 		os_log(OS_LOG_DEFAULT, "send_resource_violation(ports, ...): error %#x\n", kr);
8347 	}
8348 	if (task_fatal_port) {
8349 		ipc_port_release_send(task_fatal_port);
8350 	}
8351 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
8352 }
8353 
8354 void
task_filedesc_ast(__unused task_t task,__unused int current_size,__unused int soft_limit,__unused int hard_limit)8355 task_filedesc_ast(__unused task_t task, __unused int current_size, __unused int soft_limit, __unused int hard_limit)
8356 {
8357 #if CONFIG_PROC_RESOURCE_LIMITS
8358 	assert(task == current_task());
8359 	SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task, current_size, soft_limit, hard_limit);
8360 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
8361 }
8362 
8363 #if CONFIG_PROC_RESOURCE_LIMITS
8364 void __attribute__((noinline))
SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task_t task,int current_size,int soft_limit,int hard_limit)8365 SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task_t task, int current_size, int soft_limit, int hard_limit)
8366 {
8367 	int pid = 0;
8368 	char *procname = (char *) "unknown";
8369 	kern_return_t kr;
8370 	resource_notify_flags_t flags = kRNFlagsNone;
8371 	int limit;
8372 	mach_port_t task_fatal_port = MACH_PORT_NULL;
8373 
8374 #ifdef MACH_BSD
8375 	pid = proc_selfpid();
8376 	if (get_bsdtask_info(task) != NULL) {
8377 		procname = proc_name_address(get_bsdtask_info(task));
8378 	}
8379 #endif
8380 	/*
8381 	 * Only kernel_task and launchd may be allowed to
8382 	 * have really large ipc space.
8383 	 */
8384 	if (pid == 0 || pid == 1) {
8385 		return;
8386 	}
8387 
8388 	os_log(OS_LOG_DEFAULT, "process %s[%d] caught allocating too many file descriptors. \
8389 	    Num of fds allocated %u; \n", procname, pid, current_size);
8390 
8391 	if (hard_limit > 0) {
8392 		flags |= kRNHardLimitFlag;
8393 		limit = hard_limit;
8394 		task_fatal_port = task_allocate_fatal_port();
8395 		if (!task_fatal_port) {
8396 			os_log(OS_LOG_DEFAULT, "process %s[%d] Unable to create task token ident object", procname, pid);
8397 			task_bsdtask_kill(task);
8398 		}
8399 	} else {
8400 		flags |= kRNSoftLimitFlag;
8401 		limit = soft_limit;
8402 	}
8403 
8404 	kr = send_resource_violation_with_fatal_port(send_file_descriptors_violation, task, (int64_t)current_size, (int64_t)limit, task_fatal_port, flags);
8405 	if (kr) {
8406 		os_log(OS_LOG_DEFAULT, "send_resource_violation_with_fatal_port(filedesc, ...): error %#x\n", kr);
8407 	}
8408 	if (task_fatal_port) {
8409 		ipc_port_release_send(task_fatal_port);
8410 	}
8411 }
8412 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
8413 
8414 /* Placeholders for the task set/get voucher interfaces */
8415 kern_return_t
task_get_mach_voucher(task_t task,mach_voucher_selector_t __unused which,ipc_voucher_t * voucher)8416 task_get_mach_voucher(
8417 	task_t                  task,
8418 	mach_voucher_selector_t __unused which,
8419 	ipc_voucher_t           *voucher)
8420 {
8421 	if (TASK_NULL == task) {
8422 		return KERN_INVALID_TASK;
8423 	}
8424 
8425 	*voucher = NULL;
8426 	return KERN_SUCCESS;
8427 }
8428 
8429 kern_return_t
task_set_mach_voucher(task_t task,ipc_voucher_t __unused voucher)8430 task_set_mach_voucher(
8431 	task_t                  task,
8432 	ipc_voucher_t           __unused voucher)
8433 {
8434 	if (TASK_NULL == task) {
8435 		return KERN_INVALID_TASK;
8436 	}
8437 
8438 	return KERN_SUCCESS;
8439 }
8440 
8441 kern_return_t
task_swap_mach_voucher(__unused task_t task,__unused ipc_voucher_t new_voucher,ipc_voucher_t * in_out_old_voucher)8442 task_swap_mach_voucher(
8443 	__unused task_t         task,
8444 	__unused ipc_voucher_t  new_voucher,
8445 	ipc_voucher_t          *in_out_old_voucher)
8446 {
8447 	/*
8448 	 * Currently this function is only called from a MIG generated
8449 	 * routine which doesn't release the reference on the voucher
8450 	 * addressed by in_out_old_voucher. To avoid leaking this reference,
8451 	 * a call to release it has been added here.
8452 	 */
8453 	ipc_voucher_release(*in_out_old_voucher);
8454 	OS_ANALYZER_SUPPRESS("81787115") return KERN_NOT_SUPPORTED;
8455 }
8456 
8457 void
task_set_gpu_denied(task_t task,boolean_t denied)8458 task_set_gpu_denied(task_t task, boolean_t denied)
8459 {
8460 	task_lock(task);
8461 
8462 	if (denied) {
8463 		task->t_flags |= TF_GPU_DENIED;
8464 	} else {
8465 		task->t_flags &= ~TF_GPU_DENIED;
8466 	}
8467 
8468 	task_unlock(task);
8469 }
8470 
8471 boolean_t
task_is_gpu_denied(task_t task)8472 task_is_gpu_denied(task_t task)
8473 {
8474 	/* We don't need the lock to read this flag */
8475 	return (task->t_flags & TF_GPU_DENIED) ? TRUE : FALSE;
8476 }
8477 
8478 /*
8479  * Task policy termination uses this path to clear the bit the final time
8480  * during the termination flow, and the TASK_POLICY_TERMINATED bit guarantees
8481  * that it won't be changed again on a terminated task.
8482  */
8483 bool
task_set_game_mode_locked(task_t task,bool enabled)8484 task_set_game_mode_locked(task_t task, bool enabled)
8485 {
8486 	task_lock_assert_owned(task);
8487 
8488 	if (enabled) {
8489 		assert(proc_get_effective_task_policy(task, TASK_POLICY_TERMINATED) == 0);
8490 	}
8491 
8492 	bool previously_enabled = task_get_game_mode(task);
8493 	bool needs_update = false;
8494 	uint32_t new_count = 0;
8495 
8496 	if (enabled) {
8497 		task->t_flags |= TF_GAME_MODE;
8498 	} else {
8499 		task->t_flags &= ~TF_GAME_MODE;
8500 	}
8501 
8502 	if (enabled && !previously_enabled) {
8503 		if (task_coalition_adjust_game_mode_count(task, 1, &new_count) && (new_count == 1)) {
8504 			needs_update = true;
8505 		}
8506 	} else if (!enabled && previously_enabled) {
8507 		if (task_coalition_adjust_game_mode_count(task, -1, &new_count) && (new_count == 0)) {
8508 			needs_update = true;
8509 		}
8510 	}
8511 
8512 	return needs_update;
8513 }
8514 
8515 void
task_set_game_mode(task_t task,bool enabled)8516 task_set_game_mode(task_t task, bool enabled)
8517 {
8518 	bool needs_update = false;
8519 
8520 	task_lock(task);
8521 
8522 	/* After termination, further updates are no longer effective */
8523 	if (proc_get_effective_task_policy(task, TASK_POLICY_TERMINATED) == 0) {
8524 		needs_update = task_set_game_mode_locked(task, enabled);
8525 	}
8526 
8527 	task_unlock(task);
8528 
8529 #if CONFIG_THREAD_GROUPS
8530 	if (needs_update) {
8531 		task_coalition_thread_group_game_mode_update(task);
8532 	}
8533 #endif /* CONFIG_THREAD_GROUPS */
8534 }
8535 
8536 bool
task_get_game_mode(task_t task)8537 task_get_game_mode(task_t task)
8538 {
8539 	/* We don't need the lock to read this flag */
8540 	return task->t_flags & TF_GAME_MODE;
8541 }
8542 
8543 
8544 uint64_t
get_task_memory_region_count(task_t task)8545 get_task_memory_region_count(task_t task)
8546 {
8547 	vm_map_t map;
8548 	map = (task == kernel_task) ? kernel_map: task->map;
8549 	return (uint64_t)get_map_nentries(map);
8550 }
8551 
8552 static void
kdebug_trace_dyld_internal(uint32_t base_code,struct dyld_kernel_image_info * info)8553 kdebug_trace_dyld_internal(uint32_t base_code,
8554     struct dyld_kernel_image_info *info)
8555 {
8556 	static_assert(sizeof(info->uuid) >= 16);
8557 
8558 #if defined(__LP64__)
8559 	uint64_t *uuid = (uint64_t *)&(info->uuid);
8560 
8561 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8562 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code), uuid[0],
8563 	    uuid[1], info->load_addr,
8564 	    (uint64_t)info->fsid.val[0] | ((uint64_t)info->fsid.val[1] << 32),
8565 	    0);
8566 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8567 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 1),
8568 	    (uint64_t)info->fsobjid.fid_objno |
8569 	    ((uint64_t)info->fsobjid.fid_generation << 32),
8570 	    0, 0, 0, 0);
8571 #else /* defined(__LP64__) */
8572 	uint32_t *uuid = (uint32_t *)&(info->uuid);
8573 
8574 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8575 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 2), uuid[0],
8576 	    uuid[1], uuid[2], uuid[3], 0);
8577 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8578 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 3),
8579 	    (uint32_t)info->load_addr, info->fsid.val[0], info->fsid.val[1],
8580 	    info->fsobjid.fid_objno, 0);
8581 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8582 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 4),
8583 	    info->fsobjid.fid_generation, 0, 0, 0, 0);
8584 #endif /* !defined(__LP64__) */
8585 }
8586 
8587 static kern_return_t
kdebug_trace_dyld(task_t task,uint32_t base_code,vm_map_copy_t infos_copy,mach_msg_type_number_t infos_len)8588 kdebug_trace_dyld(task_t task, uint32_t base_code,
8589     vm_map_copy_t infos_copy, mach_msg_type_number_t infos_len)
8590 {
8591 	kern_return_t kr;
8592 	dyld_kernel_image_info_array_t infos;
8593 	vm_map_offset_t map_data;
8594 	vm_offset_t data;
8595 
8596 	if (!infos_copy) {
8597 		return KERN_INVALID_ADDRESS;
8598 	}
8599 
8600 	if (!kdebug_enable ||
8601 	    !kdebug_debugid_enabled(KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, 0))) {
8602 		vm_map_copy_discard(infos_copy);
8603 		return KERN_SUCCESS;
8604 	}
8605 
8606 	if (task == NULL || task != current_task()) {
8607 		return KERN_INVALID_TASK;
8608 	}
8609 
8610 	kr = vm_map_copyout(ipc_kernel_map, &map_data, (vm_map_copy_t)infos_copy);
8611 	if (kr != KERN_SUCCESS) {
8612 		return kr;
8613 	}
8614 
8615 	infos = CAST_DOWN(dyld_kernel_image_info_array_t, map_data);
8616 
8617 	for (mach_msg_type_number_t i = 0; i < infos_len; i++) {
8618 		kdebug_trace_dyld_internal(base_code, &(infos[i]));
8619 	}
8620 
8621 	data = CAST_DOWN(vm_offset_t, map_data);
8622 	mach_vm_deallocate(ipc_kernel_map, data, infos_len * sizeof(infos[0]));
8623 	return KERN_SUCCESS;
8624 }
8625 
8626 kern_return_t
task_register_dyld_image_infos(task_t task,dyld_kernel_image_info_array_t infos_copy,mach_msg_type_number_t infos_len)8627 task_register_dyld_image_infos(task_t task,
8628     dyld_kernel_image_info_array_t infos_copy,
8629     mach_msg_type_number_t infos_len)
8630 {
8631 	return kdebug_trace_dyld(task, DBG_DYLD_UUID_MAP_A,
8632 	           (vm_map_copy_t)infos_copy, infos_len);
8633 }
8634 
8635 kern_return_t
task_unregister_dyld_image_infos(task_t task,dyld_kernel_image_info_array_t infos_copy,mach_msg_type_number_t infos_len)8636 task_unregister_dyld_image_infos(task_t task,
8637     dyld_kernel_image_info_array_t infos_copy,
8638     mach_msg_type_number_t infos_len)
8639 {
8640 	return kdebug_trace_dyld(task, DBG_DYLD_UUID_UNMAP_A,
8641 	           (vm_map_copy_t)infos_copy, infos_len);
8642 }
8643 
8644 kern_return_t
task_get_dyld_image_infos(__unused task_t task,__unused dyld_kernel_image_info_array_t * dyld_images,__unused mach_msg_type_number_t * dyld_imagesCnt)8645 task_get_dyld_image_infos(__unused task_t task,
8646     __unused dyld_kernel_image_info_array_t * dyld_images,
8647     __unused mach_msg_type_number_t * dyld_imagesCnt)
8648 {
8649 	return KERN_NOT_SUPPORTED;
8650 }
8651 
8652 kern_return_t
task_register_dyld_shared_cache_image_info(task_t task,dyld_kernel_image_info_t cache_img,__unused boolean_t no_cache,__unused boolean_t private_cache)8653 task_register_dyld_shared_cache_image_info(task_t task,
8654     dyld_kernel_image_info_t cache_img,
8655     __unused boolean_t no_cache,
8656     __unused boolean_t private_cache)
8657 {
8658 	if (task == NULL || task != current_task()) {
8659 		return KERN_INVALID_TASK;
8660 	}
8661 
8662 	kdebug_trace_dyld_internal(DBG_DYLD_UUID_SHARED_CACHE_A, &cache_img);
8663 	return KERN_SUCCESS;
8664 }
8665 
8666 kern_return_t
task_register_dyld_set_dyld_state(__unused task_t task,__unused uint8_t dyld_state)8667 task_register_dyld_set_dyld_state(__unused task_t task,
8668     __unused uint8_t dyld_state)
8669 {
8670 	return KERN_NOT_SUPPORTED;
8671 }
8672 
8673 kern_return_t
task_register_dyld_get_process_state(__unused task_t task,__unused dyld_kernel_process_info_t * dyld_process_state)8674 task_register_dyld_get_process_state(__unused task_t task,
8675     __unused dyld_kernel_process_info_t * dyld_process_state)
8676 {
8677 	return KERN_NOT_SUPPORTED;
8678 }
8679 
8680 kern_return_t
task_inspect(task_inspect_t task_insp,task_inspect_flavor_t flavor,task_inspect_info_t info_out,mach_msg_type_number_t * size_in_out)8681 task_inspect(task_inspect_t task_insp, task_inspect_flavor_t flavor,
8682     task_inspect_info_t info_out, mach_msg_type_number_t *size_in_out)
8683 {
8684 #if CONFIG_PERVASIVE_CPI
8685 	task_t task = (task_t)task_insp;
8686 	kern_return_t kr = KERN_SUCCESS;
8687 	mach_msg_type_number_t size;
8688 
8689 	if (task == TASK_NULL) {
8690 		return KERN_INVALID_ARGUMENT;
8691 	}
8692 
8693 	size = *size_in_out;
8694 
8695 	switch (flavor) {
8696 	case TASK_INSPECT_BASIC_COUNTS: {
8697 		struct task_inspect_basic_counts *bc =
8698 		    (struct task_inspect_basic_counts *)info_out;
8699 		struct recount_usage stats = { 0 };
8700 		if (size < TASK_INSPECT_BASIC_COUNTS_COUNT) {
8701 			kr = KERN_INVALID_ARGUMENT;
8702 			break;
8703 		}
8704 
8705 		recount_sum(&recount_task_plan, task->tk_recount.rtk_lifetime, &stats);
8706 		bc->instructions = stats.ru_instructions;
8707 		bc->cycles = stats.ru_cycles;
8708 		size = TASK_INSPECT_BASIC_COUNTS_COUNT;
8709 		break;
8710 	}
8711 	default:
8712 		kr = KERN_INVALID_ARGUMENT;
8713 		break;
8714 	}
8715 
8716 	if (kr == KERN_SUCCESS) {
8717 		*size_in_out = size;
8718 	}
8719 	return kr;
8720 #else /* CONFIG_PERVASIVE_CPI */
8721 #pragma unused(task_insp, flavor, info_out, size_in_out)
8722 	return KERN_NOT_SUPPORTED;
8723 #endif /* !CONFIG_PERVASIVE_CPI */
8724 }
8725 
8726 #if CONFIG_SECLUDED_MEMORY
8727 int num_tasks_can_use_secluded_mem = 0;
8728 
8729 void
task_set_can_use_secluded_mem(task_t task,boolean_t can_use_secluded_mem)8730 task_set_can_use_secluded_mem(
8731 	task_t          task,
8732 	boolean_t       can_use_secluded_mem)
8733 {
8734 	if (!task->task_could_use_secluded_mem) {
8735 		return;
8736 	}
8737 	task_lock(task);
8738 	task_set_can_use_secluded_mem_locked(task, can_use_secluded_mem);
8739 	task_unlock(task);
8740 }
8741 
8742 void
task_set_can_use_secluded_mem_locked(task_t task,boolean_t can_use_secluded_mem)8743 task_set_can_use_secluded_mem_locked(
8744 	task_t          task,
8745 	boolean_t       can_use_secluded_mem)
8746 {
8747 	assert(task->task_could_use_secluded_mem);
8748 	if (can_use_secluded_mem &&
8749 	    secluded_for_apps &&         /* global boot-arg */
8750 	    !task->task_can_use_secluded_mem) {
8751 		assert(num_tasks_can_use_secluded_mem >= 0);
8752 		OSAddAtomic(+1,
8753 		    (volatile SInt32 *)&num_tasks_can_use_secluded_mem);
8754 		task->task_can_use_secluded_mem = TRUE;
8755 	} else if (!can_use_secluded_mem &&
8756 	    task->task_can_use_secluded_mem) {
8757 		assert(num_tasks_can_use_secluded_mem > 0);
8758 		OSAddAtomic(-1,
8759 		    (volatile SInt32 *)&num_tasks_can_use_secluded_mem);
8760 		task->task_can_use_secluded_mem = FALSE;
8761 	}
8762 }
8763 
8764 void
task_set_could_use_secluded_mem(task_t task,boolean_t could_use_secluded_mem)8765 task_set_could_use_secluded_mem(
8766 	task_t          task,
8767 	boolean_t       could_use_secluded_mem)
8768 {
8769 	task->task_could_use_secluded_mem = !!could_use_secluded_mem;
8770 }
8771 
8772 void
task_set_could_also_use_secluded_mem(task_t task,boolean_t could_also_use_secluded_mem)8773 task_set_could_also_use_secluded_mem(
8774 	task_t          task,
8775 	boolean_t       could_also_use_secluded_mem)
8776 {
8777 	task->task_could_also_use_secluded_mem = !!could_also_use_secluded_mem;
8778 }
8779 
8780 boolean_t
task_can_use_secluded_mem(task_t task,boolean_t is_alloc)8781 task_can_use_secluded_mem(
8782 	task_t          task,
8783 	boolean_t       is_alloc)
8784 {
8785 	if (task->task_can_use_secluded_mem) {
8786 		assert(task->task_could_use_secluded_mem);
8787 		assert(num_tasks_can_use_secluded_mem > 0);
8788 		return TRUE;
8789 	}
8790 	if (task->task_could_also_use_secluded_mem &&
8791 	    num_tasks_can_use_secluded_mem > 0) {
8792 		assert(num_tasks_can_use_secluded_mem > 0);
8793 		return TRUE;
8794 	}
8795 
8796 	/*
8797 	 * If a single task is using more than some large amount of
8798 	 * memory (i.e. secluded_shutoff_trigger) and is approaching
8799 	 * its task limit, allow it to dip into secluded and begin
8800 	 * suppression of rebuilding secluded memory until that task exits.
8801 	 */
8802 	if (is_alloc && secluded_shutoff_trigger != 0) {
8803 		uint64_t phys_used = get_task_phys_footprint(task);
8804 		uint64_t limit = get_task_phys_footprint_limit(task);
8805 		if (phys_used > secluded_shutoff_trigger &&
8806 		    limit > secluded_shutoff_trigger &&
8807 		    phys_used > limit - secluded_shutoff_headroom) {
8808 			start_secluded_suppression(task);
8809 			return TRUE;
8810 		}
8811 	}
8812 
8813 	return FALSE;
8814 }
8815 
8816 boolean_t
task_could_use_secluded_mem(task_t task)8817 task_could_use_secluded_mem(
8818 	task_t  task)
8819 {
8820 	return task->task_could_use_secluded_mem;
8821 }
8822 
8823 boolean_t
task_could_also_use_secluded_mem(task_t task)8824 task_could_also_use_secluded_mem(
8825 	task_t  task)
8826 {
8827 	return task->task_could_also_use_secluded_mem;
8828 }
8829 #endif /* CONFIG_SECLUDED_MEMORY */
8830 
8831 queue_head_t *
task_io_user_clients(task_t task)8832 task_io_user_clients(task_t task)
8833 {
8834 	return &task->io_user_clients;
8835 }
8836 
8837 void
task_set_message_app_suspended(task_t task,boolean_t enable)8838 task_set_message_app_suspended(task_t task, boolean_t enable)
8839 {
8840 	task->message_app_suspended = enable;
8841 }
8842 
8843 void
task_copy_fields_for_exec(task_t dst_task,task_t src_task)8844 task_copy_fields_for_exec(task_t dst_task, task_t src_task)
8845 {
8846 	dst_task->vtimers = src_task->vtimers;
8847 }
8848 
8849 #if DEVELOPMENT || DEBUG
8850 int vm_region_footprint = 0;
8851 #endif /* DEVELOPMENT || DEBUG */
8852 
8853 boolean_t
task_self_region_footprint(void)8854 task_self_region_footprint(void)
8855 {
8856 #if DEVELOPMENT || DEBUG
8857 	if (vm_region_footprint) {
8858 		/* system-wide override */
8859 		return TRUE;
8860 	}
8861 #endif /* DEVELOPMENT || DEBUG */
8862 	return current_task()->task_region_footprint;
8863 }
8864 
8865 void
task_self_region_footprint_set(boolean_t newval)8866 task_self_region_footprint_set(
8867 	boolean_t newval)
8868 {
8869 	task_t  curtask;
8870 
8871 	curtask = current_task();
8872 	task_lock(curtask);
8873 	if (newval) {
8874 		curtask->task_region_footprint = TRUE;
8875 	} else {
8876 		curtask->task_region_footprint = FALSE;
8877 	}
8878 	task_unlock(curtask);
8879 }
8880 
8881 void
task_set_darkwake_mode(task_t task,boolean_t set_mode)8882 task_set_darkwake_mode(task_t task, boolean_t set_mode)
8883 {
8884 	assert(task);
8885 
8886 	task_lock(task);
8887 
8888 	if (set_mode) {
8889 		task->t_flags |= TF_DARKWAKE_MODE;
8890 	} else {
8891 		task->t_flags &= ~(TF_DARKWAKE_MODE);
8892 	}
8893 
8894 	task_unlock(task);
8895 }
8896 
8897 boolean_t
task_get_darkwake_mode(task_t task)8898 task_get_darkwake_mode(task_t task)
8899 {
8900 	assert(task);
8901 	return (task->t_flags & TF_DARKWAKE_MODE) != 0;
8902 }
8903 
8904 /*
8905  * Set default behavior for task's control port and EXC_GUARD variants that have
8906  * settable behavior.
8907  *
8908  * Platform binaries typically have one behavior, third parties another -
8909  * but there are special exception we may need to account for.
8910  */
8911 void
task_set_exc_guard_ctrl_port_default(task_t task,thread_t main_thread,const char * name,unsigned int namelen,boolean_t is_simulated,uint32_t platform,uint32_t sdk)8912 task_set_exc_guard_ctrl_port_default(
8913 	task_t task,
8914 	thread_t main_thread,
8915 	const char *name,
8916 	unsigned int namelen,
8917 	boolean_t is_simulated,
8918 	uint32_t platform,
8919 	uint32_t sdk)
8920 {
8921 	task_control_port_options_t opts = TASK_CONTROL_PORT_OPTIONS_NONE;
8922 
8923 	if (task_get_platform_binary(task)) {
8924 		/* set exc guard default behavior for first-party code */
8925 		task->task_exc_guard = (task_exc_guard_default & TASK_EXC_GUARD_ALL);
8926 
8927 		if (1 == task_pid(task)) {
8928 			/* special flags for inittask - delivery every instance as corpse */
8929 			task->task_exc_guard = _TASK_EXC_GUARD_ALL_CORPSE;
8930 		} else if (task_exc_guard_default & TASK_EXC_GUARD_HONOR_NAMED_DEFAULTS) {
8931 			/* honor by-name default setting overrides */
8932 
8933 			int count = sizeof(task_exc_guard_named_defaults) / sizeof(struct task_exc_guard_named_default);
8934 
8935 			for (int i = 0; i < count; i++) {
8936 				const struct task_exc_guard_named_default *named_default =
8937 				    &task_exc_guard_named_defaults[i];
8938 				if (strncmp(named_default->name, name, namelen) == 0 &&
8939 				    strlen(named_default->name) == namelen) {
8940 					task->task_exc_guard = named_default->behavior;
8941 					break;
8942 				}
8943 			}
8944 		}
8945 
8946 		/* set control port options for 1p code, inherited from parent task by default */
8947 		opts = ipc_control_port_options & ICP_OPTIONS_1P_MASK;
8948 	} else {
8949 		/* set exc guard default behavior for third-party code */
8950 		task->task_exc_guard = ((task_exc_guard_default >> TASK_EXC_GUARD_THIRD_PARTY_DEFAULT_SHIFT) & TASK_EXC_GUARD_ALL);
8951 		/* set control port options for 3p code, inherited from parent task by default */
8952 		opts = (ipc_control_port_options & ICP_OPTIONS_3P_MASK) >> ICP_OPTIONS_3P_SHIFT;
8953 	}
8954 
8955 	if (is_simulated) {
8956 		/* If simulated and built against pre-iOS 15 SDK, disable all EXC_GUARD */
8957 		if ((platform == PLATFORM_IOSSIMULATOR && sdk < 0xf0000) ||
8958 		    (platform == PLATFORM_TVOSSIMULATOR && sdk < 0xf0000) ||
8959 		    (platform == PLATFORM_WATCHOSSIMULATOR && sdk < 0x80000)) {
8960 			task->task_exc_guard = TASK_EXC_GUARD_NONE;
8961 		}
8962 		/* Disable protection for control ports for simulated binaries */
8963 		opts = TASK_CONTROL_PORT_OPTIONS_NONE;
8964 	}
8965 
8966 
8967 	task_set_control_port_options(task, opts);
8968 
8969 	task_set_immovable_pinned(task);
8970 	main_thread_set_immovable_pinned(main_thread);
8971 }
8972 
8973 kern_return_t
task_get_exc_guard_behavior(task_t task,task_exc_guard_behavior_t * behaviorp)8974 task_get_exc_guard_behavior(
8975 	task_t task,
8976 	task_exc_guard_behavior_t *behaviorp)
8977 {
8978 	if (task == TASK_NULL) {
8979 		return KERN_INVALID_TASK;
8980 	}
8981 	*behaviorp = task->task_exc_guard;
8982 	return KERN_SUCCESS;
8983 }
8984 
8985 kern_return_t
task_set_exc_guard_behavior(task_t task,task_exc_guard_behavior_t new_behavior)8986 task_set_exc_guard_behavior(
8987 	task_t task,
8988 	task_exc_guard_behavior_t new_behavior)
8989 {
8990 	if (task == TASK_NULL) {
8991 		return KERN_INVALID_TASK;
8992 	}
8993 	if (new_behavior & ~TASK_EXC_GUARD_ALL) {
8994 		return KERN_INVALID_VALUE;
8995 	}
8996 
8997 	/* limit setting to that allowed for this config */
8998 	new_behavior = new_behavior & task_exc_guard_config_mask;
8999 
9000 #if !defined (DEBUG) && !defined (DEVELOPMENT)
9001 	/* On release kernels, only allow _upgrading_ exc guard behavior */
9002 	task_exc_guard_behavior_t cur_behavior;
9003 
9004 	os_atomic_rmw_loop(&task->task_exc_guard, cur_behavior, new_behavior, relaxed, {
9005 		if ((cur_behavior & task_exc_guard_no_unset_mask) & ~(new_behavior & task_exc_guard_no_unset_mask)) {
9006 		        os_atomic_rmw_loop_give_up(return KERN_DENIED);
9007 		}
9008 
9009 		if ((new_behavior & task_exc_guard_no_set_mask) & ~(cur_behavior & task_exc_guard_no_set_mask)) {
9010 		        os_atomic_rmw_loop_give_up(return KERN_DENIED);
9011 		}
9012 
9013 		/* no restrictions on CORPSE bit */
9014 	});
9015 #else
9016 	task->task_exc_guard = new_behavior;
9017 #endif
9018 	return KERN_SUCCESS;
9019 }
9020 
9021 kern_return_t
task_set_corpse_forking_behavior(task_t task,task_corpse_forking_behavior_t behavior)9022 task_set_corpse_forking_behavior(task_t task, task_corpse_forking_behavior_t behavior)
9023 {
9024 #if DEVELOPMENT || DEBUG
9025 	if (task == TASK_NULL) {
9026 		return KERN_INVALID_TASK;
9027 	}
9028 
9029 	task_lock(task);
9030 	if (behavior & TASK_CORPSE_FORKING_DISABLED_MEM_DIAG) {
9031 		task->t_flags |= TF_NO_CORPSE_FORKING;
9032 	} else {
9033 		task->t_flags &= ~TF_NO_CORPSE_FORKING;
9034 	}
9035 	task_unlock(task);
9036 
9037 	return KERN_SUCCESS;
9038 #else
9039 	(void)task;
9040 	(void)behavior;
9041 	return KERN_NOT_SUPPORTED;
9042 #endif
9043 }
9044 
9045 boolean_t
task_corpse_forking_disabled(task_t task)9046 task_corpse_forking_disabled(task_t task)
9047 {
9048 	boolean_t disabled = FALSE;
9049 
9050 	task_lock(task);
9051 	disabled = (task->t_flags & TF_NO_CORPSE_FORKING);
9052 	task_unlock(task);
9053 
9054 	return disabled;
9055 }
9056 
9057 #if __arm64__
9058 extern int legacy_footprint_entitlement_mode;
9059 extern void memorystatus_act_on_legacy_footprint_entitlement(struct proc *, boolean_t);
9060 extern void memorystatus_act_on_ios13extended_footprint_entitlement(struct proc *);
9061 
9062 
9063 void
task_set_legacy_footprint(task_t task)9064 task_set_legacy_footprint(
9065 	task_t task)
9066 {
9067 	task_lock(task);
9068 	task->task_legacy_footprint = TRUE;
9069 	task_unlock(task);
9070 }
9071 
9072 void
task_set_extra_footprint_limit(task_t task)9073 task_set_extra_footprint_limit(
9074 	task_t task)
9075 {
9076 	if (task->task_extra_footprint_limit) {
9077 		return;
9078 	}
9079 	task_lock(task);
9080 	if (task->task_extra_footprint_limit) {
9081 		task_unlock(task);
9082 		return;
9083 	}
9084 	task->task_extra_footprint_limit = TRUE;
9085 	task_unlock(task);
9086 	memorystatus_act_on_legacy_footprint_entitlement(get_bsdtask_info(task), TRUE);
9087 }
9088 
9089 void
task_set_ios13extended_footprint_limit(task_t task)9090 task_set_ios13extended_footprint_limit(
9091 	task_t task)
9092 {
9093 	if (task->task_ios13extended_footprint_limit) {
9094 		return;
9095 	}
9096 	task_lock(task);
9097 	if (task->task_ios13extended_footprint_limit) {
9098 		task_unlock(task);
9099 		return;
9100 	}
9101 	task->task_ios13extended_footprint_limit = TRUE;
9102 	task_unlock(task);
9103 	memorystatus_act_on_ios13extended_footprint_entitlement(get_bsdtask_info(task));
9104 }
9105 #endif /* __arm64__ */
9106 
9107 static inline ledger_amount_t
task_ledger_get_balance(ledger_t ledger,int ledger_idx)9108 task_ledger_get_balance(
9109 	ledger_t        ledger,
9110 	int             ledger_idx)
9111 {
9112 	ledger_amount_t amount;
9113 	amount = 0;
9114 	ledger_get_balance(ledger, ledger_idx, &amount);
9115 	return amount;
9116 }
9117 
9118 /*
9119  * Gather the amount of memory counted in a task's footprint due to
9120  * being in a specific set of ledgers.
9121  */
9122 void
task_ledgers_footprint(ledger_t ledger,ledger_amount_t * ledger_resident,ledger_amount_t * ledger_compressed)9123 task_ledgers_footprint(
9124 	ledger_t        ledger,
9125 	ledger_amount_t *ledger_resident,
9126 	ledger_amount_t *ledger_compressed)
9127 {
9128 	*ledger_resident = 0;
9129 	*ledger_compressed = 0;
9130 
9131 	/* purgeable non-volatile memory */
9132 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.purgeable_nonvolatile);
9133 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.purgeable_nonvolatile_compressed);
9134 
9135 	/* "default" tagged memory */
9136 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.tagged_footprint);
9137 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.tagged_footprint_compressed);
9138 
9139 	/* "network" currently never counts in the footprint... */
9140 
9141 	/* "media" tagged memory */
9142 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.media_footprint);
9143 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.media_footprint_compressed);
9144 
9145 	/* "graphics" tagged memory */
9146 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.graphics_footprint);
9147 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.graphics_footprint_compressed);
9148 
9149 	/* "neural" tagged memory */
9150 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.neural_footprint);
9151 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.neural_footprint_compressed);
9152 }
9153 
9154 #if CONFIG_MEMORYSTATUS
9155 /*
9156  * Credit any outstanding task dirty time to the ledger.
9157  * memstat_dirty_start is pushed forward to prevent any possibility of double
9158  * counting, making it safe to call this as often as necessary to ensure that
9159  * anyone reading the ledger gets up-to-date information.
9160  */
9161 void
task_ledger_settle_dirty_time(task_t t)9162 task_ledger_settle_dirty_time(task_t t)
9163 {
9164 	task_lock(t);
9165 
9166 	uint64_t start = t->memstat_dirty_start;
9167 	if (start) {
9168 		uint64_t now = mach_absolute_time();
9169 
9170 		uint64_t duration;
9171 		absolutetime_to_nanoseconds(now - start, &duration);
9172 
9173 		ledger_t ledger = get_task_ledger(t);
9174 		ledger_credit(ledger, task_ledgers.memorystatus_dirty_time, duration);
9175 
9176 		t->memstat_dirty_start = now;
9177 	}
9178 
9179 	task_unlock(t);
9180 }
9181 #endif /* CONFIG_MEMORYSTATUS */
9182 
9183 void
task_set_memory_ownership_transfer(task_t task,boolean_t value)9184 task_set_memory_ownership_transfer(
9185 	task_t    task,
9186 	boolean_t value)
9187 {
9188 	task_lock(task);
9189 	task->task_can_transfer_memory_ownership = !!value;
9190 	task_unlock(task);
9191 }
9192 
9193 #if DEVELOPMENT || DEBUG
9194 
9195 void
task_set_no_footprint_for_debug(task_t task,boolean_t value)9196 task_set_no_footprint_for_debug(task_t task, boolean_t value)
9197 {
9198 	task_lock(task);
9199 	task->task_no_footprint_for_debug = !!value;
9200 	task_unlock(task);
9201 }
9202 
9203 int
task_get_no_footprint_for_debug(task_t task)9204 task_get_no_footprint_for_debug(task_t task)
9205 {
9206 	return task->task_no_footprint_for_debug;
9207 }
9208 
9209 #endif /* DEVELOPMENT || DEBUG */
9210 
9211 void
task_copy_vmobjects(task_t task,vm_object_query_t query,size_t len,size_t * num)9212 task_copy_vmobjects(task_t task, vm_object_query_t query, size_t len, size_t *num)
9213 {
9214 	vm_object_t find_vmo;
9215 	size_t size = 0;
9216 
9217 	task_objq_lock(task);
9218 	if (query != NULL) {
9219 		queue_iterate(&task->task_objq, find_vmo, vm_object_t, task_objq)
9220 		{
9221 			vm_object_query_t p = &query[size++];
9222 
9223 			/* make sure to not overrun */
9224 			if (size * sizeof(vm_object_query_data_t) > len) {
9225 				--size;
9226 				break;
9227 			}
9228 
9229 			bzero(p, sizeof(*p));
9230 			p->object_id = (vm_object_id_t) VM_KERNEL_ADDRPERM(find_vmo);
9231 			p->virtual_size = find_vmo->internal ? find_vmo->vo_size : 0;
9232 			p->resident_size = find_vmo->resident_page_count * PAGE_SIZE;
9233 			p->wired_size = find_vmo->wired_page_count * PAGE_SIZE;
9234 			p->reusable_size = find_vmo->reusable_page_count * PAGE_SIZE;
9235 			p->vo_no_footprint = find_vmo->vo_no_footprint;
9236 			p->vo_ledger_tag = find_vmo->vo_ledger_tag;
9237 			p->purgable = find_vmo->purgable;
9238 
9239 			if (find_vmo->internal && find_vmo->pager_created && find_vmo->pager != NULL) {
9240 				p->compressed_size = vm_compressor_pager_get_count(find_vmo->pager) * PAGE_SIZE;
9241 			} else {
9242 				p->compressed_size = 0;
9243 			}
9244 		}
9245 	} else {
9246 		size = (size_t)task->task_owned_objects;
9247 	}
9248 	task_objq_unlock(task);
9249 
9250 	*num = size;
9251 }
9252 
9253 void
task_get_owned_vmobjects(task_t task,size_t buffer_size,vmobject_list_output_t buffer,size_t * output_size,size_t * entries)9254 task_get_owned_vmobjects(task_t task, size_t buffer_size, vmobject_list_output_t buffer, size_t* output_size, size_t* entries)
9255 {
9256 	assert(output_size);
9257 	assert(entries);
9258 
9259 	/* copy the vmobjects and vmobject data out of the task */
9260 	if (buffer_size == 0) {
9261 		task_copy_vmobjects(task, NULL, 0, entries);
9262 		*output_size = (*entries > 0) ? *entries * sizeof(vm_object_query_data_t) + sizeof(*buffer) : 0;
9263 	} else {
9264 		assert(buffer);
9265 		task_copy_vmobjects(task, &buffer->data[0], buffer_size - sizeof(*buffer), entries);
9266 		buffer->entries = (uint64_t)*entries;
9267 		*output_size = *entries * sizeof(vm_object_query_data_t) + sizeof(*buffer);
9268 	}
9269 }
9270 
9271 void
task_store_owned_vmobject_info(task_t to_task,task_t from_task)9272 task_store_owned_vmobject_info(task_t to_task, task_t from_task)
9273 {
9274 	size_t buffer_size;
9275 	vmobject_list_output_t buffer;
9276 	size_t output_size;
9277 	size_t entries;
9278 
9279 	assert(to_task != from_task);
9280 
9281 	/* get the size, allocate a bufferr, and populate */
9282 	entries = 0;
9283 	output_size = 0;
9284 	task_get_owned_vmobjects(from_task, 0, NULL, &output_size, &entries);
9285 
9286 	if (output_size) {
9287 		buffer_size = output_size;
9288 		buffer = kalloc_data(buffer_size, Z_WAITOK);
9289 
9290 		if (buffer) {
9291 			entries = 0;
9292 			output_size = 0;
9293 
9294 			task_get_owned_vmobjects(from_task, buffer_size, buffer, &output_size, &entries);
9295 
9296 			if (entries) {
9297 				to_task->corpse_vmobject_list = buffer;
9298 				to_task->corpse_vmobject_list_size = buffer_size;
9299 			}
9300 		}
9301 	}
9302 }
9303 
9304 void
task_set_filter_msg_flag(task_t task,boolean_t flag)9305 task_set_filter_msg_flag(
9306 	task_t task,
9307 	boolean_t flag)
9308 {
9309 	assert(task != TASK_NULL);
9310 
9311 	if (flag) {
9312 		task_ro_flags_set(task, TFRO_FILTER_MSG);
9313 	} else {
9314 		task_ro_flags_clear(task, TFRO_FILTER_MSG);
9315 	}
9316 }
9317 
9318 boolean_t
task_get_filter_msg_flag(task_t task)9319 task_get_filter_msg_flag(
9320 	task_t task)
9321 {
9322 	if (!task) {
9323 		return false;
9324 	}
9325 
9326 	return (task_ro_flags_get(task) & TFRO_FILTER_MSG) ? TRUE : FALSE;
9327 }
9328 bool
task_is_exotic(task_t task)9329 task_is_exotic(
9330 	task_t task)
9331 {
9332 	if (task == TASK_NULL) {
9333 		return false;
9334 	}
9335 	return vm_map_is_exotic(get_task_map(task));
9336 }
9337 
9338 bool
task_is_alien(task_t task)9339 task_is_alien(
9340 	task_t task)
9341 {
9342 	if (task == TASK_NULL) {
9343 		return false;
9344 	}
9345 	return vm_map_is_alien(get_task_map(task));
9346 }
9347 
9348 
9349 
9350 #if CONFIG_MACF
9351 /* Set the filter mask for Mach traps. */
9352 void
mac_task_set_mach_filter_mask(task_t task,uint8_t * maskptr)9353 mac_task_set_mach_filter_mask(task_t task, uint8_t *maskptr)
9354 {
9355 	assert(task);
9356 
9357 	task_set_mach_trap_filter_mask(task, maskptr);
9358 }
9359 
9360 /* Set the filter mask for kobject msgs. */
9361 void
mac_task_set_kobj_filter_mask(task_t task,uint8_t * maskptr)9362 mac_task_set_kobj_filter_mask(task_t task, uint8_t *maskptr)
9363 {
9364 	assert(task);
9365 
9366 	task_set_mach_kobj_filter_mask(task, maskptr);
9367 }
9368 
9369 /* Hook for mach trap/sc filter evaluation policy. */
9370 SECURITY_READ_ONLY_LATE(mac_task_mach_filter_cbfunc_t) mac_task_mach_trap_evaluate = NULL;
9371 
9372 /* Hook for kobj message filter evaluation policy. */
9373 SECURITY_READ_ONLY_LATE(mac_task_kobj_filter_cbfunc_t) mac_task_kobj_msg_evaluate = NULL;
9374 
9375 /* Set the callback hooks for the filtering policy. */
9376 int
mac_task_register_filter_callbacks(const mac_task_mach_filter_cbfunc_t mach_cbfunc,const mac_task_kobj_filter_cbfunc_t kobj_cbfunc)9377 mac_task_register_filter_callbacks(
9378 	const mac_task_mach_filter_cbfunc_t mach_cbfunc,
9379 	const mac_task_kobj_filter_cbfunc_t kobj_cbfunc)
9380 {
9381 	if (mach_cbfunc != NULL) {
9382 		if (mac_task_mach_trap_evaluate != NULL) {
9383 			return KERN_FAILURE;
9384 		}
9385 		mac_task_mach_trap_evaluate = mach_cbfunc;
9386 	}
9387 	if (kobj_cbfunc != NULL) {
9388 		if (mac_task_kobj_msg_evaluate != NULL) {
9389 			return KERN_FAILURE;
9390 		}
9391 		mac_task_kobj_msg_evaluate = kobj_cbfunc;
9392 	}
9393 
9394 	return KERN_SUCCESS;
9395 }
9396 #endif /* CONFIG_MACF */
9397 
9398 #if CONFIG_ROSETTA
9399 bool
task_is_translated(task_t task)9400 task_is_translated(task_t task)
9401 {
9402 	extern boolean_t proc_is_translated(struct proc* p);
9403 	return task && proc_is_translated(get_bsdtask_info(task));
9404 }
9405 #endif
9406 
9407 
9408 #if __has_feature(ptrauth_calls)
9409 /* All pac violations will be delivered as fatal exceptions irrespective of
9410  * the enable_pac_exception boot-arg value.
9411  */
9412 #define PAC_EXCEPTION_ENTITLEMENT "com.apple.private.pac.exception"
9413 /*
9414  * When enable_pac_exception boot-arg is set to true, processes
9415  * can choose to get non-fatal pac exception delivery by setting
9416  * this entitlement.
9417  */
9418 #define SKIP_PAC_EXCEPTION_ENTITLEMENT "com.apple.private.skip.pac.exception"
9419 
9420 void
task_set_pac_exception_fatal_flag(task_t task)9421 task_set_pac_exception_fatal_flag(
9422 	task_t task)
9423 {
9424 	assert(task != TASK_NULL);
9425 	bool pac_entitlement = false;
9426 	uint32_t set_flags = 0;
9427 
9428 	if (enable_pac_exception && IOTaskHasEntitlement(task, SKIP_PAC_EXCEPTION_ENTITLEMENT)) {
9429 		return;
9430 	}
9431 
9432 	if (IOTaskHasEntitlement(task, PAC_EXCEPTION_ENTITLEMENT)) {
9433 		pac_entitlement = true;
9434 	}
9435 
9436 	if (pac_entitlement) {
9437 		set_flags |= TFRO_PAC_ENFORCE_USER_STATE;
9438 	}
9439 	if (pac_entitlement || (enable_pac_exception && task_get_platform_binary(task))) {
9440 		set_flags |= TFRO_PAC_EXC_FATAL;
9441 	}
9442 	if (set_flags != 0) {
9443 		task_ro_flags_set(task, set_flags);
9444 	}
9445 }
9446 
9447 bool
task_is_pac_exception_fatal(task_t task)9448 task_is_pac_exception_fatal(
9449 	task_t task)
9450 {
9451 	assert(task != TASK_NULL);
9452 	return !!(task_ro_flags_get(task) & TFRO_PAC_EXC_FATAL);
9453 }
9454 #endif /* __has_feature(ptrauth_calls) */
9455 
9456 bool
task_needs_user_signed_thread_state(task_t task)9457 task_needs_user_signed_thread_state(
9458 	task_t task)
9459 {
9460 	assert(task != TASK_NULL);
9461 	return !!(task_ro_flags_get(task) & TFRO_PAC_ENFORCE_USER_STATE);
9462 }
9463 
9464 void
task_set_tecs(task_t task)9465 task_set_tecs(task_t task)
9466 {
9467 	if (task == TASK_NULL) {
9468 		task = current_task();
9469 	}
9470 
9471 	if (!machine_csv(CPUVN_CI)) {
9472 		return;
9473 	}
9474 
9475 	LCK_MTX_ASSERT(&task->lock, LCK_MTX_ASSERT_NOTOWNED);
9476 
9477 	task_lock(task);
9478 
9479 	task->t_flags |= TF_TECS;
9480 
9481 	thread_t thread;
9482 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
9483 		machine_tecs(thread);
9484 	}
9485 	task_unlock(task);
9486 }
9487 
9488 kern_return_t
task_test_sync_upcall(task_t task,ipc_port_t send_port)9489 task_test_sync_upcall(
9490 	task_t     task,
9491 	ipc_port_t send_port)
9492 {
9493 #if DEVELOPMENT || DEBUG
9494 	if (task != current_task() || !IPC_PORT_VALID(send_port)) {
9495 		return KERN_INVALID_ARGUMENT;
9496 	}
9497 
9498 	/* Block on sync kernel upcall on the given send port */
9499 	mach_test_sync_upcall(send_port);
9500 
9501 	ipc_port_release_send(send_port);
9502 	return KERN_SUCCESS;
9503 #else
9504 	(void)task;
9505 	(void)send_port;
9506 	return KERN_NOT_SUPPORTED;
9507 #endif
9508 }
9509 
9510 kern_return_t
task_test_async_upcall_propagation(task_t task,ipc_port_t send_port,int qos,int iotier)9511 task_test_async_upcall_propagation(
9512 	task_t      task,
9513 	ipc_port_t  send_port,
9514 	int         qos,
9515 	int         iotier)
9516 {
9517 #if DEVELOPMENT || DEBUG
9518 	kern_return_t kr;
9519 
9520 	if (task != current_task() || !IPC_PORT_VALID(send_port)) {
9521 		return KERN_INVALID_ARGUMENT;
9522 	}
9523 
9524 	if (qos < THREAD_QOS_DEFAULT || qos > THREAD_QOS_USER_INTERACTIVE ||
9525 	    iotier < THROTTLE_LEVEL_START || iotier > THROTTLE_LEVEL_END) {
9526 		return KERN_INVALID_ARGUMENT;
9527 	}
9528 
9529 	struct thread_attr_for_ipc_propagation attr = {
9530 		.tafip_iotier = iotier,
9531 		.tafip_qos = qos
9532 	};
9533 
9534 	/* Apply propagate attr to port */
9535 	kr = ipc_port_propagate_thread_attr(send_port, attr);
9536 	if (kr != KERN_SUCCESS) {
9537 		return kr;
9538 	}
9539 
9540 	thread_enable_send_importance(current_thread(), TRUE);
9541 
9542 	/* Perform an async kernel upcall on the given send port */
9543 	mach_test_async_upcall(send_port);
9544 	thread_enable_send_importance(current_thread(), FALSE);
9545 
9546 	ipc_port_release_send(send_port);
9547 	return KERN_SUCCESS;
9548 #else
9549 	(void)task;
9550 	(void)send_port;
9551 	(void)qos;
9552 	(void)iotier;
9553 	return KERN_NOT_SUPPORTED;
9554 #endif
9555 }
9556 
9557 #if CONFIG_PROC_RESOURCE_LIMITS
9558 mach_port_name_t
current_task_get_fatal_port_name(void)9559 current_task_get_fatal_port_name(void)
9560 {
9561 	mach_port_t task_fatal_port = MACH_PORT_NULL;
9562 	mach_port_name_t port_name = 0;
9563 
9564 	task_fatal_port = task_allocate_fatal_port();
9565 
9566 	if (task_fatal_port) {
9567 		ipc_object_copyout(current_space(), ip_to_object(task_fatal_port), MACH_MSG_TYPE_PORT_SEND,
9568 		    IPC_OBJECT_COPYOUT_FLAGS_NONE, NULL, NULL, &port_name);
9569 	}
9570 
9571 	return port_name;
9572 }
9573 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
9574 
9575 #if defined(__x86_64__)
9576 bool
curtask_get_insn_copy_optout(void)9577 curtask_get_insn_copy_optout(void)
9578 {
9579 	bool optout;
9580 	task_t cur_task = current_task();
9581 
9582 	task_lock(cur_task);
9583 	optout = (cur_task->t_flags & TF_INSN_COPY_OPTOUT) ? true : false;
9584 	task_unlock(cur_task);
9585 
9586 	return optout;
9587 }
9588 
9589 void
curtask_set_insn_copy_optout(void)9590 curtask_set_insn_copy_optout(void)
9591 {
9592 	task_t cur_task = current_task();
9593 
9594 	task_lock(cur_task);
9595 
9596 	cur_task->t_flags |= TF_INSN_COPY_OPTOUT;
9597 
9598 	thread_t thread;
9599 	queue_iterate(&cur_task->threads, thread, thread_t, task_threads) {
9600 		machine_thread_set_insn_copy_optout(thread);
9601 	}
9602 	task_unlock(cur_task);
9603 }
9604 #endif /* defined(__x86_64__) */
9605 
9606 void
task_get_corpse_vmobject_list(task_t task,vmobject_list_output_t * list,size_t * list_size)9607 task_get_corpse_vmobject_list(task_t task, vmobject_list_output_t* list, size_t* list_size)
9608 {
9609 	assert(task);
9610 	assert(list_size);
9611 
9612 	*list = task->corpse_vmobject_list;
9613 	*list_size = (size_t)task->corpse_vmobject_list_size;
9614 }
9615 
9616 __abortlike
9617 static void
panic_proc_ro_task_backref_mismatch(task_t t,proc_ro_t ro)9618 panic_proc_ro_task_backref_mismatch(task_t t, proc_ro_t ro)
9619 {
9620 	panic("proc_ro->task backref mismatch: t=%p, ro=%p, "
9621 	    "proc_ro_task(ro)=%p", t, ro, proc_ro_task(ro));
9622 }
9623 
9624 proc_ro_t
task_get_ro(task_t t)9625 task_get_ro(task_t t)
9626 {
9627 	proc_ro_t ro = (proc_ro_t)t->bsd_info_ro;
9628 
9629 	zone_require_ro(ZONE_ID_PROC_RO, sizeof(struct proc_ro), ro);
9630 	if (__improbable(proc_ro_task(ro) != t)) {
9631 		panic_proc_ro_task_backref_mismatch(t, ro);
9632 	}
9633 
9634 	return ro;
9635 }
9636 
9637 uint32_t
task_ro_flags_get(task_t task)9638 task_ro_flags_get(task_t task)
9639 {
9640 	return task_get_ro(task)->t_flags_ro;
9641 }
9642 
9643 void
task_ro_flags_set(task_t task,uint32_t flags)9644 task_ro_flags_set(task_t task, uint32_t flags)
9645 {
9646 	zalloc_ro_update_field_atomic(ZONE_ID_PROC_RO, task_get_ro(task),
9647 	    t_flags_ro, ZRO_ATOMIC_OR_32, flags);
9648 }
9649 
9650 void
task_ro_flags_clear(task_t task,uint32_t flags)9651 task_ro_flags_clear(task_t task, uint32_t flags)
9652 {
9653 	zalloc_ro_update_field_atomic(ZONE_ID_PROC_RO, task_get_ro(task),
9654 	    t_flags_ro, ZRO_ATOMIC_AND_32, ~flags);
9655 }
9656 
9657 task_control_port_options_t
task_get_control_port_options(task_t task)9658 task_get_control_port_options(task_t task)
9659 {
9660 	return task_get_ro(task)->task_control_port_options;
9661 }
9662 
9663 void
task_set_control_port_options(task_t task,task_control_port_options_t opts)9664 task_set_control_port_options(task_t task, task_control_port_options_t opts)
9665 {
9666 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
9667 	    task_control_port_options, &opts);
9668 }
9669 
9670 /*!
9671  * @function kdp_task_is_locked
9672  *
9673  * @abstract
9674  * Checks if task is locked.
9675  *
9676  * @discussion
9677  * NOT SAFE: To be used only by kernel debugger.
9678  *
9679  * @param task task to check
9680  *
9681  * @returns TRUE if the task is locked.
9682  */
9683 boolean_t
kdp_task_is_locked(task_t task)9684 kdp_task_is_locked(task_t task)
9685 {
9686 	return kdp_lck_mtx_lock_spin_is_acquired(&task->lock);
9687 }
9688 
9689 #if DEBUG || DEVELOPMENT
9690 /**
9691  *
9692  * Check if a threshold limit is valid based on the actual phys memory
9693  * limit. If they are same, race conditions may arise, so we have to prevent
9694  * it to happen.
9695  */
9696 static diagthreshold_check_return
task_check_memorythreshold_is_valid(task_t task,uint64_t new_limit,bool is_diagnostics_value)9697 task_check_memorythreshold_is_valid(task_t task, uint64_t new_limit, bool is_diagnostics_value)
9698 {
9699 	int phys_limit_mb;
9700 	kern_return_t ret_value;
9701 	bool threshold_enabled;
9702 	bool dummy;
9703 	ret_value = ledger_is_diag_threshold_enabled(task->ledger, task_ledgers.phys_footprint, &threshold_enabled);
9704 	if (ret_value != KERN_SUCCESS) {
9705 		return ret_value;
9706 	}
9707 	if (is_diagnostics_value == true) {
9708 		ret_value = task_get_phys_footprint_limit(task, &phys_limit_mb);
9709 	} else {
9710 		uint64_t diag_limit;
9711 		ret_value = task_get_diag_footprint_limit_internal(task, &diag_limit, &dummy);
9712 		phys_limit_mb = (int)(diag_limit >> 20);
9713 	}
9714 	if (ret_value != KERN_SUCCESS) {
9715 		return ret_value;
9716 	}
9717 	if (phys_limit_mb == (int)  new_limit) {
9718 		if (threshold_enabled == false) {
9719 			return THRESHOLD_IS_SAME_AS_LIMIT_FLAG_DISABLED;
9720 		} else {
9721 			return THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED;
9722 		}
9723 	}
9724 	if (threshold_enabled == false) {
9725 		return THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED;
9726 	} else {
9727 		return THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_ENABLED;
9728 	}
9729 }
9730 #endif
9731 
9732 
9733 #pragma mark task utils
9734 
9735 /* defined in bsd/kern/kern_proc.c */
9736 extern void proc_name(int pid, char *buf, int size);
9737 extern char *proc_best_name(struct proc *p);
9738 
9739 void
task_procname(task_t task,char * buf,int size)9740 task_procname(task_t task, char *buf, int size)
9741 {
9742 	proc_name(task_pid(task), buf, size);
9743 }
9744 
9745 void
task_best_name(task_t task,char * buf,size_t size)9746 task_best_name(task_t task, char *buf, size_t size)
9747 {
9748 	char *name = proc_best_name(task_get_proc_raw(task));
9749 	strlcpy(buf, name, size);
9750 }
9751