xref: /xnu-11215.61.5/osfmk/kern/task.c (revision 4f1223e81cd707a65cc109d0b8ad6653699da3c4)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_FREE_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  *	File:	kern/task.c
58  *	Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
59  *		David Black
60  *
61  *	Task management primitives implementation.
62  */
63 /*
64  * Copyright (c) 1993 The University of Utah and
65  * the Computer Systems Laboratory (CSL).  All rights reserved.
66  *
67  * Permission to use, copy, modify and distribute this software and its
68  * documentation is hereby granted, provided that both the copyright
69  * notice and this permission notice appear in all copies of the
70  * software, derivative works or modified versions, and any portions
71  * thereof, and that both notices appear in supporting documentation.
72  *
73  * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
74  * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
75  * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
76  *
77  * CSL requests users of this software to return to csl-dist@cs.utah.edu any
78  * improvements that they make and grant CSL redistribution rights.
79  *
80  */
81 /*
82  * NOTICE: This file was modified by McAfee Research in 2004 to introduce
83  * support for mandatory and extensible security protections.  This notice
84  * is included in support of clause 2.2 (b) of the Apple Public License,
85  * Version 2.0.
86  * Copyright (c) 2005 SPARTA, Inc.
87  */
88 
89 #include <mach/mach_types.h>
90 #include <mach/boolean.h>
91 #include <mach/host_priv.h>
92 #include <mach/machine/vm_types.h>
93 #include <mach/vm_param.h>
94 #include <mach/mach_vm.h>
95 #include <mach/semaphore.h>
96 #include <mach/task_info.h>
97 #include <mach/task_inspect.h>
98 #include <mach/task_special_ports.h>
99 #include <mach/sdt.h>
100 #include <mach/mach_test_upcall.h>
101 
102 #include <ipc/ipc_importance.h>
103 #include <ipc/ipc_types.h>
104 #include <ipc/ipc_space.h>
105 #include <ipc/ipc_entry.h>
106 #include <ipc/ipc_hash.h>
107 #include <ipc/ipc_init.h>
108 
109 #include <kern/kern_types.h>
110 #include <kern/mach_param.h>
111 #include <kern/misc_protos.h>
112 #include <kern/task.h>
113 #include <kern/thread.h>
114 #include <kern/coalition.h>
115 #include <kern/zalloc.h>
116 #include <kern/kalloc.h>
117 #include <kern/kern_cdata.h>
118 #include <kern/processor.h>
119 #include <kern/recount.h>
120 #include <kern/sched_prim.h>    /* for thread_wakeup */
121 #include <kern/ipc_tt.h>
122 #include <kern/host.h>
123 #include <kern/clock.h>
124 #include <kern/timer.h>
125 #include <kern/assert.h>
126 #include <kern/affinity.h>
127 #include <kern/exc_resource.h>
128 #include <kern/machine.h>
129 #include <kern/policy_internal.h>
130 #include <kern/restartable.h>
131 #include <kern/ipc_kobject.h>
132 
133 #include <corpses/task_corpse.h>
134 #if CONFIG_TELEMETRY
135 #include <kern/telemetry.h>
136 #endif
137 
138 #if CONFIG_PERVASIVE_CPI
139 #include <kern/monotonic.h>
140 #include <machine/monotonic.h>
141 #endif /* CONFIG_PERVASIVE_CPI */
142 
143 #if CONFIG_EXCLAVES
144 #include "exclaves_boot.h"
145 #include "exclaves_resource.h"
146 #include "exclaves_boot.h"
147 #include "exclaves_inspection.h"
148 #include "exclaves_conclave.h"
149 #endif /* CONFIG_EXCLAVES */
150 
151 #include <os/log.h>
152 
153 #include <vm/pmap.h>
154 #include <vm/vm_map_xnu.h>
155 #include <vm/vm_kern_xnu.h>         /* for kernel_map, ipc_kernel_map */
156 #include <vm/vm_pageout_xnu.h>
157 #include <vm/vm_protos.h>
158 #include <vm/vm_purgeable_xnu.h>
159 #include <vm/vm_compressor_pager_xnu.h>
160 #include <vm/vm_reclaim_xnu.h>
161 #include <vm/vm_compressor_xnu.h>
162 
163 #include <sys/kdebug.h>
164 #include <sys/proc_ro.h>
165 #include <sys/resource.h>
166 #include <sys/signalvar.h> /* for coredump */
167 #include <sys/bsdtask_info.h>
168 #include <sys/kdebug_triage.h>
169 #include <sys/code_signing.h> /* for address_space_debugged */
170 #include <sys/reason.h>
171 
172 /*
173  * Exported interfaces
174  */
175 
176 #include <mach/task_server.h>
177 #include <mach/mach_host_server.h>
178 #include <mach/mach_port_server.h>
179 
180 #include <vm/vm_shared_region_xnu.h>
181 
182 #include <libkern/OSDebug.h>
183 #include <libkern/OSAtomic.h>
184 #include <libkern/section_keywords.h>
185 
186 #include <mach-o/loader.h>
187 #include <kdp/kdp_dyld.h>
188 
189 #include <kern/sfi.h>           /* picks up ledger.h */
190 
191 #if CONFIG_MACF
192 #include <security/mac_mach_internal.h>
193 #endif
194 
195 #include <IOKit/IOBSD.h>
196 #include <kdp/processor_core.h>
197 
198 #include <string.h>
199 
200 #if KPERF
201 extern int kpc_force_all_ctrs(task_t, int);
202 #endif
203 
204 SECURITY_READ_ONLY_LATE(task_t) kernel_task;
205 
206 int64_t         next_taskuniqueid = 0;
207 const size_t task_alignment = _Alignof(struct task);
208 extern const size_t proc_alignment;
209 extern size_t proc_struct_size;
210 extern size_t proc_and_task_size;
211 size_t task_struct_size;
212 
213 extern uint32_t ipc_control_port_options;
214 
215 extern int large_corpse_count;
216 
217 extern boolean_t proc_send_synchronous_EXC_RESOURCE(void *p);
218 extern boolean_t proc_is_simulated(const proc_t);
219 
220 static void task_port_no_senders(ipc_port_t, mach_msg_type_number_t);
221 static void task_port_with_flavor_no_senders(ipc_port_t, mach_msg_type_number_t);
222 static void task_suspension_no_senders(ipc_port_t, mach_msg_type_number_t);
223 static inline void task_zone_init(void);
224 
225 #if CONFIG_EXCLAVES
226 static bool task_should_panic_on_exit_due_to_conclave_taint(task_t task);
227 static bool task_is_conclave_tainted(task_t task);
228 static void task_set_conclave_taint(task_t task);
229 kern_return_t task_crash_info_conclave_upcall(task_t task,
230     const struct conclave_sharedbuffer_t *shared_buf, uint32_t length);
231 #endif /* CONFIG_EXCLAVES */
232 
233 IPC_KOBJECT_DEFINE(IKOT_TASK_NAME);
234 IPC_KOBJECT_DEFINE(IKOT_TASK_CONTROL,
235     .iko_op_no_senders = task_port_no_senders);
236 IPC_KOBJECT_DEFINE(IKOT_TASK_READ,
237     .iko_op_no_senders = task_port_with_flavor_no_senders);
238 IPC_KOBJECT_DEFINE(IKOT_TASK_INSPECT,
239     .iko_op_no_senders = task_port_with_flavor_no_senders);
240 IPC_KOBJECT_DEFINE(IKOT_TASK_RESUME,
241     .iko_op_no_senders = task_suspension_no_senders);
242 
243 #if CONFIG_PROC_RESOURCE_LIMITS
244 static void task_fatal_port_no_senders(ipc_port_t, mach_msg_type_number_t);
245 static mach_port_t task_allocate_fatal_port(void);
246 
247 IPC_KOBJECT_DEFINE(IKOT_TASK_FATAL,
248     .iko_op_stable     = true,
249     .iko_op_no_senders = task_fatal_port_no_senders);
250 
251 extern void task_id_token_set_port(task_id_token_t token, ipc_port_t port);
252 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
253 
254 /* Flag set by core audio when audio is playing. Used to stifle EXC_RESOURCE generation when active. */
255 int audio_active = 0;
256 
257 /*
258  *	structure for tracking zone usage
259  *	Used either one per task/thread for all zones or <per-task,per-zone>.
260  */
261 typedef struct zinfo_usage_store_t {
262 	/* These fields may be updated atomically, and so must be 8 byte aligned */
263 	uint64_t        alloc __attribute__((aligned(8)));              /* allocation counter */
264 	uint64_t        free __attribute__((aligned(8)));               /* free counter */
265 } zinfo_usage_store_t;
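
/*
 * Illustrative sketch (not part of the original source): the 8-byte
 * alignment above exists so the counters can be updated atomically from
 * allocation/free paths, e.g. via a hypothetical helper:
 *
 *	static inline void
 *	zinfo_usage_record_alloc(zinfo_usage_store_t *usage, uint64_t size)
 *	{
 *		os_atomic_add(&usage->alloc, size, relaxed);
 *	}
 */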
266 
267 /**
268  * Return codes related to diag threshold and memory limit
269  */
270 __options_decl(diagthreshold_check_return, int, {
271 	THRESHOLD_IS_SAME_AS_LIMIT_FLAG_DISABLED        = 0,
272 	THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED         = 1,
273 	THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED    = 2,
274 	THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_ENABLED     = 3,
275 });
276 
277 /**
278  * Whether the diag threshold is currently the same as the memory limit
279  */
280 __options_decl(current_, int, {
281 	THRESHOLD_IS_SAME_AS_LIMIT      = 0,
282 	THRESHOLD_IS_NOT_SAME_AS_LIMIT  = 1
283 });
284 
285 zinfo_usage_store_t tasks_tkm_private;
286 zinfo_usage_store_t tasks_tkm_shared;
287 
288 /* A container to accumulate statistics for expired tasks */
289 expired_task_statistics_t               dead_task_statistics;
290 LCK_SPIN_DECLARE_ATTR(dead_task_statistics_lock, &task_lck_grp, &task_lck_attr);
291 
292 ledger_template_t task_ledger_template = NULL;
293 
294 /* global lock for task_dyld_process_info_notify_{register, deregister, get_trap} */
295 LCK_GRP_DECLARE(g_dyldinfo_mtx_grp, "g_dyldinfo");
296 LCK_MTX_DECLARE(g_dyldinfo_mtx, &g_dyldinfo_mtx_grp);
297 
298 SECURITY_READ_ONLY_LATE(struct _task_ledger_indices) task_ledgers __attribute__((used)) =
299 {.cpu_time = -1,
300  .tkm_private = -1,
301  .tkm_shared = -1,
302  .phys_mem = -1,
303  .wired_mem = -1,
304  .internal = -1,
305  .iokit_mapped = -1,
306  .external = -1,
307  .reusable = -1,
308  .alternate_accounting = -1,
309  .alternate_accounting_compressed = -1,
310  .page_table = -1,
311  .phys_footprint = -1,
312  .internal_compressed = -1,
313  .purgeable_volatile = -1,
314  .purgeable_nonvolatile = -1,
315  .purgeable_volatile_compressed = -1,
316  .purgeable_nonvolatile_compressed = -1,
317  .tagged_nofootprint = -1,
318  .tagged_footprint = -1,
319  .tagged_nofootprint_compressed = -1,
320  .tagged_footprint_compressed = -1,
321  .network_volatile = -1,
322  .network_nonvolatile = -1,
323  .network_volatile_compressed = -1,
324  .network_nonvolatile_compressed = -1,
325  .media_nofootprint = -1,
326  .media_footprint = -1,
327  .media_nofootprint_compressed = -1,
328  .media_footprint_compressed = -1,
329  .graphics_nofootprint = -1,
330  .graphics_footprint = -1,
331  .graphics_nofootprint_compressed = -1,
332  .graphics_footprint_compressed = -1,
333  .neural_nofootprint = -1,
334  .neural_footprint = -1,
335  .neural_nofootprint_compressed = -1,
336  .neural_footprint_compressed = -1,
337  .neural_nofootprint_total = -1,
338  .platform_idle_wakeups = -1,
339  .interrupt_wakeups = -1,
340 #if CONFIG_SCHED_SFI
341  .sfi_wait_times = { 0 /* initialized at runtime */},
342 #endif /* CONFIG_SCHED_SFI */
343  .cpu_time_billed_to_me = -1,
344  .cpu_time_billed_to_others = -1,
345  .physical_writes = -1,
346  .logical_writes = -1,
347  .logical_writes_to_external = -1,
348 #if DEBUG || DEVELOPMENT
349  .pages_grabbed = -1,
350  .pages_grabbed_kern = -1,
351  .pages_grabbed_iopl = -1,
352  .pages_grabbed_upl = -1,
353 #endif
354 #if CONFIG_FREEZE
355  .frozen_to_swap = -1,
356 #endif /* CONFIG_FREEZE */
357  .energy_billed_to_me = -1,
358  .energy_billed_to_others = -1,
359 #if CONFIG_PHYS_WRITE_ACCT
360  .fs_metadata_writes = -1,
361 #endif /* CONFIG_PHYS_WRITE_ACCT */
362 #if CONFIG_MEMORYSTATUS
363  .memorystatus_dirty_time = -1,
364 #endif /* CONFIG_MEMORYSTATUS */
365  .swapins = -1,
366  .conclave_mem = -1, };
367 
368 /* System sleep state */
369 boolean_t tasks_suspend_state;
370 
371 __options_decl(send_exec_resource_is_fatal, bool, {
372 	IS_NOT_FATAL            = false,
373 	IS_FATAL                = true
374 });
375 
376 __options_decl(send_exec_resource_is_diagnostics, bool, {
377 	IS_NOT_DIAGNOSTICS      = false,
378 	IS_DIAGNOSTICS          = true
379 });
380 
381 __options_decl(send_exec_resource_is_warning, bool, {
382 	IS_NOT_WARNING          = false,
383 	IS_WARNING              = true
384 });
385 
386 __options_decl(send_exec_resource_options_t, uint8_t, {
387 	EXEC_RESOURCE_FATAL = 0x01,
388 	EXEC_RESOURCE_DIAGNOSTIC = 0x02,
389 	EXEC_RESOURCE_WARNING = 0x04,
390 });
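
/*
 * Illustrative sketch (not part of the original source): these options are
 * bit flags, so a single value can carry several of them at once:
 *
 *	send_exec_resource_options_t opts = EXEC_RESOURCE_FATAL;
 *	opts |= EXEC_RESOURCE_WARNING;
 *	if (opts & EXEC_RESOURCE_DIAGNOSTIC) {
 *		// diagnostics-threshold variant of the exception
 *	}
 */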
391 
392 /**
393  * Actions to take when a process has reached the memory limit or the diagnostics threshold limits
394  */
395 static inline void task_process_crossed_limit_no_diag(task_t task, ledger_amount_t ledger_limit_size, bool memlimit_is_fatal, bool memlimit_is_active, send_exec_resource_is_warning is_warning);
396 #if DEBUG || DEVELOPMENT
397 static inline void task_process_crossed_limit_diag(ledger_amount_t ledger_limit_size);
398 #endif
399 void init_task_ledgers(void);
400 void task_footprint_exceeded(int warning, __unused const void *param0, __unused const void *param1);
401 void task_wakeups_rate_exceeded(int warning, __unused const void *param0, __unused const void *param1);
402 void task_io_rate_exceeded(int warning, const void *param0, __unused const void *param1);
403 void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void);
404 void __attribute__((noinline)) PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, send_exec_resource_options_t exception_options);
405 void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor);
406 #if CONFIG_PROC_RESOURCE_LIMITS
407 void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task_t task, int current_size, int soft_limit, int hard_limit);
408 mach_port_name_t current_task_get_fatal_port_name(void);
409 void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_KQWORKLOOPS(task_t task, int current_size, int soft_limit, int hard_limit);
410 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
411 
412 kern_return_t task_suspend_internal(task_t);
413 kern_return_t task_resume_internal(task_t);
414 static kern_return_t task_start_halt_locked(task_t task, boolean_t should_mark_corpse);
415 
416 extern kern_return_t iokit_task_terminate(task_t task, int phase);
417 extern void          iokit_task_app_suspended_changed(task_t task);
418 
419 extern kern_return_t exception_deliver(thread_t, exception_type_t, mach_exception_data_t, mach_msg_type_number_t, struct exception_action *, lck_mtx_t *);
420 extern void bsd_copythreadname(void *dst_uth, void *src_uth);
421 extern kern_return_t thread_resume(thread_t thread);
422 
423 // Condition to include diag footprints
424 #define RESETTABLE_DIAG_FOOTPRINT_LIMITS ((DEBUG || DEVELOPMENT) && CONFIG_MEMORYSTATUS)
425 
426 // Warn tasks when they hit 80% of their memory limit.
427 #define PHYS_FOOTPRINT_WARNING_LEVEL 80
428 
429 #define TASK_WAKEUPS_MONITOR_DEFAULT_LIMIT              150 /* wakeups per second */
430 #define TASK_WAKEUPS_MONITOR_DEFAULT_INTERVAL   300 /* in seconds. */
431 
432 /*
433  * Level (in terms of percentage of the limit) at which the wakeups monitor triggers telemetry.
434  *
435  * (i.e. when the task's wakeup rate exceeds 70% of the limit, start taking user
436  *  stacktraces, aka micro-stackshots)
437  */
438 #define TASK_WAKEUPS_MONITOR_DEFAULT_USTACKSHOTS_TRIGGER        70
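
/*
 * Worked example (illustrative): with the default limit of 150 wakeups/sec
 * and the default 70% trigger, micro-stackshot telemetry begins once a task
 * sustains more than 150 * 70 / 100 = 105 wakeups per second.
 */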
439 
440 int task_wakeups_monitor_interval; /* In seconds. Time period over which wakeups rate is observed */
441 int task_wakeups_monitor_rate;     /* In hz. Maximum allowable wakeups per task before EXC_RESOURCE is sent */
442 
443 unsigned int task_wakeups_monitor_ustackshots_trigger_pct; /* Percentage. Level at which we start gathering telemetry. */
444 
445 TUNABLE(bool, disable_exc_resource, "disable_exc_resource", false); /* Global override to suppress EXC_RESOURCE for resource monitor violations. */
446 TUNABLE(bool, disable_exc_resource_during_audio, "disable_exc_resource_during_audio", true); /* Global override to suppress EXC_RESOURCE while audio is active */
447 
448 ledger_amount_t max_task_footprint = 0;  /* Per-task limit on physical memory consumption in bytes     */
449 unsigned int max_task_footprint_warning_level = 0;  /* Per-task limit warning percentage */
450 
451 /*
452  * Configure per-task memory limit.
453  * The boot-arg is interpreted as Megabytes,
454  * and takes precedence over the device tree.
455  * Setting the boot-arg to 0 disables task limits.
456  */
457 TUNABLE_DT_WRITEABLE(int, max_task_footprint_mb, "/defaults", "kern.max_task_pmem", "max_task_pmem", 0, TUNABLE_DT_NONE);
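
/*
 * Example (illustrative): booting with max_task_pmem=2048 caps each task's
 * physical footprint at 2048 MB and overrides any "kern.max_task_pmem"
 * value from the device tree's /defaults node; max_task_pmem=0 disables
 * the per-task limit.
 */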
458 
459 /* I/O Monitor Limits */
460 #define IOMON_DEFAULT_LIMIT                     (20480ull)      /* MB of logical/physical I/O */
461 #define IOMON_DEFAULT_INTERVAL                  (86400ull)      /* in seconds */
462 
463 uint64_t task_iomon_limit_mb;           /* Per-task I/O monitor limit in MBs */
464 uint64_t task_iomon_interval_secs;      /* Per-task I/O monitor interval in secs */
465 
466 #define IO_TELEMETRY_DEFAULT_LIMIT              (10ll * 1024ll * 1024ll)
467 int64_t io_telemetry_limit;                     /* Threshold to take a microstackshot (0 indicates I/O telemetry is turned off) */
468 int64_t global_logical_writes_count = 0;        /* Global count for logical writes */
469 int64_t global_logical_writes_to_external_count = 0;        /* Global count for logical writes to external storage */
470 static boolean_t global_update_logical_writes(int64_t, int64_t*);
471 
472 #if DEBUG || DEVELOPMENT
473 static diagthreshold_check_return task_check_memorythreshold_is_valid(task_t task, uint64_t new_limit, bool is_diagnostics_value);
474 #endif
475 #define TASK_MAX_THREAD_LIMIT 256
476 
477 #if MACH_ASSERT
478 int pmap_ledgers_panic = 1;
479 int pmap_ledgers_panic_leeway = 3;
480 #endif /* MACH_ASSERT */
481 
482 int task_max = CONFIG_TASK_MAX; /* Max number of tasks */
483 
484 #if CONFIG_COREDUMP
485 int hwm_user_cores = 0; /* high watermark violations generate user core files */
486 #endif
487 
488 #ifdef MACH_BSD
489 extern uint32_t proc_platform(const struct proc *);
490 extern uint32_t proc_sdk(struct proc *);
491 extern void     proc_getexecutableuuid(void *, unsigned char *, unsigned long);
492 extern int      proc_pid(struct proc *p);
493 extern int      proc_selfpid(void);
494 extern struct proc *current_proc(void);
495 extern char     *proc_name_address(struct proc *p);
496 extern uint64_t get_dispatchqueue_offset_from_proc(void *);
497 extern int kevent_proc_copy_uptrs(void *proc, uint64_t *buf, uint32_t bufsize);
498 extern void workq_proc_suspended(struct proc *p);
499 extern void workq_proc_resumed(struct proc *p);
500 extern struct proc *kernproc;
501 
502 #if CONFIG_MEMORYSTATUS
503 extern void     proc_memstat_skip(struct proc* p, boolean_t set);
504 extern void     memorystatus_on_ledger_footprint_exceeded(int warning, bool memlimit_is_active, bool memlimit_is_fatal);
505 extern void     memorystatus_log_exception(const int max_footprint_mb, bool memlimit_is_active, bool memlimit_is_fatal);
506 extern void     memorystatus_log_diag_threshold_exception(const int diag_threshold_value);
507 extern boolean_t memorystatus_allowed_vm_map_fork(task_t task, bool *is_large);
508 extern uint64_t  memorystatus_available_memory_internal(struct proc *p);
509 
510 #if DEVELOPMENT || DEBUG
511 extern void memorystatus_abort_vm_map_fork(task_t);
512 #endif
513 
514 #endif /* CONFIG_MEMORYSTATUS */
515 
516 #endif /* MACH_BSD */
517 
518 /* Boot-arg that turns on fatal pac exception delivery for all first-party apps */
519 static TUNABLE(bool, enable_pac_exception, "enable_pac_exception", false);
520 
521 /*
522  * Defaults for controllable EXC_GUARD behaviors
523  *
524  * Internal builds are fatal by default (except BRIDGE).
525  * Create an alternate set of defaults for special processes by name.
526  */
527 struct task_exc_guard_named_default {
528 	char *name;
529 	uint32_t behavior;
530 };
531 #define _TASK_EXC_GUARD_MP_CORPSE  (TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_MP_CORPSE)
532 #define _TASK_EXC_GUARD_MP_ONCE    (_TASK_EXC_GUARD_MP_CORPSE | TASK_EXC_GUARD_MP_ONCE)
533 #define _TASK_EXC_GUARD_MP_FATAL   (TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_MP_FATAL)
534 
535 #define _TASK_EXC_GUARD_VM_CORPSE  (TASK_EXC_GUARD_VM_DELIVER | TASK_EXC_GUARD_VM_CORPSE)
536 #define _TASK_EXC_GUARD_VM_ONCE    (_TASK_EXC_GUARD_VM_CORPSE | TASK_EXC_GUARD_VM_ONCE)
537 #define _TASK_EXC_GUARD_VM_FATAL   (TASK_EXC_GUARD_VM_DELIVER | TASK_EXC_GUARD_VM_FATAL)
538 
539 #define _TASK_EXC_GUARD_ALL_CORPSE (_TASK_EXC_GUARD_MP_CORPSE | _TASK_EXC_GUARD_VM_CORPSE)
540 #define _TASK_EXC_GUARD_ALL_ONCE   (_TASK_EXC_GUARD_MP_ONCE | _TASK_EXC_GUARD_VM_ONCE)
541 #define _TASK_EXC_GUARD_ALL_FATAL  (_TASK_EXC_GUARD_MP_FATAL | _TASK_EXC_GUARD_VM_FATAL)
542 
543 /* cannot turn off FATAL and DELIVER bit if set */
544 uint32_t task_exc_guard_no_unset_mask = TASK_EXC_GUARD_MP_FATAL | TASK_EXC_GUARD_VM_FATAL |
545     TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_VM_DELIVER;
546 /* cannot turn on ONCE bit if unset */
547 uint32_t task_exc_guard_no_set_mask = TASK_EXC_GUARD_MP_ONCE | TASK_EXC_GUARD_VM_ONCE;
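
/*
 * Sketch of the intended policy (an assumption about how these masks are
 * applied when a new behavior is requested, e.g. via
 * task_set_exc_guard_behavior(); not code from the original source):
 *
 *	uint32_t cur = <task's current behavior>;
 *	uint32_t req = <requested behavior>;
 *	req |= (cur & task_exc_guard_no_unset_mask);	// FATAL/DELIVER stay set
 *	req &= ~(task_exc_guard_no_set_mask & ~cur);	// ONCE cannot be added
 */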
548 
549 #if !defined(XNU_TARGET_OS_BRIDGE)
550 
551 uint32_t task_exc_guard_default = _TASK_EXC_GUARD_ALL_FATAL;
552 uint32_t task_exc_guard_config_mask = TASK_EXC_GUARD_MP_ALL | TASK_EXC_GUARD_VM_ALL;
553 /*
554  * These "by-process-name" default overrides are intended to be a short-term fix to
555  * quickly get over races between changes introducing new EXC_GUARD raising behaviors
556  * in some process and a change in default behavior for same. We should ship with
557  * these lists empty (by fixing the bugs, or explicitly changing the task's EXC_GUARD
558  * exception behavior via task_set_exc_guard_behavior()).
559  *
560  * XXX Remember to add/remove TASK_EXC_GUARD_HONOR_NAMED_DEFAULTS back to
561  * task_exc_guard_default when transitioning this list between empty and
562  * non-empty.
563  */
564 static struct task_exc_guard_named_default task_exc_guard_named_defaults[] = {};
565 
566 #else /* !defined(XNU_TARGET_OS_BRIDGE) */
567 
568 uint32_t task_exc_guard_default = _TASK_EXC_GUARD_ALL_ONCE;
569 uint32_t task_exc_guard_config_mask = TASK_EXC_GUARD_MP_ALL | TASK_EXC_GUARD_VM_ALL;
570 static struct task_exc_guard_named_default task_exc_guard_named_defaults[] = {};
571 
572 #endif /* !defined(XNU_TARGET_OS_BRIDGE) */
573 
574 /* Forwards */
575 
576 static bool task_hold_locked(task_t task);
577 static void task_wait_locked(task_t task, boolean_t until_not_runnable);
578 static void task_release_locked(task_t task);
579 extern task_t proc_get_task_raw(void *proc);
580 extern void task_ref_hold_proc_task_struct(task_t task);
581 extern void task_release_proc_task_struct(task_t task, proc_ro_t proc_ro);
582 
583 static void task_synchronizer_destroy_all(task_t task);
584 static os_ref_count_t
585 task_add_turnstile_watchports_locked(
586 	task_t                      task,
587 	struct task_watchports      *watchports,
588 	struct task_watchport_elem  **previous_elem_array,
589 	ipc_port_t                  *portwatch_ports,
590 	uint32_t                    portwatch_count);
591 
592 static os_ref_count_t
593 task_remove_turnstile_watchports_locked(
594 	task_t                 task,
595 	struct task_watchports *watchports,
596 	ipc_port_t             *port_freelist);
597 
598 static struct task_watchports *
599 task_watchports_alloc_init(
600 	task_t        task,
601 	thread_t      thread,
602 	uint32_t      count);
603 
604 static void
605 task_watchports_deallocate(
606 	struct task_watchports *watchports);
607 
608 void
609 task_set_64bit(
610 	task_t task,
611 	boolean_t is_64bit,
612 	boolean_t is_64bit_data)
613 {
614 #if defined(__x86_64__) || defined(__arm64__)
615 	thread_t thread;
616 #endif /* defined(__x86_64__) || defined(__arm64__) */
617 
618 	task_lock(task);
619 
620 	/*
621 	 * Switching to/from 64-bit address spaces
622 	 */
623 	if (is_64bit) {
624 		if (!task_has_64Bit_addr(task)) {
625 			task_set_64Bit_addr(task);
626 		}
627 	} else {
628 		if (task_has_64Bit_addr(task)) {
629 			task_clear_64Bit_addr(task);
630 		}
631 	}
632 
633 	/*
634 	 * Switching to/from 64-bit register state.
635 	 */
636 	if (is_64bit_data) {
637 		if (task_has_64Bit_data(task)) {
638 			goto out;
639 		}
640 
641 		task_set_64Bit_data(task);
642 	} else {
643 		if (!task_has_64Bit_data(task)) {
644 			goto out;
645 		}
646 
647 		task_clear_64Bit_data(task);
648 	}
649 
650 	/* FIXME: On x86, the thread save state flavor can diverge from the
651 	 * task's 64-bit feature flag due to the 32-bit/64-bit register save
652 	 * state dichotomy. Since we can be pre-empted in this interval,
653 	 * certain routines may observe the thread as being in an inconsistent
654 	 * state with respect to its task's 64-bitness.
655 	 */
656 
657 #if defined(__x86_64__) || defined(__arm64__)
658 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
659 		thread_mtx_lock(thread);
660 		machine_thread_switch_addrmode(thread);
661 		thread_mtx_unlock(thread);
662 	}
663 #endif /* defined(__x86_64__) || defined(__arm64__) */
664 
665 out:
666 	task_unlock(task);
667 }
668 
669 bool
670 task_get_64bit_addr(task_t task)
671 {
672 	return task_has_64Bit_addr(task);
673 }
674 
675 bool
676 task_get_64bit_data(task_t task)
677 {
678 	return task_has_64Bit_data(task);
679 }
680 
681 void
682 task_set_platform_binary(
683 	task_t task,
684 	boolean_t is_platform)
685 {
686 	if (is_platform) {
687 		task_ro_flags_set(task, TFRO_PLATFORM);
688 	} else {
689 		task_ro_flags_clear(task, TFRO_PLATFORM);
690 	}
691 }
692 
693 #if XNU_TARGET_OS_OSX
694 #if DEVELOPMENT || DEBUG
695 SECURITY_READ_ONLY_LATE(bool) AMFI_bootarg_disable_mach_hardening = false;
696 #endif /* DEVELOPMENT || DEBUG */
697 
698 void
699 task_disable_mach_hardening(task_t task)
700 {
701 	task_ro_flags_set(task, TFRO_MACH_HARDENING_OPT_OUT);
702 }
703 
704 bool
705 task_opted_out_mach_hardening(task_t task)
706 {
707 	return task_ro_flags_get(task) & TFRO_MACH_HARDENING_OPT_OUT;
708 }
709 #endif /* XNU_TARGET_OS_OSX */
710 
711 /*
712  * Use the `task_is_hardened_binary` macro below
713  * when applying new security policies.
714  *
715  * Kernel security policies now generally apply to
716  * "hardened binaries" - which are platform binaries, and
717  * third party binaries who adopt hardened runtime on ios.
718  */
719 boolean_t
720 task_get_platform_binary(task_t task)
721 {
722 	return (task_ro_flags_get(task) & TFRO_PLATFORM) != 0;
723 }
724 
725 static boolean_t
726 task_get_hardened_runtime(task_t task)
727 {
728 	return (task_ro_flags_get(task) & TFRO_HARDENED) != 0;
729 }
730 
731 boolean_t
732 task_is_hardened_binary(task_t task)
733 {
734 	return task_get_platform_binary(task) ||
735 	       task_get_hardened_runtime(task);
736 }
737 
738 void
739 task_set_hardened_runtime(
740 	task_t task,
741 	bool is_hardened)
742 {
743 	if (is_hardened) {
744 		task_ro_flags_set(task, TFRO_HARDENED);
745 	} else {
746 		task_ro_flags_clear(task, TFRO_HARDENED);
747 	}
748 }
749 
750 boolean_t
751 task_is_a_corpse(task_t task)
752 {
753 	return (task_ro_flags_get(task) & TFRO_CORPSE) != 0;
754 }
755 
756 boolean_t
757 task_is_ipc_active(task_t task)
758 {
759 	return task->ipc_active;
760 }
761 
762 void
763 task_set_corpse(task_t task)
764 {
765 	return task_ro_flags_set(task, TFRO_CORPSE);
766 }
767 
768 void
769 task_set_immovable_pinned(task_t task)
770 {
771 	ipc_task_set_immovable_pinned(task);
772 }
773 
774 /*
775  * Set or clear per-task TF_CA_CLIENT_WI flag according to specified argument.
776  * Returns "false" if flag is already set, and "true" in other cases.
777  */
778 bool
779 task_set_ca_client_wi(
780 	task_t task,
781 	boolean_t set_or_clear)
782 {
783 	bool ret = true;
784 	task_lock(task);
785 	if (set_or_clear) {
786 		/* Tasks can have only one CA_CLIENT work interval */
787 		if (task->t_flags & TF_CA_CLIENT_WI) {
788 			ret = false;
789 		} else {
790 			task->t_flags |= TF_CA_CLIENT_WI;
791 		}
792 	} else {
793 		task->t_flags &= ~TF_CA_CLIENT_WI;
794 	}
795 	task_unlock(task);
796 	return ret;
797 }
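
/*
 * Usage sketch (illustrative): since a task may own at most one CA_CLIENT
 * work interval, a hypothetical caller must handle the failure case:
 *
 *	if (!task_set_ca_client_wi(task, TRUE)) {
 *		return KERN_FAILURE;	// task already has a CA_CLIENT work interval
 *	}
 */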
798 
799 /*
800  * task_set_dyld_info() is called at most three times:
801  * 1) at task struct creation, to set addr/size to zero.
802  * 2) in mach_loader.c, to set the location of the __all_image_info section in the loaded dyld.
803  * 3) from dyld itself, to update the location of all_image_info.
804  * For security, any calls after that are ignored.  The TF_DYLD_ALL_IMAGE_FINAL bit is used to determine state.
805  */
806 kern_return_t
807 task_set_dyld_info(
808 	task_t            task,
809 	mach_vm_address_t addr,
810 	mach_vm_size_t    size,
811 	bool              finalize_value)
812 {
813 	mach_vm_address_t end;
814 	if (os_add_overflow(addr, size, &end)) {
815 		return KERN_FAILURE;
816 	}
817 
818 	task_lock(task);
819 	/* don't accept updates if all_image_info_addr is final */
820 	if ((task->t_flags & TF_DYLD_ALL_IMAGE_FINAL) == 0) {
821 		bool inputNonZero   = ((addr != 0) || (size != 0));
822 		bool currentNonZero = ((task->all_image_info_addr != 0) || (task->all_image_info_size != 0));
823 		task->all_image_info_addr = addr;
824 		task->all_image_info_size = size;
825 		/* can only change from a non-zero value to another non-zero once */
826 		if ((inputNonZero && currentNonZero) || finalize_value) {
827 			task->t_flags |= TF_DYLD_ALL_IMAGE_FINAL;
828 		}
829 		task_unlock(task);
830 		return KERN_SUCCESS;
831 	} else {
832 		task_unlock(task);
833 		return KERN_FAILURE;
834 	}
835 }
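
/*
 * Lifecycle sketch (illustrative, with hypothetical addresses/sizes) of the
 * three calls described in the comment above:
 *
 *	task_set_dyld_info(task, 0, 0, false);		// 1) creation: zeroed
 *	task_set_dyld_info(task, a1, s1, false);	// 2) mach_loader.c
 *	task_set_dyld_info(task, a2, s2, false);	// 3) dyld; non-zero to
 *							//    non-zero, now final
 *	task_set_dyld_info(task, a3, s3, false);	// KERN_FAILURE: ignored
 */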
836 
837 bool
838 task_donates_own_pages(
839 	task_t task)
840 {
841 	return task->donates_own_pages;
842 }
843 
844 void
845 task_set_mach_header_address(
846 	task_t task,
847 	mach_vm_address_t addr)
848 {
849 	task_lock(task);
850 	task->mach_header_vm_address = addr;
851 	task_unlock(task);
852 }
853 
854 void
855 task_bank_reset(__unused task_t task)
856 {
857 	if (task->bank_context != NULL) {
858 		bank_task_destroy(task);
859 	}
860 }
861 
862 /*
863  * NOTE: This should only be called when the P_LINTRANSIT
864  *	 flag is set (the proc_trans lock is held) on the
865  *	 proc associated with the task.
866  */
867 void
868 task_bank_init(__unused task_t task)
869 {
870 	if (task->bank_context != NULL) {
871 		panic("Task bank init called with non null bank context for task: %p and bank_context: %p", task, task->bank_context);
872 	}
873 	bank_task_initialize(task);
874 }
875 
876 void
877 task_set_did_exec_flag(task_t task)
878 {
879 	task->t_procflags |= TPF_DID_EXEC;
880 }
881 
882 void
883 task_clear_exec_copy_flag(task_t task)
884 {
885 	task->t_procflags &= ~TPF_EXEC_COPY;
886 }
887 
888 event_t
889 task_get_return_wait_event(task_t task)
890 {
891 	return (event_t)&task->returnwait_inheritor;
892 }
893 
894 void
895 task_clear_return_wait(task_t task, uint32_t flags)
896 {
897 	if (flags & TCRW_CLEAR_INITIAL_WAIT) {
898 		thread_wakeup(task_get_return_wait_event(task));
899 	}
900 
901 	if (flags & TCRW_CLEAR_FINAL_WAIT) {
902 		is_write_lock(task->itk_space);
903 
904 		task->t_returnwaitflags &= ~TRW_LRETURNWAIT;
905 		task->returnwait_inheritor = NULL;
906 
907 		if (flags & TCRW_CLEAR_EXEC_COMPLETE) {
908 			task->t_returnwaitflags &= ~TRW_LEXEC_COMPLETE;
909 		}
910 
911 		if (task->t_returnwaitflags & TRW_LRETURNWAITER) {
912 			struct turnstile *turnstile = turnstile_prepare_hash((uintptr_t) task_get_return_wait_event(task),
913 			    TURNSTILE_ULOCK);
914 
915 			waitq_wakeup64_all(&turnstile->ts_waitq,
916 			    CAST_EVENT64_T(task_get_return_wait_event(task)),
917 			    THREAD_AWAKENED, WAITQ_UPDATE_INHERITOR);
918 
919 			turnstile_update_inheritor_complete(turnstile, TURNSTILE_INTERLOCK_HELD);
920 
921 			turnstile_complete_hash((uintptr_t) task_get_return_wait_event(task), TURNSTILE_ULOCK);
922 			turnstile_cleanup();
923 			task->t_returnwaitflags &= ~TRW_LRETURNWAITER;
924 		}
925 		is_write_unlock(task->itk_space);
926 	}
927 }
928 
929 void __attribute__((noreturn))
930 task_wait_to_return(void)
931 {
932 	task_t task = current_task();
933 	uint8_t returnwaitflags;
934 
935 	is_write_lock(task->itk_space);
936 
937 	if (task->t_returnwaitflags & TRW_LRETURNWAIT) {
938 		struct turnstile *turnstile = turnstile_prepare_hash((uintptr_t) task_get_return_wait_event(task),
939 		    TURNSTILE_ULOCK);
940 
941 		do {
942 			task->t_returnwaitflags |= TRW_LRETURNWAITER;
943 			turnstile_update_inheritor(turnstile, task->returnwait_inheritor,
944 			    (TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_THREAD));
945 
946 			waitq_assert_wait64(&turnstile->ts_waitq,
947 			    CAST_EVENT64_T(task_get_return_wait_event(task)),
948 			    THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
949 
950 			is_write_unlock(task->itk_space);
951 
952 			turnstile_update_inheritor_complete(turnstile, TURNSTILE_INTERLOCK_NOT_HELD);
953 
954 			thread_block(THREAD_CONTINUE_NULL);
955 
956 			is_write_lock(task->itk_space);
957 		} while (task->t_returnwaitflags & TRW_LRETURNWAIT);
958 
959 		turnstile_complete_hash((uintptr_t) task_get_return_wait_event(task), TURNSTILE_ULOCK);
960 	}
961 
962 	returnwaitflags = task->t_returnwaitflags;
963 	is_write_unlock(task->itk_space);
964 	turnstile_cleanup();
965 
966 	/**
967 	 * In posix_spawn() path, process_signature() is guaranteed to complete
968 	 * when the "second wait" is cleared. Call out to execute whatever depends
969 	 * on the result of that before we return to EL0.
970 	 */
971 	task_post_signature_processing_hook(task);
972 #if CONFIG_MACF
973 	/*
974 	 * Before jumping to userspace and allowing this process
975 	 * to execute any code, make sure its credentials are cached,
976 	 * and notify any interested parties.
977 	 */
978 	extern void current_cached_proc_cred_update(void);
979 
980 	current_cached_proc_cred_update();
981 	if (returnwaitflags & TRW_LEXEC_COMPLETE) {
982 		mac_proc_notify_exec_complete(current_proc());
983 	}
984 #endif
985 
986 	thread_bootstrap_return();
987 }
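
/*
 * Handshake sketch (illustrative; the exact call sites live in the spawn
 * path): the main thread of a newly created task parks in
 * task_wait_to_return() until the spawner clears the wait:
 *
 *	// child's main thread:
 *	task_wait_to_return();		// blocks while TRW_LRETURNWAIT is set
 *
 *	// spawner, once setup and signature processing are done:
 *	task_clear_return_wait(child_task,
 *	    TCRW_CLEAR_FINAL_WAIT | TCRW_CLEAR_EXEC_COMPLETE);
 */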
988 
989 /**
990  * A callout by task_wait_to_return on the main thread of a newly spawned task
991  * after process_signature() is completed by the parent task.
992  *
993  * @param task The newly spawned task
994  */
995 void
996 task_post_signature_processing_hook(task_t task)
997 {
998 	ml_task_post_signature_processing_hook(task);
999 }
1000 
1001 boolean_t
1002 task_is_exec_copy(task_t task)
1003 {
1004 	return task_is_exec_copy_internal(task);
1005 }
1006 
1007 boolean_t
1008 task_did_exec(task_t task)
1009 {
1010 	return task_did_exec_internal(task);
1011 }
1012 
1013 boolean_t
1014 task_is_active(task_t task)
1015 {
1016 	return task->active;
1017 }
1018 
1019 boolean_t
1020 task_is_halting(task_t task)
1021 {
1022 	return task->halting;
1023 }
1024 
1025 void
1026 task_init(void)
1027 {
1028 	if (max_task_footprint_mb != 0) {
1029 #if CONFIG_MEMORYSTATUS
1030 		if (max_task_footprint_mb < 50) {
1031 			printf("Warning: max_task_pmem %d below minimum.\n",
1032 			    max_task_footprint_mb);
1033 			max_task_footprint_mb = 50;
1034 		}
1035 		printf("Limiting task physical memory footprint to %d MB\n",
1036 		    max_task_footprint_mb);
1037 
1038 		max_task_footprint = (ledger_amount_t)max_task_footprint_mb * 1024 * 1024;         // Convert MB to bytes
1039 
1040 		/*
1041 		 * Configure the per-task memory limit warning level.
1042 		 * This is computed as a percentage.
1043 		 */
1044 		max_task_footprint_warning_level = 0;
1045 
1046 		if (max_mem < 0x40000000) {
1047 			/*
1048 			 * On devices with < 1GB of memory:
1049 			 *    -- set warnings to 50MB below the per-task limit.
1050 			 */
1051 			if (max_task_footprint_mb > 50) {
1052 				max_task_footprint_warning_level = ((max_task_footprint_mb - 50) * 100) / max_task_footprint_mb;
1053 			}
1054 		} else {
1055 			/*
1056 			 * On devices with >= 1GB of memory:
1057 			 *    -- set warnings to 100MB below the per-task limit.
1058 			 */
1059 			if (max_task_footprint_mb > 100) {
1060 				max_task_footprint_warning_level = ((max_task_footprint_mb - 100) * 100) / max_task_footprint_mb;
1061 			}
1062 		}
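
		/*
		 * Worked example (illustrative): with max_task_pmem=2048 on a
		 * >= 1GB device, the warning level computes to
		 * (2048 - 100) * 100 / 2048 = 95, i.e. the warning fires at
		 * 95% of the limit, leaving roughly 100 MB of headroom.
		 */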
1063 
1064 		/*
1065 		 * Never allow warning level to land below the default.
1066 		 */
1067 		if (max_task_footprint_warning_level < PHYS_FOOTPRINT_WARNING_LEVEL) {
1068 			max_task_footprint_warning_level = PHYS_FOOTPRINT_WARNING_LEVEL;
1069 		}
1070 
1071 		printf("Limiting task physical memory warning to %d%%\n", max_task_footprint_warning_level);
1072 
1073 #else
1074 		printf("Warning: max_task_pmem specified, but jetsam not configured; ignoring.\n");
1075 #endif /* CONFIG_MEMORYSTATUS */
1076 	}
1077 
1078 #if DEVELOPMENT || DEBUG
1079 	PE_parse_boot_argn("task_exc_guard_default",
1080 	    &task_exc_guard_default,
1081 	    sizeof(task_exc_guard_default));
1082 #endif /* DEVELOPMENT || DEBUG */
1083 
1084 #if CONFIG_COREDUMP
1085 	if (!PE_parse_boot_argn("hwm_user_cores", &hwm_user_cores,
1086 	    sizeof(hwm_user_cores))) {
1087 		hwm_user_cores = 0;
1088 	}
1089 #endif
1090 
1091 	proc_init_cpumon_params();
1092 
1093 	if (!PE_parse_boot_argn("task_wakeups_monitor_rate", &task_wakeups_monitor_rate, sizeof(task_wakeups_monitor_rate))) {
1094 		task_wakeups_monitor_rate = TASK_WAKEUPS_MONITOR_DEFAULT_LIMIT;
1095 	}
1096 
1097 	if (!PE_parse_boot_argn("task_wakeups_monitor_interval", &task_wakeups_monitor_interval, sizeof(task_wakeups_monitor_interval))) {
1098 		task_wakeups_monitor_interval = TASK_WAKEUPS_MONITOR_DEFAULT_INTERVAL;
1099 	}
1100 
1101 	if (!PE_parse_boot_argn("task_wakeups_monitor_ustackshots_trigger_pct", &task_wakeups_monitor_ustackshots_trigger_pct,
1102 	    sizeof(task_wakeups_monitor_ustackshots_trigger_pct))) {
1103 		task_wakeups_monitor_ustackshots_trigger_pct = TASK_WAKEUPS_MONITOR_DEFAULT_USTACKSHOTS_TRIGGER;
1104 	}
1105 
1106 	if (!PE_parse_boot_argn("task_iomon_limit_mb", &task_iomon_limit_mb, sizeof(task_iomon_limit_mb))) {
1107 		task_iomon_limit_mb = IOMON_DEFAULT_LIMIT;
1108 	}
1109 
1110 	if (!PE_parse_boot_argn("task_iomon_interval_secs", &task_iomon_interval_secs, sizeof(task_iomon_interval_secs))) {
1111 		task_iomon_interval_secs = IOMON_DEFAULT_INTERVAL;
1112 	}
1113 
1114 	if (!PE_parse_boot_argn("io_telemetry_limit", &io_telemetry_limit, sizeof(io_telemetry_limit))) {
1115 		io_telemetry_limit = IO_TELEMETRY_DEFAULT_LIMIT;
1116 	}
1117 
1118 /*
1119  * If we have coalitions, coalition_init() will call init_task_ledgers() as it
1120  * sets up the ledgers for the default coalition. If we don't have coalitions,
1121  * then we have to call it now.
1122  */
1123 #if CONFIG_COALITIONS
1124 	assert(task_ledger_template);
1125 #else /* CONFIG_COALITIONS */
1126 	init_task_ledgers();
1127 #endif /* CONFIG_COALITIONS */
1128 
1129 	task_ref_init();
1130 	task_zone_init();
1131 
1132 #ifdef __LP64__
1133 	boolean_t is_64bit = TRUE;
1134 #else
1135 	boolean_t is_64bit = FALSE;
1136 #endif
1137 
1138 	kernproc = (struct proc *)zalloc_flags(proc_task_zone, Z_WAITOK | Z_ZERO);
1139 	kernel_task = proc_get_task_raw(kernproc);
1140 
1141 	/*
1142 	 * Create the kernel task as the first task.
1143 	 */
1144 	if (task_create_internal(TASK_NULL, NULL, NULL, FALSE, is_64bit,
1145 	    is_64bit, TF_NONE, TF_NONE, TPF_NONE, TWF_NONE, kernel_task) != KERN_SUCCESS) {
1146 		panic("task_init");
1147 	}
1148 
1149 	ipc_task_enable(kernel_task);
1150 
1151 #if defined(HAS_APPLE_PAC)
1152 	kernel_task->rop_pid = ml_default_rop_pid();
1153 	kernel_task->jop_pid = ml_default_jop_pid();
1154 	// kernel_task never runs at EL0, but machine_thread_state_convert_from/to_user() relies on
1155 	// disable_user_jop to be false for kernel threads (e.g. in exception delivery on thread_exception_daemon)
1156 	ml_task_set_disable_user_jop(kernel_task, FALSE);
1157 #endif
1158 
1159 	vm_map_deallocate(kernel_task->map);
1160 	kernel_task->map = kernel_map;
1161 }
1162 
1163 static inline void
1164 task_zone_init(void)
1165 {
1166 	proc_struct_size = roundup(proc_struct_size, task_alignment);
1167 	task_struct_size = roundup(sizeof(struct task), proc_alignment);
1168 	proc_and_task_size = proc_struct_size + task_struct_size;
1169 
1170 	proc_task_zone = zone_create_ext("proc_task", proc_and_task_size,
1171 	    ZC_ZFREE_CLEARMEM | ZC_SEQUESTER, ZONE_ID_PROC_TASK, NULL); /* sequester is needed for proc_rele() */
1172 }
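
/*
 * Layout sketch (an assumption about proc_get_task_raw(), which is not
 * defined in this file): each proc_task zone element is a proc followed by
 * its task, so one can be recovered from the other by a fixed offset:
 *
 *	task_t t = (task_t)((uintptr_t)proc + proc_struct_size);
 */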
1173 
1174 /*
1175  * Task ledgers
1176  * ------------
1177  *
1178  * phys_footprint
1179  *   Physical footprint: This is the sum of:
1180  *     + (internal - alternate_accounting)
1181  *     + (internal_compressed - alternate_accounting_compressed)
1182  *     + iokit_mapped
1183  *     + purgeable_nonvolatile
1184  *     + purgeable_nonvolatile_compressed
1185  *     + page_table
1186  *
1187  * internal
1188  *   The task's anonymous memory, which on iOS is always resident.
1189  *
1190  * internal_compressed
1191  *   Amount of this task's internal memory which is held by the compressor.
1192  *   Such memory is no longer actually resident for the task [i.e., resident in its pmap],
1193  *   and could be either decompressed back into memory, or paged out to storage, depending
1194  *   on our implementation.
1195  *
1196  * iokit_mapped
1197  *   IOKit mappings: The total size of all IOKit mappings in this task [regardless of
1198  *    clean/dirty or internal/external state].
1199  *
1200  * alternate_accounting
1201  *   The number of internal dirty pages which are part of IOKit mappings. By definition, these pages
1202  *   are counted in both internal *and* iokit_mapped, so we must subtract them from the total to avoid
1203  *   double counting.
1204  *
1205  * pages_grabbed
1206  *   pages_grabbed counts all page grabs in a task.  It is also broken out into three subtypes
1207  *   which track UPL, IOPL and Kernel page grabs.
1208  */
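
/*
 * Worked example (illustrative numbers, not from the original comment):
 * a task with 300 MB resident internal memory, 40 MB of compressed internal
 * memory, 20 MB of alternate accounting (none compressed), 50 MB of IOKit
 * mappings, 10 MB of nonvolatile purgeable memory and 4 MB of page tables
 * has:
 *
 *	phys_footprint = (300 - 20) + (40 - 0) + 50 + 10 + 0 + 4 = 384 MB
 */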
1209 void
1210 init_task_ledgers(void)
1211 {
1212 	ledger_template_t t;
1213 
1214 	assert(task_ledger_template == NULL);
1215 	assert(kernel_task == TASK_NULL);
1216 
1217 #if MACH_ASSERT
1218 	PE_parse_boot_argn("pmap_ledgers_panic",
1219 	    &pmap_ledgers_panic,
1220 	    sizeof(pmap_ledgers_panic));
1221 	PE_parse_boot_argn("pmap_ledgers_panic_leeway",
1222 	    &pmap_ledgers_panic_leeway,
1223 	    sizeof(pmap_ledgers_panic_leeway));
1224 #endif /* MACH_ASSERT */
1225 
1226 	if ((t = ledger_template_create("Per-task ledger")) == NULL) {
1227 		panic("couldn't create task ledger template");
1228 	}
1229 
1230 	task_ledgers.cpu_time = ledger_entry_add(t, "cpu_time", "sched", "ns");
1231 	task_ledgers.tkm_private = ledger_entry_add(t, "tkm_private",
1232 	    "physmem", "bytes");
1233 	task_ledgers.tkm_shared = ledger_entry_add(t, "tkm_shared", "physmem",
1234 	    "bytes");
1235 	task_ledgers.phys_mem = ledger_entry_add(t, "phys_mem", "physmem",
1236 	    "bytes");
1237 	task_ledgers.wired_mem = ledger_entry_add(t, "wired_mem", "physmem",
1238 	    "bytes");
1239 	task_ledgers.conclave_mem = ledger_entry_add_with_flags(t, "conclave_mem", "physmem", "bytes",
1240 	    LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE | LEDGER_ENTRY_ALLOW_DEBIT);
1241 	task_ledgers.internal = ledger_entry_add(t, "internal", "physmem",
1242 	    "bytes");
1243 	task_ledgers.iokit_mapped = ledger_entry_add_with_flags(t, "iokit_mapped", "mappings",
1244 	    "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1245 	task_ledgers.alternate_accounting = ledger_entry_add_with_flags(t, "alternate_accounting", "physmem",
1246 	    "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1247 	task_ledgers.alternate_accounting_compressed = ledger_entry_add_with_flags(t, "alternate_accounting_compressed", "physmem",
1248 	    "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1249 	task_ledgers.page_table = ledger_entry_add_with_flags(t, "page_table", "physmem",
1250 	    "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1251 	task_ledgers.phys_footprint = ledger_entry_add(t, "phys_footprint", "physmem",
1252 	    "bytes");
1253 	task_ledgers.internal_compressed = ledger_entry_add(t, "internal_compressed", "physmem",
1254 	    "bytes");
1255 	task_ledgers.reusable = ledger_entry_add(t, "reusable", "physmem", "bytes");
1256 	task_ledgers.external = ledger_entry_add(t, "external", "physmem", "bytes");
1257 	task_ledgers.purgeable_volatile = ledger_entry_add_with_flags(t, "purgeable_volatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1258 	task_ledgers.purgeable_nonvolatile = ledger_entry_add_with_flags(t, "purgeable_nonvolatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1259 	task_ledgers.purgeable_volatile_compressed = ledger_entry_add_with_flags(t, "purgeable_volatile_compress", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1260 	task_ledgers.purgeable_nonvolatile_compressed = ledger_entry_add_with_flags(t, "purgeable_nonvolatile_compress", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1261 #if DEBUG || DEVELOPMENT
1262 	task_ledgers.pages_grabbed = ledger_entry_add_with_flags(t, "pages_grabbed", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1263 	task_ledgers.pages_grabbed_kern = ledger_entry_add_with_flags(t, "pages_grabbed_kern", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1264 	task_ledgers.pages_grabbed_iopl = ledger_entry_add_with_flags(t, "pages_grabbed_iopl", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1265 	task_ledgers.pages_grabbed_upl = ledger_entry_add_with_flags(t, "pages_grabbed_upl", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1266 #endif
1267 	task_ledgers.tagged_nofootprint = ledger_entry_add_with_flags(t, "tagged_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1268 	task_ledgers.tagged_footprint = ledger_entry_add_with_flags(t, "tagged_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1269 	task_ledgers.tagged_nofootprint_compressed = ledger_entry_add_with_flags(t, "tagged_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1270 	task_ledgers.tagged_footprint_compressed = ledger_entry_add_with_flags(t, "tagged_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1271 	task_ledgers.network_volatile = ledger_entry_add_with_flags(t, "network_volatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1272 	task_ledgers.network_nonvolatile = ledger_entry_add_with_flags(t, "network_nonvolatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1273 	task_ledgers.network_volatile_compressed = ledger_entry_add_with_flags(t, "network_volatile_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1274 	task_ledgers.network_nonvolatile_compressed = ledger_entry_add_with_flags(t, "network_nonvolatile_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1275 	task_ledgers.media_nofootprint = ledger_entry_add_with_flags(t, "media_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1276 	task_ledgers.media_footprint = ledger_entry_add_with_flags(t, "media_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1277 	task_ledgers.media_nofootprint_compressed = ledger_entry_add_with_flags(t, "media_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1278 	task_ledgers.media_footprint_compressed = ledger_entry_add_with_flags(t, "media_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1279 	task_ledgers.graphics_nofootprint = ledger_entry_add_with_flags(t, "graphics_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1280 	task_ledgers.graphics_footprint = ledger_entry_add_with_flags(t, "graphics_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1281 	task_ledgers.graphics_nofootprint_compressed = ledger_entry_add_with_flags(t, "graphics_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1282 	task_ledgers.graphics_footprint_compressed = ledger_entry_add_with_flags(t, "graphics_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1283 	task_ledgers.neural_nofootprint = ledger_entry_add_with_flags(t, "neural_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1284 	task_ledgers.neural_footprint = ledger_entry_add_with_flags(t, "neural_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1285 	task_ledgers.neural_nofootprint_compressed = ledger_entry_add_with_flags(t, "neural_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1286 	task_ledgers.neural_footprint_compressed = ledger_entry_add_with_flags(t, "neural_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1287 	task_ledgers.neural_nofootprint_total = ledger_entry_add(t, "neural_nofootprint_total", "physmem", "bytes");
1288 
1289 #if CONFIG_FREEZE
1290 	task_ledgers.frozen_to_swap = ledger_entry_add(t, "frozen_to_swap", "physmem", "bytes");
1291 #endif /* CONFIG_FREEZE */
1292 
1293 	task_ledgers.platform_idle_wakeups = ledger_entry_add(t, "platform_idle_wakeups", "power",
1294 	    "count");
1295 	task_ledgers.interrupt_wakeups = ledger_entry_add(t, "interrupt_wakeups", "power",
1296 	    "count");
1297 
1298 #if CONFIG_SCHED_SFI
1299 	sfi_class_id_t class_id, ledger_alias;
1300 	for (class_id = SFI_CLASS_UNSPECIFIED; class_id < MAX_SFI_CLASS_ID; class_id++) {
1301 		task_ledgers.sfi_wait_times[class_id] = -1;
1302 	}
1303 
1304 	/* don't account for UNSPECIFIED */
1305 	for (class_id = SFI_CLASS_UNSPECIFIED + 1; class_id < MAX_SFI_CLASS_ID; class_id++) {
1306 		ledger_alias = sfi_get_ledger_alias_for_class(class_id);
1307 		if (ledger_alias != SFI_CLASS_UNSPECIFIED) {
1308 			/* Check to see if alias has been registered yet */
1309 			if (task_ledgers.sfi_wait_times[ledger_alias] != -1) {
1310 				task_ledgers.sfi_wait_times[class_id] = task_ledgers.sfi_wait_times[ledger_alias];
1311 			} else {
1312 				/* Otherwise, initialize it first */
1313 				task_ledgers.sfi_wait_times[class_id] = task_ledgers.sfi_wait_times[ledger_alias] = sfi_ledger_entry_add(t, ledger_alias);
1314 			}
1315 		} else {
1316 			task_ledgers.sfi_wait_times[class_id] = sfi_ledger_entry_add(t, class_id);
1317 		}
1318 
1319 		if (task_ledgers.sfi_wait_times[class_id] < 0) {
1320 			panic("couldn't create entries for task ledger template for SFI class 0x%x", class_id);
1321 		}
1322 	}
1323 
1324 	assert(task_ledgers.sfi_wait_times[MAX_SFI_CLASS_ID - 1] != -1);
1325 #endif /* CONFIG_SCHED_SFI */
1326 
1327 	task_ledgers.cpu_time_billed_to_me = ledger_entry_add(t, "cpu_time_billed_to_me", "sched", "ns");
1328 	task_ledgers.cpu_time_billed_to_others = ledger_entry_add(t, "cpu_time_billed_to_others", "sched", "ns");
1329 	task_ledgers.physical_writes = ledger_entry_add(t, "physical_writes", "res", "bytes");
1330 	task_ledgers.logical_writes = ledger_entry_add(t, "logical_writes", "res", "bytes");
1331 	task_ledgers.logical_writes_to_external = ledger_entry_add(t, "logical_writes_to_external", "res", "bytes");
1332 #if CONFIG_PHYS_WRITE_ACCT
1333 	task_ledgers.fs_metadata_writes = ledger_entry_add(t, "fs_metadata_writes", "res", "bytes");
1334 #endif /* CONFIG_PHYS_WRITE_ACCT */
1335 	task_ledgers.energy_billed_to_me = ledger_entry_add(t, "energy_billed_to_me", "power", "nj");
1336 	task_ledgers.energy_billed_to_others = ledger_entry_add(t, "energy_billed_to_others", "power", "nj");
1337 
1338 #if CONFIG_MEMORYSTATUS
1339 	task_ledgers.memorystatus_dirty_time = ledger_entry_add(t, "memorystatus_dirty_time", "physmem", "ns");
1340 #endif /* CONFIG_MEMORYSTATUS */
1341 
1342 	task_ledgers.swapins = ledger_entry_add_with_flags(t, "swapins", "physmem", "bytes",
1343 	    LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1344 
1345 	if ((task_ledgers.cpu_time < 0) ||
1346 	    (task_ledgers.tkm_private < 0) ||
1347 	    (task_ledgers.tkm_shared < 0) ||
1348 	    (task_ledgers.phys_mem < 0) ||
1349 	    (task_ledgers.wired_mem < 0) ||
1350 	    (task_ledgers.conclave_mem < 0) ||
1351 	    (task_ledgers.internal < 0) ||
1352 	    (task_ledgers.external < 0) ||
1353 	    (task_ledgers.reusable < 0) ||
1354 	    (task_ledgers.iokit_mapped < 0) ||
1355 	    (task_ledgers.alternate_accounting < 0) ||
1356 	    (task_ledgers.alternate_accounting_compressed < 0) ||
1357 	    (task_ledgers.page_table < 0) ||
1358 	    (task_ledgers.phys_footprint < 0) ||
1359 	    (task_ledgers.internal_compressed < 0) ||
1360 	    (task_ledgers.purgeable_volatile < 0) ||
1361 	    (task_ledgers.purgeable_nonvolatile < 0) ||
1362 	    (task_ledgers.purgeable_volatile_compressed < 0) ||
1363 	    (task_ledgers.purgeable_nonvolatile_compressed < 0) ||
1364 	    (task_ledgers.tagged_nofootprint < 0) ||
1365 	    (task_ledgers.tagged_footprint < 0) ||
1366 	    (task_ledgers.tagged_nofootprint_compressed < 0) ||
1367 	    (task_ledgers.tagged_footprint_compressed < 0) ||
1368 #if CONFIG_FREEZE
1369 	    (task_ledgers.frozen_to_swap < 0) ||
1370 #endif /* CONFIG_FREEZE */
1371 	    (task_ledgers.network_volatile < 0) ||
1372 	    (task_ledgers.network_nonvolatile < 0) ||
1373 	    (task_ledgers.network_volatile_compressed < 0) ||
1374 	    (task_ledgers.network_nonvolatile_compressed < 0) ||
1375 	    (task_ledgers.media_nofootprint < 0) ||
1376 	    (task_ledgers.media_footprint < 0) ||
1377 	    (task_ledgers.media_nofootprint_compressed < 0) ||
1378 	    (task_ledgers.media_footprint_compressed < 0) ||
1379 	    (task_ledgers.graphics_nofootprint < 0) ||
1380 	    (task_ledgers.graphics_footprint < 0) ||
1381 	    (task_ledgers.graphics_nofootprint_compressed < 0) ||
1382 	    (task_ledgers.graphics_footprint_compressed < 0) ||
1383 	    (task_ledgers.neural_nofootprint < 0) ||
1384 	    (task_ledgers.neural_footprint < 0) ||
1385 	    (task_ledgers.neural_nofootprint_compressed < 0) ||
1386 	    (task_ledgers.neural_footprint_compressed < 0) ||
1387 	    (task_ledgers.neural_nofootprint_total < 0) ||
1388 	    (task_ledgers.platform_idle_wakeups < 0) ||
1389 	    (task_ledgers.interrupt_wakeups < 0) ||
1390 	    (task_ledgers.cpu_time_billed_to_me < 0) || (task_ledgers.cpu_time_billed_to_others < 0) ||
1391 	    (task_ledgers.physical_writes < 0) ||
1392 	    (task_ledgers.logical_writes < 0) ||
1393 	    (task_ledgers.logical_writes_to_external < 0) ||
1394 #if CONFIG_PHYS_WRITE_ACCT
1395 	    (task_ledgers.fs_metadata_writes < 0) ||
1396 #endif /* CONFIG_PHYS_WRITE_ACCT */
1397 #if CONFIG_MEMORYSTATUS
1398 	    (task_ledgers.memorystatus_dirty_time < 0) ||
1399 #endif /* CONFIG_MEMORYSTATUS */
1400 	    (task_ledgers.energy_billed_to_me < 0) ||
1401 	    (task_ledgers.energy_billed_to_others < 0) ||
1402 	    (task_ledgers.swapins < 0)
1403 	    ) {
1404 		panic("couldn't create entries for task ledger template");
1405 	}
1406 
1407 	ledger_track_credit_only(t, task_ledgers.phys_footprint);
1408 	ledger_track_credit_only(t, task_ledgers.internal);
1409 	ledger_track_credit_only(t, task_ledgers.external);
1410 	ledger_track_credit_only(t, task_ledgers.reusable);
1411 
1412 	ledger_track_maximum(t, task_ledgers.phys_footprint, 60);
1413 	ledger_track_maximum(t, task_ledgers.phys_mem, 60);
1414 	ledger_track_maximum(t, task_ledgers.internal, 60);
1415 	ledger_track_maximum(t, task_ledgers.internal_compressed, 60);
1416 	ledger_track_maximum(t, task_ledgers.reusable, 60);
1417 	ledger_track_maximum(t, task_ledgers.external, 60);
1418 	ledger_track_maximum(t, task_ledgers.neural_nofootprint_total, 60);
1419 #if MACH_ASSERT
1420 	if (pmap_ledgers_panic) {
1421 		ledger_panic_on_negative(t, task_ledgers.phys_footprint);
1422 		ledger_panic_on_negative(t, task_ledgers.conclave_mem);
1423 		ledger_panic_on_negative(t, task_ledgers.page_table);
1424 		ledger_panic_on_negative(t, task_ledgers.internal);
1425 		ledger_panic_on_negative(t, task_ledgers.iokit_mapped);
1426 		ledger_panic_on_negative(t, task_ledgers.alternate_accounting);
1427 		ledger_panic_on_negative(t, task_ledgers.alternate_accounting_compressed);
1428 		ledger_panic_on_negative(t, task_ledgers.purgeable_volatile);
1429 		ledger_panic_on_negative(t, task_ledgers.purgeable_nonvolatile);
1430 		ledger_panic_on_negative(t, task_ledgers.purgeable_volatile_compressed);
1431 		ledger_panic_on_negative(t, task_ledgers.purgeable_nonvolatile_compressed);
1432 #if CONFIG_PHYS_WRITE_ACCT
1433 		ledger_panic_on_negative(t, task_ledgers.fs_metadata_writes);
1434 #endif /* CONFIG_PHYS_WRITE_ACCT */
1435 
1436 		ledger_panic_on_negative(t, task_ledgers.tagged_nofootprint);
1437 		ledger_panic_on_negative(t, task_ledgers.tagged_footprint);
1438 		ledger_panic_on_negative(t, task_ledgers.tagged_nofootprint_compressed);
1439 		ledger_panic_on_negative(t, task_ledgers.tagged_footprint_compressed);
1440 		ledger_panic_on_negative(t, task_ledgers.network_volatile);
1441 		ledger_panic_on_negative(t, task_ledgers.network_nonvolatile);
1442 		ledger_panic_on_negative(t, task_ledgers.network_volatile_compressed);
1443 		ledger_panic_on_negative(t, task_ledgers.network_nonvolatile_compressed);
1444 		ledger_panic_on_negative(t, task_ledgers.media_nofootprint);
1445 		ledger_panic_on_negative(t, task_ledgers.media_footprint);
1446 		ledger_panic_on_negative(t, task_ledgers.media_nofootprint_compressed);
1447 		ledger_panic_on_negative(t, task_ledgers.media_footprint_compressed);
1448 		ledger_panic_on_negative(t, task_ledgers.graphics_nofootprint);
1449 		ledger_panic_on_negative(t, task_ledgers.graphics_footprint);
1450 		ledger_panic_on_negative(t, task_ledgers.graphics_nofootprint_compressed);
1451 		ledger_panic_on_negative(t, task_ledgers.graphics_footprint_compressed);
1452 		ledger_panic_on_negative(t, task_ledgers.neural_nofootprint);
1453 		ledger_panic_on_negative(t, task_ledgers.neural_footprint);
1454 		ledger_panic_on_negative(t, task_ledgers.neural_nofootprint_compressed);
1455 		ledger_panic_on_negative(t, task_ledgers.neural_footprint_compressed);
1456 	}
1457 #endif /* MACH_ASSERT */
1458 
1459 #if CONFIG_MEMORYSTATUS
1460 	ledger_set_callback(t, task_ledgers.phys_footprint, task_footprint_exceeded, NULL, NULL);
1461 #endif /* CONFIG_MEMORYSTATUS */
1462 
1463 	ledger_set_callback(t, task_ledgers.interrupt_wakeups,
1464 	    task_wakeups_rate_exceeded, NULL, NULL);
1465 	ledger_set_callback(t, task_ledgers.physical_writes, task_io_rate_exceeded, (void *)FLAVOR_IO_PHYSICAL_WRITES, NULL);
1466 
1467 #if CONFIG_SPTM || !XNU_MONITOR
1468 	ledger_template_complete(t);
1469 #else /* CONFIG_SPTM || !XNU_MONITOR */
1470 	ledger_template_complete_secure_alloc(t);
1471 #endif /* CONFIG_SPTM || !XNU_MONITOR */
1472 	task_ledger_template = t;
1473 }
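
/*
 * A minimal sketch of the ledger-template lifecycle wired up above, under
 * the assumption that clients follow the same pattern task_create_internal()
 * does below: entries are added while the template is open, the template is
 * completed exactly once, and each task then instantiates a private ledger
 * from it. Hypothetical client:
 *
 *	ledger_t l = ledger_instantiate(task_ledger_template,
 *	    LEDGER_CREATE_ACTIVE_ENTRIES);
 *	if (l == NULL) {
 *		return KERN_RESOURCE_SHORTAGE;
 *	}
 *	ledger_credit(l, task_ledgers.phys_footprint, PAGE_SIZE);
 */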
1474 
1475 /* Create a task, but leave the task ports disabled */
1476 kern_return_t
1477 task_create_internal(
1478 	task_t             parent_task,            /* Null-able */
1479 	proc_ro_t          proc_ro,
1480 	coalition_t        *parent_coalitions __unused,
1481 	boolean_t          inherit_memory,
1482 	boolean_t          is_64bit,
1483 	boolean_t          is_64bit_data,
1484 	uint32_t           t_flags,
1485 	uint32_t           t_flags_ro,
1486 	uint32_t           t_procflags,
1487 	uint8_t            t_returnwaitflags,
1488 	task_t             child_task)
1489 {
1490 	task_t                  new_task;
1491 	vm_shared_region_t      shared_region;
1492 	ledger_t                ledger = NULL;
1493 	struct task_ro_data     task_ro_data = {};
1494 	uint32_t                parent_t_flags_ro = 0;
1495 
1496 	new_task = child_task;
1497 
1498 	if (task_ref_count_init(new_task) != KERN_SUCCESS) {
1499 		return KERN_RESOURCE_SHORTAGE;
1500 	}
1501 
1502 	/* allocate with active entries */
1503 	assert(task_ledger_template != NULL);
1504 	ledger = ledger_instantiate(task_ledger_template, LEDGER_CREATE_ACTIVE_ENTRIES);
1505 	if (ledger == NULL) {
1506 		task_ref_count_fini(new_task);
1507 		return KERN_RESOURCE_SHORTAGE;
1508 	}
1509 
1510 	counter_alloc(&(new_task->faults));
1511 
1512 #if defined(HAS_APPLE_PAC)
1513 	const uint8_t disable_user_jop = inherit_memory ? parent_task->disable_user_jop : FALSE;
1514 	ml_task_set_rop_pid(new_task, parent_task, inherit_memory);
1515 	ml_task_set_jop_pid(new_task, parent_task, inherit_memory, disable_user_jop);
1516 	ml_task_set_disable_user_jop(new_task, disable_user_jop);
1517 #endif
1518 
1519 
1520 	new_task->ledger = ledger;
1521 
1522 	/* if inherit_memory is true, parent_task MUST not be NULL */
1523 	if (!(t_flags & TF_CORPSE_FORK) && inherit_memory) {
1524 #if CONFIG_DEFERRED_RECLAIM
1525 		if (parent_task->deferred_reclamation_metadata) {
1526 			/*
1527 			 * Prevent concurrent reclaims while we're forking the parent_task's map,
1528 			 * so that the child's map is in sync with the forked reclamation
1529 			 * metadata.
1530 			 */
1531 			vm_deferred_reclamation_buffer_own(
1532 				parent_task->deferred_reclamation_metadata);
1533 		}
1534 #endif /* CONFIG_DEFERRED_RECLAIM */
1535 		new_task->map = vm_map_fork(ledger, parent_task->map, 0);
1536 #if CONFIG_DEFERRED_RECLAIM
1537 		if (new_task->map != NULL &&
1538 		    parent_task->deferred_reclamation_metadata) {
1539 			new_task->deferred_reclamation_metadata =
1540 			    vm_deferred_reclamation_buffer_fork(new_task,
1541 			    parent_task->deferred_reclamation_metadata);
1542 		}
1543 #endif /* CONFIG_DEFERRED_RECLAIM */
1544 	} else {
1545 		unsigned int pmap_flags = is_64bit ? PMAP_CREATE_64BIT : 0;
1546 		pmap_t pmap = pmap_create_options(ledger, 0, pmap_flags);
1547 		vm_map_t new_map;
1548 
1549 		if (pmap == NULL) {
1550 			counter_free(&new_task->faults);
1551 			ledger_dereference(ledger);
1552 			task_ref_count_fini(new_task);
1553 			return KERN_RESOURCE_SHORTAGE;
1554 		}
1555 		new_map = vm_map_create_options(pmap,
1556 		    (vm_map_offset_t)(VM_MIN_ADDRESS),
1557 		    (vm_map_offset_t)(VM_MAX_ADDRESS),
1558 		    VM_MAP_CREATE_PAGEABLE);
1559 		if (parent_task) {
1560 			vm_map_inherit_limits(new_map, parent_task->map);
1561 		}
1562 		new_task->map = new_map;
1563 	}
1564 
1565 	if (new_task->map == NULL) {
1566 		counter_free(&new_task->faults);
1567 		ledger_dereference(ledger);
1568 		task_ref_count_fini(new_task);
1569 		return KERN_RESOURCE_SHORTAGE;
1570 	}
1571 
1572 	lck_mtx_init(&new_task->lock, &task_lck_grp, &task_lck_attr);
1573 	queue_init(&new_task->threads);
1574 	new_task->suspend_count = 0;
1575 	new_task->thread_count = 0;
1576 	new_task->active_thread_count = 0;
1577 	new_task->user_stop_count = 0;
1578 	new_task->legacy_stop_count = 0;
1579 	new_task->active = TRUE;
1580 	new_task->halting = FALSE;
1581 	new_task->priv_flags = 0;
1582 	new_task->t_flags = t_flags;
1583 	task_ro_data.t_flags_ro = t_flags_ro;
1584 	new_task->t_procflags = t_procflags;
1585 	new_task->t_returnwaitflags = t_returnwaitflags;
1586 	new_task->returnwait_inheritor = current_thread();
1587 	new_task->importance = 0;
1588 	new_task->crashed_thread_id = 0;
1589 	new_task->watchports = NULL;
1590 	new_task->t_rr_ranges = NULL;
1591 
1592 	new_task->bank_context = NULL;
1593 
1594 	if (parent_task) {
1595 		parent_t_flags_ro = task_ro_flags_get(parent_task);
1596 	}
1597 
1598 	if (parent_task && inherit_memory) {
1599 #if __has_feature(ptrauth_calls)
1600 		/* Inherit the pac exception flags from parent if in fork */
1601 		task_ro_data.t_flags_ro |= (parent_t_flags_ro & (TFRO_PAC_ENFORCE_USER_STATE |
1602 		    TFRO_PAC_EXC_FATAL));
1603 #endif /* __has_feature(ptrauth_calls) */
1604 		/* Inherit the hardened binary flags from parent if in fork */
1605 		task_ro_data.t_flags_ro |= parent_t_flags_ro & (TFRO_HARDENED | TFRO_PLATFORM | TFRO_JIT_EXC_FATAL);
1606 #if XNU_TARGET_OS_OSX
1607 		task_ro_data.t_flags_ro |= parent_t_flags_ro & TFRO_MACH_HARDENING_OPT_OUT;
1608 #endif /* XNU_TARGET_OS_OSX */
1609 	}
1610 
1611 #ifdef MACH_BSD
1612 	new_task->corpse_info = NULL;
1613 #endif /* MACH_BSD */
1614 
1615 	/* The kernel task, which is not created by this function, has unique id 0; ids assigned here start at 1. */
1616 	task_set_uniqueid(new_task);
1617 
1618 #if CONFIG_MACF
1619 	set_task_crash_label(new_task, NULL);
1620 
1621 	task_ro_data.task_filters.mach_trap_filter_mask = NULL;
1622 	task_ro_data.task_filters.mach_kobj_filter_mask = NULL;
1623 #endif
1624 
1625 #if CONFIG_MEMORYSTATUS
1626 	if (max_task_footprint != 0) {
1627 		ledger_set_limit(ledger, task_ledgers.phys_footprint, max_task_footprint, PHYS_FOOTPRINT_WARNING_LEVEL);
1628 	}
1629 #endif /* CONFIG_MEMORYSTATUS */
1630 
1631 	if (task_wakeups_monitor_rate != 0) {
1632 		uint32_t flags = WAKEMON_ENABLE | WAKEMON_SET_DEFAULTS;
1633 		int32_t  rate;        // Ignored because of WAKEMON_SET_DEFAULTS
1634 		task_wakeups_monitor_ctl(new_task, &flags, &rate);
1635 	}
1636 
1637 #if CONFIG_IO_ACCOUNTING
1638 	uint32_t flags = IOMON_ENABLE;
1639 	task_io_monitor_ctl(new_task, &flags);
1640 #endif /* CONFIG_IO_ACCOUNTING */
1641 
1642 	machine_task_init(new_task, parent_task, inherit_memory);
1643 
1644 	new_task->task_debug = NULL;
1645 
1646 #if DEVELOPMENT || DEBUG
1647 	new_task->task_unnested = FALSE;
1648 	new_task->task_disconnected_count = 0;
1649 #endif
1650 	queue_init(&new_task->semaphore_list);
1651 	new_task->semaphores_owned = 0;
1652 
1653 	new_task->vtimers = 0;
1654 
1655 	new_task->shared_region = NULL;
1656 
1657 	new_task->affinity_space = NULL;
1658 
1659 #if CONFIG_CPU_COUNTERS
1660 	new_task->t_kpc = 0;
1661 #endif /* CONFIG_CPU_COUNTERS */
1662 
1663 	new_task->pidsuspended = FALSE;
1664 	new_task->frozen = FALSE;
1665 	new_task->changing_freeze_state = FALSE;
1666 	new_task->rusage_cpu_flags = 0;
1667 	new_task->rusage_cpu_percentage = 0;
1668 	new_task->rusage_cpu_interval = 0;
1669 	new_task->rusage_cpu_deadline = 0;
1670 	new_task->rusage_cpu_callt = NULL;
1671 #if MACH_ASSERT
1672 	new_task->suspends_outstanding = 0;
1673 #endif
1674 	recount_task_init(&new_task->tk_recount);
1675 
1676 #if HYPERVISOR
1677 	new_task->hv_task_target = NULL;
1678 #endif /* HYPERVISOR */
1679 
1680 #if CONFIG_TASKWATCH
1681 	queue_init(&new_task->task_watchers);
1682 	new_task->num_taskwatchers  = 0;
1683 	new_task->watchapplying  = 0;
1684 #endif /* CONFIG_TASKWATCH */
1685 
1686 	new_task->mem_notify_reserved = 0;
1687 	new_task->memlimit_attrs_reserved = 0;
1688 
1689 	new_task->requested_policy = default_task_requested_policy;
1690 	new_task->effective_policy = default_task_effective_policy;
1691 
1692 	new_task->task_shared_region_slide = -1;
1693 
1694 	if (parent_task != NULL) {
1695 		task_ro_data.task_tokens.sec_token = *task_get_sec_token(parent_task);
1696 		task_ro_data.task_tokens.audit_token = *task_get_audit_token(parent_task);
1697 
1698 		/* only inherit the option bits, no effect until task_set_immovable_pinned() */
1699 		task_ro_data.task_control_port_options = task_get_control_port_options(parent_task);
1700 
1701 		task_ro_data.t_flags_ro |= parent_t_flags_ro & TFRO_FILTER_MSG;
1702 #if CONFIG_MACF
1703 		if (!(t_flags & TF_CORPSE_FORK)) {
1704 			task_ro_data.task_filters.mach_trap_filter_mask = task_get_mach_trap_filter_mask(parent_task);
1705 			task_ro_data.task_filters.mach_kobj_filter_mask = task_get_mach_kobj_filter_mask(parent_task);
1706 		}
1707 #endif
1708 	} else {
1709 		task_ro_data.task_tokens.sec_token = KERNEL_SECURITY_TOKEN;
1710 		task_ro_data.task_tokens.audit_token = KERNEL_AUDIT_TOKEN;
1711 
1712 		task_ro_data.task_control_port_options = TASK_CONTROL_PORT_OPTIONS_NONE;
1713 	}
1714 
1715 	/* must set before task_importance_init_from_parent: */
1716 	if (proc_ro != NULL) {
1717 		new_task->bsd_info_ro = proc_ro_ref_task(proc_ro, new_task, &task_ro_data);
1718 	} else {
1719 		new_task->bsd_info_ro = proc_ro_alloc(NULL, NULL, new_task, &task_ro_data);
1720 	}
1721 
1722 	ipc_task_init(new_task, parent_task);
1723 
1724 	task_importance_init_from_parent(new_task, parent_task);
1725 
1726 	new_task->corpse_vmobject_list = NULL;
1727 
1728 	if (parent_task != TASK_NULL) {
1729 		/* inherit the parent's shared region */
1730 		shared_region = vm_shared_region_get(parent_task);
1731 		if (shared_region != NULL) {
1732 			vm_shared_region_set(new_task, shared_region);
1733 		}
1734 
1735 #if __has_feature(ptrauth_calls)
1736 		/* use parent's shared_region_id */
1737 		char *shared_region_id = task_get_vm_shared_region_id_and_jop_pid(parent_task, NULL);
1738 		if (shared_region_id != NULL) {
1739 			shared_region_key_alloc(shared_region_id, FALSE, 0);         /* get a reference */
1740 		}
1741 		task_set_shared_region_id(new_task, shared_region_id);
1742 #endif /* __has_feature(ptrauth_calls) */
1743 
1744 		if (task_has_64Bit_addr(parent_task)) {
1745 			task_set_64Bit_addr(new_task);
1746 		}
1747 
1748 		if (task_has_64Bit_data(parent_task)) {
1749 			task_set_64Bit_data(new_task);
1750 		}
1751 
1752 		if (inherit_memory) {
1753 			new_task->all_image_info_addr = parent_task->all_image_info_addr;
1754 			new_task->all_image_info_size = parent_task->all_image_info_size;
1755 			if (parent_task->t_flags & TF_DYLD_ALL_IMAGE_FINAL) {
1756 				new_task->t_flags |= TF_DYLD_ALL_IMAGE_FINAL;
1757 			}
1758 		}
1759 		new_task->mach_header_vm_address = 0;
1760 
1761 		if (inherit_memory && parent_task->affinity_space) {
1762 			task_affinity_create(parent_task, new_task);
1763 		}
1764 
1765 		new_task->pset_hint = parent_task->pset_hint = task_choose_pset(parent_task);
1766 
1767 		new_task->task_exc_guard = parent_task->task_exc_guard;
1768 		if (parent_task->t_flags & TF_NO_SMT) {
1769 			new_task->t_flags |= TF_NO_SMT;
1770 		}
1771 
1772 		if (parent_task->t_flags & TF_USE_PSET_HINT_CLUSTER_TYPE) {
1773 			new_task->t_flags |= TF_USE_PSET_HINT_CLUSTER_TYPE;
1774 		}
1775 
1776 		if (parent_task->t_flags & TF_TECS) {
1777 			new_task->t_flags |= TF_TECS;
1778 		}
1779 
1780 #if defined(__x86_64__)
1781 		if (parent_task->t_flags & TF_INSN_COPY_OPTOUT) {
1782 			new_task->t_flags |= TF_INSN_COPY_OPTOUT;
1783 		}
1784 #endif
1785 
1786 		new_task->priority = BASEPRI_DEFAULT;
1787 		new_task->max_priority = MAXPRI_USER;
1788 	} else {
1789 #ifdef __LP64__
1790 		if (is_64bit) {
1791 			task_set_64Bit_addr(new_task);
1792 		}
1793 #endif
1794 
1795 		if (is_64bit_data) {
1796 			task_set_64Bit_data(new_task);
1797 		}
1798 
1799 		new_task->all_image_info_addr = (mach_vm_address_t)0;
1800 		new_task->all_image_info_size = (mach_vm_size_t)0;
1801 
1802 		new_task->pset_hint = PROCESSOR_SET_NULL;
1803 
1804 		new_task->task_exc_guard = TASK_EXC_GUARD_NONE;
1805 
1806 		if (new_task == kernel_task) {
1807 			new_task->priority = BASEPRI_KERNEL;
1808 			new_task->max_priority = MAXPRI_KERNEL;
1809 		} else {
1810 			new_task->priority = BASEPRI_DEFAULT;
1811 			new_task->max_priority = MAXPRI_USER;
1812 		}
1813 	}
1814 
1815 	bzero(new_task->coalition, sizeof(new_task->coalition));
1816 	for (int i = 0; i < COALITION_NUM_TYPES; i++) {
1817 		queue_chain_init(new_task->task_coalition[i]);
1818 	}
1819 
1820 	/* Allocate I/O Statistics */
1821 	new_task->task_io_stats = kalloc_data(sizeof(struct io_stat_info),
1822 	    Z_WAITOK | Z_ZERO | Z_NOFAIL);
1823 
1824 	bzero(&(new_task->cpu_time_eqos_stats), sizeof(new_task->cpu_time_eqos_stats));
1825 	bzero(&(new_task->cpu_time_rqos_stats), sizeof(new_task->cpu_time_rqos_stats));
1826 
1827 	bzero(&new_task->extmod_statistics, sizeof(new_task->extmod_statistics));
1828 
1829 	counter_alloc(&(new_task->pageins));
1830 	counter_alloc(&(new_task->cow_faults));
1831 	counter_alloc(&(new_task->messages_sent));
1832 	counter_alloc(&(new_task->messages_received));
1833 
1834 	/* Copy resource accounting info from the parent for a corpse-forked task. */
1835 	if (parent_task != NULL && (t_flags & TF_CORPSE_FORK)) {
1836 		task_rollup_accounting_info(new_task, parent_task);
1837 		task_store_owned_vmobject_info(new_task, parent_task);
1838 	} else {
1839 		/* Initialize to zero for standard fork/spawn case */
1840 		new_task->total_runnable_time = 0;
1841 		new_task->syscalls_mach = 0;
1842 		new_task->syscalls_unix = 0;
1843 		new_task->c_switch = 0;
1844 		new_task->p_switch = 0;
1845 		new_task->ps_switch = 0;
1846 		new_task->decompressions = 0;
1847 		new_task->low_mem_notified_warn = 0;
1848 		new_task->low_mem_notified_critical = 0;
1849 		new_task->purged_memory_warn = 0;
1850 		new_task->purged_memory_critical = 0;
1851 		new_task->low_mem_privileged_listener = 0;
1852 		new_task->memlimit_is_active = 0;
1853 		new_task->memlimit_is_fatal = 0;
1854 		new_task->memlimit_active_exc_resource = 0;
1855 		new_task->memlimit_inactive_exc_resource = 0;
1856 		new_task->task_timer_wakeups_bin_1 = 0;
1857 		new_task->task_timer_wakeups_bin_2 = 0;
1858 		new_task->task_gpu_ns = 0;
1859 		new_task->task_writes_counters_internal.task_immediate_writes = 0;
1860 		new_task->task_writes_counters_internal.task_deferred_writes = 0;
1861 		new_task->task_writes_counters_internal.task_invalidated_writes = 0;
1862 		new_task->task_writes_counters_internal.task_metadata_writes = 0;
1863 		new_task->task_writes_counters_external.task_immediate_writes = 0;
1864 		new_task->task_writes_counters_external.task_deferred_writes = 0;
1865 		new_task->task_writes_counters_external.task_invalidated_writes = 0;
1866 		new_task->task_writes_counters_external.task_metadata_writes = 0;
1867 #if CONFIG_PHYS_WRITE_ACCT
1868 		new_task->task_fs_metadata_writes = 0;
1869 #endif /* CONFIG_PHYS_WRITE_ACCT */
1870 	}
1871 
1872 
1873 	new_task->donates_own_pages = FALSE;
1874 #if CONFIG_COALITIONS
1875 	if (!(t_flags & TF_CORPSE_FORK)) {
1876 		/* TODO: there is no graceful failure path here... */
1877 		if (parent_coalitions && parent_coalitions[COALITION_TYPE_RESOURCE]) {
1878 			coalitions_adopt_task(parent_coalitions, new_task);
1879 			if (parent_coalitions[COALITION_TYPE_JETSAM]) {
1880 				new_task->donates_own_pages = coalition_is_swappable(parent_coalitions[COALITION_TYPE_JETSAM]);
1881 			}
1882 		} else if (parent_task && parent_task->coalition[COALITION_TYPE_RESOURCE]) {
1883 			/*
1884 			 * all tasks at least have a resource coalition, so
1885 			 * if the parent has one then inherit all coalitions
1886 			 * the parent is a part of
1887 			 */
1888 			coalitions_adopt_task(parent_task->coalition, new_task);
1889 			if (parent_task->coalition[COALITION_TYPE_JETSAM]) {
1890 				new_task->donates_own_pages = coalition_is_swappable(parent_task->coalition[COALITION_TYPE_JETSAM]);
1891 			}
1892 		} else {
1893 			/* TODO: assert that new_task will be PID 1 (launchd) */
1894 			coalitions_adopt_init_task(new_task);
1895 		}
1896 		/*
1897 		 * on exec, we need to transfer the coalition roles from the
1898 		 * parent task to the exec copy task.
1899 		 */
1900 		if (parent_task && (t_procflags & TPF_EXEC_COPY)) {
1901 			int coal_roles[COALITION_NUM_TYPES];
1902 			task_coalition_roles(parent_task, coal_roles);
1903 			(void)coalitions_set_roles(new_task->coalition, new_task, coal_roles);
1904 		}
1905 	} else {
1906 		coalitions_adopt_corpse_task(new_task);
1907 	}
1908 
1909 	if (new_task->coalition[COALITION_TYPE_RESOURCE] == COALITION_NULL) {
1910 		panic("created task is not a member of a resource coalition");
1911 	}
1912 	task_set_coalition_member(new_task);
1913 #endif /* CONFIG_COALITIONS */
1914 
1915 	if (parent_task != TASK_NULL) {
1916 		/* task_policy_create queries the adopted coalition */
1917 		task_policy_create(new_task, parent_task);
1918 	}
1919 
1920 	new_task->dispatchqueue_offset = 0;
1921 	if (parent_task != NULL) {
1922 		new_task->dispatchqueue_offset = parent_task->dispatchqueue_offset;
1923 	}
1924 
1925 	new_task->task_can_transfer_memory_ownership = FALSE;
1926 	new_task->task_volatile_objects = 0;
1927 	new_task->task_nonvolatile_objects = 0;
1928 	new_task->task_objects_disowning = FALSE;
1929 	new_task->task_objects_disowned = FALSE;
1930 	new_task->task_owned_objects = 0;
1931 	queue_init(&new_task->task_objq);
1932 
1933 #if CONFIG_FREEZE
1934 	queue_init(&new_task->task_frozen_cseg_q);
1935 #endif /* CONFIG_FREEZE */
1936 
1937 	task_objq_lock_init(new_task);
1938 
1939 #if __arm64__
1940 	new_task->task_legacy_footprint = FALSE;
1941 	new_task->task_extra_footprint_limit = FALSE;
1942 	new_task->task_ios13extended_footprint_limit = FALSE;
1943 #endif /* __arm64__ */
1944 	new_task->task_region_footprint = FALSE;
1945 	new_task->task_has_crossed_thread_limit = FALSE;
1946 	new_task->task_thread_limit = 0;
1947 #if CONFIG_SECLUDED_MEMORY
1948 	new_task->task_can_use_secluded_mem = FALSE;
1949 	new_task->task_could_use_secluded_mem = FALSE;
1950 	new_task->task_could_also_use_secluded_mem = FALSE;
1951 	new_task->task_suppressed_secluded = FALSE;
1952 #endif /* CONFIG_SECLUDED_MEMORY */
1953 
1954 
1955 	/*
1956 	 * t_flags is set up above. But since we don't
1957 	 * support darkwake mode being set that way
1958 	 * currently, we clear it out here explicitly.
1959 	 */
1960 	new_task->t_flags &= ~(TF_DARKWAKE_MODE);
1961 
1962 	queue_init(&new_task->io_user_clients);
1963 	new_task->loadTag = 0;
1964 
1965 	lck_mtx_lock(&tasks_threads_lock);
1966 	queue_enter(&tasks, new_task, task_t, tasks);
1967 	tasks_count++;
1968 	if (tasks_suspend_state) {
1969 		task_suspend_internal(new_task);
1970 	}
1971 	lck_mtx_unlock(&tasks_threads_lock);
1972 	task_ref_hold_proc_task_struct(new_task);
1973 
1974 	return KERN_SUCCESS;
1975 }
1976 
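/*
 * Hedged sketch of a hypothetical caller of task_create_internal(): the
 * BSD fork/exec glue is expected to allocate the task and proc_ro storage
 * first and then initialize it here. Flag values are illustrative only:
 *
 *	kern_return_t kr;
 *	kr = task_create_internal(parent_task, proc_ro,
 *	    NULL,               // inherit coalitions from parent
 *	    TRUE,               // inherit_memory: fork-style COW copy
 *	    TRUE, TRUE,         // 64-bit address space and data
 *	    TF_NONE, 0, TPF_NONE,
 *	    TRW_LRETURNWAIT,    // child parks until released
 *	    child_task);
 *	if (kr != KERN_SUCCESS) {
 *		return kr;      // child_task was not initialized
 *	}
 */
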
1977 /*
1978  *	task_rollup_accounting_info
1979  *
1980  *	Roll up accounting stats. Used to rollup stats
1981  *	for exec copy task and corpse fork.
1982  */
1983 void
1984 task_rollup_accounting_info(task_t to_task, task_t from_task)
1985 {
1986 	assert(from_task != to_task);
1987 
1988 	recount_task_copy(&to_task->tk_recount, &from_task->tk_recount);
1989 	to_task->total_runnable_time = from_task->total_runnable_time;
1990 	counter_add(&to_task->faults, counter_load(&from_task->faults));
1991 	counter_add(&to_task->pageins, counter_load(&from_task->pageins));
1992 	counter_add(&to_task->cow_faults, counter_load(&from_task->cow_faults));
1993 	counter_add(&to_task->messages_sent, counter_load(&from_task->messages_sent));
1994 	counter_add(&to_task->messages_received, counter_load(&from_task->messages_received));
1995 	to_task->decompressions = from_task->decompressions;
1996 	to_task->syscalls_mach = from_task->syscalls_mach;
1997 	to_task->syscalls_unix = from_task->syscalls_unix;
1998 	to_task->c_switch = from_task->c_switch;
1999 	to_task->p_switch = from_task->p_switch;
2000 	to_task->ps_switch = from_task->ps_switch;
2001 	to_task->extmod_statistics = from_task->extmod_statistics;
2002 	to_task->low_mem_notified_warn = from_task->low_mem_notified_warn;
2003 	to_task->low_mem_notified_critical = from_task->low_mem_notified_critical;
2004 	to_task->purged_memory_warn = from_task->purged_memory_warn;
2005 	to_task->purged_memory_critical = from_task->purged_memory_critical;
2006 	to_task->low_mem_privileged_listener = from_task->low_mem_privileged_listener;
2007 	*to_task->task_io_stats = *from_task->task_io_stats;
2008 	to_task->cpu_time_eqos_stats = from_task->cpu_time_eqos_stats;
2009 	to_task->cpu_time_rqos_stats = from_task->cpu_time_rqos_stats;
2010 	to_task->task_timer_wakeups_bin_1 = from_task->task_timer_wakeups_bin_1;
2011 	to_task->task_timer_wakeups_bin_2 = from_task->task_timer_wakeups_bin_2;
2012 	to_task->task_gpu_ns = from_task->task_gpu_ns;
2013 	to_task->task_writes_counters_internal.task_immediate_writes = from_task->task_writes_counters_internal.task_immediate_writes;
2014 	to_task->task_writes_counters_internal.task_deferred_writes = from_task->task_writes_counters_internal.task_deferred_writes;
2015 	to_task->task_writes_counters_internal.task_invalidated_writes = from_task->task_writes_counters_internal.task_invalidated_writes;
2016 	to_task->task_writes_counters_internal.task_metadata_writes = from_task->task_writes_counters_internal.task_metadata_writes;
2017 	to_task->task_writes_counters_external.task_immediate_writes = from_task->task_writes_counters_external.task_immediate_writes;
2018 	to_task->task_writes_counters_external.task_deferred_writes = from_task->task_writes_counters_external.task_deferred_writes;
2019 	to_task->task_writes_counters_external.task_invalidated_writes = from_task->task_writes_counters_external.task_invalidated_writes;
2020 	to_task->task_writes_counters_external.task_metadata_writes = from_task->task_writes_counters_external.task_metadata_writes;
2021 #if CONFIG_PHYS_WRITE_ACCT
2022 	to_task->task_fs_metadata_writes = from_task->task_fs_metadata_writes;
2023 #endif /* CONFIG_PHYS_WRITE_ACCT */
2024 
2025 #if CONFIG_MEMORYSTATUS
2026 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.memorystatus_dirty_time);
2027 #endif /* CONFIG_MEMORYSTATUS */
2028 
2029 	/* Roll up the remaining non-memory ledger entries; memory accounting entries are intentionally skipped. */
2030 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time);
2031 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.platform_idle_wakeups);
2032 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.interrupt_wakeups);
2033 #if CONFIG_SCHED_SFI
2034 	for (sfi_class_id_t class_id = SFI_CLASS_UNSPECIFIED; class_id < MAX_SFI_CLASS_ID; class_id++) {
2035 		ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.sfi_wait_times[class_id]);
2036 	}
2037 #endif
2038 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time_billed_to_me);
2039 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time_billed_to_others);
2040 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.physical_writes);
2041 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.logical_writes);
2042 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.energy_billed_to_me);
2043 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.energy_billed_to_others);
2044 }
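
/*
 * Conceptually, each ledger_rollup_entry() call above makes the destination
 * entry absorb the source task's balance so accounting survives the
 * exec-copy/corpse-fork handoff. A rough equivalent in terms of the
 * credit/debit primitives (sketch only; the real routine operates on the
 * entries directly):
 *
 *	ledger_amount_t credit, debit;
 *	if (!ledger_get_entries(from_task->ledger, task_ledgers.cpu_time,
 *	    &credit, &debit)) {
 *		ledger_credit(to_task->ledger, task_ledgers.cpu_time, credit);
 *		ledger_debit(to_task->ledger, task_ledgers.cpu_time, debit);
 *	}
 */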
2045 
2046 /*
2047  *	task_deallocate_internal:
2048  *
2049  *	Drop a reference on a task.
2050  *	Don't call this directly.
2051  */
2052 extern void task_deallocate_internal(task_t task, os_ref_count_t refs);
2053 void
2054 task_deallocate_internal(
2055 	task_t          task,
2056 	os_ref_count_t  refs)
2057 {
2058 	ledger_amount_t credit, debit, interrupt_wakeups, platform_idle_wakeups;
2059 
2060 	if (task == TASK_NULL) {
2061 		return;
2062 	}
2063 
2064 #if IMPORTANCE_INHERITANCE
2065 	if (refs == 1) {
2066 		/*
2067 		 * If last ref potentially comes from the task's importance,
2068 		 * disconnect it.  But more task refs may be added before
2069 		 * that completes, so wait for the reference to go to zero
2070 		 * naturally (it may happen on a recursive task_deallocate()
2071 		 * from the ipc_importance_disconnect_task() call).
2072 		 */
2073 		if (IIT_NULL != task->task_imp_base) {
2074 			ipc_importance_disconnect_task(task);
2075 		}
2076 		return;
2077 	}
2078 #endif /* IMPORTANCE_INHERITANCE */
2079 
2080 	if (refs > 0) {
2081 		return;
2082 	}
2083 
2084 	/*
2085 	 * The task should be dead at this point. Ensure other resources,
2086 	 * like threads, are gone before we trash the world.
2087 	 */
2088 	assert(queue_empty(&task->threads));
2089 	assert(get_bsdtask_info(task) == NULL);
2090 	assert(!is_active(task->itk_space));
2091 	assert(!task->active);
2092 	assert(task->active_thread_count == 0);
2093 	assert(!task_get_game_mode(task));
2094 	assert(!task_get_carplay_mode(task));
2095 
2096 	lck_mtx_lock(&tasks_threads_lock);
2097 	assert(terminated_tasks_count > 0);
2098 	queue_remove(&terminated_tasks, task, task_t, tasks);
2099 	terminated_tasks_count--;
2100 	lck_mtx_unlock(&tasks_threads_lock);
2101 
2102 	/*
2103 	 * remove the reference on bank context
2104 	 */
2105 	task_bank_reset(task);
2106 
2107 	kfree_data(task->task_io_stats, sizeof(struct io_stat_info));
2108 
2109 	/*
2110 	 *	Give the machine dependent code a chance
2111 	 *	to perform cleanup before ripping apart
2112 	 *	the task.
2113 	 */
2114 	machine_task_terminate(task);
2115 
2116 	ipc_task_terminate(task);
2117 
2118 	/* let IOKit know: termination phase 2 */
2119 	iokit_task_terminate(task, 2);
2120 
2121 	/* Unregister task from userspace coredumps on panic */
2122 	kern_unregister_userspace_coredump(task);
2123 
2124 	if (task->affinity_space) {
2125 		task_affinity_deallocate(task);
2126 	}
2127 
2128 #if MACH_ASSERT
2129 	if (task->ledger != NULL &&
2130 	    task->map != NULL &&
2131 	    task->map->pmap != NULL &&
2132 	    task->map->pmap->ledger != NULL) {
2133 		assert(task->ledger == task->map->pmap->ledger);
2134 	}
2135 #endif /* MACH_ASSERT */
2136 
2137 	vm_owned_objects_disown(task);
2138 	assert(task->task_objects_disowned);
2139 	if (task->task_owned_objects != 0) {
2140 		panic("task_deallocate(%p): "
2141 		    "volatile_objects=%d nonvolatile_objects=%d owned=%d\n",
2142 		    task,
2143 		    task->task_volatile_objects,
2144 		    task->task_nonvolatile_objects,
2145 		    task->task_owned_objects);
2146 	}
2147 
2148 #if CONFIG_DEFERRED_RECLAIM
2149 	if (task->deferred_reclamation_metadata != NULL) {
2150 		vm_deferred_reclamation_buffer_deallocate(task->deferred_reclamation_metadata);
2151 		task->deferred_reclamation_metadata = NULL;
2152 	}
2153 #endif /* CONFIG_DEFERRED_RECLAIM */
2154 
2155 	vm_map_deallocate(task->map);
2156 	if (task->is_large_corpse) {
2157 		assert(large_corpse_count > 0);
2158 		OSDecrementAtomic(&large_corpse_count);
2159 		task->is_large_corpse = false;
2160 	}
2161 	is_release(task->itk_space);
2162 
2163 	if (task->t_rr_ranges) {
2164 		restartable_ranges_release(task->t_rr_ranges);
2165 	}
2166 
2167 	ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
2168 	    &interrupt_wakeups, &debit);
2169 	ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
2170 	    &platform_idle_wakeups, &debit);
2171 
2172 	struct recount_times_mach sum = { 0 };
2173 	struct recount_times_mach p_only = { 0 };
2174 	recount_task_times_perf_only(task, &sum, &p_only);
2175 #if CONFIG_PERVASIVE_ENERGY
2176 	uint64_t energy = recount_task_energy_nj(task);
2177 #endif /* CONFIG_PERVASIVE_ENERGY */
2178 	recount_task_deinit(&task->tk_recount);
2179 
2180 	/* Accumulate statistics for dead tasks */
2181 	lck_spin_lock(&dead_task_statistics_lock);
2182 	dead_task_statistics.total_user_time += sum.rtm_user;
2183 	dead_task_statistics.total_system_time += sum.rtm_system;
2184 
2185 	dead_task_statistics.task_interrupt_wakeups += interrupt_wakeups;
2186 	dead_task_statistics.task_platform_idle_wakeups += platform_idle_wakeups;
2187 
2188 	dead_task_statistics.task_timer_wakeups_bin_1 += task->task_timer_wakeups_bin_1;
2189 	dead_task_statistics.task_timer_wakeups_bin_2 += task->task_timer_wakeups_bin_2;
2190 	dead_task_statistics.total_ptime += p_only.rtm_user + p_only.rtm_system;
2191 	dead_task_statistics.total_pset_switches += task->ps_switch;
2192 	dead_task_statistics.task_gpu_ns += task->task_gpu_ns;
2193 #if CONFIG_PERVASIVE_ENERGY
2194 	dead_task_statistics.task_energy += energy;
2195 #endif /* CONFIG_PERVASIVE_ENERGY */
2196 
2197 	lck_spin_unlock(&dead_task_statistics_lock);
2198 	lck_mtx_destroy(&task->lock, &task_lck_grp);
2199 
2200 	if (!ledger_get_entries(task->ledger, task_ledgers.tkm_private, &credit,
2201 	    &debit)) {
2202 		OSAddAtomic64(credit, (int64_t *)&tasks_tkm_private.alloc);
2203 		OSAddAtomic64(debit, (int64_t *)&tasks_tkm_private.free);
2204 	}
2205 	if (!ledger_get_entries(task->ledger, task_ledgers.tkm_shared, &credit,
2206 	    &debit)) {
2207 		OSAddAtomic64(credit, (int64_t *)&tasks_tkm_shared.alloc);
2208 		OSAddAtomic64(debit, (int64_t *)&tasks_tkm_shared.free);
2209 	}
2210 	ledger_dereference(task->ledger);
2211 
2212 	counter_free(&task->faults);
2213 	counter_free(&task->pageins);
2214 	counter_free(&task->cow_faults);
2215 	counter_free(&task->messages_sent);
2216 	counter_free(&task->messages_received);
2217 
2218 #if CONFIG_COALITIONS
2219 	task_release_coalitions(task);
2220 #endif /* CONFIG_COALITIONS */
2221 
2222 	bzero(task->coalition, sizeof(task->coalition));
2223 
2224 #if MACH_BSD
2225 	/* clean up collected information since last reference to task is gone */
2226 	if (task->corpse_info) {
2227 		void *corpse_info_kernel = kcdata_memory_get_begin_addr(task->corpse_info);
2228 		task_crashinfo_destroy(task->corpse_info);
2229 		task->corpse_info = NULL;
2230 		kfree_data(corpse_info_kernel, CORPSEINFO_ALLOCATION_SIZE);
2231 	}
2232 #endif
2233 
2234 #if CONFIG_MACF
2235 	if (get_task_crash_label(task)) {
2236 		mac_exc_free_label(get_task_crash_label(task));
2237 		set_task_crash_label(task, NULL);
2238 	}
2239 #endif
2240 
2241 	assert(queue_empty(&task->task_objq));
2242 	task_objq_lock_destroy(task);
2243 
2244 	if (task->corpse_vmobject_list) {
2245 		kfree_data(task->corpse_vmobject_list,
2246 		    (vm_size_t)task->corpse_vmobject_list_size);
2247 	}
2248 
2249 	task_ref_count_fini(task);
2250 	proc_ro_erase_task(task->bsd_info_ro);
2251 	task_release_proc_task_struct(task, task->bsd_info_ro);
2252 }
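
/*
 * Minimal sketch of the expected wrapper (task_deallocate() and friends,
 * defined elsewhere): the reference count is dropped first and the
 * remaining count is handed in, which is why the refs == 1 and refs > 0
 * early returns above are correct. task_ref_release() is a hypothetical
 * name for the refcount-drop primitive:
 *
 *	void
 *	task_deallocate(task_t task)
 *	{
 *		os_ref_count_t refs = task_ref_release(task);
 *		task_deallocate_internal(task, refs);
 *	}
 */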
2253 
2254 /*
2255  *	task_name_deallocate_mig:
2256  *
2257  *	Drop a reference on a task name.
2258  */
2259 void
2260 task_name_deallocate_mig(
2261 	task_name_t             task_name)
2262 {
2263 	return task_deallocate_grp((task_t)task_name, TASK_GRP_MIG);
2264 }
2265 
2266 /*
2267  *	task_policy_set_deallocate_mig:
2268  *
2269  *	Drop a reference on a task type.
2270  */
2271 void
2272 task_policy_set_deallocate_mig(task_policy_set_t task_policy_set)
2273 {
2274 	return task_deallocate_grp((task_t)task_policy_set, TASK_GRP_MIG);
2275 }
2276 
2277 /*
2278  *	task_policy_get_deallocate_mig:
2279  *
2280  *	Drop a reference on a task type.
2281  */
2282 void
2283 task_policy_get_deallocate_mig(task_policy_get_t task_policy_get)
2284 {
2285 	return task_deallocate_grp((task_t)task_policy_get, TASK_GRP_MIG);
2286 }
2287 
2288 /*
2289  *	task_inspect_deallocate_mig:
2290  *
2291  *	Drop a task inspection reference.
2292  */
2293 void
2294 task_inspect_deallocate_mig(
2295 	task_inspect_t          task_inspect)
2296 {
2297 	return task_deallocate_grp((task_t)task_inspect, TASK_GRP_MIG);
2298 }
2299 
2300 /*
2301  *	task_read_deallocate_mig:
2302  *
2303  *	Drop a reference on task read port.
2304  */
2305 void
2306 task_read_deallocate_mig(
2307 	task_read_t          task_read)
2308 {
2309 	return task_deallocate_grp((task_t)task_read, TASK_GRP_MIG);
2310 }
2311 
2312 /*
2313  *	task_suspension_token_deallocate:
2314  *
2315  *	Drop a reference on a task suspension token.
2316  */
2317 void
2318 task_suspension_token_deallocate(
2319 	task_suspension_token_t         token)
2320 {
2321 	return task_deallocate((task_t)token);
2322 }
2323 
2324 void
2325 task_suspension_token_deallocate_grp(
2326 	task_suspension_token_t         token,
2327 	task_grp_t                      grp)
2328 {
2329 	return task_deallocate_grp((task_t)token, grp);
2330 }
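
/*
 * All of the *_deallocate_mig() wrappers above share one shape: the
 * flavored task types are task_t underneath, so each wrapper casts and
 * forwards to task_deallocate_grp() with the MIG reference group. A
 * hypothetical additional flavor would look identical:
 *
 *	void
 *	task_example_deallocate_mig(task_example_t task_example)
 *	{
 *		return task_deallocate_grp((task_t)task_example, TASK_GRP_MIG);
 *	}
 */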
2331 
2332 /*
2333  * task_collect_crash_info:
2334  *
2335  * collect crash info from bsd and mach based data
2336  */
2337 kern_return_t
2338 task_collect_crash_info(
2339 	task_t task,
2340 #ifdef CONFIG_MACF
2341 	struct label *crash_label,
2342 #endif
2343 	int is_corpse_fork)
2344 {
2345 	kern_return_t kr = KERN_SUCCESS;
2346 
2347 	kcdata_descriptor_t crash_data = NULL;
2348 	kcdata_descriptor_t crash_data_release = NULL;
2349 	mach_msg_type_number_t size = CORPSEINFO_ALLOCATION_SIZE;
2350 	mach_vm_offset_t crash_data_ptr = 0;
2351 	void *crash_data_kernel = NULL;
2352 	void *crash_data_kernel_release = NULL;
2353 #if CONFIG_MACF
2354 	struct label *label, *free_label;
2355 #endif
2356 
2357 	if (!corpses_enabled()) {
2358 		return KERN_NOT_SUPPORTED;
2359 	}
2360 
2361 #if CONFIG_MACF
2362 	free_label = label = mac_exc_create_label(NULL);
2363 #endif
2364 
2365 	task_lock(task);
2366 
2367 	assert(is_corpse_fork || get_bsdtask_info(task) != NULL);
2368 	if (task->corpse_info == NULL && (is_corpse_fork || get_bsdtask_info(task) != NULL)) {
2369 #if CONFIG_MACF
2370 		/* Set the crash label, used by the exception delivery mac hook */
2371 		free_label = get_task_crash_label(task);         // Most likely NULL.
2372 		set_task_crash_label(task, label);
2373 		mac_exc_update_task_crash_label(task, crash_label);
2374 #endif
2375 		task_unlock(task);
2376 
2377 		crash_data_kernel = kalloc_data(CORPSEINFO_ALLOCATION_SIZE,
2378 		    Z_WAITOK | Z_ZERO);
2379 		if (crash_data_kernel == NULL) {
2380 			kr = KERN_RESOURCE_SHORTAGE;
2381 			goto out_no_lock;
2382 		}
2383 		crash_data_ptr = (mach_vm_offset_t) crash_data_kernel;
2384 
2385 		/* Do not get a corpse ref for corpse fork */
2386 		crash_data = task_crashinfo_alloc_init((mach_vm_address_t)crash_data_ptr, size,
2387 		    is_corpse_fork ? 0 : CORPSE_CRASHINFO_HAS_REF,
2388 		    KCFLAG_USE_MEMCOPY);
2389 		if (crash_data) {
2390 			task_lock(task);
2391 			crash_data_release = task->corpse_info;
2392 			crash_data_kernel_release = kcdata_memory_get_begin_addr(crash_data_release);
2393 			task->corpse_info = crash_data;
2394 
2395 			task_unlock(task);
2396 			kr = KERN_SUCCESS;
2397 		} else {
2398 			kfree_data(crash_data_kernel,
2399 			    CORPSEINFO_ALLOCATION_SIZE);
2400 			kr = KERN_FAILURE;
2401 		}
2402 
2403 		if (crash_data_release != NULL) {
2404 			task_crashinfo_destroy(crash_data_release);
2405 		}
2406 		kfree_data(crash_data_kernel_release, CORPSEINFO_ALLOCATION_SIZE);
2407 	} else {
2408 		task_unlock(task);
2409 	}
2410 
2411 out_no_lock:
2412 #if CONFIG_MACF
2413 	if (free_label != NULL) {
2414 		mac_exc_free_label(free_label);
2415 	}
2416 #endif
2417 	return kr;
2418 }
2419 
2420 /*
2421  * task_deliver_crash_notification:
2422  *
2423  * Makes outcall to registered host port for a corpse.
2424  */
2425 kern_return_t
2426 task_deliver_crash_notification(
2427 	task_t corpse, /* corpse or corpse fork */
2428 	thread_t thread,
2429 	exception_type_t etype,
2430 	mach_exception_subcode_t subcode)
2431 {
2432 	kcdata_descriptor_t crash_info = corpse->corpse_info;
2433 	thread_t th_iter = NULL;
2434 	kern_return_t kr = KERN_SUCCESS;
2435 	wait_interrupt_t wsave;
2436 	mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
2437 	ipc_port_t corpse_port;
2438 
2439 	if (crash_info == NULL) {
2440 		return KERN_FAILURE;
2441 	}
2442 
2443 	assert(task_is_a_corpse(corpse));
2444 
2445 	task_lock(corpse);
2446 
2447 	/*
2448 	 * Always populate code[0] as the effective exception type for EXC_CORPSE_NOTIFY.
2449 	 * Crash reporters should derive whether it's fatal from the corpse blob.
2450 	 */
2451 	code[0] = etype;
2452 	code[1] = subcode;
2453 
2454 	queue_iterate(&corpse->threads, th_iter, thread_t, task_threads)
2455 	{
2456 		if (th_iter->corpse_dup == FALSE) {
2457 			ipc_thread_reset(th_iter);
2458 		}
2459 	}
2460 	task_unlock(corpse);
2461 
2462 	/* Arm the no-sender notification for taskport */
2463 	task_reference(corpse);
2464 	corpse_port = convert_corpse_to_port_and_nsrequest(corpse);
2465 
2466 	wsave = thread_interrupt_level(THREAD_UNINT);
2467 	kr = exception_triage_thread(EXC_CORPSE_NOTIFY, code, EXCEPTION_CODE_MAX, thread);
2468 	if (kr != KERN_SUCCESS) {
2469 		printf("Failed to send exception EXC_CORPSE_NOTIFY. error code: %d for pid %d\n", kr, task_pid(corpse));
2470 	}
2471 
2472 	(void)thread_interrupt_level(wsave);
2473 
2474 	/*
2475 	 * Drop the send right on corpse port, will fire the
2476 	 * no-sender notification if exception deliver failed.
2477 	 */
2478 	ipc_port_release_send(corpse_port);
2479 	return kr;
2480 }
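
/*
 * On the receiving side, a crash reporter's exception server sees the
 * code[] pair populated above. Hedged sketch of a handler, assuming the
 * conventional MIG server routine for mach exceptions (details elided):
 *
 *	kern_return_t
 *	catch_mach_exception_raise(mach_port_t exception_port,
 *	    mach_port_t thread, mach_port_t task, exception_type_t exception,
 *	    mach_exception_data_t code, mach_msg_type_number_t code_count)
 *	{
 *		if (exception == EXC_CORPSE_NOTIFY && code_count >= 2) {
 *			exception_type_t etype = (exception_type_t)code[0];
 *			mach_exception_subcode_t subcode = code[1];
 *			// fatality must be derived from the corpse blob
 *		}
 *		return KERN_SUCCESS;
 *	}
 */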
2481 
2482 /*
2483  *	task_terminate:
2484  *
2485  *	Terminate the specified task.  See comments on thread_terminate
2486  *	(kern/thread.c) about problems with terminating the "current task."
2487  */
2488 
2489 kern_return_t
2490 task_terminate(
2491 	task_t          task)
2492 {
2493 	if (task == TASK_NULL) {
2494 		return KERN_INVALID_ARGUMENT;
2495 	}
2496 
2497 	if (get_bsdtask_info(task)) {
2498 		return KERN_FAILURE;
2499 	}
2500 
2501 	return task_terminate_internal(task);
2502 }
2503 
2504 #if MACH_ASSERT
2505 extern int proc_pid(struct proc *);
2506 extern void proc_name_kdp(struct proc *p, char *buf, int size);
2507 #endif /* MACH_ASSERT */
2508 
2509 static void
2510 __unused task_partial_reap(task_t task, __unused int pid)
2511 {
2512 	unsigned int    reclaimed_resident = 0;
2513 	unsigned int    reclaimed_compressed = 0;
2514 	uint64_t        task_page_count;
2515 
2516 	task_page_count = (get_task_phys_footprint(task) / PAGE_SIZE_64);
2517 
2518 	KDBG(VMDBG_CODE(DBG_VM_MAP_PARTIAL_REAP) | DBG_FUNC_START,
2519 	    pid, task_page_count);
2520 
2521 	vm_map_partial_reap(task->map, &reclaimed_resident, &reclaimed_compressed);
2522 
2523 	KDBG(VMDBG_CODE(DBG_VM_MAP_PARTIAL_REAP) | DBG_FUNC_END,
2524 	    pid, reclaimed_resident, reclaimed_compressed);
2525 }
2526 
2527 /*
2528  * task_mark_corpse:
2529  *
2530  * Mark the task as a corpse. Called by crashing thread.
2531  */
2532 kern_return_t
2533 task_mark_corpse(task_t task)
2534 {
2535 	kern_return_t kr = KERN_SUCCESS;
2536 	thread_t self_thread;
2537 	(void) self_thread;
2538 	wait_interrupt_t wsave;
2539 #if CONFIG_MACF
2540 	struct label *crash_label = NULL;
2541 #endif
2542 
2543 	assert(task != kernel_task);
2544 	assert(task == current_task());
2545 	assert(!task_is_a_corpse(task));
2546 
2547 #if CONFIG_MACF
2548 	crash_label = mac_exc_create_label_for_proc((struct proc*)get_bsdtask_info(task));
2549 #endif
2550 
2551 	kr = task_collect_crash_info(task,
2552 #if CONFIG_MACF
2553 	    crash_label,
2554 #endif
2555 	    FALSE);
2556 	if (kr != KERN_SUCCESS) {
2557 		goto out;
2558 	}
2559 
2560 	self_thread = current_thread();
2561 
2562 	wsave = thread_interrupt_level(THREAD_UNINT);
2563 	task_lock(task);
2564 
2565 	/*
2566 	 * Check if any other thread called task_terminate_internal
2567 	 * and made the task inactive before we could mark it for
2568 	 * corpse pending report. Bail out if the task is inactive.
2569 	 */
2570 	if (!task->active) {
2571 		kcdata_descriptor_t crash_data_release = task->corpse_info;
2572 		void *crash_data_kernel_release = kcdata_memory_get_begin_addr(crash_data_release);
2573 
2574 		task->corpse_info = NULL;
2575 		task_unlock(task);
2576 
2577 		if (crash_data_release != NULL) {
2578 			task_crashinfo_destroy(crash_data_release);
2579 		}
2580 		kfree_data(crash_data_kernel_release, CORPSEINFO_ALLOCATION_SIZE);
2581 		return KERN_TERMINATED;
2582 	}
2583 
2584 	task_set_corpse_pending_report(task);
2585 	task_set_corpse(task);
2586 	task->crashed_thread_id = thread_tid(self_thread);
2587 
2588 	kr = task_start_halt_locked(task, TRUE);
2589 	assert(kr == KERN_SUCCESS);
2590 
2591 	task_set_uniqueid(task);
2592 
2593 	task_unlock(task);
2594 
2595 	/*
2596 	 * ipc_task_reset() moved to last thread_terminate_self(): rdar://75737960.
2597 	 * disable old ports here instead.
2598 	 *
2599 	 * The vm_map and ipc_space must exist until this function returns,
2600 	 * convert_port_to_{map,space}_with_flavor relies on this behavior.
2601 	 */
2602 	ipc_task_disable(task);
2603 
2604 	/* let IOKit know: termination phase 1 */
2605 	iokit_task_terminate(task, 1);
2606 
2607 	/* terminate the ipc space */
2608 	ipc_space_terminate(task->itk_space);
2609 
2610 	/* Add it to global corpse task list */
2611 	task_add_to_corpse_task_list(task);
2612 
2613 	thread_terminate_internal(self_thread);
2614 
2615 	(void) thread_interrupt_level(wsave);
2616 	assert(task->halting == TRUE);
2617 
2618 out:
2619 #if CONFIG_MACF
2620 	mac_exc_free_label(crash_label);
2621 #endif
2622 	return kr;
2623 }
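
/*
 * Hedged sketch of the expected call site (the crashing thread's exception
 * path; names illustrative): mark the current task, then let the corpse
 * notification machinery run. The crashing thread itself is terminated
 * inside task_mark_corpse() via thread_terminate_internal().
 *
 *	kern_return_t kr = task_mark_corpse(current_task());
 *	if (kr == KERN_TERMINATED) {
 *		// lost the race with task_terminate_internal(); no corpse
 *	}
 */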
2624 
2625 /*
2626  *	task_set_uniqueid
2627  *
2628  *	Set task uniqueid to systemwide unique 64 bit value
2629  */
2630 void
2631 task_set_uniqueid(task_t task)
2632 {
2633 	task->task_uniqueid = OSIncrementAtomic64(&next_taskuniqueid);
2634 }
2635 
2636 /*
2637  *	task_clear_corpse
2638  *
2639  *	Clears the corpse pending bit on task.
2640  *	Removes inspection bit on the threads.
2641  */
2642 void
2643 task_clear_corpse(task_t task)
2644 {
2645 	thread_t th_iter = NULL;
2646 
2647 	task_lock(task);
2648 	queue_iterate(&task->threads, th_iter, thread_t, task_threads)
2649 	{
2650 		thread_mtx_lock(th_iter);
2651 		th_iter->inspection = FALSE;
2652 		ipc_thread_disable(th_iter);
2653 		thread_mtx_unlock(th_iter);
2654 	}
2655 
2656 	thread_terminate_crashed_threads();
2657 	/* remove the pending corpse report flag */
2658 	task_clear_corpse_pending_report(task);
2659 
2660 	task_unlock(task);
2661 }
2662 
2663 /*
2664  *	task_port_no_senders
2665  *
2666  *	Called whenever the Mach port system detects no-senders on
2667  *	the task port of a corpse.
2668  *	Each notification that comes in should terminate the task (corpse).
2669  */
2670 static void
2671 task_port_no_senders(ipc_port_t port, __unused mach_port_mscount_t mscount)
2672 {
2673 	task_t task = ipc_kobject_get_locked(port, IKOT_TASK_CONTROL);
2674 
2675 	assert(task != TASK_NULL);
2676 	assert(task_is_a_corpse(task));
2677 
2678 	/* Remove the task from global corpse task list */
2679 	task_remove_from_corpse_task_list(task);
2680 
2681 	task_clear_corpse(task);
2682 	vm_map_unset_corpse_source(task->map);
2683 	task_terminate_internal(task);
2684 }
2685 
2686 /*
2687  *	task_port_with_flavor_no_senders
2688  *
2689  *	Called whenever the Mach port system detects no-senders on
2690  *	the task inspect or read port. These ports are allocated lazily and
2691  *	should be deallocated here when there are no senders remaining.
2692  */
2693 static void
2694 task_port_with_flavor_no_senders(
2695 	ipc_port_t          port,
2696 	mach_port_mscount_t mscount __unused)
2697 {
2698 	task_t task;
2699 	mach_task_flavor_t flavor;
2700 	ipc_kobject_type_t kotype;
2701 
2702 	ip_mq_lock(port);
2703 	if (port->ip_srights > 0) {
2704 		ip_mq_unlock(port);
2705 		return;
2706 	}
2707 	kotype = ip_kotype(port);
2708 	assert((IKOT_TASK_READ == kotype) || (IKOT_TASK_INSPECT == kotype));
2709 	task = ipc_kobject_get_locked(port, kotype);
2710 	if (task != TASK_NULL) {
2711 		task_reference(task);
2712 	}
2713 	ip_mq_unlock(port);
2714 
2715 	if (task == TASK_NULL) {
2716 		/* The task is exiting or disabled; it will eventually deallocate the port */
2717 		return;
2718 	}
2719 
2720 	if (kotype == IKOT_TASK_READ) {
2721 		flavor = TASK_FLAVOR_READ;
2722 	} else {
2723 		flavor = TASK_FLAVOR_INSPECT;
2724 	}
2725 
2726 	itk_lock(task);
2727 	ip_mq_lock(port);
2728 
2729 	/*
2730 	 * If the port is no longer active, then ipc_task_terminate() ran
2731 	 * and destroyed the kobject already. Just deallocate the task
2732 	 * ref we took and go away.
2733 	 *
2734 	 * It is also possible that several nsrequests are in flight;
2735 	 * only one shall NULL-out the port entry, and this is the one
2736 	 * that gets to dealloc the port.
2737 	 *
2738 	 * Check for a stale no-senders notification. A call to any function
2739 	 * that vends out send rights to this port could resurrect it between
2740 	 * this notification being generated and actually being handled here.
2741 	 */
2742 	if (!ip_active(port) ||
2743 	    task->itk_task_ports[flavor] != port ||
2744 	    port->ip_srights > 0) {
2745 		ip_mq_unlock(port);
2746 		itk_unlock(task);
2747 		task_deallocate(task);
2748 		return;
2749 	}
2750 
2751 	assert(task->itk_task_ports[flavor] == port);
2752 	task->itk_task_ports[flavor] = IP_NULL;
2753 	itk_unlock(task);
2754 
2755 	ipc_kobject_dealloc_port_and_unlock(port, 0, kotype);
2756 
2757 	task_deallocate(task);
2758 }
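
/*
 * The re-check above is the general pattern for consuming a lazily armed
 * no-senders notification. Condensed sketch of the invariant, with the
 * same locks taken in the same order:
 *
 *	itk_lock(task);
 *	ip_mq_lock(port);
 *	if (ip_active(port) &&
 *	    task->itk_task_ports[flavor] == port &&
 *	    port->ip_srights == 0) {
 *		// truly no senders: this notification owns the teardown
 *		task->itk_task_ports[flavor] = IP_NULL;
 *	}
 */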
2759 
2760 /*
2761  *	task_wait_till_threads_terminate_locked
2762  *
2763  *	Wait till all the threads in the task are terminated.
2764  *	Might release the task lock and re-acquire it.
2765  */
2766 void
2767 task_wait_till_threads_terminate_locked(task_t task)
2768 {
2769 	/* wait for all the threads in the task to terminate */
2770 	while (task->active_thread_count != 0) {
2771 		assert_wait((event_t)&task->active_thread_count, THREAD_UNINT);
2772 		task_unlock(task);
2773 		thread_block(THREAD_CONTINUE_NULL);
2774 
2775 		task_lock(task);
2776 	}
2777 }
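
/*
 * The loop above is the canonical Mach wait pattern: publish interest in
 * an event, drop the lock, block, then re-validate. The wakeup side (in
 * the thread-termination path, not shown in this excerpt) is assumed to
 * post the same event once the last thread exits, roughly:
 *
 *	if (--task->active_thread_count == 0) {
 *		thread_wakeup((event_t)&task->active_thread_count);
 *	}
 */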
2778 
2779 /*
2780  *	task_duplicate_map_and_threads
2781  *
2782  *	Copy vmmap of source task.
2783  *	Copy active threads from source task to destination task.
2784  *	The source task is suspended for the duration of the copy.
2785  */
2786 kern_return_t
2787 task_duplicate_map_and_threads(
2788 	task_t task,
2789 	void *p,
2790 	task_t new_task,
2791 	thread_t *thread_ret,
2792 	uint64_t **udata_buffer,
2793 	int *size,
2794 	int *num_udata,
2795 	bool for_exception)
2796 {
2797 	kern_return_t kr = KERN_SUCCESS;
2798 	int active;
2799 	thread_t thread, self, thread_return = THREAD_NULL;
2800 	thread_t new_thread = THREAD_NULL, first_thread = THREAD_NULL;
2801 	thread_t *thread_array;
2802 	uint32_t active_thread_count = 0, array_count = 0, i;
2803 	vm_map_t oldmap;
2804 	uint64_t *buffer = NULL;
2805 	int buf_size = 0;
2806 	int est_knotes = 0, num_knotes = 0;
2807 
2808 	self = current_thread();
2809 
2810 	/*
2811 	 * Suspend the task to copy thread state; use the internal
2812 	 * variant so that no user-space process can resume
2813 	 * the task from under us.
2814 	 */
2815 	kr = task_suspend_internal(task);
2816 	if (kr != KERN_SUCCESS) {
2817 		return kr;
2818 	}
2819 
2820 	if (task->map->disable_vmentry_reuse == TRUE) {
2821 		/*
2822 		 * Quite likely GuardMalloc (or some debugging tool)
2823 		 * is being used on this task. And it has gone through
2824 		 * its limit. Making a corpse will likely encounter
2825 		 * a lot of VM entries that will need COW.
2826 		 *
2827 		 * Skip it.
2828 		 */
2829 #if DEVELOPMENT || DEBUG
2830 		memorystatus_abort_vm_map_fork(task);
2831 #endif
2832 		ktriage_record(thread_tid(self), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_CORPSE, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_CORPSE_FAIL_LIBGMALLOC), 0 /* arg */);
2833 		task_resume_internal(task);
2834 		return KERN_FAILURE;
2835 	}
2836 
2837 	/* Check with VM if vm_map_fork is allowed for this task */
2838 	bool is_large = false;
2839 	if (memorystatus_allowed_vm_map_fork(task, &is_large)) {
2840 		/* Set up the new task's vmmap: switch from the parent task's map to its COW fork */
2841 		oldmap = new_task->map;
2842 		new_task->map = vm_map_fork(new_task->ledger,
2843 		    task->map,
2844 		    (VM_MAP_FORK_SHARE_IF_INHERIT_NONE |
2845 		    VM_MAP_FORK_PRESERVE_PURGEABLE |
2846 		    VM_MAP_FORK_CORPSE_FOOTPRINT |
2847 		    VM_MAP_FORK_SHARE_IF_OWNED));
2848 		if (new_task->map) {
2849 			new_task->is_large_corpse = is_large;
2850 			vm_map_deallocate(oldmap);
2851 
2852 			/* copy ledgers that impact the memory footprint */
2853 			vm_map_copy_footprint_ledgers(task, new_task);
2854 
2855 			/* Get all the udata pointers from kqueue */
2856 			est_knotes = kevent_proc_copy_uptrs(p, NULL, 0);
2857 			if (est_knotes > 0) {
2858 				buf_size = (est_knotes + 32) * sizeof(uint64_t);
2859 				buffer = kalloc_data(buf_size, Z_WAITOK);
2860 				num_knotes = kevent_proc_copy_uptrs(p, buffer, buf_size);
2861 				if (num_knotes > est_knotes + 32) {
2862 					num_knotes = est_knotes + 32;
2863 				}
2864 			}
2865 		} else {
2866 			if (is_large) {
2867 				assert(large_corpse_count > 0);
2868 				OSDecrementAtomic(&large_corpse_count);
2869 			}
2870 			new_task->map = oldmap;
2871 #if DEVELOPMENT || DEBUG
2872 			memorystatus_abort_vm_map_fork(task);
2873 #endif
2874 			task_resume_internal(task);
2875 			return KERN_NO_SPACE;
2876 		}
2877 	} else if (!for_exception) {
2878 #if DEVELOPMENT || DEBUG
2879 		memorystatus_abort_vm_map_fork(task);
2880 #endif
2881 		task_resume_internal(task);
2882 		return KERN_NO_SPACE;
2883 	}
2884 
2885 	active_thread_count = task->active_thread_count;
2886 	if (active_thread_count == 0) {
2887 		kfree_data(buffer, buf_size);
2888 		task_resume_internal(task);
2889 		return KERN_FAILURE;
2890 	}
2891 
2892 	thread_array = kalloc_type(thread_t, active_thread_count, Z_WAITOK);
2893 
2894 	/* Iterate all the threads and drop the task lock before calling thread_create_with_continuation */
2895 	task_lock(task);
2896 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
2897 		/* Skip inactive threads */
2898 		active = thread->active;
2899 		if (!active) {
2900 			continue;
2901 		}
2902 
2903 		if (array_count >= active_thread_count) {
2904 			break;
2905 		}
2906 
2907 		thread_array[array_count++] = thread;
2908 		thread_reference(thread);
2909 	}
2910 	task_unlock(task);
2911 
2912 	for (i = 0; i < array_count; i++) {
2913 		kr = thread_create_with_continuation(new_task, &new_thread, (thread_continue_t)thread_corpse_continue);
2914 		if (kr != KERN_SUCCESS) {
2915 			break;
2916 		}
2917 
2918 		/* Equivalent of current thread in corpse */
2919 		if (thread_array[i] == self) {
2920 			thread_return = new_thread;
2921 			new_task->crashed_thread_id = thread_tid(new_thread);
2922 		} else if (first_thread == NULL) {
2923 			first_thread = new_thread;
2924 		} else {
2925 			/* drop the extra ref returned by thread_create_with_continuation */
2926 			thread_deallocate(new_thread);
2927 		}
2928 
2929 		kr = thread_dup2(thread_array[i], new_thread);
2930 		if (kr != KERN_SUCCESS) {
2931 			thread_mtx_lock(new_thread);
2932 			new_thread->corpse_dup = TRUE;
2933 			thread_mtx_unlock(new_thread);
2934 			continue;
2935 		}
2936 
2937 		/* Copy thread name */
2938 		bsd_copythreadname(get_bsdthread_info(new_thread),
2939 		    get_bsdthread_info(thread_array[i]));
2940 		new_thread->thread_tag = thread_array[i]->thread_tag &
2941 		    ~THREAD_TAG_USER_JOIN;
2942 		thread_copy_resource_info(new_thread, thread_array[i]);
2943 	}
2944 
2945 	/* return the first thread if we couldn't find the equivalent of current */
2946 	if (thread_return == THREAD_NULL) {
2947 		thread_return = first_thread;
2948 	} else if (first_thread != THREAD_NULL) {
2949 		/* drop the extra ref returned by thread_create_with_continuation */
2950 		thread_deallocate(first_thread);
2951 	}
2952 
2953 	task_resume_internal(task);
2954 
2955 	for (i = 0; i < array_count; i++) {
2956 		thread_deallocate(thread_array[i]);
2957 	}
2958 	kfree_type(thread_t, active_thread_count, thread_array);
2959 
2960 	if (kr == KERN_SUCCESS) {
2961 		*thread_ret = thread_return;
2962 		*udata_buffer = buffer;
2963 		*size = buf_size;
2964 		*num_udata = num_knotes;
2965 	} else {
2966 		if (thread_return != THREAD_NULL) {
2967 			thread_deallocate(thread_return);
2968 		}
2969 		kfree_data(buffer, buf_size);
2970 	}
2971 
2972 	return kr;
2973 }
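/*
 * Editorial sketch (not in the original source): the udata capture above
 * uses a two-pass sizing idiom -- probe kevent_proc_copy_uptrs() with a
 * NULL buffer to learn the element count, allocate with slack for
 * concurrent growth, fill, then clamp to capacity.  A minimal userspace
 * analogue; copy_items() is a hypothetical stand-in for the kernel call:
 */
#include <stdint.h>
#include <stdlib.h>

extern int copy_items(uint64_t *buf, size_t bufsize); /* returns item count */

static uint64_t *
capture_items(size_t *count_out)
{
	*count_out = 0;
	int est = copy_items(NULL, 0);              /* pass 1: estimate only */
	if (est <= 0) {
		return NULL;
	}
	size_t cap = (size_t)est + 32;              /* slack, mirroring "+ 32" above */
	uint64_t *buf = calloc(cap, sizeof(uint64_t));
	if (buf == NULL) {
		return NULL;
	}
	int n = copy_items(buf, cap * sizeof(uint64_t));    /* pass 2: fill */
	if (n < 0) {
		free(buf);
		return NULL;
	}
	*count_out = ((size_t)n > cap) ? cap : (size_t)n;   /* clamp, as above */
	return buf;
}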
2974 
2975 #if CONFIG_SECLUDED_MEMORY
2976 extern void task_set_can_use_secluded_mem_locked(
2977 	task_t          task,
2978 	boolean_t       can_use_secluded_mem);
2979 #endif /* CONFIG_SECLUDED_MEMORY */
2980 
2981 #if MACH_ASSERT
2982 int debug4k_panic_on_terminate = 0;
2983 #endif /* MACH_ASSERT */
2984 kern_return_t
2985 task_terminate_internal(
2986 	task_t                  task)
2987 {
2988 	thread_t                        thread, self;
2989 	task_t                          self_task;
2990 	boolean_t                       interrupt_save;
2991 	int                             pid = 0;
2992 
2993 	assert(task != kernel_task);
2994 
2995 	self = current_thread();
2996 	self_task = current_task();
2997 
2998 	/*
2999 	 *	Get the task locked and make sure that we are not racing
3000 	 *	with someone else trying to terminate us.
3001 	 */
3002 	if (task == self_task) {
3003 		task_lock(task);
3004 	} else if (task < self_task) {
3005 		task_lock(task);
3006 		task_lock(self_task);
3007 	} else {
3008 		task_lock(self_task);
3009 		task_lock(task);
3010 	}
3011 
3012 #if CONFIG_SECLUDED_MEMORY
3013 	if (task->task_can_use_secluded_mem) {
3014 		task_set_can_use_secluded_mem_locked(task, FALSE);
3015 	}
3016 	task->task_could_use_secluded_mem = FALSE;
3017 	task->task_could_also_use_secluded_mem = FALSE;
3018 
3019 	if (task->task_suppressed_secluded) {
3020 		stop_secluded_suppression(task);
3021 	}
3022 #endif /* CONFIG_SECLUDED_MEMORY */
3023 
3024 	if (!task->active) {
3025 		/*
3026 		 *	Task is already being terminated.
3027 		 *	Just return an error. If we are dying, this will
3028 		 *	just get us to our AST special handler and that
3029 		 *	will get us to finalize the termination of ourselves.
3030 		 */
3031 		task_unlock(task);
3032 		if (self_task != task) {
3033 			task_unlock(self_task);
3034 		}
3035 
3036 		return KERN_FAILURE;
3037 	}
3038 
3039 	if (task_corpse_pending_report(task)) {
3040 		/*
3041 		 *	Task is marked for reporting as corpse.
3042 		 *	Just return an error. This will
3043 		 *	just get us to our AST special handler and that
3044 		 *	will get us to finish the path to death
3045 		 */
3046 		task_unlock(task);
3047 		if (self_task != task) {
3048 			task_unlock(self_task);
3049 		}
3050 
3051 		return KERN_FAILURE;
3052 	}
3053 
3054 	if (self_task != task) {
3055 		task_unlock(self_task);
3056 	}
3057 
3058 	/*
3059 	 * Make sure the current thread does not get aborted out of
3060 	 * the waits inside these operations.
3061 	 */
3062 	interrupt_save = thread_interrupt_level(THREAD_UNINT);
3063 
3064 	/*
3065 	 *	Indicate that we want all the threads to stop executing
3066 	 *	at user space by holding the task (we would have held
3067 	 *	each thread independently in thread_terminate_internal -
3068 	 *	but this way we may be more likely to already find it
3069 	 *	held there).  Mark the task inactive, and prevent
3070 	 *	further task operations via the task port.
3071 	 *
3072 	 *	The vm_map and ipc_space must exist until this function returns,
3073 	 *	convert_port_to_{map,space}_with_flavor relies on this behavior.
3074 	 */
3075 	bool first_suspension __unused = task_hold_locked(task);
3076 	task->active = FALSE;
3077 	ipc_task_disable(task);
3078 
3079 #if CONFIG_EXCLAVES
3080 	if (first_suspension) {
3081 		task_unlock(task);
3082 		task_suspend_conclave(task);
3083 		task_lock(task);
3084 	}
3085 #endif /* CONFIG_EXCLAVES */
3086 
3087 
3088 	/*
3089 	 *	Terminate each thread in the task.
3090 	 */
3091 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
3092 		thread_terminate_internal(thread);
3093 	}
3094 
3095 #ifdef MACH_BSD
3096 	void *bsd_info = get_bsdtask_info(task);
3097 	if (bsd_info != NULL) {
3098 		pid = proc_pid(bsd_info);
3099 	}
3100 #endif /* MACH_BSD */
3101 
3102 	task_unlock(task);
3103 
3104 #if CONFIG_EXCLAVES
3105 	task_stop_conclave(task, false);
3106 #endif /* CONFIG_EXCLAVES */
3107 
3108 	proc_set_task_policy(task, TASK_POLICY_ATTRIBUTE,
3109 	    TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
3110 
3111 	/* Early object reap phase */
3112 
3113 // PR-17045188: Revisit implementation
3114 //        task_partial_reap(task, pid);
3115 
3116 #if CONFIG_TASKWATCH
3117 	/*
3118 	 * remove all task watchers
3119 	 */
3120 	task_removewatchers(task);
3121 
3122 #endif /* CONFIG_TASKWATCH */
3123 
3124 	/*
3125 	 *	Destroy all synchronizers owned by the task.
3126 	 */
3127 	task_synchronizer_destroy_all(task);
3128 
3129 	/*
3130 	 *	Clear the watchport boost on the task.
3131 	 */
3132 	task_remove_turnstile_watchports(task);
3133 
3134 	/* let iokit know 1 */
3135 	iokit_task_terminate(task, 1);
3136 
3137 	/*
3138 	 *	Destroy the IPC space, leaving just a reference for it.
3139 	 */
3140 	ipc_space_terminate(task->itk_space);
3141 
3142 #if 00
3143 	/* if some ledgers go negative on tear-down again... */
3144 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
3145 	    task_ledgers.phys_footprint);
3146 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
3147 	    task_ledgers.internal);
3148 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
3149 	    task_ledgers.iokit_mapped);
3150 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
3151 	    task_ledgers.alternate_accounting);
3152 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
3153 	    task_ledgers.alternate_accounting_compressed);
3154 #endif
3155 
3156 #if CONFIG_DEFERRED_RECLAIM
3157 	/*
3158 	 * Remove this task's reclaim buffer from the global queues.
3159 	 */
3160 	if (task->deferred_reclamation_metadata != NULL) {
3161 		vm_deferred_reclamation_buffer_uninstall(task->deferred_reclamation_metadata);
3162 	}
3163 #endif /* CONFIG_DEFERRED_RECLAIM */
3164 
3165 	/*
3166 	 * If the current thread is a member of the task
3167 	 * being terminated, then the last reference to
3168 	 * the task will not be dropped until the thread
3169 	 * is finally reaped.  To avoid incurring the
3170 	 * expense of removing the address space regions
3171 	 * at reap time, we do it explicitly here.
3172 	 */
3173 
3174 #if MACH_ASSERT
3175 	/*
3176 	 * Identify the pmap's process, in case the pmap ledgers drift
3177 	 * and we have to report it.
3178 	 */
3179 	char procname[17];
3180 	void *proc = get_bsdtask_info(task);
3181 	if (proc) {
3182 		pid = proc_pid(proc);
3183 		proc_name_kdp(proc, procname, sizeof(procname));
3184 	} else {
3185 		pid = 0;
3186 		strlcpy(procname, "<unknown>", sizeof(procname));
3187 	}
3188 	pmap_set_process(task->map->pmap, pid, procname);
3189 	if (vm_map_page_shift(task->map) < (int)PAGE_SHIFT) {
3190 		DEBUG4K_LIFE("map %p procname: %s\n", task->map, procname);
3191 		if (debug4k_panic_on_terminate) {
3192 			panic("DEBUG4K: %s:%d %d[%s] map %p", __FUNCTION__, __LINE__, pid, procname, task->map);
3193 		}
3194 	}
3195 #endif /* MACH_ASSERT */
3196 
3197 	vm_map_terminate(task->map);
3198 
3199 	/* release our shared region */
3200 	vm_shared_region_set(task, NULL);
3201 
3202 #if __has_feature(ptrauth_calls)
3203 	task_set_shared_region_id(task, NULL);
3204 #endif /* __has_feature(ptrauth_calls) */
3205 
3206 	lck_mtx_lock(&tasks_threads_lock);
3207 	queue_remove(&tasks, task, task_t, tasks);
3208 	queue_enter(&terminated_tasks, task, task_t, tasks);
3209 	tasks_count--;
3210 	terminated_tasks_count++;
3211 	lck_mtx_unlock(&tasks_threads_lock);
3212 
3213 	/*
3214 	 * We no longer need to guard against being aborted, so restore
3215 	 * the previous interruptible state.
3216 	 */
3217 	thread_interrupt_level(interrupt_save);
3218 
3219 #if CONFIG_CPU_COUNTERS
3220 	/* force the task to release all ctrs */
3221 	if (task->t_kpc & TASK_KPC_FORCED_ALL_CTRS) {
3222 		kpc_force_all_ctrs(task, 0);
3223 	}
3224 #endif /* CONFIG_CPU_COUNTERS */
3225 
3226 #if CONFIG_COALITIONS
3227 	/*
3228 	 * Leave the coalition for a corpse task or a task that
3229 	 * never had any active threads (e.g. fork, exec failure).
3230 	 * For a task with active threads, the task will be removed
3231 	 * from the coalition by the last terminating thread.
3232 	 */
3233 	if (task->active_thread_count == 0) {
3234 		coalitions_remove_task(task);
3235 	}
3236 #endif
3237 
3238 #if CONFIG_FREEZE
3239 	extern int      vm_compressor_available;
3240 	if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE && vm_compressor_available) {
3241 		task_disown_frozen_csegs(task);
3242 		assert(queue_empty(&task->task_frozen_cseg_q));
3243 	}
3244 #endif /* CONFIG_FREEZE */
3245 
3246 
3247 	/*
3248 	 * Get rid of the task active reference on itself.
3249 	 */
3250 	task_deallocate_grp(task, TASK_GRP_INTERNAL);
3251 
3252 	return KERN_SUCCESS;
3253 }
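/*
 * Editorial sketch (not in the original source): the prologue of
 * task_terminate_internal() above avoids an ABBA deadlock by always
 * taking the lower-addressed task lock first whenever two task locks
 * must be held.  The same address-ordering discipline with pthreads;
 * lock_pair() is an illustrative name, not a kernel API:
 */
#include <pthread.h>

struct obj {
	pthread_mutex_t lock;
};

static void
lock_pair(struct obj *a, struct obj *b)
{
	if (a == b) {
		pthread_mutex_lock(&a->lock);   /* same object: lock it once */
	} else if (a < b) {
		pthread_mutex_lock(&a->lock);   /* global order: low address first */
		pthread_mutex_lock(&b->lock);
	} else {
		pthread_mutex_lock(&b->lock);
		pthread_mutex_lock(&a->lock);
	}
}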
3254 
3255 void
3256 tasks_system_suspend(boolean_t suspend)
3257 {
3258 	task_t task;
3259 
3260 	KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SUSPEND_USERSPACE) |
3261 	    (suspend ? DBG_FUNC_START : DBG_FUNC_END));
3262 
3263 	lck_mtx_lock(&tasks_threads_lock);
3264 	assert(tasks_suspend_state != suspend);
3265 	tasks_suspend_state = suspend;
3266 	queue_iterate(&tasks, task, task_t, tasks) {
3267 		if (task == kernel_task) {
3268 			continue;
3269 		}
3270 		suspend ? task_suspend_internal(task) : task_resume_internal(task);
3271 	}
3272 	lck_mtx_unlock(&tasks_threads_lock);
3273 }
3274 
3275 /*
3276  * task_start_halt:
3277  *
3278  *      Shut the current task down (except for the current thread) in
3279  *	preparation for dramatic changes to the task (probably exec).
3280  *	We hold the task and mark all other threads in the task for
3281  *	termination.
3282  */
3283 kern_return_t
3284 task_start_halt(task_t task)
3285 {
3286 	kern_return_t kr = KERN_SUCCESS;
3287 	task_lock(task);
3288 	kr = task_start_halt_locked(task, FALSE);
3289 	task_unlock(task);
3290 	return kr;
3291 }
3292 
3293 static kern_return_t
3294 task_start_halt_locked(task_t task, boolean_t should_mark_corpse)
3295 {
3296 	thread_t thread, self;
3297 	uint64_t dispatchqueue_offset;
3298 
3299 	assert(task != kernel_task);
3300 
3301 	self = current_thread();
3302 
3303 	if (task != get_threadtask(self) && !task_is_a_corpse_fork(task)) {
3304 		return KERN_INVALID_ARGUMENT;
3305 	}
3306 
3307 	if (!should_mark_corpse &&
3308 	    (task->halting || !task->active || !self->active)) {
3309 		/*
3310 		 * Task or current thread is already being terminated.
3311 		 * Hurry up and return out of the current kernel context
3312 		 * so that we run our AST special handler to terminate
3313 		 * ourselves. If should_mark_corpse is set, the corpse
3314 		 * creation might have raced with exec; let the corpse
3315 		 * creation continue. Once the current thread reaches AST,
3316 		 * the thread in exec will be woken up from task_complete_halt.
3317 		 * Exec will fail because the proc was marked for exit.
3318 		 * Once the thread in exec reaches AST, it will call proc_exit
3319 		 * and deliver the EXC_CORPSE_NOTIFY.
3320 		 */
3321 		return KERN_FAILURE;
3322 	}
3323 
3324 	/* Thread creation will fail after this point of no return. */
3325 	task->halting = TRUE;
3326 
3327 	/*
3328 	 * Mark all the threads to keep them from starting any more
3329 	 * user-level execution. The thread_terminate_internal code
3330 	 * would do this on a thread by thread basis anyway, but this
3331 	 * gives us a better chance of not having to wait there.
3332 	 */
3333 	bool first_suspension __unused = task_hold_locked(task);
3334 
3335 #if CONFIG_EXCLAVES
3336 	if (should_mark_corpse) {
3337 		void *crash_info_ptr = task_get_corpseinfo(task);
3338 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
3339 			if (crash_info_ptr != NULL && thread->th_exclaves_ipc_ctx.ipcb != NULL) {
3340 				struct thread_crash_exclaves_info info = { 0 };
3341 
3342 				info.tcei_flags = kExclaveRPCActive;
3343 				info.tcei_scid = thread->th_exclaves_ipc_ctx.scid;
3344 				info.tcei_thread_id = thread->thread_id;
3345 
3346 				kcdata_push_data(crash_info_ptr,
3347 				    STACKSHOT_KCTYPE_KERN_EXCLAVES_CRASH_THREADINFO,
3348 				    sizeof(struct thread_crash_exclaves_info), &info);
3349 			}
3350 		}
3351 	}
3352 
3353 	if (first_suspension || should_mark_corpse) {
3354 		task_unlock(task);
3355 		if (first_suspension) {
3356 			task_suspend_conclave(task);
3357 		}
3358 
3359 		if (should_mark_corpse) {
3360 			task_stop_conclave(task, true);
3361 		}
3362 		task_lock(task);
3363 	}
3364 #endif /* CONFIG_EXCLAVES */
3365 
3366 	dispatchqueue_offset = get_dispatchqueue_offset_from_proc(get_bsdtask_info(task));
3367 	/*
3368 	 * Terminate all the other threads in the task.
3369 	 */
3370 	queue_iterate(&task->threads, thread, thread_t, task_threads)
3371 	{
3372 		/*
3373 		 * Remove priority throttles so that threads can terminate in a timely manner. This has
3374 		 * to be done after task_hold_locked() traps all threads to AST, but before
3375 		 * threads are marked inactive in thread_terminate_internal(). Takes thread
3376 		 * mutex lock.
3377 		 *
3378 		 * We need the task_is_a_corpse() check so that we don't accidentally update policy
3379 		 * for tasks that are doing posix_spawn().
3380 		 *
3381 		 * See: thread_policy_update_tasklocked().
3382 		 */
3383 		if (task_is_a_corpse(task)) {
3384 			proc_set_thread_policy(thread, TASK_POLICY_ATTRIBUTE,
3385 			    TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
3386 		}
3387 
3388 		if (should_mark_corpse) {
3389 			thread_mtx_lock(thread);
3390 			thread->inspection = TRUE;
3391 			thread_mtx_unlock(thread);
3392 		}
3393 		if (thread != self) {
3394 			thread_terminate_internal(thread);
3395 		}
3396 	}
3397 	task->dispatchqueue_offset = dispatchqueue_offset;
3398 
3399 	task_release_locked(task);
3400 
3401 	return KERN_SUCCESS;
3402 }
3403 
3404 
3405 /*
3406  * task_complete_halt:
3407  *
3408  *	Complete task halt by waiting for threads to terminate, then clean
3409  *	up task resources (VM, port namespace, etc...) and then let the
3410  *	current thread go in the (practically empty) task context.
3411  *
3412  *	Note: the task->halting flag is not cleared, in order to avoid creation
3413  *	of a new thread in the old exec'ed task.
3414  */
3415 void
3416 task_complete_halt(task_t task)
3417 {
3418 	task_lock(task);
3419 	assert(task->halting);
3420 	assert(task == current_task());
3421 
3422 	/*
3423 	 *	Wait for the other threads to get shut down.
3424 	 *      When the last other thread is reaped, we'll be
3425 	 *	woken up.
3426 	 */
3427 	if (task->thread_count > 1) {
3428 		assert_wait((event_t)&task->halting, THREAD_UNINT);
3429 		task_unlock(task);
3430 		thread_block(THREAD_CONTINUE_NULL);
3431 	} else {
3432 		task_unlock(task);
3433 	}
3434 
3435 #if CONFIG_DEFERRED_RECLAIM
3436 	if (task->deferred_reclamation_metadata) {
3437 		vm_deferred_reclamation_buffer_uninstall(
3438 			task->deferred_reclamation_metadata);
3439 		vm_deferred_reclamation_buffer_deallocate(
3440 			task->deferred_reclamation_metadata);
3441 		task->deferred_reclamation_metadata = NULL;
3442 	}
3443 #endif /* CONFIG_DEFERRED_RECLAIM */
3444 
3445 	/*
3446 	 *	Give the machine dependent code a chance
3447 	 *	to perform cleanup of task-level resources
3448 	 *	associated with the current thread before
3449 	 *	ripping apart the task.
3450 	 */
3451 	machine_task_terminate(task);
3452 
3453 	/*
3454 	 *	Destroy all synchronizers owned by the task.
3455 	 */
3456 	task_synchronizer_destroy_all(task);
3457 
3458 	/* let iokit know 1 */
3459 	iokit_task_terminate(task, 1);
3460 
3461 	/*
3462 	 *	Terminate the IPC space.  A long time ago,
3463 	 *	this used to be ipc_space_clean() which would
3464 	 *	keep the space active but hollow it.
3465 	 *
3466  *	We really do not need those semantics given that
3467  *	tasks die with exec now.
3468 	 */
3469 	ipc_space_terminate(task->itk_space);
3470 
3471 	/*
3472 	 * Clean out the address space, as we are going to be
3473 	 * getting a new one.
3474 	 */
3475 	vm_map_terminate(task->map);
3476 
3477 	/*
3478 	 * Kick out any IOKitUser handles to the task. At best they're stale,
3479 	 * at worst someone is racing a SUID exec.
3480 	 */
3481 	/* let iokit know 2 */
3482 	iokit_task_terminate(task, 2);
3483 }
3484 
3485 #ifdef CONFIG_TASK_SUSPEND_STATS
3486 
3487 static void
3488 _task_mark_suspend_source(task_t task)
3489 {
3490 	int idx;
3491 	task_suspend_stats_t stats;
3492 	task_suspend_source_t source;
3493 	task_lock_assert_owned(task);
3494 	stats = &task->t_suspend_stats;
3495 
3496 	idx = stats->tss_count % TASK_SUSPEND_SOURCES_MAX;
3497 	source = &task->t_suspend_sources[idx];
3498 	bzero(source, sizeof(*source));
3499 
3500 	source->tss_time = mach_absolute_time();
3501 	source->tss_tid = current_thread()->thread_id;
3502 	source->tss_pid = task_pid(current_task());
3503 	strlcpy(source->tss_procname, task_best_name(current_task()),
3504 	    sizeof(source->tss_procname));
3505 
3506 	stats->tss_count++;
3507 }
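/*
 * Editorial sketch (not in the original source): the function above keeps
 * only the last TASK_SUSPEND_SOURCES_MAX suspenders by writing into a ring
 * indexed with tss_count % TASK_SUSPEND_SOURCES_MAX.  Replaying such a
 * ring oldest-first from a snapshot; RING_MAX and struct entry are
 * stand-ins for the kernel types:
 */
#define RING_MAX 8

struct entry {
	unsigned long long when;
};

static void
replay_ring(const struct entry ring[RING_MAX], unsigned long long count,
    void (*visit)(const struct entry *))
{
	unsigned long long n = (count < RING_MAX) ? count : RING_MAX;

	/* the oldest surviving record sits at (count - n) % RING_MAX */
	for (unsigned long long i = 0; i < n; i++) {
		visit(&ring[(count - n + i) % RING_MAX]);
	}
}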
3508 
3509 static inline void
3510 _task_mark_suspend_start(task_t task)
3511 {
3512 	task_lock_assert_owned(task);
3513 	task->t_suspend_stats.tss_last_start = mach_absolute_time();
3514 }
3515 
3516 static inline void
3517 _task_mark_suspend_end(task_t task)
3518 {
3519 	task_lock_assert_owned(task);
3520 	task->t_suspend_stats.tss_last_end = mach_absolute_time();
3521 	task->t_suspend_stats.tss_duration += (task->t_suspend_stats.tss_last_end -
3522 	    task->t_suspend_stats.tss_last_start);
3523 }
3524 
3525 static kern_return_t
3526 _task_get_suspend_stats_locked(task_t task, task_suspend_stats_t stats)
3527 {
3528 	if (task == TASK_NULL || stats == NULL) {
3529 		return KERN_INVALID_ARGUMENT;
3530 	}
3531 	task_lock_assert_owned(task);
3532 	memcpy(stats, &task->t_suspend_stats, sizeof(task->t_suspend_stats));
3533 	return KERN_SUCCESS;
3534 }
3535 
3536 static kern_return_t
3537 _task_get_suspend_sources_locked(task_t task, task_suspend_source_t sources)
3538 {
3539 	if (task == TASK_NULL || sources == NULL) {
3540 		return KERN_INVALID_ARGUMENT;
3541 	}
3542 	task_lock_assert_owned(task);
3543 	memcpy(sources, task->t_suspend_sources,
3544 	    sizeof(struct task_suspend_source_s) * TASK_SUSPEND_SOURCES_MAX);
3545 	return KERN_SUCCESS;
3546 }
3547 
3548 #endif /* CONFIG_TASK_SUSPEND_STATS */
3549 
3550 kern_return_t
3551 task_get_suspend_stats(task_t task, task_suspend_stats_t stats)
3552 {
3553 #ifdef CONFIG_TASK_SUSPEND_STATS
3554 	kern_return_t kr;
3555 	if (task == TASK_NULL || stats == NULL) {
3556 		return KERN_INVALID_ARGUMENT;
3557 	}
3558 	task_lock(task);
3559 	kr = _task_get_suspend_stats_locked(task, stats);
3560 	task_unlock(task);
3561 	return kr;
3562 #else /* CONFIG_TASK_SUSPEND_STATS */
3563 	(void)task;
3564 	(void)stats;
3565 	return KERN_NOT_SUPPORTED;
3566 #endif
3567 }
3568 
3569 kern_return_t
3570 task_get_suspend_stats_kdp(task_t task, task_suspend_stats_t stats)
3571 {
3572 #ifdef CONFIG_TASK_SUSPEND_STATS
3573 	if (task == TASK_NULL || stats == NULL) {
3574 		return KERN_INVALID_ARGUMENT;
3575 	}
3576 	memcpy(stats, &task->t_suspend_stats, sizeof(task->t_suspend_stats));
3577 	return KERN_SUCCESS;
3578 #else /* CONFIG_TASK_SUSPEND_STATS */
3579 #pragma unused(task, stats)
3580 	return KERN_NOT_SUPPORTED;
3581 #endif /* CONFIG_TASK_SUSPEND_STATS */
3582 }
3583 
3584 kern_return_t
3585 task_get_suspend_sources(task_t task, task_suspend_source_array_t sources)
3586 {
3587 #ifdef CONFIG_TASK_SUSPEND_STATS
3588 	kern_return_t kr;
3589 	if (task == TASK_NULL || sources == NULL) {
3590 		return KERN_INVALID_ARGUMENT;
3591 	}
3592 	task_lock(task);
3593 	kr = _task_get_suspend_sources_locked(task, sources);
3594 	task_unlock(task);
3595 	return kr;
3596 #else /* CONFIG_TASK_SUSPEND_STATS */
3597 	(void)task;
3598 	(void)sources;
3599 	return KERN_NOT_SUPPORTED;
3600 #endif
3601 }
3602 
3603 kern_return_t
3604 task_get_suspend_sources_kdp(task_t task, task_suspend_source_array_t sources)
3605 {
3606 #ifdef CONFIG_TASK_SUSPEND_STATS
3607 	if (task == TASK_NULL || sources == NULL) {
3608 		return KERN_INVALID_ARGUMENT;
3609 	}
3610 	memcpy(sources, task->t_suspend_sources,
3611 	    sizeof(struct task_suspend_source_s) * TASK_SUSPEND_SOURCES_MAX);
3612 	return KERN_SUCCESS;
3613 #else /* CONFIG_TASK_SUSPEND_STATS */
3614 #pragma unused(task, sources)
3615 	return KERN_NOT_SUPPORTED;
3616 #endif
3617 }
3618 
3619 /*
3620  *	task_hold_locked:
3621  *
3622  *	Suspend execution of the specified task.
3623  *	This is a recursive-style suspension of the task, a count of
3624  *	suspends is maintained.
3625  *
3626  *	CONDITIONS: the task is locked and active.
3627  *	Returns true if this was the first suspension.
3628  */
3629 bool
3630 task_hold_locked(
3631 	task_t          task)
3632 {
3633 	thread_t        thread;
3634 	void *bsd_info = get_bsdtask_info(task);
3635 
3636 	assert(task->active);
3637 
3638 	if (task->suspend_count++ > 0) {
3639 		return false;
3640 	}
3641 
3642 	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_SUSPENSION, MACH_TASK_SUSPEND),
3643 	    task_pid(task), task->user_stop_count, task->pidsuspended);
3644 
3645 	if (bsd_info) {
3646 		workq_proc_suspended(bsd_info);
3647 	}
3648 
3649 	/*
3650 	 *	Iterate through all the threads and hold them.
3651 	 */
3652 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
3653 		thread_mtx_lock(thread);
3654 		thread_hold(thread);
3655 		thread_mtx_unlock(thread);
3656 	}
3657 
3658 #ifdef CONFIG_TASK_SUSPEND_STATS
3659 	_task_mark_suspend_start(task);
3660 #endif
3661 	return true;
3662 }
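/*
 * Editorial sketch (not in the original source): task_hold_locked() is a
 * counted ("recursive") suspension.  Only the 0 -> 1 transition stops the
 * threads, and only the matching 1 -> 0 release in task_release_locked()
 * below restarts them.  The bare shape of the idiom; do_stop()/do_start()
 * are hypothetical side effects:
 */
#include <stdbool.h>

struct counted_hold {
	int suspend_count;
};

static bool
hold(struct counted_hold *h)            /* returns true on first suspension */
{
	if (h->suspend_count++ > 0) {
		return false;           /* already held: just bump the count */
	}
	/* do_stop(): only the first hold does the real work */
	return true;
}

static void
release(struct counted_hold *h)
{
	if (--h->suspend_count > 0) {
		return;                 /* other holds remain outstanding */
	}
	/* do_start(): only the last release undoes it */
}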
3663 
3664 /*
3665  *	task_hold_and_wait
3666  *
3667  *	Same as the internal routine above, except that it must lock
3668  *	and verify that the task is active.  This differs from task_suspend
3669  *	in that it places a kernel hold on the task rather than just a
3670  *	user-level hold.  This keeps users from over-resuming and setting
3671  *	it running out from under the kernel.
3672  *
3673  *      CONDITIONS: the caller holds a reference on the task
3674  */
3675 kern_return_t
3676 task_hold_and_wait(
3677 	task_t          task,
3678 	bool            suspend_conclave __unused)
3679 {
3680 	if (task == TASK_NULL) {
3681 		return KERN_INVALID_ARGUMENT;
3682 	}
3683 
3684 	task_lock(task);
3685 	if (!task->active) {
3686 		task_unlock(task);
3687 		return KERN_FAILURE;
3688 	}
3689 
3690 #ifdef CONFIG_TASK_SUSPEND_STATS
3691 	_task_mark_suspend_source(task);
3692 #endif /* CONFIG_TASK_SUSPEND_STATS */
3693 
3694 	bool first_suspension __unused = task_hold_locked(task);
3695 
3696 #if CONFIG_EXCLAVES
3697 	if (suspend_conclave && first_suspension) {
3698 		task_unlock(task);
3699 		task_suspend_conclave(task);
3700 		task_lock(task);
3701 		/*
3702 		 * If the task terminated/resumed before we could wait on its threads,
3703 		 * we lost a race; treat it as if the termination/resume happened
3704 		 * after the wait, and return SUCCESS.
3705 		 */
3706 		if (!task->active || task->suspend_count <= 0) {
3707 			task_unlock(task);
3708 			return KERN_SUCCESS;
3709 		}
3710 	}
3711 #endif /* CONFIG_EXCLAVES */
3712 
3713 	task_wait_locked(task, FALSE);
3714 	task_unlock(task);
3715 
3716 	return KERN_SUCCESS;
3717 }
3718 
3719 /*
3720  *	task_wait_locked:
3721  *
3722  *	Wait for all threads in task to stop.
3723  *
3724  * Conditions:
3725  *	Called with task locked, active, and held.
3726  */
3727 void
3728 task_wait_locked(
3729 	task_t          task,
3730 	boolean_t               until_not_runnable)
3731 {
3732 	thread_t        thread, self;
3733 
3734 	assert(task->active);
3735 	assert(task->suspend_count > 0);
3736 
3737 	self = current_thread();
3738 
3739 	/*
3740 	 *	Iterate through all the threads and wait for them to
3741 	 *	stop.  Do not wait for the current thread if it is within
3742 	 *	the task.
3743 	 */
3744 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
3745 		if (thread != self) {
3746 			thread_wait(thread, until_not_runnable);
3747 		}
3748 	}
3749 }
3750 
3751 boolean_t
3752 task_is_app_suspended(task_t task)
3753 {
3754 	return task->pidsuspended;
3755 }
3756 
3757 /*
3758  *	task_release_locked:
3759  *
3760  *	Release a kernel hold on a task.
3761  *
3762  *      CONDITIONS: the task is locked and active
3763  */
3764 void
3765 task_release_locked(
3766 	task_t          task)
3767 {
3768 	thread_t        thread;
3769 	void *bsd_info = get_bsdtask_info(task);
3770 
3771 	assert(task->active);
3772 	assert(task->suspend_count > 0);
3773 
3774 	if (--task->suspend_count > 0) {
3775 		return;
3776 	}
3777 
3778 	if (bsd_info) {
3779 		workq_proc_resumed(bsd_info);
3780 	}
3781 
3782 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
3783 		thread_mtx_lock(thread);
3784 		thread_release(thread);
3785 		thread_mtx_unlock(thread);
3786 	}
3787 
3788 	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_SUSPENSION, MACH_TASK_RESUME) | DBG_FUNC_NONE, task_pid(task));
3789 
3790 #if CONFIG_TASK_SUSPEND_STATS
3791 	_task_mark_suspend_end(task);
3792 #endif
3793 
3794 #if CONFIG_EXCLAVES
3795 	task_unlock(task);
3796 	task_resume_conclave(task);
3797 	task_lock(task);
3798 #endif /* CONFIG_EXCLAVES */
3799 }
3800 
3801 /*
3802  *	task_release:
3803  *
3804  *	Same as the internal routine above, except that it must lock
3805  *	and verify that the task is active.
3806  *
3807  *      CONDITIONS: The caller holds a reference to the task
3808  */
3809 kern_return_t
3810 task_release(
3811 	task_t          task)
3812 {
3813 	if (task == TASK_NULL) {
3814 		return KERN_INVALID_ARGUMENT;
3815 	}
3816 
3817 	task_lock(task);
3818 
3819 	if (!task->active) {
3820 		task_unlock(task);
3821 
3822 		return KERN_FAILURE;
3823 	}
3824 
3825 	task_release_locked(task);
3826 	task_unlock(task);
3827 
3828 	return KERN_SUCCESS;
3829 }
3830 
3831 static kern_return_t
3832 task_threads_internal(
3833 	task_t                  task,
3834 	thread_act_array_t     *threads_out,
3835 	mach_msg_type_number_t *countp,
3836 	mach_thread_flavor_t    flavor)
3837 {
3838 	mach_msg_type_number_t  actual, count, count_needed;
3839 	thread_act_array_t      thread_list;
3840 	thread_t                thread;
3841 	unsigned int            i;
3842 
3843 	count = 0;
3844 	thread_list = NULL;
3845 
3846 	if (task == TASK_NULL) {
3847 		return KERN_INVALID_ARGUMENT;
3848 	}
3849 
3850 	assert(flavor <= THREAD_FLAVOR_INSPECT);
3851 
3852 	for (;;) {
3853 		task_lock(task);
3854 		if (!task->active) {
3855 			task_unlock(task);
3856 
3857 			mach_port_array_free(thread_list, count);
3858 			return KERN_FAILURE;
3859 		}
3860 
3861 		count_needed = actual = task->thread_count;
3862 		if (count_needed <= count) {
3863 			break;
3864 		}
3865 
3866 		/* unlock the task and allocate more memory */
3867 		task_unlock(task);
3868 
3869 		mach_port_array_free(thread_list, count);
3870 		count = count_needed;
3871 		thread_list = mach_port_array_alloc(count, Z_WAITOK);
3872 
3873 		if (thread_list == NULL) {
3874 			return KERN_RESOURCE_SHORTAGE;
3875 		}
3876 	}
3877 
3878 	i = 0;
3879 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
3880 		assert(i < actual);
3881 		thread_reference(thread);
3882 		((thread_t *)thread_list)[i++] = thread;
3883 	}
3884 
3885 	count_needed = actual;
3886 
3887 	/* can unlock task now that we've got the thread refs */
3888 	task_unlock(task);
3889 
3890 	if (actual == 0) {
3891 		/* no threads, so return null pointer and deallocate memory */
3892 
3893 		mach_port_array_free(thread_list, count);
3894 
3895 		*threads_out = NULL;
3896 		*countp = 0;
3897 	} else {
3898 		/* if we allocated too much, must copy */
3899 		if (count_needed < count) {
3900 			mach_port_array_t newaddr;
3901 
3902 			newaddr = mach_port_array_alloc(count_needed, Z_WAITOK);
3903 			if (newaddr == NULL) {
3904 				for (i = 0; i < actual; ++i) {
3905 					thread_deallocate(((thread_t *)thread_list)[i]);
3906 				}
3907 				mach_port_array_free(thread_list, count);
3908 				return KERN_RESOURCE_SHORTAGE;
3909 			}
3910 
3911 			bcopy(thread_list, newaddr, count_needed * sizeof(thread_t));
3912 			mach_port_array_free(thread_list, count);
3913 			thread_list = newaddr;
3914 		}
3915 
3916 		/* do the conversion that MIG should handle */
3917 		convert_thread_array_to_ports(thread_list, actual, flavor);
3918 
3919 		*threads_out = thread_list;
3920 		*countp = actual;
3921 	}
3922 
3923 	return KERN_SUCCESS;
3924 }
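/*
 * Editorial sketch (not in the original source): task_threads_internal()
 * above cannot allocate while holding the task lock, yet the thread count
 * can change whenever the lock is dropped, so it loops: lock, measure,
 * and if the buffer is too small, unlock, grow, retry.  A compact pthread
 * analogue; list_lock and count_items() are hypothetical:
 */
#include <pthread.h>
#include <stdlib.h>

extern pthread_mutex_t list_lock;
extern size_t count_items(void);        /* must be called with list_lock held */

static void *
snapshot_alloc(size_t *count_out, size_t item_size)
{
	void *buf = NULL;
	size_t cap = 0;

	for (;;) {
		pthread_mutex_lock(&list_lock);
		size_t need = count_items();
		if (need <= cap) {
			*count_out = need;
			return buf;     /* lock still held: caller copies, then unlocks */
		}
		pthread_mutex_unlock(&list_lock);

		free(buf);              /* too small: grow outside the lock */
		cap = need;
		buf = malloc(cap * item_size);
		if (buf == NULL) {
			*count_out = 0;
			return NULL;
		}
	}
}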
3925 
3926 
3927 kern_return_t
3928 task_threads_from_user(
3929 	mach_port_t                 port,
3930 	thread_act_array_t         *threads_out,
3931 	mach_msg_type_number_t     *count)
3932 {
3933 	ipc_kobject_type_t kotype;
3934 	kern_return_t kr;
3935 
3936 	task_t task = convert_port_to_task_inspect_no_eval(port);
3937 
3938 	if (task == TASK_NULL) {
3939 		return KERN_INVALID_ARGUMENT;
3940 	}
3941 
3942 	kotype = ip_kotype(port);
3943 
3944 	switch (kotype) {
3945 	case IKOT_TASK_CONTROL:
3946 		kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_CONTROL);
3947 		break;
3948 	case IKOT_TASK_READ:
3949 		kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_READ);
3950 		break;
3951 	case IKOT_TASK_INSPECT:
3952 		kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_INSPECT);
3953 		break;
3954 	default:
3955 		panic("strange kobject type");
3956 		break;
3957 	}
3958 
3959 	task_deallocate(task);
3960 	return kr;
3961 }
3962 
3963 #define TASK_HOLD_NORMAL        0
3964 #define TASK_HOLD_PIDSUSPEND    1
3965 #define TASK_HOLD_LEGACY        2
3966 #define TASK_HOLD_LEGACY_ALL    3
3967 
3968 static kern_return_t
3969 place_task_hold(
3970 	task_t task,
3971 	int mode)
3972 {
3973 	if (!task->active && !task_is_a_corpse(task)) {
3974 		return KERN_FAILURE;
3975 	}
3976 
3977 	/* Return success for corpse task */
3978 	if (task_is_a_corpse(task)) {
3979 		return KERN_SUCCESS;
3980 	}
3981 
3982 #if MACH_ASSERT
3983 	current_task()->suspends_outstanding++;
3984 #endif
3985 
3986 	if (mode == TASK_HOLD_LEGACY) {
3987 		task->legacy_stop_count++;
3988 	}
3989 
3990 #ifdef CONFIG_TASK_SUSPEND_STATS
3991 	_task_mark_suspend_source(task);
3992 #endif /* CONFIG_TASK_SUSPEND_STATS */
3993 
3994 	if (task->user_stop_count++ > 0) {
3995 		/*
3996 		 *	If the stop count was positive, the task is
3997 		 *	already stopped and we can exit.
3998 		 */
3999 		return KERN_SUCCESS;
4000 	}
4001 
4002 	/*
4003 	 * Put a kernel-level hold on the threads in the task (all
4004 	 * user-level task suspensions added together represent a
4005 	 * single kernel-level hold).  We then wait for the threads
4006 	 * to stop executing user code.
4007 	 */
4008 	bool first_suspension __unused = task_hold_locked(task);
4009 
4010 #if CONFIG_EXCLAVES
4011 	if (first_suspension) {
4012 		task_unlock(task);
4013 		task_suspend_conclave(task);
4014 
4015 		/*
4016 		 * If the task terminated/resumed before we could wait on its threads,
4017 		 * we lost a race; treat it as if the termination/resume happened
4018 		 * after the wait, and return SUCCESS.
4019 		 */
4020 		task_lock(task);
4021 		if (!task->active || task->suspend_count <= 0) {
4022 			return KERN_SUCCESS;
4023 		}
4024 	}
4025 #endif /* CONFIG_EXCLAVES */
4026 
4027 	task_wait_locked(task, FALSE);
4028 
4029 	return KERN_SUCCESS;
4030 }
4031 
4032 static kern_return_t
4033 release_task_hold(
4034 	task_t          task,
4035 	int                     mode)
4036 {
4037 	boolean_t release = FALSE;
4038 
4039 	if (!task->active && !task_is_a_corpse(task)) {
4040 		return KERN_FAILURE;
4041 	}
4042 
4043 	/* Return success for corpse task */
4044 	if (task_is_a_corpse(task)) {
4045 		return KERN_SUCCESS;
4046 	}
4047 
4048 	if (mode == TASK_HOLD_PIDSUSPEND) {
4049 		if (task->pidsuspended == FALSE) {
4050 			return KERN_FAILURE;
4051 		}
4052 		task->pidsuspended = FALSE;
4053 	}
4054 
4055 	if (task->user_stop_count > (task->pidsuspended ? 1 : 0)) {
4056 #if MACH_ASSERT
4057 		/*
4058 		 * This is obviously not robust; if we suspend one task and then resume a different one,
4059 		 * we'll fly under the radar. This is only meant to catch the common case of a crashed
4060 		 * or buggy suspender.
4061 		 */
4062 		current_task()->suspends_outstanding--;
4063 #endif
4064 
4065 		if (mode == TASK_HOLD_LEGACY_ALL) {
4066 			if (task->legacy_stop_count >= task->user_stop_count) {
4067 				task->user_stop_count = 0;
4068 				release = TRUE;
4069 			} else {
4070 				task->user_stop_count -= task->legacy_stop_count;
4071 			}
4072 			task->legacy_stop_count = 0;
4073 		} else {
4074 			if (mode == TASK_HOLD_LEGACY && task->legacy_stop_count > 0) {
4075 				task->legacy_stop_count--;
4076 			}
4077 			if (--task->user_stop_count == 0) {
4078 				release = TRUE;
4079 			}
4080 		}
4081 	} else {
4082 		return KERN_FAILURE;
4083 	}
4084 
4085 	/*
4086 	 *	Release the task if necessary.
4087 	 */
4088 	if (release) {
4089 		task_release_locked(task);
4090 	}
4091 
4092 	return KERN_SUCCESS;
4093 }
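/*
 * Editorial sketch (not in the original source): the arithmetic above
 * layers several user-visible counts over one kernel hold --
 * user_stop_count is the total, legacy_stop_count the task_suspend()-style
 * subset, and an active pidsuspend reserves one extra unit.  A worked
 * example of the TASK_HOLD_LEGACY_ALL path:
 */
#include <assert.h>

static void
stop_count_example(void)
{
	int user_stop_count = 0, legacy_stop_count = 0, pidsuspended = 0;

	user_stop_count++, legacy_stop_count++;   /* task_suspend() */
	user_stop_count++, legacy_stop_count++;   /* task_suspend(), again */
	user_stop_count++, pidsuspended = 1;      /* task_pidsuspend() */

	/* TASK_HOLD_LEGACY_ALL: drop every legacy unit at once */
	user_stop_count -= legacy_stop_count;
	legacy_stop_count = 0;

	/* the pidsuspend unit is still outstanding, so the task stays held */
	assert(user_stop_count == 1 && pidsuspended == 1);
	/* nothing is left for a plain task_resume() to release */
	assert(!(user_stop_count > (pidsuspended ? 1 : 0)));
}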
4094 
4095 boolean_t
4096 get_task_suspended(task_t task)
4097 {
4098 	return 0 != task->user_stop_count;
4099 }
4100 
4101 /*
4102  *	task_suspend:
4103  *
4104  *	Implement an (old-fashioned) user-level suspension on a task.
4105  *
4106  *	Because the user isn't expecting to have to manage a suspension
4107  *	token, we'll track it for him in the kernel in the form of a naked
4108  *	send right to the task's resume port.  All such send rights
4109  *	account for a single suspension against the task (unlike task_suspend2()
4110  *	where each caller gets a unique suspension count represented by a
4111  *	unique send-once right).
4112  *
4113  * Conditions:
4114  *      The caller holds a reference to the task
4115  */
4116 kern_return_t
4117 task_suspend(
4118 	task_t          task)
4119 {
4120 	kern_return_t                   kr;
4121 	mach_port_t                     port;
4122 	mach_port_name_t                name;
4123 
4124 	if (task == TASK_NULL || task == kernel_task) {
4125 		return KERN_INVALID_ARGUMENT;
4126 	}
4127 
4128 	/*
4129 	 * place a legacy hold on the task.
4130 	 */
4131 	task_lock(task);
4132 	kr = place_task_hold(task, TASK_HOLD_LEGACY);
4133 	task_unlock(task);
4134 
4135 	if (kr != KERN_SUCCESS) {
4136 		return kr;
4137 	}
4138 
4139 	/*
4140 	 * Claim a send right on the task resume port, and request a no-senders
4141 	 * notification on that port (if none outstanding).
4142 	 */
4143 	itk_lock(task);
4144 	port = task->itk_resume;
4145 	if (port == IP_NULL) {
4146 		port = ipc_kobject_alloc_port(task, IKOT_TASK_RESUME,
4147 		    IPC_KOBJECT_ALLOC_NSREQUEST | IPC_KOBJECT_ALLOC_MAKE_SEND);
4148 		task->itk_resume = port;
4149 	} else {
4150 		(void)ipc_kobject_make_send_nsrequest(port, task, IKOT_TASK_RESUME);
4151 	}
4152 	itk_unlock(task);
4153 
4154 	/*
4155 	 * Copyout the send right into the calling task's IPC space.  It won't know it is there,
4156 	 * but we'll look it up when calling a traditional resume.  Any IPC operations that
4157 	 * deallocate the send right will auto-release the suspension.
4158 	 */
4159 	if (IP_VALID(port)) {
4160 		kr = ipc_object_copyout(current_space(), ip_to_object(port),
4161 		    MACH_MSG_TYPE_MOVE_SEND, IPC_OBJECT_COPYOUT_FLAGS_NONE,
4162 		    NULL, NULL, &name);
4163 	} else {
4164 		kr = KERN_SUCCESS;
4165 	}
4166 	if (kr != KERN_SUCCESS) {
4167 		printf("warning: %s(%d) failed to copyout suspension "
4168 		    "token for pid %d with error: %d\n",
4169 		    proc_name_address(get_bsdtask_info(current_task())),
4170 		    proc_pid(get_bsdtask_info(current_task())),
4171 		    task_pid(task), kr);
4172 	}
4173 
4174 	return kr;
4175 }
4176 
4177 /*
4178  *	task_resume:
4179  *		Release a user hold on a task.
4180  *
4181  * Conditions:
4182  *		The caller holds a reference to the task
4183  */
4184 kern_return_t
4185 task_resume(
4186 	task_t  task)
4187 {
4188 	kern_return_t    kr;
4189 	mach_port_name_t resume_port_name;
4190 	ipc_entry_t              resume_port_entry;
4191 	ipc_space_t              space = current_task()->itk_space;
4192 
4193 	if (task == TASK_NULL || task == kernel_task) {
4194 		return KERN_INVALID_ARGUMENT;
4195 	}
4196 
4197 	/* release a legacy task hold */
4198 	task_lock(task);
4199 	kr = release_task_hold(task, TASK_HOLD_LEGACY);
4200 	task_unlock(task);
4201 
4202 	itk_lock(task); /* for itk_resume */
4203 	is_write_lock(space); /* spin lock */
4204 	if (is_active(space) && IP_VALID(task->itk_resume) &&
4205 	    ipc_hash_lookup(space, ip_to_object(task->itk_resume), &resume_port_name, &resume_port_entry) == TRUE) {
4206 		/*
4207 		 * We found a suspension token in the caller's IPC space. Release a send right to indicate that
4208 		 * we are holding one less legacy hold on the task from this caller.  If the release failed,
4209 		 * go ahead and drop all the rights, as someone either already released our holds or the task
4210 		 * is gone.
4211 		 */
4212 		itk_unlock(task);
4213 		if (kr == KERN_SUCCESS) {
4214 			ipc_right_dealloc(space, resume_port_name, resume_port_entry);
4215 		} else {
4216 			ipc_right_destroy(space, resume_port_name, resume_port_entry, FALSE, 0);
4217 		}
4218 		/* space unlocked */
4219 	} else {
4220 		itk_unlock(task);
4221 		is_write_unlock(space);
4222 		if (kr == KERN_SUCCESS) {
4223 			printf("warning: %s(%d) performed out-of-band resume on pid %d\n",
4224 			    proc_name_address(get_bsdtask_info(current_task())), proc_pid(get_bsdtask_info(current_task())),
4225 			    task_pid(task));
4226 		}
4227 	}
4228 
4229 	return kr;
4230 }
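/*
 * Editorial sketch (not in the original source): from userspace, the two
 * routines above pair up through the hidden resume-port send right.  A
 * minimal usage example -- task_for_pid() requires the appropriate
 * privileges/entitlements and is declared in <mach/mach_traps.h> on macOS:
 */
#include <mach/mach.h>
#include <mach/mach_traps.h>
#include <sys/types.h>

static int
freeze_briefly(pid_t pid)
{
	task_t target = TASK_NULL;

	if (task_for_pid(mach_task_self(), pid, &target) != KERN_SUCCESS) {
		return -1;                      /* no task port: not entitled */
	}
	kern_return_t kr = task_suspend(target);    /* places a legacy hold */
	if (kr == KERN_SUCCESS) {
		/* ... inspect the stopped task ... */
		kr = task_resume(target);       /* releases that legacy hold */
	}
	mach_port_deallocate(mach_task_self(), target);
	return (kr == KERN_SUCCESS) ? 0 : -1;
}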
4231 
4232 /*
4233  * Suspend the target task.
4234  * Making/holding a token/reference/port is the caller's responsibility.
4235  */
4236 kern_return_t
4237 task_suspend_internal(task_t task)
4238 {
4239 	kern_return_t    kr;
4240 
4241 	if (task == TASK_NULL || task == kernel_task) {
4242 		return KERN_INVALID_ARGUMENT;
4243 	}
4244 
4245 	task_lock(task);
4246 	kr = place_task_hold(task, TASK_HOLD_NORMAL);
4247 	task_unlock(task);
4248 	return kr;
4249 }
4250 
4251 /*
4252  * Suspend the target task, and return a suspension token. The token
4253  * represents a reference on the suspended task.
4254  */
4255 static kern_return_t
4256 task_suspend2_grp(
4257 	task_t                  task,
4258 	task_suspension_token_t *suspend_token,
4259 	task_grp_t              grp)
4260 {
4261 	kern_return_t    kr;
4262 
4263 	kr = task_suspend_internal(task);
4264 	if (kr != KERN_SUCCESS) {
4265 		*suspend_token = TASK_NULL;
4266 		return kr;
4267 	}
4268 
4269 	/*
4270 	 * Take a reference on the target task and return that to the caller
4271 	 * as a "suspension token," which can be converted into an SO right to
4272 	 * the now-suspended task's resume port.
4273 	 */
4274 	task_reference_grp(task, grp);
4275 	*suspend_token = task;
4276 
4277 	return KERN_SUCCESS;
4278 }
4279 
4280 kern_return_t
4281 task_suspend2_mig(
4282 	task_t                  task,
4283 	task_suspension_token_t *suspend_token)
4284 {
4285 	return task_suspend2_grp(task, suspend_token, TASK_GRP_MIG);
4286 }
4287 
4288 kern_return_t
4289 task_suspend2_external(
4290 	task_t                  task,
4291 	task_suspension_token_t *suspend_token)
4292 {
4293 	return task_suspend2_grp(task, suspend_token, TASK_GRP_EXTERNAL);
4294 }
4295 
4296 /*
4297  * Resume the task
4298  * (reference/token/port management is caller's responsibility).
4299  */
4300 kern_return_t
4301 task_resume_internal(
4302 	task_suspension_token_t         task)
4303 {
4304 	kern_return_t kr;
4305 
4306 	if (task == TASK_NULL || task == kernel_task) {
4307 		return KERN_INVALID_ARGUMENT;
4308 	}
4309 
4310 	task_lock(task);
4311 	kr = release_task_hold(task, TASK_HOLD_NORMAL);
4312 	task_unlock(task);
4313 	return kr;
4314 }
4315 
4316 /*
4317  * Resume the task using a suspension token. Consumes the token's ref.
4318  */
4319 static kern_return_t
4320 task_resume2_grp(
4321 	task_suspension_token_t         task,
4322 	task_grp_t                      grp)
4323 {
4324 	kern_return_t kr;
4325 
4326 	kr = task_resume_internal(task);
4327 	task_suspension_token_deallocate_grp(task, grp);
4328 
4329 	return kr;
4330 }
4331 
4332 kern_return_t
4333 task_resume2_mig(
4334 	task_suspension_token_t         task)
4335 {
4336 	return task_resume2_grp(task, TASK_GRP_MIG);
4337 }
4338 
4339 kern_return_t
4340 task_resume2_external(
4341 	task_suspension_token_t         task)
4342 {
4343 	return task_resume2_grp(task, TASK_GRP_EXTERNAL);
4344 }
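/*
 * Editorial sketch (not in the original source): the token flavor above
 * removes the "who resumed whom" ambiguity of the legacy calls -- each
 * task_suspend2() caller receives a private token, and exactly one
 * task_resume2() consumes it.  Minimal usage, with the same task-port
 * caveats as the legacy example earlier:
 */
#include <mach/mach.h>

static kern_return_t
suspend_then_resume(task_t target)
{
	task_suspension_token_t token = TASK_NULL;

	kern_return_t kr = task_suspend2(target, &token);
	if (kr != KERN_SUCCESS) {
		return kr;                  /* nothing to undo */
	}
	/* ... target is stopped; the token pins this one suspension ... */
	return task_resume2(token);         /* consumes the token's reference */
}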
4345 
4346 static void
4347 task_suspension_no_senders(ipc_port_t port, mach_port_mscount_t mscount)
4348 {
4349 	task_t task = convert_port_to_task_suspension_token(port);
4350 	kern_return_t kr;
4351 
4352 	if (task == TASK_NULL) {
4353 		return;
4354 	}
4355 
4356 	if (task == kernel_task) {
4357 		task_suspension_token_deallocate(task);
4358 		return;
4359 	}
4360 
4361 	task_lock(task);
4362 
4363 	kr = ipc_kobject_nsrequest(port, mscount, NULL);
4364 	if (kr == KERN_FAILURE) {
4365 		/* release all the [remaining] outstanding legacy holds */
4366 		release_task_hold(task, TASK_HOLD_LEGACY_ALL);
4367 	}
4368 
4369 	task_unlock(task);
4370 
4371 	task_suspension_token_deallocate(task);         /* drop token reference */
4372 }
4373 
4374 /*
4375  * Fires when a send-once right made
4376  * by convert_task_suspension_token_to_port() dies.
4377  */
4378 void
4379 task_suspension_send_once(ipc_port_t port)
4380 {
4381 	task_t task = convert_port_to_task_suspension_token(port);
4382 
4383 	if (task == TASK_NULL || task == kernel_task) {
4384 		return; /* nothing to do */
4385 	}
4386 
4387 	/* release the hold held by this specific send-once right */
4388 	task_lock(task);
4389 	release_task_hold(task, TASK_HOLD_NORMAL);
4390 	task_unlock(task);
4391 
4392 	task_suspension_token_deallocate(task);         /* drop token reference */
4393 }
4394 
4395 static kern_return_t
4396 task_pidsuspend_locked(task_t task)
4397 {
4398 	kern_return_t kr;
4399 
4400 	if (task->pidsuspended) {
4401 		kr = KERN_FAILURE;
4402 		goto out;
4403 	}
4404 
4405 	task->pidsuspended = TRUE;
4406 
4407 	kr = place_task_hold(task, TASK_HOLD_PIDSUSPEND);
4408 	if (kr != KERN_SUCCESS) {
4409 		task->pidsuspended = FALSE;
4410 	}
4411 out:
4412 	return kr;
4413 }
4414 
4415 
4416 /*
4417  *	task_pidsuspend:
4418  *
4419  *	Suspends a task by placing a hold on its threads.
4420  *
4421  * Conditions:
4422  *      The caller holds a reference to the task
4423  */
4424 kern_return_t
4425 task_pidsuspend(
4426 	task_t          task)
4427 {
4428 	kern_return_t    kr;
4429 
4430 	if (task == TASK_NULL || task == kernel_task) {
4431 		return KERN_INVALID_ARGUMENT;
4432 	}
4433 
4434 	task_lock(task);
4435 
4436 	kr = task_pidsuspend_locked(task);
4437 
4438 	task_unlock(task);
4439 
4440 	if ((KERN_SUCCESS == kr) && task->message_app_suspended) {
4441 		iokit_task_app_suspended_changed(task);
4442 	}
4443 
4444 	return kr;
4445 }
4446 
4447 /*
4448  *	task_pidresume:
4449  *		Resumes a previously suspended task.
4450  *
4451  * Conditions:
4452  *		The caller holds a reference to the task
4453  */
4454 kern_return_t
4455 task_pidresume(
4456 	task_t  task)
4457 {
4458 	kern_return_t    kr;
4459 
4460 	if (task == TASK_NULL || task == kernel_task) {
4461 		return KERN_INVALID_ARGUMENT;
4462 	}
4463 
4464 	task_lock(task);
4465 
4466 #if CONFIG_FREEZE
4467 
4468 	while (task->changing_freeze_state) {
4469 		assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
4470 		task_unlock(task);
4471 		thread_block(THREAD_CONTINUE_NULL);
4472 
4473 		task_lock(task);
4474 	}
4475 	task->changing_freeze_state = TRUE;
4476 #endif
4477 
4478 	kr = release_task_hold(task, TASK_HOLD_PIDSUSPEND);
4479 
4480 	task_unlock(task);
4481 
4482 	if ((KERN_SUCCESS == kr) && task->message_app_suspended) {
4483 		iokit_task_app_suspended_changed(task);
4484 	}
4485 
4486 #if CONFIG_FREEZE
4487 
4488 	task_lock(task);
4489 
4490 	if (kr == KERN_SUCCESS) {
4491 		task->frozen = FALSE;
4492 	}
4493 	task->changing_freeze_state = FALSE;
4494 	thread_wakeup(&task->changing_freeze_state);
4495 
4496 	task_unlock(task);
4497 #endif
4498 
4499 	return kr;
4500 }
4501 
4502 os_refgrp_decl(static, task_watchports_refgrp, "task_watchports", NULL);
4503 
4504 /*
4505  *	task_add_turnstile_watchports:
4506  *		Set up watchports to boost the main thread of the task.
4507  *
4508  *	Arguments:
4509  *		task: task being spawned
4510  *		thread: main thread of task
4511  *		portwatch_ports: array of watchports
4512  *		portwatch_count: number of watchports
4513  *
4514  *	Conditions:
4515  *		Nothing locked.
4516  */
4517 void
4518 task_add_turnstile_watchports(
4519 	task_t          task,
4520 	thread_t        thread,
4521 	ipc_port_t      *portwatch_ports,
4522 	uint32_t        portwatch_count)
4523 {
4524 	struct task_watchports *watchports = NULL;
4525 	struct task_watchport_elem *previous_elem_array[TASK_MAX_WATCHPORT_COUNT] = {};
4526 	os_ref_count_t refs;
4527 
4528 	/* Check if the task has terminated */
4529 	if (!task->active) {
4530 		return;
4531 	}
4532 
4533 	assert(portwatch_count <= TASK_MAX_WATCHPORT_COUNT);
4534 
4535 	watchports = task_watchports_alloc_init(task, thread, portwatch_count);
4536 
4537 	/* Lock the ipc space */
4538 	is_write_lock(task->itk_space);
4539 
4540 	/* Setup watchports to boost the main thread */
4541 	refs = task_add_turnstile_watchports_locked(task,
4542 	    watchports, previous_elem_array, portwatch_ports,
4543 	    portwatch_count);
4544 
4545 	/* Drop the space lock */
4546 	is_write_unlock(task->itk_space);
4547 
4548 	if (refs == 0) {
4549 		task_watchports_deallocate(watchports);
4550 	}
4551 
4552 	/* Drop the ref on previous_elem_array */
4553 	for (uint32_t i = 0; i < portwatch_count && previous_elem_array[i] != NULL; i++) {
4554 		task_watchport_elem_deallocate(previous_elem_array[i]);
4555 	}
4556 }
4557 
4558 /*
4559  *	task_remove_turnstile_watchports:
4560  *		Clear all turnstile boosts on the task from its watchports.
4561  *
4562  *	Arguments:
4563  *		task: task being terminated
4564  *
4565  *	Conditions:
4566  *		Nothing locked.
4567  */
4568 void
4569 task_remove_turnstile_watchports(
4570 	task_t          task)
4571 {
4572 	os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4573 	struct task_watchports *watchports = NULL;
4574 	ipc_port_t port_freelist[TASK_MAX_WATCHPORT_COUNT] = {};
4575 	uint32_t portwatch_count;
4576 
4577 	/* Lock the ipc space */
4578 	is_write_lock(task->itk_space);
4579 
4580 	/* Check if a watchport boost exists */
4581 	if (task->watchports == NULL) {
4582 		is_write_unlock(task->itk_space);
4583 		return;
4584 	}
4585 	watchports = task->watchports;
4586 	portwatch_count = watchports->tw_elem_array_count;
4587 
4588 	refs = task_remove_turnstile_watchports_locked(task, watchports,
4589 	    port_freelist);
4590 
4591 	is_write_unlock(task->itk_space);
4592 
4593 	/* Drop all the port references */
4594 	for (uint32_t i = 0; i < portwatch_count && port_freelist[i] != NULL; i++) {
4595 		ip_release(port_freelist[i]);
4596 	}
4597 
4598 	/* Clear the task and thread references for task_watchport */
4599 	if (refs == 0) {
4600 		task_watchports_deallocate(watchports);
4601 	}
4602 }
4603 
4604 /*
4605  *	task_transfer_turnstile_watchports:
4606  *		Transfer all watchport turnstile boost from old task to new task.
4607  *
4608  *	Arguments:
4609  *		old_task: task calling exec
4610  *		new_task: new exec'ed task
4611  *		thread: main thread of new task
4612  *
4613  *	Conditions:
4614  *		Nothing locked.
4615  */
4616 void
4617 task_transfer_turnstile_watchports(
4618 	task_t   old_task,
4619 	task_t   new_task,
4620 	thread_t new_thread)
4621 {
4622 	struct task_watchports *old_watchports = NULL;
4623 	struct task_watchports *new_watchports = NULL;
4624 	os_ref_count_t old_refs = TASK_MAX_WATCHPORT_COUNT;
4625 	os_ref_count_t new_refs = TASK_MAX_WATCHPORT_COUNT;
4626 	uint32_t portwatch_count;
4627 
4628 	if (old_task->watchports == NULL || !new_task->active) {
4629 		return;
4630 	}
4631 
4632 	/* Get the watch port count from the old task */
4633 	is_write_lock(old_task->itk_space);
4634 	if (old_task->watchports == NULL) {
4635 		is_write_unlock(old_task->itk_space);
4636 		return;
4637 	}
4638 
4639 	portwatch_count = old_task->watchports->tw_elem_array_count;
4640 	is_write_unlock(old_task->itk_space);
4641 
4642 	new_watchports = task_watchports_alloc_init(new_task, new_thread, portwatch_count);
4643 
4644 	/* Lock the ipc space for old task */
4645 	is_write_lock(old_task->itk_space);
4646 
4647 	/* Lock the ipc space for new task */
4648 	is_write_lock(new_task->itk_space);
4649 
4650 	/* Check if a watchport boost exists */
4651 	if (old_task->watchports == NULL || !new_task->active) {
4652 		is_write_unlock(new_task->itk_space);
4653 		is_write_unlock(old_task->itk_space);
4654 		(void)task_watchports_release(new_watchports);
4655 		task_watchports_deallocate(new_watchports);
4656 		return;
4657 	}
4658 
4659 	old_watchports = old_task->watchports;
4660 	assert(portwatch_count == old_task->watchports->tw_elem_array_count);
4661 
4662 	/* Setup new task watchports */
4663 	new_task->watchports = new_watchports;
4664 
4665 	for (uint32_t i = 0; i < portwatch_count; i++) {
4666 		ipc_port_t port = old_watchports->tw_elem[i].twe_port;
4667 
4668 		if (port == NULL) {
4669 			task_watchport_elem_clear(&new_watchports->tw_elem[i]);
4670 			continue;
4671 		}
4672 
4673 		/* Lock the port and check if it has the entry */
4674 		ip_mq_lock(port);
4675 
4676 		task_watchport_elem_init(&new_watchports->tw_elem[i], new_task, port);
4677 
4678 		if (ipc_port_replace_watchport_elem_conditional_locked(port,
4679 		    &old_watchports->tw_elem[i], &new_watchports->tw_elem[i]) == KERN_SUCCESS) {
4680 			task_watchport_elem_clear(&old_watchports->tw_elem[i]);
4681 
4682 			task_watchports_retain(new_watchports);
4683 			old_refs = task_watchports_release(old_watchports);
4684 
4685 			/* Check if all ports are cleaned */
4686 			if (old_refs == 0) {
4687 				old_task->watchports = NULL;
4688 			}
4689 		} else {
4690 			task_watchport_elem_clear(&new_watchports->tw_elem[i]);
4691 		}
4692 		/* port unlocked by ipc_port_replace_watchport_elem_conditional_locked */
4693 	}
4694 
4695 	/* Drop the reference on new task_watchports struct returned by task_watchports_alloc_init */
4696 	new_refs = task_watchports_release(new_watchports);
4697 	if (new_refs == 0) {
4698 		new_task->watchports = NULL;
4699 	}
4700 
4701 	is_write_unlock(new_task->itk_space);
4702 	is_write_unlock(old_task->itk_space);
4703 
4704 	/* Clear the task and thread references for old_watchport */
4705 	if (old_refs == 0) {
4706 		task_watchports_deallocate(old_watchports);
4707 	}
4708 
4709 	/* Clear the task and thread references for new_watchport */
4710 	if (new_refs == 0) {
4711 		task_watchports_deallocate(new_watchports);
4712 	}
4713 }
4714 
4715 /*
4716  *	task_add_turnstile_watchports_locked:
4717  *		Set up watchports to boost the main thread of the task.
4718  *
4719  *	Arguments:
4720  *		task: task to boost
4721  *		watchports: watchport structure to be attached to the task
4722  *		previous_elem_array: an array of old watchport_elem to be returned to caller
4723  *		portwatch_ports: array of watchports
4724  *		portwatch_count: number of watchports
4725  *
4726  *	Conditions:
4727  *		ipc space of the task locked.
4728  *		returns array of old watchport_elem in previous_elem_array
4729  */
4730 static os_ref_count_t
4731 task_add_turnstile_watchports_locked(
4732 	task_t                      task,
4733 	struct task_watchports      *watchports,
4734 	struct task_watchport_elem  **previous_elem_array,
4735 	ipc_port_t                  *portwatch_ports,
4736 	uint32_t                    portwatch_count)
4737 {
4738 	os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4739 
4740 	/* Check if the task is still active */
4741 	if (!task->active) {
4742 		refs = task_watchports_release(watchports);
4743 		return refs;
4744 	}
4745 
4746 	assert(task->watchports == NULL);
4747 	task->watchports = watchports;
4748 
4749 	for (uint32_t i = 0, j = 0; i < portwatch_count; i++) {
4750 		ipc_port_t port = portwatch_ports[i];
4751 
4752 		task_watchport_elem_init(&watchports->tw_elem[i], task, port);
4753 		if (port == NULL) {
4754 			task_watchport_elem_clear(&watchports->tw_elem[i]);
4755 			continue;
4756 		}
4757 
4758 		ip_mq_lock(port);
4759 
4760 		/* Check if port is in valid state to be setup as watchport */
4761 		if (ipc_port_add_watchport_elem_locked(port, &watchports->tw_elem[i],
4762 		    &previous_elem_array[j]) != KERN_SUCCESS) {
4763 			task_watchport_elem_clear(&watchports->tw_elem[i]);
4764 			continue;
4765 		}
4766 		/* port unlocked on return */
4767 
4768 		ip_reference(port);
4769 		task_watchports_retain(watchports);
4770 		if (previous_elem_array[j] != NULL) {
4771 			j++;
4772 		}
4773 	}
4774 
4775 	/* Drop the reference on task_watchport struct returned by os_ref_init */
4776 	refs = task_watchports_release(watchports);
4777 	if (refs == 0) {
4778 		task->watchports = NULL;
4779 	}
4780 
4781 	return refs;
4782 }
4783 
4784 /*
4785  *	task_remove_turnstile_watchports_locked:
4786  *		Clear all turnstile boosts on the task from its watchports.
4787  *
4788  *	Arguments:
4789  *		task: task to remove watchports from
4790  *		watchports: watchports structure for the task
4791  *		port_freelist: array of ports returned with ref to caller
4792  *
4793  *
4794  *	Conditions:
4795  *		ipc space of the task locked.
4796  *		array of ports with refs are returned in port_freelist
4797  */
4798 static os_ref_count_t
4799 task_remove_turnstile_watchports_locked(
4800 	task_t                 task,
4801 	struct task_watchports *watchports,
4802 	ipc_port_t             *port_freelist)
4803 {
4804 	os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4805 
4806 	for (uint32_t i = 0, j = 0; i < watchports->tw_elem_array_count; i++) {
4807 		ipc_port_t port = watchports->tw_elem[i].twe_port;
4808 		if (port == NULL) {
4809 			continue;
4810 		}
4811 
4812 		/* Lock the port and check if it has the entry */
4813 		ip_mq_lock(port);
4814 		if (ipc_port_clear_watchport_elem_internal_conditional_locked(port,
4815 		    &watchports->tw_elem[i]) == KERN_SUCCESS) {
4816 			task_watchport_elem_clear(&watchports->tw_elem[i]);
4817 			port_freelist[j++] = port;
4818 			refs = task_watchports_release(watchports);
4819 
4820 			/* Check if all ports are cleaned */
4821 			if (refs == 0) {
4822 				task->watchports = NULL;
4823 				break;
4824 			}
4825 		}
4826 		/* mqueue and port unlocked by ipc_port_clear_watchport_elem_internal_conditional_locked */
4827 	}
4828 	return refs;
4829 }
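
/*
 * A minimal caller sketch (hypothetical, not part of the original
 * source): the ports returned in port_freelist still carry the
 * references taken at attach time, so the caller releases them after
 * dropping the space lock:
 *
 *	ipc_port_t freelist[TASK_MAX_WATCHPORT_COUNT] = {};
 *	uint32_t count = watchports->tw_elem_array_count;
 *
 *	is_write_lock(task->itk_space);
 *	(void)task_remove_turnstile_watchports_locked(task, watchports,
 *	    freelist);
 *	is_write_unlock(task->itk_space);
 *
 *	for (uint32_t i = 0; i < count && freelist[i] != IPC_PORT_NULL; i++) {
 *		ip_release(freelist[i]);
 *	}
 */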
4830 
4831 /*
4832  *	task_watchports_alloc_init:
4833  *		Allocate and initialize task watchport struct.
4834  *
4835  *	Conditions:
4836  *		Nothing locked.
4837  */
4838 static struct task_watchports *
4839 task_watchports_alloc_init(
4840 	task_t        task,
4841 	thread_t      thread,
4842 	uint32_t      count)
4843 {
4844 	struct task_watchports *watchports = kalloc_type(struct task_watchports,
4845 	    struct task_watchport_elem, count, Z_WAITOK | Z_ZERO | Z_NOFAIL);
4846 
4847 	task_reference(task);
4848 	thread_reference(thread);
4849 	watchports->tw_task = task;
4850 	watchports->tw_thread = thread;
4851 	watchports->tw_elem_array_count = count;
4852 	os_ref_init(&watchports->tw_refcount, &task_watchports_refgrp);
4853 
4854 	return watchports;
4855 }
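
/*
 * Usage note: os_ref_init() above starts the refcount at 1.  That
 * initial reference is dropped by task_add_turnstile_watchports_locked()
 * after it has taken one reference per successfully attached element,
 * so if nothing attaches the count falls to 0 and the caller frees the
 * struct.  A minimal sketch, assuming a single port to watch:
 *
 *	struct task_watchports *w =
 *	    task_watchports_alloc_init(task, thread, 1);
 */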
4856 
4857 /*
4858  *	task_watchports_deallocate:
4859  *		Deallocate task watchport struct.
4860  *
4861  *	Conditions:
4862  *		Nothing locked.
4863  */
4864 static void
4865 task_watchports_deallocate(
4866 	struct task_watchports *watchports)
4867 {
4868 	uint32_t portwatch_count = watchports->tw_elem_array_count;
4869 
4870 	task_deallocate(watchports->tw_task);
4871 	thread_deallocate(watchports->tw_thread);
4872 	kfree_type(struct task_watchports, struct task_watchport_elem,
4873 	    portwatch_count, watchports);
4874 }
4875 
4876 /*
4877  *	task_watchport_elem_deallocate:
4878  *		Deallocate task watchport element and release its ref on task_watchport.
4879  *
4880  *	Conditions:
4881  *		Nothing locked.
4882  */
4883 void
4884 task_watchport_elem_deallocate(
4885 	struct task_watchport_elem *watchport_elem)
4886 {
4887 	os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4888 	task_t task = watchport_elem->twe_task;
4889 	struct task_watchports *watchports = NULL;
4890 	ipc_port_t port = NULL;
4891 
4892 	assert(task != NULL);
4893 
4894 	/* Take the space lock to modify the element */
4895 	is_write_lock(task->itk_space);
4896 
4897 	watchports = task->watchports;
4898 	assert(watchports != NULL);
4899 
4900 	port = watchport_elem->twe_port;
4901 	assert(port != NULL);
4902 
4903 	task_watchport_elem_clear(watchport_elem);
4904 	refs = task_watchports_release(watchports);
4905 
4906 	if (refs == 0) {
4907 		task->watchports = NULL;
4908 	}
4909 
4910 	is_write_unlock(task->itk_space);
4911 
4912 	ip_release(port);
4913 	if (refs == 0) {
4914 		task_watchports_deallocate(watchports);
4915 	}
4916 }
4917 
4918 /*
4919  *	task_has_watchports:
4920  *		Return TRUE if task has watchport boosts.
4921  *
4922  *	Conditions:
4923  *		Nothing locked.
4924  */
4925 boolean_t
4926 task_has_watchports(task_t task)
4927 {
4928 	return task->watchports != NULL;
4929 }
4930 
4931 #if DEVELOPMENT || DEBUG
4932 
4933 extern void IOSleep(int);
4934 
4935 kern_return_t
4936 task_disconnect_page_mappings(task_t task)
4937 {
4938 	int     n;
4939 
4940 	if (task == TASK_NULL || task == kernel_task) {
4941 		return KERN_INVALID_ARGUMENT;
4942 	}
4943 
4944 	/*
4945 	 * this function strips all of the mappings from the pmap
4946 	 * for the specified task, forcing the task to re-fault all
4947 	 * of the pages it is actively using.  This allows us to
4948 	 * approximate the true working set of the specified task.
4949 	 * We only engage if at least one of the threads in the task
4950 	 * is runnable, but we want to sweep continuously, at least
4951 	 * for a while; the limit is arbitrarily set at 100 sweeps
4952 	 * and should be revisited as we gain experience.  Repeated
4953 	 * sweeps give a better view into which areas within a page
4954 	 * are being visited, as opposed to only seeing the first
4955 	 * fault of a page after the task becomes runnable.  In the
4956 	 * future we may try to block until awakened by a thread in
4957 	 * this task being made runnable, but for now we poll
4958 	 * periodically from the user-level debug tool driving the sysctl.
4959 	 */
4960 	for (n = 0; n < 100; n++) {
4961 		thread_t        thread;
4962 		boolean_t       runnable;
4963 		boolean_t       do_unnest;
4964 		int             page_count;
4965 
4966 		runnable = FALSE;
4967 		do_unnest = FALSE;
4968 
4969 		task_lock(task);
4970 
4971 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
4972 			if (thread->state & TH_RUN) {
4973 				runnable = TRUE;
4974 				break;
4975 			}
4976 		}
4977 		if (n == 0) {
4978 			task->task_disconnected_count++;
4979 		}
4980 
4981 		if (task->task_unnested == FALSE) {
4982 			if (runnable == TRUE) {
4983 				task->task_unnested = TRUE;
4984 				do_unnest = TRUE;
4985 			}
4986 		}
4987 		task_unlock(task);
4988 
4989 		if (runnable == FALSE) {
4990 			break;
4991 		}
4992 
4993 		KDBG_RELEASE((MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_START,
4994 		    task, do_unnest, task->task_disconnected_count);
4995 
4996 		page_count = vm_map_disconnect_page_mappings(task->map, do_unnest);
4997 
4998 		KDBG_RELEASE((MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_END,
4999 		    task, page_count);
5000 
5001 		if ((n % 5) == 4) {
5002 			IOSleep(1);
5003 		}
5004 	}
5005 	return KERN_SUCCESS;
5006 }
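
/*
 * A minimal sketch (hypothetical, not part of the original source) of
 * how a debug sysctl handler might drive this routine while a
 * user-level tool polls it during a profiling run:
 *
 *	task_t t = current_task();
 *	kern_return_t kr = task_disconnect_page_mappings(t);
 *	if (kr == KERN_INVALID_ARGUMENT) {
 *		// TASK_NULL or the kernel task was passed
 *	}
 */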
5007 
5008 #endif
5009 
5010 
5011 #if CONFIG_FREEZE
5012 
5013 /*
5014  *	task_freeze:
5015  *
5016  *	Freeze a task.
5017  *
5018  * Conditions:
5019  *      The caller holds a reference to the task
5020  */
5021 extern struct freezer_context freezer_context_global;
5022 
5023 kern_return_t
5024 task_freeze(
5025 	task_t    task,
5026 	uint32_t           *purgeable_count,
5027 	uint32_t           *wired_count,
5028 	uint32_t           *clean_count,
5029 	uint32_t           *dirty_count,
5030 	uint32_t           dirty_budget,
5031 	uint32_t           *shared_count,
5032 	int                *freezer_error_code,
5033 	boolean_t          eval_only)
5034 {
5035 	kern_return_t kr = KERN_SUCCESS;
5036 
5037 	if (task == TASK_NULL || task == kernel_task) {
5038 		return KERN_INVALID_ARGUMENT;
5039 	}
5040 
5041 	task_lock(task);
5042 
5043 	while (task->changing_freeze_state) {
5044 		assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
5045 		task_unlock(task);
5046 		thread_block(THREAD_CONTINUE_NULL);
5047 
5048 		task_lock(task);
5049 	}
5050 	if (task->frozen) {
5051 		task_unlock(task);
5052 		return KERN_FAILURE;
5053 	}
5054 	task->changing_freeze_state = TRUE;
5055 
5056 	freezer_context_global.freezer_ctx_task = task;
5057 
5058 	task_unlock(task);
5059 
5060 	kr = vm_map_freeze(task,
5061 	    purgeable_count,
5062 	    wired_count,
5063 	    clean_count,
5064 	    dirty_count,
5065 	    dirty_budget,
5066 	    shared_count,
5067 	    freezer_error_code,
5068 	    eval_only);
5069 
5070 	task_lock(task);
5071 
5072 	if ((kr == KERN_SUCCESS) && (eval_only == FALSE)) {
5073 		task->frozen = TRUE;
5074 
5075 		freezer_context_global.freezer_ctx_task = NULL;
5076 		freezer_context_global.freezer_ctx_uncompressed_pages = 0;
5077 
5078 		if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
5079 			/*
5080 			 * reset the counter tracking the # of swapped compressed pages
5081 			 * because we are now done with this freeze session and task.
5082 			 */
5083 
5084 			*dirty_count = (uint32_t) (freezer_context_global.freezer_ctx_swapped_bytes / PAGE_SIZE_64);         /* used to track pageouts */
5085 		}
5086 
5087 		freezer_context_global.freezer_ctx_swapped_bytes = 0;
5088 	}
5089 
5090 	task->changing_freeze_state = FALSE;
5091 	thread_wakeup(&task->changing_freeze_state);
5092 
5093 	task_unlock(task);
5094 
5095 	if (VM_CONFIG_COMPRESSOR_IS_PRESENT &&
5096 	    (kr == KERN_SUCCESS) &&
5097 	    (eval_only == FALSE)) {
5098 		vm_wake_compactor_swapper();
5099 		/*
5100 		 * We do an explicit wakeup of the swapout thread here
5101 		 * because the compact_and_swap routines don't have
5102 		 * knowledge about these kinds of "per-task packed c_segs"
5103 		 * and so will not be evaluating whether we need to do
5104 		 * a wakeup there.
5105 		 */
5106 		thread_wakeup((event_t)&vm_swapout_thread);
5107 	}
5108 
5109 	return kr;
5110 }
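
/*
 * A minimal caller sketch (hypothetical, not part of the original
 * source), assuming the caller already holds a task reference as the
 * header comment requires.  Passing eval_only == TRUE sizes the work
 * without marking the task frozen; the UINT32_MAX dirty budget is an
 * illustrative assumption:
 *
 *	uint32_t purgeable, wired, clean, dirty, shared;
 *	int ferr = 0;
 *	kern_return_t kr = task_freeze(task, &purgeable, &wired, &clean,
 *	    &dirty, UINT32_MAX, &shared, &ferr, FALSE);
 *	if (kr == KERN_SUCCESS) {
 *		// ... later, undo with:
 *		kr = task_thaw(task);
 *	}
 */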
5111 
5112 /*
5113  *	task_thaw:
5114  *
5115  *	Thaw a currently frozen task.
5116  *
5117  * Conditions:
5118  *      The caller holds a reference to the task
5119  */
5120 kern_return_t
5121 task_thaw(
5122 	task_t          task)
5123 {
5124 	if (task == TASK_NULL || task == kernel_task) {
5125 		return KERN_INVALID_ARGUMENT;
5126 	}
5127 
5128 	task_lock(task);
5129 
5130 	while (task->changing_freeze_state) {
5131 		assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
5132 		task_unlock(task);
5133 		thread_block(THREAD_CONTINUE_NULL);
5134 
5135 		task_lock(task);
5136 	}
5137 	if (!task->frozen) {
5138 		task_unlock(task);
5139 		return KERN_FAILURE;
5140 	}
5141 	task->frozen = FALSE;
5142 
5143 	task_unlock(task);
5144 
5145 	return KERN_SUCCESS;
5146 }
5147 
5148 void
5149 task_update_frozen_to_swap_acct(task_t task, int64_t amount, freezer_acct_op_t op)
5150 {
5151 	/*
5152 	 * We don't assert that the task lock is held because we call this
5153 	 * routine from the decompression path and we won't be holding the
5154 	 * task lock. However, since we are in the context of the task we are
5155 	 * task lock. However, since we are in the context of the task, we are
5156 	 * In the case of the task_freeze path, we call it from behind the task
5157 	 * lock but we don't need to because we have a reference on the proc
5158 	 * being frozen.
5159 	 */
5160 
5161 	assert(task);
5162 	if (amount == 0) {
5163 		return;
5164 	}
5165 
5166 	if (op == CREDIT_TO_SWAP) {
5167 		ledger_credit_nocheck(task->ledger, task_ledgers.frozen_to_swap, amount);
5168 	} else if (op == DEBIT_FROM_SWAP) {
5169 		ledger_debit_nocheck(task->ledger, task_ledgers.frozen_to_swap, amount);
5170 	} else {
5171 		panic("task_update_frozen_to_swap_acct: Invalid ledger op");
5172 	}
5173 }
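
/*
 * A minimal sketch (hypothetical, not part of the original source):
 * the swapout path credits bytes moved to swap and the decompression
 * path debits them, keeping the frozen_to_swap ledger balanced:
 *
 *	task_update_frozen_to_swap_acct(task, (int64_t)bytes, CREDIT_TO_SWAP);
 *	// ... pages brought back in ...
 *	task_update_frozen_to_swap_acct(task, (int64_t)bytes, DEBIT_FROM_SWAP);
 */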
5174 #endif /* CONFIG_FREEZE */
5175 
5176 kern_return_t
5177 task_set_security_tokens(
5178 	task_t           task,
5179 	security_token_t sec_token,
5180 	audit_token_t    audit_token,
5181 	host_priv_t      host_priv)
5182 {
5183 	ipc_port_t       host_port = IP_NULL;
5184 	kern_return_t    kr;
5185 
5186 	if (task == TASK_NULL) {
5187 		return KERN_INVALID_ARGUMENT;
5188 	}
5189 
5190 	task_lock(task);
5191 	task_set_tokens(task, &sec_token, &audit_token);
5192 	task_unlock(task);
5193 
5194 	if (host_priv != HOST_PRIV_NULL) {
5195 		kr = host_get_host_priv_port(host_priv, &host_port);
5196 	} else {
5197 		kr = host_get_host_port(host_priv_self(), &host_port);
5198 	}
5199 	assert(kr == KERN_SUCCESS);
5200 
5201 	kr = task_set_special_port_internal(task, TASK_HOST_PORT, host_port);
5202 	return kr;
5203 }
5204 
5205 kern_return_t
5206 task_send_trace_memory(
5207 	__unused task_t   target_task,
5208 	__unused uint32_t pid,
5209 	__unused uint64_t uniqueid)
5210 {
5211 	return KERN_INVALID_ARGUMENT;
5212 }
5213 
5214 /*
5215  * This routine was added, pretty much exclusively, for registering the
5216  * RPC glue vector for in-kernel short circuited tasks.  Rather than
5217  * removing it completely, I have only disabled that feature (which was
5218  * the only feature at the time).  It just appears that we are going to
5219  * want to add some user data to tasks in the future (i.e. bsd info,
5220  * task names, etc...), so I left it in the formal task interface.
5221  */
5222 kern_return_t
5223 task_set_info(
5224 	task_t          task,
5225 	task_flavor_t   flavor,
5226 	__unused task_info_t    task_info_in,           /* pointer to IN array */
5227 	__unused mach_msg_type_number_t task_info_count)
5228 {
5229 	if (task == TASK_NULL) {
5230 		return KERN_INVALID_ARGUMENT;
5231 	}
5232 	switch (flavor) {
5233 #if CONFIG_ATM
5234 	case TASK_TRACE_MEMORY_INFO:
5235 		return KERN_NOT_SUPPORTED;
5236 #endif // CONFIG_ATM
5237 	default:
5238 		return KERN_INVALID_ARGUMENT;
5239 	}
5240 }
5241 
5242 static void
5243 _task_fill_times(task_t task, time_value_t *user_time, time_value_t *sys_time)
5244 {
5245 	clock_sec_t sec;
5246 	clock_usec_t usec;
5247 
5248 	struct recount_times_mach times = recount_task_terminated_times(task);
5249 	absolutetime_to_microtime(times.rtm_user, &sec, &usec);
5250 	user_time->seconds = (typeof(user_time->seconds))sec;
5251 	user_time->microseconds = usec;
5252 	absolutetime_to_microtime(times.rtm_system, &sec, &usec);
5253 	sys_time->seconds = (typeof(sys_time->seconds))sec;
5254 	sys_time->microseconds = usec;
5255 }
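
/*
 * A minimal sketch (hypothetical, not part of the original source) of
 * consuming the time_value_t pair this helper fills in, e.g. to report
 * total CPU microseconds:
 *
 *	time_value_t u, s;
 *	_task_fill_times(task, &u, &s);
 *	uint64_t total_usec = (uint64_t)u.seconds * USEC_PER_SEC + u.microseconds +
 *	    (uint64_t)s.seconds * USEC_PER_SEC + s.microseconds;
 */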
5256 
5257 int radar_20146450 = 1;
5258 kern_return_t
5259 task_info(
5260 	task_t                  task,
5261 	task_flavor_t           flavor,
5262 	task_info_t             task_info_out,
5263 	mach_msg_type_number_t  *task_info_count)
5264 {
5265 	kern_return_t error = KERN_SUCCESS;
5266 	mach_msg_type_number_t  original_task_info_count;
5267 	bool is_kernel_task = (task == kernel_task);
5268 
5269 	if (task == TASK_NULL) {
5270 		return KERN_INVALID_ARGUMENT;
5271 	}
5272 
5273 	original_task_info_count = *task_info_count;
5274 	task_lock(task);
5275 
5276 	if (task != current_task() && !task->active) {
5277 		task_unlock(task);
5278 		return KERN_INVALID_ARGUMENT;
5279 	}
5280 
5281 
5282 	switch (flavor) {
5283 	case TASK_BASIC_INFO_32:
5284 	case TASK_BASIC2_INFO_32:
5285 #if defined(__arm64__)
5286 	case TASK_BASIC_INFO_64:
5287 #endif
5288 		{
5289 			task_basic_info_32_t basic_info;
5290 			ledger_amount_t      tmp;
5291 
5292 			if (*task_info_count < TASK_BASIC_INFO_32_COUNT) {
5293 				error = KERN_INVALID_ARGUMENT;
5294 				break;
5295 			}
5296 
5297 			basic_info = (task_basic_info_32_t)task_info_out;
5298 
5299 			basic_info->virtual_size = (typeof(basic_info->virtual_size))
5300 			    vm_map_adjusted_size(is_kernel_task ? kernel_map : task->map);
5301 			if (flavor == TASK_BASIC2_INFO_32) {
5302 				/*
5303 				 * The "BASIC2" flavor gets the maximum resident
5304 				 * size instead of the current resident size...
5305 				 */
5306 				ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, &tmp);
5307 			} else {
5308 				ledger_get_balance(task->ledger, task_ledgers.phys_mem, &tmp);
5309 			}
5310 			basic_info->resident_size = (natural_t) MIN((ledger_amount_t) UINT32_MAX, tmp);
5311 
5312 			_task_fill_times(task, &basic_info->user_time,
5313 			    &basic_info->system_time);
5314 
5315 			basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5316 			basic_info->suspend_count = task->user_stop_count;
5317 
5318 			*task_info_count = TASK_BASIC_INFO_32_COUNT;
5319 			break;
5320 		}
5321 
5322 #if defined(__arm64__)
5323 	case TASK_BASIC_INFO_64_2:
5324 	{
5325 		task_basic_info_64_2_t  basic_info;
5326 
5327 		if (*task_info_count < TASK_BASIC_INFO_64_2_COUNT) {
5328 			error = KERN_INVALID_ARGUMENT;
5329 			break;
5330 		}
5331 
5332 		basic_info = (task_basic_info_64_2_t)task_info_out;
5333 
5334 		basic_info->virtual_size  = vm_map_adjusted_size(is_kernel_task ?
5335 		    kernel_map : task->map);
5336 		ledger_get_balance(task->ledger, task_ledgers.phys_mem,
5337 		    (ledger_amount_t *)&basic_info->resident_size);
5338 		basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5339 		basic_info->suspend_count = task->user_stop_count;
5340 		_task_fill_times(task, &basic_info->user_time,
5341 		    &basic_info->system_time);
5342 
5343 		*task_info_count = TASK_BASIC_INFO_64_2_COUNT;
5344 		break;
5345 	}
5346 
5347 #else /* defined(__arm64__) */
5348 	case TASK_BASIC_INFO_64:
5349 	{
5350 		task_basic_info_64_t basic_info;
5351 
5352 		if (*task_info_count < TASK_BASIC_INFO_64_COUNT) {
5353 			error = KERN_INVALID_ARGUMENT;
5354 			break;
5355 		}
5356 
5357 		basic_info = (task_basic_info_64_t)task_info_out;
5358 
5359 		basic_info->virtual_size = vm_map_adjusted_size(is_kernel_task ?
5360 		    kernel_map : task->map);
5361 		ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *)&basic_info->resident_size);
5362 		basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5363 		basic_info->suspend_count = task->user_stop_count;
5364 		_task_fill_times(task, &basic_info->user_time,
5365 		    &basic_info->system_time);
5366 
5367 		*task_info_count = TASK_BASIC_INFO_64_COUNT;
5368 		break;
5369 	}
5370 #endif /* defined(__arm64__) */
5371 
5372 	case MACH_TASK_BASIC_INFO:
5373 	{
5374 		mach_task_basic_info_t  basic_info;
5375 
5376 		if (*task_info_count < MACH_TASK_BASIC_INFO_COUNT) {
5377 			error = KERN_INVALID_ARGUMENT;
5378 			break;
5379 		}
5380 
5381 		basic_info = (mach_task_basic_info_t)task_info_out;
5382 
5383 		basic_info->virtual_size = vm_map_adjusted_size(is_kernel_task ?
5384 		    kernel_map : task->map);
5385 		ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &basic_info->resident_size);
5386 		ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &basic_info->resident_size_max);
5387 		basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5388 		basic_info->suspend_count = task->user_stop_count;
5389 		_task_fill_times(task, &basic_info->user_time,
5390 		    &basic_info->system_time);
5391 
5392 		*task_info_count = MACH_TASK_BASIC_INFO_COUNT;
5393 		break;
5394 	}
5395 
5396 	case TASK_THREAD_TIMES_INFO:
5397 	{
5398 		task_thread_times_info_t times_info;
5399 		thread_t                 thread;
5400 
5401 		if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
5402 			error = KERN_INVALID_ARGUMENT;
5403 			break;
5404 		}
5405 
5406 		times_info = (task_thread_times_info_t)task_info_out;
5407 		times_info->user_time = (time_value_t){ 0 };
5408 		times_info->system_time = (time_value_t){ 0 };
5409 
5410 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
5411 			if ((thread->options & TH_OPT_IDLE_THREAD) == 0) {
5412 				time_value_t user_time, system_time;
5413 
5414 				thread_read_times(thread, &user_time, &system_time, NULL);
5415 				time_value_add(&times_info->user_time, &user_time);
5416 				time_value_add(&times_info->system_time, &system_time);
5417 			}
5418 		}
5419 
5420 		*task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
5421 		break;
5422 	}
5423 
5424 	case TASK_ABSOLUTETIME_INFO:
5425 	{
5426 		task_absolutetime_info_t        info;
5427 
5428 		if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT) {
5429 			error = KERN_INVALID_ARGUMENT;
5430 			break;
5431 		}
5432 
5433 		info = (task_absolutetime_info_t)task_info_out;
5434 
5435 		struct recount_times_mach term_times =
5436 		    recount_task_terminated_times(task);
5437 		struct recount_times_mach total_times = recount_task_times(task);
5438 
5439 		info->total_user = total_times.rtm_user;
5440 		info->total_system = total_times.rtm_system;
5441 		info->threads_user = total_times.rtm_user - term_times.rtm_user;
5442 		info->threads_system = total_times.rtm_system - term_times.rtm_system;
5443 
5444 		*task_info_count = TASK_ABSOLUTETIME_INFO_COUNT;
5445 		break;
5446 	}
5447 
5448 	case TASK_DYLD_INFO:
5449 	{
5450 		task_dyld_info_t info;
5451 
5452 		/*
5453 		 * We added the format field to TASK_DYLD_INFO output.  For
5454 		 * temporary backward compatibility, accept the fact that
5455 		 * clients may ask for the old version - distinguished by the
5456 		 * size of the expected result structure.
5457 		 */
5458 #define TASK_LEGACY_DYLD_INFO_COUNT \
5459 	        offsetof(struct task_dyld_info, all_image_info_format)/sizeof(natural_t)
5460 
5461 		if (*task_info_count < TASK_LEGACY_DYLD_INFO_COUNT) {
5462 			error = KERN_INVALID_ARGUMENT;
5463 			break;
5464 		}
5465 
5466 		info = (task_dyld_info_t)task_info_out;
5467 		info->all_image_info_addr = task->all_image_info_addr;
5468 		info->all_image_info_size = task->all_image_info_size;
5469 
5470 		/* only set format on output for those expecting it */
5471 		if (*task_info_count >= TASK_DYLD_INFO_COUNT) {
5472 			info->all_image_info_format = task_has_64Bit_addr(task) ?
5473 			    TASK_DYLD_ALL_IMAGE_INFO_64 :
5474 			    TASK_DYLD_ALL_IMAGE_INFO_32;
5475 			*task_info_count = TASK_DYLD_INFO_COUNT;
5476 		} else {
5477 			*task_info_count = TASK_LEGACY_DYLD_INFO_COUNT;
5478 		}
5479 		break;
5480 	}
5481 
5482 	case TASK_EXTMOD_INFO:
5483 	{
5484 		task_extmod_info_t info;
5485 		void *p;
5486 
5487 		if (*task_info_count < TASK_EXTMOD_INFO_COUNT) {
5488 			error = KERN_INVALID_ARGUMENT;
5489 			break;
5490 		}
5491 
5492 		info = (task_extmod_info_t)task_info_out;
5493 
5494 		p = get_bsdtask_info(task);
5495 		if (p) {
5496 			proc_getexecutableuuid(p, info->task_uuid, sizeof(info->task_uuid));
5497 		} else {
5498 			bzero(info->task_uuid, sizeof(info->task_uuid));
5499 		}
5500 		info->extmod_statistics = task->extmod_statistics;
5501 		*task_info_count = TASK_EXTMOD_INFO_COUNT;
5502 
5503 		break;
5504 	}
5505 
5506 	case TASK_KERNELMEMORY_INFO:
5507 	{
5508 		task_kernelmemory_info_t        tkm_info;
5509 		ledger_amount_t                 credit, debit;
5510 
5511 		if (*task_info_count < TASK_KERNELMEMORY_INFO_COUNT) {
5512 			error = KERN_INVALID_ARGUMENT;
5513 			break;
5514 		}
5515 
5516 		tkm_info = (task_kernelmemory_info_t) task_info_out;
5517 		tkm_info->total_palloc = 0;
5518 		tkm_info->total_pfree = 0;
5519 		tkm_info->total_salloc = 0;
5520 		tkm_info->total_sfree = 0;
5521 
5522 		if (task == kernel_task) {
5523 			/*
5524 			 * All shared allocs/frees from other tasks count against
5525 			 * the kernel private memory usage.  If we are looking up
5526 			 * info for the kernel task, gather from everywhere.
5527 			 */
5528 			task_unlock(task);
5529 
5530 			/* start by accounting for all the terminated tasks against the kernel */
5531 			tkm_info->total_palloc = tasks_tkm_private.alloc + tasks_tkm_shared.alloc;
5532 			tkm_info->total_pfree = tasks_tkm_private.free + tasks_tkm_shared.free;
5533 
5534 			/* count all other task/thread shared alloc/free against the kernel */
5535 			lck_mtx_lock(&tasks_threads_lock);
5536 
5537 			/* XXX this really shouldn't be using the function parameter 'task' as a local var! */
5538 			queue_iterate(&tasks, task, task_t, tasks) {
5539 				if (task == kernel_task) {
5540 					if (ledger_get_entries(task->ledger,
5541 					    task_ledgers.tkm_private, &credit,
5542 					    &debit) == KERN_SUCCESS) {
5543 						tkm_info->total_palloc += credit;
5544 						tkm_info->total_pfree += debit;
5545 					}
5546 				}
5547 				if (!ledger_get_entries(task->ledger,
5548 				    task_ledgers.tkm_shared, &credit, &debit)) {
5549 					tkm_info->total_palloc += credit;
5550 					tkm_info->total_pfree += debit;
5551 				}
5552 			}
5553 			lck_mtx_unlock(&tasks_threads_lock);
5554 		} else {
5555 			if (!ledger_get_entries(task->ledger,
5556 			    task_ledgers.tkm_private, &credit, &debit)) {
5557 				tkm_info->total_palloc = credit;
5558 				tkm_info->total_pfree = debit;
5559 			}
5560 			if (!ledger_get_entries(task->ledger,
5561 			    task_ledgers.tkm_shared, &credit, &debit)) {
5562 				tkm_info->total_salloc = credit;
5563 				tkm_info->total_sfree = debit;
5564 			}
5565 			task_unlock(task);
5566 		}
5567 
5568 		*task_info_count = TASK_KERNELMEMORY_INFO_COUNT;
5569 		return KERN_SUCCESS;
5570 	}
5571 
5572 	/* OBSOLETE */
5573 	case TASK_SCHED_FIFO_INFO:
5574 	{
5575 		if (*task_info_count < POLICY_FIFO_BASE_COUNT) {
5576 			error = KERN_INVALID_ARGUMENT;
5577 			break;
5578 		}
5579 
5580 		error = KERN_INVALID_POLICY;
5581 		break;
5582 	}
5583 
5584 	/* OBSOLETE */
5585 	case TASK_SCHED_RR_INFO:
5586 	{
5587 		policy_rr_base_t        rr_base;
5588 		uint32_t quantum_time;
5589 		uint64_t quantum_ns;
5590 
5591 		if (*task_info_count < POLICY_RR_BASE_COUNT) {
5592 			error = KERN_INVALID_ARGUMENT;
5593 			break;
5594 		}
5595 
5596 		rr_base = (policy_rr_base_t) task_info_out;
5597 
5598 		if (task != kernel_task) {
5599 			error = KERN_INVALID_POLICY;
5600 			break;
5601 		}
5602 
5603 		rr_base->base_priority = task->priority;
5604 
5605 		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
5606 		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);
5607 
5608 		rr_base->quantum = (uint32_t)(quantum_ns / 1000 / 1000);
5609 
5610 		*task_info_count = POLICY_RR_BASE_COUNT;
5611 		break;
5612 	}
5613 
5614 	/* OBSOLETE */
5615 	case TASK_SCHED_TIMESHARE_INFO:
5616 	{
5617 		policy_timeshare_base_t ts_base;
5618 
5619 		if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT) {
5620 			error = KERN_INVALID_ARGUMENT;
5621 			break;
5622 		}
5623 
5624 		ts_base = (policy_timeshare_base_t) task_info_out;
5625 
5626 		if (task == kernel_task) {
5627 			error = KERN_INVALID_POLICY;
5628 			break;
5629 		}
5630 
5631 		ts_base->base_priority = task->priority;
5632 
5633 		*task_info_count = POLICY_TIMESHARE_BASE_COUNT;
5634 		break;
5635 	}
5636 
5637 	case TASK_SECURITY_TOKEN:
5638 	{
5639 		security_token_t        *sec_token_p;
5640 
5641 		if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
5642 			error = KERN_INVALID_ARGUMENT;
5643 			break;
5644 		}
5645 
5646 		sec_token_p = (security_token_t *) task_info_out;
5647 
5648 		*sec_token_p = *task_get_sec_token(task);
5649 
5650 		*task_info_count = TASK_SECURITY_TOKEN_COUNT;
5651 		break;
5652 	}
5653 
5654 	case TASK_AUDIT_TOKEN:
5655 	{
5656 		audit_token_t   *audit_token_p;
5657 
5658 		if (*task_info_count < TASK_AUDIT_TOKEN_COUNT) {
5659 			error = KERN_INVALID_ARGUMENT;
5660 			break;
5661 		}
5662 
5663 		audit_token_p = (audit_token_t *) task_info_out;
5664 
5665 		*audit_token_p = *task_get_audit_token(task);
5666 
5667 		*task_info_count = TASK_AUDIT_TOKEN_COUNT;
5668 		break;
5669 	}
5670 
5671 	case TASK_SCHED_INFO:
5672 		error = KERN_INVALID_ARGUMENT;
5673 		break;
5674 
5675 	case TASK_EVENTS_INFO:
5676 	{
5677 		task_events_info_t      events_info;
5678 		thread_t                thread;
5679 		uint64_t                n_syscalls_mach, n_syscalls_unix, n_csw;
5680 
5681 		if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
5682 			error = KERN_INVALID_ARGUMENT;
5683 			break;
5684 		}
5685 
5686 		events_info = (task_events_info_t) task_info_out;
5687 
5688 
5689 		events_info->faults = (int32_t) MIN(counter_load(&task->faults), INT32_MAX);
5690 		events_info->pageins = (int32_t) MIN(counter_load(&task->pageins), INT32_MAX);
5691 		events_info->cow_faults = (int32_t) MIN(counter_load(&task->cow_faults), INT32_MAX);
5692 		events_info->messages_sent = (int32_t) MIN(counter_load(&task->messages_sent), INT32_MAX);
5693 		events_info->messages_received = (int32_t) MIN(counter_load(&task->messages_received), INT32_MAX);
5694 
5695 		n_syscalls_mach = task->syscalls_mach;
5696 		n_syscalls_unix = task->syscalls_unix;
5697 		n_csw = task->c_switch;
5698 
5699 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
5700 			n_csw           += thread->c_switch;
5701 			n_syscalls_mach += thread->syscalls_mach;
5702 			n_syscalls_unix += thread->syscalls_unix;
5703 		}
5704 
5705 		events_info->syscalls_mach = (int32_t) MIN(n_syscalls_mach, INT32_MAX);
5706 		events_info->syscalls_unix = (int32_t) MIN(n_syscalls_unix, INT32_MAX);
5707 		events_info->csw = (int32_t) MIN(n_csw, INT32_MAX);
5708 
5709 		*task_info_count = TASK_EVENTS_INFO_COUNT;
5710 		break;
5711 	}
5712 	case TASK_AFFINITY_TAG_INFO:
5713 	{
5714 		if (*task_info_count < TASK_AFFINITY_TAG_INFO_COUNT) {
5715 			error = KERN_INVALID_ARGUMENT;
5716 			break;
5717 		}
5718 
5719 		error = task_affinity_info(task, task_info_out, task_info_count);
5720 		break;
5721 	}
5722 	case TASK_POWER_INFO:
5723 	{
5724 		if (*task_info_count < TASK_POWER_INFO_COUNT) {
5725 			error = KERN_INVALID_ARGUMENT;
5726 			break;
5727 		}
5728 
5729 		task_power_info_locked(task, (task_power_info_t)task_info_out, NULL, NULL, NULL);
5730 		break;
5731 	}
5732 
5733 	case TASK_POWER_INFO_V2:
5734 	{
5735 		if (*task_info_count < TASK_POWER_INFO_V2_COUNT_OLD) {
5736 			error = KERN_INVALID_ARGUMENT;
5737 			break;
5738 		}
5739 		task_power_info_v2_t tpiv2 = (task_power_info_v2_t) task_info_out;
5740 		task_power_info_locked(task, &tpiv2->cpu_energy, &tpiv2->gpu_energy, tpiv2, NULL);
5741 		break;
5742 	}
5743 
5744 	case TASK_VM_INFO:
5745 	case TASK_VM_INFO_PURGEABLE:
5746 	{
5747 		task_vm_info_t          vm_info;
5748 		vm_map_t                map;
5749 		ledger_amount_t         tmp_amount;
5750 
5751 		struct proc *p;
5752 		uint32_t platform, sdk;
5753 		p = current_proc();
5754 		platform = proc_platform(p);
5755 		sdk = proc_sdk(p);
5756 		if (original_task_info_count > TASK_VM_INFO_COUNT) {
5757 			/*
5758 			 * Some iOS apps pass an incorrect value for
5759 			 * task_info_count, expressed in number of bytes
5760 			 * instead of number of "natural_t" elements, which
5761 			 * can lead to binary compatibility issues (including
5762 			 * stack corruption) when the data structure is
5763 			 * expanded in the future.
5764 			 * Let's make this potential issue visible by
5765 			 * logging about it...
5766 			 */
5767 			if (!proc_is_simulated(p)) {
5768 				os_log(OS_LOG_DEFAULT, "%s[%d] task_info: possibly invalid "
5769 				    "task_info_count %d > TASK_VM_INFO_COUNT=%d on platform %d sdk "
5770 				    "%d.%d.%d - please use TASK_VM_INFO_COUNT",
5771 				    proc_name_address(p), proc_pid(p),
5772 				    original_task_info_count, TASK_VM_INFO_COUNT,
5773 				    platform, (sdk >> 16), ((sdk >> 8) & 0xff), (sdk & 0xff));
5774 			}
5775 			DTRACE_VM4(suspicious_task_vm_info_count,
5776 			    mach_msg_type_number_t, original_task_info_count,
5777 			    mach_msg_type_number_t, TASK_VM_INFO_COUNT,
5778 			    uint32_t, platform,
5779 			    uint32_t, sdk);
5780 		}
5781 #if __arm64__
5782 		if (original_task_info_count > TASK_VM_INFO_REV2_COUNT &&
5783 		    platform == PLATFORM_IOS &&
5784 		    sdk != 0 &&
5785 		    (sdk >> 16) <= 12) {
5786 			/*
5787 			 * Some iOS apps pass an incorrect value for
5788 			 * task_info_count, expressed in number of bytes
5789 			 * instead of number of "natural_t" elements.
5790 			 * For the sake of backwards binary compatibility
5791 			 * for apps built with an iOS12 or older SDK and using
5792 			 * the "rev2" data structure, let's fix task_info_count
5793 			 * for them, to avoid stomping past the actual end
5794 			 * of their buffer.
5795 			 */
5796 #if DEVELOPMENT || DEBUG
5797 			printf("%s:%d %d[%s] rdar://49484582 task_info_count %d -> %d "
5798 			    "platform %d sdk %d.%d.%d\n", __FUNCTION__, __LINE__, proc_pid(p),
5799 			    proc_name_address(p), original_task_info_count,
5800 			    TASK_VM_INFO_REV2_COUNT, platform, (sdk >> 16),
5801 			    ((sdk >> 8) & 0xff), (sdk & 0xff));
5802 #endif /* DEVELOPMENT || DEBUG */
5803 			DTRACE_VM4(workaround_task_vm_info_count,
5804 			    mach_msg_type_number_t, original_task_info_count,
5805 			    mach_msg_type_number_t, TASK_VM_INFO_REV2_COUNT,
5806 			    uint32_t, platform,
5807 			    uint32_t, sdk);
5808 			original_task_info_count = TASK_VM_INFO_REV2_COUNT;
5809 			*task_info_count = original_task_info_count;
5810 		}
5811 		if (original_task_info_count > TASK_VM_INFO_REV5_COUNT &&
5812 		    platform == PLATFORM_IOS &&
5813 		    sdk != 0 &&
5814 		    (sdk >> 16) <= 15) {
5815 			/*
5816 			 * Some iOS apps pass an incorrect value for
5817 			 * task_info_count, expressed in number of bytes
5818 			 * instead of number of "natural_t" elements.
5819 			 */
5820 			printf("%s:%d %d[%s] task_info_count=%d > TASK_VM_INFO_REV5_COUNT=%d "
5821 			    "platform %d sdk %d.%d.%d\n", __FUNCTION__, __LINE__, proc_pid(p),
5822 			    proc_name_address(p), original_task_info_count,
5823 			    TASK_VM_INFO_REV5_COUNT, platform, (sdk >> 16),
5824 			    ((sdk >> 8) & 0xff), (sdk & 0xff));
5825 			DTRACE_VM4(workaround_task_vm_info_count,
5826 			    mach_msg_type_number_t, original_task_info_count,
5827 			    mach_msg_type_number_t, TASK_VM_INFO_REV5_COUNT,
5828 			    uint32_t, platform,
5829 			    uint32_t, sdk);
5830 #if DEVELOPMENT || DEBUG
5831 			/*
5832 			 * For the sake of internal builds livability,
5833 			 * work around this user-space bug by capping the
5834 			 * buffer's size to what it was with the iOS15 SDK.
5835 			 */
5836 			original_task_info_count = TASK_VM_INFO_REV5_COUNT;
5837 			*task_info_count = original_task_info_count;
5838 #endif /* DEVELOPMENT || DEBUG */
5839 		}
5840 
5841 		if (original_task_info_count > TASK_VM_INFO_REV7_COUNT &&
5842 		    platform == PLATFORM_IOS &&
5843 		    sdk != 0 &&
5844 		    (sdk >> 16) == 17) {
5845 			/*
5846 			 * Some iOS apps still pass an incorrect value for
5847 			 * task_info_count, expressed in number of bytes
5848 			 * instead of number of "natural_t" elements.
5849 			 */
5850 			printf("%s:%d %d[%s] task_info_count=%d > TASK_VM_INFO_REV7_COUNT=%d "
5851 			    "platform %d sdk %d.%d.%d\n", __FUNCTION__, __LINE__, proc_pid(p),
5852 			    proc_name_address(p), original_task_info_count,
5853 			    TASK_VM_INFO_REV7_COUNT, platform, (sdk >> 16),
5854 			    ((sdk >> 8) & 0xff), (sdk & 0xff));
5855 			DTRACE_VM4(workaround_task_vm_info_count,
5856 			    mach_msg_type_number_t, original_task_info_count,
5857 			    mach_msg_type_number_t, TASK_VM_INFO_REV6_COUNT,
5858 			    uint32_t, platform,
5859 			    uint32_t, sdk);
5860 #if DEVELOPMENT || DEBUG
5861 			/*
5862 			 * For the sake of internal builds livability,
5863 			 * work around this user-space bug by capping the
5864 			 * buffer's size to what it was with the iOS15 and iOS16 SDKs.
5865 			 */
5866 			original_task_info_count = TASK_VM_INFO_REV6_COUNT;
5867 			*task_info_count = original_task_info_count;
5868 #endif /* DEVELOPMENT || DEBUG */
5869 		}
5870 #endif /* __arm64__ */
5871 
5872 		if (*task_info_count < TASK_VM_INFO_REV0_COUNT) {
5873 			error = KERN_INVALID_ARGUMENT;
5874 			break;
5875 		}
5876 
5877 		vm_info = (task_vm_info_t)task_info_out;
5878 
5879 		/*
5880 		 * Do not hold both the task and map locks,
5881 		 * so convert the task lock into a map reference,
5882 		 * drop the task lock, then lock the map.
5883 		 */
5884 		if (is_kernel_task) {
5885 			map = kernel_map;
5886 			task_unlock(task);
5887 			/* no lock, no reference */
5888 		} else {
5889 			map = task->map;
5890 			vm_map_reference(map);
5891 			task_unlock(task);
5892 			vm_map_lock_read(map);
5893 		}
5894 
5895 		vm_info->virtual_size = (typeof(vm_info->virtual_size))vm_map_adjusted_size(map);
5896 		vm_info->region_count = map->hdr.nentries;
5897 		vm_info->page_size = vm_map_page_size(map);
5898 
5899 		ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &vm_info->resident_size);
5900 		ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &vm_info->resident_size_peak);
5901 
5902 		vm_info->device = 0;
5903 		vm_info->device_peak = 0;
5904 		ledger_get_balance(task->ledger, task_ledgers.external, (ledger_amount_t *) &vm_info->external);
5905 		ledger_get_lifetime_max(task->ledger, task_ledgers.external, (ledger_amount_t *) &vm_info->external_peak);
5906 		ledger_get_balance(task->ledger, task_ledgers.internal, (ledger_amount_t *) &vm_info->internal);
5907 		ledger_get_lifetime_max(task->ledger, task_ledgers.internal, (ledger_amount_t *) &vm_info->internal_peak);
5908 		ledger_get_balance(task->ledger, task_ledgers.reusable, (ledger_amount_t *) &vm_info->reusable);
5909 		ledger_get_lifetime_max(task->ledger, task_ledgers.reusable, (ledger_amount_t *) &vm_info->reusable_peak);
5910 		ledger_get_balance(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t*) &vm_info->compressed);
5911 		ledger_get_lifetime_max(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t*) &vm_info->compressed_peak);
5912 		ledger_get_entries(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t*) &vm_info->compressed_lifetime, &tmp_amount);
5913 		ledger_get_balance(task->ledger, task_ledgers.neural_nofootprint_total, (ledger_amount_t *) &vm_info->ledger_tag_neural_nofootprint_total);
5914 		ledger_get_lifetime_max(task->ledger, task_ledgers.neural_nofootprint_total, (ledger_amount_t *) &vm_info->ledger_tag_neural_nofootprint_peak);
5915 
5916 		vm_info->purgeable_volatile_pmap = 0;
5917 		vm_info->purgeable_volatile_resident = 0;
5918 		vm_info->purgeable_volatile_virtual = 0;
5919 		if (is_kernel_task) {
5920 			/*
5921 			 * We do not maintain the detailed stats for the
5922 			 * kernel_pmap, so just count everything as
5923 			 * "internal"...
5924 			 */
5925 			vm_info->internal = vm_info->resident_size;
5926 			/*
5927 			 * ... but since the memory held by the VM compressor
5928 			 * in the kernel address space ought to be attributed
5929 			 * to user-space tasks, we subtract it from "internal"
5930 			 * to give memory reporting tools a more accurate idea
5931 			 * of what the kernel itself is actually using, instead
5932 			 * of making it look like the kernel is leaking memory
5933 			 * when the system is under memory pressure.
5934 			 */
5935 			vm_info->internal -= (VM_PAGE_COMPRESSOR_COUNT *
5936 			    PAGE_SIZE);
5937 		} else {
5938 			mach_vm_size_t  volatile_virtual_size;
5939 			mach_vm_size_t  volatile_resident_size;
5940 			mach_vm_size_t  volatile_compressed_size;
5941 			mach_vm_size_t  volatile_pmap_size;
5942 			mach_vm_size_t  volatile_compressed_pmap_size;
5943 			kern_return_t   kr;
5944 
5945 			if (flavor == TASK_VM_INFO_PURGEABLE) {
5946 				kr = vm_map_query_volatile(
5947 					map,
5948 					&volatile_virtual_size,
5949 					&volatile_resident_size,
5950 					&volatile_compressed_size,
5951 					&volatile_pmap_size,
5952 					&volatile_compressed_pmap_size);
5953 				if (kr == KERN_SUCCESS) {
5954 					vm_info->purgeable_volatile_pmap =
5955 					    volatile_pmap_size;
5956 					if (radar_20146450) {
5957 						vm_info->compressed -=
5958 						    volatile_compressed_pmap_size;
5959 					}
5960 					vm_info->purgeable_volatile_resident =
5961 					    volatile_resident_size;
5962 					vm_info->purgeable_volatile_virtual =
5963 					    volatile_virtual_size;
5964 				}
5965 			}
5966 		}
5967 		*task_info_count = TASK_VM_INFO_REV0_COUNT;
5968 
5969 		if (original_task_info_count >= TASK_VM_INFO_REV2_COUNT) {
5970 			/* must be captured while we still have the map lock */
5971 			vm_info->min_address = map->min_offset;
5972 			vm_info->max_address = map->max_offset;
5973 		}
5974 
5975 		/*
5976 		 * Done with vm map things, can drop the map lock and reference,
5977 		 * and take the task lock back.
5978 		 *
5979 		 * Re-validate that the task didn't die on us.
5980 		 */
5981 		if (!is_kernel_task) {
5982 			vm_map_unlock_read(map);
5983 			vm_map_deallocate(map);
5984 		}
5985 		map = VM_MAP_NULL;
5986 
5987 		task_lock(task);
5988 
5989 		if ((task != current_task()) && (!task->active)) {
5990 			error = KERN_INVALID_ARGUMENT;
5991 			break;
5992 		}
5993 
5994 		if (original_task_info_count >= TASK_VM_INFO_REV1_COUNT) {
5995 			vm_info->phys_footprint =
5996 			    (mach_vm_size_t) get_task_phys_footprint(task);
5997 			*task_info_count = TASK_VM_INFO_REV1_COUNT;
5998 		}
5999 		if (original_task_info_count >= TASK_VM_INFO_REV2_COUNT) {
6000 			/* data was captured above */
6001 			*task_info_count = TASK_VM_INFO_REV2_COUNT;
6002 		}
6003 
6004 		if (original_task_info_count >= TASK_VM_INFO_REV3_COUNT) {
6005 			ledger_get_lifetime_max(task->ledger,
6006 			    task_ledgers.phys_footprint,
6007 			    &vm_info->ledger_phys_footprint_peak);
6008 			ledger_get_balance(task->ledger,
6009 			    task_ledgers.purgeable_nonvolatile,
6010 			    &vm_info->ledger_purgeable_nonvolatile);
6011 			ledger_get_balance(task->ledger,
6012 			    task_ledgers.purgeable_nonvolatile_compressed,
6013 			    &vm_info->ledger_purgeable_novolatile_compressed);
6014 			ledger_get_balance(task->ledger,
6015 			    task_ledgers.purgeable_volatile,
6016 			    &vm_info->ledger_purgeable_volatile);
6017 			ledger_get_balance(task->ledger,
6018 			    task_ledgers.purgeable_volatile_compressed,
6019 			    &vm_info->ledger_purgeable_volatile_compressed);
6020 			ledger_get_balance(task->ledger,
6021 			    task_ledgers.network_nonvolatile,
6022 			    &vm_info->ledger_tag_network_nonvolatile);
6023 			ledger_get_balance(task->ledger,
6024 			    task_ledgers.network_nonvolatile_compressed,
6025 			    &vm_info->ledger_tag_network_nonvolatile_compressed);
6026 			ledger_get_balance(task->ledger,
6027 			    task_ledgers.network_volatile,
6028 			    &vm_info->ledger_tag_network_volatile);
6029 			ledger_get_balance(task->ledger,
6030 			    task_ledgers.network_volatile_compressed,
6031 			    &vm_info->ledger_tag_network_volatile_compressed);
6032 			ledger_get_balance(task->ledger,
6033 			    task_ledgers.media_footprint,
6034 			    &vm_info->ledger_tag_media_footprint);
6035 			ledger_get_balance(task->ledger,
6036 			    task_ledgers.media_footprint_compressed,
6037 			    &vm_info->ledger_tag_media_footprint_compressed);
6038 			ledger_get_balance(task->ledger,
6039 			    task_ledgers.media_nofootprint,
6040 			    &vm_info->ledger_tag_media_nofootprint);
6041 			ledger_get_balance(task->ledger,
6042 			    task_ledgers.media_nofootprint_compressed,
6043 			    &vm_info->ledger_tag_media_nofootprint_compressed);
6044 			ledger_get_balance(task->ledger,
6045 			    task_ledgers.graphics_footprint,
6046 			    &vm_info->ledger_tag_graphics_footprint);
6047 			ledger_get_balance(task->ledger,
6048 			    task_ledgers.graphics_footprint_compressed,
6049 			    &vm_info->ledger_tag_graphics_footprint_compressed);
6050 			ledger_get_balance(task->ledger,
6051 			    task_ledgers.graphics_nofootprint,
6052 			    &vm_info->ledger_tag_graphics_nofootprint);
6053 			ledger_get_balance(task->ledger,
6054 			    task_ledgers.graphics_nofootprint_compressed,
6055 			    &vm_info->ledger_tag_graphics_nofootprint_compressed);
6056 			ledger_get_balance(task->ledger,
6057 			    task_ledgers.neural_footprint,
6058 			    &vm_info->ledger_tag_neural_footprint);
6059 			ledger_get_balance(task->ledger,
6060 			    task_ledgers.neural_footprint_compressed,
6061 			    &vm_info->ledger_tag_neural_footprint_compressed);
6062 			ledger_get_balance(task->ledger,
6063 			    task_ledgers.neural_nofootprint,
6064 			    &vm_info->ledger_tag_neural_nofootprint);
6065 			ledger_get_balance(task->ledger,
6066 			    task_ledgers.neural_nofootprint_compressed,
6067 			    &vm_info->ledger_tag_neural_nofootprint_compressed);
6068 			*task_info_count = TASK_VM_INFO_REV3_COUNT;
6069 		}
6070 		if (original_task_info_count >= TASK_VM_INFO_REV4_COUNT) {
6071 			if (get_bsdtask_info(task)) {
6072 				vm_info->limit_bytes_remaining =
6073 				    memorystatus_available_memory_internal(get_bsdtask_info(task));
6074 			} else {
6075 				vm_info->limit_bytes_remaining = 0;
6076 			}
6077 			*task_info_count = TASK_VM_INFO_REV4_COUNT;
6078 		}
6079 		if (original_task_info_count >= TASK_VM_INFO_REV5_COUNT) {
6080 			thread_t thread;
6081 			uint64_t total = task->decompressions;
6082 			queue_iterate(&task->threads, thread, thread_t, task_threads) {
6083 				total += thread->decompressions;
6084 			}
6085 			vm_info->decompressions = (int32_t) MIN(total, INT32_MAX);
6086 			*task_info_count = TASK_VM_INFO_REV5_COUNT;
6087 		}
6088 		if (original_task_info_count >= TASK_VM_INFO_REV6_COUNT) {
6089 			ledger_get_balance(task->ledger, task_ledgers.swapins,
6090 			    &vm_info->ledger_swapins);
6091 			*task_info_count = TASK_VM_INFO_REV6_COUNT;
6092 		}
6093 		if (original_task_info_count >= TASK_VM_INFO_REV7_COUNT) {
6094 			ledger_get_balance(task->ledger,
6095 			    task_ledgers.neural_nofootprint_total,
6096 			    &vm_info->ledger_tag_neural_nofootprint_total);
6097 			ledger_get_lifetime_max(task->ledger,
6098 			    task_ledgers.neural_nofootprint_total,
6099 			    &vm_info->ledger_tag_neural_nofootprint_peak);
6100 			*task_info_count = TASK_VM_INFO_REV7_COUNT;
6101 		}
6102 
6103 		break;
6104 	}
6105 
6106 	case TASK_WAIT_STATE_INFO:
6107 	{
6108 		/*
6109 		 * Deprecated flavor. Currently allowing some results until all users
6110 		 * stop calling it. The results may not be accurate.
6111 		 */
6112 		task_wait_state_info_t  wait_state_info;
6113 		uint64_t total_sfi_ledger_val = 0;
6114 
6115 		if (*task_info_count < TASK_WAIT_STATE_INFO_COUNT) {
6116 			error = KERN_INVALID_ARGUMENT;
6117 			break;
6118 		}
6119 
6120 		wait_state_info = (task_wait_state_info_t) task_info_out;
6121 
6122 		wait_state_info->total_wait_state_time = 0;
6123 		bzero(wait_state_info->_reserved, sizeof(wait_state_info->_reserved));
6124 
6125 #if CONFIG_SCHED_SFI
6126 		int i, prev_lentry = -1;
6127 		int64_t  val_credit, val_debit;
6128 
6129 		for (i = 0; i < MAX_SFI_CLASS_ID; i++) {
6130 			val_credit = 0;
6131 			/*
6132 			 * checking with prev_lentry != entry ensures adjacent classes
6133 			 * which share the same ledger do not add wait times twice.
6134 			 * Note: Use ledger() call to get data for each individual sfi class.
6135 			 */
6136 			if (prev_lentry != task_ledgers.sfi_wait_times[i] &&
6137 			    KERN_SUCCESS == ledger_get_entries(task->ledger,
6138 			    task_ledgers.sfi_wait_times[i], &val_credit, &val_debit)) {
6139 				total_sfi_ledger_val += val_credit;
6140 			}
6141 			prev_lentry = task_ledgers.sfi_wait_times[i];
6142 		}
6143 
6144 #endif /* CONFIG_SCHED_SFI */
6145 		wait_state_info->total_wait_sfi_state_time = total_sfi_ledger_val;
6146 		*task_info_count = TASK_WAIT_STATE_INFO_COUNT;
6147 
6148 		break;
6149 	}
6150 	case TASK_VM_INFO_PURGEABLE_ACCOUNT:
6151 	{
6152 #if DEVELOPMENT || DEBUG
6153 		pvm_account_info_t      acnt_info;
6154 
6155 		if (*task_info_count < PVM_ACCOUNT_INFO_COUNT) {
6156 			error = KERN_INVALID_ARGUMENT;
6157 			break;
6158 		}
6159 
6160 		if (task_info_out == NULL) {
6161 			error = KERN_INVALID_ARGUMENT;
6162 			break;
6163 		}
6164 
6165 		acnt_info = (pvm_account_info_t) task_info_out;
6166 
6167 		error = vm_purgeable_account(task, acnt_info);
6168 
6169 		*task_info_count = PVM_ACCOUNT_INFO_COUNT;
6170 
6171 		break;
6172 #else /* DEVELOPMENT || DEBUG */
6173 		error = KERN_NOT_SUPPORTED;
6174 		break;
6175 #endif /* DEVELOPMENT || DEBUG */
6176 	}
6177 	case TASK_FLAGS_INFO:
6178 	{
6179 		task_flags_info_t               flags_info;
6180 
6181 		if (*task_info_count < TASK_FLAGS_INFO_COUNT) {
6182 			error = KERN_INVALID_ARGUMENT;
6183 			break;
6184 		}
6185 
6186 		flags_info = (task_flags_info_t)task_info_out;
6187 
6188 		/* only publish the 64-bit flag of the task */
6189 		flags_info->flags = task->t_flags & (TF_64B_ADDR | TF_64B_DATA);
6190 
6191 		*task_info_count = TASK_FLAGS_INFO_COUNT;
6192 		break;
6193 	}
6194 
6195 	case TASK_DEBUG_INFO_INTERNAL:
6196 	{
6197 #if DEVELOPMENT || DEBUG
6198 		task_debug_info_internal_t dbg_info;
6199 		ipc_space_t space = task->itk_space;
6200 		if (*task_info_count < TASK_DEBUG_INFO_INTERNAL_COUNT) {
6201 			error = KERN_NOT_SUPPORTED;
6202 			break;
6203 		}
6204 
6205 		if (task_info_out == NULL) {
6206 			error = KERN_INVALID_ARGUMENT;
6207 			break;
6208 		}
6209 		dbg_info = (task_debug_info_internal_t) task_info_out;
6210 		dbg_info->ipc_space_size = 0;
6211 
6212 		if (space) {
6213 			smr_ipc_enter();
6214 			ipc_entry_table_t table = smr_entered_load(&space->is_table);
6215 			if (table) {
6216 				dbg_info->ipc_space_size =
6217 				    ipc_entry_table_count(table);
6218 			}
6219 			smr_ipc_leave();
6220 		}
6221 
6222 		dbg_info->suspend_count = task->suspend_count;
6223 
6224 		error = KERN_SUCCESS;
6225 		*task_info_count = TASK_DEBUG_INFO_INTERNAL_COUNT;
6226 		break;
6227 #else /* DEVELOPMENT || DEBUG */
6228 		error = KERN_NOT_SUPPORTED;
6229 		break;
6230 #endif /* DEVELOPMENT || DEBUG */
6231 	}
6232 	case TASK_SUSPEND_STATS_INFO:
6233 	{
6234 #if CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG)
6235 		if (*task_info_count < TASK_SUSPEND_STATS_INFO_COUNT || task_info_out == NULL) {
6236 			error = KERN_INVALID_ARGUMENT;
6237 			break;
6238 		}
6239 		error = _task_get_suspend_stats_locked(task, (task_suspend_stats_t)task_info_out);
6240 		*task_info_count = TASK_SUSPEND_STATS_INFO_COUNT;
6241 		break;
6242 #else /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6243 		error = KERN_NOT_SUPPORTED;
6244 		break;
6245 #endif /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6246 	}
6247 	case TASK_SUSPEND_SOURCES_INFO:
6248 	{
6249 #if CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG)
6250 		if (*task_info_count < TASK_SUSPEND_SOURCES_INFO_COUNT || task_info_out == NULL) {
6251 			error = KERN_INVALID_ARGUMENT;
6252 			break;
6253 		}
6254 		error = _task_get_suspend_sources_locked(task, (task_suspend_source_t)task_info_out);
6255 		*task_info_count = TASK_SUSPEND_SOURCES_INFO_COUNT;
6256 		break;
6257 #else /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6258 		error = KERN_NOT_SUPPORTED;
6259 		break;
6260 #endif /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6261 	}
6262 	default:
6263 		error = KERN_INVALID_ARGUMENT;
6264 	}
6265 
6266 	task_unlock(task);
6267 	return error;
6268 }
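
/*
 * User-space counterpart sketch (hypothetical, not part of the
 * original source): a typical MACH_TASK_BASIC_INFO query, which lands
 * here via task_info_from_user() below:
 *
 *	mach_task_basic_info_data_t info;
 *	mach_msg_type_number_t count = MACH_TASK_BASIC_INFO_COUNT;
 *	kern_return_t kr = task_info(mach_task_self(), MACH_TASK_BASIC_INFO,
 *	    (task_info_t)&info, &count);
 *	if (kr == KERN_SUCCESS) {
 *		// info.resident_size, info.user_time, etc. are now valid
 *	}
 */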
6269 
6270 /*
6271  * task_info_from_user
6272  *
6273  * When calling task_info from user space,
6274  * this function is executed as the MIG server side
6275  * instead of calling directly into task_info.
6276  * This makes it possible to perform additional security
6277  * checks on task_port.
6278  *
6279  * In the case of TASK_DYLD_INFO, we require the more privileged
6280  * task_read_port rather than the less-privileged task_name_port.
6281  *
6282  */
6283 kern_return_t
6284 task_info_from_user(
6285 	mach_port_t             task_port,
6286 	task_flavor_t           flavor,
6287 	task_info_t             task_info_out,
6288 	mach_msg_type_number_t  *task_info_count)
6289 {
6290 	task_t task;
6291 	kern_return_t ret;
6292 
6293 	if (flavor == TASK_DYLD_INFO) {
6294 		task = convert_port_to_task_read(task_port);
6295 	} else {
6296 		task = convert_port_to_task_name(task_port);
6297 	}
6298 
6299 	ret = task_info(task, flavor, task_info_out, task_info_count);
6300 
6301 	task_deallocate(task);
6302 
6303 	return ret;
6304 }
6305 
6306 /*
6307  * Routine: task_dyld_process_info_update_helper
6308  *
6309  * Release send rights in release_ports.
6310  *
6311  * If no active ports are found in the task's dyld notifier array, unset the
6312  * magic value in user space to indicate so.
6313  *
6314  * Condition:
6315  *      task's itk_lock is locked, and is unlocked upon return.
6316  *      Global g_dyldinfo_mtx is locked, and is unlocked upon return.
6317  */
6318 void
6319 task_dyld_process_info_update_helper(
6320 	task_t                  task,
6321 	size_t                  active_count,
6322 	vm_map_address_t        magic_addr,    /* a userspace address */
6323 	ipc_port_t             *release_ports,
6324 	size_t                  release_count)
6325 {
6326 	void *notifiers_ptr = NULL;
6327 
6328 	assert(release_count <= DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT);
6329 
6330 	if (active_count == 0) {
6331 		assert(task->itk_dyld_notify != NULL);
6332 		notifiers_ptr = task->itk_dyld_notify;
6333 		task->itk_dyld_notify = NULL;
6334 		itk_unlock(task);
6335 
6336 		kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
6337 		(void)copyoutmap_atomic32(task->map, MACH_PORT_NULL, magic_addr); /* unset magic */
6338 	} else {
6339 		itk_unlock(task);
6340 		(void)copyoutmap_atomic32(task->map, (mach_port_name_t)DYLD_PROCESS_INFO_NOTIFY_MAGIC,
6341 		    magic_addr);     /* reset magic */
6342 	}
6343 
6344 	lck_mtx_unlock(&g_dyldinfo_mtx);
6345 
6346 	for (size_t i = 0; i < release_count; i++) {
6347 		ipc_port_release_send(release_ports[i]);
6348 	}
6349 }
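
/*
 * A minimal caller sketch (hypothetical, not part of the original
 * source) of the locking protocol described above: both locks are
 * taken by the caller and released inside the helper:
 *
 *	lck_mtx_lock(&g_dyldinfo_mtx);
 *	itk_lock(task);
 *	// ... compute active_count, gather release_ports ...
 *	task_dyld_process_info_update_helper(task, active_count,
 *	    magic_addr, release_ports, release_count);
 *	// itk_lock and g_dyldinfo_mtx are unlocked on return
 */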
6350 
6351 /*
6352  * Routine: task_dyld_process_info_notify_register
6353  *
6354  * Insert a send right to target task's itk_dyld_notify array. Allocate kernel
6355  * memory for the array if it's the first port to be registered. Also clean up
6356  * any dead rights found in the array.
6357  *
6358  * Consumes sright if returns KERN_SUCCESS, otherwise MIG will destroy it.
6359  *
6360  * Args:
6361  *     task:   Target task for the registration.
6362  *     sright: A send right.
6363  *
6364  * Returns:
6365  *     KERN_SUCCESS: Registration succeeded.
6366  *     KERN_INVALID_TASK: task is invalid.
6367  *     KERN_INVALID_RIGHT: sright is invalid.
6368  *     KERN_DENIED: Security policy denied this call.
6369  *     KERN_RESOURCE_SHORTAGE: Kernel memory allocation failed.
6370  *     KERN_NO_SPACE: No available notifier port slot left for this task.
6371  *     KERN_RIGHT_EXISTS: The notifier port is already registered and active.
6372  *
6373  *     Other error code see task_info().
6374  *
6375  * See Also:
6376  *     task_dyld_process_info_notify_get_trap() in mach_kernelrpc.c
6377  */
6378 kern_return_t
6379 task_dyld_process_info_notify_register(
6380 	task_t                  task,
6381 	ipc_port_t              sright)
6382 {
6383 	struct task_dyld_info dyld_info;
6384 	mach_msg_type_number_t info_count = TASK_DYLD_INFO_COUNT;
6385 	ipc_port_t release_ports[DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT];
6386 	uint32_t release_count = 0, active_count = 0;
6387 	mach_vm_address_t ports_addr; /* a user space address */
6388 	kern_return_t kr;
6389 	boolean_t right_exists = false;
6390 	ipc_port_t *notifiers_ptr = NULL;
6391 	ipc_port_t *portp;
6392 
6393 	if (task == TASK_NULL || task == kernel_task) {
6394 		return KERN_INVALID_TASK;
6395 	}
6396 
6397 	if (!IP_VALID(sright)) {
6398 		return KERN_INVALID_RIGHT;
6399 	}
6400 
6401 #if CONFIG_MACF
6402 	if (mac_task_check_dyld_process_info_notify_register()) {
6403 		return KERN_DENIED;
6404 	}
6405 #endif
6406 
6407 	kr = task_info(task, TASK_DYLD_INFO, (task_info_t)&dyld_info, &info_count);
6408 	if (kr) {
6409 		return kr;
6410 	}
6411 
6412 	if (dyld_info.all_image_info_format == TASK_DYLD_ALL_IMAGE_INFO_32) {
6413 		ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6414 		    offsetof(struct user32_dyld_all_image_infos, notifyMachPorts));
6415 	} else {
6416 		ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6417 		    offsetof(struct user64_dyld_all_image_infos, notifyMachPorts));
6418 	}
6419 
6420 retry:
6421 	if (task->itk_dyld_notify == NULL) {
6422 		notifiers_ptr = kalloc_type(ipc_port_t,
6423 		    DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT,
6424 		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
6425 	}
6426 
6427 	lck_mtx_lock(&g_dyldinfo_mtx);
6428 	itk_lock(task);
6429 
6430 	if (task->itk_dyld_notify == NULL) {
6431 		if (notifiers_ptr == NULL) {
6432 			itk_unlock(task);
6433 			lck_mtx_unlock(&g_dyldinfo_mtx);
6434 			goto retry;
6435 		}
6436 		task->itk_dyld_notify = notifiers_ptr;
6437 		notifiers_ptr = NULL;
6438 	}
6439 
6440 	assert(task->itk_dyld_notify != NULL);
6441 	/* First pass: clear dead names and check for duplicate registration */
6442 	for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
6443 		portp = &task->itk_dyld_notify[slot];
6444 		if (*portp != IPC_PORT_NULL && !ip_active(*portp)) {
6445 			release_ports[release_count++] = *portp;
6446 			*portp = IPC_PORT_NULL;
6447 		} else if (*portp == sright) {
6448 			/* the port is already registered and is active */
6449 			right_exists = true;
6450 		}
6451 
6452 		if (*portp != IPC_PORT_NULL) {
6453 			active_count++;
6454 		}
6455 	}
6456 
6457 	if (right_exists) {
6458 		/* skip second pass */
6459 		kr = KERN_RIGHT_EXISTS;
6460 		goto out;
6461 	}
6462 
6463 	/* Second pass: register the port */
6464 	kr = KERN_NO_SPACE;
6465 	for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
6466 		portp = &task->itk_dyld_notify[slot];
6467 		if (*portp == IPC_PORT_NULL) {
6468 			*portp = sright;
6469 			active_count++;
6470 			kr = KERN_SUCCESS;
6471 			break;
6472 		}
6473 	}
6474 
6475 out:
6476 	assert(active_count > 0);
6477 
6478 	task_dyld_process_info_update_helper(task, active_count,
6479 	    (vm_map_address_t)ports_addr, release_ports, release_count);
6480 	/* itk_lock, g_dyldinfo_mtx are unlocked upon return */
6481 
6482 	kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
6483 
6484 	return kr;
6485 }
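/*
 * A minimal user-space sketch of the registration flow above, assuming a
 * MIG-generated stub of the same name and a target_task send right obtained
 * elsewhere (e.g. via task_for_pid()). Kept under #if 0 so it is never
 * compiled into the kernel. Note that the send right is consumed only on
 * KERN_SUCCESS.
 */
#if 0
	mach_port_t notify_port = MACH_PORT_NULL;
	kern_return_t kr;

	/* Allocate a receive right, then attach a send right for the kernel to use. */
	kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &notify_port);
	if (kr == KERN_SUCCESS) {
		kr = mach_port_insert_right(mach_task_self(), notify_port,
		    notify_port, MACH_MSG_TYPE_MAKE_SEND);
	}
	if (kr == KERN_SUCCESS) {
		kr = task_dyld_process_info_notify_register(target_task, notify_port);
	}
#endif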
6486 
6487 /*
6488  * Routine: task_dyld_process_info_notify_deregister
6489  *
6490  * Remove the send right in the target task's itk_dyld_notify array matching the
6491  * receive right name passed in. Deallocate kernel memory for the array if it's the
6492  * last port to be deregistered or all ports have died. Also clean up any dead rights found in the array.
6493  *
6494  * Does not consume any reference.
6495  *
6496  * Args:
6497  *     task: Target task for the deregistration.
6498  *     rcv_name: The name denoting the receive right in the caller's space.
6499  *
6500  * Returns:
6501  *     KERN_SUCCESS: A matching entry was found and deregistration succeeded.
6502  *     KERN_INVALID_TASK: task is invalid.
6503  *     KERN_INVALID_NAME: name is invalid.
6504  *     KERN_DENIED: Security policy denied this call.
6505  *     KERN_FAILURE: A matching entry is not found.
6506  *     KERN_INVALID_RIGHT: The name passed in does not represent a valid rcv right.
6507  *
6508  *     For other error codes, see task_info().
6509  *
6510  * See Also:
6511  *     task_dyld_process_info_notify_get_trap() in mach_kernelrpc.c
6512  */
6513 kern_return_t
6514 task_dyld_process_info_notify_deregister(
6515 	task_t                  task,
6516 	mach_port_name_t        rcv_name)
6517 {
6518 	struct task_dyld_info dyld_info;
6519 	mach_msg_type_number_t info_count = TASK_DYLD_INFO_COUNT;
6520 	ipc_port_t release_ports[DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT];
6521 	uint32_t release_count = 0, active_count = 0;
6522 	boolean_t port_found = false;
6523 	mach_vm_address_t ports_addr; /* a user space address */
6524 	ipc_port_t sright;
6525 	kern_return_t kr;
6526 	ipc_port_t *portp;
6527 
6528 	if (task == TASK_NULL || task == kernel_task) {
6529 		return KERN_INVALID_TASK;
6530 	}
6531 
6532 	if (!MACH_PORT_VALID(rcv_name)) {
6533 		return KERN_INVALID_NAME;
6534 	}
6535 
6536 #if CONFIG_MACF
6537 	if (mac_task_check_dyld_process_info_notify_register()) {
6538 		return KERN_DENIED;
6539 	}
6540 #endif
6541 
6542 	kr = task_info(task, TASK_DYLD_INFO, (task_info_t)&dyld_info, &info_count);
6543 	if (kr) {
6544 		return kr;
6545 	}
6546 
6547 	if (dyld_info.all_image_info_format == TASK_DYLD_ALL_IMAGE_INFO_32) {
6548 		ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6549 		    offsetof(struct user32_dyld_all_image_infos, notifyMachPorts));
6550 	} else {
6551 		ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6552 		    offsetof(struct user64_dyld_all_image_infos, notifyMachPorts));
6553 	}
6554 
6555 	kr = ipc_port_translate_receive(current_space(), rcv_name, &sright); /* does not produce port ref */
6556 	if (kr) {
6557 		return KERN_INVALID_RIGHT;
6558 	}
6559 
6560 	ip_reference(sright);
6561 	ip_mq_unlock(sright);
6562 
6563 	assert(sright != IPC_PORT_NULL);
6564 
6565 	lck_mtx_lock(&g_dyldinfo_mtx);
6566 	itk_lock(task);
6567 
6568 	if (task->itk_dyld_notify == NULL) {
6569 		itk_unlock(task);
6570 		lck_mtx_unlock(&g_dyldinfo_mtx);
6571 		ip_release(sright);
6572 		return KERN_FAILURE;
6573 	}
6574 
6575 	for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
6576 		portp = &task->itk_dyld_notify[slot];
6577 		if (*portp == sright) {
6578 			release_ports[release_count++] = *portp;
6579 			*portp = IPC_PORT_NULL;
6580 			port_found = true;
6581 		} else if ((*portp != IPC_PORT_NULL && !ip_active(*portp))) {
6582 			release_ports[release_count++] = *portp;
6583 			*portp = IPC_PORT_NULL;
6584 		}
6585 
6586 		if (*portp != IPC_PORT_NULL) {
6587 			active_count++;
6588 		}
6589 	}
6590 
6591 	task_dyld_process_info_update_helper(task, active_count,
6592 	    (vm_map_address_t)ports_addr, release_ports, release_count);
6593 	/* itk_lock, g_dyldinfo_mtx are unlocked upon return */
6594 
6595 	ip_release(sright);
6596 
6597 	return port_found ? KERN_SUCCESS : KERN_FAILURE;
6598 }
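/*
 * The matching deregistration sketch, under the same assumptions as the
 * registration sketch above: the call takes the receive right *name* in the
 * caller's IPC space (the kernel translates it to the registered send right)
 * and consumes no reference. Kept under #if 0; never compiled.
 */
#if 0
	kr = task_dyld_process_info_notify_deregister(target_task, notify_port);
	if (kr == KERN_SUCCESS) {
		/* Tear down the receive right once it is no longer registered. */
		mach_port_mod_refs(mach_task_self(), notify_port,
		    MACH_PORT_RIGHT_RECEIVE, -1);
	}
#endif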
6599 
6600 /*
6601  *	task_power_info
6602  *
6603  *	Returns power stats for the task.
6604  *	Note: Called with task locked.
6605  */
6606 void
6607 task_power_info_locked(
6608 	task_t                        task,
6609 	task_power_info_t             info,
6610 	gpu_energy_data_t             ginfo,
6611 	task_power_info_v2_t          infov2,
6612 	struct task_power_info_extra *extra_info)
6613 {
6614 	thread_t                thread;
6615 	ledger_amount_t         tmp;
6616 
6617 	uint64_t                runnable_time_sum = 0;
6618 
6619 	task_lock_assert_owned(task);
6620 
6621 	ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
6622 	    (ledger_amount_t *)&info->task_interrupt_wakeups, &tmp);
6623 	ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
6624 	    (ledger_amount_t *)&info->task_platform_idle_wakeups, &tmp);
6625 
6626 	info->task_timer_wakeups_bin_1 = task->task_timer_wakeups_bin_1;
6627 	info->task_timer_wakeups_bin_2 = task->task_timer_wakeups_bin_2;
6628 
6629 	struct recount_usage usage = { 0 };
6630 	struct recount_usage usage_perf = { 0 };
6631 	recount_task_usage_perf_only(task, &usage, &usage_perf);
6632 
6633 	info->total_user = usage.ru_metrics[RCT_LVL_USER].rm_time_mach;
6634 	info->total_system = recount_usage_system_time_mach(&usage);
6635 	runnable_time_sum = task->total_runnable_time;
6636 
6637 	if (ginfo) {
6638 		ginfo->task_gpu_utilisation = task->task_gpu_ns;
6639 	}
6640 
6641 	if (infov2) {
6642 		infov2->task_ptime = recount_usage_time_mach(&usage_perf);
6643 		infov2->task_pset_switches = task->ps_switch;
6644 #if CONFIG_PERVASIVE_ENERGY
6645 		infov2->task_energy = usage.ru_energy_nj;
6646 #endif /* CONFIG_PERVASIVE_ENERGY */
6647 	}
6648 
6649 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
6650 		spl_t x;
6651 
6652 		if (thread->options & TH_OPT_IDLE_THREAD) {
6653 			continue;
6654 		}
6655 
6656 		x = splsched();
6657 		thread_lock(thread);
6658 
6659 		info->task_timer_wakeups_bin_1 += thread->thread_timer_wakeups_bin_1;
6660 		info->task_timer_wakeups_bin_2 += thread->thread_timer_wakeups_bin_2;
6661 
6662 		if (infov2) {
6663 			infov2->task_pset_switches += thread->ps_switch;
6664 		}
6665 
6666 		runnable_time_sum += timer_grab(&thread->runnable_timer);
6667 
6668 		if (ginfo) {
6669 			ginfo->task_gpu_utilisation += ml_gpu_stat(thread);
6670 		}
6671 		thread_unlock(thread);
6672 		splx(x);
6673 	}
6674 
6675 	if (extra_info) {
6676 		extra_info->runnable_time = runnable_time_sum;
6677 #if CONFIG_PERVASIVE_CPI
6678 		extra_info->cycles = recount_usage_cycles(&usage);
6679 		extra_info->instructions = recount_usage_instructions(&usage);
6680 		extra_info->pcycles = recount_usage_cycles(&usage_perf);
6681 		extra_info->pinstructions = recount_usage_instructions(&usage_perf);
6682 		extra_info->user_ptime = usage_perf.ru_metrics[RCT_LVL_USER].rm_time_mach;
6683 		extra_info->system_ptime = recount_usage_system_time_mach(&usage_perf);
6684 #endif // CONFIG_PERVASIVE_CPI
6685 #if CONFIG_PERVASIVE_ENERGY
6686 		extra_info->energy = usage.ru_energy_nj;
6687 		extra_info->penergy = usage_perf.ru_energy_nj;
6688 #endif // CONFIG_PERVASIVE_ENERGY
6689 #if RECOUNT_SECURE_METRICS
6690 		if (PE_i_can_has_debugger(NULL)) {
6691 			extra_info->secure_time = usage.ru_metrics[RCT_LVL_SECURE].rm_time_mach;
6692 			extra_info->secure_ptime = usage_perf.ru_metrics[RCT_LVL_SECURE].rm_time_mach;
6693 		}
6694 #endif // RECOUNT_SECURE_METRICS
6695 	}
6696 }
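/*
 * A minimal kernel-side caller sketch: task_power_info_locked() must be
 * called with the task lock held (see the assertion above), and the optional
 * out-parameters may be NULL when unused, mirroring the TASK_POWER_INFO
 * handling in task_info(). Kept under #if 0; never compiled.
 */
#if 0
	task_power_info_data_t power_info;

	task_lock(task);
	task_power_info_locked(task, &power_info, NULL, NULL, NULL);
	task_unlock(task);
#endif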
6697 
6698 /*
6699  *	task_gpu_utilisation
6700  *
6701  *	Returns the total GPU time used by all the threads of the task
6702  *	(both dead and alive).
6703  */
6704 uint64_t
6705 task_gpu_utilisation(
6706 	task_t  task)
6707 {
6708 	uint64_t gpu_time = 0;
6709 #if defined(__x86_64__)
6710 	thread_t thread;
6711 
6712 	task_lock(task);
6713 	gpu_time += task->task_gpu_ns;
6714 
6715 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
6716 		spl_t x;
6717 		x = splsched();
6718 		thread_lock(thread);
6719 		gpu_time += ml_gpu_stat(thread);
6720 		thread_unlock(thread);
6721 		splx(x);
6722 	}
6723 
6724 	task_unlock(task);
6725 #else /* defined(__x86_64__) */
6726 	/* silence compiler warning */
6727 	(void)task;
6728 #endif /* defined(__x86_64__) */
6729 	return gpu_time;
6730 }
6731 
6732 /* This function updates the cpu time in the arrays for each
6733  * effective and requested QoS class
6734  */
6735 void
6736 task_update_cpu_time_qos_stats(
6737 	task_t  task,
6738 	uint64_t *eqos_stats,
6739 	uint64_t *rqos_stats)
6740 {
6741 	if (!eqos_stats && !rqos_stats) {
6742 		return;
6743 	}
6744 
6745 	task_lock(task);
6746 	thread_t thread;
6747 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
6748 		if (thread->options & TH_OPT_IDLE_THREAD) {
6749 			continue;
6750 		}
6751 
6752 		thread_update_qos_cpu_time(thread);
6753 	}
6754 
6755 	if (eqos_stats) {
6756 		eqos_stats[THREAD_QOS_DEFAULT] += task->cpu_time_eqos_stats.cpu_time_qos_default;
6757 		eqos_stats[THREAD_QOS_MAINTENANCE] += task->cpu_time_eqos_stats.cpu_time_qos_maintenance;
6758 		eqos_stats[THREAD_QOS_BACKGROUND] += task->cpu_time_eqos_stats.cpu_time_qos_background;
6759 		eqos_stats[THREAD_QOS_UTILITY] += task->cpu_time_eqos_stats.cpu_time_qos_utility;
6760 		eqos_stats[THREAD_QOS_LEGACY] += task->cpu_time_eqos_stats.cpu_time_qos_legacy;
6761 		eqos_stats[THREAD_QOS_USER_INITIATED] += task->cpu_time_eqos_stats.cpu_time_qos_user_initiated;
6762 		eqos_stats[THREAD_QOS_USER_INTERACTIVE] += task->cpu_time_eqos_stats.cpu_time_qos_user_interactive;
6763 	}
6764 
6765 	if (rqos_stats) {
6766 		rqos_stats[THREAD_QOS_DEFAULT] += task->cpu_time_rqos_stats.cpu_time_qos_default;
6767 		rqos_stats[THREAD_QOS_MAINTENANCE] += task->cpu_time_rqos_stats.cpu_time_qos_maintenance;
6768 		rqos_stats[THREAD_QOS_BACKGROUND] += task->cpu_time_rqos_stats.cpu_time_qos_background;
6769 		rqos_stats[THREAD_QOS_UTILITY] += task->cpu_time_rqos_stats.cpu_time_qos_utility;
6770 		rqos_stats[THREAD_QOS_LEGACY] += task->cpu_time_rqos_stats.cpu_time_qos_legacy;
6771 		rqos_stats[THREAD_QOS_USER_INITIATED] += task->cpu_time_rqos_stats.cpu_time_qos_user_initiated;
6772 		rqos_stats[THREAD_QOS_USER_INTERACTIVE] += task->cpu_time_rqos_stats.cpu_time_qos_user_interactive;
6773 	}
6774 
6775 	task_unlock(task);
6776 }
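/*
 * A usage sketch for the accumulation contract above: both arrays are
 * indexed by THREAD_QOS_* values and are added to rather than overwritten,
 * so callers zero them first; either pointer may be NULL to skip that view.
 * Sizing the arrays with THREAD_QOS_LAST is an assumption about the caller.
 * Kept under #if 0; never compiled.
 */
#if 0
	uint64_t eqos[THREAD_QOS_LAST] = { 0 };
	uint64_t rqos[THREAD_QOS_LAST] = { 0 };

	task_update_cpu_time_qos_stats(task, eqos, rqos);
	/* eqos[THREAD_QOS_UTILITY] now holds CPU time charged at effective utility QoS. */
#endif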
6777 
6778 kern_return_t
6779 task_purgable_info(
6780 	task_t                  task,
6781 	task_purgable_info_t    *stats)
6782 {
6783 	if (task == TASK_NULL || stats == NULL) {
6784 		return KERN_INVALID_ARGUMENT;
6785 	}
6786 	/* Take task reference */
6787 	task_reference(task);
6788 	vm_purgeable_stats((vm_purgeable_info_t)stats, task);
6789 	/* Drop task reference */
6790 	task_deallocate(task);
6791 	return KERN_SUCCESS;
6792 }
6793 
6794 void
6795 task_vtimer_set(
6796 	task_t          task,
6797 	integer_t       which)
6798 {
6799 	thread_t        thread;
6800 	spl_t           x;
6801 
6802 	task_lock(task);
6803 
6804 	task->vtimers |= which;
6805 
6806 	switch (which) {
6807 	case TASK_VTIMER_USER:
6808 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
6809 			x = splsched();
6810 			thread_lock(thread);
6811 			struct recount_times_mach times = recount_thread_times(thread);
6812 			thread->vtimer_user_save = times.rtm_user;
6813 			thread_unlock(thread);
6814 			splx(x);
6815 		}
6816 		break;
6817 
6818 	case TASK_VTIMER_PROF:
6819 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
6820 			x = splsched();
6821 			thread_lock(thread);
6822 			thread->vtimer_prof_save = recount_thread_time_mach(thread);
6823 			thread_unlock(thread);
6824 			splx(x);
6825 		}
6826 		break;
6827 
6828 	case TASK_VTIMER_RLIM:
6829 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
6830 			x = splsched();
6831 			thread_lock(thread);
6832 			thread->vtimer_rlim_save = recount_thread_time_mach(thread);
6833 			thread_unlock(thread);
6834 			splx(x);
6835 		}
6836 		break;
6837 	}
6838 
6839 	task_unlock(task);
6840 }
6841 
6842 void
6843 task_vtimer_clear(
6844 	task_t          task,
6845 	integer_t       which)
6846 {
6847 	task_lock(task);
6848 
6849 	task->vtimers &= ~which;
6850 
6851 	task_unlock(task);
6852 }
6853 
6854 void
6855 task_vtimer_update(
6856 	__unused
6857 	task_t          task,
6858 	integer_t       which,
6859 	uint32_t        *microsecs)
6860 {
6861 	thread_t        thread = current_thread();
6862 	uint32_t        tdelt = 0;
6863 	clock_sec_t     secs = 0;
6864 	uint64_t        tsum;
6865 
6866 	assert(task == current_task());
6867 
6868 	spl_t s = splsched();
6869 	thread_lock(thread);
6870 
6871 	if ((task->vtimers & which) != (uint32_t)which) {
6872 		thread_unlock(thread);
6873 		splx(s);
6874 		return;
6875 	}
6876 
6877 	switch (which) {
6878 	case TASK_VTIMER_USER:;
6879 		struct recount_times_mach times = recount_thread_times(thread);
6880 		tsum = times.rtm_user;
6881 		tdelt = (uint32_t)(tsum - thread->vtimer_user_save);
6882 		thread->vtimer_user_save = tsum;
6883 		absolutetime_to_microtime(tdelt, &secs, microsecs);
6884 		break;
6885 
6886 	case TASK_VTIMER_PROF:
6887 		tsum = recount_current_thread_time_mach();
6888 		tdelt = (uint32_t)(tsum - thread->vtimer_prof_save);
6889 		absolutetime_to_microtime(tdelt, &secs, microsecs);
6890 		/* if the time delta is smaller than a usec, ignore */
6891 		if (*microsecs != 0) {
6892 			thread->vtimer_prof_save = tsum;
6893 		}
6894 		break;
6895 
6896 	case TASK_VTIMER_RLIM:
6897 		tsum = recount_current_thread_time_mach();
6898 		tdelt = (uint32_t)(tsum - thread->vtimer_rlim_save);
6899 		thread->vtimer_rlim_save = tsum;
6900 		absolutetime_to_microtime(tdelt, &secs, microsecs);
6901 		break;
6902 	}
6903 
6904 	thread_unlock(thread);
6905 	splx(s);
6906 }
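/*
 * A sketch of the vtimer life cycle implemented by the three routines above:
 * arm the timer (snapshotting per-thread baselines), later collect the
 * current thread's delta in microseconds, then disarm. Kept under #if 0;
 * never compiled.
 */
#if 0
	uint32_t usecs = 0;

	task_vtimer_set(task, TASK_VTIMER_USER);        /* snapshot baselines */
	/* ... time passes; then, on a thread of this task ... */
	task_vtimer_update(current_task(), TASK_VTIMER_USER, &usecs);
	task_vtimer_clear(task, TASK_VTIMER_USER);      /* disarm */
#endif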
6907 
6908 uint64_t
6909 get_task_dispatchqueue_offset(
6910 	task_t          task)
6911 {
6912 	return task->dispatchqueue_offset;
6913 }
6914 
6915 void
6916 task_synchronizer_destroy_all(task_t task)
6917 {
6918 	/*
6919 	 *  Destroy owned semaphores
6920 	 */
6921 	semaphore_destroy_all(task);
6922 }
6923 
6924 /*
6925  * Install default (machine-dependent) initial thread state
6926  * on the task.  Subsequent thread creation will have this initial
6927  * state set on the thread by machine_thread_inherit_taskwide().
6928  * Flavors and structures are exactly the same as those passed to thread_set_state().
6929  */
6930 kern_return_t
6931 task_set_state(
6932 	task_t task,
6933 	int flavor,
6934 	thread_state_t state,
6935 	mach_msg_type_number_t state_count)
6936 {
6937 	kern_return_t ret;
6938 
6939 	if (task == TASK_NULL) {
6940 		return KERN_INVALID_ARGUMENT;
6941 	}
6942 
6943 	task_lock(task);
6944 
6945 	if (!task->active) {
6946 		task_unlock(task);
6947 		return KERN_FAILURE;
6948 	}
6949 
6950 	ret = machine_task_set_state(task, flavor, state, state_count);
6951 
6952 	task_unlock(task);
6953 	return ret;
6954 }
6955 
6956 /*
6957  * Examine the default (machine-dependent) initial thread state
6958  * on the task, as set by task_set_state().  Flavors and structures
6959  * are exactly the same as those passed to thread_get_state().
6960  */
6961 kern_return_t
6962 task_get_state(
6963 	task_t  task,
6964 	int     flavor,
6965 	thread_state_t state,
6966 	mach_msg_type_number_t *state_count)
6967 {
6968 	kern_return_t ret;
6969 
6970 	if (task == TASK_NULL) {
6971 		return KERN_INVALID_ARGUMENT;
6972 	}
6973 
6974 	task_lock(task);
6975 
6976 	if (!task->active) {
6977 		task_unlock(task);
6978 		return KERN_FAILURE;
6979 	}
6980 
6981 	ret = machine_task_get_state(task, flavor, state, state_count);
6982 
6983 	task_unlock(task);
6984 	return ret;
6985 }
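/*
 * A round-trip sketch for the two routines above. The flavor shown
 * (ARM_THREAD_STATE64) is an assumption for illustration; any flavor
 * accepted by thread_set_state()/thread_get_state() behaves the same way.
 * Kept under #if 0; never compiled.
 */
#if 0
	arm_thread_state64_t ts;
	mach_msg_type_number_t count = ARM_THREAD_STATE64_COUNT;
	kern_return_t kr;

	kr = task_get_state(task, ARM_THREAD_STATE64, (thread_state_t)&ts, &count);
	if (kr == KERN_SUCCESS) {
		kr = task_set_state(task, ARM_THREAD_STATE64, (thread_state_t)&ts, count);
	}
#endif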
6986 
6987 
6988 static kern_return_t __attribute__((noinline, not_tail_called))
6989 PROC_VIOLATED_GUARD__SEND_EXC_GUARD(
6990 	mach_exception_code_t code,
6991 	mach_exception_subcode_t subcode,
6992 	void *reason,
6993 	boolean_t backtrace_only)
6994 {
6995 #ifdef MACH_BSD
6996 	if (1 == proc_selfpid()) {
6997 		return KERN_NOT_SUPPORTED;              // initproc is immune
6998 	}
6999 #endif
7000 	mach_exception_data_type_t codes[EXCEPTION_CODE_MAX] = {
7001 		[0] = code,
7002 		[1] = subcode,
7003 	};
7004 	task_t task = current_task();
7005 	kern_return_t kr;
7006 	void *bsd_info = get_bsdtask_info(task);
7007 
7008 	/* (See jetsam-related comments below) */
7009 
7010 	proc_memstat_skip(bsd_info, TRUE);
7011 	kr = task_enqueue_exception_with_corpse(task, EXC_GUARD, codes, 2, reason, backtrace_only);
7012 	proc_memstat_skip(bsd_info, FALSE);
7013 	return kr;
7014 }
7015 
7016 kern_return_t
7017 task_violated_guard(
7018 	mach_exception_code_t code,
7019 	mach_exception_subcode_t subcode,
7020 	void *reason,
7021 	bool backtrace_only)
7022 {
7023 	return PROC_VIOLATED_GUARD__SEND_EXC_GUARD(code, subcode, reason, backtrace_only);
7024 }
7025 
7026 
7027 #if CONFIG_MEMORYSTATUS
7028 
7029 boolean_t
7030 task_get_memlimit_is_active(task_t task)
7031 {
7032 	assert(task != NULL);
7033 
7034 	if (task->memlimit_is_active == 1) {
7035 		return TRUE;
7036 	} else {
7037 		return FALSE;
7038 	}
7039 }
7040 
7041 void
7042 task_set_memlimit_is_active(task_t task, boolean_t memlimit_is_active)
7043 {
7044 	assert(task != NULL);
7045 
7046 	if (memlimit_is_active) {
7047 		task->memlimit_is_active = 1;
7048 	} else {
7049 		task->memlimit_is_active = 0;
7050 	}
7051 }
7052 
7053 boolean_t
7054 task_get_memlimit_is_fatal(task_t task)
7055 {
7056 	assert(task != NULL);
7057 
7058 	if (task->memlimit_is_fatal == 1) {
7059 		return TRUE;
7060 	} else {
7061 		return FALSE;
7062 	}
7063 }
7064 
7065 void
7066 task_set_memlimit_is_fatal(task_t task, boolean_t memlimit_is_fatal)
7067 {
7068 	assert(task != NULL);
7069 
7070 	if (memlimit_is_fatal) {
7071 		task->memlimit_is_fatal = 1;
7072 	} else {
7073 		task->memlimit_is_fatal = 0;
7074 	}
7075 }
7076 
7077 uint64_t
7078 task_get_dirty_start(task_t task)
7079 {
7080 	return task->memstat_dirty_start;
7081 }
7082 
7083 void
7084 task_set_dirty_start(task_t task, uint64_t start)
7085 {
7086 	task_lock(task);
7087 	task->memstat_dirty_start = start;
7088 	task_unlock(task);
7089 }
7090 
7091 boolean_t
7092 task_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active)
7093 {
7094 	boolean_t triggered = FALSE;
7095 
7096 	assert(task == current_task());
7097 
7098 	/*
7099 	 * Returns TRUE if the task has already triggered an exc_resource exception.
7100 	 */
7101 
7102 	if (memlimit_is_active) {
7103 		triggered = (task->memlimit_active_exc_resource ? TRUE : FALSE);
7104 	} else {
7105 		triggered = (task->memlimit_inactive_exc_resource ? TRUE : FALSE);
7106 	}
7107 
7108 	return triggered;
7109 }
7110 
7111 void
7112 task_mark_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active)
7113 {
7114 	assert(task == current_task());
7115 
7116 	/*
7117 	 * We allow one exc_resource per process per active/inactive limit.
7118 	 * The limit's fatal attribute does not come into play.
7119 	 */
7120 
7121 	if (memlimit_is_active) {
7122 		task->memlimit_active_exc_resource = 1;
7123 	} else {
7124 		task->memlimit_inactive_exc_resource = 1;
7125 	}
7126 }
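/*
 * The two routines above form a check-then-mark pair; a caller such as
 * task_process_crossed_limit_no_diag() below uses them to raise at most one
 * EXC_RESOURCE per process per active/inactive limit. A sketch of the
 * pattern, kept under #if 0; never compiled.
 */
#if 0
	if (!task_has_triggered_exc_resource(task, memlimit_is_active)) {
		/* ... raise EXC_RESOURCE exactly once for this limit ... */
		task_mark_has_triggered_exc_resource(task, memlimit_is_active);
	}
#endif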
7127 
7128 #define HWM_USERCORE_MINSPACE 250 // free space (in MB) required *after* core file creation
7129 
7130 void __attribute__((noinline))
7131 PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, send_exec_resource_options_t exception_options)
7132 {
7133 	task_t                                          task            = current_task();
7134 	int                                                     pid         = 0;
7135 	const char                                      *procname       = "unknown";
7136 	mach_exception_data_type_t      code[EXCEPTION_CODE_MAX];
7137 	boolean_t send_sync_exc_resource = FALSE;
7138 	void *cur_bsd_info = get_bsdtask_info(current_task());
7139 
7140 #ifdef MACH_BSD
7141 	pid = proc_selfpid();
7142 
7143 	if (pid == 1) {
7144 		/*
7145 		 * Cannot have ReportCrash analyzing
7146 		 * a suspended initproc.
7147 		 */
7148 		return;
7149 	}
7150 
7151 	if (cur_bsd_info != NULL) {
7152 		procname = proc_name_address(cur_bsd_info);
7153 		send_sync_exc_resource = proc_send_synchronous_EXC_RESOURCE(cur_bsd_info);
7154 	}
7155 #endif
7156 #if CONFIG_COREDUMP
7157 	if (hwm_user_cores) {
7158 		int                             error;
7159 		uint64_t                starttime, end;
7160 		clock_sec_t             secs = 0;
7161 		uint32_t                microsecs = 0;
7162 
7163 		starttime = mach_absolute_time();
7164 		/*
7165 		 * Trigger a coredump of this process. Don't proceed unless we know we won't
7166 		 * be filling up the disk; and ignore the core size resource limit for this
7167 		 * core file.
7168 		 */
7169 		if ((error = coredump(cur_bsd_info, HWM_USERCORE_MINSPACE, COREDUMP_IGNORE_ULIMIT)) != 0) {
7170 			printf("couldn't take coredump of %s[%d]: %d\n", procname, pid, error);
7171 		}
7172 		/*
7173 		 * coredump() leaves the task suspended.
7174 		 */
7175 		task_resume_internal(current_task());
7176 
7177 		end = mach_absolute_time();
7178 		absolutetime_to_microtime(end - starttime, &secs, &microsecs);
7179 		printf("coredump of %s[%d] taken in %d secs %d microsecs\n",
7180 		    proc_name_address(cur_bsd_info), pid, (int)secs, microsecs);
7181 	}
7182 #endif /* CONFIG_COREDUMP */
7183 
7184 	if (disable_exc_resource) {
7185 		printf("process %s[%d] crossed memory high watermark (%d MB); EXC_RESOURCE "
7186 		    "suppressed by a boot-arg.\n", procname, pid, max_footprint_mb);
7187 		return;
7188 	}
7189 	printf("process %s [%d] crossed memory %s (%d MB); EXC_RESOURCE "
7190 	    "\n", procname, pid, (!(exception_options & EXEC_RESOURCE_DIAGNOSTIC) ? "high watermark" : "diagnostics limit"), max_footprint_mb);
7191 
7192 	/*
7193 	 * A task that has triggered an EXC_RESOURCE, should not be
7194 	 * jetsammed when the device is under memory pressure.  Here
7195 	 * we set the P_MEMSTAT_SKIP flag so that the process
7196 	 * will be skipped if the memorystatus_thread wakes up.
7197 	 *
7198 	 * This is a debugging aid to ensure we can get a corpse before
7199 	 * the jetsam thread kills the process.
7200 	 * Note that proc_memstat_skip is a no-op on release kernels.
7201 	 */
7202 	proc_memstat_skip(cur_bsd_info, TRUE);
7203 
7204 	code[0] = code[1] = 0;
7205 	EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_MEMORY);
7206 	/*
7207 	 * Regardless of whether there was a diag memlimit violation, fatal exceptions are always
7208 	 * reported as high watermarks. In other words, if both a diag limit and a watermark are set
7209 	 * and the violation is for the watermark limit, a watermark is reported.
7210 	 */
7211 	if (!(exception_options & EXEC_RESOURCE_FATAL)) {
7212 		EXC_RESOURCE_ENCODE_FLAVOR(code[0], !(exception_options & EXEC_RESOURCE_DIAGNOSTIC)  ? FLAVOR_HIGH_WATERMARK : FLAVOR_DIAG_MEMLIMIT);
7213 	} else {
7214 		EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_HIGH_WATERMARK );
7215 	}
7216 	EXC_RESOURCE_HWM_ENCODE_LIMIT(code[0], max_footprint_mb);
7217 	/*
7218 	 * Do not generate a corpse fork if the violation is a fatal one
7219 	 * or the process wants synchronous EXC_RESOURCE exceptions.
7220 	 */
7221 	if ((exception_options & EXEC_RESOURCE_FATAL) || send_sync_exc_resource || !exc_via_corpse_forking) {
7222 		if (exception_options & EXEC_RESOURCE_FATAL) {
7223 			vm_map_set_corpse_source(task->map);
7224 		}
7225 
7226 		/* Do not send a EXC_RESOURCE if corpse_for_fatal_memkill is set */
7227 		if (send_sync_exc_resource || !corpse_for_fatal_memkill) {
7228 			/*
7229 			 * Use the _internal_ variant so that no user-space
7230 			 * process can resume our task from under us.
7231 			 */
7232 			task_suspend_internal(task);
7233 			exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
7234 			task_resume_internal(task);
7235 		}
7236 	} else {
7237 		if (disable_exc_resource_during_audio && audio_active) {
7238 			printf("process %s[%d] crossed memory high watermark (%d MB); EXC_RESOURCE "
7239 			    "suppressed due to audio playback.\n", procname, pid, max_footprint_mb);
7240 		} else {
7241 			task_enqueue_exception_with_corpse(task, EXC_RESOURCE,
7242 			    code, EXCEPTION_CODE_MAX, NULL, FALSE);
7243 		}
7244 	}
7245 
7246 	/*
7247 	 * After the EXC_RESOURCE has been handled, we must clear the
7248 	 * P_MEMSTAT_SKIP flag so that the process can again be
7249 	 * considered for jetsam if the memorystatus_thread wakes up.
7250 	 */
7251 	proc_memstat_skip(cur_bsd_info, FALSE);         /* clear the flag */
7252 }
7253 /*
7254  * Callback invoked when a task exceeds its physical footprint limit.
7255  */
7256 void
7257 task_footprint_exceeded(int warning, __unused const void *param0, __unused const void *param1)
7258 {
7259 	ledger_amount_t max_footprint = 0;
7260 	ledger_amount_t max_footprint_mb = 0;
7261 #if DEBUG || DEVELOPMENT
7262 	ledger_amount_t diag_threshold_limit_mb = 0;
7263 	ledger_amount_t diag_threshold_limit = 0;
7264 #endif
7265 #if CONFIG_DEFERRED_RECLAIM
7266 	ledger_amount_t current_footprint;
7267 #endif /* CONFIG_DEFERRED_RECLAIM */
7268 	task_t task;
7269 	send_exec_resource_is_warning is_warning = IS_NOT_WARNING;
7270 	boolean_t memlimit_is_active;
7271 	send_exec_resource_is_fatal memlimit_is_fatal;
7272 	send_exec_resource_is_diagnostics is_diag_mem_threshold = IS_NOT_DIAGNOSTICS;
7273 	if (warning == LEDGER_WARNING_DIAG_MEM_THRESHOLD) {
7274 		is_diag_mem_threshold = IS_DIAGNOSTICS;
7275 		is_warning = IS_WARNING;
7276 	} else if (warning == LEDGER_WARNING_DIPPED_BELOW) {
7277 		/*
7278 		 * Task memory limits only provide a warning on the way up.
7279 		 */
7280 		return;
7281 	} else if (warning == LEDGER_WARNING_ROSE_ABOVE) {
7282 		/*
7283 		 * This task is in danger of violating a memory limit;
7284 		 * it has exceeded a percentage level of the limit.
7285 		 */
7286 		is_warning = IS_WARNING;
7287 	} else {
7288 		/*
7289 		 * The task has exceeded the physical footprint limit.
7290 		 * This is not a warning but a true limit violation.
7291 		 */
7292 		is_warning = IS_NOT_WARNING;
7293 	}
7294 
7295 	task = current_task();
7296 
7297 	ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &max_footprint);
7298 #if DEBUG || DEVELOPMENT
7299 	ledger_get_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint, &diag_threshold_limit);
7300 #endif
7301 #if CONFIG_DEFERRED_RECLAIM
7302 	if (task->deferred_reclamation_metadata != NULL) {
7303 		/*
7304 		 * Task is enrolled in deferred reclamation.
7305 		 * Do a reclaim to ensure it's really over its limit.
7306 		 */
7307 		vm_deferred_reclamation_reclaim_from_task_sync(task, UINT64_MAX);
7308 		ledger_get_balance(task->ledger, task_ledgers.phys_footprint, &current_footprint);
7309 		if (current_footprint < max_footprint) {
7310 			return;
7311 		}
7312 	}
7313 #endif /* CONFIG_DEFERRED_RECLAIM */
7314 	max_footprint_mb = max_footprint >> 20;
7315 #if DEBUG || DEVELOPMENT
7316 	diag_threshold_limit_mb = diag_threshold_limit >> 20;
7317 #endif
7318 	memlimit_is_active = task_get_memlimit_is_active(task);
7319 	memlimit_is_fatal = task_get_memlimit_is_fatal(task) == FALSE ? IS_NOT_FATAL : IS_FATAL;
7320 #if DEBUG || DEVELOPMENT
7321 	if (is_diag_mem_threshold == IS_NOT_DIAGNOSTICS) {
7322 		task_process_crossed_limit_no_diag(task, max_footprint_mb, memlimit_is_fatal, memlimit_is_active, is_warning);
7323 	} else {
7324 		task_process_crossed_limit_diag(diag_threshold_limit_mb);
7325 	}
7326 #else
7327 	task_process_crossed_limit_no_diag(task, max_footprint_mb, memlimit_is_fatal, memlimit_is_active, is_warning);
7328 #endif
7329 }
7330 
7331 /*
7332  * Actions to perform when a process has crossed a watermark or has hit a fatal consumption limit. */
7333 static inline void
7334 task_process_crossed_limit_no_diag(task_t task, ledger_amount_t ledger_limit_size, bool memlimit_is_fatal, bool memlimit_is_active, send_exec_resource_is_warning is_warning)
7335 {
7336 	send_exec_resource_options_t exception_options = 0;
7337 	if (memlimit_is_fatal) {
7338 		exception_options |= EXEC_RESOURCE_FATAL;
7339 	}
7340 	/*
7341 	 * If this is an actual violation (not a warning), then generate EXC_RESOURCE exception.
7342 	 * We only generate the exception once per process per memlimit (active/inactive limit).
7343 	 * To enforce this, we monitor state based on the memlimit's active/inactive attribute
7344 	 * and we disable it by marking that memlimit as exception triggered.
7345 	 */
7346 	if (is_warning == IS_NOT_WARNING && !task_has_triggered_exc_resource(task, memlimit_is_active)) {
7347 		PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND((int)ledger_limit_size, exception_options);
7348 		// If it was not a diag threshold (i.e., it was a memory limit), then we do not want more signalling;
7349 		// however, if it was a diag limit, the user may reload a different limit and signal the violation again.
7350 		memorystatus_log_exception((int)ledger_limit_size, memlimit_is_active, memlimit_is_fatal);
7351 		task_mark_has_triggered_exc_resource(task, memlimit_is_active);
7352 	}
7353 	memorystatus_on_ledger_footprint_exceeded(is_warning == IS_NOT_WARNING ? FALSE : TRUE, memlimit_is_active, memlimit_is_fatal);
7354 }
7355 
7356 #if DEBUG || DEVELOPMENT
7357 /**
7358  * Actions to take when a process has crossed the diagnostics limit
7359  */
7360 static inline void
7361 task_process_crossed_limit_diag(ledger_amount_t ledger_limit_size)
7362 {
7363 	/*
7364 	 * If this is an actual violation (not a warning), then generate EXC_RESOURCE exception.
7365 	 * In the case of the diagnostics thresholds, the exception will be signaled only once; the
7366 	 * inhibit / rearm mechanism is performed at the ledger level.
7367 	 */
7368 	send_exec_resource_options_t exception_options = EXEC_RESOURCE_DIAGNOSTIC;
7369 	PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND((int)ledger_limit_size, exception_options);
7370 	memorystatus_log_diag_threshold_exception((int)ledger_limit_size);
7371 }
7372 #endif
7373 
7374 extern int proc_check_footprint_priv(void);
7375 
7376 kern_return_t
7377 task_set_phys_footprint_limit(
7378 	task_t task,
7379 	int new_limit_mb,
7380 	int *old_limit_mb)
7381 {
7382 	kern_return_t error;
7383 
7384 	boolean_t memlimit_is_active;
7385 	boolean_t memlimit_is_fatal;
7386 
7387 	if ((error = proc_check_footprint_priv())) {
7388 		return KERN_NO_ACCESS;
7389 	}
7390 
7391 	/*
7392 	 * This call should probably be obsoleted.
7393 	 * But for now, we default to current state.
7394 	 */
7395 	memlimit_is_active = task_get_memlimit_is_active(task);
7396 	memlimit_is_fatal = task_get_memlimit_is_fatal(task);
7397 
7398 	return task_set_phys_footprint_limit_internal(task, new_limit_mb, old_limit_mb, memlimit_is_active, memlimit_is_fatal);
7399 }
7400 
7401 /*
7402  * Set the limit of diagnostics memory consumption for a concrete task
7403  */
7404 #if CONFIG_MEMORYSTATUS
7405 #if DEVELOPMENT || DEBUG
7406 kern_return_t
7407 task_set_diag_footprint_limit(
7408 	task_t task,
7409 	uint64_t new_limit_mb,
7410 	uint64_t *old_limit_mb)
7411 {
7412 	kern_return_t error;
7413 
7414 	if ((error = proc_check_footprint_priv())) {
7415 		return KERN_NO_ACCESS;
7416 	}
7417 
7418 	return task_set_diag_footprint_limit_internal(task, new_limit_mb, old_limit_mb);
7419 }
7420 
7421 #endif // DEVELOPMENT || DEBUG
7422 #endif // CONFIG_MEMORYSTATUS
7423 
7424 kern_return_t
7425 task_convert_phys_footprint_limit(
7426 	int limit_mb,
7427 	int *converted_limit_mb)
7428 {
7429 	if (limit_mb == -1) {
7430 		/*
7431 		 * No limit
7432 		 */
7433 		if (max_task_footprint != 0) {
7434 			*converted_limit_mb = (int)(max_task_footprint / 1024 / 1024);         /* bytes to MB */
7435 		} else {
7436 			*converted_limit_mb = (int)(LEDGER_LIMIT_INFINITY >> 20);
7437 		}
7438 	} else {
7439 		/* nothing to convert */
7440 		*converted_limit_mb = limit_mb;
7441 	}
7442 	return KERN_SUCCESS;
7443 }
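/*
 * Worked examples of the conversion above, assuming max_task_footprint is 0
 * (no global default limit configured). Kept under #if 0; never compiled.
 */
#if 0
	int mb;

	task_convert_phys_footprint_limit(-1, &mb);     /* mb == LEDGER_LIMIT_INFINITY >> 20 */
	task_convert_phys_footprint_limit(512, &mb);    /* mb == 512, passed through unchanged */
#endif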
7444 
7445 kern_return_t
7446 task_set_phys_footprint_limit_internal(
7447 	task_t task,
7448 	int new_limit_mb,
7449 	int *old_limit_mb,
7450 	boolean_t memlimit_is_active,
7451 	boolean_t memlimit_is_fatal)
7452 {
7453 	ledger_amount_t old;
7454 	kern_return_t ret;
7455 #if DEVELOPMENT || DEBUG
7456 	diagthreshold_check_return diag_threshold_validity;
7457 #endif
7458 	ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &old);
7459 
7460 	if (ret != KERN_SUCCESS) {
7461 		return ret;
7462 	}
7463 	/**
7464 	 * We may need to re-enable the diag threshold, so get its value
7465 	 * and current status.
7466 	 */
7467 #if DEVELOPMENT || DEBUG
7468 	diag_threshold_validity = task_check_memorythreshold_is_valid( task, new_limit_mb, false);
7469 	/**
7470 	 * If the footprint and diagnostics threshold are going to be the same, disable the threshold.
7471 	 */
7472 	if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7473 		ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7474 	} else if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7475 		ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7476 	}
7477 #endif
7478 
7479 	/*
7480 	 * Check that limit >> 20 will not give an "unexpected" 32-bit
7481 	 * result. There are, however, implicit assumptions that -1 mb limit
7482 	 * equates to LEDGER_LIMIT_INFINITY.
7483 	 */
7484 	assert(((old & 0xFFF0000000000000LL) == 0) || (old == LEDGER_LIMIT_INFINITY));
7485 
7486 	if (old_limit_mb) {
7487 		*old_limit_mb = (int)(old >> 20);
7488 	}
7489 
7490 	if (new_limit_mb == -1) {
7491 		/*
7492 		 * Caller wishes to remove the limit.
7493 		 */
7494 		ledger_set_limit(task->ledger, task_ledgers.phys_footprint,
7495 		    max_task_footprint ? max_task_footprint : LEDGER_LIMIT_INFINITY,
7496 		    max_task_footprint ? (uint8_t)max_task_footprint_warning_level : 0);
7497 
7498 		task_lock(task);
7499 		task_set_memlimit_is_active(task, memlimit_is_active);
7500 		task_set_memlimit_is_fatal(task, memlimit_is_fatal);
7501 		task_unlock(task);
7502 		/**
7503 		 * If the diagnostics were disabled and we now have a new limit, we have to re-enable them.
7504 		 */
7505 #if DEVELOPMENT || DEBUG
7506 		if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7507 			ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7508 		} else if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7509 			ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7510 		}
7511 	#endif
7512 		return KERN_SUCCESS;
7513 	}
7514 
7515 #ifdef CONFIG_NOMONITORS
7516 	return KERN_SUCCESS;
7517 #endif /* CONFIG_NOMONITORS */
7518 
7519 	task_lock(task);
7520 
7521 	if ((memlimit_is_active == task_get_memlimit_is_active(task)) &&
7522 	    (memlimit_is_fatal == task_get_memlimit_is_fatal(task)) &&
7523 	    (((ledger_amount_t)new_limit_mb << 20) == old)) {
7524 		/*
7525 		 * memlimit state is not changing
7526 		 */
7527 		task_unlock(task);
7528 		return KERN_SUCCESS;
7529 	}
7530 
7531 	task_set_memlimit_is_active(task, memlimit_is_active);
7532 	task_set_memlimit_is_fatal(task, memlimit_is_fatal);
7533 
7534 	ledger_set_limit(task->ledger, task_ledgers.phys_footprint,
7535 	    (ledger_amount_t)new_limit_mb << 20, PHYS_FOOTPRINT_WARNING_LEVEL);
7536 
7537 	if (task == current_task()) {
7538 		ledger_check_new_balance(current_thread(), task->ledger,
7539 		    task_ledgers.phys_footprint);
7540 	}
7541 
7542 	task_unlock(task);
7543 #if DEVELOPMENT || DEBUG
7544 	if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7545 		ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7546 	}
7547 	#endif
7548 
7549 	return KERN_SUCCESS;
7550 }
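/*
 * A caller sketch for the routine above, in the style of the memorystatus
 * code paths: install a 300 MB active, non-fatal limit and keep the previous
 * limit so it can be restored later. The specific values are illustrative.
 * Kept under #if 0; never compiled.
 */
#if 0
	int old_limit_mb = 0;
	kern_return_t kr;

	kr = task_set_phys_footprint_limit_internal(task, 300, &old_limit_mb,
	    TRUE /* memlimit_is_active */, FALSE /* memlimit_is_fatal */);
#endif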
7551 
7552 #if RESETTABLE_DIAG_FOOTPRINT_LIMITS
7553 kern_return_t
7554 task_set_diag_footprint_limit_internal(
7555 	task_t task,
7556 	uint64_t new_limit_bytes,
7557 	uint64_t *old_limit_bytes)
7558 {
7559 	ledger_amount_t old = 0;
7560 	kern_return_t ret = KERN_SUCCESS;
7561 	diagthreshold_check_return diag_threshold_validity;
7562 	ret = ledger_get_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint, &old);
7563 
7564 	if (ret != KERN_SUCCESS) {
7565 		return ret;
7566 	}
7567 	/**
7568 	 * We may need to re-enable the diag threshold, so get its value
7569 	 * and current status.
7570 	 */
7571 	diag_threshold_validity = task_check_memorythreshold_is_valid( task, new_limit_bytes >> 20, true);
7572 	/**
7573 	 * If the footprint and diagnostics threshold are going to be the same, disable the threshold.
7574 	 */
7575 	if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7576 		ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7577 	}
7578 
7579 	/*
7580 	 * Check that limit >> 20 will not give an "unexpected" 32-bit
7581 	 * result. There are, however, implicit assumptions that -1 mb limit
7582 	 * equates to LEDGER_LIMIT_INFINITY.
7583 	 */
7584 	if (old_limit_bytes) {
7585 		*old_limit_bytes = old;
7586 	}
7587 
7588 	if (new_limit_bytes == -1) {
7589 		/*
7590 		 * Caller wishes to remove the limit.
7591 		 */
7592 		ledger_set_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint,
7593 		    LEDGER_LIMIT_INFINITY);
7594 		/*
7595 		 * If the memory diagnostics flag was disabled, re-enable it.
7596 		 */
7597 		ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7598 		return KERN_SUCCESS;
7599 	}
7600 
7601 #ifdef CONFIG_NOMONITORS
7602 	return KERN_SUCCESS;
7603 #else
7604 
7605 	task_lock(task);
7606 	ledger_set_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint,
7607 	    (ledger_amount_t)new_limit_bytes );
7608 	if (task == current_task()) {
7609 		ledger_check_new_balance(current_thread(), task->ledger,
7610 		    task_ledgers.phys_footprint);
7611 	}
7612 
7613 	task_unlock(task);
7614 	if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7615 		ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7616 	} else if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7617 		ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7618 	}
7619 
7620 	return KERN_SUCCESS;
7621 #endif /* CONFIG_NOMONITORS */
7622 }
7623 
7624 kern_return_t
7625 task_get_diag_footprint_limit_internal(
7626 	task_t task,
7627 	uint64_t *new_limit_bytes,
7628 	bool *threshold_disabled)
7629 {
7630 	ledger_amount_t ledger_limit;
7631 	kern_return_t ret = KERN_SUCCESS;
7632 	if (new_limit_bytes == NULL || threshold_disabled == NULL) {
7633 		return KERN_INVALID_ARGUMENT;
7634 	}
7635 	ret = ledger_get_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint, &ledger_limit);
7636 	if (ledger_limit == LEDGER_LIMIT_INFINITY) {
7637 		ledger_limit = -1;
7638 	}
7639 	if (ret == KERN_SUCCESS) {
7640 		*new_limit_bytes = ledger_limit;
7641 		ret = ledger_is_diag_threshold_enabled(task->ledger, task_ledgers.phys_footprint, threshold_disabled);
7642 	}
7643 	return ret;
7644 }
7645 #endif /* RESETTABLE_DIAG_FOOTPRINT_LIMITS */
7646 
7647 
7648 kern_return_t
7649 task_get_phys_footprint_limit(
7650 	task_t task,
7651 	int *limit_mb)
7652 {
7653 	ledger_amount_t limit;
7654 	kern_return_t ret;
7655 
7656 	ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &limit);
7657 	if (ret != KERN_SUCCESS) {
7658 		return ret;
7659 	}
7660 
7661 	/*
7662 	 * Check that limit >> 20 will not give an "unexpected" signed, 32-bit
7663 	 * result. There are, however, implicit assumptions that -1 mb limit
7664 	 * equates to LEDGER_LIMIT_INFINITY.
7665 	 */
7666 	assert(((limit & 0xFFF0000000000000LL) == 0) || (limit == LEDGER_LIMIT_INFINITY));
7667 	*limit_mb = (int)(limit >> 20);
7668 
7669 	return KERN_SUCCESS;
7670 }
7671 #else /* CONFIG_MEMORYSTATUS */
7672 kern_return_t
7673 task_set_phys_footprint_limit(
7674 	__unused task_t task,
7675 	__unused int new_limit_mb,
7676 	__unused int *old_limit_mb)
7677 {
7678 	return KERN_FAILURE;
7679 }
7680 
7681 kern_return_t
7682 task_get_phys_footprint_limit(
7683 	__unused task_t task,
7684 	__unused int *limit_mb)
7685 {
7686 	return KERN_FAILURE;
7687 }
7688 #endif /* CONFIG_MEMORYSTATUS */
7689 
7690 security_token_t *
7691 task_get_sec_token(task_t task)
7692 {
7693 	return &task_get_ro(task)->task_tokens.sec_token;
7694 }
7695 
7696 void
7697 task_set_sec_token(task_t task, security_token_t *token)
7698 {
7699 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7700 	    task_tokens.sec_token, token);
7701 }
7702 
7703 audit_token_t *
7704 task_get_audit_token(task_t task)
7705 {
7706 	return &task_get_ro(task)->task_tokens.audit_token;
7707 }
7708 
7709 void
7710 task_set_audit_token(task_t task, audit_token_t *token)
7711 {
7712 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7713 	    task_tokens.audit_token, token);
7714 }
7715 
7716 void
7717 task_set_tokens(task_t task, security_token_t *sec_token, audit_token_t *audit_token)
7718 {
7719 	struct task_token_ro_data tokens;
7720 
7721 	tokens = task_get_ro(task)->task_tokens;
7722 	tokens.sec_token = *sec_token;
7723 	tokens.audit_token = *audit_token;
7724 
7725 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task), task_tokens,
7726 	    &tokens);
7727 }
7728 
7729 boolean_t
7730 task_is_privileged(task_t task)
7731 {
7732 	return task_get_sec_token(task)->val[0] == 0;
7733 }
7734 
7735 #ifdef CONFIG_MACF
7736 uint8_t *
7737 task_get_mach_trap_filter_mask(task_t task)
7738 {
7739 	return task_get_ro(task)->task_filters.mach_trap_filter_mask;
7740 }
7741 
7742 void
7743 task_set_mach_trap_filter_mask(task_t task, uint8_t *mask)
7744 {
7745 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7746 	    task_filters.mach_trap_filter_mask, &mask);
7747 }
7748 
7749 uint8_t *
7750 task_get_mach_kobj_filter_mask(task_t task)
7751 {
7752 	return task_get_ro(task)->task_filters.mach_kobj_filter_mask;
7753 }
7754 
7755 mach_vm_address_t
7756 task_get_all_image_info_addr(task_t task)
7757 {
7758 	return task->all_image_info_addr;
7759 }
7760 
7761 void
7762 task_set_mach_kobj_filter_mask(task_t task, uint8_t *mask)
7763 {
7764 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7765 	    task_filters.mach_kobj_filter_mask, &mask);
7766 }
7767 
7768 #endif /* CONFIG_MACF */
7769 
7770 void
7771 task_set_thread_limit(task_t task, uint16_t thread_limit)
7772 {
7773 	assert(task != kernel_task);
7774 	if (thread_limit <= TASK_MAX_THREAD_LIMIT) {
7775 		task_lock(task);
7776 		task->task_thread_limit = thread_limit;
7777 		task_unlock(task);
7778 	}
7779 }
7780 
7781 #if CONFIG_PROC_RESOURCE_LIMITS
7782 kern_return_t
7783 task_set_port_space_limits(task_t task, uint32_t soft_limit, uint32_t hard_limit)
7784 {
7785 	return ipc_space_set_table_size_limits(task->itk_space, soft_limit, hard_limit);
7786 }
7787 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
7788 
7789 #if XNU_TARGET_OS_OSX
7790 boolean_t
7791 task_has_system_version_compat_enabled(task_t task)
7792 {
7793 	boolean_t enabled = FALSE;
7794 
7795 	task_lock(task);
7796 	enabled = (task->t_flags & TF_SYS_VERSION_COMPAT);
7797 	task_unlock(task);
7798 
7799 	return enabled;
7800 }
7801 
7802 void
7803 task_set_system_version_compat_enabled(task_t task, boolean_t enable_system_version_compat)
7804 {
7805 	assert(task == current_task());
7806 	assert(task != kernel_task);
7807 
7808 	task_lock(task);
7809 	if (enable_system_version_compat) {
7810 		task->t_flags |= TF_SYS_VERSION_COMPAT;
7811 	} else {
7812 		task->t_flags &= ~TF_SYS_VERSION_COMPAT;
7813 	}
7814 	task_unlock(task);
7815 }
7816 #endif /* XNU_TARGET_OS_OSX */
7817 
7818 /*
7819  * We need to export some functions to other components that
7820  * are currently implemented in macros within the osfmk
7821  * component.  Just export them as functions of the same name.
7822  */
7823 boolean_t
7824 is_kerneltask(task_t t)
7825 {
7826 	if (t == kernel_task) {
7827 		return TRUE;
7828 	}
7829 
7830 	return FALSE;
7831 }
7832 
7833 boolean_t
7834 is_corpsefork(task_t t)
7835 {
7836 	return task_is_a_corpse_fork(t);
7837 }
7838 
7839 task_t
7840 current_task_early(void)
7841 {
7842 	if (__improbable(startup_phase < STARTUP_SUB_EARLY_BOOT)) {
7843 		if (current_thread()->t_tro == NULL) {
7844 			return TASK_NULL;
7845 		}
7846 	}
7847 	return get_threadtask(current_thread());
7848 }
7849 
7850 task_t
7851 current_task(void)
7852 {
7853 	return get_threadtask(current_thread());
7854 }
7855 
7856 /* defined in bsd/kern/kern_prot.c */
7857 extern int get_audit_token_pid(audit_token_t *audit_token);
7858 
7859 int
7860 task_pid(task_t task)
7861 {
7862 	if (task) {
7863 		return get_audit_token_pid(task_get_audit_token(task));
7864 	}
7865 	return -1;
7866 }
7867 
7868 #if __has_feature(ptrauth_calls)
7869 /*
7870  * Get the shared region id and jop signing key for the task.
7871  * The function will allocate a kalloc buffer and return
7872  * The function will allocate a kalloc buffer and return
7873  * it to the caller; the caller must free it. This is used
7874  * for getting the information via the task port.
7875 char *
7876 task_get_vm_shared_region_id_and_jop_pid(task_t task, uint64_t *jop_pid)
7877 {
7878 	size_t len;
7879 	char *shared_region_id = NULL;
7880 
7881 	task_lock(task);
7882 	if (task->shared_region_id == NULL) {
7883 		task_unlock(task);
7884 		return NULL;
7885 	}
7886 	len = strlen(task->shared_region_id) + 1;
7887 
7888 	/* don't hold task lock while allocating */
7889 	task_unlock(task);
7890 	shared_region_id = kalloc_data(len, Z_WAITOK);
7891 	task_lock(task);
7892 
7893 	if (task->shared_region_id == NULL) {
7894 		task_unlock(task);
7895 		kfree_data(shared_region_id, len);
7896 		return NULL;
7897 	}
7898 	assert(len == strlen(task->shared_region_id) + 1);         /* should never change */
7899 	strlcpy(shared_region_id, task->shared_region_id, len);
7900 	task_unlock(task);
7901 
7902 	/* find key from its auth pager */
7903 	if (jop_pid != NULL) {
7904 		*jop_pid = shared_region_find_key(shared_region_id);
7905 	}
7906 
7907 	return shared_region_id;
7908 }
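/*
 * A caller sketch emphasizing the ownership contract above: the returned id
 * buffer is kalloc'ed by the routine and the caller must free it with
 * kfree_data() and the matching strlen() + 1 size. Kept under #if 0; never
 * compiled.
 */
#if 0
	uint64_t jop_pid = 0;
	char *id = task_get_vm_shared_region_id_and_jop_pid(task, &jop_pid);

	if (id != NULL) {
		/* ... use id and jop_pid ... */
		kfree_data(id, strlen(id) + 1);
	}
#endif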
7909 
7910 /*
7911  * set the shared region id for a task
7912  */
7913 void
7914 task_set_shared_region_id(task_t task, char *id)
7915 {
7916 	char *old_id;
7917 
7918 	task_lock(task);
7919 	old_id = task->shared_region_id;
7920 	task->shared_region_id = id;
7921 	task->shared_region_auth_remapped = FALSE;
7922 	task_unlock(task);
7923 
7924 	/* free any pre-existing shared region id */
7925 	if (old_id != NULL) {
7926 		shared_region_key_dealloc(old_id);
7927 		kfree_data(old_id, strlen(old_id) + 1);
7928 	}
7929 }
7930 #endif /* __has_feature(ptrauth_calls) */
7931 
7932 /*
7933  * This routine finds a thread in a task by its unique id
7934  * Returns a referenced thread or THREAD_NULL if the thread was not found
7935  *
7936  * TODO: This is super inefficient - it's an O(threads in task) list walk!
7937  *       We should make a tid hash, or transition all tid clients to thread ports
7938  *
7939  * Precondition: No locks held (will take task lock)
7940  */
7941 thread_t
7942 task_findtid(task_t task, uint64_t tid)
7943 {
7944 	thread_t self           = current_thread();
7945 	thread_t found_thread   = THREAD_NULL;
7946 	thread_t iter_thread    = THREAD_NULL;
7947 
7948 	/* Short-circuit the lookup if we're looking up ourselves */
7949 	if (tid == self->thread_id || tid == TID_NULL) {
7950 		assert(get_threadtask(self) == task);
7951 
7952 		thread_reference(self);
7953 
7954 		return self;
7955 	}
7956 
7957 	task_lock(task);
7958 
7959 	queue_iterate(&task->threads, iter_thread, thread_t, task_threads) {
7960 		if (iter_thread->thread_id == tid) {
7961 			found_thread = iter_thread;
7962 			thread_reference(found_thread);
7963 			break;
7964 		}
7965 	}
7966 
7967 	task_unlock(task);
7968 
7969 	return found_thread;
7970 }
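/*
 * A caller sketch for task_findtid(), given some thread id tid: the routine
 * returns a +1 thread reference (or THREAD_NULL), which the caller must drop
 * with thread_deallocate(). Kept under #if 0; never compiled.
 */
#if 0
	thread_t th = task_findtid(task, tid);

	if (th != THREAD_NULL) {
		/* ... inspect or manipulate th ... */
		thread_deallocate(th);
	}
#endif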
7971 
7972 int
7973 pid_from_task(task_t task)
7974 {
7975 	int pid = -1;
7976 	void *bsd_info = get_bsdtask_info(task);
7977 
7978 	if (bsd_info) {
7979 		pid = proc_pid(bsd_info);
7980 	} else {
7981 		pid = task_pid(task);
7982 	}
7983 
7984 	return pid;
7985 }
7986 
7987 /*
7988  * Control the CPU usage monitor for a task.
7989  */
7990 kern_return_t
7991 task_cpu_usage_monitor_ctl(task_t task, uint32_t *flags)
7992 {
7993 	int error = KERN_SUCCESS;
7994 
7995 	if (*flags & CPUMON_MAKE_FATAL) {
7996 		task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_FATAL_CPUMON;
7997 	} else {
7998 		error = KERN_INVALID_ARGUMENT;
7999 	}
8000 
8001 	return error;
8002 }
8003 
8004 /*
8005  * Control the wakeups monitor for a task.
8006  */
8007 kern_return_t
8008 task_wakeups_monitor_ctl(task_t task, uint32_t *flags, int32_t *rate_hz)
8009 {
8010 	ledger_t ledger = task->ledger;
8011 
8012 	task_lock(task);
8013 	if (*flags & WAKEMON_GET_PARAMS) {
8014 		ledger_amount_t limit;
8015 		uint64_t                period;
8016 
8017 		ledger_get_limit(ledger, task_ledgers.interrupt_wakeups, &limit);
8018 		ledger_get_period(ledger, task_ledgers.interrupt_wakeups, &period);
8019 
8020 		if (limit != LEDGER_LIMIT_INFINITY) {
8021 			/*
8022 			 * An active limit means the wakeups monitor is enabled.
8023 			 */
8024 			*rate_hz = (int32_t)(limit / (int64_t)(period / NSEC_PER_SEC));
8025 			*flags = WAKEMON_ENABLE;
8026 			if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON) {
8027 				*flags |= WAKEMON_MAKE_FATAL;
8028 			}
8029 		} else {
8030 			*flags = WAKEMON_DISABLE;
8031 			*rate_hz = -1;
8032 		}
8033 
8034 		/*
8035 		 * If WAKEMON_GET_PARAMS is present in flags, all other flags are ignored.
8036 		 */
8037 		task_unlock(task);
8038 		return KERN_SUCCESS;
8039 	}
8040 
8041 	if (*flags & WAKEMON_ENABLE) {
8042 		if (*flags & WAKEMON_SET_DEFAULTS) {
8043 			*rate_hz = task_wakeups_monitor_rate;
8044 		}
8045 
8046 #ifndef CONFIG_NOMONITORS
8047 		if (*flags & WAKEMON_MAKE_FATAL) {
8048 			task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON;
8049 		}
8050 #endif /* CONFIG_NOMONITORS */
8051 
8052 		if (*rate_hz <= 0) {
8053 			task_unlock(task);
8054 			return KERN_INVALID_ARGUMENT;
8055 		}
8056 
8057 #ifndef CONFIG_NOMONITORS
8058 		ledger_set_limit(ledger, task_ledgers.interrupt_wakeups, *rate_hz * task_wakeups_monitor_interval,
8059 		    (uint8_t)task_wakeups_monitor_ustackshots_trigger_pct);
8060 		ledger_set_period(ledger, task_ledgers.interrupt_wakeups, task_wakeups_monitor_interval * NSEC_PER_SEC);
8061 		ledger_enable_callback(ledger, task_ledgers.interrupt_wakeups);
8062 #endif /* CONFIG_NOMONITORS */
8063 	} else if (*flags & WAKEMON_DISABLE) {
8064 		/*
8065 		 * Caller wishes to disable wakeups monitor on the task.
8066 		 *
8067 		 * Remove the limit & callback on the wakeups ledger entry.
8068 		 */
8069 		ledger_disable_refill(ledger, task_ledgers.interrupt_wakeups);
8070 		ledger_disable_callback(ledger, task_ledgers.interrupt_wakeups);
8071 	}
8072 
8073 	task_unlock(task);
8074 	return KERN_SUCCESS;
8075 }
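/*
 * A query sketch for the monitor control above: with WAKEMON_GET_PARAMS set,
 * all other flag bits are ignored and the current configuration is returned
 * through the same out-parameters. Kept under #if 0; never compiled.
 */
#if 0
	uint32_t flags = WAKEMON_GET_PARAMS;
	int32_t rate_hz = 0;

	task_wakeups_monitor_ctl(task, &flags, &rate_hz);
	/* flags is now WAKEMON_ENABLE (possibly | WAKEMON_MAKE_FATAL) or WAKEMON_DISABLE. */
#endif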
8076 
8077 void
8078 task_wakeups_rate_exceeded(int warning, __unused const void *param0, __unused const void *param1)
8079 {
8080 	if (warning == 0) {
8081 		SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS();
8082 	}
8083 }
8084 
8085 TUNABLE(bool, enable_wakeup_reports, "enable_wakeup_reports", false); /* Enable wakeup reports. */
8086 
8087 void __attribute__((noinline))
8088 SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void)
8089 {
8090 	task_t                      task        = current_task();
8091 	int                         pid         = 0;
8092 	const char                  *procname   = "unknown";
8093 	boolean_t                   fatal;
8094 	kern_return_t               kr;
8095 #ifdef EXC_RESOURCE_MONITORS
8096 	mach_exception_data_type_t  code[EXCEPTION_CODE_MAX];
8097 #endif /* EXC_RESOURCE_MONITORS */
8098 	struct ledger_entry_info    lei;
8099 
8100 #ifdef MACH_BSD
8101 	pid = proc_selfpid();
8102 	if (get_bsdtask_info(task) != NULL) {
8103 		procname = proc_name_address(get_bsdtask_info(current_task()));
8104 	}
8105 #endif
8106 
8107 	ledger_get_entry_info(task->ledger, task_ledgers.interrupt_wakeups, &lei);
8108 
8109 	/*
8110 	 * Disable the exception notification so we don't overwhelm
8111 	 * the listener with an endless stream of redundant exceptions.
8112 	 * TODO: detect whether another thread is already reporting the violation.
8113 	 */
8114 	uint32_t flags = WAKEMON_DISABLE;
8115 	task_wakeups_monitor_ctl(task, &flags, NULL);
8116 
8117 	fatal = task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON;
8118 	trace_resource_violation(RMON_CPUWAKES_VIOLATED, &lei);
8119 	os_log(OS_LOG_DEFAULT, "process %s[%d] caught waking the CPU %llu times "
8120 	    "over ~%llu seconds, averaging %llu wakes / second and "
8121 	    "violating a %slimit of %llu wakes over %llu seconds.\n",
8122 	    procname, pid,
8123 	    lei.lei_balance, lei.lei_last_refill / NSEC_PER_SEC,
8124 	    lei.lei_last_refill == 0 ? 0 :
8125 	    (NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill),
8126 	    fatal ? "FATAL " : "",
8127 	    lei.lei_limit, lei.lei_refill_period / NSEC_PER_SEC);
8128 
8129 	if (enable_wakeup_reports) {
8130 		kr = send_resource_violation(send_cpu_wakes_violation, task, &lei,
8131 		    fatal ? kRNFatalLimitFlag : 0);
8132 		if (kr) {
8133 			printf("send_resource_violation(CPU wakes, ...): error %#x\n", kr);
8134 		}
8135 	}
8136 
8137 #ifdef EXC_RESOURCE_MONITORS
8138 	if (disable_exc_resource) {
8139 		printf("process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
8140 		    "suppressed by a boot-arg\n", procname, pid);
8141 		return;
8142 	}
8143 	if (disable_exc_resource_during_audio && audio_active) {
8144 		os_log(OS_LOG_DEFAULT, "process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
8145 		    "suppressed due to audio playback\n", procname, pid);
8146 		return;
8147 	}
8148 	if (lei.lei_last_refill == 0) {
8149 		os_log(OS_LOG_DEFAULT, "process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
8150 		    "suppressed due to lei.lei_last_refill = 0 \n", procname, pid);
		/*
		 * Editor's fix: actually suppress the exception here; the
		 * WAKEUPS_OBSERVED encoding below would otherwise divide by
		 * lei_last_refill == 0.
		 */
		return;
8151 	}
8152 
8153 	code[0] = code[1] = 0;
8154 	EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_WAKEUPS);
8155 	EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_WAKEUPS_MONITOR);
8156 	EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_PERMITTED(code[0],
8157 	    NSEC_PER_SEC * lei.lei_limit / lei.lei_refill_period);
8158 	EXC_RESOURCE_CPUMONITOR_ENCODE_OBSERVATION_INTERVAL(code[0],
8159 	    lei.lei_last_refill);
8160 	EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_OBSERVED(code[1],
8161 	    NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill);
8162 	exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
8163 #endif /* EXC_RESOURCE_MONITORS */
8164 
8165 	if (fatal) {
8166 		task_terminate_internal(task);
8167 	}
8168 }
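
/*
 * Worked example (editor's note) for the average logged above: with
 * lei_balance = 4500 wakes and lei_last_refill = 30 * NSEC_PER_SEC, the
 * observed rate is NSEC_PER_SEC * 4500 / (30 * NSEC_PER_SEC) = 150
 * wakes/second, which the message compares against lei_limit wakes per
 * lei_refill_period.
 */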
8169 
8170 static boolean_t
8171 global_update_logical_writes(int64_t io_delta, int64_t *global_write_count)
8172 {
8173 	int64_t old_count, new_count;
8174 	boolean_t needs_telemetry;
8175 
8176 	do {
8177 		new_count = old_count = *global_write_count;
8178 		new_count += io_delta;
8179 		if (new_count >= io_telemetry_limit) {
8180 			new_count = 0;
8181 			needs_telemetry = TRUE;
8182 		} else {
8183 			needs_telemetry = FALSE;
8184 		}
8185 	} while (!OSCompareAndSwap64(old_count, new_count, global_write_count));
8186 	return needs_telemetry;
8187 }
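
/*
 * Note (editor's sketch): the routine above is a compare-and-swap retry
 * loop; the shared counter resets to zero when it crosses
 * io_telemetry_limit, so exactly one racing caller observes each crossing.
 * For illustration, suppose io_telemetry_limit is 1 GB:
 *
 *	int64_t count = 0;
 *	global_update_logical_writes(1536LL << 20, &count); // TRUE,  count -> 0
 *	global_update_logical_writes(512LL << 20, &count);  // FALSE, count -> 512 MB
 */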
8188 
8189 void
8190 task_update_physical_writes(__unused task_t task, __unused task_physical_write_flavor_t flavor, __unused uint64_t io_size, __unused task_balance_flags_t flags)
8191 {
8192 #if CONFIG_PHYS_WRITE_ACCT
8193 	if (!io_size) {
8194 		return;
8195 	}
8196 
8197 	/*
8198 	 * task == NULL means that we have to update kernel_task ledgers
8199 	 */
8200 	if (!task) {
8201 		task = kernel_task;
8202 	}
8203 
8204 	KDBG((VMDBG_CODE(DBG_VM_PHYS_WRITE_ACCT)) | DBG_FUNC_NONE,
8205 	    task_pid(task), flavor, io_size, flags);
8206 	DTRACE_IO4(physical_writes, struct task *, task, task_physical_write_flavor_t, flavor, uint64_t, io_size, task_balance_flags_t, flags);
8207 
8208 	if (flags & TASK_BALANCE_CREDIT) {
8209 		if (flavor == TASK_PHYSICAL_WRITE_METADATA) {
8210 			OSAddAtomic64(io_size, (SInt64 *)&(task->task_fs_metadata_writes));
8211 			ledger_credit_nocheck(task->ledger, task_ledgers.fs_metadata_writes, io_size);
8212 		}
8213 	} else if (flags & TASK_BALANCE_DEBIT) {
8214 		if (flavor == TASK_PHYSICAL_WRITE_METADATA) {
8215 			OSAddAtomic64(-1 * io_size, (SInt64 *)&(task->task_fs_metadata_writes));
8216 			ledger_debit_nocheck(task->ledger, task_ledgers.fs_metadata_writes, io_size);
8217 		}
8218 	}
8219 #endif /* CONFIG_PHYS_WRITE_ACCT */
8220 }
8221 
8222 void
8223 task_update_logical_writes(task_t task, uint32_t io_size, int flags, void *vp)
8224 {
8225 	int64_t io_delta = 0;
8226 	int64_t * global_counter_to_update;
8227 	boolean_t needs_telemetry = FALSE;
8228 	boolean_t is_external_device = FALSE;
8229 	int ledger_to_update = 0;
8230 	struct task_writes_counters * writes_counters_to_update;
8231 
8232 	if ((!task) || (!io_size) || (!vp)) {
8233 		return;
8234 	}
8235 
8236 	KDBG((VMDBG_CODE(DBG_VM_DATA_WRITE)) | DBG_FUNC_NONE,
8237 	    task_pid(task), io_size, flags, (uintptr_t)VM_KERNEL_ADDRPERM(vp));
8238 	DTRACE_IO4(logical_writes, struct task *, task, uint32_t, io_size, int, flags, vnode *, vp);
8239 
8240 	// Is the drive backing this vnode internal or external to the system?
8241 	if (vnode_isonexternalstorage(vp) == false) {
8242 		global_counter_to_update = &global_logical_writes_count;
8243 		ledger_to_update = task_ledgers.logical_writes;
8244 		writes_counters_to_update = &task->task_writes_counters_internal;
8245 		is_external_device = FALSE;
8246 	} else {
8247 		global_counter_to_update = &global_logical_writes_to_external_count;
8248 		ledger_to_update = task_ledgers.logical_writes_to_external;
8249 		writes_counters_to_update = &task->task_writes_counters_external;
8250 		is_external_device = TRUE;
8251 	}
8252 
8253 	switch (flags) {
8254 	case TASK_WRITE_IMMEDIATE:
8255 		OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_immediate_writes));
8256 		ledger_credit(task->ledger, ledger_to_update, io_size);
8257 		if (!is_external_device) {
8258 			coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
8259 		}
8260 		break;
8261 	case TASK_WRITE_DEFERRED:
8262 		OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_deferred_writes));
8263 		ledger_credit(task->ledger, ledger_to_update, io_size);
8264 		if (!is_external_device) {
8265 			coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
8266 		}
8267 		break;
8268 	case TASK_WRITE_INVALIDATED:
8269 		OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_invalidated_writes));
8270 		ledger_debit(task->ledger, ledger_to_update, io_size);
8271 		if (!is_external_device) {
8272 			coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, FALSE, io_size);
8273 		}
8274 		break;
8275 	case TASK_WRITE_METADATA:
8276 		OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_metadata_writes));
8277 		ledger_credit(task->ledger, ledger_to_update, io_size);
8278 		if (!is_external_device) {
8279 			coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
8280 		}
8281 		break;
8282 	}
8283 
8284 	io_delta = (flags == TASK_WRITE_INVALIDATED) ? ((int64_t)io_size * -1ll) : ((int64_t)io_size);
8285 	if (io_telemetry_limit != 0) {
8286 		/* If io_telemetry_limit is 0, disable global updates and I/O telemetry */
8287 		needs_telemetry = global_update_logical_writes(io_delta, global_counter_to_update);
8288 		if (needs_telemetry && !is_external_device) {
8289 			act_set_io_telemetry_ast(current_thread());
8290 		}
8291 	}
8292 }
8293 
8294 /*
8295  * Control the I/O monitor for a task.
8296  */
8297 kern_return_t
8298 task_io_monitor_ctl(task_t task, uint32_t *flags)
8299 {
8300 	ledger_t ledger = task->ledger;
8301 
8302 	task_lock(task);
8303 	if (*flags & IOMON_ENABLE) {
8304 		/* Configure the physical I/O ledger */
8305 		ledger_set_limit(ledger, task_ledgers.physical_writes, (task_iomon_limit_mb * 1024 * 1024), 0);
8306 		ledger_set_period(ledger, task_ledgers.physical_writes, (task_iomon_interval_secs * NSEC_PER_SEC));
8307 	} else if (*flags & IOMON_DISABLE) {
8308 		/*
8309 		 * Caller wishes to disable I/O monitor on the task.
8310 		 */
8311 		ledger_disable_refill(ledger, task_ledgers.physical_writes);
8312 		ledger_disable_callback(ledger, task_ledgers.physical_writes);
8313 	}
8314 
8315 	task_unlock(task);
8316 	return KERN_SUCCESS;
8317 }
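
/*
 * Illustrative usage (editor's sketch): IOMON_ENABLE installs a ledger
 * limit of task_iomon_limit_mb megabytes of physical writes per
 * task_iomon_interval_secs seconds; IOMON_DISABLE removes both the refill
 * and the callback.
 *
 *	uint32_t flags = IOMON_ENABLE;
 *	task_io_monitor_ctl(task, &flags);
 */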
8318 
8319 void
8320 task_io_rate_exceeded(int warning, const void *param0, __unused const void *param1)
8321 {
8322 	if (warning == 0) {
8323 		SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO((int)(uintptr_t)param0);
8324 	}
8325 }
8326 
8327 void __attribute__((noinline))
8328 SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor)
8329 {
8330 	int                             pid = 0;
8331 	task_t                          task = current_task();
8332 #ifdef EXC_RESOURCE_MONITORS
8333 	mach_exception_data_type_t      code[EXCEPTION_CODE_MAX];
8334 #endif /* EXC_RESOURCE_MONITORS */
8335 	struct ledger_entry_info        lei = {};
8336 	kern_return_t                   kr;
8337 
8338 #ifdef MACH_BSD
8339 	pid = proc_selfpid();
8340 #endif
8341 	/*
8342 	 * Get the ledger entry info. We need to do this before disabling the exception
8343 	 * to get correct values for all fields.
8344 	 */
8345 	switch (flavor) {
8346 	case FLAVOR_IO_PHYSICAL_WRITES:
8347 		ledger_get_entry_info(task->ledger, task_ledgers.physical_writes, &lei);
8348 		break;
8349 	}
8350 
8351 
8352 	/*
8353 	 * Disable the exception notification so we don't overwhelm
8354 	 * the listener with an endless stream of redundant exceptions.
8355 	 * TODO: detect whether another thread is already reporting the violation.
8356 	 */
8357 	uint32_t flags = IOMON_DISABLE;
8358 	task_io_monitor_ctl(task, &flags);
8359 
8360 	if (flavor == FLAVOR_IO_LOGICAL_WRITES) {
8361 		trace_resource_violation(RMON_LOGWRITES_VIOLATED, &lei);
8362 	}
8363 	os_log(OS_LOG_DEFAULT, "process [%d] caught causing excessive I/O (flavor: %d). Task I/O: %lld MB. [Limit : %lld MB per %lld secs]\n",
8364 	    pid, flavor, (lei.lei_balance / (1024 * 1024)), (lei.lei_limit / (1024 * 1024)), (lei.lei_refill_period / NSEC_PER_SEC));
8365 
8366 	kr = send_resource_violation(send_disk_writes_violation, task, &lei, kRNFlagsNone);
8367 	if (kr) {
8368 		printf("send_resource_violation(disk_writes, ...): error %#x\n", kr);
8369 	}
8370 
8371 #ifdef EXC_RESOURCE_MONITORS
8372 	code[0] = code[1] = 0;
8373 	EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_IO);
8374 	EXC_RESOURCE_ENCODE_FLAVOR(code[0], flavor);
8375 	EXC_RESOURCE_IO_ENCODE_INTERVAL(code[0], (lei.lei_refill_period / NSEC_PER_SEC));
8376 	EXC_RESOURCE_IO_ENCODE_LIMIT(code[0], (lei.lei_limit / (1024 * 1024)));
8377 	EXC_RESOURCE_IO_ENCODE_OBSERVED(code[1], (lei.lei_balance / (1024 * 1024)));
8378 	exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
8379 #endif /* EXC_RESOURCE_MONITORS */
8380 }
8381 
8382 void
8383 task_port_space_ast(__unused task_t task)
8384 {
8385 	uint32_t current_size, soft_limit, hard_limit;
8386 	assert(task == current_task());
8387 	bool should_notify = ipc_space_check_table_size_limit(task->itk_space,
8388 	    &current_size, &soft_limit, &hard_limit);
8389 	if (should_notify) {
8390 		SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_MACH_PORTS(task, current_size, soft_limit, hard_limit);
8391 	}
8392 }
8393 
8394 #if CONFIG_PROC_RESOURCE_LIMITS
8395 static mach_port_t
8396 task_allocate_fatal_port(void)
8397 {
8398 	mach_port_t task_fatal_port = MACH_PORT_NULL;
8399 	task_id_token_t token;
8400 
8401 	kern_return_t kr = task_create_identity_token(current_task(), &token); /* Takes a reference on the token */
8402 	if (kr) {
8403 		return MACH_PORT_NULL;
8404 	}
8405 	task_fatal_port = ipc_kobject_alloc_port((ipc_kobject_t)token, IKOT_TASK_FATAL,
8406 	    IPC_KOBJECT_ALLOC_NSREQUEST | IPC_KOBJECT_ALLOC_MAKE_SEND);
8407 
8408 	task_id_token_set_port(token, task_fatal_port);
8409 
8410 	return task_fatal_port;
8411 }
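
/*
 * Note (editor's sketch): the fatal port wraps a task identity token, not
 * a task reference, and IPC_KOBJECT_ALLOC_NSREQUEST arms a no-senders
 * notification. When the last send right is dropped,
 * task_fatal_port_no_senders() below resolves the token back to the task
 * and kills it.
 */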
8412 
8413 static void
8414 task_fatal_port_no_senders(ipc_port_t port, __unused mach_port_mscount_t mscount)
8415 {
8416 	task_t task = TASK_NULL;
8417 	kern_return_t kr;
8418 
8419 	task_id_token_t token = ipc_kobject_get_stable(port, IKOT_TASK_FATAL);
8420 
8421 	assert(token != NULL);
8422 	if (token) {
8423 		kr = task_identity_token_get_task_grp(token, &task, TASK_GRP_KERNEL); /* takes a reference on task */
8424 		if (task) {
8425 			task_bsdtask_kill(task);
8426 			task_deallocate(task);
8427 		}
8428 		task_id_token_release(token); /* consumes ref given by notification */
8429 	}
8430 }
8431 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
8432 
8433 void __attribute__((noinline))
8434 SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_MACH_PORTS(task_t task, uint32_t current_size, uint32_t soft_limit, uint32_t hard_limit)
8435 {
8436 	int pid = 0;
8437 	char *procname = (char *) "unknown";
8438 	__unused kern_return_t kr;
8439 	__unused resource_notify_flags_t flags = kRNFlagsNone;
8440 	__unused uint32_t limit;
8441 	__unused mach_port_t task_fatal_port = MACH_PORT_NULL;
8442 	mach_exception_data_type_t      code[EXCEPTION_CODE_MAX];
8443 
8444 	pid = proc_selfpid();
8445 	if (get_bsdtask_info(task) != NULL) {
8446 		procname = proc_name_address(get_bsdtask_info(task));
8447 	}
8448 
8449 	/*
8450 	 * Only kernel_task and launchd are allowed to
8451 	 * have a really large IPC space.
8452 	 */
8453 	if (pid == 0 || pid == 1) {
8454 		return;
8455 	}
8456 
8457 	os_log(OS_LOG_DEFAULT, "process %s[%d] caught allocating too many mach ports. "
8458 	    "Num of ports allocated %u\n", procname, pid, current_size);
8459 
8460 	/* Abort the process if it has hit the system-wide limit for ipc port table size */
8461 	if (!hard_limit && !soft_limit) {
8462 		code[0] = code[1] = 0;
8463 		EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_PORTS);
8464 		EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_PORT_SPACE_FULL);
8465 		EXC_RESOURCE_PORTS_ENCODE_PORTS(code[0], current_size);
8466 
8467 		exception_info_t info = {
8468 			.os_reason = OS_REASON_PORT_SPACE,
8469 			.exception_type = EXC_RESOURCE,
8470 			.mx_code = code[0],
8471 			.mx_subcode = code[1]
8472 		};
8473 
8474 		exit_with_mach_exception(current_proc(), info, PX_DEBUG_NO_HONOR);
8475 		return;
8476 	}
8477 
8478 #if CONFIG_PROC_RESOURCE_LIMITS
8479 	if (hard_limit > 0) {
8480 		flags |= kRNHardLimitFlag;
8481 		limit = hard_limit;
8482 		task_fatal_port = task_allocate_fatal_port();
8483 		if (!task_fatal_port) {
8484 			os_log(OS_LOG_DEFAULT, "process %s[%d] Unable to create task token ident object", procname, pid);
8485 			task_bsdtask_kill(task);
8486 		}
8487 	} else {
8488 		flags |= kRNSoftLimitFlag;
8489 		limit = soft_limit;
8490 	}
8491 
8492 	kr = send_resource_violation_with_fatal_port(send_port_space_violation, task, (int64_t)current_size, (int64_t)limit, task_fatal_port, flags);
8493 	if (kr) {
8494 		os_log(OS_LOG_DEFAULT, "send_resource_violation(ports, ...): error %#x\n", kr);
8495 	}
8496 	if (task_fatal_port) {
8497 		ipc_port_release_send(task_fatal_port);
8498 	}
8499 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
8500 }
8501 
8502 #if CONFIG_PROC_RESOURCE_LIMITS
8503 void
8504 task_kqworkloop_ast(task_t task, int current_size, int soft_limit, int hard_limit)
8505 {
8506 	assert(task == current_task());
8507 	return SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_KQWORKLOOPS(task, current_size, soft_limit, hard_limit);
8508 }
8509 
8510 void __attribute__((noinline))
8511 SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_KQWORKLOOPS(task_t task, int current_size, int soft_limit, int hard_limit)
8512 {
8513 	int pid = 0;
8514 	char *procname = (char *) "unknown";
8515 #ifdef MACH_BSD
8516 	pid = proc_selfpid();
8517 	if (get_bsdtask_info(task) != NULL) {
8518 		procname = proc_name_address(get_bsdtask_info(task));
8519 	}
8520 #endif
8521 	if (pid == 0 || pid == 1) {
8522 		return;
8523 	}
8524 
8525 	os_log(OS_LOG_DEFAULT, "process %s[%d] caught allocating too many kqworkloops. "
8526 	    "Num of kqworkloops allocated %d\n", procname, pid, current_size);
8527 
8528 	int limit = 0;
8529 	resource_notify_flags_t flags = kRNFlagsNone;
8530 	mach_port_t task_fatal_port = MACH_PORT_NULL;
8531 	if (hard_limit) {
8532 		flags |= kRNHardLimitFlag;
8533 		limit = hard_limit;
8534 
8535 		task_fatal_port = task_allocate_fatal_port();
8536 		if (task_fatal_port == MACH_PORT_NULL) {
8537 			os_log(OS_LOG_DEFAULT, "process %s[%d] Unable to create task token ident object", procname, pid);
8538 			task_bsdtask_kill(task);
8539 		}
8540 	} else {
8541 		flags |= kRNSoftLimitFlag;
8542 		limit = soft_limit;
8543 	}
8544 
8545 	kern_return_t kr;
8546 	kr = send_resource_violation_with_fatal_port(send_kqworkloops_violation, task, (int64_t)current_size, (int64_t)limit, task_fatal_port, flags);
8547 	if (kr) {
8548 		os_log(OS_LOG_DEFAULT, "send_resource_violation_with_fatal_port(kqworkloops, ...): error %#x\n", kr);
8549 	}
8550 	if (task_fatal_port) {
8551 		ipc_port_release_send(task_fatal_port);
8552 	}
8553 }
8554 
8555 
8556 void
8557 task_filedesc_ast(__unused task_t task, __unused int current_size, __unused int soft_limit, __unused int hard_limit)
8558 {
8559 	assert(task == current_task());
8560 	SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task, current_size, soft_limit, hard_limit);
8561 }
8562 
8563 void __attribute__((noinline))
8564 SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task_t task, int current_size, int soft_limit, int hard_limit)
8565 {
8566 	int pid = 0;
8567 	char *procname = (char *) "unknown";
8568 	kern_return_t kr;
8569 	resource_notify_flags_t flags = kRNFlagsNone;
8570 	int limit;
8571 	mach_port_t task_fatal_port = MACH_PORT_NULL;
8572 
8573 #ifdef MACH_BSD
8574 	pid = proc_selfpid();
8575 	if (get_bsdtask_info(task) != NULL) {
8576 		procname = proc_name_address(get_bsdtask_info(task));
8577 	}
8578 #endif
8579 	/*
8580 	 * Only kernel_task and launchd are allowed to
8581 	 * have a really large IPC space.
8582 	 */
8583 	if (pid == 0 || pid == 1) {
8584 		return;
8585 	}
8586 
8587 	os_log(OS_LOG_DEFAULT, "process %s[%d] caught allocating too many file descriptors. "
8588 	    "Num of fds allocated %d\n", procname, pid, current_size);
8589 
8590 	if (hard_limit > 0) {
8591 		flags |= kRNHardLimitFlag;
8592 		limit = hard_limit;
8593 		task_fatal_port = task_allocate_fatal_port();
8594 		if (!task_fatal_port) {
8595 			os_log(OS_LOG_DEFAULT, "process %s[%d] Unable to create task token ident object", procname, pid);
8596 			task_bsdtask_kill(task);
8597 		}
8598 	} else {
8599 		flags |= kRNSoftLimitFlag;
8600 		limit = soft_limit;
8601 	}
8602 
8603 	kr = send_resource_violation_with_fatal_port(send_file_descriptors_violation, task, (int64_t)current_size, (int64_t)limit, task_fatal_port, flags);
8604 	if (kr) {
8605 		os_log(OS_LOG_DEFAULT, "send_resource_violation_with_fatal_port(filedesc, ...): error %#x\n", kr);
8606 	}
8607 	if (task_fatal_port) {
8608 		ipc_port_release_send(task_fatal_port);
8609 	}
8610 }
8611 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
8612 
8613 /* Placeholders for the task set/get voucher interfaces */
8614 kern_return_t
8615 task_get_mach_voucher(
8616 	task_t                  task,
8617 	mach_voucher_selector_t __unused which,
8618 	ipc_voucher_t           *voucher)
8619 {
8620 	if (TASK_NULL == task) {
8621 		return KERN_INVALID_TASK;
8622 	}
8623 
8624 	*voucher = NULL;
8625 	return KERN_SUCCESS;
8626 }
8627 
8628 kern_return_t
8629 task_set_mach_voucher(
8630 	task_t                  task,
8631 	ipc_voucher_t           __unused voucher)
8632 {
8633 	if (TASK_NULL == task) {
8634 		return KERN_INVALID_TASK;
8635 	}
8636 
8637 	return KERN_SUCCESS;
8638 }
8639 
8640 kern_return_t
8641 task_swap_mach_voucher(
8642 	__unused task_t         task,
8643 	__unused ipc_voucher_t  new_voucher,
8644 	ipc_voucher_t          *in_out_old_voucher)
8645 {
8646 	/*
8647 	 * Currently this function is only called from a MIG generated
8648 	 * routine which doesn't release the reference on the voucher
8649 	 * addressed by in_out_old_voucher. To avoid leaking this reference,
8650 	 * a call to release it has been added here.
8651 	 */
8652 	ipc_voucher_release(*in_out_old_voucher);
8653 	OS_ANALYZER_SUPPRESS("81787115") return KERN_NOT_SUPPORTED;
8654 }
8655 
8656 void
8657 task_set_gpu_denied(task_t task, boolean_t denied)
8658 {
8659 	task_lock(task);
8660 
8661 	if (denied) {
8662 		task->t_flags |= TF_GPU_DENIED;
8663 	} else {
8664 		task->t_flags &= ~TF_GPU_DENIED;
8665 	}
8666 
8667 	task_unlock(task);
8668 }
8669 
8670 boolean_t
8671 task_is_gpu_denied(task_t task)
8672 {
8673 	/* We don't need the lock to read this flag */
8674 	return (task->t_flags & TF_GPU_DENIED) ? TRUE : FALSE;
8675 }
8676 
8677 /*
8678  * Task policy termination uses this path to clear the bit the final time
8679  * during the termination flow, and the TASK_POLICY_TERMINATED bit guarantees
8680  * that it won't be changed again on a terminated task.
8681  */
8682 bool
8683 task_set_game_mode_locked(task_t task, bool enabled)
8684 {
8685 	task_lock_assert_owned(task);
8686 
8687 	if (enabled) {
8688 		assert(proc_get_effective_task_policy(task, TASK_POLICY_TERMINATED) == 0);
8689 	}
8690 
8691 	bool previously_enabled = task_get_game_mode(task);
8692 	bool needs_update = false;
8693 	uint32_t new_count = 0;
8694 
8695 	if (enabled) {
8696 		task->t_flags |= TF_GAME_MODE;
8697 	} else {
8698 		task->t_flags &= ~TF_GAME_MODE;
8699 	}
8700 
8701 	if (enabled && !previously_enabled) {
8702 		if (task_coalition_adjust_game_mode_count(task, 1, &new_count) && (new_count == 1)) {
8703 			needs_update = true;
8704 		}
8705 	} else if (!enabled && previously_enabled) {
8706 		if (task_coalition_adjust_game_mode_count(task, -1, &new_count) && (new_count == 0)) {
8707 			needs_update = true;
8708 		}
8709 	}
8710 
8711 	return needs_update;
8712 }
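
/*
 * Note (editor's sketch): the thread-group update is edge-triggered on the
 * coalition-wide count; assuming t1 and t2 share a coalition:
 *
 *	task_set_game_mode_locked(t1, true);   // count 0 -> 1: update needed
 *	task_set_game_mode_locked(t2, true);   // count 1 -> 2: no update
 *	task_set_game_mode_locked(t1, false);  // count 2 -> 1: no update
 *	task_set_game_mode_locked(t2, false);  // count 1 -> 0: update needed
 */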
8713 
8714 void
8715 task_set_game_mode(task_t task, bool enabled)
8716 {
8717 	bool needs_update = false;
8718 
8719 	task_lock(task);
8720 
8721 	/* After termination, further updates are no longer effective */
8722 	if (proc_get_effective_task_policy(task, TASK_POLICY_TERMINATED) == 0) {
8723 		needs_update = task_set_game_mode_locked(task, enabled);
8724 	}
8725 
8726 	task_unlock(task);
8727 
8728 #if CONFIG_THREAD_GROUPS
8729 	if (needs_update) {
8730 		task_coalition_thread_group_game_mode_update(task);
8731 	}
8732 #endif /* CONFIG_THREAD_GROUPS */
8733 }
8734 
8735 bool
8736 task_get_game_mode(task_t task)
8737 {
8738 	/* We don't need the lock to read this flag */
8739 	return task->t_flags & TF_GAME_MODE;
8740 }
8741 
8742 bool
8743 task_set_carplay_mode_locked(task_t task, bool enabled)
8744 {
8745 	task_lock_assert_owned(task);
8746 
8747 	if (enabled) {
8748 		assert(proc_get_effective_task_policy(task, TASK_POLICY_TERMINATED) == 0);
8749 	}
8750 
8751 	bool previously_enabled = task_get_carplay_mode(task);
8752 	bool needs_update = false;
8753 	uint32_t new_count = 0;
8754 
8755 	if (enabled) {
8756 		task->t_flags |= TF_CARPLAY_MODE;
8757 	} else {
8758 		task->t_flags &= ~TF_CARPLAY_MODE;
8759 	}
8760 
8761 	if (enabled && !previously_enabled) {
8762 		if (task_coalition_adjust_carplay_mode_count(task, 1, &new_count) && (new_count == 1)) {
8763 			needs_update = true;
8764 		}
8765 	} else if (!enabled && previously_enabled) {
8766 		if (task_coalition_adjust_carplay_mode_count(task, -1, &new_count) && (new_count == 0)) {
8767 			needs_update = true;
8768 		}
8769 	}
8770 	return needs_update;
8771 }
8772 
8773 void
8774 task_set_carplay_mode(task_t task, bool enabled)
8775 {
8776 	bool needs_update = false;
8777 
8778 	task_lock(task);
8779 
8780 	/* After termination, further updates are no longer effective */
8781 	if (proc_get_effective_task_policy(task, TASK_POLICY_TERMINATED) == 0) {
8782 		needs_update = task_set_carplay_mode_locked(task, enabled);
8783 	}
8784 
8785 	task_unlock(task);
8786 
8787 #if CONFIG_THREAD_GROUPS
8788 	if (needs_update) {
8789 		task_coalition_thread_group_carplay_mode_update(task);
8790 	}
8791 #endif /* CONFIG_THREAD_GROUPS */
8792 }
8793 
8794 bool
8795 task_get_carplay_mode(task_t task)
8796 {
8797 	/* We don't need the lock to read this flag */
8798 	return task->t_flags & TF_CARPLAY_MODE;
8799 }
8800 
8801 uint64_t
8802 get_task_memory_region_count(task_t task)
8803 {
8804 	vm_map_t map;
8805 	map = (task == kernel_task) ? kernel_map: task->map;
8806 	return (uint64_t)get_map_nentries(map);
8807 }
8808 
8809 static void
8810 kdebug_trace_dyld_internal(uint32_t base_code,
8811     struct dyld_kernel_image_info *info)
8812 {
8813 	static_assert(sizeof(info->uuid) >= 16);
8814 
8815 #if defined(__LP64__)
8816 	uint64_t *uuid = (uint64_t *)&(info->uuid);
8817 
8818 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8819 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code), uuid[0],
8820 	    uuid[1], info->load_addr,
8821 	    (uint64_t)info->fsid.val[0] | ((uint64_t)info->fsid.val[1] << 32),
8822 	    0);
8823 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8824 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 1),
8825 	    (uint64_t)info->fsobjid.fid_objno |
8826 	    ((uint64_t)info->fsobjid.fid_generation << 32),
8827 	    0, 0, 0, 0);
8828 #else /* defined(__LP64__) */
8829 	uint32_t *uuid = (uint32_t *)&(info->uuid);
8830 
8831 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8832 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 2), uuid[0],
8833 	    uuid[1], uuid[2], uuid[3], 0);
8834 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8835 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 3),
8836 	    (uint32_t)info->load_addr, info->fsid.val[0], info->fsid.val[1],
8837 	    info->fsobjid.fid_objno, 0);
8838 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8839 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 4),
8840 	    info->fsobjid.fid_generation, 0, 0, 0, 0);
8841 #endif /* !defined(__LP64__) */
8842 }
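
/*
 * Worked example (editor's note, LP64 path): the two 32-bit fsid words
 * pack low-word-first into a single tracepoint argument, so
 * fsid.val = { 0x1234, 0x5678 } emits
 * (uint64_t)0x1234 | ((uint64_t)0x5678 << 32) = 0x0000567800001234;
 * fsobjid's object number and generation pack the same way in the
 * follow-on event.
 */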
8843 
8844 static kern_return_t
8845 kdebug_trace_dyld(task_t task, uint32_t base_code,
8846     vm_map_copy_t infos_copy, mach_msg_type_number_t infos_len)
8847 {
8848 	kern_return_t kr;
8849 	dyld_kernel_image_info_array_t infos;
8850 	vm_map_offset_t map_data;
8851 	vm_offset_t data;
8852 
8853 	if (!infos_copy) {
8854 		return KERN_INVALID_ADDRESS;
8855 	}
8856 
8857 	if (!kdebug_enable ||
8858 	    !kdebug_debugid_enabled(KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, 0))) {
8859 		vm_map_copy_discard(infos_copy);
8860 		return KERN_SUCCESS;
8861 	}
8862 
8863 	if (task == NULL || task != current_task()) {
8864 		return KERN_INVALID_TASK;
8865 	}
8866 
8867 	kr = vm_map_copyout(ipc_kernel_map, &map_data, (vm_map_copy_t)infos_copy);
8868 	if (kr != KERN_SUCCESS) {
8869 		return kr;
8870 	}
8871 
8872 	infos = CAST_DOWN(dyld_kernel_image_info_array_t, map_data);
8873 
8874 	for (mach_msg_type_number_t i = 0; i < infos_len; i++) {
8875 		kdebug_trace_dyld_internal(base_code, &(infos[i]));
8876 	}
8877 
8878 	data = CAST_DOWN(vm_offset_t, map_data);
8879 	mach_vm_deallocate(ipc_kernel_map, data, infos_len * sizeof(infos[0]));
8880 	return KERN_SUCCESS;
8881 }
8882 
8883 kern_return_t
8884 task_register_dyld_image_infos(task_t task,
8885     dyld_kernel_image_info_array_t infos_copy,
8886     mach_msg_type_number_t infos_len)
8887 {
8888 	return kdebug_trace_dyld(task, DBG_DYLD_UUID_MAP_A,
8889 	           (vm_map_copy_t)infos_copy, infos_len);
8890 }
8891 
8892 kern_return_t
8893 task_unregister_dyld_image_infos(task_t task,
8894     dyld_kernel_image_info_array_t infos_copy,
8895     mach_msg_type_number_t infos_len)
8896 {
8897 	return kdebug_trace_dyld(task, DBG_DYLD_UUID_UNMAP_A,
8898 	           (vm_map_copy_t)infos_copy, infos_len);
8899 }
8900 
8901 kern_return_t
8902 task_get_dyld_image_infos(__unused task_t task,
8903     __unused dyld_kernel_image_info_array_t * dyld_images,
8904     __unused mach_msg_type_number_t * dyld_imagesCnt)
8905 {
8906 	return KERN_NOT_SUPPORTED;
8907 }
8908 
8909 kern_return_t
8910 task_register_dyld_shared_cache_image_info(task_t task,
8911     dyld_kernel_image_info_t cache_img,
8912     __unused boolean_t no_cache,
8913     __unused boolean_t private_cache)
8914 {
8915 	if (task == NULL || task != current_task()) {
8916 		return KERN_INVALID_TASK;
8917 	}
8918 
8919 	kdebug_trace_dyld_internal(DBG_DYLD_UUID_SHARED_CACHE_A, &cache_img);
8920 	return KERN_SUCCESS;
8921 }
8922 
8923 kern_return_t
8924 task_register_dyld_set_dyld_state(__unused task_t task,
8925     __unused uint8_t dyld_state)
8926 {
8927 	return KERN_NOT_SUPPORTED;
8928 }
8929 
8930 kern_return_t
8931 task_register_dyld_get_process_state(__unused task_t task,
8932     __unused dyld_kernel_process_info_t * dyld_process_state)
8933 {
8934 	return KERN_NOT_SUPPORTED;
8935 }
8936 
8937 kern_return_t
8938 task_inspect(task_inspect_t task_insp, task_inspect_flavor_t flavor,
8939     task_inspect_info_t info_out, mach_msg_type_number_t *size_in_out)
8940 {
8941 #if CONFIG_PERVASIVE_CPI
8942 	task_t task = (task_t)task_insp;
8943 	kern_return_t kr = KERN_SUCCESS;
8944 	mach_msg_type_number_t size;
8945 
8946 	if (task == TASK_NULL) {
8947 		return KERN_INVALID_ARGUMENT;
8948 	}
8949 
8950 	size = *size_in_out;
8951 
8952 	switch (flavor) {
8953 	case TASK_INSPECT_BASIC_COUNTS: {
8954 		struct task_inspect_basic_counts *bc =
8955 		    (struct task_inspect_basic_counts *)info_out;
8956 		struct recount_usage stats = { 0 };
8957 		if (size < TASK_INSPECT_BASIC_COUNTS_COUNT) {
8958 			kr = KERN_INVALID_ARGUMENT;
8959 			break;
8960 		}
8961 
8962 		recount_sum(&recount_task_plan, task->tk_recount.rtk_lifetime, &stats);
8963 		bc->instructions = recount_usage_instructions(&stats);
8964 		bc->cycles = recount_usage_cycles(&stats);
8965 		size = TASK_INSPECT_BASIC_COUNTS_COUNT;
8966 		break;
8967 	}
8968 	default:
8969 		kr = KERN_INVALID_ARGUMENT;
8970 		break;
8971 	}
8972 
8973 	if (kr == KERN_SUCCESS) {
8974 		*size_in_out = size;
8975 	}
8976 	return kr;
8977 #else /* CONFIG_PERVASIVE_CPI */
8978 #pragma unused(task_insp, flavor, info_out, size_in_out)
8979 	return KERN_NOT_SUPPORTED;
8980 #endif /* !CONFIG_PERVASIVE_CPI */
8981 }
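
/*
 * Illustrative usage (editor's sketch, CONFIG_PERVASIVE_CPI kernels only):
 *
 *	struct task_inspect_basic_counts bc = { 0 };
 *	mach_msg_type_number_t count = TASK_INSPECT_BASIC_COUNTS_COUNT;
 *	kern_return_t kr = task_inspect(task, TASK_INSPECT_BASIC_COUNTS,
 *	    (task_inspect_info_t)&bc, &count);
 *	// on success, bc.instructions and bc.cycles hold the task's
 *	// lifetime totals.
 */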
8982 
8983 #if CONFIG_SECLUDED_MEMORY
8984 int num_tasks_can_use_secluded_mem = 0;
8985 
8986 void
8987 task_set_can_use_secluded_mem(
8988 	task_t          task,
8989 	boolean_t       can_use_secluded_mem)
8990 {
8991 	if (!task->task_could_use_secluded_mem) {
8992 		return;
8993 	}
8994 	task_lock(task);
8995 	task_set_can_use_secluded_mem_locked(task, can_use_secluded_mem);
8996 	task_unlock(task);
8997 }
8998 
8999 void
9000 task_set_can_use_secluded_mem_locked(
9001 	task_t          task,
9002 	boolean_t       can_use_secluded_mem)
9003 {
9004 	assert(task->task_could_use_secluded_mem);
9005 	if (can_use_secluded_mem &&
9006 	    secluded_for_apps &&         /* global boot-arg */
9007 	    !task->task_can_use_secluded_mem) {
9008 		assert(num_tasks_can_use_secluded_mem >= 0);
9009 		OSAddAtomic(+1,
9010 		    (volatile SInt32 *)&num_tasks_can_use_secluded_mem);
9011 		task->task_can_use_secluded_mem = TRUE;
9012 	} else if (!can_use_secluded_mem &&
9013 	    task->task_can_use_secluded_mem) {
9014 		assert(num_tasks_can_use_secluded_mem > 0);
9015 		OSAddAtomic(-1,
9016 		    (volatile SInt32 *)&num_tasks_can_use_secluded_mem);
9017 		task->task_can_use_secluded_mem = FALSE;
9018 	}
9019 }
9020 
9021 void
9022 task_set_could_use_secluded_mem(
9023 	task_t          task,
9024 	boolean_t       could_use_secluded_mem)
9025 {
9026 	task->task_could_use_secluded_mem = !!could_use_secluded_mem;
9027 }
9028 
9029 void
9030 task_set_could_also_use_secluded_mem(
9031 	task_t          task,
9032 	boolean_t       could_also_use_secluded_mem)
9033 {
9034 	task->task_could_also_use_secluded_mem = !!could_also_use_secluded_mem;
9035 }
9036 
9037 boolean_t
9038 task_can_use_secluded_mem(
9039 	task_t          task,
9040 	boolean_t       is_alloc)
9041 {
9042 	if (task->task_can_use_secluded_mem) {
9043 		assert(task->task_could_use_secluded_mem);
9044 		assert(num_tasks_can_use_secluded_mem > 0);
9045 		return TRUE;
9046 	}
9047 	if (task->task_could_also_use_secluded_mem &&
9048 	    num_tasks_can_use_secluded_mem > 0) {
9049 		assert(num_tasks_can_use_secluded_mem > 0);
9050 		return TRUE;
9051 	}
9052 
9053 	/*
9054 	 * If a single task is using more than some large amount of
9055 	 * memory (i.e. secluded_shutoff_trigger) and is approaching
9056 	 * its task limit, allow it to dip into secluded and begin
9057 	 * suppression of rebuilding secluded memory until that task exits.
9058 	 */
9059 	if (is_alloc && secluded_shutoff_trigger != 0) {
9060 		uint64_t phys_used = get_task_phys_footprint(task);
9061 		uint64_t limit = get_task_phys_footprint_limit(task);
9062 		if (phys_used > secluded_shutoff_trigger &&
9063 		    limit > secluded_shutoff_trigger &&
9064 		    phys_used > limit - secluded_shutoff_headroom) {
9065 			start_secluded_suppression(task);
9066 			return TRUE;
9067 		}
9068 	}
9069 
9070 	return FALSE;
9071 }
9072 
9073 boolean_t
9074 task_could_use_secluded_mem(
9075 	task_t  task)
9076 {
9077 	return task->task_could_use_secluded_mem;
9078 }
9079 
9080 boolean_t
9081 task_could_also_use_secluded_mem(
9082 	task_t  task)
9083 {
9084 	return task->task_could_also_use_secluded_mem;
9085 }
9086 #endif /* CONFIG_SECLUDED_MEMORY */
9087 
9088 queue_head_t *
9089 task_io_user_clients(task_t task)
9090 {
9091 	return &task->io_user_clients;
9092 }
9093 
9094 void
9095 task_set_message_app_suspended(task_t task, boolean_t enable)
9096 {
9097 	task->message_app_suspended = enable;
9098 }
9099 
9100 void
9101 task_copy_fields_for_exec(task_t dst_task, task_t src_task)
9102 {
9103 	dst_task->vtimers = src_task->vtimers;
9104 }
9105 
9106 #if DEVELOPMENT || DEBUG
9107 int vm_region_footprint = 0;
9108 #endif /* DEVELOPMENT || DEBUG */
9109 
9110 boolean_t
9111 task_self_region_footprint(void)
9112 {
9113 #if DEVELOPMENT || DEBUG
9114 	if (vm_region_footprint) {
9115 		/* system-wide override */
9116 		return TRUE;
9117 	}
9118 #endif /* DEVELOPMENT || DEBUG */
9119 	return current_task()->task_region_footprint;
9120 }
9121 
9122 void
9123 task_self_region_footprint_set(
9124 	boolean_t newval)
9125 {
9126 	task_t  curtask;
9127 
9128 	curtask = current_task();
9129 	task_lock(curtask);
9130 	if (newval) {
9131 		curtask->task_region_footprint = TRUE;
9132 	} else {
9133 		curtask->task_region_footprint = FALSE;
9134 	}
9135 	task_unlock(curtask);
9136 }
9137 
9138 int
9139 task_self_region_info_flags(void)
9140 {
9141 	return current_task()->task_region_info_flags;
9142 }
9143 
9144 kern_return_t
9145 task_self_region_info_flags_set(
9146 	int newval)
9147 {
9148 	task_t  curtask;
9149 	kern_return_t err = KERN_SUCCESS;
9150 
9151 	curtask = current_task();
9152 	task_lock(curtask);
9153 	curtask->task_region_info_flags = newval;
9154 	/* check for overflow (flag added without increasing bitfield size?) */
9155 	if (curtask->task_region_info_flags != newval) {
9156 		err = KERN_INVALID_ARGUMENT;
9157 	}
9158 	task_unlock(curtask);
9159 
9160 	return err;
9161 }
9162 
9163 void
9164 task_set_darkwake_mode(task_t task, boolean_t set_mode)
9165 {
9166 	assert(task);
9167 
9168 	task_lock(task);
9169 
9170 	if (set_mode) {
9171 		task->t_flags |= TF_DARKWAKE_MODE;
9172 	} else {
9173 		task->t_flags &= ~(TF_DARKWAKE_MODE);
9174 	}
9175 
9176 	task_unlock(task);
9177 }
9178 
9179 boolean_t
9180 task_get_darkwake_mode(task_t task)
9181 {
9182 	assert(task);
9183 	return (task->t_flags & TF_DARKWAKE_MODE) != 0;
9184 }
9185 
9186 /*
9187  * Set default behavior for task's control port and EXC_GUARD variants that have
9188  * settable behavior.
9189  *
9190  * Platform binaries typically have one behavior, third parties another -
9191  * but there are special exceptions we may need to account for.
9192  */
9193 void
9194 task_set_exc_guard_ctrl_port_default(
9195 	task_t task,
9196 	thread_t main_thread,
9197 	const char *name,
9198 	unsigned int namelen,
9199 	boolean_t is_simulated,
9200 	uint32_t platform,
9201 	uint32_t sdk)
9202 {
9203 	task_control_port_options_t opts = TASK_CONTROL_PORT_OPTIONS_NONE;
9204 
9205 	if (task_is_hardened_binary(task)) {
9206 		/* set exc guard default behavior for hardened binaries */
9207 		task->task_exc_guard = (task_exc_guard_default & TASK_EXC_GUARD_ALL);
9208 
9209 		if (1 == task_pid(task)) {
9210 			/* special flags for inittask - deliver every instance as a corpse */
9211 			task->task_exc_guard = _TASK_EXC_GUARD_ALL_CORPSE;
9212 		} else if (task_exc_guard_default & TASK_EXC_GUARD_HONOR_NAMED_DEFAULTS) {
9213 			/* honor by-name default setting overrides */
9214 
9215 			int count = sizeof(task_exc_guard_named_defaults) / sizeof(struct task_exc_guard_named_default);
9216 
9217 			for (int i = 0; i < count; i++) {
9218 				const struct task_exc_guard_named_default *named_default =
9219 				    &task_exc_guard_named_defaults[i];
9220 				if (strncmp(named_default->name, name, namelen) == 0 &&
9221 				    strlen(named_default->name) == namelen) {
9222 					task->task_exc_guard = named_default->behavior;
9223 					break;
9224 				}
9225 			}
9226 		}
9227 
9228 		/* set control port options for 1p code, inherited from parent task by default */
9229 		opts = ipc_control_port_options & ICP_OPTIONS_1P_MASK;
9230 	} else {
9231 		/* set exc guard default behavior for third-party code */
9232 		task->task_exc_guard = ((task_exc_guard_default >> TASK_EXC_GUARD_THIRD_PARTY_DEFAULT_SHIFT) & TASK_EXC_GUARD_ALL);
9233 		/* set control port options for 3p code, inherited from parent task by default */
9234 		opts = (ipc_control_port_options & ICP_OPTIONS_3P_MASK) >> ICP_OPTIONS_3P_SHIFT;
9235 	}
9236 
9237 	if (is_simulated) {
9238 		/* If simulated and built against pre-iOS 15 SDK, disable all EXC_GUARD */
9239 		if ((platform == PLATFORM_IOSSIMULATOR && sdk < 0xf0000) ||
9240 		    (platform == PLATFORM_TVOSSIMULATOR && sdk < 0xf0000) ||
9241 		    (platform == PLATFORM_WATCHOSSIMULATOR && sdk < 0x80000)) {
9242 			task->task_exc_guard = TASK_EXC_GUARD_NONE;
9243 		}
9244 		/* Disable protection for control ports for simulated binaries */
9245 		opts = TASK_CONTROL_PORT_OPTIONS_NONE;
9246 	}
9247 
9248 
9249 	task_set_control_port_options(task, opts);
9250 
9251 	task_set_immovable_pinned(task);
9252 	main_thread_set_immovable_pinned(main_thread);
9253 }
9254 
9255 kern_return_t
9256 task_get_exc_guard_behavior(
9257 	task_t task,
9258 	task_exc_guard_behavior_t *behaviorp)
9259 {
9260 	if (task == TASK_NULL) {
9261 		return KERN_INVALID_TASK;
9262 	}
9263 	*behaviorp = task->task_exc_guard;
9264 	return KERN_SUCCESS;
9265 }
9266 
9267 kern_return_t
9268 task_set_exc_guard_behavior(
9269 	task_t task,
9270 	task_exc_guard_behavior_t new_behavior)
9271 {
9272 	if (task == TASK_NULL) {
9273 		return KERN_INVALID_TASK;
9274 	}
9275 	if (new_behavior & ~TASK_EXC_GUARD_ALL) {
9276 		return KERN_INVALID_VALUE;
9277 	}
9278 
9279 	/* limit setting to that allowed for this config */
9280 	new_behavior = new_behavior & task_exc_guard_config_mask;
9281 
9282 #if !defined (DEBUG) && !defined (DEVELOPMENT)
9283 	/* On release kernels, only allow _upgrading_ exc guard behavior */
9284 	task_exc_guard_behavior_t cur_behavior;
9285 
9286 	os_atomic_rmw_loop(&task->task_exc_guard, cur_behavior, new_behavior, relaxed, {
9287 		if ((cur_behavior & task_exc_guard_no_unset_mask) & ~(new_behavior & task_exc_guard_no_unset_mask)) {
9288 		        os_atomic_rmw_loop_give_up(return KERN_DENIED);
9289 		}
9290 
9291 		if ((new_behavior & task_exc_guard_no_set_mask) & ~(cur_behavior & task_exc_guard_no_set_mask)) {
9292 		        os_atomic_rmw_loop_give_up(return KERN_DENIED);
9293 		}
9294 
9295 		/* no restrictions on CORPSE bit */
9296 	});
9297 #else
9298 	task->task_exc_guard = new_behavior;
9299 #endif
9300 	return KERN_SUCCESS;
9301 }
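
/*
 * Note (editor's sketch): on RELEASE kernels the rmw loop above makes the
 * setting effectively monotonic; an attempt to clear a bit in
 * task_exc_guard_no_unset_mask, or to newly set a bit in
 * task_exc_guard_no_set_mask, returns KERN_DENIED, while the CORPSE bit
 * moves freely in both directions.
 */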
9302 
9303 kern_return_t
9304 task_set_corpse_forking_behavior(task_t task, task_corpse_forking_behavior_t behavior)
9305 {
9306 #if DEVELOPMENT || DEBUG
9307 	if (task == TASK_NULL) {
9308 		return KERN_INVALID_TASK;
9309 	}
9310 
9311 	task_lock(task);
9312 	if (behavior & TASK_CORPSE_FORKING_DISABLED_MEM_DIAG) {
9313 		task->t_flags |= TF_NO_CORPSE_FORKING;
9314 	} else {
9315 		task->t_flags &= ~TF_NO_CORPSE_FORKING;
9316 	}
9317 	task_unlock(task);
9318 
9319 	return KERN_SUCCESS;
9320 #else
9321 	(void)task;
9322 	(void)behavior;
9323 	return KERN_NOT_SUPPORTED;
9324 #endif
9325 }
9326 
9327 boolean_t
9328 task_corpse_forking_disabled(task_t task)
9329 {
9330 	boolean_t disabled = FALSE;
9331 
9332 	task_lock(task);
9333 	disabled = (task->t_flags & TF_NO_CORPSE_FORKING);
9334 	task_unlock(task);
9335 
9336 	return disabled;
9337 }
9338 
9339 #if __arm64__
9340 extern int legacy_footprint_entitlement_mode;
9341 extern void memorystatus_act_on_legacy_footprint_entitlement(struct proc *, boolean_t);
9342 extern void memorystatus_act_on_ios13extended_footprint_entitlement(struct proc *);
9343 
9344 
9345 void
9346 task_set_legacy_footprint(
9347 	task_t task)
9348 {
9349 	task_lock(task);
9350 	task->task_legacy_footprint = TRUE;
9351 	task_unlock(task);
9352 }
9353 
9354 void
9355 task_set_extra_footprint_limit(
9356 	task_t task)
9357 {
9358 	if (task->task_extra_footprint_limit) {
9359 		return;
9360 	}
9361 	task_lock(task);
9362 	if (task->task_extra_footprint_limit) {
9363 		task_unlock(task);
9364 		return;
9365 	}
9366 	task->task_extra_footprint_limit = TRUE;
9367 	task_unlock(task);
9368 	memorystatus_act_on_legacy_footprint_entitlement(get_bsdtask_info(task), TRUE);
9369 }
9370 
9371 void
9372 task_set_ios13extended_footprint_limit(
9373 	task_t task)
9374 {
9375 	if (task->task_ios13extended_footprint_limit) {
9376 		return;
9377 	}
9378 	task_lock(task);
9379 	if (task->task_ios13extended_footprint_limit) {
9380 		task_unlock(task);
9381 		return;
9382 	}
9383 	task->task_ios13extended_footprint_limit = TRUE;
9384 	task_unlock(task);
9385 	memorystatus_act_on_ios13extended_footprint_entitlement(get_bsdtask_info(task));
9386 }
9387 #endif /* __arm64__ */
9388 
9389 static inline ledger_amount_t
9390 task_ledger_get_balance(
9391 	ledger_t        ledger,
9392 	int             ledger_idx)
9393 {
9394 	ledger_amount_t amount;
9395 	amount = 0;
9396 	ledger_get_balance(ledger, ledger_idx, &amount);
9397 	return amount;
9398 }
9399 
9400 /*
9401  * Gather the amount of memory counted in a task's footprint due to
9402  * being in a specific set of ledgers.
9403  */
9404 void
9405 task_ledgers_footprint(
9406 	ledger_t        ledger,
9407 	ledger_amount_t *ledger_resident,
9408 	ledger_amount_t *ledger_compressed)
9409 {
9410 	*ledger_resident = 0;
9411 	*ledger_compressed = 0;
9412 
9413 	/* purgeable non-volatile memory */
9414 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.purgeable_nonvolatile);
9415 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.purgeable_nonvolatile_compressed);
9416 
9417 	/* "default" tagged memory */
9418 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.tagged_footprint);
9419 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.tagged_footprint_compressed);
9420 
9421 	/* "network" currently never counts in the footprint... */
9422 
9423 	/* "media" tagged memory */
9424 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.media_footprint);
9425 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.media_footprint_compressed);
9426 
9427 	/* "graphics" tagged memory */
9428 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.graphics_footprint);
9429 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.graphics_footprint_compressed);
9430 
9431 	/* "neural" tagged memory */
9432 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.neural_footprint);
9433 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.neural_footprint_compressed);
9434 }
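
/*
 * Illustrative usage (editor's sketch):
 *
 *	ledger_amount_t resident, compressed;
 *	task_ledgers_footprint(task->ledger, &resident, &compressed);
 *	// resident/compressed sum purgeable non-volatile memory plus the
 *	// tagged (default, media, graphics, neural) footprints.
 */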
9435 
9436 #if CONFIG_MEMORYSTATUS
9437 /*
9438  * Credit any outstanding task dirty time to the ledger.
9439  * memstat_dirty_start is pushed forward to prevent any possibility of double
9440  * counting, making it safe to call this as often as necessary to ensure that
9441  * anyone reading the ledger gets up-to-date information.
9442  */
9443 void
9444 task_ledger_settle_dirty_time(task_t t)
9445 {
9446 	task_lock(t);
9447 
9448 	uint64_t start = t->memstat_dirty_start;
9449 	if (start) {
9450 		uint64_t now = mach_absolute_time();
9451 
9452 		uint64_t duration;
9453 		absolutetime_to_nanoseconds(now - start, &duration);
9454 
9455 		ledger_t ledger = get_task_ledger(t);
9456 		ledger_credit(ledger, task_ledgers.memorystatus_dirty_time, duration);
9457 
9458 		t->memstat_dirty_start = now;
9459 	}
9460 
9461 	task_unlock(t);
9462 }
9463 #endif /* CONFIG_MEMORYSTATUS */
9464 
9465 void
9466 task_set_memory_ownership_transfer(
9467 	task_t    task,
9468 	boolean_t value)
9469 {
9470 	task_lock(task);
9471 	task->task_can_transfer_memory_ownership = !!value;
9472 	task_unlock(task);
9473 }
9474 
9475 #if DEVELOPMENT || DEBUG
9476 
9477 void
9478 task_set_no_footprint_for_debug(task_t task, boolean_t value)
9479 {
9480 	task_lock(task);
9481 	task->task_no_footprint_for_debug = !!value;
9482 	task_unlock(task);
9483 }
9484 
9485 int
9486 task_get_no_footprint_for_debug(task_t task)
9487 {
9488 	return task->task_no_footprint_for_debug;
9489 }
9490 
9491 #endif /* DEVELOPMENT || DEBUG */
9492 
9493 void
9494 task_copy_vmobjects(task_t task, vm_object_query_t query, size_t len, size_t *num)
9495 {
9496 	vm_object_t find_vmo;
9497 	size_t size = 0;
9498 
9499 	/*
9500 	 * Allocate a save area for FP state before taking task_objq lock,
9501 	 * if necessary, to ensure that VM_KERNEL_ADDRHASH() doesn't cause
9502 	 * an FP state allocation while holding VM locks.
9503 	 */
9504 	ml_fp_save_area_prealloc();
9505 
9506 	task_objq_lock(task);
9507 	if (query != NULL) {
9508 		queue_iterate(&task->task_objq, find_vmo, vm_object_t, task_objq)
9509 		{
9510 			vm_object_query_t p = &query[size++];
9511 
9512 			/* make sure to not overrun */
9513 			if (size * sizeof(vm_object_query_data_t) > len) {
9514 				--size;
9515 				break;
9516 			}
9517 
9518 			bzero(p, sizeof(*p));
9519 			p->object_id = (vm_object_id_t) VM_KERNEL_ADDRHASH(find_vmo);
9520 			p->virtual_size = find_vmo->internal ? find_vmo->vo_size : 0;
9521 			p->resident_size = find_vmo->resident_page_count * PAGE_SIZE;
9522 			p->wired_size = find_vmo->wired_page_count * PAGE_SIZE;
9523 			p->reusable_size = find_vmo->reusable_page_count * PAGE_SIZE;
9524 			p->vo_no_footprint = find_vmo->vo_no_footprint;
9525 			p->vo_ledger_tag = find_vmo->vo_ledger_tag;
9526 			p->purgable = find_vmo->purgable;
9527 
9528 			if (find_vmo->internal && find_vmo->pager_created && find_vmo->pager != NULL) {
9529 				p->compressed_size = vm_compressor_pager_get_count(find_vmo->pager) * PAGE_SIZE;
9530 			} else {
9531 				p->compressed_size = 0;
9532 			}
9533 		}
9534 	} else {
9535 		size = (size_t)task->task_owned_objects;
9536 	}
9537 	task_objq_unlock(task);
9538 
9539 	*num = size;
9540 }
9541 
9542 void
9543 task_get_owned_vmobjects(task_t task, size_t buffer_size, vmobject_list_output_t buffer, size_t* output_size, size_t* entries)
9544 {
9545 	assert(output_size);
9546 	assert(entries);
9547 
9548 	/* copy the vmobjects and vmobject data out of the task */
9549 	if (buffer_size == 0) {
9550 		task_copy_vmobjects(task, NULL, 0, entries);
9551 		*output_size = (*entries > 0) ? *entries * sizeof(vm_object_query_data_t) + sizeof(*buffer) : 0;
9552 	} else {
9553 		assert(buffer);
9554 		task_copy_vmobjects(task, &buffer->data[0], buffer_size - sizeof(*buffer), entries);
9555 		buffer->entries = (uint64_t)*entries;
9556 		*output_size = *entries * sizeof(vm_object_query_data_t) + sizeof(*buffer);
9557 	}
9558 }
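
/*
 * Note (editor's sketch): this is a two-call sizing protocol. Pass
 * buffer_size == 0 to learn the required allocation (*output_size already
 * includes the list header), allocate, then call again with the buffer,
 * as task_store_owned_vmobject_info() below demonstrates.
 */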
9559 
9560 void
9561 task_store_owned_vmobject_info(task_t to_task, task_t from_task)
9562 {
9563 	size_t buffer_size;
9564 	vmobject_list_output_t buffer;
9565 	size_t output_size;
9566 	size_t entries;
9567 
9568 	assert(to_task != from_task);
9569 
9570 	/* get the size, allocate a buffer, and populate */
9571 	entries = 0;
9572 	output_size = 0;
9573 	task_get_owned_vmobjects(from_task, 0, NULL, &output_size, &entries);
9574 
9575 	if (output_size) {
9576 		buffer_size = output_size;
9577 		buffer = kalloc_data(buffer_size, Z_WAITOK);
9578 
9579 		if (buffer) {
9580 			entries = 0;
9581 			output_size = 0;
9582 
9583 			task_get_owned_vmobjects(from_task, buffer_size, buffer, &output_size, &entries);
9584 
9585 			if (entries) {
9586 				to_task->corpse_vmobject_list = buffer;
9587 				to_task->corpse_vmobject_list_size = buffer_size;
9588 			}
9589 		}
9590 	}
9591 }
9592 
9593 void
9594 task_set_filter_msg_flag(
9595 	task_t task,
9596 	boolean_t flag)
9597 {
9598 	assert(task != TASK_NULL);
9599 
9600 	if (flag) {
9601 		task_ro_flags_set(task, TFRO_FILTER_MSG);
9602 	} else {
9603 		task_ro_flags_clear(task, TFRO_FILTER_MSG);
9604 	}
9605 }
9606 
9607 boolean_t
9608 task_get_filter_msg_flag(
9609 	task_t task)
9610 {
9611 	if (!task) {
9612 		return false;
9613 	}
9614 
9615 	return (task_ro_flags_get(task) & TFRO_FILTER_MSG) ? TRUE : FALSE;
9616 }
9617 bool
9618 task_is_exotic(
9619 	task_t task)
9620 {
9621 	if (task == TASK_NULL) {
9622 		return false;
9623 	}
9624 	return vm_map_is_exotic(get_task_map(task));
9625 }
9626 
9627 bool
9628 task_is_alien(
9629 	task_t task)
9630 {
9631 	if (task == TASK_NULL) {
9632 		return false;
9633 	}
9634 	return vm_map_is_alien(get_task_map(task));
9635 }
9636 
9637 
9638 
9639 #if CONFIG_MACF
9640 uint8_t *
9641 mac_task_get_mach_filter_mask(task_t task)
9642 {
9643 	assert(task);
9644 	return task_get_mach_trap_filter_mask(task);
9645 }
9646 
9647 uint8_t *
9648 mac_task_get_kobj_filter_mask(task_t task)
9649 {
9650 	assert(task);
9651 	return task_get_mach_kobj_filter_mask(task);
9652 }
9653 
9654 /* Set the filter mask for Mach traps. */
9655 void
9656 mac_task_set_mach_filter_mask(task_t task, uint8_t *maskptr)
9657 {
9658 	assert(task);
9659 
9660 	task_set_mach_trap_filter_mask(task, maskptr);
9661 }
9662 
9663 /* Set the filter mask for kobject msgs. */
9664 void
9665 mac_task_set_kobj_filter_mask(task_t task, uint8_t *maskptr)
9666 {
9667 	assert(task);
9668 
9669 	task_set_mach_kobj_filter_mask(task, maskptr);
9670 }
9671 
9672 /* Hook for mach trap/sc filter evaluation policy. */
9673 SECURITY_READ_ONLY_LATE(mac_task_mach_filter_cbfunc_t) mac_task_mach_trap_evaluate = NULL;
9674 
9675 /* Hook for kobj message filter evaluation policy. */
9676 SECURITY_READ_ONLY_LATE(mac_task_kobj_filter_cbfunc_t) mac_task_kobj_msg_evaluate = NULL;
9677 
9678 /* Set the callback hooks for the filtering policy. */
9679 int
9680 mac_task_register_filter_callbacks(
9681 	const mac_task_mach_filter_cbfunc_t mach_cbfunc,
9682 	const mac_task_kobj_filter_cbfunc_t kobj_cbfunc)
9683 {
9684 	if (mach_cbfunc != NULL) {
9685 		if (mac_task_mach_trap_evaluate != NULL) {
9686 			return KERN_FAILURE;
9687 		}
9688 		mac_task_mach_trap_evaluate = mach_cbfunc;
9689 	}
9690 	if (kobj_cbfunc != NULL) {
9691 		if (mac_task_kobj_msg_evaluate != NULL) {
9692 			return KERN_FAILURE;
9693 		}
9694 		mac_task_kobj_msg_evaluate = kobj_cbfunc;
9695 	}
9696 
9697 	return KERN_SUCCESS;
9698 }
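
/*
 * A MAC policy would register its evaluation hooks once during policy
 * initialization; a second registration fails with KERN_FAILURE because
 * the hook pointers are write-once.  Hypothetical sketch (my_mach_cb and
 * my_kobj_cb are assumptions, not in-tree symbols):
 *
 *	if (mac_task_register_filter_callbacks(my_mach_cb, my_kobj_cb) !=
 *	    KERN_SUCCESS) {
 *		panic("message filter callbacks already registered");
 *	}
 */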
9699 #endif /* CONFIG_MACF */
9700 
9701 #if CONFIG_ROSETTA
9702 bool
9703 task_is_translated(task_t task)
9704 {
9705 	extern boolean_t proc_is_translated(struct proc* p);
9706 	return task && proc_is_translated(get_bsdtask_info(task));
9707 }
9708 #endif
9709 
9710 
9711 
9712 #if __has_feature(ptrauth_calls)
9713 /* On FPAC, we want to deliver all PAC violations as fatal exceptions, regardless
9714  * of the enable_pac_exception boot-arg value or any other entitlements.
9715  * The only case where we allow non-fatal PAC exceptions on FPAC is for debugging,
9716  * which requires Developer Mode enabled.
9717  *
9718  * On non-FPAC hardware, we gate the decision behind entitlements and the
9719  * enable_pac_exception boot-arg.
9720  */
9721 extern int gARM_FEAT_FPAC;
9722 /*
9723  * Having the PAC_EXCEPTION_ENTITLEMENT entitlement means we always enforce all
9724  * of the PAC exception hardening: fatal exceptions and signed user state.
9725  */
9726 #define PAC_EXCEPTION_ENTITLEMENT "com.apple.private.pac.exception"
9727 /*
9728  * On non-FPAC hardware, when enable_pac_exception boot-arg is set to true,
9729  * processes can choose to get non-fatal PAC exception delivery by setting
9730  * the SKIP_PAC_EXCEPTION_ENTITLEMENT entitlement.
9731  */
9732 #define SKIP_PAC_EXCEPTION_ENTITLEMENT "com.apple.private.skip.pac.exception"
9733 
9734 void
9735 task_set_pac_exception_fatal_flag(
9736 	task_t task)
9737 {
9738 	assert(task != TASK_NULL);
9739 	bool pac_hardened_task = false;
9740 	uint32_t set_flags = 0;
9741 
9742 	/*
9743 	 * We must not apply this security policy on tasks which have opted out of mach hardening to
9744 	 * avoid regressions in third party plugins and third party apps when using AMFI boot-args
9745 	 */
9746 	bool platform_binary = task_get_platform_binary(task);
9747 #if XNU_TARGET_OS_OSX
9748 	platform_binary &= !task_opted_out_mach_hardening(task);
9749 #endif /* XNU_TARGET_OS_OSX */
9750 
9751 	/*
9752 	 * On non-FPAC hardware, we allow gating PAC exceptions behind
9753 	 * SKIP_PAC_EXCEPTION_ENTITLEMENT and the boot-arg.
9754 	 */
9755 	if (!gARM_FEAT_FPAC && enable_pac_exception &&
9756 	    IOTaskHasEntitlement(task, SKIP_PAC_EXCEPTION_ENTITLEMENT)) {
9757 		return;
9758 	}
9759 
9760 	if (IOTaskHasEntitlement(task, PAC_EXCEPTION_ENTITLEMENT) || task_get_hardened_runtime(task)) {
9761 		pac_hardened_task = true;
9762 		set_flags |= TFRO_PAC_ENFORCE_USER_STATE;
9763 	}
9764 
9765 	/* On non-FPAC hardware, gate the fatal property behind entitlements and boot-arg. */
9766 	if (pac_hardened_task ||
9767 	    ((enable_pac_exception || gARM_FEAT_FPAC) && platform_binary)) {
9768 		set_flags |= TFRO_PAC_EXC_FATAL;
9769 	}
9770 
9771 	if (set_flags != 0) {
9772 		task_ro_flags_set(task, set_flags);
9773 	}
9774 }
9775 
9776 bool
9777 task_is_pac_exception_fatal(
9778 	task_t task)
9779 {
9780 	assert(task != TASK_NULL);
9781 	return !!(task_ro_flags_get(task) & TFRO_PAC_EXC_FATAL);
9782 }
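
/*
 * Summary of the resulting policy (a reading of the code above, not a
 * separate specification): a PAC-hardened task (PAC_EXCEPTION_ENTITLEMENT
 * or hardened runtime) always gets fatal PAC exceptions and signed user
 * state.  Otherwise, platform binaries get fatal PAC exceptions on FPAC
 * hardware unconditionally, and on non-FPAC hardware only when the
 * enable_pac_exception boot-arg is set, where holders of
 * SKIP_PAC_EXCEPTION_ENTITLEMENT may still opt out.
 */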
9783 #endif /* __has_feature(ptrauth_calls) */
9784 
9785 /*
9786  * FATAL_EXCEPTION_ENTITLEMENT, if present, will contain a list of
9787  * conditions for which access violations should deliver SIGKILL rather than
9788  * SIGSEGV.  This is a hardening measure intended for use by applications
9789  * that are able to handle the stricter error handling behavior.  Currently
9790  * this supports FATAL_EXCEPTION_ENTITLEMENT_JIT, which is documented in
9791  * user_fault_in_self_restrict_mode().
9792  */
9793 #define FATAL_EXCEPTION_ENTITLEMENT "com.apple.security.fatal-exceptions"
9794 #define FATAL_EXCEPTION_ENTITLEMENT_JIT "jit"
9795 
9796 void
9797 task_set_jit_exception_fatal_flag(
9798 	task_t task)
9799 {
9800 	assert(task != TASK_NULL);
9801 	if (IOTaskHasStringEntitlement(task, FATAL_EXCEPTION_ENTITLEMENT, FATAL_EXCEPTION_ENTITLEMENT_JIT)) {
9802 		task_ro_flags_set(task, TFRO_JIT_EXC_FATAL);
9803 	}
9804 }
9805 
9806 bool
9807 task_is_jit_exception_fatal(
9808 	__unused task_t task)
9809 {
9810 #if !defined(XNU_PLATFORM_MacOSX)
9811 	return true;
9812 #else
9813 	assert(task != TASK_NULL);
9814 	return !!(task_ro_flags_get(task) & TFRO_JIT_EXC_FATAL);
9815 #endif
9816 }
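
/*
 * The entitlement is checked as a string match, so a process opting in
 * to fatal JIT access violations would carry something like the
 * following in its entitlements (illustrative plist sketch, assuming
 * the usual array-of-strings encoding):
 *
 *	<key>com.apple.security.fatal-exceptions</key>
 *	<array>
 *		<string>jit</string>
 *	</array>
 *
 * Note that task_is_jit_exception_fatal() returns true unconditionally
 * on non-macOS platforms, so the entitlement only changes behavior on
 * macOS.
 */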
9817 
9818 bool
9819 task_needs_user_signed_thread_state(
9820 	task_t task)
9821 {
9822 	assert(task != TASK_NULL);
9823 	return !!(task_ro_flags_get(task) & TFRO_PAC_ENFORCE_USER_STATE);
9824 }
9825 
9826 void
9827 task_set_tecs(task_t task)
9828 {
9829 	if (task == TASK_NULL) {
9830 		task = current_task();
9831 	}
9832 
9833 	if (!machine_csv(CPUVN_CI)) {
9834 		return;
9835 	}
9836 
9837 	LCK_MTX_ASSERT(&task->lock, LCK_MTX_ASSERT_NOTOWNED);
9838 
9839 	task_lock(task);
9840 
9841 	task->t_flags |= TF_TECS;
9842 
9843 	thread_t thread;
9844 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
9845 		machine_tecs(thread);
9846 	}
9847 	task_unlock(task);
9848 }
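
/*
 * Note the locking pattern above: the caller must not already hold the
 * task lock (asserted), and the thread list is walked under it so that
 * threads cannot come or go while machine_tecs() is applied.  Passing
 * TASK_NULL targets the caller's own task:
 *
 *	task_set_tecs(TASK_NULL);
 */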
9849 
9850 kern_return_t
9851 task_test_sync_upcall(
9852 	task_t     task,
9853 	ipc_port_t send_port)
9854 {
9855 #if DEVELOPMENT || DEBUG
9856 	if (task != current_task() || !IPC_PORT_VALID(send_port)) {
9857 		return KERN_INVALID_ARGUMENT;
9858 	}
9859 
9860 	/* Block on sync kernel upcall on the given send port */
9861 	mach_test_sync_upcall(send_port);
9862 
9863 	ipc_port_release_send(send_port);
9864 	return KERN_SUCCESS;
9865 #else
9866 	(void)task;
9867 	(void)send_port;
9868 	return KERN_NOT_SUPPORTED;
9869 #endif
9870 }
9871 
9872 kern_return_t
9873 task_test_async_upcall_propagation(
9874 	task_t      task,
9875 	ipc_port_t  send_port,
9876 	int         qos,
9877 	int         iotier)
9878 {
9879 #if DEVELOPMENT || DEBUG
9880 	kern_return_t kr;
9881 
9882 	if (task != current_task() || !IPC_PORT_VALID(send_port)) {
9883 		return KERN_INVALID_ARGUMENT;
9884 	}
9885 
9886 	if (qos < THREAD_QOS_DEFAULT || qos > THREAD_QOS_USER_INTERACTIVE ||
9887 	    iotier < THROTTLE_LEVEL_START || iotier > THROTTLE_LEVEL_END) {
9888 		return KERN_INVALID_ARGUMENT;
9889 	}
9890 
9891 	struct thread_attr_for_ipc_propagation attr = {
9892 		.tafip_iotier = iotier,
9893 		.tafip_qos = qos
9894 	};
9895 
9896 	/* Apply propagate attr to port */
9897 	kr = ipc_port_propagate_thread_attr(send_port, attr);
9898 	if (kr != KERN_SUCCESS) {
9899 		return kr;
9900 	}
9901 
9902 	thread_enable_send_importance(current_thread(), TRUE);
9903 
9904 	/* Perform an async kernel upcall on the given send port */
9905 	mach_test_async_upcall(send_port);
9906 	thread_enable_send_importance(current_thread(), FALSE);
9907 
9908 	ipc_port_release_send(send_port);
9909 	return KERN_SUCCESS;
9910 #else
9911 	(void)task;
9912 	(void)send_port;
9913 	(void)qos;
9914 	(void)iotier;
9915 	return KERN_NOT_SUPPORTED;
9916 #endif
9917 }
9918 
9919 #if CONFIG_PROC_RESOURCE_LIMITS
9920 mach_port_name_t
9921 current_task_get_fatal_port_name(void)
9922 {
9923 	mach_port_t task_fatal_port = MACH_PORT_NULL;
9924 	mach_port_name_t port_name = 0;
9925 
9926 	task_fatal_port = task_allocate_fatal_port();
9927 
9928 	if (task_fatal_port) {
9929 		ipc_object_copyout(current_space(), ip_to_object(task_fatal_port), MACH_MSG_TYPE_PORT_SEND,
9930 		    IPC_OBJECT_COPYOUT_FLAGS_NONE, NULL, NULL, &port_name);
9931 	}
9932 
9933 	return port_name;
9934 }
9935 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
9936 
9937 #if defined(__x86_64__)
9938 bool
9939 curtask_get_insn_copy_optout(void)
9940 {
9941 	bool optout;
9942 	task_t cur_task = current_task();
9943 
9944 	task_lock(cur_task);
9945 	optout = (cur_task->t_flags & TF_INSN_COPY_OPTOUT) ? true : false;
9946 	task_unlock(cur_task);
9947 
9948 	return optout;
9949 }
9950 
9951 void
9952 curtask_set_insn_copy_optout(void)
9953 {
9954 	task_t cur_task = current_task();
9955 
9956 	task_lock(cur_task);
9957 
9958 	cur_task->t_flags |= TF_INSN_COPY_OPTOUT;
9959 
9960 	thread_t thread;
9961 	queue_iterate(&cur_task->threads, thread, thread_t, task_threads) {
9962 		machine_thread_set_insn_copy_optout(thread);
9963 	}
9964 	task_unlock(cur_task);
9965 }
9966 #endif /* defined(__x86_64__) */
9967 
9968 void
9969 task_get_corpse_vmobject_list(task_t task, vmobject_list_output_t* list, size_t* list_size)
9970 {
9971 	assert(task);
9972 	assert(list_size);
9973 
9974 	*list = task->corpse_vmobject_list;
9975 	*list_size = (size_t)task->corpse_vmobject_list_size;
9976 }
9977 
9978 __abortlike
9979 static void
9980 panic_proc_ro_task_backref_mismatch(task_t t, proc_ro_t ro)
9981 {
9982 	panic("proc_ro->task backref mismatch: t=%p, ro=%p, "
9983 	    "proc_ro_task(ro)=%p", t, ro, proc_ro_task(ro));
9984 }
9985 
9986 proc_ro_t
9987 task_get_ro(task_t t)
9988 {
9989 	proc_ro_t ro = (proc_ro_t)t->bsd_info_ro;
9990 
9991 	zone_require_ro(ZONE_ID_PROC_RO, sizeof(struct proc_ro), ro);
9992 	if (__improbable(proc_ro_task(ro) != t)) {
9993 		panic_proc_ro_task_backref_mismatch(t, ro);
9994 	}
9995 
9996 	return ro;
9997 }
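
/*
 * task_get_ro() is the hardened accessor for the read-only task data:
 * zone_require_ro() panics unless the pointer is a genuine
 * ZONE_ID_PROC_RO element, and the back-pointer check panics on a
 * mismatched task, so a forged bsd_info_ro cannot be laundered through
 * this path.  Typical read-only field access looks like:
 *
 *	uint32_t flags = task_get_ro(task)->t_flags_ro;
 */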
9998 
9999 uint32_t
10000 task_ro_flags_get(task_t task)
10001 {
10002 	return task_get_ro(task)->t_flags_ro;
10003 }
10004 
10005 void
10006 task_ro_flags_set(task_t task, uint32_t flags)
10007 {
10008 	zalloc_ro_update_field_atomic(ZONE_ID_PROC_RO, task_get_ro(task),
10009 	    t_flags_ro, ZRO_ATOMIC_OR_32, flags);
10010 }
10011 
10012 void
10013 task_ro_flags_clear(task_t task, uint32_t flags)
10014 {
10015 	zalloc_ro_update_field_atomic(ZONE_ID_PROC_RO, task_get_ro(task),
10016 	    t_flags_ro, ZRO_ATOMIC_AND_32, ~flags);
10017 }
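
/*
 * The flag mutators funnel through zalloc_ro_update_field_atomic()
 * because t_flags_ro lives in read-only memory and cannot be stored to
 * directly.  Illustrative round trip:
 *
 *	task_ro_flags_set(task, TFRO_FILTER_MSG);
 *	assert(task_ro_flags_get(task) & TFRO_FILTER_MSG);
 *	task_ro_flags_clear(task, TFRO_FILTER_MSG);
 */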
10018 
10019 task_control_port_options_t
10020 task_get_control_port_options(task_t task)
10021 {
10022 	return task_get_ro(task)->task_control_port_options;
10023 }
10024 
10025 void
10026 task_set_control_port_options(task_t task, task_control_port_options_t opts)
10027 {
10028 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
10029 	    task_control_port_options, &opts);
10030 }
10031 
10032 /*!
10033  * @function kdp_task_is_locked
10034  *
10035  * @abstract
10036  * Checks if task is locked.
10037  *
10038  * @discussion
10039  * NOT SAFE: To be used only by kernel debugger.
10040  *
10041  * @param task task to check
10042  *
10043  * @returns TRUE if the task is locked.
10044  */
10045 boolean_t
10046 kdp_task_is_locked(task_t task)
10047 {
10048 	return kdp_lck_mtx_lock_spin_is_acquired(&task->lock);
10049 }
10050 
10051 #if DEBUG || DEVELOPMENT
10052 /**
10053  *
10054  * Check if a threshold limit is valid based on the actual phys memory
10055  * limit. If they are the same, race conditions may arise, so we have to
10056  * prevent that from happening.
10057  */
10058 static diagthreshold_check_return
10059 task_check_memorythreshold_is_valid(task_t task, uint64_t new_limit, bool is_diagnostics_value)
10060 {
10061 	int phys_limit_mb;
10062 	kern_return_t ret_value;
10063 	bool threshold_enabled;
10064 	bool dummy;
10065 	ret_value = ledger_is_diag_threshold_enabled(task->ledger, task_ledgers.phys_footprint, &threshold_enabled);
10066 	if (ret_value != KERN_SUCCESS) {
10067 		return ret_value;
10068 	}
10069 	if (is_diagnostics_value == true) {
10070 		ret_value = task_get_phys_footprint_limit(task, &phys_limit_mb);
10071 	} else {
10072 		uint64_t diag_limit;
10073 		ret_value = task_get_diag_footprint_limit_internal(task, &diag_limit, &dummy);
10074 		phys_limit_mb = (int)(diag_limit >> 20);
10075 	}
10076 	if (ret_value != KERN_SUCCESS) {
10077 		return ret_value;
10078 	}
10079 	if (phys_limit_mb == (int)new_limit) {
10080 		if (threshold_enabled == false) {
10081 			return THRESHOLD_IS_SAME_AS_LIMIT_FLAG_DISABLED;
10082 		} else {
10083 			return THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED;
10084 		}
10085 	}
10086 	if (threshold_enabled == false) {
10087 		return THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED;
10088 	} else {
10089 		return THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_ENABLED;
10090 	}
10091 }
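
/*
 * Illustrative only: a hypothetical handler applying a new diagnostics
 * threshold would reject a value that collides with the currently
 * enabled phys_footprint limit, e.g.:
 *
 *	switch (task_check_memorythreshold_is_valid(task, new_mb, true)) {
 *	case THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED:
 *		return EINVAL;
 *	default:
 *		break;
 *	}
 */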
10092 #endif
10093 
10094 #if CONFIG_EXCLAVES
10095 kern_return_t
10096 task_add_conclave(task_t task, void *vnode, int64_t off, const char *task_conclave_id)
10097 {
10098 	/*
10099 	 * Only launchd or properly entitled tasks can attach tasks to
10100 	 * conclaves.
10101 	 */
10102 	if (!exclaves_has_priv(current_task(), EXCLAVES_PRIV_CONCLAVE_SPAWN)) {
10103 		return KERN_DENIED;
10104 	}
10105 
10106 	/*
10107 	 * Only entitled tasks can have conclaves attached.
10108 	 * Allow tasks which have the SPAWN privilege to also host conclaves.
10109 	 * This allows xpc proxy to add a conclave before execing a daemon.
10110 	 */
10111 	if (!exclaves_has_priv_vnode(vnode, off, EXCLAVES_PRIV_CONCLAVE_HOST) &&
10112 	    !exclaves_has_priv_vnode(vnode, off, EXCLAVES_PRIV_CONCLAVE_SPAWN)) {
10113 		return KERN_DENIED;
10114 	}
10115 
10116 	return exclaves_conclave_attach(task_conclave_id, task);
10117 }
10118 
10119 kern_return_t
10120 task_launch_conclave(mach_port_name_t port __unused)
10121 {
10122 	kern_return_t kr = KERN_FAILURE;
10123 	assert3u(port, ==, MACH_PORT_NULL);
10124 	exclaves_resource_t *conclave = task_get_conclave(current_task());
10125 	if (conclave == NULL) {
10126 		return kr;
10127 	}
10128 
10129 	kr = exclaves_conclave_launch(conclave);
10130 	if (kr != KERN_SUCCESS) {
10131 		return kr;
10132 	}
10133 	task_set_conclave_taint(current_task());
10134 
10135 	return KERN_SUCCESS;
10136 }
10137 
10138 kern_return_t
10139 task_inherit_conclave(task_t old_task, task_t new_task, void *vnode, int64_t off)
10140 {
10141 	if (old_task->conclave == NULL ||
10142 	    !exclaves_conclave_is_attached(old_task->conclave)) {
10143 		return KERN_SUCCESS;
10144 	}
10145 
10146 	/*
10147 	 * Only launchd or properly entitled tasks can attach tasks to
10148 	 * conclaves.
10149 	 */
10150 	if (!exclaves_has_priv(current_task(), EXCLAVES_PRIV_CONCLAVE_SPAWN)) {
10151 		return KERN_DENIED;
10152 	}
10153 
10154 	/*
10155 	 * Only entitled tasks can have conclaves attached.
10156 	 */
10157 	if (!exclaves_has_priv_vnode(vnode, off, EXCLAVES_PRIV_CONCLAVE_HOST)) {
10158 		return KERN_DENIED;
10159 	}
10160 
10161 	return exclaves_conclave_inherit(old_task->conclave, old_task, new_task);
10162 }
10163 
10164 void
10165 task_clear_conclave(task_t task)
10166 {
10167 	if (task->exclave_crash_info) {
10168 		kfree_data(task->exclave_crash_info, CONCLAVE_CRASH_BUFFER_PAGECOUNT * PAGE_SIZE);
10169 		task->exclave_crash_info = NULL;
10170 	}
10171 
10172 	if (task->conclave == NULL) {
10173 		return;
10174 	}
10175 
10176 	/*
10177 	 * XXX
10178 	 * This should only fail if either the conclave is in an unexpected
10179 	 * state (i.e. not ATTACHED) or if the wrong port is supplied.
10180 	 * We should re-visit this and make sure we guarantee the above
10181 	 * constraints.
10182 	 */
10183 	__assert_only kern_return_t ret =
10184 	    exclaves_conclave_detach(task->conclave, task);
10185 	assert3u(ret, ==, KERN_SUCCESS);
10186 }
10187 
10188 void
10189 task_stop_conclave(task_t task, bool gather_crash_bt)
10190 {
10191 	thread_t thread = current_thread();
10192 
10193 	if (task->conclave == NULL) {
10194 		return;
10195 	}
10196 
10197 	if (task_should_panic_on_exit_due_to_conclave_taint(task)) {
10198 		panic("Conclave tainted task %p terminated\n", task);
10199 	}
10200 
10201 	/* Stash the task on current thread for conclave teardown */
10202 	thread->conclave_stop_task = task;
10203 
10204 	__assert_only kern_return_t ret =
10205 	    exclaves_conclave_stop(task->conclave, gather_crash_bt);
10206 
10207 	thread->conclave_stop_task = TASK_NULL;
10208 
10209 	assert3u(ret, ==, KERN_SUCCESS);
10210 }
10211 
10212 void
10213 task_suspend_conclave(task_t task)
10214 {
10215 	thread_t thread = current_thread();
10216 
10217 	if (task->conclave == NULL) {
10218 		return;
10219 	}
10220 
10221 	/* Stash the task on current thread for conclave teardown */
10222 	thread->conclave_stop_task = task;
10223 
10224 	__assert_only kern_return_t ret =
10225 	    exclaves_conclave_suspend(task->conclave);
10226 
10227 	thread->conclave_stop_task = TASK_NULL;
10228 
10229 	assert3u(ret, ==, KERN_SUCCESS);
10230 }
10231 
10232 void
10233 task_resume_conclave(task_t task)
10234 {
10235 	thread_t thread = current_thread();
10236 
10237 	if (task->conclave == NULL) {
10238 		return;
10239 	}
10240 
10241 	/* Stash the task on current thread for conclave teardown */
10242 	thread->conclave_stop_task = task;
10243 
10244 	__assert_only kern_return_t ret =
10245 	    exclaves_conclave_resume(task->conclave);
10246 
10247 	thread->conclave_stop_task = TASK_NULL;
10248 
10249 	assert3u(ret, ==, KERN_SUCCESS);
10250 }
10251 
10252 kern_return_t
10253 task_stop_conclave_upcall(void)
10254 {
10255 	task_t task = current_task();
10256 	if (task->conclave == NULL) {
10257 		return KERN_INVALID_TASK;
10258 	}
10259 
10260 	return exclaves_conclave_stop_upcall(task->conclave);
10261 }
10262 
10263 kern_return_t
10264 task_stop_conclave_upcall_complete(void)
10265 {
10266 	task_t task = current_task();
10267 	thread_t thread = current_thread();
10268 
10269 	if (!(thread->th_exclaves_state & TH_EXCLAVES_STOP_UPCALL_PENDING)) {
10270 		return KERN_SUCCESS;
10271 	}
10272 
10273 	assert3p(task->conclave, !=, NULL);
10274 
10275 	return exclaves_conclave_stop_upcall_complete(task->conclave, task);
10276 }
10277 
10278 kern_return_t
10279 task_suspend_conclave_upcall(uint64_t *scid_list, size_t scid_list_count)
10280 {
10281 	task_t task = current_task();
10282 	thread_t thread;
10283 	int scid_count = 0;
10284 	kern_return_t kr;
10285 	if (task->conclave == NULL) {
10286 		return KERN_INVALID_TASK;
10287 	}
10288 
10289 	kr = task_hold_and_wait(task, false);
10290 
10291 	task_lock(task);
10292 	queue_iterate(&task->threads, thread, thread_t, task_threads)
10293 	{
10294 		if (thread->th_exclaves_state & TH_EXCLAVES_RPC) {
10295 			scid_list[scid_count++] = thread->th_exclaves_ipc_ctx.scid;
10296 			if (scid_count >= scid_list_count) {
10297 				break;
10298 			}
10299 		}
10300 	}
10301 
10302 	task_unlock(task);
10303 	return kr;
10304 }
10305 
10306 kern_return_t
10307 task_crash_info_conclave_upcall(task_t task, const struct conclave_sharedbuffer_t *shared_buf,
10308     uint32_t length)
10309 {
10310 	if (task->conclave == NULL) {
10311 		return KERN_INVALID_TASK;
10312 	}
10313 
10314 	/* Allocate the buffer and memcpy it */
10315 	int task_crash_info_buffer_size = 0;
10316 	uint8_t * task_crash_info_buffer;
10317 
10318 	if (!length) {
10319 		printf("Conclave upcall: task_crash_info_conclave_upcall did not return any page addresses\n");
10320 		return KERN_INVALID_ARGUMENT;
10321 	}
10322 
10323 	task_crash_info_buffer_size = CONCLAVE_CRASH_BUFFER_PAGECOUNT * PAGE_SIZE;
10324 	assert3u(task_crash_info_buffer_size, >=, length);
10325 
10326 	task_crash_info_buffer = kalloc_data(task_crash_info_buffer_size, Z_WAITOK);
10327 	if (!task_crash_info_buffer) {
10328 		panic("task_crash_info_conclave_upcall: cannot allocate buffer for task_info shared memory");
10329 		return KERN_INVALID_ARGUMENT;
10330 	}
10331 
10332 	uint8_t * dst = task_crash_info_buffer;
10333 	uint32_t remaining = length;
10334 	for (size_t i = 0; i < CONCLAVE_CRASH_BUFFER_PAGECOUNT; i++) {
10335 		if (remaining) {
10336 			memcpy(dst, (uint8_t*)phystokv((pmap_paddr_t)shared_buf->physaddr[i]), PAGE_SIZE);
10337 			remaining = (remaining >= PAGE_SIZE) ? remaining - PAGE_SIZE : 0;
10338 			dst += PAGE_SIZE;
10339 		}
10340 	}
10341 
10342 	task_lock(task);
10343 	if (task->exclave_crash_info == NULL && task->active) {
10344 		task->exclave_crash_info = task_crash_info_buffer;
10345 		task->exclave_crash_info_length = length;
10346 		task_crash_info_buffer = NULL;
10347 	}
10348 	task_unlock(task);
10349 
10350 	if (task_crash_info_buffer) {
10351 		kfree_data(task_crash_info_buffer, task_crash_info_buffer_size);
10352 	}
10353 
10354 	return KERN_SUCCESS;
10355 }
10356 
10357 exclaves_resource_t *
10358 task_get_conclave(task_t task)
10359 {
10360 	return task->conclave;
10361 }
10362 
10363 extern boolean_t IOPMRootDomainGetWillShutdown(void);
10364 
10365 TUNABLE(bool, disable_conclave_taint, "disable_conclave_taint", true); /* Do not taint processes when they talk to a conclave, so the system does not panic when they exit. */
10366 
10367 static bool
10368 task_should_panic_on_exit_due_to_conclave_taint(task_t task)
10369 {
10370 	/* Check if boot-arg to disable conclave taint is set */
10371 	if (disable_conclave_taint) {
10372 		return false;
10373 	}
10374 
10375 	/* Check if the system is shutting down */
10376 	if (IOPMRootDomainGetWillShutdown()) {
10377 		return false;
10378 	}
10379 
10380 	return task_is_conclave_tainted(task);
10381 }
10382 
10383 static bool
10384 task_is_conclave_tainted(task_t task)
10385 {
10386 	return (task->t_exclave_state & TES_CONCLAVE_TAINTED) != 0 &&
10387 	       !(task->t_exclave_state & TES_CONCLAVE_UNTAINTABLE);
10388 }
10389 
10390 static void
10391 task_set_conclave_taint(task_t task)
10392 {
10393 	os_atomic_or(&task->t_exclave_state, TES_CONCLAVE_TAINTED, relaxed);
10394 }
10395 
10396 void
10397 task_set_conclave_untaintable(task_t task)
10398 {
10399 	os_atomic_or(&task->t_exclave_state, TES_CONCLAVE_UNTAINTABLE, relaxed);
10400 }
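
/*
 * Taint lifecycle, as implemented above: task_launch_conclave() marks
 * the calling task tainted; task_set_conclave_untaintable() exempts a
 * task; and task_stop_conclave() panics a tainted task on exit unless
 * the disable_conclave_taint boot-arg (default true) or an in-progress
 * shutdown suppresses the check.
 */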
10401 
10402 void
10403 task_add_conclave_crash_info(task_t task, void *crash_info_ptr)
10404 {
10405 	__block kern_return_t error = KERN_SUCCESS;
10406 	tb_error_t tberr = TB_ERROR_SUCCESS;
10407 	void *crash_info;
10408 	uint32_t crash_info_length = 0;
10409 
10410 	if (task->conclave == NULL) {
10411 		return;
10412 	}
10413 
10414 	if (task->exclave_crash_info_length == 0) {
10415 		return;
10416 	}
10417 
10418 	error = kcdata_add_container_marker(crash_info_ptr, KCDATA_TYPE_CONTAINER_BEGIN,
10419 	    STACKSHOT_KCCONTAINER_EXCLAVES, 0);
10420 	if (error != KERN_SUCCESS) {
10421 		return;
10422 	}
10423 
10424 	crash_info = task->exclave_crash_info;
10425 	crash_info_length = task->exclave_crash_info_length;
10426 
10427 	tberr = stackshot_stackshotresult__unmarshal(crash_info,
10428 	    (uint64_t)crash_info_length, ^(stackshot_stackshotresult_s result){
10429 		error = stackshot_exclaves_process_stackshot(&result, crash_info_ptr, false);
10430 		if (error != KERN_SUCCESS) {
10431 		        printf("task_add_conclave_crash_info: error processing stackshot result %d\n", error);
10432 		}
10433 	});
10434 	if (tberr != TB_ERROR_SUCCESS) {
10435 		printf("task_conclave_crash: task_add_conclave_crash_info could not unmarshal stackshot data 0x%x\n", tberr);
10436 		error = KERN_FAILURE;
10437 		goto error_exit;
10438 	}
10439 
10440 error_exit:
10441 	kcdata_add_container_marker(crash_info_ptr, KCDATA_TYPE_CONTAINER_END,
10442 	    STACKSHOT_KCCONTAINER_EXCLAVES, 0);
10443 
10444 	return;
10445 }
10446 
10447 #endif /* CONFIG_EXCLAVES */
10448 
10449 /* defined in bsd/kern/kern_proc.c */
10450 extern void proc_name(int pid, char *buf, int size);
10451 extern const char *proc_best_name(struct proc *p);
10452 
10453 void
10454 task_procname(task_t task, char *buf, int size)
10455 {
10456 	proc_name(task_pid(task), buf, size);
10457 }
10458 
10459 const char *
10460 task_best_name(task_t task)
10461 {
10462 	return proc_best_name(task_get_proc_raw(task));
10463 }
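
/*
 * Illustrative only: these helpers are the usual way kernel diagnostics
 * name a task without reaching into BSD proc internals, e.g.:
 *
 *	printf("%s[%d] exceeded its limit\n",
 *	    task_best_name(task), task_pid(task));
 */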
10464