xref: /xnu-11215.1.10/osfmk/kern/task.c (revision 8d741a5de7ff4191bf97d57b9f54c2f6d4a15585)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_FREE_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  *	File:	kern/task.c
58  *	Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
59  *		David Black
60  *
61  *	Task management primitives implementation.
62  */
63 /*
64  * Copyright (c) 1993 The University of Utah and
65  * the Computer Systems Laboratory (CSL).  All rights reserved.
66  *
67  * Permission to use, copy, modify and distribute this software and its
68  * documentation is hereby granted, provided that both the copyright
69  * notice and this permission notice appear in all copies of the
70  * software, derivative works or modified versions, and any portions
71  * thereof, and that both notices appear in supporting documentation.
72  *
73  * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
74  * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
75  * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
76  *
77  * CSL requests users of this software to return to csl-dist@cs.utah.edu any
78  * improvements that they make and grant CSL redistribution rights.
79  *
80  */
81 /*
82  * NOTICE: This file was modified by McAfee Research in 2004 to introduce
83  * support for mandatory and extensible security protections.  This notice
84  * is included in support of clause 2.2 (b) of the Apple Public License,
85  * Version 2.0.
86  * Copyright (c) 2005 SPARTA, Inc.
87  */
88 
89 #include <mach/mach_types.h>
90 #include <mach/boolean.h>
91 #include <mach/host_priv.h>
92 #include <mach/machine/vm_types.h>
93 #include <mach/vm_param.h>
94 #include <mach/mach_vm.h>
95 #include <mach/semaphore.h>
96 #include <mach/task_info.h>
97 #include <mach/task_inspect.h>
98 #include <mach/task_special_ports.h>
99 #include <mach/sdt.h>
100 #include <mach/mach_test_upcall.h>
101 
102 #include <ipc/ipc_importance.h>
103 #include <ipc/ipc_types.h>
104 #include <ipc/ipc_space.h>
105 #include <ipc/ipc_entry.h>
106 #include <ipc/ipc_hash.h>
107 #include <ipc/ipc_init.h>
108 
109 #include <kern/kern_types.h>
110 #include <kern/mach_param.h>
111 #include <kern/misc_protos.h>
112 #include <kern/task.h>
113 #include <kern/thread.h>
114 #include <kern/coalition.h>
115 #include <kern/zalloc.h>
116 #include <kern/kalloc.h>
117 #include <kern/kern_cdata.h>
118 #include <kern/processor.h>
119 #include <kern/recount.h>
120 #include <kern/sched_prim.h>    /* for thread_wakeup */
121 #include <kern/ipc_tt.h>
122 #include <kern/host.h>
123 #include <kern/clock.h>
124 #include <kern/timer.h>
125 #include <kern/assert.h>
126 #include <kern/affinity.h>
127 #include <kern/exc_resource.h>
128 #include <kern/machine.h>
129 #include <kern/policy_internal.h>
130 #include <kern/restartable.h>
131 #include <kern/ipc_kobject.h>
132 
133 #include <corpses/task_corpse.h>
134 #if CONFIG_TELEMETRY
135 #include <kern/telemetry.h>
136 #endif
137 
138 #if CONFIG_PERVASIVE_CPI
139 #include <kern/monotonic.h>
140 #include <machine/monotonic.h>
141 #endif /* CONFIG_PERVASIVE_CPI */
142 
143 #if CONFIG_EXCLAVES
144 #include "exclaves_boot.h"
145 #include "exclaves_resource.h"
146 #include "exclaves_boot.h"
147 #include "exclaves_inspection.h"
148 #include "kern/exclaves.tightbeam.h"
149 #endif /* CONFIG_EXCLAVES */
150 
151 #include <os/log.h>
152 
153 #include <vm/pmap.h>
154 #include <vm/vm_map_xnu.h>
155 #include <vm/vm_kern_xnu.h>         /* for kernel_map, ipc_kernel_map */
156 #include <vm/vm_pageout_xnu.h>
157 #include <vm/vm_protos.h>
158 #include <vm/vm_purgeable_xnu.h>
159 #include <vm/vm_compressor_pager_xnu.h>
160 #include <vm/vm_reclaim_xnu.h>
161 #include <vm/vm_compressor_xnu.h>
162 
163 #include <sys/kdebug.h>
164 #include <sys/proc_ro.h>
165 #include <sys/resource.h>
166 #include <sys/signalvar.h> /* for coredump */
167 #include <sys/bsdtask_info.h>
168 #include <sys/kdebug_triage.h>
169 #include <sys/code_signing.h> /* for address_space_debugged */
170 #include <sys/reason.h>
171 
172 /*
173  * Exported interfaces
174  */
175 
176 #include <mach/task_server.h>
177 #include <mach/mach_host_server.h>
178 #include <mach/mach_port_server.h>
179 
180 #include <vm/vm_shared_region_xnu.h>
181 
182 #include <libkern/OSDebug.h>
183 #include <libkern/OSAtomic.h>
184 #include <libkern/section_keywords.h>
185 
186 #include <mach-o/loader.h>
187 #include <kdp/kdp_dyld.h>
188 
189 #include <kern/sfi.h>           /* picks up ledger.h */
190 
191 #if CONFIG_MACF
192 #include <security/mac_mach_internal.h>
193 #endif
194 
195 #include <IOKit/IOBSD.h>
196 #include <kdp/processor_core.h>
197 
198 #include <string.h>
199 
200 #if KPERF
201 extern int kpc_force_all_ctrs(task_t, int);
202 #endif
203 
204 SECURITY_READ_ONLY_LATE(task_t) kernel_task;
205 
206 int64_t         next_taskuniqueid = 0;
207 const size_t task_alignment = _Alignof(struct task);
208 extern const size_t proc_alignment;
209 extern size_t proc_struct_size;
210 extern size_t proc_and_task_size;
211 size_t task_struct_size;
212 
213 extern uint32_t ipc_control_port_options;
214 
215 extern int large_corpse_count;
216 
217 extern boolean_t proc_send_synchronous_EXC_RESOURCE(void *p);
218 extern boolean_t proc_is_simulated(const proc_t);
219 
220 static void task_port_no_senders(ipc_port_t, mach_msg_type_number_t);
221 static void task_port_with_flavor_no_senders(ipc_port_t, mach_msg_type_number_t);
222 static void task_suspension_no_senders(ipc_port_t, mach_msg_type_number_t);
223 static inline void task_zone_init(void);
224 
225 #if CONFIG_EXCLAVES
226 static bool task_should_panic_on_exit_due_to_conclave_taint(task_t task);
227 static bool task_is_conclave_tainted(task_t task);
228 static void task_set_conclave_taint(task_t task);
229 kern_return_t task_crash_info_conclave_upcall(task_t task,
230     const xnuupcalls_conclavesharedbuffer_s *shared_buf, uint32_t length);
231 #endif /* CONFIG_EXCLAVES */
232 
233 IPC_KOBJECT_DEFINE(IKOT_TASK_NAME);
234 IPC_KOBJECT_DEFINE(IKOT_TASK_CONTROL,
235     .iko_op_no_senders = task_port_no_senders);
236 IPC_KOBJECT_DEFINE(IKOT_TASK_READ,
237     .iko_op_no_senders = task_port_with_flavor_no_senders);
238 IPC_KOBJECT_DEFINE(IKOT_TASK_INSPECT,
239     .iko_op_no_senders = task_port_with_flavor_no_senders);
240 IPC_KOBJECT_DEFINE(IKOT_TASK_RESUME,
241     .iko_op_no_senders = task_suspension_no_senders);
242 
243 #if CONFIG_PROC_RESOURCE_LIMITS
244 static void task_fatal_port_no_senders(ipc_port_t, mach_msg_type_number_t);
245 static mach_port_t task_allocate_fatal_port(void);
246 
247 IPC_KOBJECT_DEFINE(IKOT_TASK_FATAL,
248     .iko_op_stable     = true,
249     .iko_op_no_senders = task_fatal_port_no_senders);
250 
251 extern void task_id_token_set_port(task_id_token_t token, ipc_port_t port);
252 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
253 
254 /* Flag set by core audio when audio is playing. Used to stifle EXC_RESOURCE generation when active. */
255 int audio_active = 0;
256 
257 /*
258  *	structure for tracking zone usage
259  *	Used either one per task/thread for all zones or <per-task,per-zone>.
260  */
261 typedef struct zinfo_usage_store_t {
262 	/* These fields may be updated atomically, and so must be 8 byte aligned */
263 	uint64_t        alloc __attribute__((aligned(8)));              /* allocation counter */
264 	uint64_t        free __attribute__((aligned(8)));               /* free counter */
265 } zinfo_usage_store_t;
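/*
 * Illustrative sketch (editorial, not part of the original source): because
 * both counters are 8-byte aligned, accounting paths can bump them without a
 * lock, e.g. via OSAddAtomic64() from <libkern/OSAtomic.h>. `usage` and
 * `size` below are hypothetical names.
 *
 *	zinfo_usage_store_t usage;
 *	OSAddAtomic64((SInt64)size, (volatile SInt64 *)&usage.alloc);
 */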
266 
267 /**
268  * Return codes related to diag threshold and memory limit
269  */
270 __options_decl(diagthreshold_check_return, int, {
271 	THRESHOLD_IS_SAME_AS_LIMIT_FLAG_DISABLED        = 0,
272 	THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED         = 1,
273 	THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED    = 2,
274 	THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_ENABLED     = 3,
275 });
276 
277 /**
278  * Whether the task's current diagnostics threshold matches its memory limit
279  */
280 __options_decl(current_, int, {
281 	THRESHOLD_IS_SAME_AS_LIMIT      = 0,
282 	THRESHOLD_IS_NOT_SAME_AS_LIMIT  = 1
283 });
284 
285 zinfo_usage_store_t tasks_tkm_private;
286 zinfo_usage_store_t tasks_tkm_shared;
287 
288 /* A container to accumulate statistics for expired tasks */
289 expired_task_statistics_t               dead_task_statistics;
290 LCK_SPIN_DECLARE_ATTR(dead_task_statistics_lock, &task_lck_grp, &task_lck_attr);
291 
292 ledger_template_t task_ledger_template = NULL;
293 
294 /* global lock for task_dyld_process_info_notify_{register, deregister, get_trap} */
295 LCK_GRP_DECLARE(g_dyldinfo_mtx_grp, "g_dyldinfo");
296 LCK_MTX_DECLARE(g_dyldinfo_mtx, &g_dyldinfo_mtx_grp);
297 
298 SECURITY_READ_ONLY_LATE(struct _task_ledger_indices) task_ledgers __attribute__((used)) =
299 {.cpu_time = -1,
300  .tkm_private = -1,
301  .tkm_shared = -1,
302  .phys_mem = -1,
303  .wired_mem = -1,
304  .internal = -1,
305  .iokit_mapped = -1,
306  .external = -1,
307  .reusable = -1,
308  .alternate_accounting = -1,
309  .alternate_accounting_compressed = -1,
310  .page_table = -1,
311  .phys_footprint = -1,
312  .internal_compressed = -1,
313  .purgeable_volatile = -1,
314  .purgeable_nonvolatile = -1,
315  .purgeable_volatile_compressed = -1,
316  .purgeable_nonvolatile_compressed = -1,
317  .tagged_nofootprint = -1,
318  .tagged_footprint = -1,
319  .tagged_nofootprint_compressed = -1,
320  .tagged_footprint_compressed = -1,
321  .network_volatile = -1,
322  .network_nonvolatile = -1,
323  .network_volatile_compressed = -1,
324  .network_nonvolatile_compressed = -1,
325  .media_nofootprint = -1,
326  .media_footprint = -1,
327  .media_nofootprint_compressed = -1,
328  .media_footprint_compressed = -1,
329  .graphics_nofootprint = -1,
330  .graphics_footprint = -1,
331  .graphics_nofootprint_compressed = -1,
332  .graphics_footprint_compressed = -1,
333  .neural_nofootprint = -1,
334  .neural_footprint = -1,
335  .neural_nofootprint_compressed = -1,
336  .neural_footprint_compressed = -1,
337  .neural_nofootprint_total = -1,
338  .platform_idle_wakeups = -1,
339  .interrupt_wakeups = -1,
340 #if CONFIG_SCHED_SFI
341  .sfi_wait_times = { 0 /* initialized at runtime */},
342 #endif /* CONFIG_SCHED_SFI */
343  .cpu_time_billed_to_me = -1,
344  .cpu_time_billed_to_others = -1,
345  .physical_writes = -1,
346  .logical_writes = -1,
347  .logical_writes_to_external = -1,
348 #if DEBUG || DEVELOPMENT
349  .pages_grabbed = -1,
350  .pages_grabbed_kern = -1,
351  .pages_grabbed_iopl = -1,
352  .pages_grabbed_upl = -1,
353 #endif
354 #if CONFIG_FREEZE
355  .frozen_to_swap = -1,
356 #endif /* CONFIG_FREEZE */
357  .energy_billed_to_me = -1,
358  .energy_billed_to_others = -1,
359 #if CONFIG_PHYS_WRITE_ACCT
360  .fs_metadata_writes = -1,
361 #endif /* CONFIG_PHYS_WRITE_ACCT */
362 #if CONFIG_MEMORYSTATUS
363  .memorystatus_dirty_time = -1,
364 #endif /* CONFIG_MEMORYSTATUS */
365  .swapins = -1,
366  .conclave_mem = -1, };
367 
368 /* System sleep state */
369 boolean_t tasks_suspend_state;
370 
371 __options_decl(send_exec_resource_is_fatal, bool, {
372 	IS_NOT_FATAL            = false,
373 	IS_FATAL                = true
374 });
375 
376 __options_decl(send_exec_resource_is_diagnostics, bool, {
377 	IS_NOT_DIAGNOSTICS      = false,
378 	IS_DIAGNOSTICS          = true
379 });
380 
381 __options_decl(send_exec_resource_is_warning, bool, {
382 	IS_NOT_WARNING          = false,
383 	IS_WARNING              = true
384 });
385 
386 __options_decl(send_exec_resource_options_t, uint8_t, {
387 	EXEC_RESOURCE_FATAL = 0x01,
388 	EXEC_RESOURCE_DIAGNOSTIC = 0x02,
389 	EXEC_RESOURCE_WARNING = 0x04,
390 });
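/*
 * Illustrative combination (an assumption, not a call site from this file): a
 * fatal limit hit that should also be reported as a diagnostics event would
 * pass (EXEC_RESOURCE_FATAL | EXEC_RESOURCE_DIAGNOSTIC) as the
 * exception_options argument declared below.
 */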
391 
392 /**
393  * Actions to take when a process has reached the memory limit or the diagnostics threshold limits
394  */
395 static inline void task_process_crossed_limit_no_diag(task_t task, ledger_amount_t ledger_limit_size, bool memlimit_is_fatal, bool memlimit_is_active, send_exec_resource_is_warning is_warning);
396 #if DEBUG || DEVELOPMENT
397 static inline void task_process_crossed_limit_diag(ledger_amount_t ledger_limit_size);
398 #endif
399 void init_task_ledgers(void);
400 void task_footprint_exceeded(int warning, __unused const void *param0, __unused const void *param1);
401 void task_wakeups_rate_exceeded(int warning, __unused const void *param0, __unused const void *param1);
402 void task_io_rate_exceeded(int warning, const void *param0, __unused const void *param1);
403 void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void);
404 void __attribute__((noinline)) PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, send_exec_resource_options_t exception_options);
405 void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor);
406 #if CONFIG_PROC_RESOURCE_LIMITS
407 void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task_t task, int current_size, int soft_limit, int hard_limit);
408 mach_port_name_t current_task_get_fatal_port_name(void);
409 void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_KQWORKLOOPS(task_t task, int current_size, int soft_limit, int hard_limit);
410 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
411 
412 kern_return_t task_suspend_internal(task_t);
413 kern_return_t task_resume_internal(task_t);
414 static kern_return_t task_start_halt_locked(task_t task, boolean_t should_mark_corpse);
415 
416 extern kern_return_t iokit_task_terminate(task_t task, int phase);
417 extern void          iokit_task_app_suspended_changed(task_t task);
418 
419 extern kern_return_t exception_deliver(thread_t, exception_type_t, mach_exception_data_t, mach_msg_type_number_t, struct exception_action *, lck_mtx_t *);
420 extern void bsd_copythreadname(void *dst_uth, void *src_uth);
421 extern kern_return_t thread_resume(thread_t thread);
422 
423 // Condition to include diag footprints
424 #define RESETTABLE_DIAG_FOOTPRINT_LIMITS ((DEBUG || DEVELOPMENT) && CONFIG_MEMORYSTATUS)
425 
426 // Warn tasks when they hit 80% of their memory limit.
427 #define PHYS_FOOTPRINT_WARNING_LEVEL 80
428 
429 #define TASK_WAKEUPS_MONITOR_DEFAULT_LIMIT              150 /* wakeups per second */
430 #define TASK_WAKEUPS_MONITOR_DEFAULT_INTERVAL   300 /* in seconds. */
431 
432 /*
433  * Level (in terms of percentage of the limit) at which the wakeups monitor triggers telemetry.
434  *
435  * (i.e., when the task's wakeups rate exceeds 70% of the limit, start taking user
436  *  stacktraces, a.k.a. micro-stackshots)
437  */
438 #define TASK_WAKEUPS_MONITOR_DEFAULT_USTACKSHOTS_TRIGGER        70
439 
440 int task_wakeups_monitor_interval; /* In seconds. Time period over which wakeups rate is observed */
441 int task_wakeups_monitor_rate;     /* In hz. Maximum allowable wakeups per task before EXC_RESOURCE is sent */
442 
443 unsigned int task_wakeups_monitor_ustackshots_trigger_pct; /* Percentage. Level at which we start gathering telemetry. */
444 
445 TUNABLE(bool, disable_exc_resource, "disable_exc_resource", false); /* Global override to suppress EXC_RESOURCE for resource monitor violations. */
446 TUNABLE(bool, disable_exc_resource_during_audio, "disable_exc_resource_during_audio", true); /* Global override to suppress EXC_RESOURCE while audio is active */
447 
448 ledger_amount_t max_task_footprint = 0;  /* Per-task limit on physical memory consumption in bytes     */
449 unsigned int max_task_footprint_warning_level = 0;  /* Per-task limit warning percentage */
450 
451 /*
452  * Configure per-task memory limit.
453  * The boot-arg is interpreted as Megabytes,
454  * and takes precedence over the device tree.
455  * Setting the boot-arg to 0 disables task limits.
456  */
457 TUNABLE_DT_WRITEABLE(int, max_task_footprint_mb, "/defaults", "kern.max_task_pmem", "max_task_pmem", 0, TUNABLE_DT_NONE);
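/*
 * Example (illustrative): booting with "max_task_pmem=6000" caps each task's
 * phys_footprint at 6000 MB, overriding the /defaults device-tree value;
 * "max_task_pmem=0" disables the per-task limit entirely.
 */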
458 
459 /* I/O Monitor Limits */
460 #define IOMON_DEFAULT_LIMIT                     (20480ull)      /* MB of logical/physical I/O */
461 #define IOMON_DEFAULT_INTERVAL                  (86400ull)      /* in seconds */
462 
463 uint64_t task_iomon_limit_mb;           /* Per-task I/O monitor limit in MBs */
464 uint64_t task_iomon_interval_secs;      /* Per-task I/O monitor interval in secs */
465 
466 #define IO_TELEMETRY_DEFAULT_LIMIT              (10ll * 1024ll * 1024ll)
467 int64_t io_telemetry_limit;                     /* Threshold to take a microstackshot (0 indicates I/O telemetry is turned off) */
468 int64_t global_logical_writes_count = 0;        /* Global count for logical writes */
469 int64_t global_logical_writes_to_external_count = 0;        /* Global count for logical writes to external storage*/
470 static boolean_t global_update_logical_writes(int64_t, int64_t*);
471 
472 #if DEBUG || DEVELOPMENT
473 static diagthreshold_check_return task_check_memorythreshold_is_valid(task_t task, uint64_t new_limit, bool is_diagnostics_value);
474 #endif
475 #define TASK_MAX_THREAD_LIMIT 256
476 
477 #if MACH_ASSERT
478 int pmap_ledgers_panic = 1;
479 int pmap_ledgers_panic_leeway = 3;
480 #endif /* MACH_ASSERT */
481 
482 int task_max = CONFIG_TASK_MAX; /* Max number of tasks */
483 
484 #if CONFIG_COREDUMP
485 int hwm_user_cores = 0; /* high watermark violations generate user core files */
486 #endif
487 
488 #ifdef MACH_BSD
489 extern uint32_t proc_platform(const struct proc *);
490 extern uint32_t proc_sdk(struct proc *);
491 extern void     proc_getexecutableuuid(void *, unsigned char *, unsigned long);
492 extern int      proc_pid(struct proc *p);
493 extern int      proc_selfpid(void);
494 extern struct proc *current_proc(void);
495 extern char     *proc_name_address(struct proc *p);
496 extern uint64_t get_dispatchqueue_offset_from_proc(void *);
497 extern int kevent_proc_copy_uptrs(void *proc, uint64_t *buf, uint32_t bufsize);
498 extern void workq_proc_suspended(struct proc *p);
499 extern void workq_proc_resumed(struct proc *p);
500 extern struct proc *kernproc;
501 
502 #if CONFIG_MEMORYSTATUS
503 extern void     proc_memstat_skip(struct proc* p, boolean_t set);
504 extern void     memorystatus_on_ledger_footprint_exceeded(int warning, bool memlimit_is_active, bool memlimit_is_fatal);
505 extern void     memorystatus_log_exception(const int max_footprint_mb, bool memlimit_is_active, bool memlimit_is_fatal);
506 extern void     memorystatus_log_diag_threshold_exception(const int diag_threshold_value);
507 extern boolean_t memorystatus_allowed_vm_map_fork(task_t task, bool *is_large);
508 extern uint64_t  memorystatus_available_memory_internal(struct proc *p);
509 
510 #if DEVELOPMENT || DEBUG
511 extern void memorystatus_abort_vm_map_fork(task_t);
512 #endif
513 
514 #endif /* CONFIG_MEMORYSTATUS */
515 
516 #endif /* MACH_BSD */
517 
518 /* Boot-arg that turns on fatal pac exception delivery for all first-party apps */
519 static TUNABLE(bool, enable_pac_exception, "enable_pac_exception", false);
520 
521 /*
522  * Defaults for controllable EXC_GUARD behaviors
523  *
524  * Internal builds are fatal by default (except BRIDGE).
525  * Create an alternate set of defaults for special processes by name.
526  */
527 struct task_exc_guard_named_default {
528 	char *name;
529 	uint32_t behavior;
530 };
531 #define _TASK_EXC_GUARD_MP_CORPSE  (TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_MP_CORPSE)
532 #define _TASK_EXC_GUARD_MP_ONCE    (_TASK_EXC_GUARD_MP_CORPSE | TASK_EXC_GUARD_MP_ONCE)
533 #define _TASK_EXC_GUARD_MP_FATAL   (TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_MP_FATAL)
534 
535 #define _TASK_EXC_GUARD_VM_CORPSE  (TASK_EXC_GUARD_VM_DELIVER | TASK_EXC_GUARD_VM_CORPSE)
536 #define _TASK_EXC_GUARD_VM_ONCE    (_TASK_EXC_GUARD_VM_CORPSE | TASK_EXC_GUARD_VM_ONCE)
537 #define _TASK_EXC_GUARD_VM_FATAL   (TASK_EXC_GUARD_VM_DELIVER | TASK_EXC_GUARD_VM_FATAL)
538 
539 #define _TASK_EXC_GUARD_ALL_CORPSE (_TASK_EXC_GUARD_MP_CORPSE | _TASK_EXC_GUARD_VM_CORPSE)
540 #define _TASK_EXC_GUARD_ALL_ONCE   (_TASK_EXC_GUARD_MP_ONCE | _TASK_EXC_GUARD_VM_ONCE)
541 #define _TASK_EXC_GUARD_ALL_FATAL  (_TASK_EXC_GUARD_MP_FATAL | _TASK_EXC_GUARD_VM_FATAL)
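/*
 * Illustrative expansion (derived from the definitions above):
 * _TASK_EXC_GUARD_ALL_FATAL == (TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_MP_FATAL |
 *                               TASK_EXC_GUARD_VM_DELIVER | TASK_EXC_GUARD_VM_FATAL),
 * i.e. guard violations are both delivered and fatal for MP and VM guards alike.
 */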
542 
543 /* cannot turn off FATAL and DELIVER bit if set */
544 uint32_t task_exc_guard_no_unset_mask = TASK_EXC_GUARD_MP_FATAL | TASK_EXC_GUARD_VM_FATAL |
545     TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_VM_DELIVER;
546 /* cannot turn on ONCE bit if unset */
547 uint32_t task_exc_guard_no_set_mask = TASK_EXC_GUARD_MP_ONCE | TASK_EXC_GUARD_VM_ONCE;
548 
549 #if !defined(XNU_TARGET_OS_BRIDGE)
550 
551 uint32_t task_exc_guard_default = _TASK_EXC_GUARD_ALL_FATAL;
552 uint32_t task_exc_guard_config_mask = TASK_EXC_GUARD_MP_ALL | TASK_EXC_GUARD_VM_ALL;
553 /*
554  * These "by-process-name" default overrides are intended to be a short-term fix to
555  * quickly get over races between changes introducing new EXC_GUARD raising behaviors
556  * in some process and a change in default behavior for same. We should ship with
557  * these lists empty (by fixing the bugs, or explicitly changing the task's EXC_GUARD
558  * exception behavior via task_set_exc_guard_behavior()).
559  *
560  * XXX Remember to add/remove TASK_EXC_GUARD_HONOR_NAMED_DEFAULTS back to
561  * task_exc_guard_default when transitioning this list between empty and
562  * non-empty.
563  */
564 static struct task_exc_guard_named_default task_exc_guard_named_defaults[] = {};
565 
566 #else /* !defined(XNU_TARGET_OS_BRIDGE) */
567 
568 uint32_t task_exc_guard_default = _TASK_EXC_GUARD_ALL_ONCE;
569 uint32_t task_exc_guard_config_mask = TASK_EXC_GUARD_MP_ALL | TASK_EXC_GUARD_VM_ALL;
570 static struct task_exc_guard_named_default task_exc_guard_named_defaults[] = {};
571 
572 #endif /* !defined(XNU_TARGET_OS_BRIDGE) */
573 
574 /* Forwards */
575 
576 static void task_hold_locked(task_t task);
577 static void task_wait_locked(task_t task, boolean_t until_not_runnable);
578 static void task_release_locked(task_t task);
579 extern task_t proc_get_task_raw(void *proc);
580 extern void task_ref_hold_proc_task_struct(task_t task);
581 extern void task_release_proc_task_struct(task_t task, proc_ro_t proc_ro);
582 
583 static void task_synchronizer_destroy_all(task_t task);
584 static os_ref_count_t
585 task_add_turnstile_watchports_locked(
586 	task_t                      task,
587 	struct task_watchports      *watchports,
588 	struct task_watchport_elem  **previous_elem_array,
589 	ipc_port_t                  *portwatch_ports,
590 	uint32_t                    portwatch_count);
591 
592 static os_ref_count_t
593 task_remove_turnstile_watchports_locked(
594 	task_t                 task,
595 	struct task_watchports *watchports,
596 	ipc_port_t             *port_freelist);
597 
598 static struct task_watchports *
599 task_watchports_alloc_init(
600 	task_t        task,
601 	thread_t      thread,
602 	uint32_t      count);
603 
604 static void
605 task_watchports_deallocate(
606 	struct task_watchports *watchports);
607 
608 void
609 task_set_64bit(
610 	task_t task,
611 	boolean_t is_64bit,
612 	boolean_t is_64bit_data)
613 {
614 #if defined(__i386__) || defined(__x86_64__) || defined(__arm64__)
615 	thread_t thread;
616 #endif /* defined(__i386__) || defined(__x86_64__) || defined(__arm64__) */
617 
618 	task_lock(task);
619 
620 	/*
621 	 * Switching to/from 64-bit address spaces
622 	 */
623 	if (is_64bit) {
624 		if (!task_has_64Bit_addr(task)) {
625 			task_set_64Bit_addr(task);
626 		}
627 	} else {
628 		if (task_has_64Bit_addr(task)) {
629 			task_clear_64Bit_addr(task);
630 		}
631 	}
632 
633 	/*
634 	 * Switching to/from 64-bit register state.
635 	 */
636 	if (is_64bit_data) {
637 		if (task_has_64Bit_data(task)) {
638 			goto out;
639 		}
640 
641 		task_set_64Bit_data(task);
642 	} else {
643 		if (!task_has_64Bit_data(task)) {
644 			goto out;
645 		}
646 
647 		task_clear_64Bit_data(task);
648 	}
649 
650 	/* FIXME: On x86, the thread save state flavor can diverge from the
651 	 * task's 64-bit feature flag due to the 32-bit/64-bit register save
652 	 * state dichotomy. Since we can be pre-empted in this interval,
653 	 * certain routines may observe the thread as being in an inconsistent
654 	 * state with respect to its task's 64-bitness.
655 	 */
656 
657 #if defined(__x86_64__) || defined(__arm64__)
658 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
659 		thread_mtx_lock(thread);
660 		machine_thread_switch_addrmode(thread);
661 		thread_mtx_unlock(thread);
662 	}
663 #endif /* defined(__x86_64__) || defined(__arm64__) */
664 
665 out:
666 	task_unlock(task);
667 }
668 
669 bool
670 task_get_64bit_addr(task_t task)
671 {
672 	return task_has_64Bit_addr(task);
673 }
674 
675 bool
676 task_get_64bit_data(task_t task)
677 {
678 	return task_has_64Bit_data(task);
679 }
680 
681 void
682 task_set_platform_binary(
683 	task_t task,
684 	boolean_t is_platform)
685 {
686 	if (is_platform) {
687 		task_ro_flags_set(task, TFRO_PLATFORM);
688 	} else {
689 		task_ro_flags_clear(task, TFRO_PLATFORM);
690 	}
691 }
692 
693 #if XNU_TARGET_OS_OSX
694 #if DEVELOPMENT || DEBUG
695 SECURITY_READ_ONLY_LATE(bool) AMFI_bootarg_disable_mach_hardening = false;
696 #endif /* DEVELOPMENT || DEBUG */
697 
698 void
699 task_disable_mach_hardening(task_t task)
700 {
701 	task_ro_flags_set(task, TFRO_MACH_HARDENING_OPT_OUT);
702 }
703 
704 bool
705 task_opted_out_mach_hardening(task_t task)
706 {
707 	return task_ro_flags_get(task) & TFRO_MACH_HARDENING_OPT_OUT;
708 }
709 #endif /* XNU_TARGET_OS_OSX */
710 
711 /*
712  * Use the `task_is_hardened_binary` macro below
713  * when applying new security policies.
714  *
715  * Kernel security policies now generally apply to
716  * "hardened binaries": platform binaries, plus third-party
717  * binaries that adopt the hardened runtime on iOS.
718  */
719 boolean_t
720 task_get_platform_binary(task_t task)
721 {
722 	return (task_ro_flags_get(task) & TFRO_PLATFORM) != 0;
723 }
724 
725 static boolean_t
726 task_get_hardened_runtime(task_t task)
727 {
728 	return (task_ro_flags_get(task) & TFRO_HARDENED) != 0;
729 }
730 
731 boolean_t
732 task_is_hardened_binary(task_t task)
733 {
734 	return task_get_platform_binary(task) ||
735 	       task_get_hardened_runtime(task);
736 }
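/*
 * Illustrative usage (hypothetical call site, per the guidance above): gate
 * new security policies on the hardened-binary predicate rather than on
 * platform-ness alone:
 *
 *	if (task_is_hardened_binary(task)) {
 *		// apply the hardened policy
 *	}
 */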
737 
738 void
739 task_set_hardened_runtime(
740 	task_t task,
741 	bool is_hardened)
742 {
743 	if (is_hardened) {
744 		task_ro_flags_set(task, TFRO_HARDENED);
745 	} else {
746 		task_ro_flags_clear(task, TFRO_HARDENED);
747 	}
748 }
749 
750 boolean_t
751 task_is_a_corpse(task_t task)
752 {
753 	return (task_ro_flags_get(task) & TFRO_CORPSE) != 0;
754 }
755 
756 boolean_t
757 task_is_ipc_active(task_t task)
758 {
759 	return task->ipc_active;
760 }
761 
762 void
763 task_set_corpse(task_t task)
764 {
765 	return task_ro_flags_set(task, TFRO_CORPSE);
766 }
767 
768 void
769 task_set_immovable_pinned(task_t task)
770 {
771 	ipc_task_set_immovable_pinned(task);
772 }
773 
774 /*
775  * Set or clear per-task TF_CA_CLIENT_WI flag according to specified argument.
776  * Returns "false" if flag is already set, and "true" in other cases.
777  */
778 bool
779 task_set_ca_client_wi(
780 	task_t task,
781 	boolean_t set_or_clear)
782 {
783 	bool ret = true;
784 	task_lock(task);
785 	if (set_or_clear) {
786 		/* Tasks can have only one CA_CLIENT work interval */
787 		if (task->t_flags & TF_CA_CLIENT_WI) {
788 			ret = false;
789 		} else {
790 			task->t_flags |= TF_CA_CLIENT_WI;
791 		}
792 	} else {
793 		task->t_flags &= ~TF_CA_CLIENT_WI;
794 	}
795 	task_unlock(task);
796 	return ret;
797 }
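/*
 * Illustrative call sequence (hypothetical): only the first set succeeds,
 * since a task may have at most one CA_CLIENT work interval:
 *
 *	bool ok = task_set_ca_client_wi(task, TRUE);    // true: flag newly set
 *	ok = task_set_ca_client_wi(task, TRUE);         // false: already set
 *	ok = task_set_ca_client_wi(task, FALSE);        // true: flag cleared
 */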
798 
799 /*
800  * task_set_dyld_info() is called at most three times.
801  * 1) at task struct creation to set addr/size to zero.
802  * 2) in mach_loader.c to set location of __all_image_info section in loaded dyld
803  * 3) from dyld itself to update the location of all_image_info
804  * For security, any calls after that are ignored.  The TF_DYLD_ALL_IMAGE_FINAL bit is used to determine state.
805  */
806 kern_return_t
807 task_set_dyld_info(
808 	task_t            task,
809 	mach_vm_address_t addr,
810 	mach_vm_size_t    size,
811 	bool              finalize_value)
812 {
813 	mach_vm_address_t end;
814 	if (os_add_overflow(addr, size, &end)) {
815 		return KERN_FAILURE;
816 	}
817 
818 	task_lock(task);
819 	/* don't accept updates if all_image_info_addr is final */
820 	if ((task->t_flags & TF_DYLD_ALL_IMAGE_FINAL) == 0) {
821 		bool inputNonZero   = ((addr != 0) || (size != 0));
822 		bool currentNonZero = ((task->all_image_info_addr != 0) || (task->all_image_info_size != 0));
823 		task->all_image_info_addr = addr;
824 		task->all_image_info_size = size;
825 		/* can only change from a non-zero value to another non-zero once */
826 		if ((inputNonZero && currentNonZero) || finalize_value) {
827 			task->t_flags |= TF_DYLD_ALL_IMAGE_FINAL;
828 		}
829 		task_unlock(task);
830 		return KERN_SUCCESS;
831 	} else {
832 		task_unlock(task);
833 		return KERN_FAILURE;
834 	}
835 }
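/*
 * Illustrative sequence (hypothetical values): task creation stores (0, 0);
 * mach_loader then records dyld's __all_image_info range, say
 * (0x1f4000000, 0x150); when dyld later moves all_image_info, that second
 * non-zero store latches TF_DYLD_ALL_IMAGE_FINAL, so any further call
 * returns KERN_FAILURE.
 */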
836 
837 bool
838 task_donates_own_pages(
839 	task_t task)
840 {
841 	return task->donates_own_pages;
842 }
843 
844 void
845 task_set_mach_header_address(
846 	task_t task,
847 	mach_vm_address_t addr)
848 {
849 	task_lock(task);
850 	task->mach_header_vm_address = addr;
851 	task_unlock(task);
852 }
853 
854 void
855 task_bank_reset(__unused task_t task)
856 {
857 	if (task->bank_context != NULL) {
858 		bank_task_destroy(task);
859 	}
860 }
861 
862 /*
863  * NOTE: This should only be called when the P_LINTRANSIT
864  *	 flag is set (the proc_trans lock is held) on the
865  *	 proc associated with the task.
866  */
867 void
868 task_bank_init(__unused task_t task)
869 {
870 	if (task->bank_context != NULL) {
871 		panic("Task bank init called with non null bank context for task: %p and bank_context: %p", task, task->bank_context);
872 	}
873 	bank_task_initialize(task);
874 }
875 
876 void
877 task_set_did_exec_flag(task_t task)
878 {
879 	task->t_procflags |= TPF_DID_EXEC;
880 }
881 
882 void
883 task_clear_exec_copy_flag(task_t task)
884 {
885 	task->t_procflags &= ~TPF_EXEC_COPY;
886 }
887 
888 event_t
889 task_get_return_wait_event(task_t task)
890 {
891 	return (event_t)&task->returnwait_inheritor;
892 }
893 
894 void
895 task_clear_return_wait(task_t task, uint32_t flags)
896 {
897 	if (flags & TCRW_CLEAR_INITIAL_WAIT) {
898 		thread_wakeup(task_get_return_wait_event(task));
899 	}
900 
901 	if (flags & TCRW_CLEAR_FINAL_WAIT) {
902 		is_write_lock(task->itk_space);
903 
904 		task->t_returnwaitflags &= ~TRW_LRETURNWAIT;
905 		task->returnwait_inheritor = NULL;
906 
907 		if (flags & TCRW_CLEAR_EXEC_COMPLETE) {
908 			task->t_returnwaitflags &= ~TRW_LEXEC_COMPLETE;
909 		}
910 
911 		if (task->t_returnwaitflags & TRW_LRETURNWAITER) {
912 			struct turnstile *turnstile = turnstile_prepare_hash((uintptr_t) task_get_return_wait_event(task),
913 			    TURNSTILE_ULOCK);
914 
915 			waitq_wakeup64_all(&turnstile->ts_waitq,
916 			    CAST_EVENT64_T(task_get_return_wait_event(task)),
917 			    THREAD_AWAKENED, WAITQ_UPDATE_INHERITOR);
918 
919 			turnstile_update_inheritor_complete(turnstile, TURNSTILE_INTERLOCK_HELD);
920 
921 			turnstile_complete_hash((uintptr_t) task_get_return_wait_event(task), TURNSTILE_ULOCK);
922 			turnstile_cleanup();
923 			task->t_returnwaitflags &= ~TRW_LRETURNWAITER;
924 		}
925 		is_write_unlock(task->itk_space);
926 	}
927 }
928 
929 void __attribute__((noreturn))
930 task_wait_to_return(void)
931 {
932 	task_t task = current_task();
933 	uint8_t returnwaitflags;
934 
935 	is_write_lock(task->itk_space);
936 
937 	if (task->t_returnwaitflags & TRW_LRETURNWAIT) {
938 		struct turnstile *turnstile = turnstile_prepare_hash((uintptr_t) task_get_return_wait_event(task),
939 		    TURNSTILE_ULOCK);
940 
941 		do {
942 			task->t_returnwaitflags |= TRW_LRETURNWAITER;
943 			turnstile_update_inheritor(turnstile, task->returnwait_inheritor,
944 			    (TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_THREAD));
945 
946 			waitq_assert_wait64(&turnstile->ts_waitq,
947 			    CAST_EVENT64_T(task_get_return_wait_event(task)),
948 			    THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
949 
950 			is_write_unlock(task->itk_space);
951 
952 			turnstile_update_inheritor_complete(turnstile, TURNSTILE_INTERLOCK_NOT_HELD);
953 
954 			thread_block(THREAD_CONTINUE_NULL);
955 
956 			is_write_lock(task->itk_space);
957 		} while (task->t_returnwaitflags & TRW_LRETURNWAIT);
958 
959 		turnstile_complete_hash((uintptr_t) task_get_return_wait_event(task), TURNSTILE_ULOCK);
960 	}
961 
962 	returnwaitflags = task->t_returnwaitflags;
963 	is_write_unlock(task->itk_space);
964 	turnstile_cleanup();
965 
966 	/**
967 	 * In posix_spawn() path, process_signature() is guaranteed to complete
968 	 * when the "second wait" is cleared. Call out to execute whatever depends
969 	 * on the result of that before we return to EL0.
970 	 */
971 	task_post_signature_processing_hook(task);
972 #if CONFIG_MACF
973 	/*
974 	 * Before jumping to userspace and allowing this process
975 	 * to execute any code, make sure its credentials are cached,
976 	 * and notify any interested parties.
977 	 */
978 	extern void current_cached_proc_cred_update(void);
979 
980 	current_cached_proc_cred_update();
981 	if (returnwaitflags & TRW_LEXEC_COMPLETE) {
982 		mac_proc_notify_exec_complete(current_proc());
983 	}
984 #endif
985 
986 	thread_bootstrap_return();
987 }
988 
989 /**
990  * A callout by task_wait_to_return on the main thread of a newly spawned task
991  * after process_signature() is completed by the parent task.
992  *
993  * @param task The newly spawned task
994  */
995 void
996 task_post_signature_processing_hook(task_t task)
997 {
998 	ml_task_post_signature_processing_hook(task);
999 }
1000 
1001 boolean_t
1002 task_is_exec_copy(task_t task)
1003 {
1004 	return task_is_exec_copy_internal(task);
1005 }
1006 
1007 boolean_t
1008 task_did_exec(task_t task)
1009 {
1010 	return task_did_exec_internal(task);
1011 }
1012 
1013 boolean_t
1014 task_is_active(task_t task)
1015 {
1016 	return task->active;
1017 }
1018 
1019 boolean_t
1020 task_is_halting(task_t task)
1021 {
1022 	return task->halting;
1023 }
1024 
1025 void
1026 task_init(void)
1027 {
1028 	if (max_task_footprint_mb != 0) {
1029 #if CONFIG_MEMORYSTATUS
1030 		if (max_task_footprint_mb < 50) {
1031 			printf("Warning: max_task_pmem %d below minimum.\n",
1032 			    max_task_footprint_mb);
1033 			max_task_footprint_mb = 50;
1034 		}
1035 		printf("Limiting task physical memory footprint to %d MB\n",
1036 		    max_task_footprint_mb);
1037 
1038 		max_task_footprint = (ledger_amount_t)max_task_footprint_mb * 1024 * 1024;         // Convert MB to bytes
1039 
1040 		/*
1041 		 * Configure the per-task memory limit warning level.
1042 		 * This is computed as a percentage.
1043 		 */
1044 		max_task_footprint_warning_level = 0;
1045 
1046 		if (max_mem < 0x40000000) {
1047 			/*
1048 			 * On devices with < 1GB of memory:
1049 			 *    -- set warnings to 50MB below the per-task limit.
1050 			 */
1051 			if (max_task_footprint_mb > 50) {
1052 				max_task_footprint_warning_level = ((max_task_footprint_mb - 50) * 100) / max_task_footprint_mb;
1053 			}
1054 		} else {
1055 			/*
1056 			 * On devices with >= 1GB of memory:
1057 			 *    -- set warnings to 100MB below the per-task limit.
1058 			 */
1059 			if (max_task_footprint_mb > 100) {
1060 				max_task_footprint_warning_level = ((max_task_footprint_mb - 100) * 100) / max_task_footprint_mb;
1061 			}
1062 		}
1063 
1064 		/*
1065 		 * Never allow warning level to land below the default.
1066 		 */
1067 		if (max_task_footprint_warning_level < PHYS_FOOTPRINT_WARNING_LEVEL) {
1068 			max_task_footprint_warning_level = PHYS_FOOTPRINT_WARNING_LEVEL;
1069 		}
1070 
1071 		printf("Limiting task physical memory warning to %d%%\n", max_task_footprint_warning_level);
1072 
1073 #else
1074 		printf("Warning: max_task_pmem specified, but jetsam not configured; ignoring.\n");
1075 #endif /* CONFIG_MEMORYSTATUS */
1076 	}
1077 
1078 #if DEVELOPMENT || DEBUG
1079 	PE_parse_boot_argn("task_exc_guard_default",
1080 	    &task_exc_guard_default,
1081 	    sizeof(task_exc_guard_default));
1082 #endif /* DEVELOPMENT || DEBUG */
1083 
1084 #if CONFIG_COREDUMP
1085 	if (!PE_parse_boot_argn("hwm_user_cores", &hwm_user_cores,
1086 	    sizeof(hwm_user_cores))) {
1087 		hwm_user_cores = 0;
1088 	}
1089 #endif
1090 
1091 	proc_init_cpumon_params();
1092 
1093 	if (!PE_parse_boot_argn("task_wakeups_monitor_rate", &task_wakeups_monitor_rate, sizeof(task_wakeups_monitor_rate))) {
1094 		task_wakeups_monitor_rate = TASK_WAKEUPS_MONITOR_DEFAULT_LIMIT;
1095 	}
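	/*
	 * Example (illustrative): "task_wakeups_monitor_rate=300" in boot-args
	 * doubles the default limit of 150 wakeups/sec before EXC_RESOURCE
	 * handling kicks in.
	 */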
1096 
1097 	if (!PE_parse_boot_argn("task_wakeups_monitor_interval", &task_wakeups_monitor_interval, sizeof(task_wakeups_monitor_interval))) {
1098 		task_wakeups_monitor_interval = TASK_WAKEUPS_MONITOR_DEFAULT_INTERVAL;
1099 	}
1100 
1101 	if (!PE_parse_boot_argn("task_wakeups_monitor_ustackshots_trigger_pct", &task_wakeups_monitor_ustackshots_trigger_pct,
1102 	    sizeof(task_wakeups_monitor_ustackshots_trigger_pct))) {
1103 		task_wakeups_monitor_ustackshots_trigger_pct = TASK_WAKEUPS_MONITOR_DEFAULT_USTACKSHOTS_TRIGGER;
1104 	}
1105 
1106 	if (!PE_parse_boot_argn("task_iomon_limit_mb", &task_iomon_limit_mb, sizeof(task_iomon_limit_mb))) {
1107 		task_iomon_limit_mb = IOMON_DEFAULT_LIMIT;
1108 	}
1109 
1110 	if (!PE_parse_boot_argn("task_iomon_interval_secs", &task_iomon_interval_secs, sizeof(task_iomon_interval_secs))) {
1111 		task_iomon_interval_secs = IOMON_DEFAULT_INTERVAL;
1112 	}
1113 
1114 	if (!PE_parse_boot_argn("io_telemetry_limit", &io_telemetry_limit, sizeof(io_telemetry_limit))) {
1115 		io_telemetry_limit = IO_TELEMETRY_DEFAULT_LIMIT;
1116 	}
1117 
1118 /*
1119  * If we have coalitions, coalition_init() will call init_task_ledgers() as it
1120  * sets up the ledgers for the default coalition. If we don't have coalitions,
1121  * then we have to call it now.
1122  */
1123 #if CONFIG_COALITIONS
1124 	assert(task_ledger_template);
1125 #else /* CONFIG_COALITIONS */
1126 	init_task_ledgers();
1127 #endif /* CONFIG_COALITIONS */
1128 
1129 	task_ref_init();
1130 	task_zone_init();
1131 
1132 #ifdef __LP64__
1133 	boolean_t is_64bit = TRUE;
1134 #else
1135 	boolean_t is_64bit = FALSE;
1136 #endif
1137 
1138 	kernproc = (struct proc *)zalloc_flags(proc_task_zone, Z_WAITOK | Z_ZERO);
1139 	kernel_task = proc_get_task_raw(kernproc);
1140 
1141 	/*
1142 	 * Create the kernel task as the first task.
1143 	 */
1144 	if (task_create_internal(TASK_NULL, NULL, NULL, FALSE, is_64bit,
1145 	    is_64bit, TF_NONE, TF_NONE, TPF_NONE, TWF_NONE, kernel_task) != KERN_SUCCESS) {
1146 		panic("task_init");
1147 	}
1148 
1149 	ipc_task_enable(kernel_task);
1150 
1151 #if defined(HAS_APPLE_PAC)
1152 	kernel_task->rop_pid = ml_default_rop_pid();
1153 	kernel_task->jop_pid = ml_default_jop_pid();
1154 	// kernel_task never runs at EL0, but machine_thread_state_convert_from/to_user() relies on
1155 	// disable_user_jop to be false for kernel threads (e.g. in exception delivery on thread_exception_daemon)
1156 	ml_task_set_disable_user_jop(kernel_task, FALSE);
1157 #endif
1158 
1159 	vm_map_deallocate(kernel_task->map);
1160 	kernel_task->map = kernel_map;
1161 }
1162 
1163 static inline void
1164 task_zone_init(void)
1165 {
1166 	proc_struct_size = roundup(proc_struct_size, task_alignment);
1167 	task_struct_size = roundup(sizeof(struct task), proc_alignment);
1168 	proc_and_task_size = proc_struct_size + task_struct_size;
1169 
1170 	proc_task_zone = zone_create_ext("proc_task", proc_and_task_size,
1171 	    ZC_ZFREE_CLEARMEM | ZC_SEQUESTER, ZONE_ID_PROC_TASK, NULL); /* sequester is needed for proc_rele() */
1172 }
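/*
 * Illustrative arithmetic (hypothetical sizes): with proc_struct_size 0x5e8
 * and task_alignment 0x40, roundup() yields 0x600, so the task portion of a
 * proc_task element begins at a task-aligned offset.
 */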
1173 
1174 /*
1175  * Task ledgers
1176  * ------------
1177  *
1178  * phys_footprint
1179  *   Physical footprint: This is the sum of:
1180  *     + (internal - alternate_accounting)
1181  *     + (internal_compressed - alternate_accounting_compressed)
1182  *     + iokit_mapped
1183  *     + purgeable_nonvolatile
1184  *     + purgeable_nonvolatile_compressed
1185  *     + page_table
1186  *
1187  * internal
1188  *   The task's anonymous memory, which on iOS is always resident.
1189  *
1190  * internal_compressed
1191  *   Amount of this task's internal memory which is held by the compressor.
1192  *   Such memory is no longer actually resident for the task [i.e., resident in its pmap],
1193  *   and could be either decompressed back into memory, or paged out to storage, depending
1194  *   on our implementation.
1195  *
1196  * iokit_mapped
1197  *   IOKit mappings: The total size of all IOKit mappings in this task [regardless of
1198  *    clean/dirty or internal/external state].
1199  *
1200  * alternate_accounting
1201  *   The number of internal dirty pages which are part of IOKit mappings. By definition, these pages
1202  *   are counted in both internal *and* iokit_mapped, so we must subtract them from the total to avoid
1203  *   double counting.
1204  *
1205  * pages_grabbed
1206  *   pages_grabbed counts all page grabs in a task.  It is also broken out into three subtypes
1207  *   which track UPL, IOPL and Kernel page grabs.
1208  */
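/*
 * Worked example (illustrative numbers): internal = 100 MB,
 * alternate_accounting = 10 MB, internal_compressed = 40 MB,
 * alternate_accounting_compressed = 0, iokit_mapped = 10 MB,
 * purgeable_nonvolatile = 5 MB, purgeable_nonvolatile_compressed = 1 MB and
 * page_table = 4 MB give
 * phys_footprint = (100 - 10) + (40 - 0) + 10 + 5 + 1 + 4 = 150 MB.
 */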
1209 void
1210 init_task_ledgers(void)
1211 {
1212 	ledger_template_t t;
1213 
1214 	assert(task_ledger_template == NULL);
1215 	assert(kernel_task == TASK_NULL);
1216 
1217 #if MACH_ASSERT
1218 	PE_parse_boot_argn("pmap_ledgers_panic",
1219 	    &pmap_ledgers_panic,
1220 	    sizeof(pmap_ledgers_panic));
1221 	PE_parse_boot_argn("pmap_ledgers_panic_leeway",
1222 	    &pmap_ledgers_panic_leeway,
1223 	    sizeof(pmap_ledgers_panic_leeway));
1224 #endif /* MACH_ASSERT */
1225 
1226 	if ((t = ledger_template_create("Per-task ledger")) == NULL) {
1227 		panic("couldn't create task ledger template");
1228 	}
1229 
1230 	task_ledgers.cpu_time = ledger_entry_add(t, "cpu_time", "sched", "ns");
1231 	task_ledgers.tkm_private = ledger_entry_add(t, "tkm_private",
1232 	    "physmem", "bytes");
1233 	task_ledgers.tkm_shared = ledger_entry_add(t, "tkm_shared", "physmem",
1234 	    "bytes");
1235 	task_ledgers.phys_mem = ledger_entry_add(t, "phys_mem", "physmem",
1236 	    "bytes");
1237 	task_ledgers.wired_mem = ledger_entry_add(t, "wired_mem", "physmem",
1238 	    "bytes");
1239 	task_ledgers.conclave_mem = ledger_entry_add_with_flags(t, "conclave_mem", "physmem", "count",
1240 	    LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE | LEDGER_ENTRY_ALLOW_DEBIT);
1241 	task_ledgers.internal = ledger_entry_add(t, "internal", "physmem",
1242 	    "bytes");
1243 	task_ledgers.iokit_mapped = ledger_entry_add_with_flags(t, "iokit_mapped", "mappings",
1244 	    "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1245 	task_ledgers.alternate_accounting = ledger_entry_add_with_flags(t, "alternate_accounting", "physmem",
1246 	    "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1247 	task_ledgers.alternate_accounting_compressed = ledger_entry_add_with_flags(t, "alternate_accounting_compressed", "physmem",
1248 	    "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1249 	task_ledgers.page_table = ledger_entry_add_with_flags(t, "page_table", "physmem",
1250 	    "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1251 	task_ledgers.phys_footprint = ledger_entry_add(t, "phys_footprint", "physmem",
1252 	    "bytes");
1253 	task_ledgers.internal_compressed = ledger_entry_add(t, "internal_compressed", "physmem",
1254 	    "bytes");
1255 	task_ledgers.reusable = ledger_entry_add(t, "reusable", "physmem", "bytes");
1256 	task_ledgers.external = ledger_entry_add(t, "external", "physmem", "bytes");
1257 	task_ledgers.purgeable_volatile = ledger_entry_add_with_flags(t, "purgeable_volatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1258 	task_ledgers.purgeable_nonvolatile = ledger_entry_add_with_flags(t, "purgeable_nonvolatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1259 	task_ledgers.purgeable_volatile_compressed = ledger_entry_add_with_flags(t, "purgeable_volatile_compress", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1260 	task_ledgers.purgeable_nonvolatile_compressed = ledger_entry_add_with_flags(t, "purgeable_nonvolatile_compress", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1261 #if DEBUG || DEVELOPMENT
1262 	task_ledgers.pages_grabbed = ledger_entry_add_with_flags(t, "pages_grabbed", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1263 	task_ledgers.pages_grabbed_kern = ledger_entry_add_with_flags(t, "pages_grabbed_kern", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1264 	task_ledgers.pages_grabbed_iopl = ledger_entry_add_with_flags(t, "pages_grabbed_iopl", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1265 	task_ledgers.pages_grabbed_upl = ledger_entry_add_with_flags(t, "pages_grabbed_upl", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1266 #endif
1267 	task_ledgers.tagged_nofootprint = ledger_entry_add_with_flags(t, "tagged_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1268 	task_ledgers.tagged_footprint = ledger_entry_add_with_flags(t, "tagged_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1269 	task_ledgers.tagged_nofootprint_compressed = ledger_entry_add_with_flags(t, "tagged_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1270 	task_ledgers.tagged_footprint_compressed = ledger_entry_add_with_flags(t, "tagged_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1271 	task_ledgers.network_volatile = ledger_entry_add_with_flags(t, "network_volatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1272 	task_ledgers.network_nonvolatile = ledger_entry_add_with_flags(t, "network_nonvolatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1273 	task_ledgers.network_volatile_compressed = ledger_entry_add_with_flags(t, "network_volatile_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1274 	task_ledgers.network_nonvolatile_compressed = ledger_entry_add_with_flags(t, "network_nonvolatile_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1275 	task_ledgers.media_nofootprint = ledger_entry_add_with_flags(t, "media_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1276 	task_ledgers.media_footprint = ledger_entry_add_with_flags(t, "media_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1277 	task_ledgers.media_nofootprint_compressed = ledger_entry_add_with_flags(t, "media_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1278 	task_ledgers.media_footprint_compressed = ledger_entry_add_with_flags(t, "media_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1279 	task_ledgers.graphics_nofootprint = ledger_entry_add_with_flags(t, "graphics_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1280 	task_ledgers.graphics_footprint = ledger_entry_add_with_flags(t, "graphics_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1281 	task_ledgers.graphics_nofootprint_compressed = ledger_entry_add_with_flags(t, "graphics_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1282 	task_ledgers.graphics_footprint_compressed = ledger_entry_add_with_flags(t, "graphics_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1283 	task_ledgers.neural_nofootprint = ledger_entry_add_with_flags(t, "neural_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1284 	task_ledgers.neural_footprint = ledger_entry_add_with_flags(t, "neural_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1285 	task_ledgers.neural_nofootprint_compressed = ledger_entry_add_with_flags(t, "neural_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1286 	task_ledgers.neural_footprint_compressed = ledger_entry_add_with_flags(t, "neural_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1287 	task_ledgers.neural_nofootprint_total = ledger_entry_add(t, "neural_nofootprint_total", "physmem", "bytes");
1288 
1289 #if CONFIG_FREEZE
1290 	task_ledgers.frozen_to_swap = ledger_entry_add(t, "frozen_to_swap", "physmem", "bytes");
1291 #endif /* CONFIG_FREEZE */
1292 
1293 	task_ledgers.platform_idle_wakeups = ledger_entry_add(t, "platform_idle_wakeups", "power",
1294 	    "count");
1295 	task_ledgers.interrupt_wakeups = ledger_entry_add(t, "interrupt_wakeups", "power",
1296 	    "count");
1297 
1298 #if CONFIG_SCHED_SFI
1299 	sfi_class_id_t class_id, ledger_alias;
1300 	for (class_id = SFI_CLASS_UNSPECIFIED; class_id < MAX_SFI_CLASS_ID; class_id++) {
1301 		task_ledgers.sfi_wait_times[class_id] = -1;
1302 	}
1303 
1304 	/* don't account for UNSPECIFIED */
1305 	for (class_id = SFI_CLASS_UNSPECIFIED + 1; class_id < MAX_SFI_CLASS_ID; class_id++) {
1306 		ledger_alias = sfi_get_ledger_alias_for_class(class_id);
1307 		if (ledger_alias != SFI_CLASS_UNSPECIFIED) {
1308 			/* Check to see if alias has been registered yet */
1309 			if (task_ledgers.sfi_wait_times[ledger_alias] != -1) {
1310 				task_ledgers.sfi_wait_times[class_id] = task_ledgers.sfi_wait_times[ledger_alias];
1311 			} else {
1312 				/* Otherwise, initialize it first */
1313 				task_ledgers.sfi_wait_times[class_id] = task_ledgers.sfi_wait_times[ledger_alias] = sfi_ledger_entry_add(t, ledger_alias);
1314 			}
1315 		} else {
1316 			task_ledgers.sfi_wait_times[class_id] = sfi_ledger_entry_add(t, class_id);
1317 		}
1318 
1319 		if (task_ledgers.sfi_wait_times[class_id] < 0) {
1320 			panic("couldn't create entries for task ledger template for SFI class 0x%x", class_id);
1321 		}
1322 	}
1323 
1324 	assert(task_ledgers.sfi_wait_times[MAX_SFI_CLASS_ID - 1] != -1);
1325 #endif /* CONFIG_SCHED_SFI */
1326 
1327 	task_ledgers.cpu_time_billed_to_me = ledger_entry_add(t, "cpu_time_billed_to_me", "sched", "ns");
1328 	task_ledgers.cpu_time_billed_to_others = ledger_entry_add(t, "cpu_time_billed_to_others", "sched", "ns");
1329 	task_ledgers.physical_writes = ledger_entry_add(t, "physical_writes", "res", "bytes");
1330 	task_ledgers.logical_writes = ledger_entry_add(t, "logical_writes", "res", "bytes");
1331 	task_ledgers.logical_writes_to_external = ledger_entry_add(t, "logical_writes_to_external", "res", "bytes");
1332 #if CONFIG_PHYS_WRITE_ACCT
1333 	task_ledgers.fs_metadata_writes = ledger_entry_add(t, "fs_metadata_writes", "res", "bytes");
1334 #endif /* CONFIG_PHYS_WRITE_ACCT */
1335 	task_ledgers.energy_billed_to_me = ledger_entry_add(t, "energy_billed_to_me", "power", "nj");
1336 	task_ledgers.energy_billed_to_others = ledger_entry_add(t, "energy_billed_to_others", "power", "nj");
1337 
1338 #if CONFIG_MEMORYSTATUS
1339 	task_ledgers.memorystatus_dirty_time = ledger_entry_add(t, "memorystatus_dirty_time", "physmem", "ns");
1340 #endif /* CONFIG_MEMORYSTATUS */
1341 
1342 	task_ledgers.swapins = ledger_entry_add_with_flags(t, "swapins", "physmem", "bytes",
1343 	    LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1344 
1345 	if ((task_ledgers.cpu_time < 0) ||
1346 	    (task_ledgers.tkm_private < 0) ||
1347 	    (task_ledgers.tkm_shared < 0) ||
1348 	    (task_ledgers.phys_mem < 0) ||
1349 	    (task_ledgers.wired_mem < 0) ||
1350 	    (task_ledgers.conclave_mem < 0) ||
1351 	    (task_ledgers.internal < 0) ||
1352 	    (task_ledgers.external < 0) ||
1353 	    (task_ledgers.reusable < 0) ||
1354 	    (task_ledgers.iokit_mapped < 0) ||
1355 	    (task_ledgers.alternate_accounting < 0) ||
1356 	    (task_ledgers.alternate_accounting_compressed < 0) ||
1357 	    (task_ledgers.page_table < 0) ||
1358 	    (task_ledgers.phys_footprint < 0) ||
1359 	    (task_ledgers.internal_compressed < 0) ||
1360 	    (task_ledgers.purgeable_volatile < 0) ||
1361 	    (task_ledgers.purgeable_nonvolatile < 0) ||
1362 	    (task_ledgers.purgeable_volatile_compressed < 0) ||
1363 	    (task_ledgers.purgeable_nonvolatile_compressed < 0) ||
1364 	    (task_ledgers.tagged_nofootprint < 0) ||
1365 	    (task_ledgers.tagged_footprint < 0) ||
1366 	    (task_ledgers.tagged_nofootprint_compressed < 0) ||
1367 	    (task_ledgers.tagged_footprint_compressed < 0) ||
1368 #if CONFIG_FREEZE
1369 	    (task_ledgers.frozen_to_swap < 0) ||
1370 #endif /* CONFIG_FREEZE */
1371 	    (task_ledgers.network_volatile < 0) ||
1372 	    (task_ledgers.network_nonvolatile < 0) ||
1373 	    (task_ledgers.network_volatile_compressed < 0) ||
1374 	    (task_ledgers.network_nonvolatile_compressed < 0) ||
1375 	    (task_ledgers.media_nofootprint < 0) ||
1376 	    (task_ledgers.media_footprint < 0) ||
1377 	    (task_ledgers.media_nofootprint_compressed < 0) ||
1378 	    (task_ledgers.media_footprint_compressed < 0) ||
1379 	    (task_ledgers.graphics_nofootprint < 0) ||
1380 	    (task_ledgers.graphics_footprint < 0) ||
1381 	    (task_ledgers.graphics_nofootprint_compressed < 0) ||
1382 	    (task_ledgers.graphics_footprint_compressed < 0) ||
1383 	    (task_ledgers.neural_nofootprint < 0) ||
1384 	    (task_ledgers.neural_footprint < 0) ||
1385 	    (task_ledgers.neural_nofootprint_compressed < 0) ||
1386 	    (task_ledgers.neural_footprint_compressed < 0) ||
1387 	    (task_ledgers.neural_nofootprint_total < 0) ||
1388 	    (task_ledgers.platform_idle_wakeups < 0) ||
1389 	    (task_ledgers.interrupt_wakeups < 0) ||
1390 	    (task_ledgers.cpu_time_billed_to_me < 0) || (task_ledgers.cpu_time_billed_to_others < 0) ||
1391 	    (task_ledgers.physical_writes < 0) ||
1392 	    (task_ledgers.logical_writes < 0) ||
1393 	    (task_ledgers.logical_writes_to_external < 0) ||
1394 #if CONFIG_PHYS_WRITE_ACCT
1395 	    (task_ledgers.fs_metadata_writes < 0) ||
1396 #endif /* CONFIG_PHYS_WRITE_ACCT */
1397 #if CONFIG_MEMORYSTATUS
1398 	    (task_ledgers.memorystatus_dirty_time < 0) ||
1399 #endif /* CONFIG_MEMORYSTATUS */
1400 	    (task_ledgers.energy_billed_to_me < 0) ||
1401 	    (task_ledgers.energy_billed_to_others < 0) ||
1402 	    (task_ledgers.swapins < 0)
1403 	    ) {
1404 		panic("couldn't create entries for task ledger template");
1405 	}
1406 
1407 	ledger_track_credit_only(t, task_ledgers.phys_footprint);
1408 	ledger_track_credit_only(t, task_ledgers.internal);
1409 	ledger_track_credit_only(t, task_ledgers.external);
1410 	ledger_track_credit_only(t, task_ledgers.reusable);
1411 
1412 	ledger_track_maximum(t, task_ledgers.phys_footprint, 60);
1413 	ledger_track_maximum(t, task_ledgers.phys_mem, 60);
1414 	ledger_track_maximum(t, task_ledgers.internal, 60);
1415 	ledger_track_maximum(t, task_ledgers.internal_compressed, 60);
1416 	ledger_track_maximum(t, task_ledgers.reusable, 60);
1417 	ledger_track_maximum(t, task_ledgers.external, 60);
1418 	ledger_track_maximum(t, task_ledgers.neural_nofootprint_total, 60);
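	/*
	 * The credit-only calls above restrict those entries to tracking
	 * the credited amount, and the ledger_track_maximum() calls record
	 * a peak value for each listed entry; the trailing argument (60) is
	 * assumed to be a sampling interval in seconds.
	 */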
1419 #if MACH_ASSERT
1420 	if (pmap_ledgers_panic) {
1421 		ledger_panic_on_negative(t, task_ledgers.phys_footprint);
1422 		ledger_panic_on_negative(t, task_ledgers.conclave_mem);
1423 		ledger_panic_on_negative(t, task_ledgers.page_table);
1424 		ledger_panic_on_negative(t, task_ledgers.internal);
1425 		ledger_panic_on_negative(t, task_ledgers.iokit_mapped);
1426 		ledger_panic_on_negative(t, task_ledgers.alternate_accounting);
1427 		ledger_panic_on_negative(t, task_ledgers.alternate_accounting_compressed);
1428 		ledger_panic_on_negative(t, task_ledgers.purgeable_volatile);
1429 		ledger_panic_on_negative(t, task_ledgers.purgeable_nonvolatile);
1430 		ledger_panic_on_negative(t, task_ledgers.purgeable_volatile_compressed);
1431 		ledger_panic_on_negative(t, task_ledgers.purgeable_nonvolatile_compressed);
1432 #if CONFIG_PHYS_WRITE_ACCT
1433 		ledger_panic_on_negative(t, task_ledgers.fs_metadata_writes);
1434 #endif /* CONFIG_PHYS_WRITE_ACCT */
1435 
1436 		ledger_panic_on_negative(t, task_ledgers.tagged_nofootprint);
1437 		ledger_panic_on_negative(t, task_ledgers.tagged_footprint);
1438 		ledger_panic_on_negative(t, task_ledgers.tagged_nofootprint_compressed);
1439 		ledger_panic_on_negative(t, task_ledgers.tagged_footprint_compressed);
1440 		ledger_panic_on_negative(t, task_ledgers.network_volatile);
1441 		ledger_panic_on_negative(t, task_ledgers.network_nonvolatile);
1442 		ledger_panic_on_negative(t, task_ledgers.network_volatile_compressed);
1443 		ledger_panic_on_negative(t, task_ledgers.network_nonvolatile_compressed);
1444 		ledger_panic_on_negative(t, task_ledgers.media_nofootprint);
1445 		ledger_panic_on_negative(t, task_ledgers.media_footprint);
1446 		ledger_panic_on_negative(t, task_ledgers.media_nofootprint_compressed);
1447 		ledger_panic_on_negative(t, task_ledgers.media_footprint_compressed);
1448 		ledger_panic_on_negative(t, task_ledgers.graphics_nofootprint);
1449 		ledger_panic_on_negative(t, task_ledgers.graphics_footprint);
1450 		ledger_panic_on_negative(t, task_ledgers.graphics_nofootprint_compressed);
1451 		ledger_panic_on_negative(t, task_ledgers.graphics_footprint_compressed);
1452 		ledger_panic_on_negative(t, task_ledgers.neural_nofootprint);
1453 		ledger_panic_on_negative(t, task_ledgers.neural_footprint);
1454 		ledger_panic_on_negative(t, task_ledgers.neural_nofootprint_compressed);
1455 		ledger_panic_on_negative(t, task_ledgers.neural_footprint_compressed);
1456 	}
1457 #endif /* MACH_ASSERT */
1458 
1459 #if CONFIG_MEMORYSTATUS
1460 	ledger_set_callback(t, task_ledgers.phys_footprint, task_footprint_exceeded, NULL, NULL);
1461 #endif /* CONFIG_MEMORYSTATUS */
1462 
1463 	ledger_set_callback(t, task_ledgers.interrupt_wakeups,
1464 	    task_wakeups_rate_exceeded, NULL, NULL);
1465 	ledger_set_callback(t, task_ledgers.physical_writes, task_io_rate_exceeded, (void *)FLAVOR_IO_PHYSICAL_WRITES, NULL);
1466 
1467 #if CONFIG_SPTM || !XNU_MONITOR
1468 	ledger_template_complete(t);
1469 #else /* CONFIG_SPTM || !XNU_MONITOR */
1470 	ledger_template_complete_secure_alloc(t);
1471 #endif /* CONFIG_SPTM || !XNU_MONITOR */
1472 	task_ledger_template = t;
1473 }
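
/*
 * Sketch of how the completed template is consumed: each task created by
 * task_create_internal() below instantiates a private ledger from it, so
 * every entry registered above becomes a per-task counter.
 *
 *	ledger_t ledger = ledger_instantiate(task_ledger_template,
 *	    LEDGER_CREATE_ACTIVE_ENTRIES);
 *	if (ledger == NULL) {
 *		return KERN_RESOURCE_SHORTAGE;
 *	}
 */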
1474 
1475 /* Create a task, but leave the task ports disabled */
1476 kern_return_t
1477 task_create_internal(
1478 	task_t             parent_task,            /* Null-able */
1479 	proc_ro_t          proc_ro,
1480 	coalition_t        *parent_coalitions __unused,
1481 	boolean_t          inherit_memory,
1482 	boolean_t          is_64bit,
1483 	boolean_t          is_64bit_data,
1484 	uint32_t           t_flags,
1485 	uint32_t           t_flags_ro,
1486 	uint32_t           t_procflags,
1487 	uint8_t            t_returnwaitflags,
1488 	task_t             child_task)
1489 {
1490 	task_t                  new_task;
1491 	vm_shared_region_t      shared_region;
1492 	ledger_t                ledger = NULL;
1493 	struct task_ro_data     task_ro_data = {};
1494 	uint32_t                parent_t_flags_ro = 0;
1495 
1496 	new_task = child_task;
1497 
1498 	if (task_ref_count_init(new_task) != KERN_SUCCESS) {
1499 		return KERN_RESOURCE_SHORTAGE;
1500 	}
1501 
1502 	/* allocate with active entries */
1503 	assert(task_ledger_template != NULL);
1504 	ledger = ledger_instantiate(task_ledger_template, LEDGER_CREATE_ACTIVE_ENTRIES);
1505 	if (ledger == NULL) {
1506 		task_ref_count_fini(new_task);
1507 		return KERN_RESOURCE_SHORTAGE;
1508 	}
1509 
1510 	counter_alloc(&(new_task->faults));
1511 
1512 #if defined(HAS_APPLE_PAC)
1513 	const uint8_t disable_user_jop = inherit_memory ? parent_task->disable_user_jop : FALSE;
1514 	ml_task_set_rop_pid(new_task, parent_task, inherit_memory);
1515 	ml_task_set_jop_pid(new_task, parent_task, inherit_memory, disable_user_jop);
1516 	ml_task_set_disable_user_jop(new_task, disable_user_jop);
1517 #endif
1518 
1519 
1520 	new_task->ledger = ledger;
1521 
1522 	/* if inherit_memory is true, parent_task MUST not be NULL */
1523 	if (!(t_flags & TF_CORPSE_FORK) && inherit_memory) {
1524 #if CONFIG_DEFERRED_RECLAIM
1525 		if (parent_task->deferred_reclamation_metadata) {
1526 			/*
1527 			 * Prevent concurrent reclaims while we're forking the parent_task's map,
1528 			 * so that the child's map is in sync with the forked reclamation
1529 			 * metadata.
1530 			 */
1531 			vm_deferred_reclamation_buffer_own(
1532 				parent_task->deferred_reclamation_metadata);
1533 		}
1534 #endif /* CONFIG_DEFERRED_RECLAIM */
1535 		new_task->map = vm_map_fork(ledger, parent_task->map, 0);
1536 #if CONFIG_DEFERRED_RECLAIM
1537 		if (new_task->map != NULL &&
1538 		    parent_task->deferred_reclamation_metadata) {
1539 			new_task->deferred_reclamation_metadata =
1540 			    vm_deferred_reclamation_buffer_fork(new_task,
1541 			    parent_task->deferred_reclamation_metadata);
1542 		}
1543 #endif /* CONFIG_DEFERRED_RECLAIM */
1544 	} else {
1545 		unsigned int pmap_flags = is_64bit ? PMAP_CREATE_64BIT : 0;
1546 		pmap_t pmap = pmap_create_options(ledger, 0, pmap_flags);
1547 		vm_map_t new_map;
1548 
1549 		if (pmap == NULL) {
1550 			counter_free(&new_task->faults);
1551 			ledger_dereference(ledger);
1552 			task_ref_count_fini(new_task);
1553 			return KERN_RESOURCE_SHORTAGE;
1554 		}
1555 		new_map = vm_map_create_options(pmap,
1556 		    (vm_map_offset_t)(VM_MIN_ADDRESS),
1557 		    (vm_map_offset_t)(VM_MAX_ADDRESS),
1558 		    VM_MAP_CREATE_PAGEABLE);
1559 		if (parent_task) {
1560 			vm_map_inherit_limits(new_map, parent_task->map);
1561 		}
1562 		new_task->map = new_map;
1563 	}
1564 
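	/*
	 * At this point new_task->map is either a copy-on-write fork of the
	 * parent's map or a freshly created pageable map; both paths can
	 * fail and leave the map NULL, which is handled here.
	 */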
1565 	if (new_task->map == NULL) {
1566 		counter_free(&new_task->faults);
1567 		ledger_dereference(ledger);
1568 		task_ref_count_fini(new_task);
1569 		return KERN_RESOURCE_SHORTAGE;
1570 	}
1571 
1572 	lck_mtx_init(&new_task->lock, &task_lck_grp, &task_lck_attr);
1573 	queue_init(&new_task->threads);
1574 	new_task->suspend_count = 0;
1575 	new_task->thread_count = 0;
1576 	new_task->active_thread_count = 0;
1577 	new_task->user_stop_count = 0;
1578 	new_task->legacy_stop_count = 0;
1579 	new_task->active = TRUE;
1580 	new_task->halting = FALSE;
1581 	new_task->priv_flags = 0;
1582 	new_task->t_flags = t_flags;
1583 	task_ro_data.t_flags_ro = t_flags_ro;
1584 	new_task->t_procflags = t_procflags;
1585 	new_task->t_returnwaitflags = t_returnwaitflags;
1586 	new_task->returnwait_inheritor = current_thread();
1587 	new_task->importance = 0;
1588 	new_task->crashed_thread_id = 0;
1589 	new_task->watchports = NULL;
1590 	new_task->t_rr_ranges = NULL;
1591 
1592 	new_task->bank_context = NULL;
1593 
1594 	if (parent_task) {
1595 		parent_t_flags_ro = task_ro_flags_get(parent_task);
1596 	}
1597 
1598 	if (parent_task && inherit_memory) {
1599 #if __has_feature(ptrauth_calls)
1600 		/* Inherit the pac exception flags from parent if in fork */
1601 		task_ro_data.t_flags_ro |= (parent_t_flags_ro & (TFRO_PAC_ENFORCE_USER_STATE |
1602 		    TFRO_PAC_EXC_FATAL));
1603 #endif /* __has_feature(ptrauth_calls) */
1604 		/* Inherit the hardened binary flags from parent if in fork */
1605 		task_ro_data.t_flags_ro |= parent_t_flags_ro & (TFRO_HARDENED | TFRO_PLATFORM | TFRO_JIT_EXC_FATAL);
1606 #if XNU_TARGET_OS_OSX
1607 		task_ro_data.t_flags_ro |= parent_t_flags_ro & TFRO_MACH_HARDENING_OPT_OUT;
1608 #endif /* XNU_TARGET_OS_OSX */
1609 	}
1610 
1611 #ifdef MACH_BSD
1612 	new_task->corpse_info = NULL;
1613 #endif /* MACH_BSD */
1614 
1615 	/* The kernel task, which is not created by this function, has unique id 0; ids assigned here start at 1. */
1616 	task_set_uniqueid(new_task);
1617 
1618 #if CONFIG_MACF
1619 	set_task_crash_label(new_task, NULL);
1620 
1621 	task_ro_data.task_filters.mach_trap_filter_mask = NULL;
1622 	task_ro_data.task_filters.mach_kobj_filter_mask = NULL;
1623 #endif
1624 
1625 #if CONFIG_MEMORYSTATUS
1626 	if (max_task_footprint != 0) {
1627 		ledger_set_limit(ledger, task_ledgers.phys_footprint, max_task_footprint, PHYS_FOOTPRINT_WARNING_LEVEL);
1628 	}
1629 #endif /* CONFIG_MEMORYSTATUS */
1630 
1631 	if (task_wakeups_monitor_rate != 0) {
1632 		uint32_t flags = WAKEMON_ENABLE | WAKEMON_SET_DEFAULTS;
1633 		int32_t  rate;        // Ignored because of WAKEMON_SET_DEFAULTS
1634 		task_wakeups_monitor_ctl(new_task, &flags, &rate);
1635 	}
1636 
1637 #if CONFIG_IO_ACCOUNTING
1638 	uint32_t flags = IOMON_ENABLE;
1639 	task_io_monitor_ctl(new_task, &flags);
1640 #endif /* CONFIG_IO_ACCOUNTING */
1641 
1642 	machine_task_init(new_task, parent_task, inherit_memory);
1643 
1644 	new_task->task_debug = NULL;
1645 
1646 #if DEVELOPMENT || DEBUG
1647 	new_task->task_unnested = FALSE;
1648 	new_task->task_disconnected_count = 0;
1649 #endif
1650 	queue_init(&new_task->semaphore_list);
1651 	new_task->semaphores_owned = 0;
1652 
1653 	new_task->vtimers = 0;
1654 
1655 	new_task->shared_region = NULL;
1656 
1657 	new_task->affinity_space = NULL;
1658 
1659 #if CONFIG_CPU_COUNTERS
1660 	new_task->t_kpc = 0;
1661 #endif /* CONFIG_CPU_COUNTERS */
1662 
1663 	new_task->pidsuspended = FALSE;
1664 	new_task->frozen = FALSE;
1665 	new_task->changing_freeze_state = FALSE;
1666 	new_task->rusage_cpu_flags = 0;
1667 	new_task->rusage_cpu_percentage = 0;
1668 	new_task->rusage_cpu_interval = 0;
1669 	new_task->rusage_cpu_deadline = 0;
1670 	new_task->rusage_cpu_callt = NULL;
1671 #if MACH_ASSERT
1672 	new_task->suspends_outstanding = 0;
1673 #endif
1674 	recount_task_init(&new_task->tk_recount);
1675 
1676 #if HYPERVISOR
1677 	new_task->hv_task_target = NULL;
1678 #endif /* HYPERVISOR */
1679 
1680 #if CONFIG_TASKWATCH
1681 	queue_init(&new_task->task_watchers);
1682 	new_task->num_taskwatchers  = 0;
1683 	new_task->watchapplying  = 0;
1684 #endif /* CONFIG_TASKWATCH */
1685 
1686 	new_task->mem_notify_reserved = 0;
1687 	new_task->memlimit_attrs_reserved = 0;
1688 
1689 	new_task->requested_policy = default_task_requested_policy;
1690 	new_task->effective_policy = default_task_effective_policy;
1691 
1692 	new_task->task_shared_region_slide = -1;
1693 
1694 	if (parent_task != NULL) {
1695 		task_ro_data.task_tokens.sec_token = *task_get_sec_token(parent_task);
1696 		task_ro_data.task_tokens.audit_token = *task_get_audit_token(parent_task);
1697 
1698 		/* only inherit the option bits; they have no effect until task_set_immovable_pinned() */
1699 		task_ro_data.task_control_port_options = task_get_control_port_options(parent_task);
1700 
1701 		task_ro_data.t_flags_ro |= parent_t_flags_ro & TFRO_FILTER_MSG;
1702 #if CONFIG_MACF
1703 		if (!(t_flags & TF_CORPSE_FORK)) {
1704 			task_ro_data.task_filters.mach_trap_filter_mask = task_get_mach_trap_filter_mask(parent_task);
1705 			task_ro_data.task_filters.mach_kobj_filter_mask = task_get_mach_kobj_filter_mask(parent_task);
1706 		}
1707 #endif
1708 	} else {
1709 		task_ro_data.task_tokens.sec_token = KERNEL_SECURITY_TOKEN;
1710 		task_ro_data.task_tokens.audit_token = KERNEL_AUDIT_TOKEN;
1711 
1712 		task_ro_data.task_control_port_options = TASK_CONTROL_PORT_OPTIONS_NONE;
1713 	}
1714 
1715 	/* must be set before task_importance_init_from_parent() */
1716 	if (proc_ro != NULL) {
1717 		new_task->bsd_info_ro = proc_ro_ref_task(proc_ro, new_task, &task_ro_data);
1718 	} else {
1719 		new_task->bsd_info_ro = proc_ro_alloc(NULL, NULL, new_task, &task_ro_data);
1720 	}
1721 
1722 	ipc_task_init(new_task, parent_task);
1723 
1724 	task_importance_init_from_parent(new_task, parent_task);
1725 
1726 	new_task->corpse_vmobject_list = NULL;
1727 
1728 	if (parent_task != TASK_NULL) {
1729 		/* inherit the parent's shared region */
1730 		shared_region = vm_shared_region_get(parent_task);
1731 		if (shared_region != NULL) {
1732 			vm_shared_region_set(new_task, shared_region);
1733 		}
1734 
1735 #if __has_feature(ptrauth_calls)
1736 		/* use parent's shared_region_id */
1737 		char *shared_region_id = task_get_vm_shared_region_id_and_jop_pid(parent_task, NULL);
1738 		if (shared_region_id != NULL) {
1739 			shared_region_key_alloc(shared_region_id, FALSE, 0);         /* get a reference */
1740 		}
1741 		task_set_shared_region_id(new_task, shared_region_id);
1742 #endif /* __has_feature(ptrauth_calls) */
1743 
1744 		if (task_has_64Bit_addr(parent_task)) {
1745 			task_set_64Bit_addr(new_task);
1746 		}
1747 
1748 		if (task_has_64Bit_data(parent_task)) {
1749 			task_set_64Bit_data(new_task);
1750 		}
1751 
1752 		if (inherit_memory) {
1753 			new_task->all_image_info_addr = parent_task->all_image_info_addr;
1754 			new_task->all_image_info_size = parent_task->all_image_info_size;
1755 			if (parent_task->t_flags & TF_DYLD_ALL_IMAGE_FINAL) {
1756 				new_task->t_flags |= TF_DYLD_ALL_IMAGE_FINAL;
1757 			}
1758 		}
1759 		new_task->mach_header_vm_address = 0;
1760 
1761 		if (inherit_memory && parent_task->affinity_space) {
1762 			task_affinity_create(parent_task, new_task);
1763 		}
1764 
1765 		new_task->pset_hint = parent_task->pset_hint = task_choose_pset(parent_task);
1766 
1767 		new_task->task_exc_guard = parent_task->task_exc_guard;
1768 		if (parent_task->t_flags & TF_NO_SMT) {
1769 			new_task->t_flags |= TF_NO_SMT;
1770 		}
1771 
1772 		if (parent_task->t_flags & TF_USE_PSET_HINT_CLUSTER_TYPE) {
1773 			new_task->t_flags |= TF_USE_PSET_HINT_CLUSTER_TYPE;
1774 		}
1775 
1776 		if (parent_task->t_flags & TF_TECS) {
1777 			new_task->t_flags |= TF_TECS;
1778 		}
1779 
1780 #if defined(__x86_64__)
1781 		if (parent_task->t_flags & TF_INSN_COPY_OPTOUT) {
1782 			new_task->t_flags |= TF_INSN_COPY_OPTOUT;
1783 		}
1784 #endif
1785 
1786 		new_task->priority = BASEPRI_DEFAULT;
1787 		new_task->max_priority = MAXPRI_USER;
1788 	} else {
1789 #ifdef __LP64__
1790 		if (is_64bit) {
1791 			task_set_64Bit_addr(new_task);
1792 		}
1793 #endif
1794 
1795 		if (is_64bit_data) {
1796 			task_set_64Bit_data(new_task);
1797 		}
1798 
1799 		new_task->all_image_info_addr = (mach_vm_address_t)0;
1800 		new_task->all_image_info_size = (mach_vm_size_t)0;
1801 
1802 		new_task->pset_hint = PROCESSOR_SET_NULL;
1803 
1804 		new_task->task_exc_guard = TASK_EXC_GUARD_NONE;
1805 
1806 		if (new_task == kernel_task) {
1807 			new_task->priority = BASEPRI_KERNEL;
1808 			new_task->max_priority = MAXPRI_KERNEL;
1809 		} else {
1810 			new_task->priority = BASEPRI_DEFAULT;
1811 			new_task->max_priority = MAXPRI_USER;
1812 		}
1813 	}
1814 
1815 	bzero(new_task->coalition, sizeof(new_task->coalition));
1816 	for (int i = 0; i < COALITION_NUM_TYPES; i++) {
1817 		queue_chain_init(new_task->task_coalition[i]);
1818 	}
1819 
1820 	/* Allocate I/O Statistics */
1821 	new_task->task_io_stats = kalloc_data(sizeof(struct io_stat_info),
1822 	    Z_WAITOK | Z_ZERO | Z_NOFAIL);
1823 
1824 	bzero(&(new_task->cpu_time_eqos_stats), sizeof(new_task->cpu_time_eqos_stats));
1825 	bzero(&(new_task->cpu_time_rqos_stats), sizeof(new_task->cpu_time_rqos_stats));
1826 
1827 	bzero(&new_task->extmod_statistics, sizeof(new_task->extmod_statistics));
1828 
1829 	counter_alloc(&(new_task->pageins));
1830 	counter_alloc(&(new_task->cow_faults));
1831 	counter_alloc(&(new_task->messages_sent));
1832 	counter_alloc(&(new_task->messages_received));
1833 
1834 	/* Copy resource accounting info from the parent for a corpse-forked task. */
1835 	if (parent_task != NULL && (t_flags & TF_CORPSE_FORK)) {
1836 		task_rollup_accounting_info(new_task, parent_task);
1837 		task_store_owned_vmobject_info(new_task, parent_task);
1838 	} else {
1839 		/* Initialize to zero for standard fork/spawn case */
1840 		new_task->total_runnable_time = 0;
1841 		new_task->syscalls_mach = 0;
1842 		new_task->syscalls_unix = 0;
1843 		new_task->c_switch = 0;
1844 		new_task->p_switch = 0;
1845 		new_task->ps_switch = 0;
1846 		new_task->decompressions = 0;
1847 		new_task->low_mem_notified_warn = 0;
1848 		new_task->low_mem_notified_critical = 0;
1849 		new_task->purged_memory_warn = 0;
1850 		new_task->purged_memory_critical = 0;
1851 		new_task->low_mem_privileged_listener = 0;
1852 		new_task->memlimit_is_active = 0;
1853 		new_task->memlimit_is_fatal = 0;
1854 		new_task->memlimit_active_exc_resource = 0;
1855 		new_task->memlimit_inactive_exc_resource = 0;
1856 		new_task->task_timer_wakeups_bin_1 = 0;
1857 		new_task->task_timer_wakeups_bin_2 = 0;
1858 		new_task->task_gpu_ns = 0;
1859 		new_task->task_writes_counters_internal.task_immediate_writes = 0;
1860 		new_task->task_writes_counters_internal.task_deferred_writes = 0;
1861 		new_task->task_writes_counters_internal.task_invalidated_writes = 0;
1862 		new_task->task_writes_counters_internal.task_metadata_writes = 0;
1863 		new_task->task_writes_counters_external.task_immediate_writes = 0;
1864 		new_task->task_writes_counters_external.task_deferred_writes = 0;
1865 		new_task->task_writes_counters_external.task_invalidated_writes = 0;
1866 		new_task->task_writes_counters_external.task_metadata_writes = 0;
1867 #if CONFIG_PHYS_WRITE_ACCT
1868 		new_task->task_fs_metadata_writes = 0;
1869 #endif /* CONFIG_PHYS_WRITE_ACCT */
1870 	}
1871 
1872 
1873 	new_task->donates_own_pages = FALSE;
1874 #if CONFIG_COALITIONS
1875 	if (!(t_flags & TF_CORPSE_FORK)) {
1876 		/* TODO: there is no graceful failure path here... */
1877 		if (parent_coalitions && parent_coalitions[COALITION_TYPE_RESOURCE]) {
1878 			coalitions_adopt_task(parent_coalitions, new_task);
1879 			if (parent_coalitions[COALITION_TYPE_JETSAM]) {
1880 				new_task->donates_own_pages = coalition_is_swappable(parent_coalitions[COALITION_TYPE_JETSAM]);
1881 			}
1882 		} else if (parent_task && parent_task->coalition[COALITION_TYPE_RESOURCE]) {
1883 			/*
1884 			 * all tasks at least have a resource coalition, so
1885 			 * if the parent has one then inherit all coalitions
1886 			 * the parent is a part of
1887 			 */
1888 			coalitions_adopt_task(parent_task->coalition, new_task);
1889 			if (parent_task->coalition[COALITION_TYPE_JETSAM]) {
1890 				new_task->donates_own_pages = coalition_is_swappable(parent_task->coalition[COALITION_TYPE_JETSAM]);
1891 			}
1892 		} else {
1893 			/* TODO: assert that new_task will be PID 1 (launchd) */
1894 			coalitions_adopt_init_task(new_task);
1895 		}
1896 		/*
1897 		 * on exec, we need to transfer the coalition roles from the
1898 		 * parent task to the exec copy task.
1899 		 */
1900 		if (parent_task && (t_procflags & TPF_EXEC_COPY)) {
1901 			int coal_roles[COALITION_NUM_TYPES];
1902 			task_coalition_roles(parent_task, coal_roles);
1903 			(void)coalitions_set_roles(new_task->coalition, new_task, coal_roles);
1904 		}
1905 	} else {
1906 		coalitions_adopt_corpse_task(new_task);
1907 	}
1908 
1909 	if (new_task->coalition[COALITION_TYPE_RESOURCE] == COALITION_NULL) {
1910 		panic("created task is not a member of a resource coalition");
1911 	}
1912 	task_set_coalition_member(new_task);
1913 #endif /* CONFIG_COALITIONS */
1914 
1915 	if (parent_task != TASK_NULL) {
1916 		/* task_policy_create queries the adopted coalition */
1917 		task_policy_create(new_task, parent_task);
1918 	}
1919 
1920 	new_task->dispatchqueue_offset = 0;
1921 	if (parent_task != NULL) {
1922 		new_task->dispatchqueue_offset = parent_task->dispatchqueue_offset;
1923 	}
1924 
1925 	new_task->task_can_transfer_memory_ownership = FALSE;
1926 	new_task->task_volatile_objects = 0;
1927 	new_task->task_nonvolatile_objects = 0;
1928 	new_task->task_objects_disowning = FALSE;
1929 	new_task->task_objects_disowned = FALSE;
1930 	new_task->task_owned_objects = 0;
1931 	queue_init(&new_task->task_objq);
1932 
1933 #if CONFIG_FREEZE
1934 	queue_init(&new_task->task_frozen_cseg_q);
1935 #endif /* CONFIG_FREEZE */
1936 
1937 	task_objq_lock_init(new_task);
1938 
1939 #if __arm64__
1940 	new_task->task_legacy_footprint = FALSE;
1941 	new_task->task_extra_footprint_limit = FALSE;
1942 	new_task->task_ios13extended_footprint_limit = FALSE;
1943 #endif /* __arm64__ */
1944 	new_task->task_region_footprint = FALSE;
1945 	new_task->task_has_crossed_thread_limit = FALSE;
1946 	new_task->task_thread_limit = 0;
1947 #if CONFIG_SECLUDED_MEMORY
1948 	new_task->task_can_use_secluded_mem = FALSE;
1949 	new_task->task_could_use_secluded_mem = FALSE;
1950 	new_task->task_could_also_use_secluded_mem = FALSE;
1951 	new_task->task_suppressed_secluded = FALSE;
1952 #endif /* CONFIG_SECLUDED_MEMORY */
1953 
1954 
1955 	/*
1956 	 * t_flags is set up above, but since we don't
1957 	 * currently support darkwake mode being set that
1958 	 * way, we clear it out here explicitly.
1959 	 */
1960 	new_task->t_flags &= ~(TF_DARKWAKE_MODE);
1961 
1962 	queue_init(&new_task->io_user_clients);
1963 	new_task->loadTag = 0;
1964 
1965 	lck_mtx_lock(&tasks_threads_lock);
1966 	queue_enter(&tasks, new_task, task_t, tasks);
1967 	tasks_count++;
1968 	if (tasks_suspend_state) {
1969 		task_suspend_internal(new_task);
1970 	}
1971 	lck_mtx_unlock(&tasks_threads_lock);
1972 	task_ref_hold_proc_task_struct(new_task);
1973 
1974 	return KERN_SUCCESS;
1975 }
1976 
1977 /*
1978  *	task_rollup_accounting_info
1979  *
1980  *	Roll up accounting stats. Used to rollup stats
1981  *	for exec copy task and corpse fork.
1982  */
1983 void
1984 task_rollup_accounting_info(task_t to_task, task_t from_task)
1985 {
1986 	assert(from_task != to_task);
1987 
1988 	recount_task_copy(&to_task->tk_recount, &from_task->tk_recount);
1989 	to_task->total_runnable_time = from_task->total_runnable_time;
1990 	counter_add(&to_task->faults, counter_load(&from_task->faults));
1991 	counter_add(&to_task->pageins, counter_load(&from_task->pageins));
1992 	counter_add(&to_task->cow_faults, counter_load(&from_task->cow_faults));
1993 	counter_add(&to_task->messages_sent, counter_load(&from_task->messages_sent));
1994 	counter_add(&to_task->messages_received, counter_load(&from_task->messages_received));
1995 	to_task->decompressions = from_task->decompressions;
1996 	to_task->syscalls_mach = from_task->syscalls_mach;
1997 	to_task->syscalls_unix = from_task->syscalls_unix;
1998 	to_task->c_switch = from_task->c_switch;
1999 	to_task->p_switch = from_task->p_switch;
2000 	to_task->ps_switch = from_task->ps_switch;
2001 	to_task->extmod_statistics = from_task->extmod_statistics;
2002 	to_task->low_mem_notified_warn = from_task->low_mem_notified_warn;
2003 	to_task->low_mem_notified_critical = from_task->low_mem_notified_critical;
2004 	to_task->purged_memory_warn = from_task->purged_memory_warn;
2005 	to_task->purged_memory_critical = from_task->purged_memory_critical;
2006 	to_task->low_mem_privileged_listener = from_task->low_mem_privileged_listener;
2007 	*to_task->task_io_stats = *from_task->task_io_stats;
2008 	to_task->cpu_time_eqos_stats = from_task->cpu_time_eqos_stats;
2009 	to_task->cpu_time_rqos_stats = from_task->cpu_time_rqos_stats;
2010 	to_task->task_timer_wakeups_bin_1 = from_task->task_timer_wakeups_bin_1;
2011 	to_task->task_timer_wakeups_bin_2 = from_task->task_timer_wakeups_bin_2;
2012 	to_task->task_gpu_ns = from_task->task_gpu_ns;
2013 	to_task->task_writes_counters_internal.task_immediate_writes = from_task->task_writes_counters_internal.task_immediate_writes;
2014 	to_task->task_writes_counters_internal.task_deferred_writes = from_task->task_writes_counters_internal.task_deferred_writes;
2015 	to_task->task_writes_counters_internal.task_invalidated_writes = from_task->task_writes_counters_internal.task_invalidated_writes;
2016 	to_task->task_writes_counters_internal.task_metadata_writes = from_task->task_writes_counters_internal.task_metadata_writes;
2017 	to_task->task_writes_counters_external.task_immediate_writes = from_task->task_writes_counters_external.task_immediate_writes;
2018 	to_task->task_writes_counters_external.task_deferred_writes = from_task->task_writes_counters_external.task_deferred_writes;
2019 	to_task->task_writes_counters_external.task_invalidated_writes = from_task->task_writes_counters_external.task_invalidated_writes;
2020 	to_task->task_writes_counters_external.task_metadata_writes = from_task->task_writes_counters_external.task_metadata_writes;
2021 #if CONFIG_PHYS_WRITE_ACCT
2022 	to_task->task_fs_metadata_writes = from_task->task_fs_metadata_writes;
2023 #endif /* CONFIG_PHYS_WRITE_ACCT */
2024 
2025 #if CONFIG_MEMORYSTATUS
2026 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.memorystatus_dirty_time);
2027 #endif /* CONFIG_MEMORYSTATUS */
2028 
2029 	/* Memory accounting entries are deliberately not rolled up here */
2030 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time);
2031 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.platform_idle_wakeups);
2032 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.interrupt_wakeups);
2033 #if CONFIG_SCHED_SFI
2034 	for (sfi_class_id_t class_id = SFI_CLASS_UNSPECIFIED; class_id < MAX_SFI_CLASS_ID; class_id++) {
2035 		ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.sfi_wait_times[class_id]);
2036 	}
2037 #endif
2038 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time_billed_to_me);
2039 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time_billed_to_others);
2040 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.physical_writes);
2041 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.logical_writes);
2042 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.energy_billed_to_me);
2043 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.energy_billed_to_others);
2044 }
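
/*
 * One caller visible above is the TF_CORPSE_FORK branch of
 * task_create_internal(); a corpse fork inherits the original task's
 * accounting snapshot so post-mortem tooling sees meaningful numbers:
 *
 *	task_rollup_accounting_info(new_task, parent_task);
 *	task_store_owned_vmobject_info(new_task, parent_task);
 */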
2045 
2046 /*
2047  *	task_deallocate_internal:
2048  *
2049  *	Drop a reference on a task.
2050  *	Don't call this directly.
2051  */
2052 extern void task_deallocate_internal(task_t task, os_ref_count_t refs);
2053 void
2054 task_deallocate_internal(
2055 	task_t          task,
2056 	os_ref_count_t  refs)
2057 {
2058 	ledger_amount_t credit, debit, interrupt_wakeups, platform_idle_wakeups;
2059 
2060 	if (task == TASK_NULL) {
2061 		return;
2062 	}
2063 
2064 #if IMPORTANCE_INHERITANCE
2065 	if (refs == 1) {
2066 		/*
2067 		 * If the last ref potentially comes from the task's importance,
2068 		 * disconnect it.  But more task refs may be added before
2069 		 * that completes, so wait for the reference to go to zero
2070 		 * naturally (it may happen on a recursive task_deallocate()
2071 		 * from the ipc_importance_disconnect_task() call).
2072 		 */
2073 		if (IIT_NULL != task->task_imp_base) {
2074 			ipc_importance_disconnect_task(task);
2075 		}
2076 		return;
2077 	}
2078 #endif /* IMPORTANCE_INHERITANCE */
2079 
2080 	if (refs > 0) {
2081 		return;
2082 	}
2083 
2084 	/*
2085 	 * The task should be dead at this point. Ensure other resources,
2086 	 * such as threads, are gone before we trash the world.
2087 	 */
2088 	assert(queue_empty(&task->threads));
2089 	assert(get_bsdtask_info(task) == NULL);
2090 	assert(!is_active(task->itk_space));
2091 	assert(!task->active);
2092 	assert(task->active_thread_count == 0);
2093 	assert(!task_get_game_mode(task));
2094 	assert(!task_get_carplay_mode(task));
2095 
2096 	lck_mtx_lock(&tasks_threads_lock);
2097 	assert(terminated_tasks_count > 0);
2098 	queue_remove(&terminated_tasks, task, task_t, tasks);
2099 	terminated_tasks_count--;
2100 	lck_mtx_unlock(&tasks_threads_lock);
2101 
2102 	/*
2103 	 * remove the reference on bank context
2104 	 */
2105 	task_bank_reset(task);
2106 
2107 	kfree_data(task->task_io_stats, sizeof(struct io_stat_info));
2108 
2109 	/*
2110 	 *	Give the machine dependent code a chance
2111 	 *	to perform cleanup before ripping apart
2112 	 *	the task.
2113 	 */
2114 	machine_task_terminate(task);
2115 
2116 	ipc_task_terminate(task);
2117 
2118 	/* let iokit know: termination phase 2 */
2119 	iokit_task_terminate(task, 2);
2120 
2121 	/* Unregister task from userspace coredumps on panic */
2122 	kern_unregister_userspace_coredump(task);
2123 
2124 	if (task->affinity_space) {
2125 		task_affinity_deallocate(task);
2126 	}
2127 
2128 #if MACH_ASSERT
2129 	if (task->ledger != NULL &&
2130 	    task->map != NULL &&
2131 	    task->map->pmap != NULL &&
2132 	    task->map->pmap->ledger != NULL) {
2133 		assert(task->ledger == task->map->pmap->ledger);
2134 	}
2135 #endif /* MACH_ASSERT */
2136 
2137 	vm_owned_objects_disown(task);
2138 	assert(task->task_objects_disowned);
2139 	if (task->task_owned_objects != 0) {
2140 		panic("task_deallocate(%p): "
2141 		    "volatile_objects=%d nonvolatile_objects=%d owned=%d\n",
2142 		    task,
2143 		    task->task_volatile_objects,
2144 		    task->task_nonvolatile_objects,
2145 		    task->task_owned_objects);
2146 	}
2147 
2148 #if CONFIG_DEFERRED_RECLAIM
2149 	if (task->deferred_reclamation_metadata != NULL) {
2150 		vm_deferred_reclamation_buffer_deallocate(task->deferred_reclamation_metadata);
2151 		task->deferred_reclamation_metadata = NULL;
2152 	}
2153 #endif /* CONFIG_DEFERRED_RECLAIM */
2154 
2155 	vm_map_deallocate(task->map);
2156 	if (task->is_large_corpse) {
2157 		assert(large_corpse_count > 0);
2158 		OSDecrementAtomic(&large_corpse_count);
2159 		task->is_large_corpse = false;
2160 	}
2161 	is_release(task->itk_space);
2162 
2163 	if (task->t_rr_ranges) {
2164 		restartable_ranges_release(task->t_rr_ranges);
2165 	}
2166 
2167 	ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
2168 	    &interrupt_wakeups, &debit);
2169 	ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
2170 	    &platform_idle_wakeups, &debit);
2171 
2172 	struct recount_times_mach sum = { 0 };
2173 	struct recount_times_mach p_only = { 0 };
2174 	recount_task_times_perf_only(task, &sum, &p_only);
2175 #if CONFIG_PERVASIVE_ENERGY
2176 	uint64_t energy = recount_task_energy_nj(task);
2177 #endif /* CONFIG_PERVASIVE_ENERGY */
2178 	recount_task_deinit(&task->tk_recount);
2179 
2180 	/* Accumulate statistics for dead tasks */
2181 	lck_spin_lock(&dead_task_statistics_lock);
2182 	dead_task_statistics.total_user_time += sum.rtm_user;
2183 	dead_task_statistics.total_system_time += sum.rtm_system;
2184 
2185 	dead_task_statistics.task_interrupt_wakeups += interrupt_wakeups;
2186 	dead_task_statistics.task_platform_idle_wakeups += platform_idle_wakeups;
2187 
2188 	dead_task_statistics.task_timer_wakeups_bin_1 += task->task_timer_wakeups_bin_1;
2189 	dead_task_statistics.task_timer_wakeups_bin_2 += task->task_timer_wakeups_bin_2;
2190 	dead_task_statistics.total_ptime += p_only.rtm_user + p_only.rtm_system;
2191 	dead_task_statistics.total_pset_switches += task->ps_switch;
2192 	dead_task_statistics.task_gpu_ns += task->task_gpu_ns;
2193 #if CONFIG_PERVASIVE_ENERGY
2194 	dead_task_statistics.task_energy += energy;
2195 #endif /* CONFIG_PERVASIVE_ENERGY */
2196 
2197 	lck_spin_unlock(&dead_task_statistics_lock);
2198 	lck_mtx_destroy(&task->lock, &task_lck_grp);
2199 
2200 	if (!ledger_get_entries(task->ledger, task_ledgers.tkm_private, &credit,
2201 	    &debit)) {
2202 		OSAddAtomic64(credit, (int64_t *)&tasks_tkm_private.alloc);
2203 		OSAddAtomic64(debit, (int64_t *)&tasks_tkm_private.free);
2204 	}
2205 	if (!ledger_get_entries(task->ledger, task_ledgers.tkm_shared, &credit,
2206 	    &debit)) {
2207 		OSAddAtomic64(credit, (int64_t *)&tasks_tkm_shared.alloc);
2208 		OSAddAtomic64(debit, (int64_t *)&tasks_tkm_shared.free);
2209 	}
2210 	ledger_dereference(task->ledger);
2211 
2212 	counter_free(&task->faults);
2213 	counter_free(&task->pageins);
2214 	counter_free(&task->cow_faults);
2215 	counter_free(&task->messages_sent);
2216 	counter_free(&task->messages_received);
2217 
2218 #if CONFIG_COALITIONS
2219 	task_release_coalitions(task);
2220 #endif /* CONFIG_COALITIONS */
2221 
2222 	bzero(task->coalition, sizeof(task->coalition));
2223 
2224 #if MACH_BSD
2225 	/* clean up collected information since the last reference to the task is gone */
2226 	if (task->corpse_info) {
2227 		void *corpse_info_kernel = kcdata_memory_get_begin_addr(task->corpse_info);
2228 		task_crashinfo_destroy(task->corpse_info);
2229 		task->corpse_info = NULL;
2230 		kfree_data(corpse_info_kernel, CORPSEINFO_ALLOCATION_SIZE);
2231 	}
2232 #endif
2233 
2234 #if CONFIG_MACF
2235 	if (get_task_crash_label(task)) {
2236 		mac_exc_free_label(get_task_crash_label(task));
2237 		set_task_crash_label(task, NULL);
2238 	}
2239 #endif
2240 
2241 	assert(queue_empty(&task->task_objq));
2242 	task_objq_lock_destroy(task);
2243 
2244 	if (task->corpse_vmobject_list) {
2245 		kfree_data(task->corpse_vmobject_list,
2246 		    (vm_size_t)task->corpse_vmobject_list_size);
2247 	}
2248 
2249 	task_ref_count_fini(task);
2250 	proc_ro_erase_task(task->bsd_info_ro);
2251 	task_release_proc_task_struct(task, task->bsd_info_ro);
2252 }
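
/*
 * A minimal sketch of the public wrapper, assuming the usual os_refcnt
 * shape (the real task_deallocate()/task_deallocate_grp() are defined
 * elsewhere and also handle reference-count groups):
 *
 *	void
 *	task_deallocate(task_t task)
 *	{
 *		if (task != TASK_NULL) {
 *			task_deallocate_internal(task,
 *			    os_ref_release(&task->ref_count));
 *		}
 *	}
 */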
2253 
2254 /*
2255  *	task_name_deallocate_mig:
2256  *
2257  *	Drop a reference on a task name.
2258  */
2259 void
2260 task_name_deallocate_mig(
2261 	task_name_t             task_name)
2262 {
2263 	return task_deallocate_grp((task_t)task_name, TASK_GRP_MIG);
2264 }
2265 
2266 /*
2267  *	task_policy_set_deallocate_mig:
2268  *
2269  *	Drop a reference on a task type.
2270  */
2271 void
2272 task_policy_set_deallocate_mig(task_policy_set_t task_policy_set)
2273 {
2274 	return task_deallocate_grp((task_t)task_policy_set, TASK_GRP_MIG);
2275 }
2276 
2277 /*
2278  *	task_policy_get_deallocate_mig:
2279  *
2280  *	Drop a reference on a task type.
2281  */
2282 void
2283 task_policy_get_deallocate_mig(task_policy_get_t task_policy_get)
2284 {
2285 	return task_deallocate_grp((task_t)task_policy_get, TASK_GRP_MIG);
2286 }
2287 
2288 /*
2289  *	task_inspect_deallocate_mig:
2290  *
2291  *	Drop a task inspection reference.
2292  */
2293 void
2294 task_inspect_deallocate_mig(
2295 	task_inspect_t          task_inspect)
2296 {
2297 	return task_deallocate_grp((task_t)task_inspect, TASK_GRP_MIG);
2298 }
2299 
2300 /*
2301  *	task_read_deallocate_mig:
2302  *
2303  *	Drop a reference on task read port.
2304  */
2305 void
2306 task_read_deallocate_mig(
2307 	task_read_t          task_read)
2308 {
2309 	return task_deallocate_grp((task_t)task_read, TASK_GRP_MIG);
2310 }
2311 
2312 /*
2313  *	task_suspension_token_deallocate:
2314  *
2315  *	Drop a reference on a task suspension token.
2316  */
2317 void
2318 task_suspension_token_deallocate(
2319 	task_suspension_token_t         token)
2320 {
2321 	return task_deallocate((task_t)token);
2322 }
2323 
2324 void
2325 task_suspension_token_deallocate_grp(
2326 	task_suspension_token_t         token,
2327 	task_grp_t                      grp)
2328 {
2329 	return task_deallocate_grp((task_t)token, grp);
2330 }
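
/*
 * The *_deallocate_mig() entry points above let MIG-generated server
 * code drop the reference taken when a port was converted to the
 * corresponding task flavor; they all funnel into task_deallocate_grp()
 * with TASK_GRP_MIG and differ only in the flavor-specific type they
 * accept.
 */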
2331 
2332 /*
2333  * task_collect_crash_info:
2334  *
2335  * collect crash info from bsd and mach based data
2336  * Collect crash info from BSD- and Mach-based data.
2337 kern_return_t
2338 task_collect_crash_info(
2339 	task_t task,
2340 #ifdef CONFIG_MACF
2341 	struct label *crash_label,
2342 #endif
2343 	int is_corpse_fork)
2344 {
2345 	kern_return_t kr = KERN_SUCCESS;
2346 
2347 	kcdata_descriptor_t crash_data = NULL;
2348 	kcdata_descriptor_t crash_data_release = NULL;
2349 	mach_msg_type_number_t size = CORPSEINFO_ALLOCATION_SIZE;
2350 	mach_vm_offset_t crash_data_ptr = 0;
2351 	void *crash_data_kernel = NULL;
2352 	void *crash_data_kernel_release = NULL;
2353 #if CONFIG_MACF
2354 	struct label *label, *free_label;
2355 #endif
2356 
2357 	if (!corpses_enabled()) {
2358 		return KERN_NOT_SUPPORTED;
2359 	}
2360 
2361 #if CONFIG_MACF
2362 	free_label = label = mac_exc_create_label(NULL);
2363 #endif
2364 
2365 	task_lock(task);
2366 
2367 	assert(is_corpse_fork || get_bsdtask_info(task) != NULL);
2368 	if (task->corpse_info == NULL && (is_corpse_fork || get_bsdtask_info(task) != NULL)) {
2369 #if CONFIG_MACF
2370 		/* Set the crash label, used by the exception delivery mac hook */
2371 		free_label = get_task_crash_label(task);         // Most likely NULL.
2372 		set_task_crash_label(task, label);
2373 		mac_exc_update_task_crash_label(task, crash_label);
2374 #endif
2375 		task_unlock(task);
2376 
2377 		crash_data_kernel = kalloc_data(CORPSEINFO_ALLOCATION_SIZE,
2378 		    Z_WAITOK | Z_ZERO);
2379 		if (crash_data_kernel == NULL) {
2380 			kr = KERN_RESOURCE_SHORTAGE;
2381 			goto out_no_lock;
2382 		}
2383 		crash_data_ptr = (mach_vm_offset_t) crash_data_kernel;
2384 
2385 		/* Do not get a corpse ref for corpse fork */
2386 		crash_data = task_crashinfo_alloc_init((mach_vm_address_t)crash_data_ptr, size,
2387 		    is_corpse_fork ? 0 : CORPSE_CRASHINFO_HAS_REF,
2388 		    KCFLAG_USE_MEMCOPY);
2389 		if (crash_data) {
2390 			task_lock(task);
2391 			crash_data_release = task->corpse_info;
2392 			crash_data_kernel_release = kcdata_memory_get_begin_addr(crash_data_release);
2393 			task->corpse_info = crash_data;
2394 
2395 			task_unlock(task);
2396 			kr = KERN_SUCCESS;
2397 		} else {
2398 			kfree_data(crash_data_kernel,
2399 			    CORPSEINFO_ALLOCATION_SIZE);
2400 			kr = KERN_FAILURE;
2401 		}
2402 
2403 		if (crash_data_release != NULL) {
2404 			task_crashinfo_destroy(crash_data_release);
2405 		}
2406 		kfree_data(crash_data_kernel_release, CORPSEINFO_ALLOCATION_SIZE);
2407 	} else {
2408 		task_unlock(task);
2409 	}
2410 
2411 out_no_lock:
2412 #if CONFIG_MACF
2413 	if (free_label != NULL) {
2414 		mac_exc_free_label(free_label);
2415 	}
2416 #endif
2417 	return kr;
2418 }
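
/*
 * task_mark_corpse() below is one caller. Note the lock dance above: the
 * task lock is dropped around the blocking kalloc_data() of the corpse
 * buffer, then retaken to swap the new kcdata descriptor into
 * task->corpse_info and destroy any descriptor it replaced.
 */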
2419 
2420 /*
2421  * task_deliver_crash_notification:
2422  *
2423  * Makes outcall to registered host port for a corpse.
2424  */
2425 kern_return_t
2426 task_deliver_crash_notification(
2427 	task_t corpse, /* corpse or corpse fork */
2428 	thread_t thread,
2429 	exception_type_t etype,
2430 	mach_exception_subcode_t subcode)
2431 {
2432 	kcdata_descriptor_t crash_info = corpse->corpse_info;
2433 	thread_t th_iter = NULL;
2434 	kern_return_t kr = KERN_SUCCESS;
2435 	wait_interrupt_t wsave;
2436 	mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
2437 	ipc_port_t corpse_port;
2438 
2439 	if (crash_info == NULL) {
2440 		return KERN_FAILURE;
2441 	}
2442 
2443 	assert(task_is_a_corpse(corpse));
2444 
2445 	task_lock(corpse);
2446 
2447 	/*
2448 	 * Always populate code[0] as the effective exception type for EXC_CORPSE_NOTIFY.
2449 	 * Crash reporters should derive whether it's fatal from corpse blob.
2450 	 * Crash reporters should derive whether it's fatal from the corpse blob.
2451 	code[0] = etype;
2452 	code[1] = subcode;
2453 
2454 	queue_iterate(&corpse->threads, th_iter, thread_t, task_threads)
2455 	{
2456 		if (th_iter->corpse_dup == FALSE) {
2457 			ipc_thread_reset(th_iter);
2458 		}
2459 	}
2460 	task_unlock(corpse);
2461 
2462 	/* Arm the no-sender notification for taskport */
2463 	/* Arm the no-senders notification for the task port */
2464 	corpse_port = convert_corpse_to_port_and_nsrequest(corpse);
2465 
2466 	wsave = thread_interrupt_level(THREAD_UNINT);
2467 	kr = exception_triage_thread(EXC_CORPSE_NOTIFY, code, EXCEPTION_CODE_MAX, thread);
2468 	if (kr != KERN_SUCCESS) {
2469 		printf("Failed to send exception EXC_CORPSE_NOTIFY. error code: %d for pid %d\n", kr, task_pid(corpse));
2470 	}
2471 
2472 	(void)thread_interrupt_level(wsave);
2473 
2474 	/*
2475 	 * Drop the send right on the corpse port; this will fire the
2476 	 * no-senders notification if exception delivery failed.
2477 	 */
2478 	ipc_port_release_send(corpse_port);
2479 	return kr;
2480 }
2481 
2482 /*
2483  *	task_terminate:
2484  *
2485  *	Terminate the specified task.  See comments on thread_terminate
2486  *	(kern/thread.c) about problems with terminating the "current task."
2487  */
2488 
2489 kern_return_t
2490 task_terminate(
2491 	task_t          task)
2492 {
2493 	if (task == TASK_NULL) {
2494 		return KERN_INVALID_ARGUMENT;
2495 	}
2496 
2497 	if (get_bsdtask_info(task)) {
2498 		return KERN_FAILURE;
2499 	}
2500 
2501 	return task_terminate_internal(task);
2502 }
2503 
2504 #if MACH_ASSERT
2505 extern int proc_pid(struct proc *);
2506 extern void proc_name_kdp(struct proc *p, char *buf, int size);
2507 #endif /* MACH_ASSERT */
2508 
2509 static void
2510 __unused task_partial_reap(task_t task, __unused int pid)
2511 {
2512 	unsigned int    reclaimed_resident = 0;
2513 	unsigned int    reclaimed_compressed = 0;
2514 	uint64_t        task_page_count;
2515 
2516 	task_page_count = (get_task_phys_footprint(task) / PAGE_SIZE_64);
2517 
2518 	KDBG(VMDBG_CODE(DBG_VM_MAP_PARTIAL_REAP) | DBG_FUNC_START,
2519 	    pid, task_page_count);
2520 
2521 	vm_map_partial_reap(task->map, &reclaimed_resident, &reclaimed_compressed);
2522 
2523 	KDBG(VMDBG_CODE(DBG_VM_MAP_PARTIAL_REAP) | DBG_FUNC_END,
2524 	    pid, reclaimed_resident, reclaimed_compressed);
2525 }
2526 
2527 /*
2528  * task_mark_corpse:
2529  *
2530  * Mark the task as a corpse. Called by crashing thread.
2531  */
2532 kern_return_t
2533 task_mark_corpse(task_t task)
2534 {
2535 	kern_return_t kr = KERN_SUCCESS;
2536 	thread_t self_thread;
2537 	(void) self_thread;
2538 	wait_interrupt_t wsave;
2539 #if CONFIG_MACF
2540 	struct label *crash_label = NULL;
2541 #endif
2542 
2543 	assert(task != kernel_task);
2544 	assert(task == current_task());
2545 	assert(!task_is_a_corpse(task));
2546 
2547 #if CONFIG_MACF
2548 	crash_label = mac_exc_create_label_for_proc((struct proc*)get_bsdtask_info(task));
2549 #endif
2550 
2551 	kr = task_collect_crash_info(task,
2552 #if CONFIG_MACF
2553 	    crash_label,
2554 #endif
2555 	    FALSE);
2556 	if (kr != KERN_SUCCESS) {
2557 		goto out;
2558 	}
2559 
2560 	self_thread = current_thread();
2561 
2562 	wsave = thread_interrupt_level(THREAD_UNINT);
2563 	task_lock(task);
2564 
2565 	/*
2566 	 * Check if any other thread called task_terminate_internal
2567 	 * and made the task inactive before we could mark it for
2568 	 * corpse pending report. Bail out if the task is inactive.
2569 	 */
2570 	if (!task->active) {
2571 		kcdata_descriptor_t crash_data_release = task->corpse_info;
2572 		void *crash_data_kernel_release = kcdata_memory_get_begin_addr(crash_data_release);
2573 
2574 		task->corpse_info = NULL;
2575 		task_unlock(task);
2576 
2577 		if (crash_data_release != NULL) {
2578 			task_crashinfo_destroy(crash_data_release);
2579 		}
2580 		kfree_data(crash_data_kernel_release, CORPSEINFO_ALLOCATION_SIZE);
2581 		return KERN_TERMINATED;
2582 	}
2583 
2584 	task_set_corpse_pending_report(task);
2585 	task_set_corpse(task);
2586 	task->crashed_thread_id = thread_tid(self_thread);
2587 
2588 	kr = task_start_halt_locked(task, TRUE);
2589 	assert(kr == KERN_SUCCESS);
2590 
2591 	task_set_uniqueid(task);
2592 
2593 	task_unlock(task);
2594 
2595 	/*
2596 	 * ipc_task_reset() was moved to the last thread_terminate_self(): rdar://75737960.
2597 	 * Disable the old ports here instead.
2598 	 *
2599 	 * The vm_map and ipc_space must exist until this function returns,
2600 	 * convert_port_to_{map,space}_with_flavor relies on this behavior.
2601 	 */
2602 	ipc_task_disable(task);
2603 
2604 	/* let iokit know: termination phase 1 */
2605 	iokit_task_terminate(task, 1);
2606 
2607 	/* terminate the ipc space */
2608 	ipc_space_terminate(task->itk_space);
2609 
2610 	/* Add it to global corpse task list */
2611 	task_add_to_corpse_task_list(task);
2612 
2613 	thread_terminate_internal(self_thread);
2614 
2615 	(void) thread_interrupt_level(wsave);
2616 	assert(task->halting == TRUE);
2617 
2618 out:
2619 #if CONFIG_MACF
2620 	mac_exc_free_label(crash_label);
2621 #endif
2622 	return kr;
2623 }
2624 
2625 /*
2626  *	task_set_uniqueid
2627  *
2628  *	Set task uniqueid to systemwide unique 64 bit value
2629  */
2630 void
2631 task_set_uniqueid(task_t task)
2632 {
2633 	task->task_uniqueid = OSIncrementAtomic64(&next_taskuniqueid);
2634 }
2635 
2636 /*
2637  *	task_clear_corpse
2638  *
2639  *	Clears the corpse pending bit on task.
2640  *	Removes inspection bit on the threads.
2641  */
2642 void
2643 task_clear_corpse(task_t task)
2644 {
2645 	thread_t th_iter = NULL;
2646 
2647 	task_lock(task);
2648 	queue_iterate(&task->threads, th_iter, thread_t, task_threads)
2649 	{
2650 		thread_mtx_lock(th_iter);
2651 		th_iter->inspection = FALSE;
2652 		ipc_thread_disable(th_iter);
2653 		thread_mtx_unlock(th_iter);
2654 	}
2655 
2656 	thread_terminate_crashed_threads();
2657 	/* remove the pending corpse report flag */
2658 	task_clear_corpse_pending_report(task);
2659 
2660 	task_unlock(task);
2661 }
2662 
2663 /*
2664  *	task_port_no_senders
2665  *
2666  *	Called whenever the Mach port system detects no-senders on
2667  *	the task port of a corpse.
2668  *	Each notification that comes in should terminate the task (corpse).
2669  */
2670 static void
2671 task_port_no_senders(ipc_port_t port, __unused mach_port_mscount_t mscount)
2672 {
2673 	task_t task = ipc_kobject_get_locked(port, IKOT_TASK_CONTROL);
2674 
2675 	assert(task != TASK_NULL);
2676 	assert(task_is_a_corpse(task));
2677 
2678 	/* Remove the task from global corpse task list */
2679 	task_remove_from_corpse_task_list(task);
2680 
2681 	task_clear_corpse(task);
2682 	vm_map_unset_corpse_source(task->map);
2683 	task_terminate_internal(task);
2684 }
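
/*
 * The notification handled above is armed in
 * task_deliver_crash_notification(): convert_corpse_to_port_and_nsrequest()
 * registers the no-senders request when the corpse port is created, so
 * the corpse is torn down here once the last client drops its send right.
 */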
2685 
2686 /*
2687  *	task_port_with_flavor_no_senders
2688  *
2689  *	Called whenever the Mach port system detects no-senders on
2690  *	the task inspect or read port. These ports are allocated lazily and
2691  *	should be deallocated here when there are no senders remaining.
2692  */
2693 static void
2694 task_port_with_flavor_no_senders(
2695 	ipc_port_t          port,
2696 	mach_port_mscount_t mscount __unused)
2697 {
2698 	task_t task;
2699 	mach_task_flavor_t flavor;
2700 	ipc_kobject_type_t kotype;
2701 
2702 	ip_mq_lock(port);
2703 	if (port->ip_srights > 0) {
2704 		ip_mq_unlock(port);
2705 		return;
2706 	}
2707 	kotype = ip_kotype(port);
2708 	assert((IKOT_TASK_READ == kotype) || (IKOT_TASK_INSPECT == kotype));
2709 	task = ipc_kobject_get_locked(port, kotype);
2710 	if (task != TASK_NULL) {
2711 		task_reference(task);
2712 	}
2713 	ip_mq_unlock(port);
2714 
2715 	if (task == TASK_NULL) {
2716 		/* The task is exiting or disabled; it will eventually deallocate the port */
2717 		return;
2718 	}
2719 
2720 	if (kotype == IKOT_TASK_READ) {
2721 		flavor = TASK_FLAVOR_READ;
2722 	} else {
2723 		flavor = TASK_FLAVOR_INSPECT;
2724 	}
2725 
2726 	itk_lock(task);
2727 	ip_mq_lock(port);
2728 
2729 	/*
2730 	 * If the port is no longer active, then ipc_task_terminate() ran
2731 	 * and destroyed the kobject already. Just deallocate the task
2732 	 * ref we took and go away.
2733 	 *
2734 	 * It is also possible that several nsrequests are in flight,
2735 	 * only one shall NULL-out the port entry, and this is the one
2736 	 * that gets to dealloc the port.
2737 	 *
2738 	 * Check for a stale no-senders notification. A call to any function
2739 	 * that vends out send rights to this port could resurrect it between
2740 	 * this notification being generated and actually being handled here.
2741 	 */
2742 	if (!ip_active(port) ||
2743 	    task->itk_task_ports[flavor] != port ||
2744 	    port->ip_srights > 0) {
2745 		ip_mq_unlock(port);
2746 		itk_unlock(task);
2747 		task_deallocate(task);
2748 		return;
2749 	}
2750 
2751 	assert(task->itk_task_ports[flavor] == port);
2752 	task->itk_task_ports[flavor] = IP_NULL;
2753 	itk_unlock(task);
2754 
2755 	ipc_kobject_dealloc_port_and_unlock(port, 0, kotype);
2756 
2757 	task_deallocate(task);
2758 }
2759 
2760 /*
2761  *	task_wait_till_threads_terminate_locked
2762  *
2763  *	Wait until all the threads in the task have terminated.
2764  *	Might release the task lock and re-acquire it.
2765  */
2766 void
2767 task_wait_till_threads_terminate_locked(task_t task)
2768 {
2769 	/* wait for all the threads in the task to terminate */
2770 	while (task->active_thread_count != 0) {
2771 		assert_wait((event_t)&task->active_thread_count, THREAD_UNINT);
2772 		task_unlock(task);
2773 		thread_block(THREAD_CONTINUE_NULL);
2774 
2775 		task_lock(task);
2776 	}
2777 }
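
/*
 * The matching wakeup is assumed to happen on the thread-termination
 * path once the last active thread is gone, along the lines of:
 *
 *	if (--task->active_thread_count == 0) {
 *		thread_wakeup((event_t)&task->active_thread_count);
 *	}
 */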
2778 
2779 /*
2780  *	task_duplicate_map_and_threads
2781  *
2782  *	Copy vmmap of source task.
2783  *	Copy active threads from source task to destination task.
2784  *	The source task is suspended for the duration of the copy.
2785  */
2786 kern_return_t
2787 task_duplicate_map_and_threads(
2788 	task_t task,
2789 	void *p,
2790 	task_t new_task,
2791 	thread_t *thread_ret,
2792 	uint64_t **udata_buffer,
2793 	int *size,
2794 	int *num_udata,
2795 	bool for_exception)
2796 {
2797 	kern_return_t kr = KERN_SUCCESS;
2798 	int active;
2799 	thread_t thread, self, thread_return = THREAD_NULL;
2800 	thread_t new_thread = THREAD_NULL, first_thread = THREAD_NULL;
2801 	thread_t *thread_array;
2802 	uint32_t active_thread_count = 0, array_count = 0, i;
2803 	vm_map_t oldmap;
2804 	uint64_t *buffer = NULL;
2805 	int buf_size = 0;
2806 	int est_knotes = 0, num_knotes = 0;
2807 
2808 	self = current_thread();
2809 
2810 	/*
2811 	 * Suspend the task to copy thread state; use the internal
2812 	 * variant so that no user-space process can resume
2813 	 * the task out from under us.
2814 	 */
2815 	kr = task_suspend_internal(task);
2816 	if (kr != KERN_SUCCESS) {
2817 		return kr;
2818 	}
2819 
2820 	if (task->map->disable_vmentry_reuse == TRUE) {
2821 		 * Quite likely GuardMalloc (or some debugging tool)
2822 		 * is being used on this task, and it has gone through
2823 		 * its limit. Making a corpse would likely encounter
2824 		 * a lot of VM entries that need COW.
2825 		 * a lot of VM entries that will need COW.
2826 		 *
2827 		 * Skip it.
2828 		 */
2829 #if DEVELOPMENT || DEBUG
2830 		memorystatus_abort_vm_map_fork(task);
2831 #endif
2832 		ktriage_record(thread_tid(self), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_CORPSE, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_CORPSE_FAIL_LIBGMALLOC), 0 /* arg */);
2833 		task_resume_internal(task);
2834 		return KERN_FAILURE;
2835 	}
2836 
2837 	/* Check with VM if vm_map_fork is allowed for this task */
2838 	bool is_large = false;
2839 	if (memorystatus_allowed_vm_map_fork(task, &is_large)) {
2840 		/* Set up the new task's VM map: switch from the parent task's map to its COW fork */
2841 		oldmap = new_task->map;
2842 		new_task->map = vm_map_fork(new_task->ledger,
2843 		    task->map,
2844 		    (VM_MAP_FORK_SHARE_IF_INHERIT_NONE |
2845 		    VM_MAP_FORK_PRESERVE_PURGEABLE |
2846 		    VM_MAP_FORK_CORPSE_FOOTPRINT |
2847 		    VM_MAP_FORK_SHARE_IF_OWNED));
2848 		if (new_task->map) {
2849 			new_task->is_large_corpse = is_large;
2850 			vm_map_deallocate(oldmap);
2851 
2852 			/* copy ledgers that impact the memory footprint */
2853 			vm_map_copy_footprint_ledgers(task, new_task);
2854 
2855 			/* Get all the udata pointers from kqueue */
2856 			est_knotes = kevent_proc_copy_uptrs(p, NULL, 0);
2857 			if (est_knotes > 0) {
2858 				buf_size = (est_knotes + 32) * sizeof(uint64_t);
2859 				buffer = kalloc_data(buf_size, Z_WAITOK);
2860 				num_knotes = kevent_proc_copy_uptrs(p, buffer, buf_size);
2861 				if (num_knotes > est_knotes + 32) {
2862 					num_knotes = est_knotes + 32;
2863 				}
2864 			}
2865 		} else {
2866 			if (is_large) {
2867 				assert(large_corpse_count > 0);
2868 				OSDecrementAtomic(&large_corpse_count);
2869 			}
2870 			new_task->map = oldmap;
2871 #if DEVELOPMENT || DEBUG
2872 			memorystatus_abort_vm_map_fork(task);
2873 #endif
2874 			task_resume_internal(task);
2875 			return KERN_NO_SPACE;
2876 		}
2877 	} else if (!for_exception) {
2878 #if DEVELOPMENT || DEBUG
2879 		memorystatus_abort_vm_map_fork(task);
2880 #endif
2881 		task_resume_internal(task);
2882 		return KERN_NO_SPACE;
2883 	}
2884 
2885 	active_thread_count = task->active_thread_count;
2886 	if (active_thread_count == 0) {
2887 		kfree_data(buffer, buf_size);
2888 		task_resume_internal(task);
2889 		return KERN_FAILURE;
2890 	}
2891 
2892 	thread_array = kalloc_type(thread_t, active_thread_count, Z_WAITOK);
2893 
2894 	/* Gather refs on the active threads under the task lock, then drop it before calling thread_create_with_continuation */
2895 	task_lock(task);
2896 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
2897 		/* Skip inactive threads */
2898 		active = thread->active;
2899 		if (!active) {
2900 			continue;
2901 		}
2902 
2903 		if (array_count >= active_thread_count) {
2904 			break;
2905 		}
2906 
2907 		thread_array[array_count++] = thread;
2908 		thread_reference(thread);
2909 	}
2910 	task_unlock(task);
2911 
2912 	for (i = 0; i < array_count; i++) {
2913 		kr = thread_create_with_continuation(new_task, &new_thread, (thread_continue_t)thread_corpse_continue);
2914 		if (kr != KERN_SUCCESS) {
2915 			break;
2916 		}
2917 
2918 		/* Equivalent of current thread in corpse */
2919 		if (thread_array[i] == self) {
2920 			thread_return = new_thread;
2921 			new_task->crashed_thread_id = thread_tid(new_thread);
2922 		} else if (first_thread == NULL) {
2923 			first_thread = new_thread;
2924 		} else {
2925 			/* drop the extra ref returned by thread_create_with_continuation */
2926 			thread_deallocate(new_thread);
2927 		}
2928 
2929 		kr = thread_dup2(thread_array[i], new_thread);
2930 		if (kr != KERN_SUCCESS) {
2931 			thread_mtx_lock(new_thread);
2932 			new_thread->corpse_dup = TRUE;
2933 			thread_mtx_unlock(new_thread);
2934 			continue;
2935 		}
2936 
2937 		/* Copy thread name */
2938 		bsd_copythreadname(get_bsdthread_info(new_thread),
2939 		    get_bsdthread_info(thread_array[i]));
2940 		new_thread->thread_tag = thread_array[i]->thread_tag &
2941 		    ~THREAD_TAG_USER_JOIN;
2942 		thread_copy_resource_info(new_thread, thread_array[i]);
2943 	}
2944 
2945 	/* return the first thread if we couldn't find the equivalent of current */
2946 	if (thread_return == THREAD_NULL) {
2947 		thread_return = first_thread;
2948 	} else if (first_thread != THREAD_NULL) {
2949 		/* drop the extra ref returned by thread_create_with_continuation */
2950 		thread_deallocate(first_thread);
2951 	}
2952 
2953 	task_resume_internal(task);
2954 
2955 	for (i = 0; i < array_count; i++) {
2956 		thread_deallocate(thread_array[i]);
2957 	}
2958 	kfree_type(thread_t, active_thread_count, thread_array);
2959 
2960 	if (kr == KERN_SUCCESS) {
2961 		*thread_ret = thread_return;
2962 		*udata_buffer = buffer;
2963 		*size = buf_size;
2964 		*num_udata = num_knotes;
2965 	} else {
2966 		if (thread_return != THREAD_NULL) {
2967 			thread_deallocate(thread_return);
2968 		}
2969 		kfree_data(buffer, buf_size);
2970 	}
2971 
2972 	return kr;
2973 }
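/*
 * Sizing note (illustrative, not from the original source): the kqueue
 * udata snapshot taken above is inherently racy, so the buffer is sized
 * from an estimate plus a slack of 32 entries.  For example, if
 * kevent_proc_copy_uptrs() first estimates 10 knotes, the buffer holds
 * 10 + 32 = 42 uint64_t entries (336 bytes), and a second pass that
 * reports more than 42 is clamped back down to 42.
 */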
2974 
2975 #if CONFIG_SECLUDED_MEMORY
2976 extern void task_set_can_use_secluded_mem_locked(
2977 	task_t          task,
2978 	boolean_t       can_use_secluded_mem);
2979 #endif /* CONFIG_SECLUDED_MEMORY */
2980 
2981 #if MACH_ASSERT
2982 int debug4k_panic_on_terminate = 0;
2983 #endif /* MACH_ASSERT */
2984 kern_return_t
2985 task_terminate_internal(
2986 	task_t                  task)
2987 {
2988 	thread_t                        thread, self;
2989 	task_t                          self_task;
2990 	boolean_t                       interrupt_save;
2991 	int                             pid = 0;
2992 
2993 	assert(task != kernel_task);
2994 
2995 	self = current_thread();
2996 	self_task = current_task();
2997 
2998 	/*
2999 	 *	Get the task locked and make sure that we are not racing
3000 	 *	with someone else trying to terminate us.
3001 	 */
3002 	if (task == self_task) {
3003 		task_lock(task);
3004 	} else if (task < self_task) {
3005 		task_lock(task);
3006 		task_lock(self_task);
3007 	} else {
3008 		task_lock(self_task);
3009 		task_lock(task);
3010 	}
3011 
3012 #if CONFIG_SECLUDED_MEMORY
3013 	if (task->task_can_use_secluded_mem) {
3014 		task_set_can_use_secluded_mem_locked(task, FALSE);
3015 	}
3016 	task->task_could_use_secluded_mem = FALSE;
3017 	task->task_could_also_use_secluded_mem = FALSE;
3018 
3019 	if (task->task_suppressed_secluded) {
3020 		stop_secluded_suppression(task);
3021 	}
3022 #endif /* CONFIG_SECLUDED_MEMORY */
3023 
3024 	if (!task->active) {
3025 		/*
3026 		 *	Task is already being terminated.
3027 		 *	Just return an error. If we are dying, this will
3028 		 *	just get us to our AST special handler and that
3029 		 *	will get us to finalize the termination of ourselves.
3030 		 */
3031 		task_unlock(task);
3032 		if (self_task != task) {
3033 			task_unlock(self_task);
3034 		}
3035 
3036 		return KERN_FAILURE;
3037 	}
3038 
3039 	if (task_corpse_pending_report(task)) {
3040 		/*
3041 		 *	Task is marked for reporting as corpse.
3042 		 *	Just return an error. This will
3043 		 *	just get us to our AST special handler and that
3044 		 *	will get us to finish the path to death
3045 		 *	will get us to finish the path to death.
3046 		task_unlock(task);
3047 		if (self_task != task) {
3048 			task_unlock(self_task);
3049 		}
3050 
3051 		return KERN_FAILURE;
3052 	}
3053 
3054 	if (self_task != task) {
3055 		task_unlock(self_task);
3056 	}
3057 
3058 	/*
3059 	 * Make sure the current thread does not get aborted out of
3060 	 * the waits inside these operations.
3061 	 */
3062 	interrupt_save = thread_interrupt_level(THREAD_UNINT);
3063 
3064 	/*
3065 	 *	Indicate that we want all the threads to stop executing
3066 	 *	at user space by holding the task (we would have held
3067 	 *	each thread independently in thread_terminate_internal -
3068 	 *	but this way we may be more likely to already find it
3069 	 *	held there).  Mark the task inactive, and prevent
3070 	 *	further task operations via the task port.
3071 	 *
3072 	 *	The vm_map and ipc_space must exist until this function returns,
3073 	 *	convert_port_to_{map,space}_with_flavor relies on this behavior.
3074 	 */
3075 	task_hold_locked(task);
3076 	task->active = FALSE;
3077 	ipc_task_disable(task);
3078 
3079 #if CONFIG_EXCLAVES
3080 	task_stop_conclave(task, false);
3081 #endif /* CONFIG_EXCLAVES */
3082 
3083 #if CONFIG_TELEMETRY
3084 	/*
3085 	 * Notify telemetry that this task is going away.
3086 	 */
3087 	telemetry_task_ctl_locked(task, TF_TELEMETRY, 0);
3088 #endif
3089 
3090 	/*
3091 	 *	Terminate each thread in the task.
3092 	 */
3093 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
3094 		thread_terminate_internal(thread);
3095 	}
3096 
3097 #ifdef MACH_BSD
3098 	void *bsd_info = get_bsdtask_info(task);
3099 	if (bsd_info != NULL) {
3100 		pid = proc_pid(bsd_info);
3101 	}
3102 #endif /* MACH_BSD */
3103 
3104 	task_unlock(task);
3105 
3106 	proc_set_task_policy(task, TASK_POLICY_ATTRIBUTE,
3107 	    TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
3108 
3109 	/* Early object reap phase */
3110 
3111 // PR-17045188: Revisit implementation
3112 //        task_partial_reap(task, pid);
3113 
3114 #if CONFIG_TASKWATCH
3115 	/*
3116 	 * remove all task watchers
3117 	 */
3118 	task_removewatchers(task);
3119 
3120 #endif /* CONFIG_TASKWATCH */
3121 
3122 	/*
3123 	 *	Destroy all synchronizers owned by the task.
3124 	 */
3125 	task_synchronizer_destroy_all(task);
3126 
3127 	/*
3128 	 *	Clear the watchport boost on the task.
3129 	 */
3130 	task_remove_turnstile_watchports(task);
3131 
3132 	/* let iokit know (phase 1) */
3133 	iokit_task_terminate(task, 1);
3134 
3135 	/*
3136 	 *	Destroy the IPC space, leaving just a reference for it.
3137 	 */
3138 	ipc_space_terminate(task->itk_space);
3139 
3140 #if 00
3141 	/* if some ledgers go negative on tear-down again... */
3142 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
3143 	    task_ledgers.phys_footprint);
3144 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
3145 	    task_ledgers.internal);
3146 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
3147 	    task_ledgers.iokit_mapped);
3148 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
3149 	    task_ledgers.alternate_accounting);
3150 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
3151 	    task_ledgers.alternate_accounting_compressed);
3152 #endif
3153 
3154 #if CONFIG_DEFERRED_RECLAIM
3155 	/*
3156 	 * Remove this task's reclaim buffer from the global queues.
3157 	 */
3158 	if (task->deferred_reclamation_metadata != NULL) {
3159 		vm_deferred_reclamation_buffer_uninstall(task->deferred_reclamation_metadata);
3160 	}
3161 #endif /* CONFIG_DEFERRED_RECLAIM */
3162 
3163 	/*
3164 	 * If the current thread is a member of the task
3165 	 * being terminated, then the last reference to
3166 	 * the task will not be dropped until the thread
3167 	 * is finally reaped.  To avoid incurring the
3168 	 * expense of removing the address space regions
3169 	 * at reap time, we do it explicitly here.
3170 	 */
3171 
3172 #if MACH_ASSERT
3173 	/*
3174 	 * Identify the pmap's process, in case the pmap ledgers drift
3175 	 * and we have to report it.
3176 	 */
3177 	char procname[17];
3178 	void *proc = get_bsdtask_info(task);
3179 	if (proc) {
3180 		pid = proc_pid(proc);
3181 		proc_name_kdp(proc, procname, sizeof(procname));
3182 	} else {
3183 		pid = 0;
3184 		strlcpy(procname, "<unknown>", sizeof(procname));
3185 	}
3186 	pmap_set_process(task->map->pmap, pid, procname);
3187 	if (vm_map_page_shift(task->map) < (int)PAGE_SHIFT) {
3188 		DEBUG4K_LIFE("map %p procname: %s\n", task->map, procname);
3189 		if (debug4k_panic_on_terminate) {
3190 			panic("DEBUG4K: %s:%d %d[%s] map %p", __FUNCTION__, __LINE__, pid, procname, task->map);
3191 		}
3192 	}
3193 #endif /* MACH_ASSERT */
3194 
3195 	vm_map_terminate(task->map);
3196 
3197 	/* release our shared region */
3198 	vm_shared_region_set(task, NULL);
3199 
3200 #if __has_feature(ptrauth_calls)
3201 	task_set_shared_region_id(task, NULL);
3202 #endif /* __has_feature(ptrauth_calls) */
3203 
3204 	lck_mtx_lock(&tasks_threads_lock);
3205 	queue_remove(&tasks, task, task_t, tasks);
3206 	queue_enter(&terminated_tasks, task, task_t, tasks);
3207 	tasks_count--;
3208 	terminated_tasks_count++;
3209 	lck_mtx_unlock(&tasks_threads_lock);
3210 
3211 	/*
3212 	 * We no longer need to guard against being aborted, so restore
3213 	 * the previous interruptible state.
3214 	 */
3215 	thread_interrupt_level(interrupt_save);
3216 
3217 #if CONFIG_CPU_COUNTERS
3218 	/* force the task to release all ctrs */
3219 	if (task->t_kpc & TASK_KPC_FORCED_ALL_CTRS) {
3220 		kpc_force_all_ctrs(task, 0);
3221 	}
3222 #endif /* CONFIG_CPU_COUNTERS */
3223 
3224 #if CONFIG_COALITIONS
3225 	/*
3226 	 * Leave the coalition for a corpse task or a task that
3227 	 * never had any active threads (e.g. fork or exec failure).
3228 	 * For a task with active threads, the last terminating
3229 	 * thread removes the task from the coalition.
3230 	 */
3231 	if (task->active_thread_count == 0) {
3232 		coalitions_remove_task(task);
3233 	}
3234 #endif
3235 
3236 #if CONFIG_FREEZE
3237 	extern int      vm_compressor_available;
3238 	if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE && vm_compressor_available) {
3239 		task_disown_frozen_csegs(task);
3240 		assert(queue_empty(&task->task_frozen_cseg_q));
3241 	}
3242 #endif /* CONFIG_FREEZE */
3243 
3244 
3245 	/*
3246 	 * Get rid of the task active reference on itself.
3247 	 */
3248 	task_deallocate_grp(task, TASK_GRP_INTERNAL);
3249 
3250 	return KERN_SUCCESS;
3251 }
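/*
 * Lock-ordering note: the dual task_lock() dance at the top of
 * task_terminate_internal() avoids an ABBA deadlock by always taking the
 * lock with the lower task_t address first.  A minimal sketch of the same
 * pattern (hypothetical helper, not from the original source):
 *
 *	static void
 *	lock_task_pair(task_t a, task_t b)
 *	{
 *		if (a == b) {
 *			task_lock(a);
 *		} else if (a < b) {
 *			task_lock(a);
 *			task_lock(b);
 *		} else {
 *			task_lock(b);
 *			task_lock(a);
 *		}
 *	}
 */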
3252 
3253 void
3254 tasks_system_suspend(boolean_t suspend)
3255 {
3256 	task_t task;
3257 
3258 	KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SUSPEND_USERSPACE) |
3259 	    (suspend ? DBG_FUNC_START : DBG_FUNC_END));
3260 
3261 	lck_mtx_lock(&tasks_threads_lock);
3262 	assert(tasks_suspend_state != suspend);
3263 	tasks_suspend_state = suspend;
3264 	queue_iterate(&tasks, task, task_t, tasks) {
3265 		if (task == kernel_task) {
3266 			continue;
3267 		}
3268 		suspend ? task_suspend_internal(task) : task_resume_internal(task);
3269 	}
3270 	lck_mtx_unlock(&tasks_threads_lock);
3271 }
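/*
 * Usage note (illustrative, not from the original source):
 * tasks_system_suspend() applies or releases one kernel-level hold per
 * non-kernel task, e.g. tasks_system_suspend(TRUE) on the way into system
 * sleep and a matching tasks_system_suspend(FALSE) on wake; the assert on
 * tasks_suspend_state enforces strict alternation of the two calls.
 */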
3272 
3273 /*
3274  * task_start_halt:
3275  *
3276  *      Shut the current task down (except for the current thread) in
3277  *	preparation for dramatic changes to the task (probably exec).
3278  *	We hold the task and mark all other threads in the task for
3279  *	termination.
3280  */
3281 kern_return_t
3282 task_start_halt(task_t task)
3283 {
3284 	kern_return_t kr = KERN_SUCCESS;
3285 	task_lock(task);
3286 	kr = task_start_halt_locked(task, FALSE);
3287 	task_unlock(task);
3288 	return kr;
3289 }
3290 
3291 static kern_return_t
3292 task_start_halt_locked(task_t task, boolean_t should_mark_corpse)
3293 {
3294 	thread_t thread, self;
3295 	uint64_t dispatchqueue_offset;
3296 
3297 	assert(task != kernel_task);
3298 
3299 	self = current_thread();
3300 
3301 	if (task != get_threadtask(self) && !task_is_a_corpse_fork(task)) {
3302 		return KERN_INVALID_ARGUMENT;
3303 	}
3304 
3305 	if (!should_mark_corpse &&
3306 	    (task->halting || !task->active || !self->active)) {
3307 		/*
3308 		 * Task or current thread is already being terminated.
3309 		 * Hurry up and return out of the current kernel context
3310 		 * so that we run our AST special handler to terminate
3311 		 * ourselves. If should_mark_corpse is set, the corpse
3312 		 * creation might have raced with exec; let the corpse
3313 		 * creation continue. Once the current thread reaches AST,
3314 		 * the thread in exec will be woken up from task_complete_halt.
3315 		 * Exec will fail because the proc was marked for exit.
3316 		 * Once the thread in exec reaches AST, it will call proc_exit
3317 		 * and deliver the EXC_CORPSE_NOTIFY.
3318 		 */
3319 		return KERN_FAILURE;
3320 	}
3321 
3322 	/* Thread creation will fail after this point of no return. */
3323 	task->halting = TRUE;
3324 
3325 	/*
3326 	 * Mark all the threads to keep them from starting any more
3327 	 * user-level execution. The thread_terminate_internal code
3328 	 * would do this on a thread by thread basis anyway, but this
3329 	 * gives us a better chance of not having to wait there.
3330 	 */
3331 	task_hold_locked(task);
3332 
3333 #if CONFIG_EXCLAVES
3334 	if (should_mark_corpse) {
3335 		void *crash_info_ptr = task_get_corpseinfo(task);
3336 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
3337 			if (crash_info_ptr != NULL && thread->th_exclaves_ipc_ctx.ipcb != NULL) {
3338 				struct thread_crash_exclaves_info info = { 0 };
3339 
3340 				info.tcei_flags = kExclaveRPCActive;
3341 				info.tcei_scid = thread->th_exclaves_ipc_ctx.scid;
3342 				info.tcei_thread_id = thread->thread_id;
3343 
3344 				kcdata_push_data(crash_info_ptr,
3345 				    STACKSHOT_KCTYPE_KERN_EXCLAVES_CRASH_THREADINFO,
3346 				    sizeof(struct thread_crash_exclaves_info), &info);
3347 			}
3348 		}
3349 
3350 		task_unlock(task);
3351 		task_stop_conclave(task, true);
3352 		task_lock(task);
3353 	}
3354 #endif /* CONFIG_EXCLAVES */
3355 
3356 	dispatchqueue_offset = get_dispatchqueue_offset_from_proc(get_bsdtask_info(task));
3357 	/*
3358 	 * Terminate all the other threads in the task.
3359 	 */
3360 	queue_iterate(&task->threads, thread, thread_t, task_threads)
3361 	{
3362 		/*
3363 		 * Remove priority throttles so that threads terminate in a timely manner. This has
3364 		 * to be done after task_hold_locked() traps all threads to AST, but before
3365 		 * threads are marked inactive in thread_terminate_internal(). Takes thread
3366 		 * mutex lock.
3367 		 *
3368 		 * We need the task_is_a_corpse() check so that we don't accidentally update policy
3369 		 * for tasks that are doing posix_spawn().
3370 		 *
3371 		 * See: thread_policy_update_tasklocked().
3372 		 */
3373 		if (task_is_a_corpse(task)) {
3374 			proc_set_thread_policy(thread, TASK_POLICY_ATTRIBUTE,
3375 			    TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
3376 		}
3377 
3378 		if (should_mark_corpse) {
3379 			thread_mtx_lock(thread);
3380 			thread->inspection = TRUE;
3381 			thread_mtx_unlock(thread);
3382 		}
3383 		if (thread != self) {
3384 			thread_terminate_internal(thread);
3385 		}
3386 	}
3387 	task->dispatchqueue_offset = dispatchqueue_offset;
3388 
3389 	task_release_locked(task);
3390 
3391 	return KERN_SUCCESS;
3392 }
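/*
 * Sequencing sketch (illustrative, not from the original source): halting
 * is a two-phase protocol.  task_start_halt() marks the task halting and
 * terminates every thread except the caller; task_complete_halt() then
 * waits for those threads to be reaped and tears down the task-level
 * resources (task is assumed to be the caller's own task, as
 * task_complete_halt() requires):
 *
 *	if (task_start_halt(task) == KERN_SUCCESS) {
 *		// ... prepare for exec ...
 *		task_complete_halt(task);	// blocks until peers are reaped
 *	}
 */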
3393 
3394 
3395 /*
3396  * task_complete_halt:
3397  *
3398  *	Complete task halt by waiting for threads to terminate, then clean
3399  *	up task resources (VM, port namespace, etc...) and then let the
3400  *	current thread go in the (practically empty) task context.
3401  *
3402  *	Note: task->halting flag is not cleared in order to avoid creation
3403  *	of new threads in the old exec'ed task.
3404  */
3405 void
3406 task_complete_halt(task_t task)
3407 {
3408 	task_lock(task);
3409 	assert(task->halting);
3410 	assert(task == current_task());
3411 
3412 	/*
3413 	 *	Wait for the other threads to get shut down.
3414 	 *      When the last other thread is reaped, we'll be
3415 	 *	woken up.
3416 	 */
3417 	if (task->thread_count > 1) {
3418 		assert_wait((event_t)&task->halting, THREAD_UNINT);
3419 		task_unlock(task);
3420 		thread_block(THREAD_CONTINUE_NULL);
3421 	} else {
3422 		task_unlock(task);
3423 	}
3424 
3425 #if CONFIG_DEFERRED_RECLAIM
3426 	if (task->deferred_reclamation_metadata) {
3427 		vm_deferred_reclamation_buffer_uninstall(
3428 			task->deferred_reclamation_metadata);
3429 		vm_deferred_reclamation_buffer_deallocate(
3430 			task->deferred_reclamation_metadata);
3431 		task->deferred_reclamation_metadata = NULL;
3432 	}
3433 #endif /* CONFIG_DEFERRED_RECLAIM */
3434 
3435 	/*
3436 	 *	Give the machine dependent code a chance
3437 	 *	to perform cleanup of task-level resources
3438 	 *	associated with the current thread before
3439 	 *	ripping apart the task.
3440 	 */
3441 	machine_task_terminate(task);
3442 
3443 	/*
3444 	 *	Destroy all synchronizers owned by the task.
3445 	 */
3446 	task_synchronizer_destroy_all(task);
3447 
3448 	/* let iokit know (phase 1) */
3449 	iokit_task_terminate(task, 1);
3450 
3451 	/*
3452 	 *	Terminate the IPC space.  A long time ago,
3453 	 *	this used to be ipc_space_clean() which would
3454 	 *	keep the space active but hollow it.
3455 	 *
3456 	 *	We really do not need this semantics given
3457 	 *	tasks die with exec now.
3458 	 */
3459 	ipc_space_terminate(task->itk_space);
3460 
3461 	/*
3462 	 * Clean out the address space, as we are going to be
3463 	 * getting a new one.
3464 	 */
3465 	vm_map_terminate(task->map);
3466 
3467 	/*
3468 	 * Kick out any IOKitUser handles to the task. At best they're stale,
3469 	 * at worst someone is racing a SUID exec.
3470 	 */
3471 	/* let iokit know (phase 2) */
3472 	iokit_task_terminate(task, 2);
3473 }
3474 
3475 #ifdef CONFIG_TASK_SUSPEND_STATS
3476 
3477 static void
3478 _task_mark_suspend_source(task_t task)
3479 {
3480 	int idx;
3481 	task_suspend_stats_t stats;
3482 	task_suspend_source_t source;
3483 	task_lock_assert_owned(task);
3484 	stats = &task->t_suspend_stats;
3485 
3486 	idx = stats->tss_count % TASK_SUSPEND_SOURCES_MAX;
3487 	source = &task->t_suspend_sources[idx];
3488 	bzero(source, sizeof(*source));
3489 
3490 	source->tss_time = mach_absolute_time();
3491 	source->tss_tid = current_thread()->thread_id;
3492 	source->tss_pid = task_pid(current_task());
3493 	strlcpy(source->tss_procname, task_best_name(current_task()),
3494 	    sizeof(source->tss_procname));
3495 
3496 	stats->tss_count++;
3497 }
3498 
3499 static inline void
3500 _task_mark_suspend_start(task_t task)
3501 {
3502 	task_lock_assert_owned(task);
3503 	task->t_suspend_stats.tss_last_start = mach_absolute_time();
3504 }
3505 
3506 static inline void
3507 _task_mark_suspend_end(task_t task)
3508 {
3509 	task_lock_assert_owned(task);
3510 	task->t_suspend_stats.tss_last_end = mach_absolute_time();
3511 	task->t_suspend_stats.tss_duration += (task->t_suspend_stats.tss_last_end -
3512 	    task->t_suspend_stats.tss_last_start);
3513 }
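/*
 * Accounting example (illustrative, not from the original source):
 * tss_duration accumulates the total time spent suspended across
 * suspensions.  With mach_absolute_time() readings of start=1000/end=1500
 * for one suspension and start=2000/end=2200 for the next, tss_duration
 * ends up at (1500 - 1000) + (2200 - 2000) = 700 absolute-time units.
 */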
3514 
3515 static kern_return_t
3516 _task_get_suspend_stats_locked(task_t task, task_suspend_stats_t stats)
3517 {
3518 	if (task == TASK_NULL || stats == NULL) {
3519 		return KERN_INVALID_ARGUMENT;
3520 	}
3521 	task_lock_assert_owned(task);
3522 	memcpy(stats, &task->t_suspend_stats, sizeof(task->t_suspend_stats));
3523 	return KERN_SUCCESS;
3524 }
3525 
3526 static kern_return_t
3527 _task_get_suspend_sources_locked(task_t task, task_suspend_source_t sources)
3528 {
3529 	if (task == TASK_NULL || sources == NULL) {
3530 		return KERN_INVALID_ARGUMENT;
3531 	}
3532 	task_lock_assert_owned(task);
3533 	memcpy(sources, task->t_suspend_sources,
3534 	    sizeof(struct task_suspend_source_s) * TASK_SUSPEND_SOURCES_MAX);
3535 	return KERN_SUCCESS;
3536 }
3537 
3538 #endif /* CONFIG_TASK_SUSPEND_STATS */
3539 
3540 kern_return_t
3541 task_get_suspend_stats(task_t task, task_suspend_stats_t stats)
3542 {
3543 #ifdef CONFIG_TASK_SUSPEND_STATS
3544 	kern_return_t kr;
3545 	if (task == TASK_NULL || stats == NULL) {
3546 		return KERN_INVALID_ARGUMENT;
3547 	}
3548 	task_lock(task);
3549 	kr = _task_get_suspend_stats_locked(task, stats);
3550 	task_unlock(task);
3551 	return kr;
3552 #else /* CONFIG_TASK_SUSPEND_STATS */
3553 	(void)task;
3554 	(void)stats;
3555 	return KERN_NOT_SUPPORTED;
3556 #endif
3557 }
3558 
3559 kern_return_t
3560 task_get_suspend_stats_kdp(task_t task, task_suspend_stats_t stats)
3561 {
3562 #ifdef CONFIG_TASK_SUSPEND_STATS
3563 	if (task == TASK_NULL || stats == NULL) {
3564 		return KERN_INVALID_ARGUMENT;
3565 	}
3566 	memcpy(stats, &task->t_suspend_stats, sizeof(task->t_suspend_stats));
3567 	return KERN_SUCCESS;
3568 #else /* CONFIG_TASK_SUSPEND_STATS */
3569 #pragma unused(task, stats)
3570 	return KERN_NOT_SUPPORTED;
3571 #endif /* CONFIG_TASK_SUSPEND_STATS */
3572 }
3573 
3574 kern_return_t
3575 task_get_suspend_sources(task_t task, task_suspend_source_array_t sources)
3576 {
3577 #ifdef CONFIG_TASK_SUSPEND_STATS
3578 	kern_return_t kr;
3579 	if (task == TASK_NULL || sources == NULL) {
3580 		return KERN_INVALID_ARGUMENT;
3581 	}
3582 	task_lock(task);
3583 	kr = _task_get_suspend_sources_locked(task, sources);
3584 	task_unlock(task);
3585 	return kr;
3586 #else /* CONFIG_TASK_SUSPEND_STATS */
3587 	(void)task;
3588 	(void)sources;
3589 	return KERN_NOT_SUPPORTED;
3590 #endif
3591 }
3592 
3593 kern_return_t
3594 task_get_suspend_sources_kdp(task_t task, task_suspend_source_array_t sources)
3595 {
3596 #ifdef CONFIG_TASK_SUSPEND_STATS
3597 	if (task == TASK_NULL || sources == NULL) {
3598 		return KERN_INVALID_ARGUMENT;
3599 	}
3600 	memcpy(sources, task->t_suspend_sources,
3601 	    sizeof(struct task_suspend_source_s) * TASK_SUSPEND_SOURCES_MAX);
3602 	return KERN_SUCCESS;
3603 #else /* CONFIG_TASK_SUSPEND_STATS */
3604 #pragma unused(task, sources)
3605 	return KERN_NOT_SUPPORTED;
3606 #endif
3607 }
3608 
3609 /*
3610  *	task_hold_locked:
3611  *
3612  *	Suspend execution of the specified task.
3613  *	This is a recursive-style suspension of the task, a count of
3614  *	suspends is maintained.
3615  *
3616  *	CONDITIONS: the task is locked and active.
3617  */
3618 void
3619 task_hold_locked(
3620 	task_t          task)
3621 {
3622 	thread_t        thread;
3623 	void *bsd_info = get_bsdtask_info(task);
3624 
3625 	assert(task->active);
3626 
3627 	if (task->suspend_count++ > 0) {
3628 		return;
3629 	}
3630 
3631 	if (bsd_info) {
3632 		workq_proc_suspended(bsd_info);
3633 	}
3634 
3635 	/*
3636 	 *	Iterate through all the threads and hold them.
3637 	 */
3638 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
3639 		thread_mtx_lock(thread);
3640 		thread_hold(thread);
3641 		thread_mtx_unlock(thread);
3642 	}
3643 
3644 #ifdef CONFIG_TASK_SUSPEND_STATS
3645 	_task_mark_suspend_start(task);
3646 #endif
3647 }
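/*
 * Counting sketch (illustrative, not from the original source): the
 * suspend count makes kernel holds recursive -- only the 0 -> 1
 * transition here actually holds the threads, and only the 1 -> 0
 * transition in task_release_locked() lets them run again.  With the task
 * locked and active, as both routines require:
 *
 *	task_hold_locked(task);		// suspend_count 0 -> 1, threads held
 *	task_hold_locked(task);		// suspend_count 1 -> 2, counted only
 *	task_release_locked(task);	// suspend_count 2 -> 1, still held
 *	task_release_locked(task);	// suspend_count 1 -> 0, threads released
 */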
3648 
3649 /*
3650  *	task_hold_and_wait
3651  *
3652  *	Same as the internal routine above, except that it must lock
3653  *	and verify that the task is active.  This differs from task_suspend
3654  *	in that it places a kernel hold on the task rather than just a
3655  *	user-level hold.  This keeps users from over-resuming and setting
3656  *	it running out from under the kernel.
3657  *
3658  *      CONDITIONS: the caller holds a reference on the task
3659  */
3660 kern_return_t
3661 task_hold_and_wait(
3662 	task_t          task)
3663 {
3664 	if (task == TASK_NULL) {
3665 		return KERN_INVALID_ARGUMENT;
3666 	}
3667 
3668 	task_lock(task);
3669 	if (!task->active) {
3670 		task_unlock(task);
3671 		return KERN_FAILURE;
3672 	}
3673 
3674 #ifdef CONFIG_TASK_SUSPEND_STATS
3675 	_task_mark_suspend_source(task);
3676 #endif /* CONFIG_TASK_SUSPEND_STATS */
3677 
3678 	task_hold_locked(task);
3679 	task_wait_locked(task, FALSE);
3680 	task_unlock(task);
3681 
3682 	return KERN_SUCCESS;
3683 }
3684 
3685 /*
3686  *	task_wait_locked:
3687  *
3688  *	Wait for all threads in task to stop.
3689  *
3690  * Conditions:
3691  *	Called with task locked, active, and held.
3692  */
3693 void
3694 task_wait_locked(
3695 	task_t          task,
3696 	boolean_t               until_not_runnable)
3697 {
3698 	thread_t        thread, self;
3699 
3700 	assert(task->active);
3701 	assert(task->suspend_count > 0);
3702 
3703 	self = current_thread();
3704 
3705 	/*
3706 	 *	Iterate through all the threads and wait for them to
3707 	 *	stop.  Do not wait for the current thread if it is within
3708 	 *	the task.
3709 	 */
3710 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
3711 		if (thread != self) {
3712 			thread_wait(thread, until_not_runnable);
3713 		}
3714 	}
3715 }
3716 
3717 boolean_t
3718 task_is_app_suspended(task_t task)
3719 {
3720 	return task->pidsuspended;
3721 }
3722 
3723 /*
3724  *	task_release_locked:
3725  *
3726  *	Release a kernel hold on a task.
3727  *
3728  *      CONDITIONS: the task is locked and active
3729  */
3730 void
3731 task_release_locked(
3732 	task_t          task)
3733 {
3734 	thread_t        thread;
3735 	void *bsd_info = get_bsdtask_info(task);
3736 
3737 	assert(task->active);
3738 	assert(task->suspend_count > 0);
3739 
3740 	if (--task->suspend_count > 0) {
3741 		return;
3742 	}
3743 
3744 	if (bsd_info) {
3745 		workq_proc_resumed(bsd_info);
3746 	}
3747 
3748 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
3749 		thread_mtx_lock(thread);
3750 		thread_release(thread);
3751 		thread_mtx_unlock(thread);
3752 	}
3753 
3754 #if CONFIG_TASK_SUSPEND_STATS
3755 	_task_mark_suspend_end(task);
3756 #endif
3757 }
3758 
3759 /*
3760  *	task_release:
3761  *
3762  *	Same as the internal routine above, except that it must lock
3763  *	and verify that the task is active.
3764  *
3765  *      CONDITIONS: The caller holds a reference to the task
3766  */
3767 kern_return_t
3768 task_release(
3769 	task_t          task)
3770 {
3771 	if (task == TASK_NULL) {
3772 		return KERN_INVALID_ARGUMENT;
3773 	}
3774 
3775 	task_lock(task);
3776 
3777 	if (!task->active) {
3778 		task_unlock(task);
3779 
3780 		return KERN_FAILURE;
3781 	}
3782 
3783 	task_release_locked(task);
3784 	task_unlock(task);
3785 
3786 	return KERN_SUCCESS;
3787 }
3788 
3789 static kern_return_t
3790 task_threads_internal(
3791 	task_t                  task,
3792 	thread_act_array_t     *threads_out,
3793 	mach_msg_type_number_t *countp,
3794 	mach_thread_flavor_t    flavor)
3795 {
3796 	mach_msg_type_number_t  actual, count, count_needed;
3797 	thread_act_array_t      thread_list;
3798 	thread_t                thread;
3799 	unsigned int            i;
3800 
3801 	count = 0;
3802 	thread_list = NULL;
3803 
3804 	if (task == TASK_NULL) {
3805 		return KERN_INVALID_ARGUMENT;
3806 	}
3807 
3808 	assert(flavor <= THREAD_FLAVOR_INSPECT);
3809 
3810 	for (;;) {
3811 		task_lock(task);
3812 		if (!task->active) {
3813 			task_unlock(task);
3814 
3815 			mach_port_array_free(thread_list, count);
3816 			return KERN_FAILURE;
3817 		}
3818 
3819 		count_needed = actual = task->thread_count;
3820 		if (count_needed <= count) {
3821 			break;
3822 		}
3823 
3824 		/* unlock the task and allocate more memory */
3825 		task_unlock(task);
3826 
3827 		mach_port_array_free(thread_list, count);
3828 		count = count_needed;
3829 		thread_list = mach_port_array_alloc(count, Z_WAITOK);
3830 
3831 		if (thread_list == NULL) {
3832 			return KERN_RESOURCE_SHORTAGE;
3833 		}
3834 	}
3835 
3836 	i = 0;
3837 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
3838 		assert(i < actual);
3839 		thread_reference(thread);
3840 		((thread_t *)thread_list)[i++] = thread;
3841 	}
3842 
3843 	count_needed = actual;
3844 
3845 	/* can unlock task now that we've got the thread refs */
3846 	task_unlock(task);
3847 
3848 	if (actual == 0) {
3849 		/* no threads, so return null pointer and deallocate memory */
3850 
3851 		mach_port_array_free(thread_list, count);
3852 
3853 		*threads_out = NULL;
3854 		*countp = 0;
3855 	} else {
3856 		/* if we allocated too much, must copy */
3857 		if (count_needed < count) {
3858 			mach_port_array_t newaddr;
3859 
3860 			newaddr = mach_port_array_alloc(count_needed, Z_WAITOK);
3861 			if (newaddr == NULL) {
3862 				for (i = 0; i < actual; ++i) {
3863 					thread_deallocate(((thread_t *)thread_list)[i]);
3864 				}
3865 				mach_port_array_free(thread_list, count);
3866 				return KERN_RESOURCE_SHORTAGE;
3867 			}
3868 
3869 			bcopy(thread_list, newaddr, count_needed * sizeof(thread_t));
3870 			mach_port_array_free(thread_list, count);
3871 			thread_list = newaddr;
3872 		}
3873 
3874 		/* do the conversion that MIG should handle */
3875 		convert_thread_array_to_ports(thread_list, actual, flavor);
3876 
3877 		*threads_out = thread_list;
3878 		*countp = actual;
3879 	}
3880 
3881 	return KERN_SUCCESS;
3882 }
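/*
 * Pattern note (illustrative, not from the original source): the loop at
 * the top of task_threads_internal() is the classic lock/size/unlock/
 * allocate retry scheme, used because memory cannot be allocated while
 * the task lock is held.  Its generic shape, with hypothetical names:
 *
 *	for (;;) {
 *		lock();
 *		needed = current_count();
 *		if (needed <= allocated) {
 *			break;		// still locked; buffer fits
 *		}
 *		unlock();
 *		free(buf);
 *		buf = alloc(needed);
 *		allocated = needed;
 *	}
 */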
3883 
3884 
3885 kern_return_t
3886 task_threads_from_user(
3887 	mach_port_t                 port,
3888 	thread_act_array_t         *threads_out,
3889 	mach_msg_type_number_t     *count)
3890 {
3891 	ipc_kobject_type_t kotype;
3892 	kern_return_t kr;
3893 
3894 	task_t task = convert_port_to_task_inspect_no_eval(port);
3895 
3896 	if (task == TASK_NULL) {
3897 		return KERN_INVALID_ARGUMENT;
3898 	}
3899 
3900 	kotype = ip_kotype(port);
3901 
3902 	switch (kotype) {
3903 	case IKOT_TASK_CONTROL:
3904 		kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_CONTROL);
3905 		break;
3906 	case IKOT_TASK_READ:
3907 		kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_READ);
3908 		break;
3909 	case IKOT_TASK_INSPECT:
3910 		kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_INSPECT);
3911 		break;
3912 	default:
3913 		panic("strange kobject type");
3914 		break;
3915 	}
3916 
3917 	task_deallocate(task);
3918 	return kr;
3919 }
3920 
3921 #define TASK_HOLD_NORMAL        0
3922 #define TASK_HOLD_PIDSUSPEND    1
3923 #define TASK_HOLD_LEGACY        2
3924 #define TASK_HOLD_LEGACY_ALL    3
3925 
3926 static kern_return_t
3927 place_task_hold(
3928 	task_t task,
3929 	int mode)
3930 {
3931 	if (!task->active && !task_is_a_corpse(task)) {
3932 		return KERN_FAILURE;
3933 	}
3934 
3935 	/* Return success for corpse task */
3936 	if (task_is_a_corpse(task)) {
3937 		return KERN_SUCCESS;
3938 	}
3939 
3940 	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_IPC, MACH_TASK_SUSPEND),
3941 	    task_pid(task),
3942 	    task->thread_count > 0 ?((thread_t)queue_first(&task->threads))->thread_id : 0,
3943 	    task->user_stop_count, task->user_stop_count + 1);
3944 
3945 #if MACH_ASSERT
3946 	current_task()->suspends_outstanding++;
3947 #endif
3948 
3949 	if (mode == TASK_HOLD_LEGACY) {
3950 		task->legacy_stop_count++;
3951 	}
3952 
3953 #ifdef CONFIG_TASK_SUSPEND_STATS
3954 	_task_mark_suspend_source(task);
3955 #endif /* CONFIG_TASK_SUSPEND_STATS */
3956 
3957 	if (task->user_stop_count++ > 0) {
3958 		/*
3959 		 *	If the stop count was positive, the task is
3960 		 *	already stopped and we can exit.
3961 		 */
3962 		return KERN_SUCCESS;
3963 	}
3964 
3965 	/*
3966 	 * Put a kernel-level hold on the threads in the task (all
3967 	 * user-level task suspensions added together represent a
3968 	 * single kernel-level hold).  We then wait for the threads
3969 	 * to stop executing user code.
3970 	 */
3971 	task_hold_locked(task);
3972 	task_wait_locked(task, FALSE);
3973 
3974 	return KERN_SUCCESS;
3975 }
3976 
3977 static kern_return_t
3978 release_task_hold(
3979 	task_t          task,
3980 	int                     mode)
3981 {
3982 	boolean_t release = FALSE;
3983 
3984 	if (!task->active && !task_is_a_corpse(task)) {
3985 		return KERN_FAILURE;
3986 	}
3987 
3988 	/* Return success for corpse task */
3989 	if (task_is_a_corpse(task)) {
3990 		return KERN_SUCCESS;
3991 	}
3992 
3993 	if (mode == TASK_HOLD_PIDSUSPEND) {
3994 		if (task->pidsuspended == FALSE) {
3995 			return KERN_FAILURE;
3996 		}
3997 		task->pidsuspended = FALSE;
3998 	}
3999 
4000 	if (task->user_stop_count > (task->pidsuspended ? 1 : 0)) {
4001 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
4002 		    MACHDBG_CODE(DBG_MACH_IPC, MACH_TASK_RESUME) | DBG_FUNC_NONE,
4003 		    task_pid(task), ((thread_t)queue_first(&task->threads))->thread_id,
4004 		    task->user_stop_count, mode, task->legacy_stop_count);
4005 
4006 #if MACH_ASSERT
4007 		/*
4008 		 * This is obviously not robust; if we suspend one task and then resume a different one,
4009 		 * we'll fly under the radar. This is only meant to catch the common case of a crashed
4010 		 * or buggy suspender.
4011 		 */
4012 		current_task()->suspends_outstanding--;
4013 #endif
4014 
4015 		if (mode == TASK_HOLD_LEGACY_ALL) {
4016 			if (task->legacy_stop_count >= task->user_stop_count) {
4017 				task->user_stop_count = 0;
4018 				release = TRUE;
4019 			} else {
4020 				task->user_stop_count -= task->legacy_stop_count;
4021 			}
4022 			task->legacy_stop_count = 0;
4023 		} else {
4024 			if (mode == TASK_HOLD_LEGACY && task->legacy_stop_count > 0) {
4025 				task->legacy_stop_count--;
4026 			}
4027 			if (--task->user_stop_count == 0) {
4028 				release = TRUE;
4029 			}
4030 		}
4031 	} else {
4032 		return KERN_FAILURE;
4033 	}
4034 
4035 	/*
4036 	 *	Release the task if necessary.
4037 	 */
4038 	if (release) {
4039 		task_release_locked(task);
4040 	}
4041 
4042 	return KERN_SUCCESS;
4043 }
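/*
 * Bookkeeping example (illustrative, not from the original source):
 * user_stop_count counts every outstanding user-level hold, and
 * legacy_stop_count counts the subset created via task_suspend().  After
 * one task_suspend(), one task_suspend2() and one task_pidsuspend():
 * user_stop_count == 3, legacy_stop_count == 1, pidsuspended == TRUE.
 * A TASK_HOLD_LEGACY_ALL release then drops user_stop_count to 2 and
 * legacy_stop_count to 0; the threads stay held until user_stop_count
 * finally reaches 0.
 */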
4044 
4045 boolean_t
4046 get_task_suspended(task_t task)
4047 {
4048 	return 0 != task->user_stop_count;
4049 }
4050 
4051 /*
4052  *	task_suspend:
4053  *
4054  *	Implement an (old-fashioned) user-level suspension on a task.
4055  *
4056  *	Because the user isn't expecting to have to manage a suspension
4057  *	token, we'll track it for him in the kernel in the form of a naked
4058  *	send right to the task's resume port.  All such send rights
4059  *	account for a single suspension against the task (unlike task_suspend2()
4060  *	where each caller gets a unique suspension count represented by a
4061  *	unique send-once right).
4062  *
4063  * Conditions:
4064  *      The caller holds a reference to the task
4065  */
4066 kern_return_t
4067 task_suspend(
4068 	task_t          task)
4069 {
4070 	kern_return_t                   kr;
4071 	mach_port_t                     port;
4072 	mach_port_name_t                name;
4073 
4074 	if (task == TASK_NULL || task == kernel_task) {
4075 		return KERN_INVALID_ARGUMENT;
4076 	}
4077 
4078 	/*
4079 	 * place a legacy hold on the task.
4080 	 */
4081 	task_lock(task);
4082 	kr = place_task_hold(task, TASK_HOLD_LEGACY);
4083 	task_unlock(task);
4084 
4085 	if (kr != KERN_SUCCESS) {
4086 		return kr;
4087 	}
4088 
4089 	/*
4090 	 * Claim a send right on the task resume port, and request a no-senders
4091 	 * notification on that port (if none outstanding).
4092 	 */
4093 	itk_lock(task);
4094 	port = task->itk_resume;
4095 	if (port == IP_NULL) {
4096 		port = ipc_kobject_alloc_port(task, IKOT_TASK_RESUME,
4097 		    IPC_KOBJECT_ALLOC_NSREQUEST | IPC_KOBJECT_ALLOC_MAKE_SEND);
4098 		task->itk_resume = port;
4099 	} else {
4100 		(void)ipc_kobject_make_send_nsrequest(port, task, IKOT_TASK_RESUME);
4101 	}
4102 	itk_unlock(task);
4103 
4104 	/*
4105 	 * Copyout the send right into the calling task's IPC space.  It won't know it is there,
4106 	 * but we'll look it up when calling a traditional resume.  Any IPC operations that
4107 	 * deallocate the send right will auto-release the suspension.
4108 	 */
4109 	if (IP_VALID(port)) {
4110 		kr = ipc_object_copyout(current_space(), ip_to_object(port),
4111 		    MACH_MSG_TYPE_MOVE_SEND, IPC_OBJECT_COPYOUT_FLAGS_NONE,
4112 		    NULL, NULL, &name);
4113 	} else {
4114 		kr = KERN_SUCCESS;
4115 	}
4116 	if (kr != KERN_SUCCESS) {
4117 		printf("warning: %s(%d) failed to copyout suspension "
4118 		    "token for pid %d with error: %d\n",
4119 		    proc_name_address(get_bsdtask_info(current_task())),
4120 		    proc_pid(get_bsdtask_info(current_task())),
4121 		    task_pid(task), kr);
4122 	}
4123 
4124 	return kr;
4125 }
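/*
 * Usage sketch (illustrative, not from the original source): through the
 * MIG interface these are the classic Mach calls, and every successful
 * task_suspend() must eventually be balanced by a task_resume() (`port`
 * is assumed to be a task control port the caller already holds):
 *
 *	kern_return_t kr = task_suspend(port);
 *	if (kr == KERN_SUCCESS) {
 *		// inspect or manipulate the stopped task
 *		(void)task_resume(port);
 *	}
 */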
4126 
4127 /*
4128  *	task_resume:
4129  *		Release a user hold on a task.
4130  *
4131  * Conditions:
4132  *		The caller holds a reference to the task
4133  */
4134 kern_return_t
4135 task_resume(
4136 	task_t  task)
4137 {
4138 	kern_return_t    kr;
4139 	mach_port_name_t resume_port_name;
4140 	ipc_entry_t              resume_port_entry;
4141 	ipc_space_t              space = current_task()->itk_space;
4142 
4143 	if (task == TASK_NULL || task == kernel_task) {
4144 		return KERN_INVALID_ARGUMENT;
4145 	}
4146 
4147 	/* release a legacy task hold */
4148 	task_lock(task);
4149 	kr = release_task_hold(task, TASK_HOLD_LEGACY);
4150 	task_unlock(task);
4151 
4152 	itk_lock(task); /* for itk_resume */
4153 	is_write_lock(space); /* spin lock */
4154 	if (is_active(space) && IP_VALID(task->itk_resume) &&
4155 	    ipc_hash_lookup(space, ip_to_object(task->itk_resume), &resume_port_name, &resume_port_entry) == TRUE) {
4156 		/*
4157 		 * We found a suspension token in the caller's IPC space. Release a send right to indicate that
4158 		 * we are holding one less legacy hold on the task from this caller.  If the release failed,
4159 		 * go ahead and drop all the rights, as someone either already released our holds or the task
4160 		 * is gone.
4161 		 */
4162 		itk_unlock(task);
4163 		if (kr == KERN_SUCCESS) {
4164 			ipc_right_dealloc(space, resume_port_name, resume_port_entry);
4165 		} else {
4166 			ipc_right_destroy(space, resume_port_name, resume_port_entry, FALSE, 0);
4167 		}
4168 		/* space unlocked */
4169 	} else {
4170 		itk_unlock(task);
4171 		is_write_unlock(space);
4172 		if (kr == KERN_SUCCESS) {
4173 			printf("warning: %s(%d) performed out-of-band resume on pid %d\n",
4174 			    proc_name_address(get_bsdtask_info(current_task())), proc_pid(get_bsdtask_info(current_task())),
4175 			    task_pid(task));
4176 		}
4177 	}
4178 
4179 	return kr;
4180 }
4181 
4182 /*
4183  * Suspend the target task.
4184  * Making/holding a token/reference/port is the caller's responsibility.
4185  */
4186 kern_return_t
4187 task_suspend_internal(task_t task)
4188 {
4189 	kern_return_t    kr;
4190 
4191 	if (task == TASK_NULL || task == kernel_task) {
4192 		return KERN_INVALID_ARGUMENT;
4193 	}
4194 
4195 	task_lock(task);
4196 	kr = place_task_hold(task, TASK_HOLD_NORMAL);
4197 	task_unlock(task);
4198 	return kr;
4199 }
4200 
4201 /*
4202  * Suspend the target task, and return a suspension token. The token
4203  * represents a reference on the suspended task.
4204  */
4205 static kern_return_t
4206 task_suspend2_grp(
4207 	task_t                  task,
4208 	task_suspension_token_t *suspend_token,
4209 	task_grp_t              grp)
4210 {
4211 	kern_return_t    kr;
4212 
4213 	kr = task_suspend_internal(task);
4214 	if (kr != KERN_SUCCESS) {
4215 		*suspend_token = TASK_NULL;
4216 		return kr;
4217 	}
4218 
4219 	/*
4220 	 * Take a reference on the target task and return that to the caller
4221 	 * as a "suspension token," which can be converted into an SO right to
4222 	 * the now-suspended task's resume port.
4223 	 */
4224 	task_reference_grp(task, grp);
4225 	*suspend_token = task;
4226 
4227 	return KERN_SUCCESS;
4228 }
4229 
4230 kern_return_t
4231 task_suspend2_mig(
4232 	task_t                  task,
4233 	task_suspension_token_t *suspend_token)
4234 {
4235 	return task_suspend2_grp(task, suspend_token, TASK_GRP_MIG);
4236 }
4237 
4238 kern_return_t
4239 task_suspend2_external(
4240 	task_t                  task,
4241 	task_suspension_token_t *suspend_token)
4242 {
4243 	return task_suspend2_grp(task, suspend_token, TASK_GRP_EXTERNAL);
4244 }
4245 
4246 /*
4247  * Resume the task
4248  * (reference/token/port management is caller's responsibility).
4249  */
4250 kern_return_t
4251 task_resume_internal(
4252 	task_suspension_token_t         task)
4253 {
4254 	kern_return_t kr;
4255 
4256 	if (task == TASK_NULL || task == kernel_task) {
4257 		return KERN_INVALID_ARGUMENT;
4258 	}
4259 
4260 	task_lock(task);
4261 	kr = release_task_hold(task, TASK_HOLD_NORMAL);
4262 	task_unlock(task);
4263 	return kr;
4264 }
4265 
4266 /*
4267  * Resume the task using a suspension token. Consumes the token's ref.
4268  */
4269 static kern_return_t
4270 task_resume2_grp(
4271 	task_suspension_token_t         task,
4272 	task_grp_t                      grp)
4273 {
4274 	kern_return_t kr;
4275 
4276 	kr = task_resume_internal(task);
4277 	task_suspension_token_deallocate_grp(task, grp);
4278 
4279 	return kr;
4280 }
4281 
4282 kern_return_t
4283 task_resume2_mig(
4284 	task_suspension_token_t         task)
4285 {
4286 	return task_resume2_grp(task, TASK_GRP_MIG);
4287 }
4288 
4289 kern_return_t
4290 task_resume2_external(
4291 	task_suspension_token_t         task)
4292 {
4293 	return task_resume2_grp(task, TASK_GRP_EXTERNAL);
4294 }
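/*
 * Token sketch (illustrative, not from the original source): unlike the
 * legacy interface, each task_suspend2() caller receives its own
 * suspension token, and resuming consumes exactly that token, so
 * independent suspenders cannot release each other's holds:
 *
 *	task_suspension_token_t token;
 *	if (task_suspend2_external(task, &token) == KERN_SUCCESS) {
 *		// task is stopped; token holds a task reference
 *		(void)task_resume2_external(token);	// consumes the token
 *	}
 */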
4295 
4296 static void
4297 task_suspension_no_senders(ipc_port_t port, mach_port_mscount_t mscount)
4298 {
4299 	task_t task = convert_port_to_task_suspension_token(port);
4300 	kern_return_t kr;
4301 
4302 	if (task == TASK_NULL) {
4303 		return;
4304 	}
4305 
4306 	if (task == kernel_task) {
4307 		task_suspension_token_deallocate(task);
4308 		return;
4309 	}
4310 
4311 	task_lock(task);
4312 
4313 	kr = ipc_kobject_nsrequest(port, mscount, NULL);
4314 	if (kr == KERN_FAILURE) {
4315 		/* release all the [remaining] outstanding legacy holds */
4316 		release_task_hold(task, TASK_HOLD_LEGACY_ALL);
4317 	}
4318 
4319 	task_unlock(task);
4320 
4321 	task_suspension_token_deallocate(task);         /* drop token reference */
4322 }
4323 
4324 /*
4325  * Fires when a send-once right made
4326  * by convert_task_suspension_token_to_port() dies.
4327  */
4328 void
4329 task_suspension_send_once(ipc_port_t port)
4330 {
4331 	task_t task = convert_port_to_task_suspension_token(port);
4332 
4333 	if (task == TASK_NULL || task == kernel_task) {
4334 		return; /* nothing to do */
4335 	}
4336 
4337 	/* release the hold held by this specific send-once right */
4338 	task_lock(task);
4339 	release_task_hold(task, TASK_HOLD_NORMAL);
4340 	task_unlock(task);
4341 
4342 	task_suspension_token_deallocate(task);         /* drop token reference */
4343 }
4344 
4345 static kern_return_t
4346 task_pidsuspend_locked(task_t task)
4347 {
4348 	kern_return_t kr;
4349 
4350 	if (task->pidsuspended) {
4351 		kr = KERN_FAILURE;
4352 		goto out;
4353 	}
4354 
4355 	task->pidsuspended = TRUE;
4356 
4357 	kr = place_task_hold(task, TASK_HOLD_PIDSUSPEND);
4358 	if (kr != KERN_SUCCESS) {
4359 		task->pidsuspended = FALSE;
4360 	}
4361 out:
4362 	return kr;
4363 }
4364 
4365 
4366 /*
4367  *	task_pidsuspend:
4368  *
4369  *	Suspends a task by placing a hold on its threads.
4370  *
4371  * Conditions:
4372  *      The caller holds a reference to the task
4373  */
4374 kern_return_t
4375 task_pidsuspend(
4376 	task_t          task)
4377 {
4378 	kern_return_t    kr;
4379 
4380 	if (task == TASK_NULL || task == kernel_task) {
4381 		return KERN_INVALID_ARGUMENT;
4382 	}
4383 
4384 	task_lock(task);
4385 
4386 	kr = task_pidsuspend_locked(task);
4387 
4388 	task_unlock(task);
4389 
4390 	if ((KERN_SUCCESS == kr) && task->message_app_suspended) {
4391 		iokit_task_app_suspended_changed(task);
4392 	}
4393 
4394 	return kr;
4395 }
4396 
4397 /*
4398  *	task_pidresume:
4399  *		Resumes a previously suspended task.
4400  *
4401  * Conditions:
4402  *		The caller holds a reference to the task
4403  */
4404 kern_return_t
4405 task_pidresume(
4406 	task_t  task)
4407 {
4408 	kern_return_t    kr;
4409 
4410 	if (task == TASK_NULL || task == kernel_task) {
4411 		return KERN_INVALID_ARGUMENT;
4412 	}
4413 
4414 	task_lock(task);
4415 
4416 #if CONFIG_FREEZE
4417 
4418 	while (task->changing_freeze_state) {
4419 		assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
4420 		task_unlock(task);
4421 		thread_block(THREAD_CONTINUE_NULL);
4422 
4423 		task_lock(task);
4424 	}
4425 	task->changing_freeze_state = TRUE;
4426 #endif
4427 
4428 	kr = release_task_hold(task, TASK_HOLD_PIDSUSPEND);
4429 
4430 	task_unlock(task);
4431 
4432 	if ((KERN_SUCCESS == kr) && task->message_app_suspended) {
4433 		iokit_task_app_suspended_changed(task);
4434 	}
4435 
4436 #if CONFIG_FREEZE
4437 
4438 	task_lock(task);
4439 
4440 	if (kr == KERN_SUCCESS) {
4441 		task->frozen = FALSE;
4442 	}
4443 	task->changing_freeze_state = FALSE;
4444 	thread_wakeup(&task->changing_freeze_state);
4445 
4446 	task_unlock(task);
4447 #endif
4448 
4449 	return kr;
4450 }
4451 
4452 os_refgrp_decl(static, task_watchports_refgrp, "task_watchports", NULL);
4453 
4454 /*
4455  *	task_add_turnstile_watchports:
4456  *		Setup watchports to boost the main thread of the task.
4457  *
4458  *	Arguments:
4459  *		task: task being spawned
4460  *		thread: main thread of task
4461  *		portwatch_ports: array of watchports
4462  *		portwatch_count: number of watchports
4463  *
4464  *	Conditions:
4465  *		Nothing locked.
4466  */
4467 void
4468 task_add_turnstile_watchports(
4469 	task_t          task,
4470 	thread_t        thread,
4471 	ipc_port_t      *portwatch_ports,
4472 	uint32_t        portwatch_count)
4473 {
4474 	struct task_watchports *watchports = NULL;
4475 	struct task_watchport_elem *previous_elem_array[TASK_MAX_WATCHPORT_COUNT] = {};
4476 	os_ref_count_t refs;
4477 
4478 	/* Check if the task has terminated */
4479 	if (!task->active) {
4480 		return;
4481 	}
4482 
4483 	assert(portwatch_count <= TASK_MAX_WATCHPORT_COUNT);
4484 
4485 	watchports = task_watchports_alloc_init(task, thread, portwatch_count);
4486 
4487 	/* Lock the ipc space */
4488 	is_write_lock(task->itk_space);
4489 
4490 	/* Setup watchports to boost the main thread */
4491 	refs = task_add_turnstile_watchports_locked(task,
4492 	    watchports, previous_elem_array, portwatch_ports,
4493 	    portwatch_count);
4494 
4495 	/* Drop the space lock */
4496 	is_write_unlock(task->itk_space);
4497 
4498 	if (refs == 0) {
4499 		task_watchports_deallocate(watchports);
4500 	}
4501 
4502 	/* Drop the ref on previous_elem_array */
4503 	for (uint32_t i = 0; i < portwatch_count && previous_elem_array[i] != NULL; i++) {
4504 		task_watchport_elem_deallocate(previous_elem_array[i]);
4505 	}
4506 }
4507 
4508 /*
4509  *	task_remove_turnstile_watchports:
4510  *		Clear all turnstile boost on the task from watchports.
4511  *
4512  *	Arguments:
4513  *		task: task being terminated
4514  *
4515  *	Conditions:
4516  *		Nothing locked.
4517  */
4518 void
4519 task_remove_turnstile_watchports(
4520 	task_t          task)
4521 {
4522 	os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4523 	struct task_watchports *watchports = NULL;
4524 	ipc_port_t port_freelist[TASK_MAX_WATCHPORT_COUNT] = {};
4525 	uint32_t portwatch_count;
4526 
4527 	/* Lock the ipc space */
4528 	is_write_lock(task->itk_space);
4529 
4530 	/* Check if a watchport boost exists */
4531 	if (task->watchports == NULL) {
4532 		is_write_unlock(task->itk_space);
4533 		return;
4534 	}
4535 	watchports = task->watchports;
4536 	portwatch_count = watchports->tw_elem_array_count;
4537 
4538 	refs = task_remove_turnstile_watchports_locked(task, watchports,
4539 	    port_freelist);
4540 
4541 	is_write_unlock(task->itk_space);
4542 
4543 	/* Drop all the port references */
4544 	for (uint32_t i = 0; i < portwatch_count && port_freelist[i] != NULL; i++) {
4545 		ip_release(port_freelist[i]);
4546 	}
4547 
4548 	/* Clear the task and thread references for task_watchport */
4549 	if (refs == 0) {
4550 		task_watchports_deallocate(watchports);
4551 	}
4552 }
4553 
4554 /*
4555  *	task_transfer_turnstile_watchports:
4556  *		Transfer all watchport turnstile boost from old task to new task.
4557  *
4558  *	Arguments:
4559  *		old_task: task calling exec
4560  *		new_task: new exec'ed task
4561  *		thread: main thread of new task
4562  *
4563  *	Conditions:
4564  *		Nothing locked.
4565  */
4566 void
4567 task_transfer_turnstile_watchports(
4568 	task_t   old_task,
4569 	task_t   new_task,
4570 	thread_t new_thread)
4571 {
4572 	struct task_watchports *old_watchports = NULL;
4573 	struct task_watchports *new_watchports = NULL;
4574 	os_ref_count_t old_refs = TASK_MAX_WATCHPORT_COUNT;
4575 	os_ref_count_t new_refs = TASK_MAX_WATCHPORT_COUNT;
4576 	uint32_t portwatch_count;
4577 
4578 	if (old_task->watchports == NULL || !new_task->active) {
4579 		return;
4580 	}
4581 
4582 	/* Get the watch port count from the old task */
4583 	is_write_lock(old_task->itk_space);
4584 	if (old_task->watchports == NULL) {
4585 		is_write_unlock(old_task->itk_space);
4586 		return;
4587 	}
4588 
4589 	portwatch_count = old_task->watchports->tw_elem_array_count;
4590 	is_write_unlock(old_task->itk_space);
4591 
4592 	new_watchports = task_watchports_alloc_init(new_task, new_thread, portwatch_count);
4593 
4594 	/* Lock the ipc space for old task */
4595 	is_write_lock(old_task->itk_space);
4596 
4597 	/* Lock the ipc space for new task */
4598 	is_write_lock(new_task->itk_space);
4599 
4600 	/* Check if a watchport boost exists */
4601 	if (old_task->watchports == NULL || !new_task->active) {
4602 		is_write_unlock(new_task->itk_space);
4603 		is_write_unlock(old_task->itk_space);
4604 		(void)task_watchports_release(new_watchports);
4605 		task_watchports_deallocate(new_watchports);
4606 		return;
4607 	}
4608 
4609 	old_watchports = old_task->watchports;
4610 	assert(portwatch_count == old_task->watchports->tw_elem_array_count);
4611 
4612 	/* Setup new task watchports */
4613 	new_task->watchports = new_watchports;
4614 
4615 	for (uint32_t i = 0; i < portwatch_count; i++) {
4616 		ipc_port_t port = old_watchports->tw_elem[i].twe_port;
4617 
4618 		if (port == NULL) {
4619 			task_watchport_elem_clear(&new_watchports->tw_elem[i]);
4620 			continue;
4621 		}
4622 
4623 		/* Lock the port and check if it has the entry */
4624 		ip_mq_lock(port);
4625 
4626 		task_watchport_elem_init(&new_watchports->tw_elem[i], new_task, port);
4627 
4628 		if (ipc_port_replace_watchport_elem_conditional_locked(port,
4629 		    &old_watchports->tw_elem[i], &new_watchports->tw_elem[i]) == KERN_SUCCESS) {
4630 			task_watchport_elem_clear(&old_watchports->tw_elem[i]);
4631 
4632 			task_watchports_retain(new_watchports);
4633 			old_refs = task_watchports_release(old_watchports);
4634 
4635 			/* Check if all ports are cleaned */
4636 			if (old_refs == 0) {
4637 				old_task->watchports = NULL;
4638 			}
4639 		} else {
4640 			task_watchport_elem_clear(&new_watchports->tw_elem[i]);
4641 		}
4642 		/* port unlocked by ipc_port_replace_watchport_elem_conditional_locked */
4643 	}
4644 
4645 	/* Drop the reference on new task_watchports struct returned by task_watchports_alloc_init */
4646 	new_refs = task_watchports_release(new_watchports);
4647 	if (new_refs == 0) {
4648 		new_task->watchports = NULL;
4649 	}
4650 
4651 	is_write_unlock(new_task->itk_space);
4652 	is_write_unlock(old_task->itk_space);
4653 
4654 	/* Clear the task and thread references for old_watchport */
4655 	if (old_refs == 0) {
4656 		task_watchports_deallocate(old_watchports);
4657 	}
4658 
4659 	/* Clear the task and thread references for new_watchport */
4660 	if (new_refs == 0) {
4661 		task_watchports_deallocate(new_watchports);
4662 	}
4663 }
4664 
4665 /*
4666  *	task_add_turnstile_watchports_locked:
4667  *		Setup watchports to boost the main thread of the task.
4668  *
4669  *	Arguments:
4670  *		task: task to boost
4671  *		watchports: watchport structure to be attached to the task
4672  *		previous_elem_array: an array of old watchport_elem to be returned to caller
4673  *		portwatch_ports: array of watchports
4674  *		portwatch_count: number of watchports
4675  *
4676  *	Conditions:
4677  *		ipc space of the task locked.
4678  *		returns array of old watchport_elem in previous_elem_array
4679  */
4680 static os_ref_count_t
4681 task_add_turnstile_watchports_locked(
4682 	task_t                      task,
4683 	struct task_watchports      *watchports,
4684 	struct task_watchport_elem  **previous_elem_array,
4685 	ipc_port_t                  *portwatch_ports,
4686 	uint32_t                    portwatch_count)
4687 {
4688 	os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4689 
4690 	/* Check if the task is still active */
4691 	if (!task->active) {
4692 		refs = task_watchports_release(watchports);
4693 		return refs;
4694 	}
4695 
4696 	assert(task->watchports == NULL);
4697 	task->watchports = watchports;
4698 
4699 	for (uint32_t i = 0, j = 0; i < portwatch_count; i++) {
4700 		ipc_port_t port = portwatch_ports[i];
4701 
4702 		task_watchport_elem_init(&watchports->tw_elem[i], task, port);
4703 		if (port == NULL) {
4704 			task_watchport_elem_clear(&watchports->tw_elem[i]);
4705 			continue;
4706 		}
4707 
4708 		ip_mq_lock(port);
4709 
4710 		/* Check if port is in valid state to be setup as watchport */
4711 		if (ipc_port_add_watchport_elem_locked(port, &watchports->tw_elem[i],
4712 		    &previous_elem_array[j]) != KERN_SUCCESS) {
4713 			task_watchport_elem_clear(&watchports->tw_elem[i]);
4714 			continue;
4715 		}
4716 		/* port unlocked on return */
4717 
4718 		ip_reference(port);
4719 		task_watchports_retain(watchports);
4720 		if (previous_elem_array[j] != NULL) {
4721 			j++;
4722 		}
4723 	}
4724 
4725 	/* Drop the reference on task_watchport struct returned by os_ref_init */
4726 	refs = task_watchports_release(watchports);
4727 	if (refs == 0) {
4728 		task->watchports = NULL;
4729 	}
4730 
4731 	return refs;
4732 }
4733 
4734 /*
4735  *	task_remove_turnstile_watchports_locked:
4736  *		Clear all turnstile boost on the task from watchports.
4737  *
4738  *	Arguments:
4739  *		task: task to remove watchports from
4740  *		watchports: watchports structure for the task
4741  *		port_freelist: array of ports returned with ref to caller
4742  *
4743  *
4744  *	Conditions:
4745  *		ipc space of the task locked.
4746  *		array of ports with refs are returned in port_freelist
4747  */
4748 static os_ref_count_t
4749 task_remove_turnstile_watchports_locked(
4750 	task_t                 task,
4751 	struct task_watchports *watchports,
4752 	ipc_port_t             *port_freelist)
4753 {
4754 	os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4755 
4756 	for (uint32_t i = 0, j = 0; i < watchports->tw_elem_array_count; i++) {
4757 		ipc_port_t port = watchports->tw_elem[i].twe_port;
4758 		if (port == NULL) {
4759 			continue;
4760 		}
4761 
4762 		/* Lock the port and check if it has the entry */
4763 		ip_mq_lock(port);
4764 		if (ipc_port_clear_watchport_elem_internal_conditional_locked(port,
4765 		    &watchports->tw_elem[i]) == KERN_SUCCESS) {
4766 			task_watchport_elem_clear(&watchports->tw_elem[i]);
4767 			port_freelist[j++] = port;
4768 			refs = task_watchports_release(watchports);
4769 
4770 			/* Check if all ports are cleaned */
4771 			if (refs == 0) {
4772 				task->watchports = NULL;
4773 				break;
4774 			}
4775 		}
4776 		/* mqueue and port unlocked by ipc_port_clear_watchport_elem_internal_conditional_locked */
4777 	}
4778 	return refs;
4779 }
4780 
4781 /*
4782  *	task_watchports_alloc_init:
4783  *		Allocate and initialize task watchport struct.
4784  *
4785  *	Conditions:
4786  *		Nothing locked.
4787  */
4788 static struct task_watchports *
4789 task_watchports_alloc_init(
4790 	task_t        task,
4791 	thread_t      thread,
4792 	uint32_t      count)
4793 {
4794 	struct task_watchports *watchports = kalloc_type(struct task_watchports,
4795 	    struct task_watchport_elem, count, Z_WAITOK | Z_ZERO | Z_NOFAIL);
4796 
4797 	task_reference(task);
4798 	thread_reference(thread);
4799 	watchports->tw_task = task;
4800 	watchports->tw_thread = thread;
4801 	watchports->tw_elem_array_count = count;
4802 	os_ref_init(&watchports->tw_refcount, &task_watchports_refgrp);
4803 
4804 	return watchports;
4805 }
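
/*
 * Illustrative sketch (not compiled): the reference taken by
 * os_ref_init() above is a setup-phase reference.  Each successfully
 * registered port takes its own retain, and the setup reference is
 * dropped once registration is complete, so a zero count means no
 * port was registered and the structure can be freed immediately.
 */
#if 0
struct task_watchports *w = task_watchports_alloc_init(task, thread, count);
/* ... each successfully registered port does task_watchports_retain(w) ... */
if (task_watchports_release(w) == 0) {
	/* the setup reference was the last one: nothing got registered */
	task_watchports_deallocate(w);
}
#endif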
4806 
4807 /*
4808  *	task_watchports_deallocate:
4809  *		Deallocate task watchport struct.
4810  *
4811  *	Conditions:
4812  *		Nothing locked.
4813  */
4814 static void
4815 task_watchports_deallocate(
4816 	struct task_watchports *watchports)
4817 {
4818 	uint32_t portwatch_count = watchports->tw_elem_array_count;
4819 
4820 	task_deallocate(watchports->tw_task);
4821 	thread_deallocate(watchports->tw_thread);
4822 	kfree_type(struct task_watchports, struct task_watchport_elem,
4823 	    portwatch_count, watchports);
4824 }
4825 
4826 /*
4827  *	task_watchport_elem_deallocate:
4828  *		Deallocate task watchport element and release its ref on task_watchport.
4829  *
4830  *	Conditions:
4831  *		Nothing locked.
4832  */
4833 void
4834 task_watchport_elem_deallocate(
4835 	struct task_watchport_elem *watchport_elem)
4836 {
4837 	os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4838 	task_t task = watchport_elem->twe_task;
4839 	struct task_watchports *watchports = NULL;
4840 	ipc_port_t port = NULL;
4841 
4842 	assert(task != NULL);
4843 
4844 	/* Take the space lock to modify the element */
4845 	is_write_lock(task->itk_space);
4846 
4847 	watchports = task->watchports;
4848 	assert(watchports != NULL);
4849 
4850 	port = watchport_elem->twe_port;
4851 	assert(port != NULL);
4852 
4853 	task_watchport_elem_clear(watchport_elem);
4854 	refs = task_watchports_release(watchports);
4855 
4856 	if (refs == 0) {
4857 		task->watchports = NULL;
4858 	}
4859 
4860 	is_write_unlock(task->itk_space);
4861 
4862 	ip_release(port);
4863 	if (refs == 0) {
4864 		task_watchports_deallocate(watchports);
4865 	}
4866 }
4867 
4868 /*
4869  *	task_has_watchports:
4870  *		Return TRUE if task has watchport boosts.
4871  *
4872  *	Conditions:
4873  *		Nothing locked.
4874  */
4875 boolean_t
4876 task_has_watchports(task_t task)
4877 {
4878 	return task->watchports != NULL;
4879 }
4880 
4881 #if DEVELOPMENT || DEBUG
4882 
4883 extern void IOSleep(int);
4884 
4885 kern_return_t
4886 task_disconnect_page_mappings(task_t task)
4887 {
4888 	int     n;
4889 
4890 	if (task == TASK_NULL || task == kernel_task) {
4891 		return KERN_INVALID_ARGUMENT;
4892 	}
4893 
4894 	/*
4895 	 * This function strips all of the mappings from the pmap for
4896 	 * the specified task, forcing the task to re-fault all of the
4897 	 * pages it is actively using... this allows us to approximate
4898 	 * the true working set of the task.  We only engage if at
4899 	 * least one of the threads in the task is runnable, but we
4900 	 * want to sweep continuously (at least for a while - the limit
4901 	 * of 100 sweeps is arbitrary and should be revisited as we
4902 	 * gain experience) to get a better view of which areas within
4903 	 * a page are being visited, as opposed to only seeing the
4904 	 * first fault of a page after the task becomes runnable.
4905 	 * In the future we may try to block until awakened by a
4906 	 * thread in this task being made runnable; for now we
4907 	 * periodically poll from the user-level debug tool driving
4908 	 * the sysctl.
4909 	 */
4910 	for (n = 0; n < 100; n++) {
4911 		thread_t        thread;
4912 		boolean_t       runnable;
4913 		boolean_t       do_unnest;
4914 		int             page_count;
4915 
4916 		runnable = FALSE;
4917 		do_unnest = FALSE;
4918 
4919 		task_lock(task);
4920 
4921 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
4922 			if (thread->state & TH_RUN) {
4923 				runnable = TRUE;
4924 				break;
4925 			}
4926 		}
4927 		if (n == 0) {
4928 			task->task_disconnected_count++;
4929 		}
4930 
4931 		if (task->task_unnested == FALSE) {
4932 			if (runnable == TRUE) {
4933 				task->task_unnested = TRUE;
4934 				do_unnest = TRUE;
4935 			}
4936 		}
4937 		task_unlock(task);
4938 
4939 		if (runnable == FALSE) {
4940 			break;
4941 		}
4942 
4943 		KDBG_RELEASE((MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_START,
4944 		    task, do_unnest, task->task_disconnected_count);
4945 
4946 		page_count = vm_map_disconnect_page_mappings(task->map, do_unnest);
4947 
4948 		KDBG_RELEASE((MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_END,
4949 		    task, page_count);
4950 
4951 		if ((n % 5) == 4) {
4952 			IOSleep(1);
4953 		}
4954 	}
4955 	return KERN_SUCCESS;
4956 }
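
/*
 * Illustrative sketch (user-space side, not compiled): the debug tool
 * mentioned in the comment above just polls a sysctl that lands in
 * task_disconnect_page_mappings(); the sysctl name here is a
 * placeholder, not the actual interface.
 */
#if 0
int pid = target_pid;
while (keep_sampling) {
	/* hypothetical sysctl wired to task_disconnect_page_mappings() */
	sysctlbyname("vm.disconnect_task_page_mappings",
	    NULL, NULL, &pid, sizeof(pid));
	sleep(sample_interval_secs);
}
#endif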
4957 
4958 #endif
4959 
4960 
4961 #if CONFIG_FREEZE
4962 
4963 /*
4964  *	task_freeze:
4965  *
4966  *	Freeze a task.
4967  *
4968  * Conditions:
4969  *      The caller holds a reference to the task
4970  */
4971 extern struct freezer_context freezer_context_global;
4972 
4973 kern_return_t
4974 task_freeze(
4975 	task_t    task,
4976 	uint32_t           *purgeable_count,
4977 	uint32_t           *wired_count,
4978 	uint32_t           *clean_count,
4979 	uint32_t           *dirty_count,
4980 	uint32_t           dirty_budget,
4981 	uint32_t           *shared_count,
4982 	int                *freezer_error_code,
4983 	boolean_t          eval_only)
4984 {
4985 	kern_return_t kr = KERN_SUCCESS;
4986 
4987 	if (task == TASK_NULL || task == kernel_task) {
4988 		return KERN_INVALID_ARGUMENT;
4989 	}
4990 
4991 	task_lock(task);
4992 
4993 	while (task->changing_freeze_state) {
4994 		assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
4995 		task_unlock(task);
4996 		thread_block(THREAD_CONTINUE_NULL);
4997 
4998 		task_lock(task);
4999 	}
5000 	if (task->frozen) {
5001 		task_unlock(task);
5002 		return KERN_FAILURE;
5003 	}
5004 	task->changing_freeze_state = TRUE;
5005 
5006 	freezer_context_global.freezer_ctx_task = task;
5007 
5008 	task_unlock(task);
5009 
5010 	kr = vm_map_freeze(task,
5011 	    purgeable_count,
5012 	    wired_count,
5013 	    clean_count,
5014 	    dirty_count,
5015 	    dirty_budget,
5016 	    shared_count,
5017 	    freezer_error_code,
5018 	    eval_only);
5019 
5020 	task_lock(task);
5021 
5022 	if ((kr == KERN_SUCCESS) && (eval_only == FALSE)) {
5023 		task->frozen = TRUE;
5024 
5025 		freezer_context_global.freezer_ctx_task = NULL;
5026 		freezer_context_global.freezer_ctx_uncompressed_pages = 0;
5027 
5028 		if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
5029 			/*
5030 			 * reset the counter tracking the # of swapped compressed pages
5031 			 * because we are now done with this freeze session and task.
5032 			 */
5033 
5034 			*dirty_count = (uint32_t) (freezer_context_global.freezer_ctx_swapped_bytes / PAGE_SIZE_64); /* used to track pageouts */
5035 		}
5036 
5037 		freezer_context_global.freezer_ctx_swapped_bytes = 0;
5038 	}
5039 
5040 	task->changing_freeze_state = FALSE;
5041 	thread_wakeup(&task->changing_freeze_state);
5042 
5043 	task_unlock(task);
5044 
5045 	if (VM_CONFIG_COMPRESSOR_IS_PRESENT &&
5046 	    (kr == KERN_SUCCESS) &&
5047 	    (eval_only == FALSE)) {
5048 		vm_wake_compactor_swapper();
5049 		/*
5050 		 * We do an explicit wakeup of the swapout thread here
5051 		 * because the compact_and_swap routines don't have
5052 		 * knowledge of these kinds of "per-task packed c_segs"
5053 		 * and so will not be evaluating whether we need to do
5054 		 * a wakeup there.
5055 		 */
5056 		thread_wakeup((event_t)&vm_swapout_thread);
5057 	}
5058 
5059 	return kr;
5060 }
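
/*
 * Illustrative sketch (not compiled): task_freeze() and task_thaw()
 * serialize on the changing_freeze_state flag with the classic Mach
 * wait idiom -- assert a wait on the flag's address, drop the task
 * lock, block, then re-take the lock and re-check the condition:
 */
#if 0
task_lock(task);
while (task->changing_freeze_state) {
	assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
	task_unlock(task);
	thread_block(THREAD_CONTINUE_NULL);
	task_lock(task);
}
/* ... mutate the freeze state, then wake any other waiters ... */
task->changing_freeze_state = FALSE;
thread_wakeup(&task->changing_freeze_state);
task_unlock(task);
#endif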
5061 
5062 /*
5063  *	task_thaw:
5064  *
5065  *	Thaw a currently frozen task.
5066  *
5067  * Conditions:
5068  *      The caller holds a reference to the task
5069  */
5070 kern_return_t
5071 task_thaw(
5072 	task_t          task)
5073 {
5074 	if (task == TASK_NULL || task == kernel_task) {
5075 		return KERN_INVALID_ARGUMENT;
5076 	}
5077 
5078 	task_lock(task);
5079 
5080 	while (task->changing_freeze_state) {
5081 		assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
5082 		task_unlock(task);
5083 		thread_block(THREAD_CONTINUE_NULL);
5084 
5085 		task_lock(task);
5086 	}
5087 	if (!task->frozen) {
5088 		task_unlock(task);
5089 		return KERN_FAILURE;
5090 	}
5091 	task->frozen = FALSE;
5092 
5093 	task_unlock(task);
5094 
5095 	return KERN_SUCCESS;
5096 }
5097 
5098 void
5099 task_update_frozen_to_swap_acct(task_t task, int64_t amount, freezer_acct_op_t op)
5100 {
5101 	/*
5102 	 * We don't assert that the task lock is held because we call this
5103 	 * routine from the decompression path and we won't be holding the
5104 	 * task lock. However, since we are in the context of the task, we
5105 	 * are safe.
5106 	 * In the case of the task_freeze path, we call it from behind the task
5107 	 * lock but we don't need to because we have a reference on the proc
5108 	 * being frozen.
5109 	 */
5110 
5111 	assert(task);
5112 	if (amount == 0) {
5113 		return;
5114 	}
5115 
5116 	if (op == CREDIT_TO_SWAP) {
5117 		ledger_credit_nocheck(task->ledger, task_ledgers.frozen_to_swap, amount);
5118 	} else if (op == DEBIT_FROM_SWAP) {
5119 		ledger_debit_nocheck(task->ledger, task_ledgers.frozen_to_swap, amount);
5120 	} else {
5121 		panic("task_update_frozen_to_swap_acct: Invalid ledger op");
5122 	}
5123 }
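
/*
 * Illustrative sketch (not compiled): callers account swap traffic in
 * bytes -- the swapout path credits and the swapin/decompression path
 * debits the frozen_to_swap ledger:
 */
#if 0
task_update_frozen_to_swap_acct(task, compressed_bytes, CREDIT_TO_SWAP);
/* ... later, when those bytes are brought back from swap ... */
task_update_frozen_to_swap_acct(task, compressed_bytes, DEBIT_FROM_SWAP);
#endif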
5124 #endif /* CONFIG_FREEZE */
5125 
5126 kern_return_t
5127 task_set_security_tokens(
5128 	task_t           task,
5129 	security_token_t sec_token,
5130 	audit_token_t    audit_token,
5131 	host_priv_t      host_priv)
5132 {
5133 	ipc_port_t       host_port = IP_NULL;
5134 	kern_return_t    kr;
5135 
5136 	if (task == TASK_NULL) {
5137 		return KERN_INVALID_ARGUMENT;
5138 	}
5139 
5140 	task_lock(task);
5141 	task_set_tokens(task, &sec_token, &audit_token);
5142 	task_unlock(task);
5143 
5144 	if (host_priv != HOST_PRIV_NULL) {
5145 		kr = host_get_host_priv_port(host_priv, &host_port);
5146 	} else {
5147 		kr = host_get_host_port(host_priv_self(), &host_port);
5148 	}
5149 	assert(kr == KERN_SUCCESS);
5150 
5151 	kr = task_set_special_port_internal(task, TASK_HOST_PORT, host_port);
5152 	return kr;
5153 }
5154 
5155 kern_return_t
5156 task_send_trace_memory(
5157 	__unused task_t   target_task,
5158 	__unused uint32_t pid,
5159 	__unused uint64_t uniqueid)
5160 {
5161 	return KERN_INVALID_ARGUMENT;
5162 }
5163 
5164 /*
5165  * This routine was added, pretty much exclusively, for registering the
5166  * RPC glue vector for in-kernel short circuited tasks.  Rather than
5167  * removing it completely, I have only disabled that feature (which was
5168  * the only feature at the time).  It just appears that we are going to
5169  * want to add some user data to tasks in the future (e.g. bsd info,
5170  * task names, etc...), so I left it in the formal task interface.
5171  */
5172 kern_return_t
5173 task_set_info(
5174 	task_t          task,
5175 	task_flavor_t   flavor,
5176 	__unused task_info_t    task_info_in,           /* pointer to IN array */
5177 	__unused mach_msg_type_number_t task_info_count)
5178 {
5179 	if (task == TASK_NULL) {
5180 		return KERN_INVALID_ARGUMENT;
5181 	}
5182 	switch (flavor) {
5183 #if CONFIG_ATM
5184 	case TASK_TRACE_MEMORY_INFO:
5185 		return KERN_NOT_SUPPORTED;
5186 #endif // CONFIG_ATM
5187 	default:
5188 		return KERN_INVALID_ARGUMENT;
5189 	}
5190 }
5191 
5192 static void
5193 _task_fill_times(task_t task, time_value_t *user_time, time_value_t *sys_time)
5194 {
5195 	clock_sec_t sec;
5196 	clock_usec_t usec;
5197 
5198 	struct recount_times_mach times = recount_task_terminated_times(task);
5199 	absolutetime_to_microtime(times.rtm_user, &sec, &usec);
5200 	user_time->seconds = (typeof(user_time->seconds))sec;
5201 	user_time->microseconds = usec;
5202 	absolutetime_to_microtime(times.rtm_system, &sec, &usec);
5203 	sys_time->seconds = (typeof(sys_time->seconds))sec;
5204 	sys_time->microseconds = usec;
5205 }
5206 
5207 int radar_20146450 = 1;
5208 kern_return_t
5209 task_info(
5210 	task_t                  task,
5211 	task_flavor_t           flavor,
5212 	task_info_t             task_info_out,
5213 	mach_msg_type_number_t  *task_info_count)
5214 {
5215 	kern_return_t error = KERN_SUCCESS;
5216 	mach_msg_type_number_t  original_task_info_count;
5217 	bool is_kernel_task = (task == kernel_task);
5218 
5219 	if (task == TASK_NULL) {
5220 		return KERN_INVALID_ARGUMENT;
5221 	}
5222 
5223 	original_task_info_count = *task_info_count;
5224 	task_lock(task);
5225 
5226 	if (task != current_task() && !task->active) {
5227 		task_unlock(task);
5228 		return KERN_INVALID_ARGUMENT;
5229 	}
5230 
5231 
5232 	switch (flavor) {
5233 	case TASK_BASIC_INFO_32:
5234 	case TASK_BASIC2_INFO_32:
5235 #if defined(__arm64__)
5236 	case TASK_BASIC_INFO_64:
5237 #endif
5238 		{
5239 			task_basic_info_32_t basic_info;
5240 			ledger_amount_t      tmp;
5241 
5242 			if (*task_info_count < TASK_BASIC_INFO_32_COUNT) {
5243 				error = KERN_INVALID_ARGUMENT;
5244 				break;
5245 			}
5246 
5247 			basic_info = (task_basic_info_32_t)task_info_out;
5248 
5249 			basic_info->virtual_size = (typeof(basic_info->virtual_size))
5250 			    vm_map_adjusted_size(is_kernel_task ? kernel_map : task->map);
5251 			if (flavor == TASK_BASIC2_INFO_32) {
5252 				/*
5253 				 * The "BASIC2" flavor gets the maximum resident
5254 				 * size instead of the current resident size...
5255 				 */
5256 				ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, &tmp);
5257 			} else {
5258 				ledger_get_balance(task->ledger, task_ledgers.phys_mem, &tmp);
5259 			}
5260 			basic_info->resident_size = (natural_t) MIN((ledger_amount_t) UINT32_MAX, tmp);
5261 
5262 			_task_fill_times(task, &basic_info->user_time,
5263 			    &basic_info->system_time);
5264 
5265 			basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5266 			basic_info->suspend_count = task->user_stop_count;
5267 
5268 			*task_info_count = TASK_BASIC_INFO_32_COUNT;
5269 			break;
5270 		}
5271 
5272 #if defined(__arm64__)
5273 	case TASK_BASIC_INFO_64_2:
5274 	{
5275 		task_basic_info_64_2_t  basic_info;
5276 
5277 		if (*task_info_count < TASK_BASIC_INFO_64_2_COUNT) {
5278 			error = KERN_INVALID_ARGUMENT;
5279 			break;
5280 		}
5281 
5282 		basic_info = (task_basic_info_64_2_t)task_info_out;
5283 
5284 		basic_info->virtual_size  = vm_map_adjusted_size(is_kernel_task ?
5285 		    kernel_map : task->map);
5286 		ledger_get_balance(task->ledger, task_ledgers.phys_mem,
5287 		    (ledger_amount_t *)&basic_info->resident_size);
5288 		basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5289 		basic_info->suspend_count = task->user_stop_count;
5290 		_task_fill_times(task, &basic_info->user_time,
5291 		    &basic_info->system_time);
5292 
5293 		*task_info_count = TASK_BASIC_INFO_64_2_COUNT;
5294 		break;
5295 	}
5296 
5297 #else /* defined(__arm64__) */
5298 	case TASK_BASIC_INFO_64:
5299 	{
5300 		task_basic_info_64_t basic_info;
5301 
5302 		if (*task_info_count < TASK_BASIC_INFO_64_COUNT) {
5303 			error = KERN_INVALID_ARGUMENT;
5304 			break;
5305 		}
5306 
5307 		basic_info = (task_basic_info_64_t)task_info_out;
5308 
5309 		basic_info->virtual_size = vm_map_adjusted_size(is_kernel_task ?
5310 		    kernel_map : task->map);
5311 		ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *)&basic_info->resident_size);
5312 		basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5313 		basic_info->suspend_count = task->user_stop_count;
5314 		_task_fill_times(task, &basic_info->user_time,
5315 		    &basic_info->system_time);
5316 
5317 		*task_info_count = TASK_BASIC_INFO_64_COUNT;
5318 		break;
5319 	}
5320 #endif /* defined(__arm64__) */
5321 
5322 	case MACH_TASK_BASIC_INFO:
5323 	{
5324 		mach_task_basic_info_t  basic_info;
5325 
5326 		if (*task_info_count < MACH_TASK_BASIC_INFO_COUNT) {
5327 			error = KERN_INVALID_ARGUMENT;
5328 			break;
5329 		}
5330 
5331 		basic_info = (mach_task_basic_info_t)task_info_out;
5332 
5333 		basic_info->virtual_size = vm_map_adjusted_size(is_kernel_task ?
5334 		    kernel_map : task->map);
5335 		ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &basic_info->resident_size);
5336 		ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &basic_info->resident_size_max);
5337 		basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5338 		basic_info->suspend_count = task->user_stop_count;
5339 		_task_fill_times(task, &basic_info->user_time,
5340 		    &basic_info->system_time);
5341 
5342 		*task_info_count = MACH_TASK_BASIC_INFO_COUNT;
5343 		break;
5344 	}
5345 
5346 	case TASK_THREAD_TIMES_INFO:
5347 	{
5348 		task_thread_times_info_t times_info;
5349 		thread_t                 thread;
5350 
5351 		if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
5352 			error = KERN_INVALID_ARGUMENT;
5353 			break;
5354 		}
5355 
5356 		times_info = (task_thread_times_info_t)task_info_out;
5357 		times_info->user_time = (time_value_t){ 0 };
5358 		times_info->system_time = (time_value_t){ 0 };
5359 
5360 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
5361 			if ((thread->options & TH_OPT_IDLE_THREAD) == 0) {
5362 				time_value_t user_time, system_time;
5363 
5364 				thread_read_times(thread, &user_time, &system_time, NULL);
5365 				time_value_add(&times_info->user_time, &user_time);
5366 				time_value_add(&times_info->system_time, &system_time);
5367 			}
5368 		}
5369 
5370 		*task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
5371 		break;
5372 	}
5373 
5374 	case TASK_ABSOLUTETIME_INFO:
5375 	{
5376 		task_absolutetime_info_t        info;
5377 
5378 		if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT) {
5379 			error = KERN_INVALID_ARGUMENT;
5380 			break;
5381 		}
5382 
5383 		info = (task_absolutetime_info_t)task_info_out;
5384 
5385 		struct recount_times_mach term_times =
5386 		    recount_task_terminated_times(task);
5387 		struct recount_times_mach total_times = recount_task_times(task);
5388 
5389 		info->total_user = total_times.rtm_user;
5390 		info->total_system = total_times.rtm_system;
5391 		info->threads_user = total_times.rtm_user - term_times.rtm_user;
5392 		info->threads_system = total_times.rtm_system - term_times.rtm_system;
5393 
5394 		*task_info_count = TASK_ABSOLUTETIME_INFO_COUNT;
5395 		break;
5396 	}
5397 
5398 	case TASK_DYLD_INFO:
5399 	{
5400 		task_dyld_info_t info;
5401 
5402 		/*
5403 		 * We added the format field to TASK_DYLD_INFO output.  For
5404 		 * temporary backward compatibility, accept the fact that
5405 		 * clients may ask for the old version - distinguished by the
5406 		 * size of the expected result structure.
5407 		 */
5408 #define TASK_LEGACY_DYLD_INFO_COUNT \
5409 	        offsetof(struct task_dyld_info, all_image_info_format)/sizeof(natural_t)
5410 
5411 		if (*task_info_count < TASK_LEGACY_DYLD_INFO_COUNT) {
5412 			error = KERN_INVALID_ARGUMENT;
5413 			break;
5414 		}
5415 
5416 		info = (task_dyld_info_t)task_info_out;
5417 		info->all_image_info_addr = task->all_image_info_addr;
5418 		info->all_image_info_size = task->all_image_info_size;
5419 
5420 		/* only set format on output for those expecting it */
5421 		if (*task_info_count >= TASK_DYLD_INFO_COUNT) {
5422 			info->all_image_info_format = task_has_64Bit_addr(task) ?
5423 			    TASK_DYLD_ALL_IMAGE_INFO_64 :
5424 			    TASK_DYLD_ALL_IMAGE_INFO_32;
5425 			*task_info_count = TASK_DYLD_INFO_COUNT;
5426 		} else {
5427 			*task_info_count = TASK_LEGACY_DYLD_INFO_COUNT;
5428 		}
5429 		break;
5430 	}
5431 
5432 	case TASK_EXTMOD_INFO:
5433 	{
5434 		task_extmod_info_t info;
5435 		void *p;
5436 
5437 		if (*task_info_count < TASK_EXTMOD_INFO_COUNT) {
5438 			error = KERN_INVALID_ARGUMENT;
5439 			break;
5440 		}
5441 
5442 		info = (task_extmod_info_t)task_info_out;
5443 
5444 		p = get_bsdtask_info(task);
5445 		if (p) {
5446 			proc_getexecutableuuid(p, info->task_uuid, sizeof(info->task_uuid));
5447 		} else {
5448 			bzero(info->task_uuid, sizeof(info->task_uuid));
5449 		}
5450 		info->extmod_statistics = task->extmod_statistics;
5451 		*task_info_count = TASK_EXTMOD_INFO_COUNT;
5452 
5453 		break;
5454 	}
5455 
5456 	case TASK_KERNELMEMORY_INFO:
5457 	{
5458 		task_kernelmemory_info_t        tkm_info;
5459 		ledger_amount_t                 credit, debit;
5460 
5461 		if (*task_info_count < TASK_KERNELMEMORY_INFO_COUNT) {
5462 			error = KERN_INVALID_ARGUMENT;
5463 			break;
5464 		}
5465 
5466 		tkm_info = (task_kernelmemory_info_t) task_info_out;
5467 		tkm_info->total_palloc = 0;
5468 		tkm_info->total_pfree = 0;
5469 		tkm_info->total_salloc = 0;
5470 		tkm_info->total_sfree = 0;
5471 
5472 		if (task == kernel_task) {
5473 			/*
5474 			 * All shared allocs/frees from other tasks count against
5475 			 * the kernel private memory usage.  If we are looking up
5476 			 * info for the kernel task, gather from everywhere.
5477 			 */
5478 			task_unlock(task);
5479 
5480 			/* start by accounting for all the terminated tasks against the kernel */
5481 			tkm_info->total_palloc = tasks_tkm_private.alloc + tasks_tkm_shared.alloc;
5482 			tkm_info->total_pfree = tasks_tkm_private.free + tasks_tkm_shared.free;
5483 
5484 			/* count all other task/thread shared alloc/free against the kernel */
5485 			lck_mtx_lock(&tasks_threads_lock);
5486 
5487 			/* XXX this really shouldn't be using the function parameter 'task' as a local var! */
5488 			queue_iterate(&tasks, task, task_t, tasks) {
5489 				if (task == kernel_task) {
5490 					if (ledger_get_entries(task->ledger,
5491 					    task_ledgers.tkm_private, &credit,
5492 					    &debit) == KERN_SUCCESS) {
5493 						tkm_info->total_palloc += credit;
5494 						tkm_info->total_pfree += debit;
5495 					}
5496 				}
5497 				if (!ledger_get_entries(task->ledger,
5498 				    task_ledgers.tkm_shared, &credit, &debit)) {
5499 					tkm_info->total_palloc += credit;
5500 					tkm_info->total_pfree += debit;
5501 				}
5502 			}
5503 			lck_mtx_unlock(&tasks_threads_lock);
5504 		} else {
5505 			if (!ledger_get_entries(task->ledger,
5506 			    task_ledgers.tkm_private, &credit, &debit)) {
5507 				tkm_info->total_palloc = credit;
5508 				tkm_info->total_pfree = debit;
5509 			}
5510 			if (!ledger_get_entries(task->ledger,
5511 			    task_ledgers.tkm_shared, &credit, &debit)) {
5512 				tkm_info->total_salloc = credit;
5513 				tkm_info->total_sfree = debit;
5514 			}
5515 			task_unlock(task);
5516 		}
5517 
5518 		*task_info_count = TASK_KERNELMEMORY_INFO_COUNT;
5519 		return KERN_SUCCESS;
5520 	}
5521 
5522 	/* OBSOLETE */
5523 	case TASK_SCHED_FIFO_INFO:
5524 	{
5525 		if (*task_info_count < POLICY_FIFO_BASE_COUNT) {
5526 			error = KERN_INVALID_ARGUMENT;
5527 			break;
5528 		}
5529 
5530 		error = KERN_INVALID_POLICY;
5531 		break;
5532 	}
5533 
5534 	/* OBSOLETE */
5535 	case TASK_SCHED_RR_INFO:
5536 	{
5537 		policy_rr_base_t        rr_base;
5538 		uint32_t quantum_time;
5539 		uint64_t quantum_ns;
5540 
5541 		if (*task_info_count < POLICY_RR_BASE_COUNT) {
5542 			error = KERN_INVALID_ARGUMENT;
5543 			break;
5544 		}
5545 
5546 		rr_base = (policy_rr_base_t) task_info_out;
5547 
5548 		if (task != kernel_task) {
5549 			error = KERN_INVALID_POLICY;
5550 			break;
5551 		}
5552 
5553 		rr_base->base_priority = task->priority;
5554 
5555 		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
5556 		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);
5557 
5558 		rr_base->quantum = (uint32_t)(quantum_ns / 1000 / 1000);
5559 
5560 		*task_info_count = POLICY_RR_BASE_COUNT;
5561 		break;
5562 	}
5563 
5564 	/* OBSOLETE */
5565 	case TASK_SCHED_TIMESHARE_INFO:
5566 	{
5567 		policy_timeshare_base_t ts_base;
5568 
5569 		if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT) {
5570 			error = KERN_INVALID_ARGUMENT;
5571 			break;
5572 		}
5573 
5574 		ts_base = (policy_timeshare_base_t) task_info_out;
5575 
5576 		if (task == kernel_task) {
5577 			error = KERN_INVALID_POLICY;
5578 			break;
5579 		}
5580 
5581 		ts_base->base_priority = task->priority;
5582 
5583 		*task_info_count = POLICY_TIMESHARE_BASE_COUNT;
5584 		break;
5585 	}
5586 
5587 	case TASK_SECURITY_TOKEN:
5588 	{
5589 		security_token_t        *sec_token_p;
5590 
5591 		if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
5592 			error = KERN_INVALID_ARGUMENT;
5593 			break;
5594 		}
5595 
5596 		sec_token_p = (security_token_t *) task_info_out;
5597 
5598 		*sec_token_p = *task_get_sec_token(task);
5599 
5600 		*task_info_count = TASK_SECURITY_TOKEN_COUNT;
5601 		break;
5602 	}
5603 
5604 	case TASK_AUDIT_TOKEN:
5605 	{
5606 		audit_token_t   *audit_token_p;
5607 
5608 		if (*task_info_count < TASK_AUDIT_TOKEN_COUNT) {
5609 			error = KERN_INVALID_ARGUMENT;
5610 			break;
5611 		}
5612 
5613 		audit_token_p = (audit_token_t *) task_info_out;
5614 
5615 		*audit_token_p = *task_get_audit_token(task);
5616 
5617 		*task_info_count = TASK_AUDIT_TOKEN_COUNT;
5618 		break;
5619 	}
5620 
5621 	case TASK_SCHED_INFO:
5622 		error = KERN_INVALID_ARGUMENT;
5623 		break;
5624 
5625 	case TASK_EVENTS_INFO:
5626 	{
5627 		task_events_info_t      events_info;
5628 		thread_t                thread;
5629 		uint64_t                n_syscalls_mach, n_syscalls_unix, n_csw;
5630 
5631 		if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
5632 			error = KERN_INVALID_ARGUMENT;
5633 			break;
5634 		}
5635 
5636 		events_info = (task_events_info_t) task_info_out;
5637 
5638 
5639 		events_info->faults = (int32_t) MIN(counter_load(&task->faults), INT32_MAX);
5640 		events_info->pageins = (int32_t) MIN(counter_load(&task->pageins), INT32_MAX);
5641 		events_info->cow_faults = (int32_t) MIN(counter_load(&task->cow_faults), INT32_MAX);
5642 		events_info->messages_sent = (int32_t) MIN(counter_load(&task->messages_sent), INT32_MAX);
5643 		events_info->messages_received = (int32_t) MIN(counter_load(&task->messages_received), INT32_MAX);
5644 
5645 		n_syscalls_mach = task->syscalls_mach;
5646 		n_syscalls_unix = task->syscalls_unix;
5647 		n_csw = task->c_switch;
5648 
5649 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
5650 			n_csw           += thread->c_switch;
5651 			n_syscalls_mach += thread->syscalls_mach;
5652 			n_syscalls_unix += thread->syscalls_unix;
5653 		}
5654 
5655 		events_info->syscalls_mach = (int32_t) MIN(n_syscalls_mach, INT32_MAX);
5656 		events_info->syscalls_unix = (int32_t) MIN(n_syscalls_unix, INT32_MAX);
5657 		events_info->csw = (int32_t) MIN(n_csw, INT32_MAX);
5658 
5659 		*task_info_count = TASK_EVENTS_INFO_COUNT;
5660 		break;
5661 	}
5662 	case TASK_AFFINITY_TAG_INFO:
5663 	{
5664 		if (*task_info_count < TASK_AFFINITY_TAG_INFO_COUNT) {
5665 			error = KERN_INVALID_ARGUMENT;
5666 			break;
5667 		}
5668 
5669 		error = task_affinity_info(task, task_info_out, task_info_count);
5670 		break;
5671 	}
5672 	case TASK_POWER_INFO:
5673 	{
5674 		if (*task_info_count < TASK_POWER_INFO_COUNT) {
5675 			error = KERN_INVALID_ARGUMENT;
5676 			break;
5677 		}
5678 
5679 		task_power_info_locked(task, (task_power_info_t)task_info_out, NULL, NULL, NULL);
5680 		break;
5681 	}
5682 
5683 	case TASK_POWER_INFO_V2:
5684 	{
5685 		if (*task_info_count < TASK_POWER_INFO_V2_COUNT_OLD) {
5686 			error = KERN_INVALID_ARGUMENT;
5687 			break;
5688 		}
5689 		task_power_info_v2_t tpiv2 = (task_power_info_v2_t) task_info_out;
5690 		task_power_info_locked(task, &tpiv2->cpu_energy, &tpiv2->gpu_energy, tpiv2, NULL);
5691 		break;
5692 	}
5693 
5694 	case TASK_VM_INFO:
5695 	case TASK_VM_INFO_PURGEABLE:
5696 	{
5697 		task_vm_info_t          vm_info;
5698 		vm_map_t                map;
5699 		ledger_amount_t         tmp_amount;
5700 
5701 		struct proc *p;
5702 		uint32_t platform, sdk;
5703 		p = current_proc();
5704 		platform = proc_platform(p);
5705 		sdk = proc_sdk(p);
5706 		if (original_task_info_count > TASK_VM_INFO_COUNT) {
5707 			/*
5708 			 * Some iOS apps pass an incorrect value for
5709 			 * task_info_count, expressed in number of bytes
5710 			 * instead of number of "natural_t" elements, which
5711 			 * can lead to binary compatibility issues (including
5712 			 * stack corruption) when the data structure is
5713 			 * expanded in the future.
5714 			 * Let's make this potential issue visible by
5715 			 * logging about it...
5716 			 */
5717 			if (!proc_is_simulated(p)) {
5718 				os_log(OS_LOG_DEFAULT, "%s[%d] task_info: possibly invalid "
5719 				    "task_info_count %d > TASK_VM_INFO_COUNT=%d on platform %d sdk "
5720 				    "%d.%d.%d - please use TASK_VM_INFO_COUNT",
5721 				    proc_name_address(p), proc_pid(p),
5722 				    original_task_info_count, TASK_VM_INFO_COUNT,
5723 				    platform, (sdk >> 16), ((sdk >> 8) & 0xff), (sdk & 0xff));
5724 			}
5725 			DTRACE_VM4(suspicious_task_vm_info_count,
5726 			    mach_msg_type_number_t, original_task_info_count,
5727 			    mach_msg_type_number_t, TASK_VM_INFO_COUNT,
5728 			    uint32_t, platform,
5729 			    uint32_t, sdk);
5730 		}
5731 #if __arm64__
5732 		if (original_task_info_count > TASK_VM_INFO_REV2_COUNT &&
5733 		    platform == PLATFORM_IOS &&
5734 		    sdk != 0 &&
5735 		    (sdk >> 16) <= 12) {
5736 			/*
5737 			 * Some iOS apps pass an incorrect value for
5738 			 * task_info_count, expressed in number of bytes
5739 			 * instead of number of "natural_t" elements.
5740 			 * For the sake of backwards binary compatibility
5741 			 * for apps built with an iOS12 or older SDK and using
5742 			 * the "rev2" data structure, let's fix task_info_count
5743 			 * for them, to avoid stomping past the actual end
5744 			 * of their buffer.
5745 			 */
5746 #if DEVELOPMENT || DEBUG
5747 			printf("%s:%d %d[%s] rdar://49484582 task_info_count %d -> %d "
5748 			    "platform %d sdk %d.%d.%d\n", __FUNCTION__, __LINE__, proc_pid(p),
5749 			    proc_name_address(p), original_task_info_count,
5750 			    TASK_VM_INFO_REV2_COUNT, platform, (sdk >> 16),
5751 			    ((sdk >> 8) & 0xff), (sdk & 0xff));
5752 #endif /* DEVELOPMENT || DEBUG */
5753 			DTRACE_VM4(workaround_task_vm_info_count,
5754 			    mach_msg_type_number_t, original_task_info_count,
5755 			    mach_msg_type_number_t, TASK_VM_INFO_REV2_COUNT,
5756 			    uint32_t, platform,
5757 			    uint32_t, sdk);
5758 			original_task_info_count = TASK_VM_INFO_REV2_COUNT;
5759 			*task_info_count = original_task_info_count;
5760 		}
5761 		if (original_task_info_count > TASK_VM_INFO_REV5_COUNT &&
5762 		    platform == PLATFORM_IOS &&
5763 		    sdk != 0 &&
5764 		    (sdk >> 16) <= 15) {
5765 			/*
5766 			 * Some iOS apps pass an incorrect value for
5767 			 * task_info_count, expressed in number of bytes
5768 			 * instead of number of "natural_t" elements.
5769 			 */
5770 			printf("%s:%d %d[%s] task_info_count=%d > TASK_VM_INFO_REV5_COUNT=%d "
5771 			    "platform %d sdk %d.%d.%d\n", __FUNCTION__, __LINE__, proc_pid(p),
5772 			    proc_name_address(p), original_task_info_count,
5773 			    TASK_VM_INFO_REV5_COUNT, platform, (sdk >> 16),
5774 			    ((sdk >> 8) & 0xff), (sdk & 0xff));
5775 			DTRACE_VM4(workaround_task_vm_info_count,
5776 			    mach_msg_type_number_t, original_task_info_count,
5777 			    mach_msg_type_number_t, TASK_VM_INFO_REV5_COUNT,
5778 			    uint32_t, platform,
5779 			    uint32_t, sdk);
5780 #if DEVELOPMENT || DEBUG
5781 			/*
5782 			 * For the sake of internal builds livability,
5783 			 * work around this user-space bug by capping the
5784 			 * buffer's size to what it was with the iOS15 SDK.
5785 			 */
5786 			original_task_info_count = TASK_VM_INFO_REV5_COUNT;
5787 			*task_info_count = original_task_info_count;
5788 #endif /* DEVELOPMENT || DEBUG */
5789 		}
5790 
5791 		if (original_task_info_count > TASK_VM_INFO_REV7_COUNT &&
5792 		    platform == PLATFORM_IOS &&
5793 		    sdk != 0 &&
5794 		    (sdk >> 16) == 17) {
5795 			/*
5796 			 * Some iOS apps still pass an incorrect value for
5797 			 * task_info_count, expressed in number of bytes
5798 			 * instead of number of "natural_t" elements.
5799 			 */
5800 			printf("%s:%d %d[%s] task_info_count=%d > TASK_VM_INFO_REV7_COUNT=%d "
5801 			    "platform %d sdk %d.%d.%d\n", __FUNCTION__, __LINE__, proc_pid(p),
5802 			    proc_name_address(p), original_task_info_count,
5803 			    TASK_VM_INFO_REV7_COUNT, platform, (sdk >> 16),
5804 			    ((sdk >> 8) & 0xff), (sdk & 0xff));
5805 			DTRACE_VM4(workaround_task_vm_info_count,
5806 			    mach_msg_type_number_t, original_task_info_count,
5807 			    mach_msg_type_number_t, TASK_VM_INFO_REV6_COUNT,
5808 			    uint32_t, platform,
5809 			    uint32_t, sdk);
5810 #if DEVELOPMENT || DEBUG
5811 			/*
5812 			 * For the sake of internal builds livability,
5813 			 * work around this user-space bug by capping the
5814 			 * buffer's size to what it was with the iOS15 and iOS16 SDKs.
5815 			 */
5816 			original_task_info_count = TASK_VM_INFO_REV6_COUNT;
5817 			*task_info_count = original_task_info_count;
5818 #endif /* DEVELOPMENT || DEBUG */
5819 		}
5820 #endif /* __arm64__ */
5821 
5822 		if (*task_info_count < TASK_VM_INFO_REV0_COUNT) {
5823 			error = KERN_INVALID_ARGUMENT;
5824 			break;
5825 		}
5826 
5827 		vm_info = (task_vm_info_t)task_info_out;
5828 
5829 		/*
5830 		 * Do not hold both the task and map locks,
5831 		 * so convert the task lock into a map reference,
5832 		 * drop the task lock, then lock the map.
5833 		 */
5834 		if (is_kernel_task) {
5835 			map = kernel_map;
5836 			task_unlock(task);
5837 			/* no lock, no reference */
5838 		} else {
5839 			map = task->map;
5840 			vm_map_reference(map);
5841 			task_unlock(task);
5842 			vm_map_lock_read(map);
5843 		}
5844 
5845 		vm_info->virtual_size = (typeof(vm_info->virtual_size))vm_map_adjusted_size(map);
5846 		vm_info->region_count = map->hdr.nentries;
5847 		vm_info->page_size = vm_map_page_size(map);
5848 
5849 		ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &vm_info->resident_size);
5850 		ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &vm_info->resident_size_peak);
5851 
5852 		vm_info->device = 0;
5853 		vm_info->device_peak = 0;
5854 		ledger_get_balance(task->ledger, task_ledgers.external, (ledger_amount_t *) &vm_info->external);
5855 		ledger_get_lifetime_max(task->ledger, task_ledgers.external, (ledger_amount_t *) &vm_info->external_peak);
5856 		ledger_get_balance(task->ledger, task_ledgers.internal, (ledger_amount_t *) &vm_info->internal);
5857 		ledger_get_lifetime_max(task->ledger, task_ledgers.internal, (ledger_amount_t *) &vm_info->internal_peak);
5858 		ledger_get_balance(task->ledger, task_ledgers.reusable, (ledger_amount_t *) &vm_info->reusable);
5859 		ledger_get_lifetime_max(task->ledger, task_ledgers.reusable, (ledger_amount_t *) &vm_info->reusable_peak);
5860 		ledger_get_balance(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t*) &vm_info->compressed);
5861 		ledger_get_lifetime_max(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t*) &vm_info->compressed_peak);
5862 		ledger_get_entries(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t*) &vm_info->compressed_lifetime, &tmp_amount);
5863 		ledger_get_balance(task->ledger, task_ledgers.neural_nofootprint_total, (ledger_amount_t *) &vm_info->ledger_tag_neural_nofootprint_total);
5864 		ledger_get_lifetime_max(task->ledger, task_ledgers.neural_nofootprint_total, (ledger_amount_t *) &vm_info->ledger_tag_neural_nofootprint_peak);
5865 
5866 		vm_info->purgeable_volatile_pmap = 0;
5867 		vm_info->purgeable_volatile_resident = 0;
5868 		vm_info->purgeable_volatile_virtual = 0;
5869 		if (is_kernel_task) {
5870 			/*
5871 			 * We do not maintain the detailed stats for the
5872 			 * kernel_pmap, so just count everything as
5873 			 * "internal"...
5874 			 */
5875 			vm_info->internal = vm_info->resident_size;
5876 			/*
5877 			 * ... but since the memory held by the VM compressor
5878 			 * in the kernel address space ought to be attributed
5879 			 * to user-space tasks, we subtract it from "internal"
5880 			 * to give memory reporting tools a more accurate idea
5881 			 * of what the kernel itself is actually using, instead
5882 			 * of making it look like the kernel is leaking memory
5883 			 * when the system is under memory pressure.
5884 			 */
5885 			vm_info->internal -= (VM_PAGE_COMPRESSOR_COUNT *
5886 			    PAGE_SIZE);
5887 		} else {
5888 			mach_vm_size_t  volatile_virtual_size;
5889 			mach_vm_size_t  volatile_resident_size;
5890 			mach_vm_size_t  volatile_compressed_size;
5891 			mach_vm_size_t  volatile_pmap_size;
5892 			mach_vm_size_t  volatile_compressed_pmap_size;
5893 			kern_return_t   kr;
5894 
5895 			if (flavor == TASK_VM_INFO_PURGEABLE) {
5896 				kr = vm_map_query_volatile(
5897 					map,
5898 					&volatile_virtual_size,
5899 					&volatile_resident_size,
5900 					&volatile_compressed_size,
5901 					&volatile_pmap_size,
5902 					&volatile_compressed_pmap_size);
5903 				if (kr == KERN_SUCCESS) {
5904 					vm_info->purgeable_volatile_pmap =
5905 					    volatile_pmap_size;
5906 					if (radar_20146450) {
5907 						vm_info->compressed -=
5908 						    volatile_compressed_pmap_size;
5909 					}
5910 					vm_info->purgeable_volatile_resident =
5911 					    volatile_resident_size;
5912 					vm_info->purgeable_volatile_virtual =
5913 					    volatile_virtual_size;
5914 				}
5915 			}
5916 		}
5917 		*task_info_count = TASK_VM_INFO_REV0_COUNT;
5918 
5919 		if (original_task_info_count >= TASK_VM_INFO_REV2_COUNT) {
5920 			/* must be captured while we still have the map lock */
5921 			vm_info->min_address = map->min_offset;
5922 			vm_info->max_address = map->max_offset;
5923 		}
5924 
5925 		/*
5926 		 * Done with vm map things, can drop the map lock and reference,
5927 		 * and take the task lock back.
5928 		 *
5929 		 * Re-validate that the task didn't die on us.
5930 		 */
5931 		if (!is_kernel_task) {
5932 			vm_map_unlock_read(map);
5933 			vm_map_deallocate(map);
5934 		}
5935 		map = VM_MAP_NULL;
5936 
5937 		task_lock(task);
5938 
5939 		if ((task != current_task()) && (!task->active)) {
5940 			error = KERN_INVALID_ARGUMENT;
5941 			break;
5942 		}
5943 
5944 		if (original_task_info_count >= TASK_VM_INFO_REV1_COUNT) {
5945 			vm_info->phys_footprint =
5946 			    (mach_vm_size_t) get_task_phys_footprint(task);
5947 			*task_info_count = TASK_VM_INFO_REV1_COUNT;
5948 		}
5949 		if (original_task_info_count >= TASK_VM_INFO_REV2_COUNT) {
5950 			/* data was captured above */
5951 			*task_info_count = TASK_VM_INFO_REV2_COUNT;
5952 		}
5953 
5954 		if (original_task_info_count >= TASK_VM_INFO_REV3_COUNT) {
5955 			ledger_get_lifetime_max(task->ledger,
5956 			    task_ledgers.phys_footprint,
5957 			    &vm_info->ledger_phys_footprint_peak);
5958 			ledger_get_balance(task->ledger,
5959 			    task_ledgers.purgeable_nonvolatile,
5960 			    &vm_info->ledger_purgeable_nonvolatile);
5961 			ledger_get_balance(task->ledger,
5962 			    task_ledgers.purgeable_nonvolatile_compressed,
5963 			    &vm_info->ledger_purgeable_novolatile_compressed);
5964 			ledger_get_balance(task->ledger,
5965 			    task_ledgers.purgeable_volatile,
5966 			    &vm_info->ledger_purgeable_volatile);
5967 			ledger_get_balance(task->ledger,
5968 			    task_ledgers.purgeable_volatile_compressed,
5969 			    &vm_info->ledger_purgeable_volatile_compressed);
5970 			ledger_get_balance(task->ledger,
5971 			    task_ledgers.network_nonvolatile,
5972 			    &vm_info->ledger_tag_network_nonvolatile);
5973 			ledger_get_balance(task->ledger,
5974 			    task_ledgers.network_nonvolatile_compressed,
5975 			    &vm_info->ledger_tag_network_nonvolatile_compressed);
5976 			ledger_get_balance(task->ledger,
5977 			    task_ledgers.network_volatile,
5978 			    &vm_info->ledger_tag_network_volatile);
5979 			ledger_get_balance(task->ledger,
5980 			    task_ledgers.network_volatile_compressed,
5981 			    &vm_info->ledger_tag_network_volatile_compressed);
5982 			ledger_get_balance(task->ledger,
5983 			    task_ledgers.media_footprint,
5984 			    &vm_info->ledger_tag_media_footprint);
5985 			ledger_get_balance(task->ledger,
5986 			    task_ledgers.media_footprint_compressed,
5987 			    &vm_info->ledger_tag_media_footprint_compressed);
5988 			ledger_get_balance(task->ledger,
5989 			    task_ledgers.media_nofootprint,
5990 			    &vm_info->ledger_tag_media_nofootprint);
5991 			ledger_get_balance(task->ledger,
5992 			    task_ledgers.media_nofootprint_compressed,
5993 			    &vm_info->ledger_tag_media_nofootprint_compressed);
5994 			ledger_get_balance(task->ledger,
5995 			    task_ledgers.graphics_footprint,
5996 			    &vm_info->ledger_tag_graphics_footprint);
5997 			ledger_get_balance(task->ledger,
5998 			    task_ledgers.graphics_footprint_compressed,
5999 			    &vm_info->ledger_tag_graphics_footprint_compressed);
6000 			ledger_get_balance(task->ledger,
6001 			    task_ledgers.graphics_nofootprint,
6002 			    &vm_info->ledger_tag_graphics_nofootprint);
6003 			ledger_get_balance(task->ledger,
6004 			    task_ledgers.graphics_nofootprint_compressed,
6005 			    &vm_info->ledger_tag_graphics_nofootprint_compressed);
6006 			ledger_get_balance(task->ledger,
6007 			    task_ledgers.neural_footprint,
6008 			    &vm_info->ledger_tag_neural_footprint);
6009 			ledger_get_balance(task->ledger,
6010 			    task_ledgers.neural_footprint_compressed,
6011 			    &vm_info->ledger_tag_neural_footprint_compressed);
6012 			ledger_get_balance(task->ledger,
6013 			    task_ledgers.neural_nofootprint,
6014 			    &vm_info->ledger_tag_neural_nofootprint);
6015 			ledger_get_balance(task->ledger,
6016 			    task_ledgers.neural_nofootprint_compressed,
6017 			    &vm_info->ledger_tag_neural_nofootprint_compressed);
6018 			*task_info_count = TASK_VM_INFO_REV3_COUNT;
6019 		}
6020 		if (original_task_info_count >= TASK_VM_INFO_REV4_COUNT) {
6021 			if (get_bsdtask_info(task)) {
6022 				vm_info->limit_bytes_remaining =
6023 				    memorystatus_available_memory_internal(get_bsdtask_info(task));
6024 			} else {
6025 				vm_info->limit_bytes_remaining = 0;
6026 			}
6027 			*task_info_count = TASK_VM_INFO_REV4_COUNT;
6028 		}
6029 		if (original_task_info_count >= TASK_VM_INFO_REV5_COUNT) {
6030 			thread_t thread;
6031 			uint64_t total = task->decompressions;
6032 			queue_iterate(&task->threads, thread, thread_t, task_threads) {
6033 				total += thread->decompressions;
6034 			}
6035 			vm_info->decompressions = (int32_t) MIN(total, INT32_MAX);
6036 			*task_info_count = TASK_VM_INFO_REV5_COUNT;
6037 		}
6038 		if (original_task_info_count >= TASK_VM_INFO_REV6_COUNT) {
6039 			ledger_get_balance(task->ledger, task_ledgers.swapins,
6040 			    &vm_info->ledger_swapins);
6041 			*task_info_count = TASK_VM_INFO_REV6_COUNT;
6042 		}
6043 		if (original_task_info_count >= TASK_VM_INFO_REV7_COUNT) {
6044 			ledger_get_balance(task->ledger,
6045 			    task_ledgers.neural_nofootprint_total,
6046 			    &vm_info->ledger_tag_neural_nofootprint_total);
6047 			ledger_get_lifetime_max(task->ledger,
6048 			    task_ledgers.neural_nofootprint_total,
6049 			    &vm_info->ledger_tag_neural_nofootprint_peak);
6050 			*task_info_count = TASK_VM_INFO_REV7_COUNT;
6051 		}
6052 
6053 		break;
6054 	}
6055 
6056 	case TASK_WAIT_STATE_INFO:
6057 	{
6058 		/*
6059 		 * Deprecated flavor. Currently allowing some results until all users
6060 		 * stop calling it. The results may not be accurate.
6061 		 */
6062 		task_wait_state_info_t  wait_state_info;
6063 		uint64_t total_sfi_ledger_val = 0;
6064 
6065 		if (*task_info_count < TASK_WAIT_STATE_INFO_COUNT) {
6066 			error = KERN_INVALID_ARGUMENT;
6067 			break;
6068 		}
6069 
6070 		wait_state_info = (task_wait_state_info_t) task_info_out;
6071 
6072 		wait_state_info->total_wait_state_time = 0;
6073 		bzero(wait_state_info->_reserved, sizeof(wait_state_info->_reserved));
6074 
6075 #if CONFIG_SCHED_SFI
6076 		int i, prev_lentry = -1;
6077 		int64_t  val_credit, val_debit;
6078 
6079 		for (i = 0; i < MAX_SFI_CLASS_ID; i++) {
6080 			val_credit = 0;
6081 			/*
6082 			 * checking with prev_lentry != entry ensures adjacent classes
6083 			 * which share the same ledger do not add wait times twice.
6084 			 * Note: Use ledger() call to get data for each individual sfi class.
6085 			 */
6086 			if (prev_lentry != task_ledgers.sfi_wait_times[i] &&
6087 			    KERN_SUCCESS == ledger_get_entries(task->ledger,
6088 			    task_ledgers.sfi_wait_times[i], &val_credit, &val_debit)) {
6089 				total_sfi_ledger_val += val_credit;
6090 			}
6091 			prev_lentry = task_ledgers.sfi_wait_times[i];
6092 		}
6093 
6094 #endif /* CONFIG_SCHED_SFI */
6095 		wait_state_info->total_wait_sfi_state_time = total_sfi_ledger_val;
6096 		*task_info_count = TASK_WAIT_STATE_INFO_COUNT;
6097 
6098 		break;
6099 	}
6100 	case TASK_VM_INFO_PURGEABLE_ACCOUNT:
6101 	{
6102 #if DEVELOPMENT || DEBUG
6103 		pvm_account_info_t      acnt_info;
6104 
6105 		if (*task_info_count < PVM_ACCOUNT_INFO_COUNT) {
6106 			error = KERN_INVALID_ARGUMENT;
6107 			break;
6108 		}
6109 
6110 		if (task_info_out == NULL) {
6111 			error = KERN_INVALID_ARGUMENT;
6112 			break;
6113 		}
6114 
6115 		acnt_info = (pvm_account_info_t) task_info_out;
6116 
6117 		error = vm_purgeable_account(task, acnt_info);
6118 
6119 		*task_info_count = PVM_ACCOUNT_INFO_COUNT;
6120 
6121 		break;
6122 #else /* DEVELOPMENT || DEBUG */
6123 		error = KERN_NOT_SUPPORTED;
6124 		break;
6125 #endif /* DEVELOPMENT || DEBUG */
6126 	}
6127 	case TASK_FLAGS_INFO:
6128 	{
6129 		task_flags_info_t               flags_info;
6130 
6131 		if (*task_info_count < TASK_FLAGS_INFO_COUNT) {
6132 			error = KERN_INVALID_ARGUMENT;
6133 			break;
6134 		}
6135 
6136 		flags_info = (task_flags_info_t)task_info_out;
6137 
6138 		/* only publish the 64-bit flag of the task */
6139 		flags_info->flags = task->t_flags & (TF_64B_ADDR | TF_64B_DATA);
6140 
6141 		*task_info_count = TASK_FLAGS_INFO_COUNT;
6142 		break;
6143 	}
6144 
6145 	case TASK_DEBUG_INFO_INTERNAL:
6146 	{
6147 #if DEVELOPMENT || DEBUG
6148 		task_debug_info_internal_t dbg_info;
6149 		ipc_space_t space = task->itk_space;
6150 		if (*task_info_count < TASK_DEBUG_INFO_INTERNAL_COUNT) {
6151 			error = KERN_NOT_SUPPORTED;
6152 			break;
6153 		}
6154 
6155 		if (task_info_out == NULL) {
6156 			error = KERN_INVALID_ARGUMENT;
6157 			break;
6158 		}
6159 		dbg_info = (task_debug_info_internal_t) task_info_out;
6160 		dbg_info->ipc_space_size = 0;
6161 
6162 		if (space) {
6163 			smr_ipc_enter();
6164 			ipc_entry_table_t table = smr_entered_load(&space->is_table);
6165 			if (table) {
6166 				dbg_info->ipc_space_size =
6167 				    ipc_entry_table_count(table);
6168 			}
6169 			smr_ipc_leave();
6170 		}
6171 
6172 		dbg_info->suspend_count = task->suspend_count;
6173 
6174 		error = KERN_SUCCESS;
6175 		*task_info_count = TASK_DEBUG_INFO_INTERNAL_COUNT;
6176 		break;
6177 #else /* DEVELOPMENT || DEBUG */
6178 		error = KERN_NOT_SUPPORTED;
6179 		break;
6180 #endif /* DEVELOPMENT || DEBUG */
6181 	}
6182 	case TASK_SUSPEND_STATS_INFO:
6183 	{
6184 #if CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG)
6185 		if (*task_info_count < TASK_SUSPEND_STATS_INFO_COUNT || task_info_out == NULL) {
6186 			error = KERN_INVALID_ARGUMENT;
6187 			break;
6188 		}
6189 		error = _task_get_suspend_stats_locked(task, (task_suspend_stats_t)task_info_out);
6190 		*task_info_count = TASK_SUSPEND_STATS_INFO_COUNT;
6191 		break;
6192 #else /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6193 		error = KERN_NOT_SUPPORTED;
6194 		break;
6195 #endif /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6196 	}
6197 	case TASK_SUSPEND_SOURCES_INFO:
6198 	{
6199 #if CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG)
6200 		if (*task_info_count < TASK_SUSPEND_SOURCES_INFO_COUNT || task_info_out == NULL) {
6201 			error = KERN_INVALID_ARGUMENT;
6202 			break;
6203 		}
6204 		error = _task_get_suspend_sources_locked(task, (task_suspend_source_t)task_info_out);
6205 		*task_info_count = TASK_SUSPEND_SOURCES_INFO_COUNT;
6206 		break;
6207 #else /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6208 		error = KERN_NOT_SUPPORTED;
6209 		break;
6210 #endif /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6211 	}
6212 	default:
6213 		error = KERN_INVALID_ARGUMENT;
6214 	}
6215 
6216 	task_unlock(task);
6217 	return error;
6218 }
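
/*
 * Illustrative sketch (user-space side, not compiled): task_info_count
 * is expressed in natural_t elements, not bytes -- the very mismatch
 * the TASK_VM_INFO workarounds above have to paper over.  Correct
 * usage looks like:
 */
#if 0
task_vm_info_data_t vm_info;
mach_msg_type_number_t count = TASK_VM_INFO_COUNT;  /* elements, not bytes */
kern_return_t kr = task_info(mach_task_self(), TASK_VM_INFO,
    (task_info_t)&vm_info, &count);
if (kr == KERN_SUCCESS) {
	/* count now holds the number of elements actually returned */
}
#endif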
6219 
6220 /*
6221  * task_info_from_user
6222  *
6223  * When task_info is called from user space,
6224  * this function executes as the MIG server side
6225  * instead of calling directly into task_info.
6226  * This makes it possible to perform additional security
6227  * checks on task_port.
6228  *
6229  * In the case of TASK_DYLD_INFO, we require the more
6230  * privileged task_read_port rather than the less-privileged task_name_port.
6231  *
6232  */
6233 kern_return_t
6234 task_info_from_user(
6235 	mach_port_t             task_port,
6236 	task_flavor_t           flavor,
6237 	task_info_t             task_info_out,
6238 	mach_msg_type_number_t  *task_info_count)
6239 {
6240 	task_t task;
6241 	kern_return_t ret;
6242 
6243 	if (flavor == TASK_DYLD_INFO) {
6244 		task = convert_port_to_task_read(task_port);
6245 	} else {
6246 		task = convert_port_to_task_name(task_port);
6247 	}
6248 
6249 	ret = task_info(task, flavor, task_info_out, task_info_count);
6250 
6251 	task_deallocate(task);
6252 
6253 	return ret;
6254 }
6255 
6256 /*
6257  * Routine: task_dyld_process_info_update_helper
6258  *
6259  * Release send rights in release_ports.
6260  *
6261  * If no active ports are found in the task's dyld notifier array, unset the magic value
6262  * in user space to indicate so.
6263  *
6264  * Condition:
6265  *      task's itk_lock is locked, and is unlocked upon return.
6266  *      Global g_dyldinfo_mtx is locked, and is unlocked upon return.
6267  */
6268 void
6269 task_dyld_process_info_update_helper(
6270 	task_t                  task,
6271 	size_t                  active_count,
6272 	vm_map_address_t        magic_addr,    /* a userspace address */
6273 	ipc_port_t             *release_ports,
6274 	size_t                  release_count)
6275 {
6276 	void *notifiers_ptr = NULL;
6277 
6278 	assert(release_count <= DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT);
6279 
6280 	if (active_count == 0) {
6281 		assert(task->itk_dyld_notify != NULL);
6282 		notifiers_ptr = task->itk_dyld_notify;
6283 		task->itk_dyld_notify = NULL;
6284 		itk_unlock(task);
6285 
6286 		kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
6287 		(void)copyoutmap_atomic32(task->map, MACH_PORT_NULL, magic_addr); /* unset magic */
6288 	} else {
6289 		itk_unlock(task);
6290 		(void)copyoutmap_atomic32(task->map, (mach_port_name_t)DYLD_PROCESS_INFO_NOTIFY_MAGIC,
6291 		    magic_addr);     /* reset magic */
6292 	}
6293 
6294 	lck_mtx_unlock(&g_dyldinfo_mtx);
6295 
6296 	for (size_t i = 0; i < release_count; i++) {
6297 		ipc_port_release_send(release_ports[i]);
6298 	}
6299 }
6300 
6301 /*
6302  * Routine: task_dyld_process_info_notify_register
6303  *
6304  * Insert a send right to target task's itk_dyld_notify array. Allocate kernel
6305  * memory for the array if it's the first port to be registered. Also cleanup
6306  * any dead rights found in the array.
6307  *
6308  * Consumes sright if this call returns KERN_SUCCESS; otherwise MIG will destroy it.
6309  *
6310  * Args:
6311  *     task:   Target task for the registration.
6312  *     sright: A send right.
6313  *
6314  * Returns:
6315  *     KERN_SUCCESS: Registration succeeded.
6316  *     KERN_INVALID_TASK: task is invalid.
6317  *     KERN_INVALID_RIGHT: sright is invalid.
6318  *     KERN_DENIED: Security policy denied this call.
6319  *     KERN_RESOURCE_SHORTAGE: Kernel memory allocation failed.
6320  *     KERN_NO_SPACE: No available notifier port slot left for this task.
6321  *     KERN_RIGHT_EXISTS: The notifier port is already registered and active.
6322  *
6323  *     Other error code see task_info().
6324  *
6325  * See Also:
6326  *     task_dyld_process_info_notify_get_trap() in mach_kernelrpc.c
6327  */
6328 kern_return_t
6329 task_dyld_process_info_notify_register(
6330 	task_t                  task,
6331 	ipc_port_t              sright)
6332 {
6333 	struct task_dyld_info dyld_info;
6334 	mach_msg_type_number_t info_count = TASK_DYLD_INFO_COUNT;
6335 	ipc_port_t release_ports[DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT];
6336 	uint32_t release_count = 0, active_count = 0;
6337 	mach_vm_address_t ports_addr; /* a user space address */
6338 	kern_return_t kr;
6339 	boolean_t right_exists = false;
6340 	ipc_port_t *notifiers_ptr = NULL;
6341 	ipc_port_t *portp;
6342 
6343 	if (task == TASK_NULL || task == kernel_task) {
6344 		return KERN_INVALID_TASK;
6345 	}
6346 
6347 	if (!IP_VALID(sright)) {
6348 		return KERN_INVALID_RIGHT;
6349 	}
6350 
6351 #if CONFIG_MACF
6352 	if (mac_task_check_dyld_process_info_notify_register()) {
6353 		return KERN_DENIED;
6354 	}
6355 #endif
6356 
6357 	kr = task_info(task, TASK_DYLD_INFO, (task_info_t)&dyld_info, &info_count);
6358 	if (kr) {
6359 		return kr;
6360 	}
6361 
6362 	if (dyld_info.all_image_info_format == TASK_DYLD_ALL_IMAGE_INFO_32) {
6363 		ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6364 		    offsetof(struct user32_dyld_all_image_infos, notifyMachPorts));
6365 	} else {
6366 		ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6367 		    offsetof(struct user64_dyld_all_image_infos, notifyMachPorts));
6368 	}
6369 
6370 retry:
6371 	if (task->itk_dyld_notify == NULL) {
6372 		notifiers_ptr = kalloc_type(ipc_port_t,
6373 		    DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT,
6374 		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
6375 	}
6376 
6377 	lck_mtx_lock(&g_dyldinfo_mtx);
6378 	itk_lock(task);
6379 
6380 	if (task->itk_dyld_notify == NULL) {
6381 		if (notifiers_ptr == NULL) {
6382 			itk_unlock(task);
6383 			lck_mtx_unlock(&g_dyldinfo_mtx);
6384 			goto retry;
6385 		}
6386 		task->itk_dyld_notify = notifiers_ptr;
6387 		notifiers_ptr = NULL;
6388 	}
6389 
6390 	assert(task->itk_dyld_notify != NULL);
6391 	/* First pass: clear dead names and check for duplicate registration */
6392 	for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
6393 		portp = &task->itk_dyld_notify[slot];
6394 		if (*portp != IPC_PORT_NULL && !ip_active(*portp)) {
6395 			release_ports[release_count++] = *portp;
6396 			*portp = IPC_PORT_NULL;
6397 		} else if (*portp == sright) {
6398 			/* the port is already registered and is active */
6399 			right_exists = true;
6400 		}
6401 
6402 		if (*portp != IPC_PORT_NULL) {
6403 			active_count++;
6404 		}
6405 	}
6406 
6407 	if (right_exists) {
6408 		/* skip second pass */
6409 		kr = KERN_RIGHT_EXISTS;
6410 		goto out;
6411 	}
6412 
6413 	/* Second pass: register the port */
6414 	kr = KERN_NO_SPACE;
6415 	for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
6416 		portp = &task->itk_dyld_notify[slot];
6417 		if (*portp == IPC_PORT_NULL) {
6418 			*portp = sright;
6419 			active_count++;
6420 			kr = KERN_SUCCESS;
6421 			break;
6422 		}
6423 	}
6424 
6425 out:
6426 	assert(active_count > 0);
6427 
6428 	task_dyld_process_info_update_helper(task, active_count,
6429 	    (vm_map_address_t)ports_addr, release_ports, release_count);
6430 	/* itk_lock, g_dyldinfo_mtx are unlocked upon return */
6431 
6432 	kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
6433 
6434 	return kr;
6435 }
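/*
 * Illustrative sketch (added for exposition, not part of the original
 * source): how an in-kernel caller that already holds a task reference
 * and a send right might drive the registration above. The wrapper name
 * and the origin of `task`/`sright` are hypothetical.
 */
#if 0
static void
dyld_notify_register_example(task_t task, ipc_port_t sright)
{
	kern_return_t kr;

	kr = task_dyld_process_info_notify_register(task, sright);
	switch (kr) {
	case KERN_SUCCESS:
		break;          /* port parked in itk_dyld_notify */
	case KERN_RIGHT_EXISTS:
		break;          /* same port was already registered and active */
	case KERN_NO_SPACE:
		break;          /* all DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT slots full */
	default:
		break;          /* KERN_INVALID_TASK, KERN_DENIED, ... */
	}
}
#endif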
6436 
6437 /*
6438  * Routine: task_dyld_process_info_notify_deregister
6439  *
6440  * Remove the send right in the target task's itk_dyld_notify array matching the receive
6441  * right name passed in. Deallocate the kernel memory for the array if it is the last port
6442  * to be deregistered, or if all ports have died. Also clean up any dead rights found in the array.
6443  *
6444  * Does not consume any reference.
6445  *
6446  * Args:
6447  *     task: Target task for the deregistration.
6448  *     rcv_name: The name denoting the receive right in caller's space.
6449  *
6450  * Returns:
6451  *     KERN_SUCCESS: A matching entry was found and deregistration succeeded.
6452  *     KERN_INVALID_TASK: task is invalid.
6453  *     KERN_INVALID_NAME: name is invalid.
6454  *     KERN_DENIED: Security policy denied this call.
6455  *     KERN_FAILURE: A matching entry is not found.
6456  *     KERN_INVALID_RIGHT: The name passed in does not represent a valid rcv right.
6457  *
6458  *     For other error codes, see task_info().
6459  *
6460  * See Also:
6461  *     task_dyld_process_info_notify_get_trap() in mach_kernelrpc.c
6462  */
6463 kern_return_t
6464 task_dyld_process_info_notify_deregister(
6465 	task_t                  task,
6466 	mach_port_name_t        rcv_name)
6467 {
6468 	struct task_dyld_info dyld_info;
6469 	mach_msg_type_number_t info_count = TASK_DYLD_INFO_COUNT;
6470 	ipc_port_t release_ports[DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT];
6471 	uint32_t release_count = 0, active_count = 0;
6472 	boolean_t port_found = false;
6473 	mach_vm_address_t ports_addr; /* a user space address */
6474 	ipc_port_t sright;
6475 	kern_return_t kr;
6476 	ipc_port_t *portp;
6477 
6478 	if (task == TASK_NULL || task == kernel_task) {
6479 		return KERN_INVALID_TASK;
6480 	}
6481 
6482 	if (!MACH_PORT_VALID(rcv_name)) {
6483 		return KERN_INVALID_NAME;
6484 	}
6485 
6486 #if CONFIG_MACF
6487 	if (mac_task_check_dyld_process_info_notify_register()) {
6488 		return KERN_DENIED;
6489 	}
6490 #endif
6491 
6492 	kr = task_info(task, TASK_DYLD_INFO, (task_info_t)&dyld_info, &info_count);
6493 	if (kr) {
6494 		return kr;
6495 	}
6496 
6497 	if (dyld_info.all_image_info_format == TASK_DYLD_ALL_IMAGE_INFO_32) {
6498 		ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6499 		    offsetof(struct user32_dyld_all_image_infos, notifyMachPorts));
6500 	} else {
6501 		ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6502 		    offsetof(struct user64_dyld_all_image_infos, notifyMachPorts));
6503 	}
6504 
6505 	kr = ipc_port_translate_receive(current_space(), rcv_name, &sright); /* does not produce port ref */
6506 	if (kr) {
6507 		return KERN_INVALID_RIGHT;
6508 	}
6509 
6510 	ip_reference(sright);
6511 	ip_mq_unlock(sright);
6512 
6513 	assert(sright != IPC_PORT_NULL);
6514 
6515 	lck_mtx_lock(&g_dyldinfo_mtx);
6516 	itk_lock(task);
6517 
6518 	if (task->itk_dyld_notify == NULL) {
6519 		itk_unlock(task);
6520 		lck_mtx_unlock(&g_dyldinfo_mtx);
6521 		ip_release(sright);
6522 		return KERN_FAILURE;
6523 	}
6524 
6525 	for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
6526 		portp = &task->itk_dyld_notify[slot];
6527 		if (*portp == sright) {
6528 			release_ports[release_count++] = *portp;
6529 			*portp = IPC_PORT_NULL;
6530 			port_found = true;
6531 		} else if ((*portp != IPC_PORT_NULL && !ip_active(*portp))) {
6532 			release_ports[release_count++] = *portp;
6533 			*portp = IPC_PORT_NULL;
6534 		}
6535 
6536 		if (*portp != IPC_PORT_NULL) {
6537 			active_count++;
6538 		}
6539 	}
6540 
6541 	task_dyld_process_info_update_helper(task, active_count,
6542 	    (vm_map_address_t)ports_addr, release_ports, release_count);
6543 	/* itk_lock, g_dyldinfo_mtx are unlocked upon return */
6544 
6545 	ip_release(sright);
6546 
6547 	return port_found ? KERN_SUCCESS : KERN_FAILURE;
6548 }
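/*
 * Illustrative sketch (added for exposition, not part of the original
 * source): deregistering by the receive-right name held in the calling
 * task's IPC space, per the contract documented above. The wrapper name
 * is hypothetical, and `rcv_name` is assumed to come from user space.
 */
#if 0
static kern_return_t
dyld_notify_deregister_example(task_t task, mach_port_name_t rcv_name)
{
	/* Returns KERN_FAILURE if no matching entry is found. */
	return task_dyld_process_info_notify_deregister(task, rcv_name);
}
#endif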
6549 
6550 /*
6551  *	task_power_info
6552  *
6553  *	Returns power stats for the task.
6554  *	Note: Called with task locked.
6555  */
6556 void
6557 task_power_info_locked(
6558 	task_t                        task,
6559 	task_power_info_t             info,
6560 	gpu_energy_data_t             ginfo,
6561 	task_power_info_v2_t          infov2,
6562 	struct task_power_info_extra *extra_info)
6563 {
6564 	thread_t                thread;
6565 	ledger_amount_t         tmp;
6566 
6567 	uint64_t                runnable_time_sum = 0;
6568 
6569 	task_lock_assert_owned(task);
6570 
6571 	ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
6572 	    (ledger_amount_t *)&info->task_interrupt_wakeups, &tmp);
6573 	ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
6574 	    (ledger_amount_t *)&info->task_platform_idle_wakeups, &tmp);
6575 
6576 	info->task_timer_wakeups_bin_1 = task->task_timer_wakeups_bin_1;
6577 	info->task_timer_wakeups_bin_2 = task->task_timer_wakeups_bin_2;
6578 
6579 	struct recount_usage usage = { 0 };
6580 	struct recount_usage usage_perf = { 0 };
6581 	recount_task_usage_perf_only(task, &usage, &usage_perf);
6582 
6583 	info->total_user = usage.ru_metrics[RCT_LVL_USER].rm_time_mach;
6584 	info->total_system = recount_usage_system_time_mach(&usage);
6585 	runnable_time_sum = task->total_runnable_time;
6586 
6587 	if (ginfo) {
6588 		ginfo->task_gpu_utilisation = task->task_gpu_ns;
6589 	}
6590 
6591 	if (infov2) {
6592 		infov2->task_ptime = recount_usage_time_mach(&usage_perf);
6593 		infov2->task_pset_switches = task->ps_switch;
6594 #if CONFIG_PERVASIVE_ENERGY
6595 		infov2->task_energy = usage.ru_energy_nj;
6596 #endif /* CONFIG_PERVASIVE_ENERGY */
6597 	}
6598 
6599 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
6600 		spl_t x;
6601 
6602 		if (thread->options & TH_OPT_IDLE_THREAD) {
6603 			continue;
6604 		}
6605 
6606 		x = splsched();
6607 		thread_lock(thread);
6608 
6609 		info->task_timer_wakeups_bin_1 += thread->thread_timer_wakeups_bin_1;
6610 		info->task_timer_wakeups_bin_2 += thread->thread_timer_wakeups_bin_2;
6611 
6612 		if (infov2) {
6613 			infov2->task_pset_switches += thread->ps_switch;
6614 		}
6615 
6616 		runnable_time_sum += timer_grab(&thread->runnable_timer);
6617 
6618 		if (ginfo) {
6619 			ginfo->task_gpu_utilisation += ml_gpu_stat(thread);
6620 		}
6621 		thread_unlock(thread);
6622 		splx(x);
6623 	}
6624 
6625 	if (extra_info) {
6626 		extra_info->runnable_time = runnable_time_sum;
6627 #if CONFIG_PERVASIVE_CPI
6628 		extra_info->cycles = recount_usage_cycles(&usage);
6629 		extra_info->instructions = recount_usage_instructions(&usage);
6630 		extra_info->pcycles = recount_usage_cycles(&usage_perf);
6631 		extra_info->pinstructions = recount_usage_instructions(&usage_perf);
6632 		extra_info->user_ptime = usage_perf.ru_metrics[RCT_LVL_USER].rm_time_mach;
6633 		extra_info->system_ptime = recount_usage_system_time_mach(&usage_perf);
6634 #endif // CONFIG_PERVASIVE_CPI
6635 #if CONFIG_PERVASIVE_ENERGY
6636 		extra_info->energy = usage.ru_energy_nj;
6637 		extra_info->penergy = usage_perf.ru_energy_nj;
6638 #endif // CONFIG_PERVASIVE_ENERGY
6639 #if RECOUNT_SECURE_METRICS
6640 		if (PE_i_can_has_debugger(NULL)) {
6641 			extra_info->secure_time = usage.ru_metrics[RCT_LVL_SECURE].rm_time_mach;
6642 			extra_info->secure_ptime = usage_perf.ru_metrics[RCT_LVL_SECURE].rm_time_mach;
6643 		}
6644 #endif // RECOUNT_SECURE_METRICS
6645 	}
6646 }
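/*
 * Illustrative sketch (added for exposition, not part of the original
 * source): task_power_info_locked() must be called with the task lock
 * held, and the optional out-parameters may be NULL. The wrapper name
 * is hypothetical.
 */
#if 0
static void
power_info_example(task_t task)
{
	struct task_power_info info = { 0 };

	task_lock(task);
	task_power_info_locked(task, &info, NULL, NULL, NULL);
	task_unlock(task);
	/* info.total_user / info.total_system now hold Mach-time totals. */
}
#endif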
6647 
6648 /*
6649  *	task_gpu_utilisation
6650  *
6651  *	Returns the total gpu time used by all the threads of the task
6652  *  (both dead and alive).
6653  */
6654 uint64_t
6655 task_gpu_utilisation(
6656 	task_t  task)
6657 {
6658 	uint64_t gpu_time = 0;
6659 #if defined(__x86_64__)
6660 	thread_t thread;
6661 
6662 	task_lock(task);
6663 	gpu_time += task->task_gpu_ns;
6664 
6665 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
6666 		spl_t x;
6667 		x = splsched();
6668 		thread_lock(thread);
6669 		gpu_time += ml_gpu_stat(thread);
6670 		thread_unlock(thread);
6671 		splx(x);
6672 	}
6673 
6674 	task_unlock(task);
6675 #else /* defined(__x86_64__) */
6676 	/* silence compiler warning */
6677 	(void)task;
6678 #endif /* defined(__x86_64__) */
6679 	return gpu_time;
6680 }
6681 
6682 /* This function updates the cpu time in the arrays for each
6683  * effective and requested QoS class
6684  */
6685 void
6686 task_update_cpu_time_qos_stats(
6687 	task_t  task,
6688 	uint64_t *eqos_stats,
6689 	uint64_t *rqos_stats)
6690 {
6691 	if (!eqos_stats && !rqos_stats) {
6692 		return;
6693 	}
6694 
6695 	task_lock(task);
6696 	thread_t thread;
6697 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
6698 		if (thread->options & TH_OPT_IDLE_THREAD) {
6699 			continue;
6700 		}
6701 
6702 		thread_update_qos_cpu_time(thread);
6703 	}
6704 
6705 	if (eqos_stats) {
6706 		eqos_stats[THREAD_QOS_DEFAULT] += task->cpu_time_eqos_stats.cpu_time_qos_default;
6707 		eqos_stats[THREAD_QOS_MAINTENANCE] += task->cpu_time_eqos_stats.cpu_time_qos_maintenance;
6708 		eqos_stats[THREAD_QOS_BACKGROUND] += task->cpu_time_eqos_stats.cpu_time_qos_background;
6709 		eqos_stats[THREAD_QOS_UTILITY] += task->cpu_time_eqos_stats.cpu_time_qos_utility;
6710 		eqos_stats[THREAD_QOS_LEGACY] += task->cpu_time_eqos_stats.cpu_time_qos_legacy;
6711 		eqos_stats[THREAD_QOS_USER_INITIATED] += task->cpu_time_eqos_stats.cpu_time_qos_user_initiated;
6712 		eqos_stats[THREAD_QOS_USER_INTERACTIVE] += task->cpu_time_eqos_stats.cpu_time_qos_user_interactive;
6713 	}
6714 
6715 	if (rqos_stats) {
6716 		rqos_stats[THREAD_QOS_DEFAULT] += task->cpu_time_rqos_stats.cpu_time_qos_default;
6717 		rqos_stats[THREAD_QOS_MAINTENANCE] += task->cpu_time_rqos_stats.cpu_time_qos_maintenance;
6718 		rqos_stats[THREAD_QOS_BACKGROUND] += task->cpu_time_rqos_stats.cpu_time_qos_background;
6719 		rqos_stats[THREAD_QOS_UTILITY] += task->cpu_time_rqos_stats.cpu_time_qos_utility;
6720 		rqos_stats[THREAD_QOS_LEGACY] += task->cpu_time_rqos_stats.cpu_time_qos_legacy;
6721 		rqos_stats[THREAD_QOS_USER_INITIATED] += task->cpu_time_rqos_stats.cpu_time_qos_user_initiated;
6722 		rqos_stats[THREAD_QOS_USER_INTERACTIVE] += task->cpu_time_rqos_stats.cpu_time_qos_user_interactive;
6723 	}
6724 
6725 	task_unlock(task);
6726 }
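/*
 * Illustrative sketch (added for exposition, not part of the original
 * source): callers pass arrays indexed by the THREAD_QOS_* constants
 * used above; values accumulate, so zero-initialize them first. The
 * THREAD_QOS_LAST bound is assumed from the QoS indices referenced in
 * the function, and the wrapper name is hypothetical.
 */
#if 0
static void
qos_stats_example(task_t task)
{
	uint64_t eqos[THREAD_QOS_LAST] = { 0 };  /* effective QoS cpu time */
	uint64_t rqos[THREAD_QOS_LAST] = { 0 };  /* requested QoS cpu time */

	task_update_cpu_time_qos_stats(task, eqos, rqos);
}
#endif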
6727 
6728 kern_return_t
6729 task_purgable_info(
6730 	task_t                  task,
6731 	task_purgable_info_t    *stats)
6732 {
6733 	if (task == TASK_NULL || stats == NULL) {
6734 		return KERN_INVALID_ARGUMENT;
6735 	}
6736 	/* Take task reference */
6737 	task_reference(task);
6738 	vm_purgeable_stats((vm_purgeable_info_t)stats, task);
6739 	/* Drop task reference */
6740 	task_deallocate(task);
6741 	return KERN_SUCCESS;
6742 }
6743 
6744 void
6745 task_vtimer_set(
6746 	task_t          task,
6747 	integer_t       which)
6748 {
6749 	thread_t        thread;
6750 	spl_t           x;
6751 
6752 	task_lock(task);
6753 
6754 	task->vtimers |= which;
6755 
6756 	switch (which) {
6757 	case TASK_VTIMER_USER:
6758 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
6759 			x = splsched();
6760 			thread_lock(thread);
6761 			struct recount_times_mach times = recount_thread_times(thread);
6762 			thread->vtimer_user_save = times.rtm_user;
6763 			thread_unlock(thread);
6764 			splx(x);
6765 		}
6766 		break;
6767 
6768 	case TASK_VTIMER_PROF:
6769 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
6770 			x = splsched();
6771 			thread_lock(thread);
6772 			thread->vtimer_prof_save = recount_thread_time_mach(thread);
6773 			thread_unlock(thread);
6774 			splx(x);
6775 		}
6776 		break;
6777 
6778 	case TASK_VTIMER_RLIM:
6779 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
6780 			x = splsched();
6781 			thread_lock(thread);
6782 			thread->vtimer_rlim_save = recount_thread_time_mach(thread);
6783 			thread_unlock(thread);
6784 			splx(x);
6785 		}
6786 		break;
6787 	}
6788 
6789 	task_unlock(task);
6790 }
6791 
6792 void
6793 task_vtimer_clear(
6794 	task_t          task,
6795 	integer_t       which)
6796 {
6797 	task_lock(task);
6798 
6799 	task->vtimers &= ~which;
6800 
6801 	task_unlock(task);
6802 }
6803 
6804 void
6805 task_vtimer_update(
6806 	__unused
6807 	task_t          task,
6808 	integer_t       which,
6809 	uint32_t        *microsecs)
6810 {
6811 	thread_t        thread = current_thread();
6812 	uint32_t        tdelt = 0;
6813 	clock_sec_t     secs = 0;
6814 	uint64_t        tsum;
6815 
6816 	assert(task == current_task());
6817 
6818 	spl_t s = splsched();
6819 	thread_lock(thread);
6820 
6821 	if ((task->vtimers & which) != (uint32_t)which) {
6822 		thread_unlock(thread);
6823 		splx(s);
6824 		return;
6825 	}
6826 
6827 	switch (which) {
6828 	case TASK_VTIMER_USER:;
6829 		struct recount_times_mach times = recount_thread_times(thread);
6830 		tsum = times.rtm_user;
6831 		tdelt = (uint32_t)(tsum - thread->vtimer_user_save);
6832 		thread->vtimer_user_save = tsum;
6833 		absolutetime_to_microtime(tdelt, &secs, microsecs);
6834 		break;
6835 
6836 	case TASK_VTIMER_PROF:
6837 		tsum = recount_current_thread_time_mach();
6838 		tdelt = (uint32_t)(tsum - thread->vtimer_prof_save);
6839 		absolutetime_to_microtime(tdelt, &secs, microsecs);
6840 		/* if the time delta is smaller than a usec, ignore */
6841 		if (*microsecs != 0) {
6842 			thread->vtimer_prof_save = tsum;
6843 		}
6844 		break;
6845 
6846 	case TASK_VTIMER_RLIM:
6847 		tsum = recount_current_thread_time_mach();
6848 		tdelt = (uint32_t)(tsum - thread->vtimer_rlim_save);
6849 		thread->vtimer_rlim_save = tsum;
6850 		absolutetime_to_microtime(tdelt, &secs, microsecs);
6851 		break;
6852 	}
6853 
6854 	thread_unlock(thread);
6855 	splx(s);
6856 }
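/*
 * Illustrative sketch (added for exposition, not part of the original
 * source): the vtimer set/update/clear lifecycle for one timer class,
 * driven from a thread in the target task (task_vtimer_update() asserts
 * task == current_task()). The wrapper name is hypothetical.
 */
#if 0
static void
vtimer_example(void)
{
	task_t   task  = current_task();
	uint32_t usecs = 0;

	task_vtimer_set(task, TASK_VTIMER_USER);   /* snapshot per-thread user time */
	/* ... run for a while ... */
	task_vtimer_update(task, TASK_VTIMER_USER, &usecs);
	/* usecs holds the user-time delta since the snapshot / last update */
	task_vtimer_clear(task, TASK_VTIMER_USER);
}
#endif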
6857 
6858 uint64_t
6859 get_task_dispatchqueue_offset(
6860 	task_t          task)
6861 {
6862 	return task->dispatchqueue_offset;
6863 }
6864 
6865 void
6866 task_synchronizer_destroy_all(task_t task)
6867 {
6868 	/*
6869 	 *  Destroy owned semaphores
6870 	 */
6871 	semaphore_destroy_all(task);
6872 }
6873 
6874 /*
6875  * Install default (machine-dependent) initial thread state
6876  * on the task.  Subsequent thread creation will have this initial
6877  * state set on the thread by machine_thread_inherit_taskwide().
6878  * Flavors and structures are exactly the same as those to thread_set_state()
6879  */
6880 kern_return_t
6881 task_set_state(
6882 	task_t task,
6883 	int flavor,
6884 	thread_state_t state,
6885 	mach_msg_type_number_t state_count)
6886 {
6887 	kern_return_t ret;
6888 
6889 	if (task == TASK_NULL) {
6890 		return KERN_INVALID_ARGUMENT;
6891 	}
6892 
6893 	task_lock(task);
6894 
6895 	if (!task->active) {
6896 		task_unlock(task);
6897 		return KERN_FAILURE;
6898 	}
6899 
6900 	ret = machine_task_set_state(task, flavor, state, state_count);
6901 
6902 	task_unlock(task);
6903 	return ret;
6904 }
6905 
6906 /*
6907  * Examine the default (machine-dependent) initial thread state
6908  * on the task, as set by task_set_state().  Flavors and structures
6909  * are exactly the same as those passed to thread_get_state().
6910  */
6911 kern_return_t
6912 task_get_state(
6913 	task_t  task,
6914 	int     flavor,
6915 	thread_state_t state,
6916 	mach_msg_type_number_t *state_count)
6917 {
6918 	kern_return_t ret;
6919 
6920 	if (task == TASK_NULL) {
6921 		return KERN_INVALID_ARGUMENT;
6922 	}
6923 
6924 	task_lock(task);
6925 
6926 	if (!task->active) {
6927 		task_unlock(task);
6928 		return KERN_FAILURE;
6929 	}
6930 
6931 	ret = machine_task_get_state(task, flavor, state, state_count);
6932 
6933 	task_unlock(task);
6934 	return ret;
6935 }
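/*
 * Illustrative sketch (added for exposition, not part of the original
 * source): a set/get round trip of the default thread state. `flavor`,
 * `state`, and `count` follow the thread_set_state()/thread_get_state()
 * conventions noted above; the wrapper name is hypothetical.
 */
#if 0
static kern_return_t
task_state_roundtrip_example(task_t task, int flavor,
    thread_state_t state, mach_msg_type_number_t count)
{
	kern_return_t kr;

	kr = task_set_state(task, flavor, state, count);
	if (kr == KERN_SUCCESS) {
		kr = task_get_state(task, flavor, state, &count);
	}
	return kr;
}
#endif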
6936 
6937 
6938 static kern_return_t __attribute__((noinline, not_tail_called))
6939 PROC_VIOLATED_GUARD__SEND_EXC_GUARD(
6940 	mach_exception_code_t code,
6941 	mach_exception_subcode_t subcode,
6942 	void *reason,
6943 	boolean_t backtrace_only)
6944 {
6945 #ifdef MACH_BSD
6946 	if (1 == proc_selfpid()) {
6947 		return KERN_NOT_SUPPORTED;              // initproc is immune
6948 	}
6949 #endif
6950 	mach_exception_data_type_t codes[EXCEPTION_CODE_MAX] = {
6951 		[0] = code,
6952 		[1] = subcode,
6953 	};
6954 	task_t task = current_task();
6955 	kern_return_t kr;
6956 	void *bsd_info = get_bsdtask_info(task);
6957 
6958 	/* (See jetsam-related comments below) */
6959 
6960 	proc_memstat_skip(bsd_info, TRUE);
6961 	kr = task_enqueue_exception_with_corpse(task, EXC_GUARD, codes, 2, reason, backtrace_only);
6962 	proc_memstat_skip(bsd_info, FALSE);
6963 	return kr;
6964 }
6965 
6966 kern_return_t
6967 task_violated_guard(
6968 	mach_exception_code_t code,
6969 	mach_exception_subcode_t subcode,
6970 	void *reason,
6971 	bool backtrace_only)
6972 {
6973 	return PROC_VIOLATED_GUARD__SEND_EXC_GUARD(code, subcode, reason, backtrace_only);
6974 }
6975 
6976 
6977 #if CONFIG_MEMORYSTATUS
6978 
6979 boolean_t
6980 task_get_memlimit_is_active(task_t task)
6981 {
6982 	assert(task != NULL);
6983 
6984 	if (task->memlimit_is_active == 1) {
6985 		return TRUE;
6986 	} else {
6987 		return FALSE;
6988 	}
6989 }
6990 
6991 void
6992 task_set_memlimit_is_active(task_t task, boolean_t memlimit_is_active)
6993 {
6994 	assert(task != NULL);
6995 
6996 	if (memlimit_is_active) {
6997 		task->memlimit_is_active = 1;
6998 	} else {
6999 		task->memlimit_is_active = 0;
7000 	}
7001 }
7002 
7003 boolean_t
7004 task_get_memlimit_is_fatal(task_t task)
7005 {
7006 	assert(task != NULL);
7007 
7008 	if (task->memlimit_is_fatal == 1) {
7009 		return TRUE;
7010 	} else {
7011 		return FALSE;
7012 	}
7013 }
7014 
7015 void
7016 task_set_memlimit_is_fatal(task_t task, boolean_t memlimit_is_fatal)
7017 {
7018 	assert(task != NULL);
7019 
7020 	if (memlimit_is_fatal) {
7021 		task->memlimit_is_fatal = 1;
7022 	} else {
7023 		task->memlimit_is_fatal = 0;
7024 	}
7025 }
7026 
7027 uint64_t
7028 task_get_dirty_start(task_t task)
7029 {
7030 	return task->memstat_dirty_start;
7031 }
7032 
7033 void
7034 task_set_dirty_start(task_t task, uint64_t start)
7035 {
7036 	task_lock(task);
7037 	task->memstat_dirty_start = start;
7038 	task_unlock(task);
7039 }
7040 
7041 boolean_t
7042 task_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active)
7043 {
7044 	boolean_t triggered = FALSE;
7045 
7046 	assert(task == current_task());
7047 
7048 	/*
7049 	 * Returns true if the task has already triggered an exc_resource exception.
7050 	 */
7051 
7052 	if (memlimit_is_active) {
7053 		triggered = (task->memlimit_active_exc_resource ? TRUE : FALSE);
7054 	} else {
7055 		triggered = (task->memlimit_inactive_exc_resource ? TRUE : FALSE);
7056 	}
7057 
7058 	return triggered;
7059 }
7060 
7061 void
7062 task_mark_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active)
7063 {
7064 	assert(task == current_task());
7065 
7066 	/*
7067 	 * We allow one exc_resource per process per active/inactive limit.
7068 	 * The limit's fatal attribute does not come into play.
7069 	 */
7070 
7071 	if (memlimit_is_active) {
7072 		task->memlimit_active_exc_resource = 1;
7073 	} else {
7074 		task->memlimit_inactive_exc_resource = 1;
7075 	}
7076 }
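/*
 * Illustrative sketch (added for exposition, not part of the original
 * source): the once-per-limit gating these two helpers implement, as
 * used later by task_process_crossed_limit_no_diag(). The wrapper name
 * is hypothetical; both helpers assert task == current_task().
 */
#if 0
static void
exc_resource_once_example(task_t task, boolean_t memlimit_is_active)
{
	if (!task_has_triggered_exc_resource(task, memlimit_is_active)) {
		/* ... deliver the EXC_RESOURCE exception exactly once ... */
		task_mark_has_triggered_exc_resource(task, memlimit_is_active);
	}
}
#endif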
7077 
7078 #define HWM_USERCORE_MINSPACE 250 // free space (in MB) required *after* core file creation
7079 
7080 void __attribute__((noinline))
7081 PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, send_exec_resource_options_t exception_options)
7082 {
7083 	task_t                                          task            = current_task();
7084 	int                                                     pid         = 0;
7085 	const char                                      *procname       = "unknown";
7086 	mach_exception_data_type_t      code[EXCEPTION_CODE_MAX];
7087 	boolean_t send_sync_exc_resource = FALSE;
7088 	void *cur_bsd_info = get_bsdtask_info(current_task());
7089 
7090 #ifdef MACH_BSD
7091 	pid = proc_selfpid();
7092 
7093 	if (pid == 1) {
7094 		/*
7095 		 * Cannot have ReportCrash analyzing
7096 		 * a suspended initproc.
7097 		 */
7098 		return;
7099 	}
7100 
7101 	if (cur_bsd_info != NULL) {
7102 		procname = proc_name_address(cur_bsd_info);
7103 		send_sync_exc_resource = proc_send_synchronous_EXC_RESOURCE(cur_bsd_info);
7104 	}
7105 #endif
7106 #if CONFIG_COREDUMP
7107 	if (hwm_user_cores) {
7108 		int                             error;
7109 		uint64_t                starttime, end;
7110 		clock_sec_t             secs = 0;
7111 		uint32_t                microsecs = 0;
7112 
7113 		starttime = mach_absolute_time();
7114 		/*
7115 		 * Trigger a coredump of this process. Don't proceed unless we know we won't
7116 		 * be filling up the disk; and ignore the core size resource limit for this
7117 		 * core file.
7118 		 */
7119 		if ((error = coredump(cur_bsd_info, HWM_USERCORE_MINSPACE, COREDUMP_IGNORE_ULIMIT)) != 0) {
7120 			printf("couldn't take coredump of %s[%d]: %d\n", procname, pid, error);
7121 		}
7122 		/*
7123 		 * coredump() leaves the task suspended.
7124 		 */
7125 		task_resume_internal(current_task());
7126 
7127 		end = mach_absolute_time();
7128 		absolutetime_to_microtime(end - starttime, &secs, &microsecs);
7129 		printf("coredump of %s[%d] taken in %d secs %d microsecs\n",
7130 		    proc_name_address(cur_bsd_info), pid, (int)secs, microsecs);
7131 	}
7132 #endif /* CONFIG_COREDUMP */
7133 
7134 	if (disable_exc_resource) {
7135 		printf("process %s[%d] crossed memory high watermark (%d MB); EXC_RESOURCE "
7136 		    "suppressed by a boot-arg.\n", procname, pid, max_footprint_mb);
7137 		return;
7138 	}
7139 	printf("process %s [%d] crossed memory %s (%d MB); EXC_RESOURCE "
7140 	    "\n", procname, pid, (!(exception_options & EXEC_RESOURCE_DIAGNOSTIC) ? "high watermark" : "diagnostics limit"), max_footprint_mb);
7141 
7142 	/*
7143 	 * A task that has triggered an EXC_RESOURCE should not be
7144 	 * jetsammed when the device is under memory pressure.  Here
7145 	 * we set the P_MEMSTAT_SKIP flag so that the process
7146 	 * will be skipped if the memorystatus_thread wakes up.
7147 	 *
7148 	 * This is a debugging aid to ensure we can get a corpse before
7149 	 * the jetsam thread kills the process.
7150 	 * Note that proc_memstat_skip is a no-op on release kernels.
7151 	 */
7152 	proc_memstat_skip(cur_bsd_info, TRUE);
7153 
7154 	code[0] = code[1] = 0;
7155 	EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_MEMORY);
7156 	/*
7157 	 * Regardless of whether there was a diag memlimit violation, fatal exceptions are always
7158 	 * reported as high watermarks. In other words, if both a diag limit and a watermark are set
7159 	 * and the violation is for the watermark limit, a watermark is reported.
7160 	 */
7161 	if (!(exception_options & EXEC_RESOURCE_FATAL)) {
7162 		EXC_RESOURCE_ENCODE_FLAVOR(code[0], !(exception_options & EXEC_RESOURCE_DIAGNOSTIC)  ? FLAVOR_HIGH_WATERMARK : FLAVOR_DIAG_MEMLIMIT);
7163 	} else {
7164 		EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_HIGH_WATERMARK );
7165 	}
7166 	EXC_RESOURCE_HWM_ENCODE_LIMIT(code[0], max_footprint_mb);
7167 	/*
7168 	 * Do not generate a corpse fork if the violation is a fatal one
7169 	 * or the process wants synchronous EXC_RESOURCE exceptions.
7170 	 */
7171 	if ((exception_options & EXEC_RESOURCE_FATAL) || send_sync_exc_resource || !exc_via_corpse_forking) {
7172 		if (exception_options & EXEC_RESOURCE_FATAL) {
7173 			vm_map_set_corpse_source(task->map);
7174 		}
7175 
7176 		/* Do not send a EXC_RESOURCE if corpse_for_fatal_memkill is set */
7177 		if (send_sync_exc_resource || !corpse_for_fatal_memkill) {
7178 			/*
7179 			 * Use the _internal_ variant so that no user-space
7180 			 * process can resume our task from under us.
7181 			 */
7182 			task_suspend_internal(task);
7183 			exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
7184 			task_resume_internal(task);
7185 		}
7186 	} else {
7187 		if (disable_exc_resource_during_audio && audio_active) {
7188 			printf("process %s[%d] crossed memory high watermark (%d MB); EXC_RESOURCE "
7189 			    "suppressed due to audio playback.\n", procname, pid, max_footprint_mb);
7190 		} else {
7191 			task_enqueue_exception_with_corpse(task, EXC_RESOURCE,
7192 			    code, EXCEPTION_CODE_MAX, NULL, FALSE);
7193 		}
7194 	}
7195 
7196 	/*
7197 	 * After the EXC_RESOURCE has been handled, we must clear the
7198 	 * P_MEMSTAT_SKIP flag so that the process can again be
7199 	 * considered for jetsam if the memorystatus_thread wakes up.
7200 	 */
7201 	proc_memstat_skip(cur_bsd_info, FALSE);         /* clear the flag */
7202 }
7203 /*
7204  * Callback invoked when a task exceeds its physical footprint limit.
7205  */
7206 void
7207 task_footprint_exceeded(int warning, __unused const void *param0, __unused const void *param1)
7208 {
7209 	ledger_amount_t max_footprint = 0;
7210 	ledger_amount_t max_footprint_mb = 0;
7211 #if DEBUG || DEVELOPMENT
7212 	ledger_amount_t diag_threshold_limit_mb = 0;
7213 	ledger_amount_t diag_threshold_limit = 0;
7214 #endif
7215 #if CONFIG_DEFERRED_RECLAIM
7216 	ledger_amount_t current_footprint;
7217 #endif /* CONFIG_DEFERRED_RECLAIM */
7218 	task_t task;
7219 	send_exec_resource_is_warning is_warning = IS_NOT_WARNING;
7220 	boolean_t memlimit_is_active;
7221 	send_exec_resource_is_fatal memlimit_is_fatal;
7222 	send_exec_resource_is_diagnostics is_diag_mem_threshold = IS_NOT_DIAGNOSTICS;
7223 	if (warning == LEDGER_WARNING_DIAG_MEM_THRESHOLD) {
7224 		is_diag_mem_threshold = IS_DIAGNOSTICS;
7225 		is_warning = IS_WARNING;
7226 	} else if (warning == LEDGER_WARNING_DIPPED_BELOW) {
7227 		/*
7228 		 * Task memory limits only provide a warning on the way up.
7229 		 */
7230 		return;
7231 	} else if (warning == LEDGER_WARNING_ROSE_ABOVE) {
7232 		/*
7233 		 * This task is in danger of violating a memory limit;
7234 		 * it has exceeded a percentage level of the limit.
7235 		 */
7236 		is_warning = IS_WARNING;
7237 	} else {
7238 		/*
7239 		 * The task has exceeded the physical footprint limit.
7240 		 * This is not a warning but a true limit violation.
7241 		 */
7242 		is_warning = IS_NOT_WARNING;
7243 	}
7244 
7245 	task = current_task();
7246 
7247 	ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &max_footprint);
7248 #if DEBUG || DEVELOPMENT
7249 	ledger_get_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint, &diag_threshold_limit);
7250 #endif
7251 #if CONFIG_DEFERRED_RECLAIM
7252 	if (task->deferred_reclamation_metadata != NULL) {
7253 		/*
7254 		 * Task is enrolled in deferred reclamation.
7255 		 * Do a reclaim to ensure it's really over its limit.
7256 		 */
7257 		vm_deferred_reclamation_reclaim_from_task_sync(task, UINT64_MAX);
7258 		ledger_get_balance(task->ledger, task_ledgers.phys_footprint, &current_footprint);
7259 		if (current_footprint < max_footprint) {
7260 			return;
7261 		}
7262 	}
7263 #endif /* CONFIG_DEFERRED_RECLAIM */
7264 	max_footprint_mb = max_footprint >> 20;
7265 #if DEBUG || DEVELOPMENT
7266 	diag_threshold_limit_mb = diag_threshold_limit >> 20;
7267 #endif
7268 	memlimit_is_active = task_get_memlimit_is_active(task);
7269 	memlimit_is_fatal = task_get_memlimit_is_fatal(task) == FALSE ? IS_NOT_FATAL : IS_FATAL;
7270 #if DEBUG || DEVELOPMENT
7271 	if (is_diag_mem_threshold == IS_NOT_DIAGNOSTICS) {
7272 		task_process_crossed_limit_no_diag(task, max_footprint_mb, memlimit_is_fatal, memlimit_is_active, is_warning);
7273 	} else {
7274 		task_process_crossed_limit_diag(diag_threshold_limit_mb);
7275 	}
7276 #else
7277 	task_process_crossed_limit_no_diag(task, max_footprint_mb, memlimit_is_fatal, memlimit_is_active, is_warning);
7278 #endif
7279 }
7280 
7281 /*
7282  * Actions to perform when a process has crossed its high watermark or triggered a fatal memory-consumption violation. */
7283 static inline void
7284 task_process_crossed_limit_no_diag(task_t task, ledger_amount_t ledger_limit_size, bool memlimit_is_fatal, bool memlimit_is_active, send_exec_resource_is_warning is_warning)
7285 {
7286 	send_exec_resource_options_t exception_options = 0;
7287 	if (memlimit_is_fatal) {
7288 		exception_options |= EXEC_RESOURCE_FATAL;
7289 	}
7290 	/*
7291 	 * If this is an actual violation (not a warning), then generate an EXC_RESOURCE exception.
7292 	 * We only generate the exception once per process per memlimit (active/inactive limit).
7293 	 * To enforce this, we monitor state based on the memlimit's active/inactive attribute
7294 	 * and we disable it by marking that memlimit as exception-triggered.
7295 	 */
7296 	if (is_warning == IS_NOT_WARNING && !task_has_triggered_exc_resource(task, memlimit_is_active)) {
7297 		PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND((int)ledger_limit_size, exception_options);
7298 		// If it was not a diag threshold (i.e. it was a memory limit), then we do not want more signalling;
7299 		// however, if it was a diag limit, the user may reload a different limit and signal the violation again.
7300 		memorystatus_log_exception((int)ledger_limit_size, memlimit_is_active, memlimit_is_fatal);
7301 		task_mark_has_triggered_exc_resource(task, memlimit_is_active);
7302 	}
7303 	memorystatus_on_ledger_footprint_exceeded(is_warning == IS_NOT_WARNING ? FALSE : TRUE, memlimit_is_active, memlimit_is_fatal);
7304 }
7305 
7306 #if DEBUG || DEVELOPMENT
7307 /**
7308  * Actions to take when a process has crossed the diagnostics limit
7309  */
7310 static inline void
7311 task_process_crossed_limit_diag(ledger_amount_t ledger_limit_size)
7312 {
7313 	/*
7314 	 * If this is an actual violation (not a warning), then generate an EXC_RESOURCE exception.
7315 	 * In the case of the diagnostics thresholds, the exception will be signaled only once, but the
7316 	 * inhibit / rearm mechanism is performed at the ledger level.
7317 	 */
7318 	send_exec_resource_options_t exception_options = EXEC_RESOURCE_DIAGNOSTIC;
7319 	PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND((int)ledger_limit_size, exception_options);
7320 	memorystatus_log_diag_threshold_exception((int)ledger_limit_size);
7321 }
7322 #endif
7323 
7324 extern int proc_check_footprint_priv(void);
7325 
7326 kern_return_t
7327 task_set_phys_footprint_limit(
7328 	task_t task,
7329 	int new_limit_mb,
7330 	int *old_limit_mb)
7331 {
7332 	kern_return_t error;
7333 
7334 	boolean_t memlimit_is_active;
7335 	boolean_t memlimit_is_fatal;
7336 
7337 	if ((error = proc_check_footprint_priv())) {
7338 		return KERN_NO_ACCESS;
7339 	}
7340 
7341 	/*
7342 	 * This call should probably be obsoleted.
7343 	 * But for now, we default to current state.
7344 	 */
7345 	memlimit_is_active = task_get_memlimit_is_active(task);
7346 	memlimit_is_fatal = task_get_memlimit_is_fatal(task);
7347 
7348 	return task_set_phys_footprint_limit_internal(task, new_limit_mb, old_limit_mb, memlimit_is_active, memlimit_is_fatal);
7349 }
7350 
7351 /*
7352  * Set the limit of diagnostics memory consumption for a concrete task
7353  */
7354 #if CONFIG_MEMORYSTATUS
7355 #if DEVELOPMENT || DEBUG
7356 kern_return_t
7357 task_set_diag_footprint_limit(
7358 	task_t task,
7359 	uint64_t new_limit_mb,
7360 	uint64_t *old_limit_mb)
7361 {
7362 	kern_return_t error;
7363 
7364 	if ((error = proc_check_footprint_priv())) {
7365 		return KERN_NO_ACCESS;
7366 	}
7367 
7368 	return task_set_diag_footprint_limit_internal(task, new_limit_mb, old_limit_mb);
7369 }
7370 
7371 #endif // DEVELOPMENT || DEBUG
7372 #endif // CONFIG_MEMORYSTATUS
7373 
7374 kern_return_t
7375 task_convert_phys_footprint_limit(
7376 	int limit_mb,
7377 	int *converted_limit_mb)
7378 {
7379 	if (limit_mb == -1) {
7380 		/*
7381 		 * No limit
7382 		 */
7383 		if (max_task_footprint != 0) {
7384 			*converted_limit_mb = (int)(max_task_footprint / 1024 / 1024);         /* bytes to MB */
7385 		} else {
7386 			*converted_limit_mb = (int)(LEDGER_LIMIT_INFINITY >> 20);
7387 		}
7388 	} else {
7389 		/* nothing to convert */
7390 		*converted_limit_mb = limit_mb;
7391 	}
7392 	return KERN_SUCCESS;
7393 }
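/*
 * Illustrative sketch (added for exposition, not part of the original
 * source): -1 maps to the default max footprint (or infinity) expressed
 * in MB, while any other value passes through unchanged. The wrapper
 * name is hypothetical.
 */
#if 0
static void
convert_limit_example(void)
{
	int mb = 0;

	task_convert_phys_footprint_limit(-1, &mb);   /* default or infinity, in MB */
	task_convert_phys_footprint_limit(512, &mb);  /* already in MB: unchanged */
	assert(mb == 512);
}
#endif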
7394 
7395 kern_return_t
7396 task_set_phys_footprint_limit_internal(
7397 	task_t task,
7398 	int new_limit_mb,
7399 	int *old_limit_mb,
7400 	boolean_t memlimit_is_active,
7401 	boolean_t memlimit_is_fatal)
7402 {
7403 	ledger_amount_t old;
7404 	kern_return_t ret;
7405 #if DEVELOPMENT || DEBUG
7406 	diagthreshold_check_return diag_threshold_validity;
7407 #endif
7408 	ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &old);
7409 
7410 	if (ret != KERN_SUCCESS) {
7411 		return ret;
7412 	}
7413 	/**
7414 	 * We may need to re-enable the diag threshold, so fetch its value
7415 	 * and current status.
7416 	 */
7417 #if DEVELOPMENT || DEBUG
7418 	diag_threshold_validity = task_check_memorythreshold_is_valid( task, new_limit_mb, false);
7419 	/**
7420 	 * If the footprint limit and the diagnostics threshold are going to be the same, disable the threshold.
7421 	 */
7422 	if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7423 		ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7424 	} else if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7425 		ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7426 	}
7427 #endif
7428 
7429 	/*
7430 	 * Check that limit >> 20 will not give an "unexpected" 32-bit
7431 	 * result. There are, however, implicit assumptions that -1 mb limit
7432 	 * equates to LEDGER_LIMIT_INFINITY.
7433 	 */
7434 	assert(((old & 0xFFF0000000000000LL) == 0) || (old == LEDGER_LIMIT_INFINITY));
7435 
7436 	if (old_limit_mb) {
7437 		*old_limit_mb = (int)(old >> 20);
7438 	}
7439 
7440 	if (new_limit_mb == -1) {
7441 		/*
7442 		 * Caller wishes to remove the limit.
7443 		 */
7444 		ledger_set_limit(task->ledger, task_ledgers.phys_footprint,
7445 		    max_task_footprint ? max_task_footprint : LEDGER_LIMIT_INFINITY,
7446 		    max_task_footprint ? (uint8_t)max_task_footprint_warning_level : 0);
7447 
7448 		task_lock(task);
7449 		task_set_memlimit_is_active(task, memlimit_is_active);
7450 		task_set_memlimit_is_fatal(task, memlimit_is_fatal);
7451 		task_unlock(task);
7452 		/**
7453 		 * If the diagnostics threshold was disabled and we now have a new limit, we have to re-enable it.
7454 		 */
7455 #if DEVELOPMENT || DEBUG
7456 		if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7457 			ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7458 		} else if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7459 			ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7460 		}
7461 	#endif
7462 		return KERN_SUCCESS;
7463 	}
7464 
7465 #ifdef CONFIG_NOMONITORS
7466 	return KERN_SUCCESS;
7467 #endif /* CONFIG_NOMONITORS */
7468 
7469 	task_lock(task);
7470 
7471 	if ((memlimit_is_active == task_get_memlimit_is_active(task)) &&
7472 	    (memlimit_is_fatal == task_get_memlimit_is_fatal(task)) &&
7473 	    (((ledger_amount_t)new_limit_mb << 20) == old)) {
7474 		/*
7475 		 * memlimit state is not changing
7476 		 */
7477 		task_unlock(task);
7478 		return KERN_SUCCESS;
7479 	}
7480 
7481 	task_set_memlimit_is_active(task, memlimit_is_active);
7482 	task_set_memlimit_is_fatal(task, memlimit_is_fatal);
7483 
7484 	ledger_set_limit(task->ledger, task_ledgers.phys_footprint,
7485 	    (ledger_amount_t)new_limit_mb << 20, PHYS_FOOTPRINT_WARNING_LEVEL);
7486 
7487 	if (task == current_task()) {
7488 		ledger_check_new_balance(current_thread(), task->ledger,
7489 		    task_ledgers.phys_footprint);
7490 	}
7491 
7492 	task_unlock(task);
7493 #if DEVELOPMENT || DEBUG
7494 	if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7495 		ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7496 	}
7497 	#endif
7498 
7499 	return KERN_SUCCESS;
7500 }
7501 
7502 #if RESETTABLE_DIAG_FOOTPRINT_LIMITS
7503 kern_return_t
7504 task_set_diag_footprint_limit_internal(
7505 	task_t task,
7506 	uint64_t new_limit_bytes,
7507 	uint64_t *old_limit_bytes)
7508 {
7509 	ledger_amount_t old = 0;
7510 	kern_return_t ret = KERN_SUCCESS;
7511 	diagthreshold_check_return diag_threshold_validity;
7512 	ret = ledger_get_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint, &old);
7513 
7514 	if (ret != KERN_SUCCESS) {
7515 		return ret;
7516 	}
7517 	/**
7518 	 * We may need to re-enable the diag threshold, so fetch its value
7519 	 * and current status.
7520 	 */
7521 	diag_threshold_validity = task_check_memorythreshold_is_valid( task, new_limit_bytes >> 20, true);
7522 	/**
7523 	 * If the footprint limit and the diagnostics threshold are going to be the same, disable the threshold.
7524 	 */
7525 	if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7526 		ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7527 	}
7528 
7529 	/*
7530 	 * Check that limit >> 20 will not give an "unexpected" 32-bit
7531 	 * result. There are, however, implicit assumptions that -1 mb limit
7532 	 * equates to LEDGER_LIMIT_INFINITY.
7533 	 */
7534 	if (old_limit_bytes) {
7535 		*old_limit_bytes = old;
7536 	}
7537 
7538 	if (new_limit_bytes == -1) {
7539 		/*
7540 		 * Caller wishes to remove the limit.
7541 		 */
7542 		ledger_set_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint,
7543 		    LEDGER_LIMIT_INFINITY);
7544 		/*
7545 		 * If the memory diagnostics flag was disabled, enable it again.
7546 		 */
7547 		ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7548 		return KERN_SUCCESS;
7549 	}
7550 
7551 #ifdef CONFIG_NOMONITORS
7552 	return KERN_SUCCESS;
7553 #else
7554 
7555 	task_lock(task);
7556 	ledger_set_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint,
7557 	    (ledger_amount_t)new_limit_bytes );
7558 	if (task == current_task()) {
7559 		ledger_check_new_balance(current_thread(), task->ledger,
7560 		    task_ledgers.phys_footprint);
7561 	}
7562 
7563 	task_unlock(task);
7564 	if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7565 		ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7566 	} else if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7567 		ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7568 	}
7569 
7570 	return KERN_SUCCESS;
7571 #endif /* CONFIG_NOMONITORS */
7572 }
7573 
7574 kern_return_t
7575 task_get_diag_footprint_limit_internal(
7576 	task_t task,
7577 	uint64_t *new_limit_bytes,
7578 	bool *threshold_disabled)
7579 {
7580 	ledger_amount_t ledger_limit;
7581 	kern_return_t ret = KERN_SUCCESS;
7582 	if (new_limit_bytes == NULL || threshold_disabled == NULL) {
7583 		return KERN_INVALID_ARGUMENT;
7584 	}
7585 	ret = ledger_get_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint, &ledger_limit);
7586 	if (ledger_limit == LEDGER_LIMIT_INFINITY) {
7587 		ledger_limit = -1;
7588 	}
7589 	if (ret == KERN_SUCCESS) {
7590 		*new_limit_bytes = ledger_limit;
7591 		ret = ledger_is_diag_threshold_enabled(task->ledger, task_ledgers.phys_footprint, threshold_disabled);
7592 	}
7593 	return ret;
7594 }
7595 #endif /* RESETTABLE_DIAG_FOOTPRINT_LIMITS */
7596 
7597 
7598 kern_return_t
7599 task_get_phys_footprint_limit(
7600 	task_t task,
7601 	int *limit_mb)
7602 {
7603 	ledger_amount_t limit;
7604 	kern_return_t ret;
7605 
7606 	ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &limit);
7607 	if (ret != KERN_SUCCESS) {
7608 		return ret;
7609 	}
7610 
7611 	/*
7612 	 * Check that limit >> 20 will not give an "unexpected" signed, 32-bit
7613 	 * result. There are, however, implicit assumptions that -1 mb limit
7614 	 * equates to LEDGER_LIMIT_INFINITY.
7615 	 */
7616 	assert(((limit & 0xFFF0000000000000LL) == 0) || (limit == LEDGER_LIMIT_INFINITY));
7617 	*limit_mb = (int)(limit >> 20);
7618 
7619 	return KERN_SUCCESS;
7620 }
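/*
 * Illustrative sketch (added for exposition, not part of the original
 * source): installing an active, non-fatal footprint limit and reading
 * it back. The wrapper name and the 300 MB figure are hypothetical.
 */
#if 0
static kern_return_t
footprint_limit_example(task_t task)
{
	int old_mb = 0, cur_mb = 0;
	kern_return_t kr;

	kr = task_set_phys_footprint_limit_internal(task, 300, &old_mb,
	    TRUE /* memlimit_is_active */, FALSE /* memlimit_is_fatal */);
	if (kr == KERN_SUCCESS) {
		kr = task_get_phys_footprint_limit(task, &cur_mb);
	}
	return kr;
}
#endif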
7621 #else /* CONFIG_MEMORYSTATUS */
7622 kern_return_t
7623 task_set_phys_footprint_limit(
7624 	__unused task_t task,
7625 	__unused int new_limit_mb,
7626 	__unused int *old_limit_mb)
7627 {
7628 	return KERN_FAILURE;
7629 }
7630 
7631 kern_return_t
7632 task_get_phys_footprint_limit(
7633 	__unused task_t task,
7634 	__unused int *limit_mb)
7635 {
7636 	return KERN_FAILURE;
7637 }
7638 #endif /* CONFIG_MEMORYSTATUS */
7639 
7640 security_token_t *
7641 task_get_sec_token(task_t task)
7642 {
7643 	return &task_get_ro(task)->task_tokens.sec_token;
7644 }
7645 
7646 void
7647 task_set_sec_token(task_t task, security_token_t *token)
7648 {
7649 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7650 	    task_tokens.sec_token, token);
7651 }
7652 
7653 audit_token_t *
7654 task_get_audit_token(task_t task)
7655 {
7656 	return &task_get_ro(task)->task_tokens.audit_token;
7657 }
7658 
7659 void
7660 task_set_audit_token(task_t task, audit_token_t *token)
7661 {
7662 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7663 	    task_tokens.audit_token, token);
7664 }
7665 
7666 void
7667 task_set_tokens(task_t task, security_token_t *sec_token, audit_token_t *audit_token)
7668 {
7669 	struct task_token_ro_data tokens;
7670 
7671 	tokens = task_get_ro(task)->task_tokens;
7672 	tokens.sec_token = *sec_token;
7673 	tokens.audit_token = *audit_token;
7674 
7675 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task), task_tokens,
7676 	    &tokens);
7677 }
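/*
 * Illustrative sketch (added for exposition, not part of the original
 * source): the read-modify-write pattern used above for read-only task
 * data: copy the RO struct, mutate the copy, then publish it with a
 * single zalloc_ro_update_field() call. Field names mirror the code
 * above; the wrapper name is hypothetical.
 */
#if 0
static void
tokens_update_example(task_t task, security_token_t *sec_token)
{
	struct task_token_ro_data tokens = task_get_ro(task)->task_tokens;

	tokens.sec_token = *sec_token;           /* mutate the writable copy */
	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
	    task_tokens, &tokens);               /* publish the new value */
}
#endif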
7678 
7679 boolean_t
7680 task_is_privileged(task_t task)
7681 {
7682 	return task_get_sec_token(task)->val[0] == 0;
7683 }
7684 
7685 #ifdef CONFIG_MACF
7686 uint8_t *
7687 task_get_mach_trap_filter_mask(task_t task)
7688 {
7689 	return task_get_ro(task)->task_filters.mach_trap_filter_mask;
7690 }
7691 
7692 void
7693 task_set_mach_trap_filter_mask(task_t task, uint8_t *mask)
7694 {
7695 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7696 	    task_filters.mach_trap_filter_mask, &mask);
7697 }
7698 
7699 uint8_t *
7700 task_get_mach_kobj_filter_mask(task_t task)
7701 {
7702 	return task_get_ro(task)->task_filters.mach_kobj_filter_mask;
7703 }
7704 
7705 mach_vm_address_t
7706 task_get_all_image_info_addr(task_t task)
7707 {
7708 	return task->all_image_info_addr;
7709 }
7710 
7711 void
7712 task_set_mach_kobj_filter_mask(task_t task, uint8_t *mask)
7713 {
7714 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7715 	    task_filters.mach_kobj_filter_mask, &mask);
7716 }
7717 
7718 #endif /* CONFIG_MACF */
7719 
7720 void
7721 task_set_thread_limit(task_t task, uint16_t thread_limit)
7722 {
7723 	assert(task != kernel_task);
7724 	if (thread_limit <= TASK_MAX_THREAD_LIMIT) {
7725 		task_lock(task);
7726 		task->task_thread_limit = thread_limit;
7727 		task_unlock(task);
7728 	}
7729 }
7730 
7731 #if CONFIG_PROC_RESOURCE_LIMITS
7732 kern_return_t
7733 task_set_port_space_limits(task_t task, uint32_t soft_limit, uint32_t hard_limit)
7734 {
7735 	return ipc_space_set_table_size_limits(task->itk_space, soft_limit, hard_limit);
7736 }
7737 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
7738 
7739 #if XNU_TARGET_OS_OSX
7740 boolean_t
7741 task_has_system_version_compat_enabled(task_t task)
7742 {
7743 	boolean_t enabled = FALSE;
7744 
7745 	task_lock(task);
7746 	enabled = (task->t_flags & TF_SYS_VERSION_COMPAT);
7747 	task_unlock(task);
7748 
7749 	return enabled;
7750 }
7751 
7752 void
7753 task_set_system_version_compat_enabled(task_t task, boolean_t enable_system_version_compat)
7754 {
7755 	assert(task == current_task());
7756 	assert(task != kernel_task);
7757 
7758 	task_lock(task);
7759 	if (enable_system_version_compat) {
7760 		task->t_flags |= TF_SYS_VERSION_COMPAT;
7761 	} else {
7762 		task->t_flags &= ~TF_SYS_VERSION_COMPAT;
7763 	}
7764 	task_unlock(task);
7765 }
7766 #endif /* XNU_TARGET_OS_OSX */
7767 
7768 /*
7769  * We need to export some functions to other components that
7770  * are currently implemented in macros within the osfmk
7771  * component.  Just export them as functions of the same name.
7772  */
7773 boolean_t
7774 is_kerneltask(task_t t)
7775 {
7776 	if (t == kernel_task) {
7777 		return TRUE;
7778 	}
7779 
7780 	return FALSE;
7781 }
7782 
7783 boolean_t
7784 is_corpsefork(task_t t)
7785 {
7786 	return task_is_a_corpse_fork(t);
7787 }
7788 
7789 task_t
7790 current_task_early(void)
7791 {
7792 	if (__improbable(startup_phase < STARTUP_SUB_EARLY_BOOT)) {
7793 		if (current_thread()->t_tro == NULL) {
7794 			return TASK_NULL;
7795 		}
7796 	}
7797 	return get_threadtask(current_thread());
7798 }
7799 
7800 task_t
7801 current_task(void)
7802 {
7803 	return get_threadtask(current_thread());
7804 }
7805 
7806 /* defined in bsd/kern/kern_prot.c */
7807 extern int get_audit_token_pid(audit_token_t *audit_token);
7808 
7809 int
7810 task_pid(task_t task)
7811 {
7812 	if (task) {
7813 		return get_audit_token_pid(task_get_audit_token(task));
7814 	}
7815 	return -1;
7816 }
7817 
7818 #if __has_feature(ptrauth_calls)
7819 /*
7820  * Get the shared region id and jop signing key for the task.
7821  * The function will allocate a kalloc buffer and return
7822  * it to the caller; the caller needs to free it. This is used
7823  * for getting the information via the task port.
7824  */
7825 char *
7826 task_get_vm_shared_region_id_and_jop_pid(task_t task, uint64_t *jop_pid)
7827 {
7828 	size_t len;
7829 	char *shared_region_id = NULL;
7830 
7831 	task_lock(task);
7832 	if (task->shared_region_id == NULL) {
7833 		task_unlock(task);
7834 		return NULL;
7835 	}
7836 	len = strlen(task->shared_region_id) + 1;
7837 
7838 	/* don't hold task lock while allocating */
7839 	task_unlock(task);
7840 	shared_region_id = kalloc_data(len, Z_WAITOK);
7841 	task_lock(task);
7842 
7843 	if (task->shared_region_id == NULL) {
7844 		task_unlock(task);
7845 		kfree_data(shared_region_id, len);
7846 		return NULL;
7847 	}
7848 	assert(len == strlen(task->shared_region_id) + 1);         /* should never change */
7849 	strlcpy(shared_region_id, task->shared_region_id, len);
7850 	task_unlock(task);
7851 
7852 	/* find key from its auth pager */
7853 	if (jop_pid != NULL) {
7854 		*jop_pid = shared_region_find_key(shared_region_id);
7855 	}
7856 
7857 	return shared_region_id;
7858 }
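/*
 * Illustrative sketch (added for exposition, not part of the original
 * source): the caller owns the returned buffer and must free it with
 * kfree_data(), as the routine's comment requires. The wrapper name is
 * hypothetical.
 */
#if 0
static void
shared_region_id_example(task_t task)
{
	uint64_t jop_pid = 0;
	char *id = task_get_vm_shared_region_id_and_jop_pid(task, &jop_pid);

	if (id != NULL) {
		/* ... use id and jop_pid ... */
		kfree_data(id, strlen(id) + 1);
	}
}
#endif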
7859 
7860 /*
7861  * set the shared region id for a task
7862  */
7863 void
7864 task_set_shared_region_id(task_t task, char *id)
7865 {
7866 	char *old_id;
7867 
7868 	task_lock(task);
7869 	old_id = task->shared_region_id;
7870 	task->shared_region_id = id;
7871 	task->shared_region_auth_remapped = FALSE;
7872 	task_unlock(task);
7873 
7874 	/* free any pre-existing shared region id */
7875 	if (old_id != NULL) {
7876 		shared_region_key_dealloc(old_id);
7877 		kfree_data(old_id, strlen(old_id) + 1);
7878 	}
7879 }
7880 #endif /* __has_feature(ptrauth_calls) */
7881 
7882 /*
7883  * This routine finds a thread in a task by its unique id
7884  * Returns a referenced thread or THREAD_NULL if the thread was not found
7885  *
7886  * TODO: This is super inefficient - it's an O(threads in task) list walk!
7887  *       We should make a tid hash, or transition all tid clients to thread ports
7888  *
7889  * Precondition: No locks held (will take task lock)
7890  */
7891 thread_t
7892 task_findtid(task_t task, uint64_t tid)
7893 {
7894 	thread_t self           = current_thread();
7895 	thread_t found_thread   = THREAD_NULL;
7896 	thread_t iter_thread    = THREAD_NULL;
7897 
7898 	/* Short-circuit the lookup if we're looking up ourselves */
7899 	if (tid == self->thread_id || tid == TID_NULL) {
7900 		assert(get_threadtask(self) == task);
7901 
7902 		thread_reference(self);
7903 
7904 		return self;
7905 	}
7906 
7907 	task_lock(task);
7908 
7909 	queue_iterate(&task->threads, iter_thread, thread_t, task_threads) {
7910 		if (iter_thread->thread_id == tid) {
7911 			found_thread = iter_thread;
7912 			thread_reference(found_thread);
7913 			break;
7914 		}
7915 	}
7916 
7917 	task_unlock(task);
7918 
7919 	return found_thread;
7920 }
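/*
 * Illustrative sketch (added for exposition, not part of the original
 * source): task_findtid() returns a referenced thread, so the caller
 * must drop that reference with thread_deallocate(). The wrapper name
 * is hypothetical.
 */
#if 0
static void
findtid_example(task_t task, uint64_t tid)
{
	thread_t thread = task_findtid(task, tid);

	if (thread != THREAD_NULL) {
		/* ... inspect the thread ... */
		thread_deallocate(thread);
	}
}
#endif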
7921 
7922 int
7923 pid_from_task(task_t task)
7924 {
7925 	int pid = -1;
7926 	void *bsd_info = get_bsdtask_info(task);
7927 
7928 	if (bsd_info) {
7929 		pid = proc_pid(bsd_info);
7930 	} else {
7931 		pid = task_pid(task);
7932 	}
7933 
7934 	return pid;
7935 }
7936 
7937 /*
7938  * Control the CPU usage monitor for a task.
7939  */
7940 kern_return_t
7941 task_cpu_usage_monitor_ctl(task_t task, uint32_t *flags)
7942 {
7943 	int error = KERN_SUCCESS;
7944 
7945 	if (*flags & CPUMON_MAKE_FATAL) {
7946 		task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_FATAL_CPUMON;
7947 	} else {
7948 		error = KERN_INVALID_ARGUMENT;
7949 	}
7950 
7951 	return error;
7952 }
7953 
7954 /*
7955  * Control the wakeups monitor for a task.
7956  */
7957 kern_return_t
7958 task_wakeups_monitor_ctl(task_t task, uint32_t *flags, int32_t *rate_hz)
7959 {
7960 	ledger_t ledger = task->ledger;
7961 
7962 	task_lock(task);
7963 	if (*flags & WAKEMON_GET_PARAMS) {
7964 		ledger_amount_t limit;
7965 		uint64_t                period;
7966 
7967 		ledger_get_limit(ledger, task_ledgers.interrupt_wakeups, &limit);
7968 		ledger_get_period(ledger, task_ledgers.interrupt_wakeups, &period);
7969 
7970 		if (limit != LEDGER_LIMIT_INFINITY) {
7971 			/*
7972 			 * An active limit means the wakeups monitor is enabled.
7973 			 */
7974 			*rate_hz = (int32_t)(limit / (int64_t)(period / NSEC_PER_SEC));
7975 			*flags = WAKEMON_ENABLE;
7976 			if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON) {
7977 				*flags |= WAKEMON_MAKE_FATAL;
7978 			}
7979 		} else {
7980 			*flags = WAKEMON_DISABLE;
7981 			*rate_hz = -1;
7982 		}
7983 
7984 		/*
7985 		 * If WAKEMON_GET_PARAMS is present in flags, all other flags are ignored.
7986 		 */
7987 		task_unlock(task);
7988 		return KERN_SUCCESS;
7989 	}
7990 
7991 	if (*flags & WAKEMON_ENABLE) {
7992 		if (*flags & WAKEMON_SET_DEFAULTS) {
7993 			*rate_hz = task_wakeups_monitor_rate;
7994 		}
7995 
7996 #ifndef CONFIG_NOMONITORS
7997 		if (*flags & WAKEMON_MAKE_FATAL) {
7998 			task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON;
7999 		}
8000 #endif /* CONFIG_NOMONITORS */
8001 
8002 		if (*rate_hz <= 0) {
8003 			task_unlock(task);
8004 			return KERN_INVALID_ARGUMENT;
8005 		}
8006 
8007 #ifndef CONFIG_NOMONITORS
8008 		ledger_set_limit(ledger, task_ledgers.interrupt_wakeups, *rate_hz * task_wakeups_monitor_interval,
8009 		    (uint8_t)task_wakeups_monitor_ustackshots_trigger_pct);
8010 		ledger_set_period(ledger, task_ledgers.interrupt_wakeups, task_wakeups_monitor_interval * NSEC_PER_SEC);
8011 		ledger_enable_callback(ledger, task_ledgers.interrupt_wakeups);
8012 #endif /* CONFIG_NOMONITORS */
8013 	} else if (*flags & WAKEMON_DISABLE) {
8014 		/*
8015 		 * Caller wishes to disable wakeups monitor on the task.
8016 		 *
8017 		 * Disable telemetry if it was triggered by the wakeups monitor, and
8018 		 * remove the limit & callback on the wakeups ledger entry.
8019 		 */
8020 #if CONFIG_TELEMETRY
8021 		telemetry_task_ctl_locked(task, TF_WAKEMON_WARNING, 0);
8022 #endif
8023 		ledger_disable_refill(ledger, task_ledgers.interrupt_wakeups);
8024 		ledger_disable_callback(ledger, task_ledgers.interrupt_wakeups);
8025 	}
8026 
8027 	task_unlock(task);
8028 	return KERN_SUCCESS;
8029 }
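/*
 * Illustrative sketch (added for exposition, not part of the original
 * source): querying the wakeups monitor with WAKEMON_GET_PARAMS, which
 * causes all other flag bits to be ignored per the comment above. The
 * wrapper name is hypothetical.
 */
#if 0
static void
wakemon_query_example(task_t task)
{
	uint32_t flags   = WAKEMON_GET_PARAMS;
	int32_t  rate_hz = 0;

	task_wakeups_monitor_ctl(task, &flags, &rate_hz);
	/* flags: WAKEMON_ENABLE (possibly | WAKEMON_MAKE_FATAL) or WAKEMON_DISABLE;
	 * rate_hz: permitted wakeups per second, or -1 if disabled. */
}
#endif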
8030 
8031 void
8032 task_wakeups_rate_exceeded(int warning, __unused const void *param0, __unused const void *param1)
8033 {
8034 	if (warning == LEDGER_WARNING_ROSE_ABOVE) {
8035 #if CONFIG_TELEMETRY
8036 		/*
8037 		 * This task is in danger of violating the wakeups monitor. Enable telemetry on this task
8038 		 * so there are micro-stackshots available if and when EXC_RESOURCE is triggered.
8039 		 */
8040 		telemetry_task_ctl(current_task(), TF_WAKEMON_WARNING, 1);
8041 #endif
8042 		return;
8043 	}
8044 
8045 #if CONFIG_TELEMETRY
8046 	/*
8047 	 * If the balance has dipped below the warning level (LEDGER_WARNING_DIPPED_BELOW) or
8048 	 * exceeded the limit, turn telemetry off for the task.
8049 	 */
8050 	telemetry_task_ctl(current_task(), TF_WAKEMON_WARNING, 0);
8051 #endif
8052 
8053 	if (warning == 0) {
8054 		SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS();
8055 	}
8056 }
8057 
8058 TUNABLE(bool, enable_wakeup_reports, "enable_wakeup_reports", false); /* Enable wakeup reports. */
8059 
8060 void __attribute__((noinline))
8061 SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void)
8062 {
8063 	task_t                      task        = current_task();
8064 	int                         pid         = 0;
8065 	const char                  *procname   = "unknown";
8066 	boolean_t                   fatal;
8067 	kern_return_t               kr;
8068 #ifdef EXC_RESOURCE_MONITORS
8069 	mach_exception_data_type_t  code[EXCEPTION_CODE_MAX];
8070 #endif /* EXC_RESOURCE_MONITORS */
8071 	struct ledger_entry_info    lei;
8072 
8073 #ifdef MACH_BSD
8074 	pid = proc_selfpid();
8075 	if (get_bsdtask_info(task) != NULL) {
8076 		procname = proc_name_address(get_bsdtask_info(current_task()));
8077 	}
8078 #endif
8079 
8080 	ledger_get_entry_info(task->ledger, task_ledgers.interrupt_wakeups, &lei);
8081 
8082 	/*
8083 	 * Disable the exception notification so we don't overwhelm
8084 	 * the listener with an endless stream of redundant exceptions.
8085 	 * TODO: detect whether another thread is already reporting the violation.
8086 	 */
8087 	uint32_t flags = WAKEMON_DISABLE;
8088 	task_wakeups_monitor_ctl(task, &flags, NULL);
8089 
8090 	fatal = task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON;
8091 	trace_resource_violation(RMON_CPUWAKES_VIOLATED, &lei);
8092 	os_log(OS_LOG_DEFAULT, "process %s[%d] caught waking the CPU %llu times "
8093 	    "over ~%llu seconds, averaging %llu wakes / second and "
8094 	    "violating a %slimit of %llu wakes over %llu seconds.\n",
8095 	    procname, pid,
8096 	    lei.lei_balance, lei.lei_last_refill / NSEC_PER_SEC,
8097 	    lei.lei_last_refill == 0 ? 0 :
8098 	    (NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill),
8099 	    fatal ? "FATAL " : "",
8100 	    lei.lei_limit, lei.lei_refill_period / NSEC_PER_SEC);
8101 
8102 	if (enable_wakeup_reports) {
8103 		kr = send_resource_violation(send_cpu_wakes_violation, task, &lei,
8104 		    fatal ? kRNFatalLimitFlag : 0);
8105 		if (kr) {
8106 			printf("send_resource_violation(CPU wakes, ...): error %#x\n", kr);
8107 		}
8108 	}
8109 
8110 #ifdef EXC_RESOURCE_MONITORS
8111 	if (disable_exc_resource) {
8112 		printf("process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
8113 		    "suppressed by a boot-arg\n", procname, pid);
8114 		return;
8115 	}
8116 	if (disable_exc_resource_during_audio && audio_active) {
8117 		os_log(OS_LOG_DEFAULT, "process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
8118 		    "suppressed due to audio playback\n", procname, pid);
8119 		return;
8120 	}
8121 	if (lei.lei_last_refill == 0) {
8122 		os_log(OS_LOG_DEFAULT, "process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
8123 		    "suppressed due to lei.lei_last_refill = 0\n", procname, pid);
		return; /* actually suppress EXC_RESOURCE and avoid a zero divisor below */
8124 	}
8125 
8126 	code[0] = code[1] = 0;
8127 	EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_WAKEUPS);
8128 	EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_WAKEUPS_MONITOR);
8129 	EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_PERMITTED(code[0],
8130 	    NSEC_PER_SEC * lei.lei_limit / lei.lei_refill_period);
8131 	EXC_RESOURCE_CPUMONITOR_ENCODE_OBSERVATION_INTERVAL(code[0],
8132 	    lei.lei_last_refill);
8133 	EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_OBSERVED(code[1],
8134 	    NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill);
8135 	exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
8136 #endif /* EXC_RESOURCE_MONITORS */
8137 
8138 	if (fatal) {
8139 		task_terminate_internal(task);
8140 	}
8141 }
8142 
8143 static boolean_t
8144 global_update_logical_writes(int64_t io_delta, int64_t *global_write_count)
8145 {
8146 	int64_t old_count, new_count;
8147 	boolean_t needs_telemetry;
8148 
8149 	do {
8150 		new_count = old_count = *global_write_count;
8151 		new_count += io_delta;
8152 		if (new_count >= io_telemetry_limit) {
8153 			new_count = 0;
8154 			needs_telemetry = TRUE;
8155 		} else {
8156 			needs_telemetry = FALSE;
8157 		}
8158 	} while (!OSCompareAndSwap64(old_count, new_count, global_write_count));
8159 	return needs_telemetry;
8160 }
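/*
 * The compare-and-swap loop above is the lock-free equivalent of the
 * following critical section, with exactly one racing thread observing
 * the reset (and thus needs_telemetry == TRUE) per telemetry window:
 *
 *	*global_write_count += io_delta;
 *	if (*global_write_count >= io_telemetry_limit) {
 *		*global_write_count = 0;
 *		needs_telemetry = TRUE;
 *	}
 */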
8161 
8162 void
8163 task_update_physical_writes(__unused task_t task, __unused task_physical_write_flavor_t flavor, __unused uint64_t io_size, __unused task_balance_flags_t flags)
8164 {
8165 #if CONFIG_PHYS_WRITE_ACCT
8166 	if (!io_size) {
8167 		return;
8168 	}
8169 
8170 	/*
8171 	 * task == NULL means that we have to update kernel_task ledgers
8172 	 */
8173 	if (!task) {
8174 		task = kernel_task;
8175 	}
8176 
8177 	KDBG((VMDBG_CODE(DBG_VM_PHYS_WRITE_ACCT)) | DBG_FUNC_NONE,
8178 	    task_pid(task), flavor, io_size, flags);
8179 	DTRACE_IO4(physical_writes, struct task *, task, task_physical_write_flavor_t, flavor, uint64_t, io_size, task_balance_flags_t, flags);
8180 
8181 	if (flags & TASK_BALANCE_CREDIT) {
8182 		if (flavor == TASK_PHYSICAL_WRITE_METADATA) {
8183 			OSAddAtomic64(io_size, (SInt64 *)&(task->task_fs_metadata_writes));
8184 			ledger_credit_nocheck(task->ledger, task_ledgers.fs_metadata_writes, io_size);
8185 		}
8186 	} else if (flags & TASK_BALANCE_DEBIT) {
8187 		if (flavor == TASK_PHYSICAL_WRITE_METADATA) {
8188 			OSAddAtomic64(-1 * io_size, (SInt64 *)&(task->task_fs_metadata_writes));
8189 			ledger_debit_nocheck(task->ledger, task_ledgers.fs_metadata_writes, io_size);
8190 		}
8191 	}
8192 #endif /* CONFIG_PHYS_WRITE_ACCT */
8193 }
8194 
8195 void
8196 task_update_logical_writes(task_t task, uint32_t io_size, int flags, void *vp)
8197 {
8198 	int64_t io_delta = 0;
8199 	int64_t * global_counter_to_update;
8200 	boolean_t needs_telemetry = FALSE;
8201 	boolean_t is_external_device = FALSE;
8202 	int ledger_to_update = 0;
8203 	struct task_writes_counters * writes_counters_to_update;
8204 
8205 	if ((!task) || (!io_size) || (!vp)) {
8206 		return;
8207 	}
8208 
8209 	KDBG((VMDBG_CODE(DBG_VM_DATA_WRITE)) | DBG_FUNC_NONE,
8210 	    task_pid(task), io_size, flags, (uintptr_t)VM_KERNEL_ADDRPERM(vp));
8211 	DTRACE_IO4(logical_writes, struct task *, task, uint32_t, io_size, int, flags, vnode *, vp);
8212 
8213 	// Is the drive backing this vnode internal or external to the system?
8214 	if (vnode_isonexternalstorage(vp) == false) {
8215 		global_counter_to_update = &global_logical_writes_count;
8216 		ledger_to_update = task_ledgers.logical_writes;
8217 		writes_counters_to_update = &task->task_writes_counters_internal;
8218 		is_external_device = FALSE;
8219 	} else {
8220 		global_counter_to_update = &global_logical_writes_to_external_count;
8221 		ledger_to_update = task_ledgers.logical_writes_to_external;
8222 		writes_counters_to_update = &task->task_writes_counters_external;
8223 		is_external_device = TRUE;
8224 	}
8225 
8226 	switch (flags) {
8227 	case TASK_WRITE_IMMEDIATE:
8228 		OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_immediate_writes));
8229 		ledger_credit(task->ledger, ledger_to_update, io_size);
8230 		if (!is_external_device) {
8231 			coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
8232 		}
8233 		break;
8234 	case TASK_WRITE_DEFERRED:
8235 		OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_deferred_writes));
8236 		ledger_credit(task->ledger, ledger_to_update, io_size);
8237 		if (!is_external_device) {
8238 			coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
8239 		}
8240 		break;
8241 	case TASK_WRITE_INVALIDATED:
8242 		OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_invalidated_writes));
8243 		ledger_debit(task->ledger, ledger_to_update, io_size);
8244 		if (!is_external_device) {
8245 			coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, FALSE, io_size);
8246 		}
8247 		break;
8248 	case TASK_WRITE_METADATA:
8249 		OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_metadata_writes));
8250 		ledger_credit(task->ledger, ledger_to_update, io_size);
8251 		if (!is_external_device) {
8252 			coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
8253 		}
8254 		break;
8255 	}
8256 
8257 	io_delta = (flags == TASK_WRITE_INVALIDATED) ? ((int64_t)io_size * -1ll) : ((int64_t)io_size);
8258 	if (io_telemetry_limit != 0) {
8259 		/* If io_telemetry_limit is 0, disable global updates and I/O telemetry */
8260 		needs_telemetry = global_update_logical_writes(io_delta, global_counter_to_update);
8261 		if (needs_telemetry && !is_external_device) {
8262 			act_set_io_telemetry_ast(current_thread());
8263 		}
8264 	}
8265 }
8266 
8267 /*
8268  * Control the I/O monitor for a task.
8269  */
8270 kern_return_t
8271 task_io_monitor_ctl(task_t task, uint32_t *flags)
8272 {
8273 	ledger_t ledger = task->ledger;
8274 
8275 	task_lock(task);
8276 	if (*flags & IOMON_ENABLE) {
8277 		/* Configure the physical I/O ledger */
8278 		ledger_set_limit(ledger, task_ledgers.physical_writes, (task_iomon_limit_mb * 1024 * 1024), 0);
8279 		ledger_set_period(ledger, task_ledgers.physical_writes, (task_iomon_interval_secs * NSEC_PER_SEC));
8280 	} else if (*flags & IOMON_DISABLE) {
8281 		/*
8282 		 * Caller wishes to disable I/O monitor on the task.
8283 		 */
8284 		ledger_disable_refill(ledger, task_ledgers.physical_writes);
8285 		ledger_disable_callback(ledger, task_ledgers.physical_writes);
8286 	}
8287 
8288 	task_unlock(task);
8289 	return KERN_SUCCESS;
8290 }
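/*
 * Illustrative usage (hypothetical call site): enabling the monitor
 * installs a ledger limit of task_iomon_limit_mb megabytes of physical
 * writes, refilled every task_iomon_interval_secs seconds:
 *
 *	uint32_t flags = IOMON_ENABLE;
 *	kern_return_t kr = task_io_monitor_ctl(current_task(), &flags);
 */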
8291 
8292 void
8293 task_io_rate_exceeded(int warning, const void *param0, __unused const void *param1)
8294 {
8295 	if (warning == 0) {
8296 		SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO((int)param0);
8297 	}
8298 }
8299 
8300 void __attribute__((noinline))
8301 SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor)
8302 {
8303 	int                             pid = 0;
8304 	task_t                          task = current_task();
8305 #ifdef EXC_RESOURCE_MONITORS
8306 	mach_exception_data_type_t      code[EXCEPTION_CODE_MAX];
8307 #endif /* EXC_RESOURCE_MONITORS */
8308 	struct ledger_entry_info        lei = {};
8309 	kern_return_t                   kr;
8310 
8311 #ifdef MACH_BSD
8312 	pid = proc_selfpid();
8313 #endif
8314 	/*
8315 	 * Get the ledger entry info. We need to do this before disabling the exception
8316 	 * to get correct values for all fields.
8317 	 */
8318 	switch (flavor) {
8319 	case FLAVOR_IO_PHYSICAL_WRITES:
8320 		ledger_get_entry_info(task->ledger, task_ledgers.physical_writes, &lei);
8321 		break;
8322 	}
8323 
8324 
8325 	/*
8326 	 * Disable the exception notification so we don't overwhelm
8327 	 * the listener with an endless stream of redundant exceptions.
8328 	 * TODO: detect whether another thread is already reporting the violation.
8329 	 */
8330 	uint32_t flags = IOMON_DISABLE;
8331 	task_io_monitor_ctl(task, &flags);
8332 
8333 	if (flavor == FLAVOR_IO_LOGICAL_WRITES) {
8334 		trace_resource_violation(RMON_LOGWRITES_VIOLATED, &lei);
8335 	}
8336 	os_log(OS_LOG_DEFAULT, "process [%d] caught causing excessive I/O (flavor: %d). Task I/O: %lld MB. [Limit : %lld MB per %lld secs]\n",
8337 	    pid, flavor, (lei.lei_balance / (1024 * 1024)), (lei.lei_limit / (1024 * 1024)), (lei.lei_refill_period / NSEC_PER_SEC));
8338 
8339 	kr = send_resource_violation(send_disk_writes_violation, task, &lei, kRNFlagsNone);
8340 	if (kr) {
8341 		printf("send_resource_violation(disk_writes, ...): error %#x\n", kr);
8342 	}
8343 
8344 #ifdef EXC_RESOURCE_MONITORS
8345 	code[0] = code[1] = 0;
8346 	EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_IO);
8347 	EXC_RESOURCE_ENCODE_FLAVOR(code[0], flavor);
8348 	EXC_RESOURCE_IO_ENCODE_INTERVAL(code[0], (lei.lei_refill_period / NSEC_PER_SEC));
8349 	EXC_RESOURCE_IO_ENCODE_LIMIT(code[0], (lei.lei_limit / (1024 * 1024)));
8350 	EXC_RESOURCE_IO_ENCODE_OBSERVED(code[1], (lei.lei_balance / (1024 * 1024)));
8351 	exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
8352 #endif /* EXC_RESOURCE_MONITORS */
8353 }
8354 
8355 void
8356 task_port_space_ast(__unused task_t task)
8357 {
8358 	uint32_t current_size, soft_limit, hard_limit;
8359 	assert(task == current_task());
8360 	bool should_notify = ipc_space_check_table_size_limit(task->itk_space,
8361 	    &current_size, &soft_limit, &hard_limit);
8362 	if (should_notify) {
8363 		SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_MACH_PORTS(task, current_size, soft_limit, hard_limit);
8364 	}
8365 }
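/*
 * Flow sketch (inferred from the AST naming): when the IPC entry table
 * grows past a limit, the thread is flagged for an AST; on delivery this
 * routine re-checks the space via ipc_space_check_table_size_limit() and
 * only then raises the notification, from a context that holds no IPC
 * space locks.
 */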
8366 
8367 #if CONFIG_PROC_RESOURCE_LIMITS
8368 static mach_port_t
8369 task_allocate_fatal_port(void)
8370 {
8371 	mach_port_t task_fatal_port = MACH_PORT_NULL;
8372 	task_id_token_t token;
8373 
8374 	kern_return_t kr = task_create_identity_token(current_task(), &token); /* Takes a reference on the token */
8375 	if (kr) {
8376 		return MACH_PORT_NULL;
8377 	}
8378 	task_fatal_port = ipc_kobject_alloc_port((ipc_kobject_t)token, IKOT_TASK_FATAL,
8379 	    IPC_KOBJECT_ALLOC_NSREQUEST | IPC_KOBJECT_ALLOC_MAKE_SEND);
8380 
8381 	task_id_token_set_port(token, task_fatal_port);
8382 
8383 	return task_fatal_port;
8384 }
8385 
8386 static void
8387 task_fatal_port_no_senders(ipc_port_t port, __unused mach_port_mscount_t mscount)
8388 {
8389 	task_t task = TASK_NULL;
8390 	kern_return_t kr;
8391 
8392 	task_id_token_t token = ipc_kobject_get_stable(port, IKOT_TASK_FATAL);
8393 
8394 	assert(token != NULL);
8395 	if (token) {
8396 		kr = task_identity_token_get_task_grp(token, &task, TASK_GRP_KERNEL); /* takes a reference on task */
8397 		if (task) {
8398 			task_bsdtask_kill(task);
8399 			task_deallocate(task);
8400 		}
8401 		task_id_token_release(token); /* consumes ref given by notification */
8402 	}
8403 }
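/*
 * Lifecycle sketch for the pair above: task_allocate_fatal_port() wraps a
 * task identity token in an IKOT_TASK_FATAL kobject port armed with a
 * no-senders request. Once the violation recipient drops its last send
 * right, the no-senders notification runs task_fatal_port_no_senders(),
 * which resolves the token back to the task and kills it; holding the
 * notification message does not defuse the kill.
 */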
8404 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
8405 
8406 void __attribute__((noinline))
8407 SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_MACH_PORTS(task_t task, uint32_t current_size, uint32_t soft_limit, uint32_t hard_limit)
8408 {
8409 	int pid = 0;
8410 	char *procname = (char *) "unknown";
8411 	__unused kern_return_t kr;
8412 	__unused resource_notify_flags_t flags = kRNFlagsNone;
8413 	__unused uint32_t limit;
8414 	__unused mach_port_t task_fatal_port = MACH_PORT_NULL;
8415 	mach_exception_data_type_t      code[EXCEPTION_CODE_MAX];
8416 
8417 	pid = proc_selfpid();
8418 	if (get_bsdtask_info(task) != NULL) {
8419 		procname = proc_name_address(get_bsdtask_info(task));
8420 	}
8421 
8422 	/*
8423 	 * Only kernel_task (pid 0) and launchd (pid 1) are allowed to
8424 	 * have a really large IPC space.
8425 	 */
8426 	if (pid == 0 || pid == 1) {
8427 		return;
8428 	}
8429 
8430 	os_log(OS_LOG_DEFAULT, "process %s[%d] caught allocating too many mach ports. \
8431 	    Num of ports allocated %u; \n", procname, pid, current_size);
8432 
8433 	/* Abort the process if it has hit the system-wide limit for ipc port table size */
8434 	if (!hard_limit && !soft_limit) {
8435 		code[0] = code[1] = 0;
8436 		EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_PORTS);
8437 		EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_PORT_SPACE_FULL);
8438 		EXC_RESOURCE_PORTS_ENCODE_PORTS(code[0], current_size);
8439 
8440 		exception_info_t info = {
8441 			.os_reason = OS_REASON_PORT_SPACE,
8442 			.exception_type = EXC_RESOURCE,
8443 			.mx_code = code[0],
8444 			.mx_subcode = code[1]
8445 		};
8446 
8447 		exit_with_mach_exception(current_proc(), info, PX_DEBUG_NO_HONOR);
8448 		return;
8449 	}
8450 
8451 #if CONFIG_PROC_RESOURCE_LIMITS
8452 	if (hard_limit > 0) {
8453 		flags |= kRNHardLimitFlag;
8454 		limit = hard_limit;
8455 		task_fatal_port = task_allocate_fatal_port();
8456 		if (!task_fatal_port) {
8457 			os_log(OS_LOG_DEFAULT, "process %s[%d] Unable to create task token ident object", procname, pid);
8458 			task_bsdtask_kill(task);
8459 		}
8460 	} else {
8461 		flags |= kRNSoftLimitFlag;
8462 		limit = soft_limit;
8463 	}
8464 
8465 	kr = send_resource_violation_with_fatal_port(send_port_space_violation, task, (int64_t)current_size, (int64_t)limit, task_fatal_port, flags);
8466 	if (kr) {
8467 		os_log(OS_LOG_DEFAULT, "send_resource_violation(ports, ...): error %#x\n", kr);
8468 	}
8469 	if (task_fatal_port) {
8470 		ipc_port_release_send(task_fatal_port);
8471 	}
8472 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
8473 }
8474 
8475 #if CONFIG_PROC_RESOURCE_LIMITS
8476 void
8477 task_kqworkloop_ast(task_t task, int current_size, int soft_limit, int hard_limit)
8478 {
8479 	assert(task == current_task());
8480 	return SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_KQWORKLOOPS(task, current_size, soft_limit, hard_limit);
8481 }
8482 
8483 void __attribute__((noinline))
8484 SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_KQWORKLOOPS(task_t task, int current_size, int soft_limit, int hard_limit)
8485 {
8486 	int pid = 0;
8487 	char *procname = (char *) "unknown";
8488 #ifdef MACH_BSD
8489 	pid = proc_selfpid();
8490 	if (get_bsdtask_info(task) != NULL) {
8491 		procname = proc_name_address(get_bsdtask_info(task));
8492 	}
8493 #endif
8494 	if (pid == 0 || pid == 1) {
8495 		return;
8496 	}
8497 
8498 	os_log(OS_LOG_DEFAULT, "process %s[%d] caught allocating too many kqworkloops. \
8499 	    Num of kqworkloops allocated %u; \n", procname, pid, current_size);
8500 
8501 	int limit = 0;
8502 	resource_notify_flags_t flags = kRNFlagsNone;
8503 	mach_port_t task_fatal_port = MACH_PORT_NULL;
8504 	if (hard_limit) {
8505 		flags |= kRNHardLimitFlag;
8506 		limit = hard_limit;
8507 
8508 		task_fatal_port = task_allocate_fatal_port();
8509 		if (task_fatal_port == MACH_PORT_NULL) {
8510 			os_log(OS_LOG_DEFAULT, "process %s[%d] Unable to create task token ident object", procname, pid);
8511 			task_bsdtask_kill(task);
8512 		}
8513 	} else {
8514 		flags |= kRNSoftLimitFlag;
8515 		limit = soft_limit;
8516 	}
8517 
8518 	kern_return_t kr;
8519 	kr = send_resource_violation_with_fatal_port(send_kqworkloops_violation, task, (int64_t)current_size, (int64_t)limit, task_fatal_port, flags);
8520 	if (kr) {
8521 		os_log(OS_LOG_DEFAULT, "send_resource_violation_with_fatal_port(kqworkloops, ...): error %#x\n", kr);
8522 	}
8523 	if (task_fatal_port) {
8524 		ipc_port_release_send(task_fatal_port);
8525 	}
8526 }
8527 
8528 
8529 void
8530 task_filedesc_ast(__unused task_t task, __unused int current_size, __unused int soft_limit, __unused int hard_limit)
8531 {
8532 	assert(task == current_task());
8533 	SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task, current_size, soft_limit, hard_limit);
8534 }
8535 
8536 void __attribute__((noinline))
8537 SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task_t task, int current_size, int soft_limit, int hard_limit)
8538 {
8539 	int pid = 0;
8540 	char *procname = (char *) "unknown";
8541 	kern_return_t kr;
8542 	resource_notify_flags_t flags = kRNFlagsNone;
8543 	int limit;
8544 	mach_port_t task_fatal_port = MACH_PORT_NULL;
8545 
8546 #ifdef MACH_BSD
8547 	pid = proc_selfpid();
8548 	if (get_bsdtask_info(task) != NULL) {
8549 		procname = proc_name_address(get_bsdtask_info(task));
8550 	}
8551 #endif
8552 	/*
8553 	 * Only kernel_task (pid 0) and launchd (pid 1) are allowed to
8554 	 * have a really large number of file descriptors.
8555 	 */
8556 	if (pid == 0 || pid == 1) {
8557 		return;
8558 	}
8559 
8560 	os_log(OS_LOG_DEFAULT, "process %s[%d] caught allocating too many file descriptors. \
8561 	    Num of fds allocated %u; \n", procname, pid, current_size);
8562 
8563 	if (hard_limit > 0) {
8564 		flags |= kRNHardLimitFlag;
8565 		limit = hard_limit;
8566 		task_fatal_port = task_allocate_fatal_port();
8567 		if (!task_fatal_port) {
8568 			os_log(OS_LOG_DEFAULT, "process %s[%d] Unable to create task token ident object", procname, pid);
8569 			task_bsdtask_kill(task);
8570 		}
8571 	} else {
8572 		flags |= kRNSoftLimitFlag;
8573 		limit = soft_limit;
8574 	}
8575 
8576 	kr = send_resource_violation_with_fatal_port(send_file_descriptors_violation, task, (int64_t)current_size, (int64_t)limit, task_fatal_port, flags);
8577 	if (kr) {
8578 		os_log(OS_LOG_DEFAULT, "send_resource_violation_with_fatal_port(filedesc, ...): error %#x\n", kr);
8579 	}
8580 	if (task_fatal_port) {
8581 		ipc_port_release_send(task_fatal_port);
8582 	}
8583 }
8584 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
8585 
8586 /* Placeholders for the task set/get voucher interfaces */
8587 kern_return_t
8588 task_get_mach_voucher(
8589 	task_t                  task,
8590 	mach_voucher_selector_t __unused which,
8591 	ipc_voucher_t           *voucher)
8592 {
8593 	if (TASK_NULL == task) {
8594 		return KERN_INVALID_TASK;
8595 	}
8596 
8597 	*voucher = NULL;
8598 	return KERN_SUCCESS;
8599 }
8600 
8601 kern_return_t
8602 task_set_mach_voucher(
8603 	task_t                  task,
8604 	ipc_voucher_t           __unused voucher)
8605 {
8606 	if (TASK_NULL == task) {
8607 		return KERN_INVALID_TASK;
8608 	}
8609 
8610 	return KERN_SUCCESS;
8611 }
8612 
8613 kern_return_t
8614 task_swap_mach_voucher(
8615 	__unused task_t         task,
8616 	__unused ipc_voucher_t  new_voucher,
8617 	ipc_voucher_t          *in_out_old_voucher)
8618 {
8619 	/*
8620 	 * Currently this function is only called from a MIG generated
8621 	 * routine which doesn't release the reference on the voucher
8622 	 * addressed by in_out_old_voucher. To avoid leaking this reference,
8623 	 * a call to release it has been added here.
8624 	 */
8625 	ipc_voucher_release(*in_out_old_voucher);
8626 	OS_ANALYZER_SUPPRESS("81787115") return KERN_NOT_SUPPORTED;
8627 }
8628 
8629 void
8630 task_set_gpu_denied(task_t task, boolean_t denied)
8631 {
8632 	task_lock(task);
8633 
8634 	if (denied) {
8635 		task->t_flags |= TF_GPU_DENIED;
8636 	} else {
8637 		task->t_flags &= ~TF_GPU_DENIED;
8638 	}
8639 
8640 	task_unlock(task);
8641 }
8642 
8643 boolean_t
8644 task_is_gpu_denied(task_t task)
8645 {
8646 	/* We don't need the lock to read this flag */
8647 	return (task->t_flags & TF_GPU_DENIED) ? TRUE : FALSE;
8648 }
8649 
8650 /*
8651  * Task policy termination uses this path to clear the bit the final time
8652  * during the termination flow, and the TASK_POLICY_TERMINATED bit guarantees
8653  * that it won't be changed again on a terminated task.
8654  */
8655 bool
8656 task_set_game_mode_locked(task_t task, bool enabled)
8657 {
8658 	task_lock_assert_owned(task);
8659 
8660 	if (enabled) {
8661 		assert(proc_get_effective_task_policy(task, TASK_POLICY_TERMINATED) == 0);
8662 	}
8663 
8664 	bool previously_enabled = task_get_game_mode(task);
8665 	bool needs_update = false;
8666 	uint32_t new_count = 0;
8667 
8668 	if (enabled) {
8669 		task->t_flags |= TF_GAME_MODE;
8670 	} else {
8671 		task->t_flags &= ~TF_GAME_MODE;
8672 	}
8673 
8674 	if (enabled && !previously_enabled) {
8675 		if (task_coalition_adjust_game_mode_count(task, 1, &new_count) && (new_count == 1)) {
8676 			needs_update = true;
8677 		}
8678 	} else if (!enabled && previously_enabled) {
8679 		if (task_coalition_adjust_game_mode_count(task, -1, &new_count) && (new_count == 0)) {
8680 			needs_update = true;
8681 		}
8682 	}
8683 
8684 	return needs_update;
8685 }
8686 
8687 void
8688 task_set_game_mode(task_t task, bool enabled)
8689 {
8690 	bool needs_update = false;
8691 
8692 	task_lock(task);
8693 
8694 	/* After termination, further updates are no longer effective */
8695 	if (proc_get_effective_task_policy(task, TASK_POLICY_TERMINATED) == 0) {
8696 		needs_update = task_set_game_mode_locked(task, enabled);
8697 	}
8698 
8699 	task_unlock(task);
8700 
8701 #if CONFIG_THREAD_GROUPS
8702 	if (needs_update) {
8703 		task_coalition_thread_group_game_mode_update(task);
8704 	}
8705 #endif /* CONFIG_THREAD_GROUPS */
8706 }
8707 
8708 bool
8709 task_get_game_mode(task_t task)
8710 {
8711 	/* We don't need the lock to read this flag */
8712 	return task->t_flags & TF_GAME_MODE;
8713 }
8714 
8715 bool
8716 task_set_carplay_mode_locked(task_t task, bool enabled)
8717 {
8718 	task_lock_assert_owned(task);
8719 
8720 	if (enabled) {
8721 		assert(proc_get_effective_task_policy(task, TASK_POLICY_TERMINATED) == 0);
8722 	}
8723 
8724 	bool previously_enabled = task_get_carplay_mode(task);
8725 	bool needs_update = false;
8726 	uint32_t new_count = 0;
8727 
8728 	if (enabled) {
8729 		task->t_flags |= TF_CARPLAY_MODE;
8730 	} else {
8731 		task->t_flags &= ~TF_CARPLAY_MODE;
8732 	}
8733 
8734 	if (enabled && !previously_enabled) {
8735 		if (task_coalition_adjust_carplay_mode_count(task, 1, &new_count) && (new_count == 1)) {
8736 			needs_update = true;
8737 		}
8738 	} else if (!enabled && previously_enabled) {
8739 		if (task_coalition_adjust_carplay_mode_count(task, -1, &new_count) && (new_count == 0)) {
8740 			needs_update = true;
8741 		}
8742 	}
8743 	return needs_update;
8744 }
8745 
8746 void
8747 task_set_carplay_mode(task_t task, bool enabled)
8748 {
8749 	bool needs_update = false;
8750 
8751 	task_lock(task);
8752 
8753 	/* After termination, further updates are no longer effective */
8754 	if (proc_get_effective_task_policy(task, TASK_POLICY_TERMINATED) == 0) {
8755 		needs_update = task_set_carplay_mode_locked(task, enabled);
8756 	}
8757 
8758 	task_unlock(task);
8759 
8760 #if CONFIG_THREAD_GROUPS
8761 	if (needs_update) {
8762 		task_coalition_thread_group_carplay_mode_update(task);
8763 	}
8764 #endif /* CONFIG_THREAD_GROUPS */
8765 }
8766 
8767 bool
8768 task_get_carplay_mode(task_t task)
8769 {
8770 	/* We don't need the lock to read this flag */
8771 	return task->t_flags & TF_CARPLAY_MODE;
8772 }
8773 
8774 uint64_t
8775 get_task_memory_region_count(task_t task)
8776 {
8777 	vm_map_t map;
8778 	map = (task == kernel_task) ? kernel_map: task->map;
8779 	return (uint64_t)get_map_nentries(map);
8780 }
8781 
8782 static void
8783 kdebug_trace_dyld_internal(uint32_t base_code,
8784     struct dyld_kernel_image_info *info)
8785 {
8786 	static_assert(sizeof(info->uuid) >= 16);
8787 
8788 #if defined(__LP64__)
8789 	uint64_t *uuid = (uint64_t *)&(info->uuid);
8790 
8791 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8792 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code), uuid[0],
8793 	    uuid[1], info->load_addr,
8794 	    (uint64_t)info->fsid.val[0] | ((uint64_t)info->fsid.val[1] << 32),
8795 	    0);
8796 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8797 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 1),
8798 	    (uint64_t)info->fsobjid.fid_objno |
8799 	    ((uint64_t)info->fsobjid.fid_generation << 32),
8800 	    0, 0, 0, 0);
8801 #else /* defined(__LP64__) */
8802 	uint32_t *uuid = (uint32_t *)&(info->uuid);
8803 
8804 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8805 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 2), uuid[0],
8806 	    uuid[1], uuid[2], uuid[3], 0);
8807 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8808 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 3),
8809 	    (uint32_t)info->load_addr, info->fsid.val[0], info->fsid.val[1],
8810 	    info->fsobjid.fid_objno, 0);
8811 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8812 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 4),
8813 	    info->fsobjid.fid_generation, 0, 0, 0, 0);
8814 #endif /* !defined(__LP64__) */
8815 }
8816 
8817 static kern_return_t
8818 kdebug_trace_dyld(task_t task, uint32_t base_code,
8819     vm_map_copy_t infos_copy, mach_msg_type_number_t infos_len)
8820 {
8821 	kern_return_t kr;
8822 	dyld_kernel_image_info_array_t infos;
8823 	vm_map_offset_t map_data;
8824 	vm_offset_t data;
8825 
8826 	if (!infos_copy) {
8827 		return KERN_INVALID_ADDRESS;
8828 	}
8829 
8830 	if (!kdebug_enable ||
8831 	    !kdebug_debugid_enabled(KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, 0))) {
8832 		vm_map_copy_discard(infos_copy);
8833 		return KERN_SUCCESS;
8834 	}
8835 
8836 	if (task == NULL || task != current_task()) {
8837 		return KERN_INVALID_TASK;
8838 	}
8839 
8840 	kr = vm_map_copyout(ipc_kernel_map, &map_data, (vm_map_copy_t)infos_copy);
8841 	if (kr != KERN_SUCCESS) {
8842 		return kr;
8843 	}
8844 
8845 	infos = CAST_DOWN(dyld_kernel_image_info_array_t, map_data);
8846 
8847 	for (mach_msg_type_number_t i = 0; i < infos_len; i++) {
8848 		kdebug_trace_dyld_internal(base_code, &(infos[i]));
8849 	}
8850 
8851 	data = CAST_DOWN(vm_offset_t, map_data);
8852 	mach_vm_deallocate(ipc_kernel_map, data, infos_len * sizeof(infos[0]));
8853 	return KERN_SUCCESS;
8854 }
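/*
 * Usage note: the MIG wrappers below feed this routine the
 * DBG_DYLD_UUID_MAP_A and DBG_DYLD_UUID_UNMAP_A base codes. When kdebug
 * tracing is off, the copied-in infos array is discarded immediately;
 * otherwise it is copied out into ipc_kernel_map, traced one entry at a
 * time, and then deallocated.
 */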
8855 
8856 kern_return_t
8857 task_register_dyld_image_infos(task_t task,
8858     dyld_kernel_image_info_array_t infos_copy,
8859     mach_msg_type_number_t infos_len)
8860 {
8861 	return kdebug_trace_dyld(task, DBG_DYLD_UUID_MAP_A,
8862 	           (vm_map_copy_t)infos_copy, infos_len);
8863 }
8864 
8865 kern_return_t
8866 task_unregister_dyld_image_infos(task_t task,
8867     dyld_kernel_image_info_array_t infos_copy,
8868     mach_msg_type_number_t infos_len)
8869 {
8870 	return kdebug_trace_dyld(task, DBG_DYLD_UUID_UNMAP_A,
8871 	           (vm_map_copy_t)infos_copy, infos_len);
8872 }
8873 
8874 kern_return_t
8875 task_get_dyld_image_infos(__unused task_t task,
8876     __unused dyld_kernel_image_info_array_t * dyld_images,
8877     __unused mach_msg_type_number_t * dyld_imagesCnt)
8878 {
8879 	return KERN_NOT_SUPPORTED;
8880 }
8881 
8882 kern_return_t
8883 task_register_dyld_shared_cache_image_info(task_t task,
8884     dyld_kernel_image_info_t cache_img,
8885     __unused boolean_t no_cache,
8886     __unused boolean_t private_cache)
8887 {
8888 	if (task == NULL || task != current_task()) {
8889 		return KERN_INVALID_TASK;
8890 	}
8891 
8892 	kdebug_trace_dyld_internal(DBG_DYLD_UUID_SHARED_CACHE_A, &cache_img);
8893 	return KERN_SUCCESS;
8894 }
8895 
8896 kern_return_t
8897 task_register_dyld_set_dyld_state(__unused task_t task,
8898     __unused uint8_t dyld_state)
8899 {
8900 	return KERN_NOT_SUPPORTED;
8901 }
8902 
8903 kern_return_t
8904 task_register_dyld_get_process_state(__unused task_t task,
8905     __unused dyld_kernel_process_info_t * dyld_process_state)
8906 {
8907 	return KERN_NOT_SUPPORTED;
8908 }
8909 
8910 kern_return_t
8911 task_inspect(task_inspect_t task_insp, task_inspect_flavor_t flavor,
8912     task_inspect_info_t info_out, mach_msg_type_number_t *size_in_out)
8913 {
8914 #if CONFIG_PERVASIVE_CPI
8915 	task_t task = (task_t)task_insp;
8916 	kern_return_t kr = KERN_SUCCESS;
8917 	mach_msg_type_number_t size;
8918 
8919 	if (task == TASK_NULL) {
8920 		return KERN_INVALID_ARGUMENT;
8921 	}
8922 
8923 	size = *size_in_out;
8924 
8925 	switch (flavor) {
8926 	case TASK_INSPECT_BASIC_COUNTS: {
8927 		struct task_inspect_basic_counts *bc =
8928 		    (struct task_inspect_basic_counts *)info_out;
8929 		struct recount_usage stats = { 0 };
8930 		if (size < TASK_INSPECT_BASIC_COUNTS_COUNT) {
8931 			kr = KERN_INVALID_ARGUMENT;
8932 			break;
8933 		}
8934 
8935 		recount_sum(&recount_task_plan, task->tk_recount.rtk_lifetime, &stats);
8936 		bc->instructions = recount_usage_instructions(&stats);
8937 		bc->cycles = recount_usage_cycles(&stats);
8938 		size = TASK_INSPECT_BASIC_COUNTS_COUNT;
8939 		break;
8940 	}
8941 	default:
8942 		kr = KERN_INVALID_ARGUMENT;
8943 		break;
8944 	}
8945 
8946 	if (kr == KERN_SUCCESS) {
8947 		*size_in_out = size;
8948 	}
8949 	return kr;
8950 #else /* CONFIG_PERVASIVE_CPI */
8951 #pragma unused(task_insp, flavor, info_out, size_in_out)
8952 	return KERN_NOT_SUPPORTED;
8953 #endif /* !CONFIG_PERVASIVE_CPI */
8954 }
8955 
8956 #if CONFIG_SECLUDED_MEMORY
8957 int num_tasks_can_use_secluded_mem = 0;
8958 
8959 void
8960 task_set_can_use_secluded_mem(
8961 	task_t          task,
8962 	boolean_t       can_use_secluded_mem)
8963 {
8964 	if (!task->task_could_use_secluded_mem) {
8965 		return;
8966 	}
8967 	task_lock(task);
8968 	task_set_can_use_secluded_mem_locked(task, can_use_secluded_mem);
8969 	task_unlock(task);
8970 }
8971 
8972 void
8973 task_set_can_use_secluded_mem_locked(
8974 	task_t          task,
8975 	boolean_t       can_use_secluded_mem)
8976 {
8977 	assert(task->task_could_use_secluded_mem);
8978 	if (can_use_secluded_mem &&
8979 	    secluded_for_apps &&         /* global boot-arg */
8980 	    !task->task_can_use_secluded_mem) {
8981 		assert(num_tasks_can_use_secluded_mem >= 0);
8982 		OSAddAtomic(+1,
8983 		    (volatile SInt32 *)&num_tasks_can_use_secluded_mem);
8984 		task->task_can_use_secluded_mem = TRUE;
8985 	} else if (!can_use_secluded_mem &&
8986 	    task->task_can_use_secluded_mem) {
8987 		assert(num_tasks_can_use_secluded_mem > 0);
8988 		OSAddAtomic(-1,
8989 		    (volatile SInt32 *)&num_tasks_can_use_secluded_mem);
8990 		task->task_can_use_secluded_mem = FALSE;
8991 	}
8992 }
8993 
8994 void
8995 task_set_could_use_secluded_mem(
8996 	task_t          task,
8997 	boolean_t       could_use_secluded_mem)
8998 {
8999 	task->task_could_use_secluded_mem = !!could_use_secluded_mem;
9000 }
9001 
9002 void
9003 task_set_could_also_use_secluded_mem(
9004 	task_t          task,
9005 	boolean_t       could_also_use_secluded_mem)
9006 {
9007 	task->task_could_also_use_secluded_mem = !!could_also_use_secluded_mem;
9008 }
9009 
9010 boolean_t
9011 task_can_use_secluded_mem(
9012 	task_t          task,
9013 	boolean_t       is_alloc)
9014 {
9015 	if (task->task_can_use_secluded_mem) {
9016 		assert(task->task_could_use_secluded_mem);
9017 		assert(num_tasks_can_use_secluded_mem > 0);
9018 		return TRUE;
9019 	}
9020 	if (task->task_could_also_use_secluded_mem &&
9021 	    num_tasks_can_use_secluded_mem > 0) {
9022 		assert(num_tasks_can_use_secluded_mem > 0);
9023 		return TRUE;
9024 	}
9025 
9026 	/*
9027 	 * If a single task is using more than some large amount of
9028 	 * memory (i.e. secluded_shutoff_trigger) and is approaching
9029 	 * its task limit, allow it to dip into secluded and begin
9030 	 * suppression of rebuilding secluded memory until that task exits.
9031 	 */
9032 	if (is_alloc && secluded_shutoff_trigger != 0) {
9033 		uint64_t phys_used = get_task_phys_footprint(task);
9034 		uint64_t limit = get_task_phys_footprint_limit(task);
9035 		if (phys_used > secluded_shutoff_trigger &&
9036 		    limit > secluded_shutoff_trigger &&
9037 		    phys_used > limit - secluded_shutoff_headroom) {
9038 			start_secluded_suppression(task);
9039 			return TRUE;
9040 		}
9041 	}
9042 
9043 	return FALSE;
9044 }
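/*
 * Numeric sketch (hypothetical values): with secluded_shutoff_trigger at
 * 500 MB, secluded_shutoff_headroom at 100 MB, and a task footprint limit
 * of 2 GB, an allocating task whose footprint exceeds 1.9 GB dips into
 * secluded memory and suppresses its rebuild until the task exits.
 */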
9045 
9046 boolean_t
9047 task_could_use_secluded_mem(
9048 	task_t  task)
9049 {
9050 	return task->task_could_use_secluded_mem;
9051 }
9052 
9053 boolean_t
9054 task_could_also_use_secluded_mem(
9055 	task_t  task)
9056 {
9057 	return task->task_could_also_use_secluded_mem;
9058 }
9059 #endif /* CONFIG_SECLUDED_MEMORY */
9060 
9061 queue_head_t *
9062 task_io_user_clients(task_t task)
9063 {
9064 	return &task->io_user_clients;
9065 }
9066 
9067 void
9068 task_set_message_app_suspended(task_t task, boolean_t enable)
9069 {
9070 	task->message_app_suspended = enable;
9071 }
9072 
9073 void
9074 task_copy_fields_for_exec(task_t dst_task, task_t src_task)
9075 {
9076 	dst_task->vtimers = src_task->vtimers;
9077 }
9078 
9079 #if DEVELOPMENT || DEBUG
9080 int vm_region_footprint = 0;
9081 #endif /* DEVELOPMENT || DEBUG */
9082 
9083 boolean_t
9084 task_self_region_footprint(void)
9085 {
9086 #if DEVELOPMENT || DEBUG
9087 	if (vm_region_footprint) {
9088 		/* system-wide override */
9089 		return TRUE;
9090 	}
9091 #endif /* DEVELOPMENT || DEBUG */
9092 	return current_task()->task_region_footprint;
9093 }
9094 
9095 void
9096 task_self_region_footprint_set(
9097 	boolean_t newval)
9098 {
9099 	task_t  curtask;
9100 
9101 	curtask = current_task();
9102 	task_lock(curtask);
9103 	if (newval) {
9104 		curtask->task_region_footprint = TRUE;
9105 	} else {
9106 		curtask->task_region_footprint = FALSE;
9107 	}
9108 	task_unlock(curtask);
9109 }
9110 
9111 int
9112 task_self_region_info_flags(void)
9113 {
9114 	return current_task()->task_region_info_flags;
9115 }
9116 
9117 kern_return_t
9118 task_self_region_info_flags_set(
9119 	int newval)
9120 {
9121 	task_t  curtask;
9122 	kern_return_t err = KERN_SUCCESS;
9123 
9124 	curtask = current_task();
9125 	task_lock(curtask);
9126 	curtask->task_region_info_flags = newval;
9127 	/* check for overflow (flag added without increasing bitfield size?) */
9128 	if (curtask->task_region_info_flags != newval) {
9129 		err = KERN_INVALID_ARGUMENT;
9130 	}
9131 	task_unlock(curtask);
9132 
9133 	return err;
9134 }
9135 
9136 void
9137 task_set_darkwake_mode(task_t task, boolean_t set_mode)
9138 {
9139 	assert(task);
9140 
9141 	task_lock(task);
9142 
9143 	if (set_mode) {
9144 		task->t_flags |= TF_DARKWAKE_MODE;
9145 	} else {
9146 		task->t_flags &= ~(TF_DARKWAKE_MODE);
9147 	}
9148 
9149 	task_unlock(task);
9150 }
9151 
9152 boolean_t
9153 task_get_darkwake_mode(task_t task)
9154 {
9155 	assert(task);
9156 	return (task->t_flags & TF_DARKWAKE_MODE) != 0;
9157 }
9158 
9159 /*
9160  * Set default behavior for task's control port and EXC_GUARD variants that have
9161  * settable behavior.
9162  *
9163  * Platform binaries typically have one behavior, third parties another -
9164  * but there are special exception we may need to account for.
9165  */
9166 void
9167 task_set_exc_guard_ctrl_port_default(
9168 	task_t task,
9169 	thread_t main_thread,
9170 	const char *name,
9171 	unsigned int namelen,
9172 	boolean_t is_simulated,
9173 	uint32_t platform,
9174 	uint32_t sdk)
9175 {
9176 	task_control_port_options_t opts = TASK_CONTROL_PORT_OPTIONS_NONE;
9177 
9178 	if (task_is_hardened_binary(task)) {
9179 		/* set exc guard default behavior for hardened binaries */
9180 		task->task_exc_guard = (task_exc_guard_default & TASK_EXC_GUARD_ALL);
9181 
9182 		if (1 == task_pid(task)) {
9183 			/* special flags for inittask - deliver every instance as a corpse */
9184 			task->task_exc_guard = _TASK_EXC_GUARD_ALL_CORPSE;
9185 		} else if (task_exc_guard_default & TASK_EXC_GUARD_HONOR_NAMED_DEFAULTS) {
9186 			/* honor by-name default setting overrides */
9187 
9188 			int count = sizeof(task_exc_guard_named_defaults) / sizeof(struct task_exc_guard_named_default);
9189 
9190 			for (int i = 0; i < count; i++) {
9191 				const struct task_exc_guard_named_default *named_default =
9192 				    &task_exc_guard_named_defaults[i];
9193 				if (strncmp(named_default->name, name, namelen) == 0 &&
9194 				    strlen(named_default->name) == namelen) {
9195 					task->task_exc_guard = named_default->behavior;
9196 					break;
9197 				}
9198 			}
9199 		}
9200 
9201 		/* set control port options for 1p code, inherited from parent task by default */
9202 		opts = ipc_control_port_options & ICP_OPTIONS_1P_MASK;
9203 	} else {
9204 		/* set exc guard default behavior for third-party code */
9205 		task->task_exc_guard = ((task_exc_guard_default >> TASK_EXC_GUARD_THIRD_PARTY_DEFAULT_SHIFT) & TASK_EXC_GUARD_ALL);
9206 		/* set control port options for 3p code, inherited from parent task by default */
9207 		opts = (ipc_control_port_options & ICP_OPTIONS_3P_MASK) >> ICP_OPTIONS_3P_SHIFT;
9208 	}
9209 
9210 	if (is_simulated) {
9211 		/* If simulated and built against pre-iOS 15 SDK, disable all EXC_GUARD */
9212 		if ((platform == PLATFORM_IOSSIMULATOR && sdk < 0xf0000) ||
9213 		    (platform == PLATFORM_TVOSSIMULATOR && sdk < 0xf0000) ||
9214 		    (platform == PLATFORM_WATCHOSSIMULATOR && sdk < 0x80000)) {
9215 			task->task_exc_guard = TASK_EXC_GUARD_NONE;
9216 		}
9217 		/* Disable protection for control ports for simulated binaries */
9218 		opts = TASK_CONTROL_PORT_OPTIONS_NONE;
9219 	}
9220 
9221 
9222 	task_set_control_port_options(task, opts);
9223 
9224 	task_set_immovable_pinned(task);
9225 	main_thread_set_immovable_pinned(main_thread);
9226 }
9227 
9228 kern_return_t
9229 task_get_exc_guard_behavior(
9230 	task_t task,
9231 	task_exc_guard_behavior_t *behaviorp)
9232 {
9233 	if (task == TASK_NULL) {
9234 		return KERN_INVALID_TASK;
9235 	}
9236 	*behaviorp = task->task_exc_guard;
9237 	return KERN_SUCCESS;
9238 }
9239 
9240 kern_return_t
9241 task_set_exc_guard_behavior(
9242 	task_t task,
9243 	task_exc_guard_behavior_t new_behavior)
9244 {
9245 	if (task == TASK_NULL) {
9246 		return KERN_INVALID_TASK;
9247 	}
9248 	if (new_behavior & ~TASK_EXC_GUARD_ALL) {
9249 		return KERN_INVALID_VALUE;
9250 	}
9251 
9252 	/* limit setting to that allowed for this config */
9253 	new_behavior = new_behavior & task_exc_guard_config_mask;
9254 
9255 #if !defined (DEBUG) && !defined (DEVELOPMENT)
9256 	/* On release kernels, only allow _upgrading_ exc guard behavior */
9257 	task_exc_guard_behavior_t cur_behavior;
9258 
9259 	os_atomic_rmw_loop(&task->task_exc_guard, cur_behavior, new_behavior, relaxed, {
9260 		if ((cur_behavior & task_exc_guard_no_unset_mask) & ~(new_behavior & task_exc_guard_no_unset_mask)) {
9261 		        os_atomic_rmw_loop_give_up(return KERN_DENIED);
9262 		}
9263 
9264 		if ((new_behavior & task_exc_guard_no_set_mask) & ~(cur_behavior & task_exc_guard_no_set_mask)) {
9265 		        os_atomic_rmw_loop_give_up(return KERN_DENIED);
9266 		}
9267 
9268 		/* no restrictions on CORPSE bit */
9269 	});
9270 #else
9271 	task->task_exc_guard = new_behavior;
9272 #endif
9273 	return KERN_SUCCESS;
9274 }
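/*
 * Illustrative consequence of the release-kernel policy above (assuming
 * the FATAL bits sit in task_exc_guard_no_unset_mask but not in
 * task_exc_guard_no_set_mask): a task may add TASK_EXC_GUARD_MP_FATAL to
 * its current behavior, but a later attempt to clear it returns
 * KERN_DENIED; only the CORPSE bit may be toggled freely.
 */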
9275 
9276 kern_return_t
9277 task_set_corpse_forking_behavior(task_t task, task_corpse_forking_behavior_t behavior)
9278 {
9279 #if DEVELOPMENT || DEBUG
9280 	if (task == TASK_NULL) {
9281 		return KERN_INVALID_TASK;
9282 	}
9283 
9284 	task_lock(task);
9285 	if (behavior & TASK_CORPSE_FORKING_DISABLED_MEM_DIAG) {
9286 		task->t_flags |= TF_NO_CORPSE_FORKING;
9287 	} else {
9288 		task->t_flags &= ~TF_NO_CORPSE_FORKING;
9289 	}
9290 	task_unlock(task);
9291 
9292 	return KERN_SUCCESS;
9293 #else
9294 	(void)task;
9295 	(void)behavior;
9296 	return KERN_NOT_SUPPORTED;
9297 #endif
9298 }
9299 
9300 boolean_t
9301 task_corpse_forking_disabled(task_t task)
9302 {
9303 	boolean_t disabled = FALSE;
9304 
9305 	task_lock(task);
9306 	disabled = (task->t_flags & TF_NO_CORPSE_FORKING);
9307 	task_unlock(task);
9308 
9309 	return disabled;
9310 }
9311 
9312 #if __arm64__
9313 extern int legacy_footprint_entitlement_mode;
9314 extern void memorystatus_act_on_legacy_footprint_entitlement(struct proc *, boolean_t);
9315 extern void memorystatus_act_on_ios13extended_footprint_entitlement(struct proc *);
9316 
9317 
9318 void
9319 task_set_legacy_footprint(
9320 	task_t task)
9321 {
9322 	task_lock(task);
9323 	task->task_legacy_footprint = TRUE;
9324 	task_unlock(task);
9325 }
9326 
9327 void
9328 task_set_extra_footprint_limit(
9329 	task_t task)
9330 {
9331 	if (task->task_extra_footprint_limit) {
9332 		return;
9333 	}
9334 	task_lock(task);
9335 	if (task->task_extra_footprint_limit) {
9336 		task_unlock(task);
9337 		return;
9338 	}
9339 	task->task_extra_footprint_limit = TRUE;
9340 	task_unlock(task);
9341 	memorystatus_act_on_legacy_footprint_entitlement(get_bsdtask_info(task), TRUE);
9342 }
9343 
9344 void
9345 task_set_ios13extended_footprint_limit(
9346 	task_t task)
9347 {
9348 	if (task->task_ios13extended_footprint_limit) {
9349 		return;
9350 	}
9351 	task_lock(task);
9352 	if (task->task_ios13extended_footprint_limit) {
9353 		task_unlock(task);
9354 		return;
9355 	}
9356 	task->task_ios13extended_footprint_limit = TRUE;
9357 	task_unlock(task);
9358 	memorystatus_act_on_ios13extended_footprint_entitlement(get_bsdtask_info(task));
9359 }
9360 #endif /* __arm64__ */
9361 
9362 static inline ledger_amount_t
9363 task_ledger_get_balance(
9364 	ledger_t        ledger,
9365 	int             ledger_idx)
9366 {
9367 	ledger_amount_t amount;
9368 	amount = 0;
9369 	ledger_get_balance(ledger, ledger_idx, &amount);
9370 	return amount;
9371 }
9372 
9373 /*
9374  * Gather the amount of memory counted in a task's footprint due to
9375  * being in a specific set of ledgers.
9376  */
9377 void
9378 task_ledgers_footprint(
9379 	ledger_t        ledger,
9380 	ledger_amount_t *ledger_resident,
9381 	ledger_amount_t *ledger_compressed)
9382 {
9383 	*ledger_resident = 0;
9384 	*ledger_compressed = 0;
9385 
9386 	/* purgeable non-volatile memory */
9387 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.purgeable_nonvolatile);
9388 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.purgeable_nonvolatile_compressed);
9389 
9390 	/* "default" tagged memory */
9391 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.tagged_footprint);
9392 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.tagged_footprint_compressed);
9393 
9394 	/* "network" currently never counts in the footprint... */
9395 
9396 	/* "media" tagged memory */
9397 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.media_footprint);
9398 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.media_footprint_compressed);
9399 
9400 	/* "graphics" tagged memory */
9401 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.graphics_footprint);
9402 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.graphics_footprint_compressed);
9403 
9404 	/* "neural" tagged memory */
9405 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.neural_footprint);
9406 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.neural_footprint_compressed);
9407 }
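/*
 * Worked example (hypothetical numbers): a task with 8 MB resident and
 * 2 MB compressed purgeable non-volatile memory plus 4 MB of resident
 * "graphics" tagged memory reports *ledger_resident = 12 MB and
 * *ledger_compressed = 2 MB; the footprint charge from these ledgers is
 * the sum of the two.
 */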
9408 
9409 #if CONFIG_MEMORYSTATUS
9410 /*
9411  * Credit any outstanding task dirty time to the ledger.
9412  * memstat_dirty_start is pushed forward to prevent any possibility of double
9413  * counting, making it safe to call this as often as necessary to ensure that
9414  * anyone reading the ledger gets up-to-date information.
9415  */
9416 void
9417 task_ledger_settle_dirty_time(task_t t)
9418 {
9419 	task_lock(t);
9420 
9421 	uint64_t start = t->memstat_dirty_start;
9422 	if (start) {
9423 		uint64_t now = mach_absolute_time();
9424 
9425 		uint64_t duration;
9426 		absolutetime_to_nanoseconds(now - start, &duration);
9427 
9428 		ledger_t ledger = get_task_ledger(t);
9429 		ledger_credit(ledger, task_ledgers.memorystatus_dirty_time, duration);
9430 
9431 		t->memstat_dirty_start = now;
9432 	}
9433 
9434 	task_unlock(t);
9435 }
9436 #endif /* CONFIG_MEMORYSTATUS */
9437 
9438 void
9439 task_set_memory_ownership_transfer(
9440 	task_t    task,
9441 	boolean_t value)
9442 {
9443 	task_lock(task);
9444 	task->task_can_transfer_memory_ownership = !!value;
9445 	task_unlock(task);
9446 }
9447 
9448 #if DEVELOPMENT || DEBUG
9449 
9450 void
9451 task_set_no_footprint_for_debug(task_t task, boolean_t value)
9452 {
9453 	task_lock(task);
9454 	task->task_no_footprint_for_debug = !!value;
9455 	task_unlock(task);
9456 }
9457 
9458 int
9459 task_get_no_footprint_for_debug(task_t task)
9460 {
9461 	return task->task_no_footprint_for_debug;
9462 }
9463 
9464 #endif /* DEVELOPMENT || DEBUG */
9465 
9466 void
9467 task_copy_vmobjects(task_t task, vm_object_query_t query, size_t len, size_t *num)
9468 {
9469 	vm_object_t find_vmo;
9470 	size_t size = 0;
9471 
9472 	/*
9473 	 * Allocate a save area for FP state before taking task_objq lock,
9474 	 * if necessary, to ensure that VM_KERNEL_ADDRHASH() doesn't cause
9475 	 * an FP state allocation while holding VM locks.
9476 	 */
9477 	ml_fp_save_area_prealloc();
9478 
9479 	task_objq_lock(task);
9480 	if (query != NULL) {
9481 		queue_iterate(&task->task_objq, find_vmo, vm_object_t, task_objq)
9482 		{
9483 			vm_object_query_t p = &query[size++];
9484 
9485 			/* make sure to not overrun */
9486 			if (size * sizeof(vm_object_query_data_t) > len) {
9487 				--size;
9488 				break;
9489 			}
9490 
9491 			bzero(p, sizeof(*p));
9492 			p->object_id = (vm_object_id_t) VM_KERNEL_ADDRHASH(find_vmo);
9493 			p->virtual_size = find_vmo->internal ? find_vmo->vo_size : 0;
9494 			p->resident_size = find_vmo->resident_page_count * PAGE_SIZE;
9495 			p->wired_size = find_vmo->wired_page_count * PAGE_SIZE;
9496 			p->reusable_size = find_vmo->reusable_page_count * PAGE_SIZE;
9497 			p->vo_no_footprint = find_vmo->vo_no_footprint;
9498 			p->vo_ledger_tag = find_vmo->vo_ledger_tag;
9499 			p->purgable = find_vmo->purgable;
9500 
9501 			if (find_vmo->internal && find_vmo->pager_created && find_vmo->pager != NULL) {
9502 				p->compressed_size = vm_compressor_pager_get_count(find_vmo->pager) * PAGE_SIZE;
9503 			} else {
9504 				p->compressed_size = 0;
9505 			}
9506 		}
9507 	} else {
9508 		size = (size_t)task->task_owned_objects;
9509 	}
9510 	task_objq_unlock(task);
9511 
9512 	*num = size;
9513 }
9514 
9515 void
9516 task_get_owned_vmobjects(task_t task, size_t buffer_size, vmobject_list_output_t buffer, size_t* output_size, size_t* entries)
9517 {
9518 	assert(output_size);
9519 	assert(entries);
9520 
9521 	/* copy the vmobjects and vmobject data out of the task */
9522 	if (buffer_size == 0) {
9523 		task_copy_vmobjects(task, NULL, 0, entries);
9524 		*output_size = (*entries > 0) ? *entries * sizeof(vm_object_query_data_t) + sizeof(*buffer) : 0;
9525 	} else {
9526 		assert(buffer);
9527 		task_copy_vmobjects(task, &buffer->data[0], buffer_size - sizeof(*buffer), entries);
9528 		buffer->entries = (uint64_t)*entries;
9529 		*output_size = *entries * sizeof(vm_object_query_data_t) + sizeof(*buffer);
9530 	}
9531 }
9532 
9533 void
9534 task_store_owned_vmobject_info(task_t to_task, task_t from_task)
9535 {
9536 	size_t buffer_size;
9537 	vmobject_list_output_t buffer;
9538 	size_t output_size;
9539 	size_t entries;
9540 
9541 	assert(to_task != from_task);
9542 
9543 	/* get the size, allocate a buffer, and populate */
9544 	entries = 0;
9545 	output_size = 0;
9546 	task_get_owned_vmobjects(from_task, 0, NULL, &output_size, &entries);
9547 
9548 	if (output_size) {
9549 		buffer_size = output_size;
9550 		buffer = kalloc_data(buffer_size, Z_WAITOK);
9551 
9552 		if (buffer) {
9553 			entries = 0;
9554 			output_size = 0;
9555 
9556 			task_get_owned_vmobjects(from_task, buffer_size, buffer, &output_size, &entries);
9557 
9558 			if (entries) {
9559 				to_task->corpse_vmobject_list = buffer;
9560 				to_task->corpse_vmobject_list_size = buffer_size;
9561 			}
9562 		}
9563 	}
9564 }
9565 
9566 void
9567 task_set_filter_msg_flag(
9568 	task_t task,
9569 	boolean_t flag)
9570 {
9571 	assert(task != TASK_NULL);
9572 
9573 	if (flag) {
9574 		task_ro_flags_set(task, TFRO_FILTER_MSG);
9575 	} else {
9576 		task_ro_flags_clear(task, TFRO_FILTER_MSG);
9577 	}
9578 }
9579 
9580 boolean_t
9581 task_get_filter_msg_flag(
9582 	task_t task)
9583 {
9584 	if (!task) {
9585 		return false;
9586 	}
9587 
9588 	return (task_ro_flags_get(task) & TFRO_FILTER_MSG) ? TRUE : FALSE;
9589 }
9590 bool
9591 task_is_exotic(
9592 	task_t task)
9593 {
9594 	if (task == TASK_NULL) {
9595 		return false;
9596 	}
9597 	return vm_map_is_exotic(get_task_map(task));
9598 }
9599 
9600 bool
9601 task_is_alien(
9602 	task_t task)
9603 {
9604 	if (task == TASK_NULL) {
9605 		return false;
9606 	}
9607 	return vm_map_is_alien(get_task_map(task));
9608 }
9609 
9610 
9611 
9612 #if CONFIG_MACF
9613 uint8_t *
9614 mac_task_get_mach_filter_mask(task_t task)
9615 {
9616 	assert(task);
9617 	return task_get_mach_trap_filter_mask(task);
9618 }
9619 
9620 uint8_t *
9621 mac_task_get_kobj_filter_mask(task_t task)
9622 {
9623 	assert(task);
9624 	return task_get_mach_kobj_filter_mask(task);
9625 }
9626 
9627 /* Set the filter mask for Mach traps. */
9628 void
9629 mac_task_set_mach_filter_mask(task_t task, uint8_t *maskptr)
9630 {
9631 	assert(task);
9632 
9633 	task_set_mach_trap_filter_mask(task, maskptr);
9634 }
9635 
9636 /* Set the filter mask for kobject msgs. */
9637 void
9638 mac_task_set_kobj_filter_mask(task_t task, uint8_t *maskptr)
9639 {
9640 	assert(task);
9641 
9642 	task_set_mach_kobj_filter_mask(task, maskptr);
9643 }
9644 
9645 /* Hook for mach trap/sc filter evaluation policy. */
9646 SECURITY_READ_ONLY_LATE(mac_task_mach_filter_cbfunc_t) mac_task_mach_trap_evaluate = NULL;
9647 
9648 /* Hook for kobj message filter evaluation policy. */
9649 SECURITY_READ_ONLY_LATE(mac_task_kobj_filter_cbfunc_t) mac_task_kobj_msg_evaluate = NULL;
9650 
9651 /* Set the callback hooks for the filtering policy. */
9652 int
9653 mac_task_register_filter_callbacks(
9654 	const mac_task_mach_filter_cbfunc_t mach_cbfunc,
9655 	const mac_task_kobj_filter_cbfunc_t kobj_cbfunc)
9656 {
9657 	if (mach_cbfunc != NULL) {
9658 		if (mac_task_mach_trap_evaluate != NULL) {
9659 			return KERN_FAILURE;
9660 		}
9661 		mac_task_mach_trap_evaluate = mach_cbfunc;
9662 	}
9663 	if (kobj_cbfunc != NULL) {
9664 		if (mac_task_kobj_msg_evaluate != NULL) {
9665 			return KERN_FAILURE;
9666 		}
9667 		mac_task_kobj_msg_evaluate = kobj_cbfunc;
9668 	}
9669 
9670 	return KERN_SUCCESS;
9671 }
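
/*
 * Illustrative sketch (hypothetical policy code, not part of this file):
 * a filtering policy registers its evaluation hooks once, early in boot.
 * A second registration attempt returns KERN_FAILURE because each hook may
 * be claimed at most once. The callback name below is made up; its real
 * signature is whatever mac_task_mach_filter_cbfunc_t declares.
 *
 *	static int
 *	my_mach_trap_eval(...)	// signature per mac_task_mach_filter_cbfunc_t
 *	{
 *		return 0;	// allow
 *	}
 *
 *	if (mac_task_register_filter_callbacks(my_mach_trap_eval, NULL) != KERN_SUCCESS) {
 *		// hooks already registered by another policy
 *	}
 */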
#endif /* CONFIG_MACF */

#if CONFIG_ROSETTA
bool
task_is_translated(task_t task)
{
	extern boolean_t proc_is_translated(struct proc* p);
	return task && proc_is_translated(get_bsdtask_info(task));
}
#endif


#if __has_feature(ptrauth_calls)
/* On FPAC, we want to deliver all PAC violations as fatal exceptions, regardless
 * of the enable_pac_exception boot-arg value or any other entitlements.
 * The only case where we allow non-fatal PAC exceptions on FPAC is for debugging,
 * which requires Developer Mode enabled.
 *
 * On non-FPAC hardware, we gate the decision behind entitlements and the
 * enable_pac_exception boot-arg.
 */
extern int gARM_FEAT_FPAC;
/*
 * Having the PAC_EXCEPTION_ENTITLEMENT entitlement means we always enforce all
 * of the PAC exception hardening: fatal exceptions and signed user state.
 */
#define PAC_EXCEPTION_ENTITLEMENT "com.apple.private.pac.exception"
/*
 * On non-FPAC hardware, when the enable_pac_exception boot-arg is set to true,
 * processes can choose to get non-fatal PAC exception delivery by setting
 * the SKIP_PAC_EXCEPTION_ENTITLEMENT entitlement.
 */
#define SKIP_PAC_EXCEPTION_ENTITLEMENT "com.apple.private.skip.pac.exception"

void
task_set_pac_exception_fatal_flag(
	task_t task)
{
	assert(task != TASK_NULL);
	bool pac_hardened_task = false;
	uint32_t set_flags = 0;

	/*
	 * We must not apply this security policy to tasks that have opted out
	 * of mach hardening, to avoid regressions in third-party plugins and
	 * third-party apps when using AMFI boot-args.
	 */
	bool platform_binary = task_get_platform_binary(task);
#if XNU_TARGET_OS_OSX
	platform_binary &= !task_opted_out_mach_hardening(task);
#endif /* XNU_TARGET_OS_OSX */

	/*
	 * On non-FPAC hardware, we allow gating PAC exceptions behind
	 * SKIP_PAC_EXCEPTION_ENTITLEMENT and the boot-arg.
	 */
	if (!gARM_FEAT_FPAC && enable_pac_exception &&
	    IOTaskHasEntitlement(task, SKIP_PAC_EXCEPTION_ENTITLEMENT)) {
		return;
	}

	if (IOTaskHasEntitlement(task, PAC_EXCEPTION_ENTITLEMENT) || task_get_hardened_runtime(task)) {
		pac_hardened_task = true;
		set_flags |= TFRO_PAC_ENFORCE_USER_STATE;
	}

	/* On non-FPAC hardware, gate the fatal property behind entitlements and the boot-arg. */
	if (pac_hardened_task ||
	    ((enable_pac_exception || gARM_FEAT_FPAC) && platform_binary)) {
		set_flags |= TFRO_PAC_EXC_FATAL;
	}

	if (set_flags != 0) {
		task_ro_flags_set(task, set_flags);
	}
}

bool
task_is_pac_exception_fatal(
	task_t task)
{
	assert(task != TASK_NULL);
	return !!(task_ro_flags_get(task) & TFRO_PAC_EXC_FATAL);
}
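
/*
 * Illustrative sketch (assumed caller, not part of this file): the
 * exception-delivery path is expected to consult the cached flag rather
 * than re-evaluating entitlements on every fault:
 *
 *	if (task_is_pac_exception_fatal(task)) {
 *		// deliver the PAC violation as a fatal exception
 *	} else {
 *		// leave the exception catchable, e.g. for debugging
 *	}
 */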
#endif /* __has_feature(ptrauth_calls) */

/*
 * FATAL_EXCEPTION_ENTITLEMENT, if present, will contain a list of
 * conditions for which access violations should deliver SIGKILL rather than
 * SIGSEGV.  This is a hardening measure intended for use by applications
 * that are able to handle the stricter error handling behavior.  Currently
 * this supports FATAL_EXCEPTION_ENTITLEMENT_JIT, which is documented in
 * user_fault_in_self_restrict_mode().
 */
#define FATAL_EXCEPTION_ENTITLEMENT "com.apple.security.fatal-exceptions"
#define FATAL_EXCEPTION_ENTITLEMENT_JIT "jit"

void
task_set_jit_exception_fatal_flag(
	task_t task)
{
	assert(task != TASK_NULL);
	if (IOTaskHasStringEntitlement(task, FATAL_EXCEPTION_ENTITLEMENT, FATAL_EXCEPTION_ENTITLEMENT_JIT)) {
		task_ro_flags_set(task, TFRO_JIT_EXC_FATAL);
	}
}

bool
task_is_jit_exception_fatal(
	__unused task_t task)
{
#if !defined(XNU_PLATFORM_MacOSX)
	return true;
#else
	assert(task != TASK_NULL);
	return !!(task_ro_flags_get(task) & TFRO_JIT_EXC_FATAL);
#endif
}
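
/*
 * Illustrative sketch (assumed entitlement shape): a process would opt in
 * by listing the "jit" condition under the fatal-exceptions entitlement in
 * its code-signing entitlements plist, e.g.:
 *
 *	<key>com.apple.security.fatal-exceptions</key>
 *	<array>
 *		<string>jit</string>
 *	</array>
 *
 * IOTaskHasStringEntitlement() above then matches the "jit" element and
 * marks JIT-region faults fatal for the task.
 */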

bool
task_needs_user_signed_thread_state(
	task_t task)
{
	assert(task != TASK_NULL);
	return !!(task_ro_flags_get(task) & TFRO_PAC_ENFORCE_USER_STATE);
}

void
task_set_tecs(task_t task)
{
	if (task == TASK_NULL) {
		task = current_task();
	}

	if (!machine_csv(CPUVN_CI)) {
		return;
	}

	LCK_MTX_ASSERT(&task->lock, LCK_MTX_ASSERT_NOTOWNED);

	task_lock(task);

	task->t_flags |= TF_TECS;

	thread_t thread;
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		machine_tecs(thread);
	}
	task_unlock(task);
}
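
/*
 * Illustrative usage (assumed caller): task_set_tecs() accepts TASK_NULL
 * as shorthand for the calling task, so an opt-in path can simply do:
 *
 *	task_set_tecs(TASK_NULL);	// enable TECS for the current task
 *
 * The flag is set under the task lock and then propagated to each thread
 * already present in the task.
 */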

kern_return_t
task_test_sync_upcall(
	task_t     task,
	ipc_port_t send_port)
{
#if DEVELOPMENT || DEBUG
	if (task != current_task() || !IPC_PORT_VALID(send_port)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Block on sync kernel upcall on the given send port */
	mach_test_sync_upcall(send_port);

	ipc_port_release_send(send_port);
	return KERN_SUCCESS;
#else
	(void)task;
	(void)send_port;
	return KERN_NOT_SUPPORTED;
#endif
}

kern_return_t
task_test_async_upcall_propagation(
	task_t      task,
	ipc_port_t  send_port,
	int         qos,
	int         iotier)
{
#if DEVELOPMENT || DEBUG
	kern_return_t kr;

	if (task != current_task() || !IPC_PORT_VALID(send_port)) {
		return KERN_INVALID_ARGUMENT;
	}

	if (qos < THREAD_QOS_DEFAULT || qos > THREAD_QOS_USER_INTERACTIVE ||
	    iotier < THROTTLE_LEVEL_START || iotier > THROTTLE_LEVEL_END) {
		return KERN_INVALID_ARGUMENT;
	}

	struct thread_attr_for_ipc_propagation attr = {
		.tafip_iotier = iotier,
		.tafip_qos = qos
	};

	/* Apply propagate attr to port */
	kr = ipc_port_propagate_thread_attr(send_port, attr);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	thread_enable_send_importance(current_thread(), TRUE);

	/* Perform an async kernel upcall on the given send port */
	mach_test_async_upcall(send_port);
	thread_enable_send_importance(current_thread(), FALSE);

	ipc_port_release_send(send_port);
	return KERN_SUCCESS;
#else
	(void)task;
	(void)send_port;
	(void)qos;
	(void)iotier;
	return KERN_NOT_SUPPORTED;
#endif
}
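
/*
 * Illustrative usage (DEVELOPMENT/DEBUG kernels only; argument values are
 * examples): a test would pass its own task, a valid send right, and a
 * QoS/IO-tier pair inside the ranges checked above:
 *
 *	kr = task_test_async_upcall_propagation(current_task(), port,
 *	    THREAD_QOS_USER_INITIATED, THROTTLE_LEVEL_TIER1);
 */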

#if CONFIG_PROC_RESOURCE_LIMITS
mach_port_name_t
current_task_get_fatal_port_name(void)
{
	mach_port_t task_fatal_port = MACH_PORT_NULL;
	mach_port_name_t port_name = 0;

	task_fatal_port = task_allocate_fatal_port();

	if (task_fatal_port) {
		ipc_object_copyout(current_space(), ip_to_object(task_fatal_port), MACH_MSG_TYPE_PORT_SEND,
		    IPC_OBJECT_COPYOUT_FLAGS_NONE, NULL, NULL, &port_name);
	}

	return port_name;
}
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

#if defined(__x86_64__)
bool
curtask_get_insn_copy_optout(void)
{
	bool optout;
	task_t cur_task = current_task();

	task_lock(cur_task);
	optout = (cur_task->t_flags & TF_INSN_COPY_OPTOUT) ? true : false;
	task_unlock(cur_task);

	return optout;
}

void
curtask_set_insn_copy_optout(void)
{
	task_t cur_task = current_task();

	task_lock(cur_task);

	cur_task->t_flags |= TF_INSN_COPY_OPTOUT;

	thread_t thread;
	queue_iterate(&cur_task->threads, thread, thread_t, task_threads) {
		machine_thread_set_insn_copy_optout(thread);
	}
	task_unlock(cur_task);
}
#endif /* defined(__x86_64__) */

void
task_get_corpse_vmobject_list(task_t task, vmobject_list_output_t* list, size_t* list_size)
{
	assert(task);
	assert(list_size);

	*list = task->corpse_vmobject_list;
	*list_size = (size_t)task->corpse_vmobject_list_size;
}

__abortlike
static void
panic_proc_ro_task_backref_mismatch(task_t t, proc_ro_t ro)
{
	panic("proc_ro->task backref mismatch: t=%p, ro=%p, "
	    "proc_ro_task(ro)=%p", t, ro, proc_ro_task(ro));
}

proc_ro_t
task_get_ro(task_t t)
{
	proc_ro_t ro = (proc_ro_t)t->bsd_info_ro;

	zone_require_ro(ZONE_ID_PROC_RO, sizeof(struct proc_ro), ro);
	if (__improbable(proc_ro_task(ro) != t)) {
		panic_proc_ro_task_backref_mismatch(t, ro);
	}

	return ro;
}

uint32_t
task_ro_flags_get(task_t task)
{
	return task_get_ro(task)->t_flags_ro;
}

void
task_ro_flags_set(task_t task, uint32_t flags)
{
	zalloc_ro_update_field_atomic(ZONE_ID_PROC_RO, task_get_ro(task),
	    t_flags_ro, ZRO_ATOMIC_OR_32, flags);
}

void
task_ro_flags_clear(task_t task, uint32_t flags)
{
	zalloc_ro_update_field_atomic(ZONE_ID_PROC_RO, task_get_ro(task),
	    t_flags_ro, ZRO_ATOMIC_AND_32, ~flags);
}
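
/*
 * Illustrative note: t_flags_ro lives in a read-only zone, so bits are
 * flipped through the zalloc_ro atomic-update helpers above; a direct
 * store such as `ro->t_flags_ro |= flags` would fault. Callers elsewhere
 * in this file follow the same pattern, e.g.:
 *
 *	task_ro_flags_set(task, TFRO_FILTER_MSG);
 *	task_ro_flags_clear(task, TFRO_FILTER_MSG);
 *	bool set = (task_ro_flags_get(task) & TFRO_FILTER_MSG) != 0;
 */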

task_control_port_options_t
task_get_control_port_options(task_t task)
{
	return task_get_ro(task)->task_control_port_options;
}

void
task_set_control_port_options(task_t task, task_control_port_options_t opts)
{
	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
	    task_control_port_options, &opts);
}

/*!
 * @function kdp_task_is_locked
 *
 * @abstract
 * Checks if task is locked.
 *
 * @discussion
 * NOT SAFE: To be used only by kernel debugger.
 *
 * @param task task to check
 *
 * @returns TRUE if the task is locked.
 */
boolean_t
kdp_task_is_locked(task_t task)
{
	return kdp_lck_mtx_lock_spin_is_acquired(&task->lock);
}

#if DEBUG || DEVELOPMENT
/**
 * Check whether a threshold limit is valid given the actual phys memory
 * limit. If the two are the same, race conditions may arise, so we have to
 * prevent that from happening. The return value encodes both whether the
 * values match and whether the diagnostics threshold is currently enabled:
 *
 *	limit == new_limit, threshold off -> THRESHOLD_IS_SAME_AS_LIMIT_FLAG_DISABLED
 *	limit == new_limit, threshold on  -> THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED
 *	limit != new_limit, threshold off -> THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED
 *	limit != new_limit, threshold on  -> THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_ENABLED
 */
static diagthreshold_check_return
task_check_memorythreshold_is_valid(task_t task, uint64_t new_limit, bool is_diagnostics_value)
{
	int phys_limit_mb;
	kern_return_t ret_value;
	bool threshold_enabled;
	bool dummy;
	ret_value = ledger_is_diag_threshold_enabled(task->ledger, task_ledgers.phys_footprint, &threshold_enabled);
	if (ret_value != KERN_SUCCESS) {
		return ret_value;
	}
	if (is_diagnostics_value == true) {
		ret_value = task_get_phys_footprint_limit(task, &phys_limit_mb);
	} else {
		uint64_t diag_limit;
		ret_value = task_get_diag_footprint_limit_internal(task, &diag_limit, &dummy);
		phys_limit_mb = (int)(diag_limit >> 20);
	}
	if (ret_value != KERN_SUCCESS) {
		return ret_value;
	}
	if (phys_limit_mb == (int)new_limit) {
		if (threshold_enabled == false) {
			return THRESHOLD_IS_SAME_AS_LIMIT_FLAG_DISABLED;
		} else {
			return THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED;
		}
	}
	if (threshold_enabled == false) {
		return THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED;
	} else {
		return THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_ENABLED;
	}
}
#endif

#if CONFIG_EXCLAVES
kern_return_t
task_add_conclave(task_t task, void *vnode, int64_t off, const char *task_conclave_id)
{
	/*
	 * Only launchd or properly entitled tasks can attach tasks to
	 * conclaves.
	 */
	if (!exclaves_has_priv(current_task(), EXCLAVES_PRIV_CONCLAVE_SPAWN)) {
		return KERN_DENIED;
	}

	/*
	 * Only entitled tasks can have conclaves attached.
	 * Allow tasks which have the SPAWN privilege to also host conclaves.
	 * This allows xpc proxy to add a conclave before execing a daemon.
	 */
	if (!exclaves_has_priv_vnode(vnode, off, EXCLAVES_PRIV_CONCLAVE_HOST) &&
	    !exclaves_has_priv_vnode(vnode, off, EXCLAVES_PRIV_CONCLAVE_SPAWN)) {
		return KERN_DENIED;
	}

	return exclaves_conclave_attach(task_conclave_id, task);
}

kern_return_t
task_launch_conclave(mach_port_name_t port __unused)
{
	kern_return_t kr = KERN_FAILURE;
	assert3u(port, ==, MACH_PORT_NULL);
	exclaves_resource_t *conclave = task_get_conclave(current_task());
	if (conclave == NULL) {
		return kr;
	}

	kr = exclaves_conclave_launch(conclave);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	task_set_conclave_taint(current_task());

	return KERN_SUCCESS;
}

kern_return_t
task_inherit_conclave(task_t old_task, task_t new_task, void *vnode, int64_t off)
{
	if (old_task->conclave == NULL ||
	    !exclaves_conclave_is_attached(old_task->conclave)) {
		return KERN_SUCCESS;
	}

	/*
	 * Only launchd or properly entitled tasks can attach tasks to
	 * conclaves.
	 */
	if (!exclaves_has_priv(current_task(), EXCLAVES_PRIV_CONCLAVE_SPAWN)) {
		return KERN_DENIED;
	}

	/*
	 * Only entitled tasks can have conclaves attached.
	 */
	if (!exclaves_has_priv_vnode(vnode, off, EXCLAVES_PRIV_CONCLAVE_HOST)) {
		return KERN_DENIED;
	}

	return exclaves_conclave_inherit(old_task->conclave, old_task, new_task);
}

void
task_clear_conclave(task_t task)
{
	if (task->exclave_crash_info) {
		kfree_data(task->exclave_crash_info, CONCLAVE_CRASH_BUFFER_PAGECOUNT * PAGE_SIZE);
		task->exclave_crash_info = NULL;
	}

	if (task->conclave == NULL) {
		return;
	}

	/*
	 * XXX
	 * This should only fail if either the conclave is in an unexpected
	 * state (i.e. not ATTACHED) or if the wrong port is supplied.
	 * We should re-visit this and make sure we guarantee the above
	 * constraints.
	 */
	__assert_only kern_return_t ret =
	    exclaves_conclave_detach(task->conclave, task);
	assert3u(ret, ==, KERN_SUCCESS);
}

void
task_stop_conclave(task_t task, bool gather_crash_bt)
{
	thread_t thread = current_thread();

	if (task->conclave == NULL) {
		return;
	}

	if (task_should_panic_on_exit_due_to_conclave_taint(task)) {
		panic("Conclave tainted task %p terminated\n", task);
	}

	/* Stash the task on the current thread for conclave teardown */
	thread->conclave_stop_task = task;

	__assert_only kern_return_t ret =
	    exclaves_conclave_stop(task->conclave, gather_crash_bt);

	thread->conclave_stop_task = TASK_NULL;

	assert3u(ret, ==, KERN_SUCCESS);
}

kern_return_t
task_stop_conclave_upcall(void)
{
	task_t task = current_task();
	if (task->conclave == NULL) {
		return KERN_INVALID_TASK;
	}

	return exclaves_conclave_stop_upcall(task->conclave);
}

kern_return_t
task_stop_conclave_upcall_complete(void)
{
	task_t task = current_task();
	thread_t thread = current_thread();

	if (!(thread->th_exclaves_state & TH_EXCLAVES_STOP_UPCALL_PENDING)) {
		return KERN_SUCCESS;
	}

	assert3p(task->conclave, !=, NULL);

	return exclaves_conclave_stop_upcall_complete(task->conclave, task);
}

kern_return_t
task_suspend_conclave_upcall(uint64_t *scid_list, size_t scid_list_count)
{
	task_t task = current_task();
	thread_t thread;
	size_t scid_count = 0;
	kern_return_t kr;

	if (task->conclave == NULL) {
		return KERN_INVALID_TASK;
	}

	kr = task_hold_and_wait(task);

	task_lock(task);
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		if (thread->th_exclaves_state & TH_EXCLAVES_RPC) {
			scid_list[scid_count++] = thread->th_exclaves_ipc_ctx.scid;
			if (scid_count >= scid_list_count) {
				break;
			}
		}
	}

	task_unlock(task);
	return kr;
}

kern_return_t
task_crash_info_conclave_upcall(task_t task, const xnuupcalls_conclavesharedbuffer_s *shared_buf,
    uint32_t length)
{
	if (task->conclave == NULL) {
		return KERN_INVALID_TASK;
	}

	/* Allocate a kernel copy of the shared crash-info pages */
	int task_crash_info_buffer_size = 0;
	uint8_t *task_crash_info_buffer;

	if (!length) {
		printf("Conclave upcall: task_crash_info_conclave_upcall received no page addresses\n");
		return KERN_INVALID_ARGUMENT;
	}

	task_crash_info_buffer_size = CONCLAVE_CRASH_BUFFER_PAGECOUNT * PAGE_SIZE;
	assert3u(task_crash_info_buffer_size, >=, length);

	task_crash_info_buffer = kalloc_data(task_crash_info_buffer_size, Z_WAITOK);
	if (!task_crash_info_buffer) {
		panic("task_crash_info_conclave_upcall: cannot allocate buffer for task_info shared memory");
	}

	/* Copy page by page; the shared buffer is a list of physical pages */
	uint8_t *dst = task_crash_info_buffer;
	uint32_t remaining = length;
	for (size_t i = 0; i < CONCLAVE_CRASH_BUFFER_PAGECOUNT; i++) {
		if (remaining) {
			memcpy(dst, (uint8_t*)phystokv((pmap_paddr_t)shared_buf->physaddr[i]), PAGE_SIZE);
			remaining = (remaining >= PAGE_SIZE) ? remaining - PAGE_SIZE : 0;
			dst += PAGE_SIZE;
		}
	}

	/* Publish the buffer on the task unless one is already attached */
	task_lock(task);
	if (task->exclave_crash_info == NULL && task->active) {
		task->exclave_crash_info = task_crash_info_buffer;
		task->exclave_crash_info_length = length;
		task_crash_info_buffer = NULL;
	}
	task_unlock(task);

	if (task_crash_info_buffer) {
		kfree_data(task_crash_info_buffer, task_crash_info_buffer_size);
	}

	return KERN_SUCCESS;
}

exclaves_resource_t *
task_get_conclave(task_t task)
{
	return task->conclave;
}

extern boolean_t IOPMRootDomainGetWillShutdown(void);

/*
 * Do not taint processes when they talk to a conclave, so the system does
 * not panic when they exit.
 */
TUNABLE(bool, disable_conclave_taint, "disable_conclave_taint", true);

static bool
task_should_panic_on_exit_due_to_conclave_taint(task_t task)
{
	/* Check if the boot-arg to disable conclave taint is set */
	if (disable_conclave_taint) {
		return false;
	}

	/* Check if the system is shutting down */
	if (IOPMRootDomainGetWillShutdown()) {
		return false;
	}

	return task_is_conclave_tainted(task);
}

static bool
task_is_conclave_tainted(task_t task)
{
	return (task->t_exclave_state & TES_CONCLAVE_TAINTED) != 0 &&
	       !(task->t_exclave_state & TES_CONCLAVE_UNTAINTABLE);
}

static void
task_set_conclave_taint(task_t task)
{
	os_atomic_or(&task->t_exclave_state, TES_CONCLAVE_TAINTED, relaxed);
}

void
task_set_conclave_untaintable(task_t task)
{
	os_atomic_or(&task->t_exclave_state, TES_CONCLAVE_UNTAINTABLE, relaxed);
}
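
/*
 * Taint lifecycle at a glance (all calls are in this file):
 *
 *	task_launch_conclave()          calls task_set_conclave_taint()
 *	task_set_conclave_untaintable() exempts a task from tainting
 *	task_stop_conclave()            panics on exit while tainted, unless
 *	                                the disable_conclave_taint boot-arg
 *	                                (default true) or an in-progress
 *	                                shutdown applies
 */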

void
task_add_conclave_crash_info(task_t task, void *crash_info_ptr)
{
	__block kern_return_t error = KERN_SUCCESS;
	tb_error_t tberr = TB_ERROR_SUCCESS;
	void *crash_info;
	uint32_t crash_info_length = 0;

	if (task->conclave == NULL) {
		return;
	}

	if (task->exclave_crash_info_length == 0) {
		return;
	}

	error = kcdata_add_container_marker(crash_info_ptr, KCDATA_TYPE_CONTAINER_BEGIN,
	    STACKSHOT_KCCONTAINER_EXCLAVES, 0);
	if (error != KERN_SUCCESS) {
		return;
	}

	crash_info = task->exclave_crash_info;
	crash_info_length = task->exclave_crash_info_length;

	tberr = stackshot_stackshotresult__unmarshal(crash_info,
	    (uint64_t)crash_info_length, ^(stackshot_stackshotresult_s result){
		error = stackshot_exclaves_process_stackshot(&result, crash_info_ptr, false);
		if (error != KERN_SUCCESS) {
		        printf("task_add_conclave_crash_info: error processing stackshot result %d\n", error);
		}
	});
	if (tberr != TB_ERROR_SUCCESS) {
		printf("task_conclave_crash: task_add_conclave_crash_info could not unmarshal stackshot data 0x%x\n", tberr);
		error = KERN_FAILURE;
		goto error_exit;
	}

error_exit:
	kcdata_add_container_marker(crash_info_ptr, KCDATA_TYPE_CONTAINER_END,
	    STACKSHOT_KCCONTAINER_EXCLAVES, 0);

	return;
}

#endif /* CONFIG_EXCLAVES */

/* defined in bsd/kern/kern_proc.c */
extern void proc_name(int pid, char *buf, int size);
extern const char *proc_best_name(struct proc *p);

void
task_procname(task_t task, char *buf, int size)
{
	proc_name(task_pid(task), buf, size);
}

const char *
task_best_name(task_t task)
{
	return proc_best_name(task_get_proc_raw(task));
}