/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	kern/task.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
 *		David Black
 *
 *	Task management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 * Copyright (c) 2005 SPARTA, Inc.
 */

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_priv.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <mach/mach_vm.h>
#include <mach/semaphore.h>
#include <mach/task_info.h>
#include <mach/task_inspect.h>
#include <mach/task_special_ports.h>
#include <mach/sdt.h>
#include <mach/mach_test_upcall.h>

#include <ipc/ipc_importance.h>
#include <ipc/ipc_types.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_hash.h>
#include <ipc/ipc_init.h>

#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/coalition.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/kern_cdata.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>    /* for thread_wakeup */
#include <kern/ipc_tt.h>
#include <kern/host.h>
#include <kern/clock.h>
#include <kern/timer.h>
#include <kern/assert.h>
#include <kern/affinity.h>
#include <kern/exc_resource.h>
#include <kern/machine.h>
#include <kern/policy_internal.h>
#include <kern/restartable.h>
#include <kern/ipc_kobject.h>

#include <corpses/task_corpse.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif

#if MONOTONIC
#include <kern/monotonic.h>
#include <machine/monotonic.h>
#endif /* MONOTONIC */

#include <os/log.h>

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>         /* for kernel_map, ipc_kernel_map */
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_compressor_pager.h>

#include <sys/proc_ro.h>
#include <sys/resource.h>
#include <sys/signalvar.h> /* for coredump */
#include <sys/bsdtask_info.h>
/*
 * Exported interfaces
 */

#include <mach/task_server.h>
#include <mach/mach_host_server.h>
#include <mach/mach_port_server.h>

#include <vm/vm_shared_region.h>

#include <libkern/OSDebug.h>
#include <libkern/OSAtomic.h>
#include <libkern/section_keywords.h>

#include <mach-o/loader.h>
#include <kdp/kdp_dyld.h>

#include <kern/sfi.h>           /* picks up ledger.h */

#if CONFIG_MACF
#include <security/mac_mach_internal.h>
#endif

#include <IOKit/IOBSD.h>

#if KPERF
extern int kpc_force_all_ctrs(task_t, int);
#endif

SECURITY_READ_ONLY_LATE(task_t) kernel_task;

int64_t         next_taskuniqueid = 0;

ZONE_DEFINE_ID(ZONE_ID_TASK, "tasks", struct task, ZC_ZFREE_CLEARMEM);

extern uint32_t ipc_control_port_options;

extern boolean_t proc_send_synchronous_EXC_RESOURCE(void *p);
extern void task_disown_frozen_csegs(task_t owner_task);

static void task_port_no_senders(ipc_port_t, mach_msg_type_number_t);
static void task_port_with_flavor_no_senders(ipc_port_t, mach_msg_type_number_t);
static void task_suspension_no_senders(ipc_port_t, mach_msg_type_number_t);

IPC_KOBJECT_DEFINE(IKOT_TASK_NAME);
IPC_KOBJECT_DEFINE(IKOT_TASK_CONTROL,
    .iko_op_no_senders = task_port_no_senders);
IPC_KOBJECT_DEFINE(IKOT_TASK_READ,
    .iko_op_no_senders = task_port_with_flavor_no_senders);
IPC_KOBJECT_DEFINE(IKOT_TASK_INSPECT,
    .iko_op_no_senders = task_port_with_flavor_no_senders);
IPC_KOBJECT_DEFINE(IKOT_TASK_RESUME,
    .iko_op_no_senders = task_suspension_no_senders);

#if CONFIG_PROC_RESOURCE_LIMITS
static void task_fatal_port_no_senders(ipc_port_t, mach_msg_type_number_t);
static mach_port_t task_allocate_fatal_port(void);

IPC_KOBJECT_DEFINE(IKOT_TASK_FATAL,
    .iko_op_stable     = true,
    .iko_op_no_senders = task_fatal_port_no_senders);

extern void task_id_token_set_port(task_id_token_t token, ipc_port_t port);
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

/* Flag set by core audio when audio is playing. Used to stifle EXC_RESOURCE generation when active. */
int audio_active = 0;

/*
 *	structure for tracking zone usage
 *	Used either as one per task/thread for all zones, or one per <task, zone> pair.
 */
typedef struct zinfo_usage_store_t {
	/* These fields may be updated atomically, and so must be 8 byte aligned */
	uint64_t        alloc __attribute__((aligned(8)));              /* allocation counter */
	uint64_t        free __attribute__((aligned(8)));               /* free counter */
} zinfo_usage_store_t;
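
/*
 * Illustrative sketch (not part of the build): because both counters are
 * 8-byte aligned, each can be bumped with a single 64-bit atomic, e.g.
 * via OSAddAtomic64() from <libkern/OSAtomic.h>:
 *
 *	OSAddAtomic64((SInt64)size, (volatile SInt64 *)&usage->alloc);
 *
 * where `usage` and `size` are hypothetical names used only for
 * illustration.
 */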

zinfo_usage_store_t tasks_tkm_private;
zinfo_usage_store_t tasks_tkm_shared;

/* A container to accumulate statistics for expired tasks */
expired_task_statistics_t               dead_task_statistics;
LCK_SPIN_DECLARE_ATTR(dead_task_statistics_lock, &task_lck_grp, &task_lck_attr);

ledger_template_t task_ledger_template = NULL;

/* global lock for task_dyld_process_info_notify_{register, deregister, get_trap} */
LCK_GRP_DECLARE(g_dyldinfo_mtx_grp, "g_dyldinfo");
LCK_MTX_DECLARE(g_dyldinfo_mtx, &g_dyldinfo_mtx_grp);

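/*
 * Ledger entry indices for the per-task ledger template. Each field starts
 * at -1 (an invalid ledger index) and is filled in by init_task_ledgers()
 * with the real index returned by ledger_entry_add().
 */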
SECURITY_READ_ONLY_LATE(struct _task_ledger_indices) task_ledgers __attribute__((used)) =
{.cpu_time = -1,
 .tkm_private = -1,
 .tkm_shared = -1,
 .phys_mem = -1,
 .wired_mem = -1,
 .internal = -1,
 .iokit_mapped = -1,
 .external = -1,
 .reusable = -1,
 .alternate_accounting = -1,
 .alternate_accounting_compressed = -1,
 .page_table = -1,
 .phys_footprint = -1,
 .internal_compressed = -1,
 .purgeable_volatile = -1,
 .purgeable_nonvolatile = -1,
 .purgeable_volatile_compressed = -1,
 .purgeable_nonvolatile_compressed = -1,
 .tagged_nofootprint = -1,
 .tagged_footprint = -1,
 .tagged_nofootprint_compressed = -1,
 .tagged_footprint_compressed = -1,
 .network_volatile = -1,
 .network_nonvolatile = -1,
 .network_volatile_compressed = -1,
 .network_nonvolatile_compressed = -1,
 .media_nofootprint = -1,
 .media_footprint = -1,
 .media_nofootprint_compressed = -1,
 .media_footprint_compressed = -1,
 .graphics_nofootprint = -1,
 .graphics_footprint = -1,
 .graphics_nofootprint_compressed = -1,
 .graphics_footprint_compressed = -1,
 .neural_nofootprint = -1,
 .neural_footprint = -1,
 .neural_nofootprint_compressed = -1,
 .neural_footprint_compressed = -1,
 .platform_idle_wakeups = -1,
 .interrupt_wakeups = -1,
#if CONFIG_SCHED_SFI
 .sfi_wait_times = { 0 /* initialized at runtime */},
#endif /* CONFIG_SCHED_SFI */
 .cpu_time_billed_to_me = -1,
 .cpu_time_billed_to_others = -1,
 .physical_writes = -1,
 .logical_writes = -1,
 .logical_writes_to_external = -1,
#if DEBUG || DEVELOPMENT
 .pages_grabbed = -1,
 .pages_grabbed_kern = -1,
 .pages_grabbed_iopl = -1,
 .pages_grabbed_upl = -1,
#endif
#if CONFIG_FREEZE
 .frozen_to_swap = -1,
#endif /* CONFIG_FREEZE */
 .energy_billed_to_me = -1,
 .energy_billed_to_others = -1,
#if CONFIG_PHYS_WRITE_ACCT
 .fs_metadata_writes = -1,
#endif /* CONFIG_PHYS_WRITE_ACCT */
#if CONFIG_MEMORYSTATUS
 .memorystatus_dirty_time = -1,
#endif /* CONFIG_MEMORYSTATUS */
 .swapins = -1, };

/* System sleep state */
boolean_t tasks_suspend_state;


void init_task_ledgers(void);
void task_footprint_exceeded(int warning, __unused const void *param0, __unused const void *param1);
void task_wakeups_rate_exceeded(int warning, __unused const void *param0, __unused const void *param1);
void task_io_rate_exceeded(int warning, const void *param0, __unused const void *param1);
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void);
void __attribute__((noinline)) PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, boolean_t is_fatal);
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor);
#if CONFIG_PROC_RESOURCE_LIMITS
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task_t task, int current_size, int soft_limit, int hard_limit);
mach_port_name_t current_task_get_fatal_port_name(void);
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

kern_return_t task_suspend_internal(task_t);
kern_return_t task_resume_internal(task_t);
static kern_return_t task_start_halt_locked(task_t task, boolean_t should_mark_corpse);

extern kern_return_t iokit_task_terminate(task_t task);
extern void          iokit_task_app_suspended_changed(task_t task);

extern kern_return_t exception_deliver(thread_t, exception_type_t, mach_exception_data_t, mach_msg_type_number_t, struct exception_action *, lck_mtx_t *);
extern void bsd_copythreadname(void *dst_uth, void *src_uth);
extern kern_return_t thread_resume(thread_t thread);

extern int exit_with_port_space_exception(void *proc, mach_exception_code_t code, mach_exception_subcode_t subcode);

// Warn tasks when they hit 80% of their memory limit.
#define PHYS_FOOTPRINT_WARNING_LEVEL 80

#define TASK_WAKEUPS_MONITOR_DEFAULT_LIMIT              150 /* wakeups per second */
#define TASK_WAKEUPS_MONITOR_DEFAULT_INTERVAL   300 /* in seconds. */

/*
 * Level (in terms of percentage of the limit) at which the wakeups monitor triggers telemetry.
 *
 * (ie when the task's wakeups rate exceeds 70% of the limit, start taking user
 *  stacktraces, aka micro-stackshots)
 */
#define TASK_WAKEUPS_MONITOR_DEFAULT_USTACKSHOTS_TRIGGER        70
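
/*
 * Worked example with the defaults above: a limit of 150 wakeups/sec and a
 * 70% trigger means micro-stackshot telemetry begins once a task sustains
 * roughly 105 wakeups/sec over the monitor interval.
 */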

int task_wakeups_monitor_interval; /* In seconds. Time period over which wakeups rate is observed */
int task_wakeups_monitor_rate;     /* In hz. Maximum allowable wakeups per task before EXC_RESOURCE is sent */

unsigned int task_wakeups_monitor_ustackshots_trigger_pct; /* Percentage. Level at which we start gathering telemetry. */

int disable_exc_resource; /* Global override to suppress EXC_RESOURCE for resource monitor violations. */

ledger_amount_t max_task_footprint = 0;  /* Per-task limit on physical memory consumption in bytes     */
unsigned int max_task_footprint_warning_level = 0;  /* Per-task limit warning percentage */
int max_task_footprint_mb = 0;  /* Per-task limit on physical memory consumption in megabytes */

/* I/O Monitor Limits */
#define IOMON_DEFAULT_LIMIT                     (20480ull)      /* MB of logical/physical I/O */
#define IOMON_DEFAULT_INTERVAL                  (86400ull)      /* in seconds */

uint64_t task_iomon_limit_mb;           /* Per-task I/O monitor limit in MBs */
uint64_t task_iomon_interval_secs;      /* Per-task I/O monitor interval in secs */
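
/*
 * Worked example with the defaults above: 20480 MB over an 86400-second
 * (24-hour) interval allows roughly 0.24 MB/sec of sustained I/O before
 * the monitor fires.
 */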

#define IO_TELEMETRY_DEFAULT_LIMIT              (10ll * 1024ll * 1024ll)
int64_t io_telemetry_limit;                     /* Threshold to take a microstackshot (0 indicates I/O telemetry is turned off) */
int64_t global_logical_writes_count = 0;        /* Global count for logical writes */
int64_t global_logical_writes_to_external_count = 0;        /* Global count for logical writes to external storage */
static boolean_t global_update_logical_writes(int64_t, int64_t*);

#define TASK_MAX_THREAD_LIMIT 256

#if MACH_ASSERT
int pmap_ledgers_panic = 1;
int pmap_ledgers_panic_leeway = 3;
#endif /* MACH_ASSERT */

int task_max = CONFIG_TASK_MAX; /* Max number of tasks */

#if CONFIG_COREDUMP
int hwm_user_cores = 0; /* high watermark violations generate user core files */
#endif

#ifdef MACH_BSD
extern uint32_t proc_platform(const struct proc *);
extern uint32_t proc_sdk(struct proc *);
extern void     proc_getexecutableuuid(void *, unsigned char *, unsigned long);
extern int      proc_pid(struct proc *p);
extern int      proc_selfpid(void);
extern struct proc *current_proc(void);
extern char     *proc_name_address(struct proc *p);
extern uint64_t get_dispatchqueue_offset_from_proc(void *);
extern int kevent_proc_copy_uptrs(void *proc, uint64_t *buf, uint32_t bufsize);
extern void workq_proc_suspended(struct proc *p);
extern void workq_proc_resumed(struct proc *p);

#if CONFIG_MEMORYSTATUS
extern void     proc_memstat_skip(struct proc* p, boolean_t set);
extern void     memorystatus_on_ledger_footprint_exceeded(int warning, boolean_t memlimit_is_active, boolean_t memlimit_is_fatal);
extern void     memorystatus_log_exception(const int max_footprint_mb, boolean_t memlimit_is_active, boolean_t memlimit_is_fatal);
extern boolean_t memorystatus_allowed_vm_map_fork(task_t task);
extern uint64_t  memorystatus_available_memory_internal(struct proc *p);

#if DEVELOPMENT || DEBUG
extern void memorystatus_abort_vm_map_fork(task_t);
#endif

#endif /* CONFIG_MEMORYSTATUS */

#endif /* MACH_BSD */

#if DEVELOPMENT || DEBUG
int exc_resource_threads_enabled;
#endif /* DEVELOPMENT || DEBUG */

/* Boot-arg that turns on fatal pac exception delivery for all first-party apps */
static TUNABLE(bool, enable_pac_exception, "enable_pac_exception", false);

/*
 * Defaults for controllable EXC_GUARD behaviors
 *
 * Internal builds are fatal by default (except BRIDGE).
 * Create an alternate set of defaults for special processes by name.
 */
struct task_exc_guard_named_default {
	char *name;
	uint32_t behavior;
};
#define _TASK_EXC_GUARD_MP_CORPSE  (TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_MP_CORPSE)
#define _TASK_EXC_GUARD_MP_ONCE    (_TASK_EXC_GUARD_MP_CORPSE | TASK_EXC_GUARD_MP_ONCE)
#define _TASK_EXC_GUARD_MP_FATAL   (TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_MP_FATAL)

#define _TASK_EXC_GUARD_VM_CORPSE  (TASK_EXC_GUARD_VM_DELIVER | TASK_EXC_GUARD_VM_CORPSE)
#define _TASK_EXC_GUARD_VM_ONCE    (_TASK_EXC_GUARD_VM_CORPSE | TASK_EXC_GUARD_VM_ONCE)
#define _TASK_EXC_GUARD_VM_FATAL   (TASK_EXC_GUARD_VM_DELIVER | TASK_EXC_GUARD_VM_FATAL)

#define _TASK_EXC_GUARD_ALL_CORPSE (_TASK_EXC_GUARD_MP_CORPSE | _TASK_EXC_GUARD_VM_CORPSE)
#define _TASK_EXC_GUARD_ALL_ONCE   (_TASK_EXC_GUARD_MP_ONCE | _TASK_EXC_GUARD_VM_ONCE)
#define _TASK_EXC_GUARD_ALL_FATAL  (_TASK_EXC_GUARD_MP_FATAL | _TASK_EXC_GUARD_VM_FATAL)

/* cannot turn off FATAL and DELIVER bit if set */
uint32_t task_exc_guard_no_unset_mask = TASK_EXC_GUARD_MP_FATAL | TASK_EXC_GUARD_VM_FATAL |
    TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_VM_DELIVER;
/* cannot turn on ONCE bit if unset */
uint32_t task_exc_guard_no_set_mask = TASK_EXC_GUARD_MP_ONCE | TASK_EXC_GUARD_VM_ONCE;
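
/*
 * Sketch of the intended mask semantics (illustration only; the actual
 * enforcement lives in task_set_exc_guard_behavior()): bits in
 * task_exc_guard_no_unset_mask that are already set must survive an update,
 * and bits in task_exc_guard_no_set_mask may not be newly introduced,
 * i.e. roughly:
 *
 *	new = (requested | (old & task_exc_guard_no_unset_mask))
 *	    & ~(task_exc_guard_no_set_mask & ~old);
 */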

#if !defined(XNU_TARGET_OS_BRIDGE)

uint32_t task_exc_guard_default = _TASK_EXC_GUARD_ALL_FATAL;
uint32_t task_exc_guard_config_mask = TASK_EXC_GUARD_MP_ALL | TASK_EXC_GUARD_VM_ALL;
/*
 * These "by-process-name" default overrides are intended to be a short-term fix to
 * quickly get over races between changes introducing new EXC_GUARD raising behaviors
 * in some process and a change in default behavior for same. We should ship with
 * these lists empty (by fixing the bugs, or explicitly changing the task's EXC_GUARD
 * exception behavior via task_set_exc_guard_behavior()).
 *
 * XXX Remember to add/remove TASK_EXC_GUARD_HONOR_NAMED_DEFAULTS back to
 * task_exc_guard_default when transitioning this list between empty and
 * non-empty.
 */
static struct task_exc_guard_named_default task_exc_guard_named_defaults[] = {};

#else /* !defined(XNU_TARGET_OS_BRIDGE) */

uint32_t task_exc_guard_default = _TASK_EXC_GUARD_ALL_ONCE;
uint32_t task_exc_guard_config_mask = TASK_EXC_GUARD_MP_ALL | TASK_EXC_GUARD_VM_ALL;
static struct task_exc_guard_named_default task_exc_guard_named_defaults[] = {};

#endif /* !defined(XNU_TARGET_OS_BRIDGE) */

/* Forwards */

static void task_hold_locked(task_t task);
static void task_wait_locked(task_t task, boolean_t until_not_runnable);
static void task_release_locked(task_t task);

static void task_synchronizer_destroy_all(task_t task);
static os_ref_count_t
task_add_turnstile_watchports_locked(
	task_t                      task,
	struct task_watchports      *watchports,
	struct task_watchport_elem  **previous_elem_array,
	ipc_port_t                  *portwatch_ports,
	uint32_t                    portwatch_count);

static os_ref_count_t
task_remove_turnstile_watchports_locked(
	task_t                 task,
	struct task_watchports *watchports,
	ipc_port_t             *port_freelist);

static struct task_watchports *
task_watchports_alloc_init(
	task_t        task,
	thread_t      thread,
	uint32_t      count);

static void
task_watchports_deallocate(
	struct task_watchports *watchports);

void
task_set_64bit(
	task_t task,
	boolean_t is_64bit,
	boolean_t is_64bit_data)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__arm64__)
	thread_t thread;
#endif /* defined(__i386__) || defined(__x86_64__) || defined(__arm64__) */

	task_lock(task);

	/*
	 * Switching to/from 64-bit address spaces
	 */
	if (is_64bit) {
		if (!task_has_64Bit_addr(task)) {
			task_set_64Bit_addr(task);
		}
	} else {
		if (task_has_64Bit_addr(task)) {
			task_clear_64Bit_addr(task);
		}
	}

	/*
	 * Switching to/from 64-bit register state.
	 */
	if (is_64bit_data) {
		if (task_has_64Bit_data(task)) {
			goto out;
		}

		task_set_64Bit_data(task);
	} else {
		if (!task_has_64Bit_data(task)) {
			goto out;
		}

		task_clear_64Bit_data(task);
	}

	/* FIXME: On x86, the thread save state flavor can diverge from the
	 * task's 64-bit feature flag due to the 32-bit/64-bit register save
	 * state dichotomy. Since we can be pre-empted in this interval,
	 * certain routines may observe the thread as being in an inconsistent
	 * state with respect to its task's 64-bitness.
	 */

#if defined(__x86_64__) || defined(__arm64__)
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		thread_mtx_lock(thread);
		machine_thread_switch_addrmode(thread);
		thread_mtx_unlock(thread);
	}
#endif /* defined(__x86_64__) || defined(__arm64__) */

out:
	task_unlock(task);
}

bool
task_get_64bit_addr(task_t task)
{
	return task_has_64Bit_addr(task);
}

bool
task_get_64bit_data(task_t task)
{
	return task_has_64Bit_data(task);
}

void
task_set_platform_binary(
	task_t task,
	boolean_t is_platform)
{
	task_lock(task);
	if (is_platform) {
		task->t_flags |= TF_PLATFORM;
	} else {
		task->t_flags &= ~(TF_PLATFORM);
	}
	task_unlock(task);
}

void
task_set_immovable_pinned(task_t task)
{
	ipc_task_set_immovable_pinned(task);
}

/*
 * Set or clear the per-task TF_CA_CLIENT_WI flag according to the specified argument.
 * Returns "false" if the flag was already set, and "true" in all other cases.
 */
bool
task_set_ca_client_wi(
	task_t task,
	boolean_t set_or_clear)
{
	bool ret = true;
	task_lock(task);
	if (set_or_clear) {
		/* Tasks can have only one CA_CLIENT work interval */
		if (task->t_flags & TF_CA_CLIENT_WI) {
			ret = false;
		} else {
			task->t_flags |= TF_CA_CLIENT_WI;
		}
	} else {
		task->t_flags &= ~TF_CA_CLIENT_WI;
	}
	task_unlock(task);
	return ret;
}
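
/*
 * Typical usage (sketch, not from this file): a registration path can treat
 * a "false" return as "another CA_CLIENT work interval already owns this
 * task", e.g.:
 *
 *	if (!task_set_ca_client_wi(task, TRUE)) {
 *		return KERN_FAILURE;    // hypothetical caller's error path
 *	}
 */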

void
task_set_dyld_info(
	task_t task,
	mach_vm_address_t addr,
	mach_vm_size_t size)
{
	task_lock(task);
	task->all_image_info_addr = addr;
	task->all_image_info_size = size;
	task_unlock(task);
}

void
task_set_mach_header_address(
	task_t task,
	mach_vm_address_t addr)
{
	task_lock(task);
	task->mach_header_vm_address = addr;
	task_unlock(task);
}

void
task_bank_reset(__unused task_t task)
{
	if (task->bank_context != NULL) {
		bank_task_destroy(task);
	}
}

/*
 * NOTE: This should only be called when the P_LINTRANSIT
 *	 flag is set (the proc_trans lock is held) on the
 *	 proc associated with the task.
 */
void
task_bank_init(__unused task_t task)
{
	if (task->bank_context != NULL) {
		panic("Task bank init called with non null bank context for task: %p and bank_context: %p", task, task->bank_context);
	}
	bank_task_initialize(task);
}

void
task_set_did_exec_flag(task_t task)
{
	task->t_procflags |= TPF_DID_EXEC;
}

void
task_clear_exec_copy_flag(task_t task)
{
	task->t_procflags &= ~TPF_EXEC_COPY;
}

event_t
task_get_return_wait_event(task_t task)
{
	return (event_t)&task->returnwait_inheritor;
}

void
task_clear_return_wait(task_t task, uint32_t flags)
{
	if (flags & TCRW_CLEAR_INITIAL_WAIT) {
		thread_wakeup(task_get_return_wait_event(task));
	}

	if (flags & TCRW_CLEAR_FINAL_WAIT) {
		is_write_lock(task->itk_space);

		task->t_returnwaitflags &= ~TRW_LRETURNWAIT;
		task->returnwait_inheritor = NULL;

		if (task->t_returnwaitflags & TRW_LRETURNWAITER) {
			struct turnstile *turnstile = turnstile_prepare((uintptr_t) task_get_return_wait_event(task),
			    NULL, TURNSTILE_NULL, TURNSTILE_ULOCK);

			waitq_wakeup64_all(&turnstile->ts_waitq,
			    CAST_EVENT64_T(task_get_return_wait_event(task)),
			    THREAD_AWAKENED, 0);

			turnstile_update_inheritor(turnstile, NULL,
			    TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD);
			turnstile_update_inheritor_complete(turnstile, TURNSTILE_INTERLOCK_HELD);

			turnstile_complete((uintptr_t) task_get_return_wait_event(task), NULL, NULL, TURNSTILE_ULOCK);
			turnstile_cleanup();
			task->t_returnwaitflags &= ~TRW_LRETURNWAITER;
		}
		is_write_unlock(task->itk_space);
	}
}

void __attribute__((noreturn))
task_wait_to_return(void)
{
	task_t task = current_task();

	is_write_lock(task->itk_space);

	if (task->t_returnwaitflags & TRW_LRETURNWAIT) {
		struct turnstile *turnstile = turnstile_prepare((uintptr_t) task_get_return_wait_event(task),
		    NULL, TURNSTILE_NULL, TURNSTILE_ULOCK);

		do {
			task->t_returnwaitflags |= TRW_LRETURNWAITER;
			turnstile_update_inheritor(turnstile, task->returnwait_inheritor,
			    (TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_THREAD));

			waitq_assert_wait64(&turnstile->ts_waitq,
			    CAST_EVENT64_T(task_get_return_wait_event(task)),
			    THREAD_UNINT, TIMEOUT_WAIT_FOREVER);

			is_write_unlock(task->itk_space);

			turnstile_update_inheritor_complete(turnstile, TURNSTILE_INTERLOCK_NOT_HELD);

			thread_block(THREAD_CONTINUE_NULL);

			is_write_lock(task->itk_space);
		} while (task->t_returnwaitflags & TRW_LRETURNWAIT);

		turnstile_complete((uintptr_t) task_get_return_wait_event(task), NULL, NULL, TURNSTILE_ULOCK);
	}

	is_write_unlock(task->itk_space);
	turnstile_cleanup();


#if CONFIG_MACF
	/*
	 * Before jumping to userspace and allowing this process to execute any code,
	 * notify any interested parties.
	 */
	mac_proc_notify_exec_complete(current_proc());
#endif

	thread_bootstrap_return();
}

#ifdef CONFIG_32BIT_TELEMETRY
boolean_t
task_consume_32bit_log_flag(task_t task)
{
	if ((task->t_procflags & TPF_LOG_32BIT_TELEMETRY) != 0) {
		task->t_procflags &= ~TPF_LOG_32BIT_TELEMETRY;
		return TRUE;
	} else {
		return FALSE;
	}
}

void
task_set_32bit_log_flag(task_t task)
{
	task->t_procflags |= TPF_LOG_32BIT_TELEMETRY;
}
#endif /* CONFIG_32BIT_TELEMETRY */

boolean_t
task_is_exec_copy(task_t task)
{
	return task_is_exec_copy_internal(task);
}

boolean_t
task_did_exec(task_t task)
{
	return task_did_exec_internal(task);
}

boolean_t
task_is_active(task_t task)
{
	return task->active;
}

boolean_t
task_is_halting(task_t task)
{
	return task->halting;
}

void
task_init(void)
{
	/*
	 * Configure per-task memory limit.
	 * The boot-arg is interpreted as Megabytes,
	 * and takes precedence over the device tree.
	 * Setting the boot-arg to 0 disables task limits.
	 */
	if (!PE_parse_boot_argn("max_task_pmem", &max_task_footprint_mb,
	    sizeof(max_task_footprint_mb))) {
		/*
		 * No limit was found in boot-args, so go look in the device tree.
		 */
		if (!PE_get_default("kern.max_task_pmem", &max_task_footprint_mb,
		    sizeof(max_task_footprint_mb))) {
			/*
			 * No limit was found in device tree.
			 */
			max_task_footprint_mb = 0;
		}
	}

	if (max_task_footprint_mb != 0) {
#if CONFIG_MEMORYSTATUS
		if (max_task_footprint_mb < 50) {
			printf("Warning: max_task_pmem %d below minimum.\n",
			    max_task_footprint_mb);
			max_task_footprint_mb = 50;
		}
		printf("Limiting task physical memory footprint to %d MB\n",
		    max_task_footprint_mb);

		max_task_footprint = (ledger_amount_t)max_task_footprint_mb * 1024 * 1024;         // Convert MB to bytes

		/*
		 * Configure the per-task memory limit warning level.
		 * This is computed as a percentage.
		 */
		max_task_footprint_warning_level = 0;

		if (max_mem < 0x40000000) {
			/*
			 * On devices with < 1GB of memory:
			 *    -- set warnings to 50MB below the per-task limit.
			 */
			if (max_task_footprint_mb > 50) {
				max_task_footprint_warning_level = ((max_task_footprint_mb - 50) * 100) / max_task_footprint_mb;
			}
		} else {
			/*
			 * On devices with >= 1GB of memory:
			 *    -- set warnings to 100MB below the per-task limit.
			 */
			if (max_task_footprint_mb > 100) {
				max_task_footprint_warning_level = ((max_task_footprint_mb - 100) * 100) / max_task_footprint_mb;
			}
		}

		/*
		 * Never allow warning level to land below the default.
		 */
		if (max_task_footprint_warning_level < PHYS_FOOTPRINT_WARNING_LEVEL) {
			max_task_footprint_warning_level = PHYS_FOOTPRINT_WARNING_LEVEL;
		}

		printf("Limiting task physical memory warning to %d%%\n", max_task_footprint_warning_level);
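
		/*
		 * Worked example of the computation above (illustration only):
		 * booting with max_task_pmem=500 on a device with >= 1GB of
		 * memory yields ((500 - 100) * 100) / 500 = 80%, which already
		 * meets the PHYS_FOOTPRINT_WARNING_LEVEL floor of 80.
		 */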

#else
		printf("Warning: max_task_pmem specified, but jetsam not configured; ignoring.\n");
#endif /* CONFIG_MEMORYSTATUS */
	}

#if DEVELOPMENT || DEBUG
	if (!PE_parse_boot_argn("exc_resource_threads",
	    &exc_resource_threads_enabled,
	    sizeof(exc_resource_threads_enabled))) {
		exc_resource_threads_enabled = 1;
	}
	PE_parse_boot_argn("task_exc_guard_default",
	    &task_exc_guard_default,
	    sizeof(task_exc_guard_default));
#endif /* DEVELOPMENT || DEBUG */

#if CONFIG_COREDUMP
	if (!PE_parse_boot_argn("hwm_user_cores", &hwm_user_cores,
	    sizeof(hwm_user_cores))) {
		hwm_user_cores = 0;
	}
#endif

	proc_init_cpumon_params();

	if (!PE_parse_boot_argn("task_wakeups_monitor_rate", &task_wakeups_monitor_rate, sizeof(task_wakeups_monitor_rate))) {
		task_wakeups_monitor_rate = TASK_WAKEUPS_MONITOR_DEFAULT_LIMIT;
	}

	if (!PE_parse_boot_argn("task_wakeups_monitor_interval", &task_wakeups_monitor_interval, sizeof(task_wakeups_monitor_interval))) {
		task_wakeups_monitor_interval = TASK_WAKEUPS_MONITOR_DEFAULT_INTERVAL;
	}

	if (!PE_parse_boot_argn("task_wakeups_monitor_ustackshots_trigger_pct", &task_wakeups_monitor_ustackshots_trigger_pct,
	    sizeof(task_wakeups_monitor_ustackshots_trigger_pct))) {
		task_wakeups_monitor_ustackshots_trigger_pct = TASK_WAKEUPS_MONITOR_DEFAULT_USTACKSHOTS_TRIGGER;
	}

	if (!PE_parse_boot_argn("disable_exc_resource", &disable_exc_resource,
	    sizeof(disable_exc_resource))) {
		disable_exc_resource = 0;
	}

	if (!PE_parse_boot_argn("task_iomon_limit_mb", &task_iomon_limit_mb, sizeof(task_iomon_limit_mb))) {
		task_iomon_limit_mb = IOMON_DEFAULT_LIMIT;
	}

	if (!PE_parse_boot_argn("task_iomon_interval_secs", &task_iomon_interval_secs, sizeof(task_iomon_interval_secs))) {
		task_iomon_interval_secs = IOMON_DEFAULT_INTERVAL;
	}

	if (!PE_parse_boot_argn("io_telemetry_limit", &io_telemetry_limit, sizeof(io_telemetry_limit))) {
		io_telemetry_limit = IO_TELEMETRY_DEFAULT_LIMIT;
	}

/*
 * If we have coalitions, coalition_init() will call init_task_ledgers() as it
 * sets up the ledgers for the default coalition. If we don't have coalitions,
 * then we have to call it now.
 */
#if CONFIG_COALITIONS
	assert(task_ledger_template);
#else /* CONFIG_COALITIONS */
	init_task_ledgers();
#endif /* CONFIG_COALITIONS */

	task_ref_init();

	/*
	 * Create the kernel task as the first task.
	 */
#ifdef __LP64__
	if (task_create_internal(TASK_NULL, NULL, NULL, FALSE, TRUE, TRUE, TF_NONE, TPF_NONE, TWF_NONE, &kernel_task) != KERN_SUCCESS)
#else
	if (task_create_internal(TASK_NULL, NULL, NULL, FALSE, FALSE, FALSE, TF_NONE, TPF_NONE, TWF_NONE, &kernel_task) != KERN_SUCCESS)
#endif
	{ panic("task_init");}

#if defined(HAS_APPLE_PAC)
	kernel_task->rop_pid = ml_default_rop_pid();
	kernel_task->jop_pid = ml_default_jop_pid();
	// kernel_task never runs at EL0, but machine_thread_state_convert_from/to_user() relies on
	// disable_user_jop to be false for kernel threads (e.g. in exception delivery on thread_exception_daemon)
	ml_task_set_disable_user_jop(kernel_task, FALSE);
#endif

	vm_map_deallocate(kernel_task->map);
	kernel_task->map = kernel_map;
}

/*
 * Create a task running in the kernel address space.  It may
 * have its own map of size mem_size and may have ipc privileges.
 */
kern_return_t
kernel_task_create(
	__unused task_t         parent_task,
	__unused vm_offset_t            map_base,
	__unused vm_size_t              map_size,
	__unused task_t         *child_task)
{
	return KERN_INVALID_ARGUMENT;
}

kern_return_t
task_create(
	task_t                          parent_task,
	__unused ledger_port_array_t    ledger_ports,
	__unused mach_msg_type_number_t num_ledger_ports,
	__unused boolean_t              inherit_memory,
	__unused task_t                 *child_task)        /* OUT */
{
	if (parent_task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * No longer supported: too many calls assume that a task has a valid
	 * process attached.
	 */
	return KERN_FAILURE;
}

/*
 * Task ledgers
 * ------------
 *
 * phys_footprint
 *   Physical footprint: This is the sum of:
 *     + (internal - alternate_accounting)
 *     + (internal_compressed - alternate_accounting_compressed)
 *     + iokit_mapped
 *     + purgeable_nonvolatile
 *     + purgeable_nonvolatile_compressed
 *     + page_table
 *
 * internal
 *   The task's anonymous memory, which on iOS is always resident.
 *
 * internal_compressed
 *   Amount of this task's internal memory which is held by the compressor.
 *   Such memory is no longer actually resident for the task [i.e., resident in its pmap],
 *   and could be either decompressed back into memory, or paged out to storage, depending
 *   on our implementation.
 *
 * iokit_mapped
 *   IOKit mappings: The total size of all IOKit mappings in this task, regardless of
 *   clean/dirty or internal/external state.
 *
 * alternate_accounting
 *   The number of internal dirty pages which are part of IOKit mappings. By definition, these pages
 *   are counted in both internal *and* iokit_mapped, so we must subtract them from the total to avoid
 *   double counting.
 *
 * pages_grabbed
 *   pages_grabbed counts all page grabs in a task.  It is also broken out into three subtypes
 *   which track UPL, IOPL and Kernel page grabs.
 */
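
/*
 * Sketch (illustration only): once a task is running, any of the entries
 * described above can be read back from its ledger, e.g.:
 *
 *	ledger_amount_t balance;
 *	ledger_get_balance(task->ledger, task_ledgers.phys_footprint, &balance);
 */
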
void
init_task_ledgers(void)
{
	ledger_template_t t;

	assert(task_ledger_template == NULL);
	assert(kernel_task == TASK_NULL);

#if MACH_ASSERT
	PE_parse_boot_argn("pmap_ledgers_panic",
	    &pmap_ledgers_panic,
	    sizeof(pmap_ledgers_panic));
	PE_parse_boot_argn("pmap_ledgers_panic_leeway",
	    &pmap_ledgers_panic_leeway,
	    sizeof(pmap_ledgers_panic_leeway));
#endif /* MACH_ASSERT */

	if ((t = ledger_template_create("Per-task ledger")) == NULL) {
		panic("couldn't create task ledger template");
	}

	task_ledgers.cpu_time = ledger_entry_add(t, "cpu_time", "sched", "ns");
	task_ledgers.tkm_private = ledger_entry_add(t, "tkm_private",
	    "physmem", "bytes");
	task_ledgers.tkm_shared = ledger_entry_add(t, "tkm_shared", "physmem",
	    "bytes");
	task_ledgers.phys_mem = ledger_entry_add(t, "phys_mem", "physmem",
	    "bytes");
	task_ledgers.wired_mem = ledger_entry_add(t, "wired_mem", "physmem",
	    "bytes");
	task_ledgers.internal = ledger_entry_add(t, "internal", "physmem",
	    "bytes");
	task_ledgers.iokit_mapped = ledger_entry_add_with_flags(t, "iokit_mapped", "mappings",
	    "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.alternate_accounting = ledger_entry_add_with_flags(t, "alternate_accounting", "physmem",
	    "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.alternate_accounting_compressed = ledger_entry_add_with_flags(t, "alternate_accounting_compressed", "physmem",
	    "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.page_table = ledger_entry_add_with_flags(t, "page_table", "physmem",
	    "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.phys_footprint = ledger_entry_add(t, "phys_footprint", "physmem",
	    "bytes");
	task_ledgers.internal_compressed = ledger_entry_add(t, "internal_compressed", "physmem",
	    "bytes");
	task_ledgers.reusable = ledger_entry_add(t, "reusable", "physmem", "bytes");
	task_ledgers.external = ledger_entry_add(t, "external", "physmem", "bytes");
	task_ledgers.purgeable_volatile = ledger_entry_add_with_flags(t, "purgeable_volatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.purgeable_nonvolatile = ledger_entry_add_with_flags(t, "purgeable_nonvolatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.purgeable_volatile_compressed = ledger_entry_add_with_flags(t, "purgeable_volatile_compress", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.purgeable_nonvolatile_compressed = ledger_entry_add_with_flags(t, "purgeable_nonvolatile_compress", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
#if DEBUG || DEVELOPMENT
	task_ledgers.pages_grabbed = ledger_entry_add_with_flags(t, "pages_grabbed", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.pages_grabbed_kern = ledger_entry_add_with_flags(t, "pages_grabbed_kern", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.pages_grabbed_iopl = ledger_entry_add_with_flags(t, "pages_grabbed_iopl", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.pages_grabbed_upl = ledger_entry_add_with_flags(t, "pages_grabbed_upl", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
#endif
	task_ledgers.tagged_nofootprint = ledger_entry_add_with_flags(t, "tagged_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.tagged_footprint = ledger_entry_add_with_flags(t, "tagged_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.tagged_nofootprint_compressed = ledger_entry_add_with_flags(t, "tagged_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.tagged_footprint_compressed = ledger_entry_add_with_flags(t, "tagged_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.network_volatile = ledger_entry_add_with_flags(t, "network_volatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.network_nonvolatile = ledger_entry_add_with_flags(t, "network_nonvolatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.network_volatile_compressed = ledger_entry_add_with_flags(t, "network_volatile_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.network_nonvolatile_compressed = ledger_entry_add_with_flags(t, "network_nonvolatile_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.media_nofootprint = ledger_entry_add_with_flags(t, "media_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.media_footprint = ledger_entry_add_with_flags(t, "media_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.media_nofootprint_compressed = ledger_entry_add_with_flags(t, "media_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.media_footprint_compressed = ledger_entry_add_with_flags(t, "media_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.graphics_nofootprint = ledger_entry_add_with_flags(t, "graphics_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.graphics_footprint = ledger_entry_add_with_flags(t, "graphics_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.graphics_nofootprint_compressed = ledger_entry_add_with_flags(t, "graphics_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.graphics_footprint_compressed = ledger_entry_add_with_flags(t, "graphics_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.neural_nofootprint = ledger_entry_add_with_flags(t, "neural_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.neural_footprint = ledger_entry_add_with_flags(t, "neural_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.neural_nofootprint_compressed = ledger_entry_add_with_flags(t, "neural_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.neural_footprint_compressed = ledger_entry_add_with_flags(t, "neural_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);

#if CONFIG_FREEZE
	task_ledgers.frozen_to_swap = ledger_entry_add(t, "frozen_to_swap", "physmem", "bytes");
#endif /* CONFIG_FREEZE */

	task_ledgers.platform_idle_wakeups = ledger_entry_add(t, "platform_idle_wakeups", "power",
	    "count");
	task_ledgers.interrupt_wakeups = ledger_entry_add(t, "interrupt_wakeups", "power",
	    "count");

#if CONFIG_SCHED_SFI
	sfi_class_id_t class_id, ledger_alias;
	for (class_id = SFI_CLASS_UNSPECIFIED; class_id < MAX_SFI_CLASS_ID; class_id++) {
		task_ledgers.sfi_wait_times[class_id] = -1;
	}

	/* don't account for UNSPECIFIED */
	for (class_id = SFI_CLASS_UNSPECIFIED + 1; class_id < MAX_SFI_CLASS_ID; class_id++) {
		ledger_alias = sfi_get_ledger_alias_for_class(class_id);
		if (ledger_alias != SFI_CLASS_UNSPECIFIED) {
			/* Check to see if alias has been registered yet */
			if (task_ledgers.sfi_wait_times[ledger_alias] != -1) {
				task_ledgers.sfi_wait_times[class_id] = task_ledgers.sfi_wait_times[ledger_alias];
			} else {
				/* Otherwise, initialize it first */
				task_ledgers.sfi_wait_times[class_id] = task_ledgers.sfi_wait_times[ledger_alias] = sfi_ledger_entry_add(t, ledger_alias);
			}
		} else {
			task_ledgers.sfi_wait_times[class_id] = sfi_ledger_entry_add(t, class_id);
		}

		if (task_ledgers.sfi_wait_times[class_id] < 0) {
			panic("couldn't create entries for task ledger template for SFI class 0x%x", class_id);
		}
	}

	assert(task_ledgers.sfi_wait_times[MAX_SFI_CLASS_ID - 1] != -1);
#endif /* CONFIG_SCHED_SFI */

	task_ledgers.cpu_time_billed_to_me = ledger_entry_add(t, "cpu_time_billed_to_me", "sched", "ns");
	task_ledgers.cpu_time_billed_to_others = ledger_entry_add(t, "cpu_time_billed_to_others", "sched", "ns");
	task_ledgers.physical_writes = ledger_entry_add(t, "physical_writes", "res", "bytes");
	task_ledgers.logical_writes = ledger_entry_add(t, "logical_writes", "res", "bytes");
	task_ledgers.logical_writes_to_external = ledger_entry_add(t, "logical_writes_to_external", "res", "bytes");
#if CONFIG_PHYS_WRITE_ACCT
	task_ledgers.fs_metadata_writes = ledger_entry_add(t, "fs_metadata_writes", "res", "bytes");
#endif /* CONFIG_PHYS_WRITE_ACCT */
	task_ledgers.energy_billed_to_me = ledger_entry_add(t, "energy_billed_to_me", "power", "nj");
	task_ledgers.energy_billed_to_others = ledger_entry_add(t, "energy_billed_to_others", "power", "nj");

#if CONFIG_MEMORYSTATUS
	task_ledgers.memorystatus_dirty_time = ledger_entry_add(t, "memorystatus_dirty_time", "physmem", "ns");
#endif /* CONFIG_MEMORYSTATUS */

	task_ledgers.swapins = ledger_entry_add_with_flags(t, "swapins", "physmem", "bytes",
	    LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);

	if ((task_ledgers.cpu_time < 0) ||
	    (task_ledgers.tkm_private < 0) ||
	    (task_ledgers.tkm_shared < 0) ||
	    (task_ledgers.phys_mem < 0) ||
	    (task_ledgers.wired_mem < 0) ||
	    (task_ledgers.internal < 0) ||
	    (task_ledgers.external < 0) ||
	    (task_ledgers.reusable < 0) ||
	    (task_ledgers.iokit_mapped < 0) ||
	    (task_ledgers.alternate_accounting < 0) ||
	    (task_ledgers.alternate_accounting_compressed < 0) ||
	    (task_ledgers.page_table < 0) ||
	    (task_ledgers.phys_footprint < 0) ||
	    (task_ledgers.internal_compressed < 0) ||
	    (task_ledgers.purgeable_volatile < 0) ||
	    (task_ledgers.purgeable_nonvolatile < 0) ||
	    (task_ledgers.purgeable_volatile_compressed < 0) ||
	    (task_ledgers.purgeable_nonvolatile_compressed < 0) ||
	    (task_ledgers.tagged_nofootprint < 0) ||
	    (task_ledgers.tagged_footprint < 0) ||
	    (task_ledgers.tagged_nofootprint_compressed < 0) ||
	    (task_ledgers.tagged_footprint_compressed < 0) ||
#if CONFIG_FREEZE
	    (task_ledgers.frozen_to_swap < 0) ||
#endif /* CONFIG_FREEZE */
	    (task_ledgers.network_volatile < 0) ||
	    (task_ledgers.network_nonvolatile < 0) ||
	    (task_ledgers.network_volatile_compressed < 0) ||
	    (task_ledgers.network_nonvolatile_compressed < 0) ||
	    (task_ledgers.media_nofootprint < 0) ||
	    (task_ledgers.media_footprint < 0) ||
	    (task_ledgers.media_nofootprint_compressed < 0) ||
	    (task_ledgers.media_footprint_compressed < 0) ||
	    (task_ledgers.graphics_nofootprint < 0) ||
	    (task_ledgers.graphics_footprint < 0) ||
	    (task_ledgers.graphics_nofootprint_compressed < 0) ||
	    (task_ledgers.graphics_footprint_compressed < 0) ||
	    (task_ledgers.neural_nofootprint < 0) ||
	    (task_ledgers.neural_footprint < 0) ||
	    (task_ledgers.neural_nofootprint_compressed < 0) ||
	    (task_ledgers.neural_footprint_compressed < 0) ||
	    (task_ledgers.platform_idle_wakeups < 0) ||
	    (task_ledgers.interrupt_wakeups < 0) ||
	    (task_ledgers.cpu_time_billed_to_me < 0) || (task_ledgers.cpu_time_billed_to_others < 0) ||
	    (task_ledgers.physical_writes < 0) ||
	    (task_ledgers.logical_writes < 0) ||
	    (task_ledgers.logical_writes_to_external < 0) ||
#if CONFIG_PHYS_WRITE_ACCT
	    (task_ledgers.fs_metadata_writes < 0) ||
#endif /* CONFIG_PHYS_WRITE_ACCT */
#if CONFIG_MEMORYSTATUS
	    (task_ledgers.memorystatus_dirty_time < 0) ||
#endif /* CONFIG_MEMORYSTATUS */
	    (task_ledgers.energy_billed_to_me < 0) ||
	    (task_ledgers.energy_billed_to_others < 0) ||
	    (task_ledgers.swapins < 0)
	    ) {
		panic("couldn't create entries for task ledger template");
	}

	ledger_track_credit_only(t, task_ledgers.phys_footprint);
	ledger_track_credit_only(t, task_ledgers.internal);
	ledger_track_credit_only(t, task_ledgers.external);
	ledger_track_credit_only(t, task_ledgers.reusable);

	ledger_track_maximum(t, task_ledgers.phys_footprint, 60);
	ledger_track_maximum(t, task_ledgers.phys_mem, 60);
	ledger_track_maximum(t, task_ledgers.internal, 60);
	ledger_track_maximum(t, task_ledgers.internal_compressed, 60);
	ledger_track_maximum(t, task_ledgers.reusable, 60);
	ledger_track_maximum(t, task_ledgers.external, 60);
#if MACH_ASSERT
	if (pmap_ledgers_panic) {
		ledger_panic_on_negative(t, task_ledgers.phys_footprint);
		ledger_panic_on_negative(t, task_ledgers.page_table);
		ledger_panic_on_negative(t, task_ledgers.internal);
		ledger_panic_on_negative(t, task_ledgers.iokit_mapped);
		ledger_panic_on_negative(t, task_ledgers.alternate_accounting);
		ledger_panic_on_negative(t, task_ledgers.alternate_accounting_compressed);
		ledger_panic_on_negative(t, task_ledgers.purgeable_volatile);
		ledger_panic_on_negative(t, task_ledgers.purgeable_nonvolatile);
		ledger_panic_on_negative(t, task_ledgers.purgeable_volatile_compressed);
		ledger_panic_on_negative(t, task_ledgers.purgeable_nonvolatile_compressed);
#if CONFIG_PHYS_WRITE_ACCT
		ledger_panic_on_negative(t, task_ledgers.fs_metadata_writes);
#endif /* CONFIG_PHYS_WRITE_ACCT */

		ledger_panic_on_negative(t, task_ledgers.tagged_nofootprint);
		ledger_panic_on_negative(t, task_ledgers.tagged_footprint);
		ledger_panic_on_negative(t, task_ledgers.tagged_nofootprint_compressed);
		ledger_panic_on_negative(t, task_ledgers.tagged_footprint_compressed);
		ledger_panic_on_negative(t, task_ledgers.network_volatile);
		ledger_panic_on_negative(t, task_ledgers.network_nonvolatile);
		ledger_panic_on_negative(t, task_ledgers.network_volatile_compressed);
		ledger_panic_on_negative(t, task_ledgers.network_nonvolatile_compressed);
		ledger_panic_on_negative(t, task_ledgers.media_nofootprint);
		ledger_panic_on_negative(t, task_ledgers.media_footprint);
		ledger_panic_on_negative(t, task_ledgers.media_nofootprint_compressed);
		ledger_panic_on_negative(t, task_ledgers.media_footprint_compressed);
		ledger_panic_on_negative(t, task_ledgers.graphics_nofootprint);
		ledger_panic_on_negative(t, task_ledgers.graphics_footprint);
		ledger_panic_on_negative(t, task_ledgers.graphics_nofootprint_compressed);
		ledger_panic_on_negative(t, task_ledgers.graphics_footprint_compressed);
		ledger_panic_on_negative(t, task_ledgers.neural_nofootprint);
		ledger_panic_on_negative(t, task_ledgers.neural_footprint);
		ledger_panic_on_negative(t, task_ledgers.neural_nofootprint_compressed);
		ledger_panic_on_negative(t, task_ledgers.neural_footprint_compressed);
	}
#endif /* MACH_ASSERT */

#if CONFIG_MEMORYSTATUS
	ledger_set_callback(t, task_ledgers.phys_footprint, task_footprint_exceeded, NULL, NULL);
#endif /* CONFIG_MEMORYSTATUS */

	ledger_set_callback(t, task_ledgers.interrupt_wakeups,
	    task_wakeups_rate_exceeded, NULL, NULL);
	ledger_set_callback(t, task_ledgers.physical_writes, task_io_rate_exceeded, (void *)FLAVOR_IO_PHYSICAL_WRITES, NULL);

#if XNU_MONITOR
	ledger_template_complete_secure_alloc(t);
#else /* XNU_MONITOR */
	ledger_template_complete(t);
#endif /* XNU_MONITOR */
	task_ledger_template = t;
}
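
/*
 * The template built above is instantiated once per task: see
 * task_create_internal() below, which calls
 * ledger_instantiate(task_ledger_template, LEDGER_CREATE_ACTIVE_ENTRIES).
 */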
1301 
1302 kern_return_t
task_create_internal(task_t parent_task,proc_ro_t proc_ro,coalition_t * parent_coalitions __unused,boolean_t inherit_memory,boolean_t is_64bit __unused,boolean_t is_64bit_data,uint32_t t_flags,uint32_t t_procflags,uint8_t t_returnwaitflags,task_t * child_task)1303 task_create_internal(
1304 	task_t             parent_task,            /* Null-able */
1305 	proc_ro_t          proc_ro,
1306 	coalition_t        *parent_coalitions __unused,
1307 	boolean_t          inherit_memory,
1308 	boolean_t          is_64bit __unused,
1309 	boolean_t          is_64bit_data,
1310 	uint32_t           t_flags,
1311 	uint32_t           t_procflags,
1312 	uint8_t            t_returnwaitflags,
1313 	task_t             *child_task)            /* OUT */
1314 {
1315 	task_t                  new_task;
1316 	vm_shared_region_t      shared_region;
1317 	ledger_t                ledger = NULL;
1318 	struct task_ro_data     task_ro_data = {};
1319 
1320 	*child_task = NULL;
1321 	new_task = zalloc_id(ZONE_ID_TASK, Z_WAITOK | Z_NOFAIL);
1322 
1323 	if (task_ref_count_init(new_task) != KERN_SUCCESS) {
1324 		zfree_id(ZONE_ID_TASK, new_task);
1325 		return KERN_RESOURCE_SHORTAGE;
1326 	}
1327 
1328 	/* allocate with active entries */
1329 	assert(task_ledger_template != NULL);
1330 	ledger = ledger_instantiate(task_ledger_template, LEDGER_CREATE_ACTIVE_ENTRIES);
1331 	if (ledger == NULL) {
1332 		task_ref_count_fini(new_task);
1333 		zfree_id(ZONE_ID_TASK, new_task);
1334 		return KERN_RESOURCE_SHORTAGE;
1335 	}
1336 
1337 	counter_alloc(&(new_task->faults));
1338 
1339 #if defined(HAS_APPLE_PAC)
1340 	ml_task_set_rop_pid(new_task, parent_task, inherit_memory);
1341 	ml_task_set_jop_pid(new_task, parent_task, inherit_memory);
1342 	ml_task_set_disable_user_jop(new_task, inherit_memory ? parent_task->disable_user_jop : FALSE);
1343 #endif
1344 
1345 
1346 	new_task->ledger = ledger;
1347 
1348 	/* if inherit_memory is true, parent_task MUST not be NULL */
1349 	if (!(t_flags & TF_CORPSE_FORK) && inherit_memory) {
1350 		new_task->map = vm_map_fork(ledger, parent_task->map, 0);
1351 	} else {
1352 		unsigned int pmap_flags = is_64bit ? PMAP_CREATE_64BIT : 0;
1353 		pmap_t pmap = pmap_create_options(ledger, 0, pmap_flags);
1354 		if (pmap == NULL) {
1355 			counter_free(&new_task->faults);
1356 			ledger_dereference(ledger);
1357 			task_ref_count_fini(new_task);
1358 			zfree_id(ZONE_ID_TASK, new_task);
1359 			return KERN_RESOURCE_SHORTAGE;
1360 		}
1361 		new_task->map = vm_map_create_options(pmap,
1362 		    (vm_map_offset_t)(VM_MIN_ADDRESS),
1363 		    (vm_map_offset_t)(VM_MAX_ADDRESS),
1364 		    VM_MAP_CREATE_PAGEABLE);
1365 	}
1366 
1367 	if (new_task->map == NULL) {
1368 		counter_free(&new_task->faults);
1369 		ledger_dereference(ledger);
1370 		task_ref_count_fini(new_task);
1371 		zfree_id(ZONE_ID_TASK, new_task);
1372 		return KERN_RESOURCE_SHORTAGE;
1373 	}
1374 
1375 #if defined(CONFIG_SCHED_MULTIQ)
1376 	new_task->sched_group = sched_group_create();
1377 #endif
1378 
1379 	/* Inherit address space and memlock limit from parent */
1380 	if (parent_task) {
1381 		vm_map_set_size_limit(new_task->map, parent_task->map->size_limit);
1382 		vm_map_set_data_limit(new_task->map, parent_task->map->data_limit);
1383 		vm_map_set_user_wire_limit(new_task->map, (vm_size_t)parent_task->map->user_wire_limit);
1384 	}
1385 
1386 	lck_mtx_init(&new_task->lock, &task_lck_grp, &task_lck_attr);
1387 	queue_init(&new_task->threads);
1388 	new_task->suspend_count = 0;
1389 	new_task->thread_count = 0;
1390 	new_task->active_thread_count = 0;
1391 	new_task->user_stop_count = 0;
1392 	new_task->legacy_stop_count = 0;
1393 	new_task->active = TRUE;
1394 	new_task->halting = FALSE;
1395 	new_task->priv_flags = 0;
1396 	new_task->t_flags = t_flags;
1397 	new_task->t_procflags = t_procflags;
1398 	new_task->t_returnwaitflags = t_returnwaitflags;
1399 	new_task->returnwait_inheritor = current_thread();
1400 	new_task->importance = 0;
1401 	new_task->crashed_thread_id = 0;
1402 	new_task->exec_token = 0;
1403 	new_task->watchports = NULL;
1404 	new_task->t_rr_ranges = NULL;
1405 
1406 	new_task->bank_context = NULL;
1407 
1408 #if __has_feature(ptrauth_calls)
1409 	/* Inherit the PAC exception flags from the parent if in fork */
1410 	if (parent_task && inherit_memory) {
1411 		new_task->t_flags |= (parent_task->t_flags & (TF_PAC_ENFORCE_USER_STATE
1412 		    | TF_PAC_EXC_FATAL));
1413 	}
1414 #endif
1415 
1416 #ifdef MACH_BSD
1417 	new_task->bsd_info = NULL;
1418 	new_task->corpse_info = NULL;
1419 #endif /* MACH_BSD */
1420 
1421 	/* kern_task, which is not created by this function, has unique id 0; start at 1 here. */
1422 	task_set_uniqueid(new_task);
1423 
1424 #if CONFIG_MACF
1425 	set_task_crash_label(new_task, NULL);
1426 
1427 	task_ro_data.task_filters.mach_trap_filter_mask = NULL;
1428 	task_ro_data.task_filters.mach_kobj_filter_mask = NULL;
1429 #endif
1430 
1431 #if CONFIG_MEMORYSTATUS
1432 	if (max_task_footprint != 0) {
1433 		ledger_set_limit(ledger, task_ledgers.phys_footprint, max_task_footprint, PHYS_FOOTPRINT_WARNING_LEVEL);
1434 	}
1435 #endif /* CONFIG_MEMORYSTATUS */
1436 
1437 	if (task_wakeups_monitor_rate != 0) {
1438 		uint32_t flags = WAKEMON_ENABLE | WAKEMON_SET_DEFAULTS;
1439 		int32_t  rate;        // Ignored because of WAKEMON_SET_DEFAULTS
1440 		task_wakeups_monitor_ctl(new_task, &flags, &rate);
1441 	}
1442 
1443 #if CONFIG_IO_ACCOUNTING
1444 	uint32_t flags = IOMON_ENABLE;
1445 	task_io_monitor_ctl(new_task, &flags);
1446 #endif /* CONFIG_IO_ACCOUNTING */
1447 
1448 	machine_task_init(new_task, parent_task, inherit_memory);
1449 
1450 	new_task->task_debug = NULL;
1451 
1452 #if DEVELOPMENT || DEBUG
1453 	new_task->task_unnested = FALSE;
1454 	new_task->task_disconnected_count = 0;
1455 #endif
1456 	queue_init(&new_task->semaphore_list);
1457 	new_task->semaphores_owned = 0;
1458 
1459 	ipc_task_init(new_task, parent_task);
1460 
1461 	new_task->vtimers = 0;
1462 
1463 	new_task->shared_region = NULL;
1464 
1465 	new_task->affinity_space = NULL;
1466 
1467 	new_task->t_kpc = 0;
1468 
1469 	new_task->pidsuspended = FALSE;
1470 	new_task->frozen = FALSE;
1471 	new_task->changing_freeze_state = FALSE;
1472 	new_task->rusage_cpu_flags = 0;
1473 	new_task->rusage_cpu_percentage = 0;
1474 	new_task->rusage_cpu_interval = 0;
1475 	new_task->rusage_cpu_deadline = 0;
1476 	new_task->rusage_cpu_callt = NULL;
1477 #if MACH_ASSERT
1478 	new_task->suspends_outstanding = 0;
1479 #endif
1480 
1481 #if HYPERVISOR
1482 	new_task->hv_task_target = NULL;
1483 #endif /* HYPERVISOR */
1484 
1485 #if CONFIG_TASKWATCH
1486 	queue_init(&new_task->task_watchers);
1487 	new_task->num_taskwatchers  = 0;
1488 	new_task->watchapplying  = 0;
1489 #endif /* CONFIG_TASKWATCH */
1490 
1491 	new_task->mem_notify_reserved = 0;
1492 	new_task->memlimit_attrs_reserved = 0;
1493 
1494 	new_task->requested_policy = default_task_requested_policy;
1495 	new_task->effective_policy = default_task_effective_policy;
1496 
1497 	new_task->task_shared_region_slide = -1;
1498 
1499 	if (parent_task != NULL) {
1500 		task_ro_data.task_tokens.sec_token = *task_get_sec_token(parent_task);
1501 		task_ro_data.task_tokens.audit_token = *task_get_audit_token(parent_task);
1502 	} else {
1503 		task_ro_data.task_tokens.sec_token = KERNEL_SECURITY_TOKEN;
1504 		task_ro_data.task_tokens.audit_token = KERNEL_AUDIT_TOKEN;
1505 	}
1506 
1507 	/* must be set before task_importance_init_from_parent: */
1508 	if (proc_ro != NULL) {
1509 		new_task->bsd_info_ro = proc_ro_ref_task(proc_ro, new_task, &task_ro_data);
1510 	} else {
1511 		new_task->bsd_info_ro = proc_ro_alloc(NULL, NULL, new_task, &task_ro_data);
1512 	}
1513 
1514 	task_importance_init_from_parent(new_task, parent_task);
1515 
1516 	new_task->corpse_vmobject_list = NULL;
1517 
1518 	if (parent_task != TASK_NULL) {
1519 		/* inherit the parent's shared region */
1520 		shared_region = vm_shared_region_get(parent_task);
1521 		vm_shared_region_set(new_task, shared_region);
1522 
1523 #if __has_feature(ptrauth_calls)
1524 		/* use parent's shared_region_id */
1525 		char *shared_region_id = task_get_vm_shared_region_id_and_jop_pid(parent_task, NULL);
1526 		if (shared_region_id != NULL) {
1527 			shared_region_key_alloc(shared_region_id, FALSE, 0);         /* get a reference */
1528 		}
1529 		task_set_shared_region_id(new_task, shared_region_id);
1530 #endif /* __has_feature(ptrauth_calls) */
1531 
1532 		if (task_has_64Bit_addr(parent_task)) {
1533 			task_set_64Bit_addr(new_task);
1534 		}
1535 
1536 		if (task_has_64Bit_data(parent_task)) {
1537 			task_set_64Bit_data(new_task);
1538 		}
1539 
1540 		new_task->all_image_info_addr = parent_task->all_image_info_addr;
1541 		new_task->all_image_info_size = parent_task->all_image_info_size;
1542 		new_task->mach_header_vm_address = 0;
1543 
1544 		if (inherit_memory && parent_task->affinity_space) {
1545 			task_affinity_create(parent_task, new_task);
1546 		}
1547 
1548 		new_task->pset_hint = parent_task->pset_hint = task_choose_pset(parent_task);
1549 
1550 		new_task->task_exc_guard = parent_task->task_exc_guard;
1551 		/* only inherit the option bits, no effect until task_set_immovable_pinned() */
1552 		new_task->task_control_port_options = parent_task->task_control_port_options;
1553 
1554 		if (parent_task->t_flags & TF_NO_SMT) {
1555 			new_task->t_flags |= TF_NO_SMT;
1556 		}
1557 
1558 		if (parent_task->t_flags & TF_USE_PSET_HINT_CLUSTER_TYPE) {
1559 			new_task->t_flags |= TF_USE_PSET_HINT_CLUSTER_TYPE;
1560 		}
1561 
1562 		if (parent_task->t_flags & TF_TECS) {
1563 			new_task->t_flags |= TF_TECS;
1564 		}
1565 
1566 		if (parent_task->t_flags & TF_FILTER_MSG) {
1567 			new_task->t_flags |= TF_FILTER_MSG;
1568 		}
1569 
1570 #if defined(__x86_64__)
1571 		if (parent_task->t_flags & TF_INSN_COPY_OPTOUT) {
1572 			new_task->t_flags |= TF_INSN_COPY_OPTOUT;
1573 		}
1574 #endif
1575 		new_task->priority = BASEPRI_DEFAULT;
1576 		new_task->max_priority = MAXPRI_USER;
1577 
1578 		task_policy_create(new_task, parent_task);
1579 	} else {
1580 #ifdef __LP64__
1581 		if (is_64bit) {
1582 			task_set_64Bit_addr(new_task);
1583 		}
1584 #endif
1585 
1586 		if (is_64bit_data) {
1587 			task_set_64Bit_data(new_task);
1588 		}
1589 
1590 		new_task->all_image_info_addr = (mach_vm_address_t)0;
1591 		new_task->all_image_info_size = (mach_vm_size_t)0;
1592 
1593 		new_task->pset_hint = PROCESSOR_SET_NULL;
1594 
1595 		new_task->task_exc_guard = TASK_EXC_GUARD_NONE;
1596 		new_task->task_control_port_options = TASK_CONTROL_PORT_OPTIONS_NONE;
1597 
1598 		if (kernel_task == TASK_NULL) {
1599 			new_task->priority = BASEPRI_KERNEL;
1600 			new_task->max_priority = MAXPRI_KERNEL;
1601 		} else {
1602 			new_task->priority = BASEPRI_DEFAULT;
1603 			new_task->max_priority = MAXPRI_USER;
1604 		}
1605 	}
1606 
1607 	bzero(new_task->coalition, sizeof(new_task->coalition));
1608 	for (int i = 0; i < COALITION_NUM_TYPES; i++) {
1609 		queue_chain_init(new_task->task_coalition[i]);
1610 	}
1611 
1612 	/* Allocate I/O Statistics */
1613 	new_task->task_io_stats = kalloc_data(sizeof(struct io_stat_info),
1614 	    Z_WAITOK | Z_ZERO | Z_NOFAIL);
1615 
1616 	bzero(&(new_task->cpu_time_eqos_stats), sizeof(new_task->cpu_time_eqos_stats));
1617 	bzero(&(new_task->cpu_time_rqos_stats), sizeof(new_task->cpu_time_rqos_stats));
1618 
1619 	bzero(&new_task->extmod_statistics, sizeof(new_task->extmod_statistics));
1620 
1621 	counter_alloc(&(new_task->pageins));
1622 	counter_alloc(&(new_task->cow_faults));
1623 	counter_alloc(&(new_task->messages_sent));
1624 	counter_alloc(&(new_task->messages_received));
1625 
1626 	/* Copy resource accounting info from the parent for a corpse-forked task. */
1627 	if (parent_task != NULL && (t_flags & TF_CORPSE_FORK)) {
1628 		task_rollup_accounting_info(new_task, parent_task);
1629 		task_store_owned_vmobject_info(new_task, parent_task);
1630 	} else {
1631 		/* Initialize to zero for standard fork/spawn case */
1632 		new_task->total_user_time = 0;
1633 		new_task->total_system_time = 0;
1634 		new_task->total_ptime = 0;
1635 		new_task->total_runnable_time = 0;
1636 		new_task->syscalls_mach = 0;
1637 		new_task->syscalls_unix = 0;
1638 		new_task->c_switch = 0;
1639 		new_task->p_switch = 0;
1640 		new_task->ps_switch = 0;
1641 		new_task->decompressions = 0;
1642 		new_task->low_mem_notified_warn = 0;
1643 		new_task->low_mem_notified_critical = 0;
1644 		new_task->purged_memory_warn = 0;
1645 		new_task->purged_memory_critical = 0;
1646 		new_task->low_mem_privileged_listener = 0;
1647 		new_task->memlimit_is_active = 0;
1648 		new_task->memlimit_is_fatal = 0;
1649 		new_task->memlimit_active_exc_resource = 0;
1650 		new_task->memlimit_inactive_exc_resource = 0;
1651 		new_task->task_timer_wakeups_bin_1 = 0;
1652 		new_task->task_timer_wakeups_bin_2 = 0;
1653 		new_task->task_gpu_ns = 0;
1654 		new_task->task_writes_counters_internal.task_immediate_writes = 0;
1655 		new_task->task_writes_counters_internal.task_deferred_writes = 0;
1656 		new_task->task_writes_counters_internal.task_invalidated_writes = 0;
1657 		new_task->task_writes_counters_internal.task_metadata_writes = 0;
1658 		new_task->task_writes_counters_external.task_immediate_writes = 0;
1659 		new_task->task_writes_counters_external.task_deferred_writes = 0;
1660 		new_task->task_writes_counters_external.task_invalidated_writes = 0;
1661 		new_task->task_writes_counters_external.task_metadata_writes = 0;
1662 #if CONFIG_PHYS_WRITE_ACCT
1663 		new_task->task_fs_metadata_writes = 0;
1664 #endif /* CONFIG_PHYS_WRITE_ACCT */
1665 
1666 		new_task->task_energy = 0;
1667 #if MONOTONIC
1668 		memset(&new_task->task_monotonic, 0, sizeof(new_task->task_monotonic));
1669 #endif /* MONOTONIC */
1670 	}
1671 
1672 
1673 #if CONFIG_COALITIONS
1674 	if (!(t_flags & TF_CORPSE_FORK)) {
1675 		/* TODO: there is no graceful failure path here... */
1676 		if (parent_coalitions && parent_coalitions[COALITION_TYPE_RESOURCE]) {
1677 			coalitions_adopt_task(parent_coalitions, new_task);
1678 		} else if (parent_task && parent_task->coalition[COALITION_TYPE_RESOURCE]) {
1679 			/*
1680 			 * all tasks at least have a resource coalition, so
1681 			 * if the parent has one then inherit all coalitions
1682 			 * the parent is a part of
1683 			 */
1684 			coalitions_adopt_task(parent_task->coalition, new_task);
1685 		} else {
1686 			/* TODO: assert that new_task will be PID 1 (launchd) */
1687 			coalitions_adopt_init_task(new_task);
1688 		}
1689 		/*
1690 		 * on exec, we need to transfer the coalition roles from the
1691 		 * parent task to the exec copy task.
1692 		 */
1693 		if (parent_task && (t_procflags & TPF_EXEC_COPY)) {
1694 			int coal_roles[COALITION_NUM_TYPES];
1695 			task_coalition_roles(parent_task, coal_roles);
1696 			(void)coalitions_set_roles(new_task->coalition, new_task, coal_roles);
1697 		}
1698 	} else {
1699 		coalitions_adopt_corpse_task(new_task);
1700 	}
1701 
1702 	if (new_task->coalition[COALITION_TYPE_RESOURCE] == COALITION_NULL) {
1703 		panic("created task is not a member of a resource coalition");
1704 	}
1705 	task_set_coalition_member(new_task);
1706 #endif /* CONFIG_COALITIONS */
1707 
1708 	new_task->dispatchqueue_offset = 0;
1709 	if (parent_task != NULL) {
1710 		new_task->dispatchqueue_offset = parent_task->dispatchqueue_offset;
1711 	}
1712 
1713 	new_task->task_can_transfer_memory_ownership = FALSE;
1714 	new_task->task_volatile_objects = 0;
1715 	new_task->task_nonvolatile_objects = 0;
1716 	new_task->task_objects_disowning = FALSE;
1717 	new_task->task_objects_disowned = FALSE;
1718 	new_task->task_owned_objects = 0;
1719 	queue_init(&new_task->task_objq);
1720 
1721 #if CONFIG_FREEZE
1722 	queue_init(&new_task->task_frozen_cseg_q);
1723 #endif /* CONFIG_FREEZE */
1724 
1725 	task_objq_lock_init(new_task);
1726 
1727 #if __arm64__
1728 	new_task->task_legacy_footprint = FALSE;
1729 	new_task->task_extra_footprint_limit = FALSE;
1730 	new_task->task_ios13extended_footprint_limit = FALSE;
1731 #endif /* __arm64__ */
1732 	new_task->task_region_footprint = FALSE;
1733 	new_task->task_has_crossed_thread_limit = FALSE;
1734 	new_task->task_thread_limit = 0;
1735 #if CONFIG_SECLUDED_MEMORY
1736 	new_task->task_can_use_secluded_mem = FALSE;
1737 	new_task->task_could_use_secluded_mem = FALSE;
1738 	new_task->task_could_also_use_secluded_mem = FALSE;
1739 	new_task->task_suppressed_secluded = FALSE;
1740 #endif /* CONFIG_SECLUDED_MEMORY */
1741 
1742 	/*
1743 	 * t_flags is set up above. But since we don't
1744 	 * support darkwake mode being set that way
1745 	 * currently, we clear it out here explicitly.
1746 	 */
1747 	new_task->t_flags &= ~(TF_DARKWAKE_MODE);
1748 
1749 	queue_init(&new_task->io_user_clients);
1750 	new_task->loadTag = 0;
1751 
1752 	ipc_task_enable(new_task);
1753 
1754 	lck_mtx_lock(&tasks_threads_lock);
1755 	queue_enter(&tasks, new_task, task_t, tasks);
1756 	tasks_count++;
1757 	if (tasks_suspend_state) {
1758 		task_suspend_internal(new_task);
1759 	}
1760 	lck_mtx_unlock(&tasks_threads_lock);
1761 
1762 	*child_task = new_task;
1763 	return KERN_SUCCESS;
1764 }
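/*
 * Editor's sketch (assumption, not verbatim XNU): a typical caller of
 * task_create_internal() checks the OUT parameter only on success, e.g.
 *
 *	task_t child = TASK_NULL;
 *	kern_return_t kr = task_create_internal(parent_task, proc_ro,
 *	    NULL, FALSE, is_64bit, is_64bit_data, 0, 0, 0, &child);
 *	if (kr == KERN_SUCCESS) {
 *		// child holds one reference; drop it with task_deallocate()
 *	}
 */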
1765 
1766 /*
1767  *	task_rollup_accounting_info
1768  *
1769  *	Roll up accounting stats. Used to roll up stats
1770  *	for the exec copy task and the corpse fork.
1771  */
1772 void
1773 task_rollup_accounting_info(task_t to_task, task_t from_task)
1774 {
1775 	assert(from_task != to_task);
1776 
1777 	to_task->total_user_time = from_task->total_user_time;
1778 	to_task->total_system_time = from_task->total_system_time;
1779 	to_task->total_ptime = from_task->total_ptime;
1780 	to_task->total_runnable_time = from_task->total_runnable_time;
1781 	counter_add(&to_task->faults, counter_load(&from_task->faults));
1782 	counter_add(&to_task->pageins, counter_load(&from_task->pageins));
1783 	counter_add(&to_task->cow_faults, counter_load(&from_task->cow_faults));
1784 	counter_add(&to_task->messages_sent, counter_load(&from_task->messages_sent));
1785 	counter_add(&to_task->messages_received, counter_load(&from_task->messages_received));
1786 	to_task->decompressions = from_task->decompressions;
1787 	to_task->syscalls_mach = from_task->syscalls_mach;
1788 	to_task->syscalls_unix = from_task->syscalls_unix;
1789 	to_task->c_switch = from_task->c_switch;
1790 	to_task->p_switch = from_task->p_switch;
1791 	to_task->ps_switch = from_task->ps_switch;
1792 	to_task->extmod_statistics = from_task->extmod_statistics;
1793 	to_task->low_mem_notified_warn = from_task->low_mem_notified_warn;
1794 	to_task->low_mem_notified_critical = from_task->low_mem_notified_critical;
1795 	to_task->purged_memory_warn = from_task->purged_memory_warn;
1796 	to_task->purged_memory_critical = from_task->purged_memory_critical;
1797 	to_task->low_mem_privileged_listener = from_task->low_mem_privileged_listener;
1798 	*to_task->task_io_stats = *from_task->task_io_stats;
1799 	to_task->cpu_time_eqos_stats = from_task->cpu_time_eqos_stats;
1800 	to_task->cpu_time_rqos_stats = from_task->cpu_time_rqos_stats;
1801 	to_task->task_timer_wakeups_bin_1 = from_task->task_timer_wakeups_bin_1;
1802 	to_task->task_timer_wakeups_bin_2 = from_task->task_timer_wakeups_bin_2;
1803 	to_task->task_gpu_ns = from_task->task_gpu_ns;
1804 	to_task->task_writes_counters_internal.task_immediate_writes = from_task->task_writes_counters_internal.task_immediate_writes;
1805 	to_task->task_writes_counters_internal.task_deferred_writes = from_task->task_writes_counters_internal.task_deferred_writes;
1806 	to_task->task_writes_counters_internal.task_invalidated_writes = from_task->task_writes_counters_internal.task_invalidated_writes;
1807 	to_task->task_writes_counters_internal.task_metadata_writes = from_task->task_writes_counters_internal.task_metadata_writes;
1808 	to_task->task_writes_counters_external.task_immediate_writes = from_task->task_writes_counters_external.task_immediate_writes;
1809 	to_task->task_writes_counters_external.task_deferred_writes = from_task->task_writes_counters_external.task_deferred_writes;
1810 	to_task->task_writes_counters_external.task_invalidated_writes = from_task->task_writes_counters_external.task_invalidated_writes;
1811 	to_task->task_writes_counters_external.task_metadata_writes = from_task->task_writes_counters_external.task_metadata_writes;
1812 #if CONFIG_PHYS_WRITE_ACCT
1813 	to_task->task_fs_metadata_writes = from_task->task_fs_metadata_writes;
1814 #endif /* CONFIG_PHYS_WRITE_ACCT */
1815 	to_task->task_energy = from_task->task_energy;
1816 
1817 #if CONFIG_MEMORYSTATUS
1818 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.memorystatus_dirty_time);
1819 #endif /* CONFIG_MEMORYSTATUS */
1820 
1821 	/* Roll up the remaining ledger entries; memory accounting entries are skipped on purpose */
1822 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time);
1823 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.platform_idle_wakeups);
1824 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.interrupt_wakeups);
1825 #if CONFIG_SCHED_SFI
1826 	for (sfi_class_id_t class_id = SFI_CLASS_UNSPECIFIED; class_id < MAX_SFI_CLASS_ID; class_id++) {
1827 		ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.sfi_wait_times[class_id]);
1828 	}
1829 #endif
1830 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time_billed_to_me);
1831 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time_billed_to_others);
1832 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.physical_writes);
1833 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.logical_writes);
1834 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.energy_billed_to_me);
1835 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.energy_billed_to_others);
1836 }
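/*
 * Editor's note: ledger_rollup_entry() folds from_task's balance for one
 * entry into to_task's ledger. The memory accounting entries are skipped
 * above because a corpse fork receives its footprint via vm_map_fork()
 * with VM_MAP_FORK_CORPSE_FOOTPRINT (see task_duplicate_map_and_threads()
 * below), so rolling them up as well would double-count. This rationale is
 * the editor's reading of the surrounding code, not an authoritative
 * statement.
 */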
1837 
1838 /*
1839  *	task_deallocate_internal:
1840  *
1841  *	Drop a reference on a task.
1842  *	Don't call this directly.
1843  */
1844 extern void task_deallocate_internal(task_t task, os_ref_count_t refs);
1845 void
1846 task_deallocate_internal(
1847 	task_t          task,
1848 	os_ref_count_t  refs)
1849 {
1850 	ledger_amount_t credit, debit, interrupt_wakeups, platform_idle_wakeups;
1851 
1852 	if (task == TASK_NULL) {
1853 		return;
1854 	}
1855 
1856 #if IMPORTANCE_INHERITANCE
1857 	if (refs == 1) {
1858 		/*
1859 		 * If last ref potentially comes from the task's importance,
1860 		 * disconnect it.  But more task refs may be added before
1861 		 * that completes, so wait for the reference to go to zero
1862 		 * naturally (it may happen on a recursive task_deallocate()
1863 		 * from the ipc_importance_disconnect_task() call).
1864 		 */
1865 		if (IIT_NULL != task->task_imp_base) {
1866 			ipc_importance_disconnect_task(task);
1867 		}
1868 		return;
1869 	}
1870 #endif /* IMPORTANCE_INHERITANCE */
1871 
1872 	if (refs > 0) {
1873 		return;
1874 	}
1875 
1876 	/*
1877 	 * The task should be dead at this point. Ensure other resources,
1878 	 * such as threads, are gone before we trash the world.
1879 	 */
1880 	assert(queue_empty(&task->threads));
1881 	assert(task->bsd_info == NULL);
1882 	assert(!is_active(task->itk_space));
1883 	assert(!task->active);
1884 	assert(task->active_thread_count == 0);
1885 
1886 	lck_mtx_lock(&tasks_threads_lock);
1887 	assert(terminated_tasks_count > 0);
1888 	queue_remove(&terminated_tasks, task, task_t, tasks);
1889 	terminated_tasks_count--;
1890 	lck_mtx_unlock(&tasks_threads_lock);
1891 
1892 	/*
1893 	 * remove the reference on bank context
1894 	 */
1895 	task_bank_reset(task);
1896 
1897 	kfree_data(task->task_io_stats, sizeof(struct io_stat_info));
1898 
1899 	/*
1900 	 *	Give the machine dependent code a chance
1901 	 *	to perform cleanup before ripping apart
1902 	 *	the task.
1903 	 */
1904 	machine_task_terminate(task);
1905 
1906 	ipc_task_terminate(task);
1907 
1908 	/* let iokit know */
1909 	iokit_task_terminate(task);
1910 
1911 	if (task->affinity_space) {
1912 		task_affinity_deallocate(task);
1913 	}
1914 
1915 #if MACH_ASSERT
1916 	if (task->ledger != NULL &&
1917 	    task->map != NULL &&
1918 	    task->map->pmap != NULL &&
1919 	    task->map->pmap->ledger != NULL) {
1920 		assert(task->ledger == task->map->pmap->ledger);
1921 	}
1922 #endif /* MACH_ASSERT */
1923 
1924 	vm_owned_objects_disown(task);
1925 	assert(task->task_objects_disowned);
1926 	if (task->task_owned_objects != 0) {
1927 		panic("task_deallocate(%p): "
1928 		    "volatile_objects=%d nonvolatile_objects=%d owned=%d\n",
1929 		    task,
1930 		    task->task_volatile_objects,
1931 		    task->task_nonvolatile_objects,
1932 		    task->task_owned_objects);
1933 	}
1934 
1935 	vm_map_deallocate(task->map);
1936 	is_release(task->itk_space);
1937 	if (task->t_rr_ranges) {
1938 		restartable_ranges_release(task->t_rr_ranges);
1939 	}
1940 
1941 	ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
1942 	    &interrupt_wakeups, &debit);
1943 	ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
1944 	    &platform_idle_wakeups, &debit);
1945 
1946 #if defined(CONFIG_SCHED_MULTIQ)
1947 	sched_group_destroy(task->sched_group);
1948 #endif
1949 
1950 	/* Accumulate statistics for dead tasks */
1951 	lck_spin_lock(&dead_task_statistics_lock);
1952 	dead_task_statistics.total_user_time += task->total_user_time;
1953 	dead_task_statistics.total_system_time += task->total_system_time;
1954 
1955 	dead_task_statistics.task_interrupt_wakeups += interrupt_wakeups;
1956 	dead_task_statistics.task_platform_idle_wakeups += platform_idle_wakeups;
1957 
1958 	dead_task_statistics.task_timer_wakeups_bin_1 += task->task_timer_wakeups_bin_1;
1959 	dead_task_statistics.task_timer_wakeups_bin_2 += task->task_timer_wakeups_bin_2;
1960 	dead_task_statistics.total_ptime += task->total_ptime;
1961 	dead_task_statistics.total_pset_switches += task->ps_switch;
1962 	dead_task_statistics.task_gpu_ns += task->task_gpu_ns;
1963 	dead_task_statistics.task_energy += task->task_energy;
1964 
1965 	lck_spin_unlock(&dead_task_statistics_lock);
1966 	lck_mtx_destroy(&task->lock, &task_lck_grp);
1967 
1968 	if (!ledger_get_entries(task->ledger, task_ledgers.tkm_private, &credit,
1969 	    &debit)) {
1970 		OSAddAtomic64(credit, (int64_t *)&tasks_tkm_private.alloc);
1971 		OSAddAtomic64(debit, (int64_t *)&tasks_tkm_private.free);
1972 	}
1973 	if (!ledger_get_entries(task->ledger, task_ledgers.tkm_shared, &credit,
1974 	    &debit)) {
1975 		OSAddAtomic64(credit, (int64_t *)&tasks_tkm_shared.alloc);
1976 		OSAddAtomic64(debit, (int64_t *)&tasks_tkm_shared.free);
1977 	}
1978 	ledger_dereference(task->ledger);
1979 
1980 	counter_free(&task->faults);
1981 	counter_free(&task->pageins);
1982 	counter_free(&task->cow_faults);
1983 	counter_free(&task->messages_sent);
1984 	counter_free(&task->messages_received);
1985 
1986 #if CONFIG_COALITIONS
1987 	task_release_coalitions(task);
1988 #endif /* CONFIG_COALITIONS */
1989 
1990 	bzero(task->coalition, sizeof(task->coalition));
1991 
1992 #if MACH_BSD
1993 	/* clean up collected information since the last reference to the task is gone */
1994 	if (task->corpse_info) {
1995 		void *corpse_info_kernel = kcdata_memory_get_begin_addr(task->corpse_info);
1996 		task_crashinfo_destroy(task->corpse_info);
1997 		task->corpse_info = NULL;
1998 		kfree_data(corpse_info_kernel, CORPSEINFO_ALLOCATION_SIZE);
1999 	}
2000 #endif
2001 
2002 #if CONFIG_MACF
2003 	if (get_task_crash_label(task)) {
2004 		mac_exc_free_label(get_task_crash_label(task));
2005 		set_task_crash_label(task, NULL);
2006 	}
2007 #endif
2008 
2009 	assert(queue_empty(&task->task_objq));
2010 	task_objq_lock_destroy(task);
2011 
2012 	if (task->corpse_vmobject_list) {
2013 		kfree_data(task->corpse_vmobject_list,
2014 		    (vm_size_t)task->corpse_vmobject_list_size);
2015 	}
2016 
2017 	task_ref_count_fini(task);
2018 
2019 	task->bsd_info_ro = proc_ro_release_task((proc_ro_t)task->bsd_info_ro);
2020 
2021 	if (task->bsd_info_ro != NULL) {
2022 		proc_ro_free(task->bsd_info_ro);
2023 		task->bsd_info_ro = NULL;
2024 	}
2025 
2026 	zfree_id(ZONE_ID_TASK, task);
2027 }
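/*
 * Editor's sketch (assumption about the wrapper, not verbatim XNU): callers
 * go through task_deallocate(), which drops one reference and forwards the
 * remaining count here, roughly:
 *
 *	void
 *	task_deallocate(task_t task)
 *	{
 *		if (task != TASK_NULL) {
 *			task_deallocate_internal(task, task_ref_release(task));
 *		}
 *	}
 *
 * task_ref_release() is a stand-in name for the refcount-release helper;
 * the real wrapper lives outside this excerpt.
 */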
2028 
2029 /*
2030  *	task_name_deallocate_mig:
2031  *
2032  *	Drop a reference on a task name.
2033  */
2034 void
2035 task_name_deallocate_mig(
2036 	task_name_t             task_name)
2037 {
2038 	return task_deallocate_grp((task_t)task_name, TASK_GRP_MIG);
2039 }
2040 
2041 /*
2042  *	task_policy_set_deallocate_mig:
2043  *
2044  *	Drop a task reference acquired via a task_policy_set_t.
2045  */
2046 void
2047 task_policy_set_deallocate_mig(task_policy_set_t task_policy_set)
2048 {
2049 	return task_deallocate_grp((task_t)task_policy_set, TASK_GRP_MIG);
2050 }
2051 
2052 /*
2053  *	task_policy_get_deallocate_mig:
2054  *
2055  *	Drop a task reference acquired via a task_policy_get_t.
2056  */
2057 void
2058 task_policy_get_deallocate_mig(task_policy_get_t task_policy_get)
2059 {
2060 	return task_deallocate_grp((task_t)task_policy_get, TASK_GRP_MIG);
2061 }
2062 
2063 /*
2064  *	task_inspect_deallocate_mig:
2065  *
2066  *	Drop a task inspection reference.
2067  */
2068 void
2069 task_inspect_deallocate_mig(
2070 	task_inspect_t          task_inspect)
2071 {
2072 	return task_deallocate_grp((task_t)task_inspect, TASK_GRP_MIG);
2073 }
2074 
2075 /*
2076  *	task_read_deallocate_mig:
2077  *
2078  *	Drop a reference on a task read port.
2079  */
2080 void
2081 task_read_deallocate_mig(
2082 	task_read_t          task_read)
2083 {
2084 	return task_deallocate_grp((task_t)task_read, TASK_GRP_MIG);
2085 }
2086 
2087 /*
2088  *	task_suspension_token_deallocate:
2089  *
2090  *	Drop a reference on a task suspension token.
2091  */
2092 void
2093 task_suspension_token_deallocate(
2094 	task_suspension_token_t         token)
2095 {
2096 	return task_deallocate((task_t)token);
2097 }
2098 
2099 void
2100 task_suspension_token_deallocate_grp(
2101 	task_suspension_token_t         token,
2102 	task_grp_t                      grp)
2103 {
2104 	return task_deallocate_grp((task_t)token, grp);
2105 }
2106 
2107 /*
2108  * task_collect_crash_info:
2109  *
2110  * Collect crash info from BSD- and Mach-based data.
2111  */
2112 kern_return_t
2113 task_collect_crash_info(
2114 	task_t task,
2115 #ifdef CONFIG_MACF
2116 	struct label *crash_label,
2117 #endif
2118 	int is_corpse_fork)
2119 {
2120 	kern_return_t kr = KERN_SUCCESS;
2121 
2122 	kcdata_descriptor_t crash_data = NULL;
2123 	kcdata_descriptor_t crash_data_release = NULL;
2124 	mach_msg_type_number_t size = CORPSEINFO_ALLOCATION_SIZE;
2125 	mach_vm_offset_t crash_data_ptr = 0;
2126 	void *crash_data_kernel = NULL;
2127 	void *crash_data_kernel_release = NULL;
2128 #if CONFIG_MACF
2129 	struct label *label, *free_label;
2130 #endif
2131 
2132 	if (!corpses_enabled()) {
2133 		return KERN_NOT_SUPPORTED;
2134 	}
2135 
2136 #if CONFIG_MACF
2137 	free_label = label = mac_exc_create_label(NULL);
2138 #endif
2139 
2140 	task_lock(task);
2141 
2142 	assert(is_corpse_fork || task->bsd_info != NULL);
2143 	if (task->corpse_info == NULL && (is_corpse_fork || task->bsd_info != NULL)) {
2144 #if CONFIG_MACF
2145 		/* Set the crash label, used by the exception delivery mac hook */
2146 		free_label = get_task_crash_label(task);         // Most likely NULL.
2147 		set_task_crash_label(task, label);
2148 		mac_exc_update_task_crash_label(task, crash_label);
2149 #endif
2150 		task_unlock(task);
2151 
2152 		crash_data_kernel = kalloc_data(CORPSEINFO_ALLOCATION_SIZE,
2153 		    Z_WAITOK | Z_ZERO);
2154 		if (crash_data_kernel == NULL) {
2155 			kr = KERN_RESOURCE_SHORTAGE;
2156 			goto out_no_lock;
2157 		}
2158 		crash_data_ptr = (mach_vm_offset_t) crash_data_kernel;
2159 
2160 		/* Do not get a corpse ref for corpse fork */
2161 		crash_data = task_crashinfo_alloc_init((mach_vm_address_t)crash_data_ptr, size,
2162 		    is_corpse_fork ? 0 : CORPSE_CRASHINFO_HAS_REF,
2163 		    KCFLAG_USE_MEMCOPY);
2164 		if (crash_data) {
2165 			task_lock(task);
2166 			crash_data_release = task->corpse_info;
2167 			crash_data_kernel_release = kcdata_memory_get_begin_addr(crash_data_release);
2168 			task->corpse_info = crash_data;
2169 
2170 			task_unlock(task);
2171 			kr = KERN_SUCCESS;
2172 		} else {
2173 			kfree_data(crash_data_kernel,
2174 			    CORPSEINFO_ALLOCATION_SIZE);
2175 			kr = KERN_FAILURE;
2176 		}
2177 
2178 		if (crash_data_release != NULL) {
2179 			task_crashinfo_destroy(crash_data_release);
2180 		}
2181 		kfree_data(crash_data_kernel_release, CORPSEINFO_ALLOCATION_SIZE);
2182 	} else {
2183 		task_unlock(task);
2184 	}
2185 
2186 out_no_lock:
2187 #if CONFIG_MACF
2188 	if (free_label != NULL) {
2189 		mac_exc_free_label(free_label);
2190 	}
2191 #endif
2192 	return kr;
2193 }
2194 
2195 /*
2196  * task_deliver_crash_notification:
2197  *
2198  * Makes outcall to registered host port for a corpse.
2199  */
2200 kern_return_t
2201 task_deliver_crash_notification(
2202 	task_t corpse, /* corpse or corpse fork */
2203 	thread_t thread,
2204 	exception_type_t etype,
2205 	mach_exception_subcode_t subcode)
2206 {
2207 	kcdata_descriptor_t crash_info = corpse->corpse_info;
2208 	thread_t th_iter = NULL;
2209 	kern_return_t kr = KERN_SUCCESS;
2210 	wait_interrupt_t wsave;
2211 	mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
2212 	ipc_port_t corpse_port;
2213 
2214 	if (crash_info == NULL) {
2215 		return KERN_FAILURE;
2216 	}
2217 
2218 	assert(task_is_a_corpse(corpse));
2219 
2220 	task_lock(corpse);
2221 
2222 	/*
2223 	 * Always populate code[0] as the effective exception type for EXC_CORPSE_NOTIFY.
2224 	 * Crash reporters should derive whether it's fatal from the corpse blob.
2225 	 */
2226 	code[0] = etype;
2227 	code[1] = subcode;
2228 
2229 	queue_iterate(&corpse->threads, th_iter, thread_t, task_threads)
2230 	{
2231 		if (th_iter->corpse_dup == FALSE) {
2232 			ipc_thread_reset(th_iter);
2233 		}
2234 	}
2235 	task_unlock(corpse);
2236 
2237 	/* Arm the no-sender notification for taskport */
2238 	task_reference(corpse);
2239 	corpse_port = convert_corpse_to_port_and_nsrequest(corpse);
2240 
2241 	wsave = thread_interrupt_level(THREAD_UNINT);
2242 	kr = exception_triage_thread(EXC_CORPSE_NOTIFY, code, EXCEPTION_CODE_MAX, thread);
2243 	if (kr != KERN_SUCCESS) {
2244 		printf("Failed to send exception EXC_CORPSE_NOTIFY. error code: %d for pid %d\n", kr, task_pid(corpse));
2245 	}
2246 
2247 	(void)thread_interrupt_level(wsave);
2248 
2249 	/*
2250 	 * Drop the send right on the corpse port; this will fire the
2251 	 * no-senders notification if exception delivery failed.
2252 	 */
2253 	ipc_port_release_send(corpse_port);
2254 	return kr;
2255 }
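/*
 * Editor's note: the extra task reference taken above is donated to the
 * corpse port's send right. If the EXC_CORPSE_NOTIFY receiver never holds
 * on to that right, releasing it here drives the port to no-senders, and
 * task_port_no_senders() (below) reaps the corpse, so a failed delivery
 * cannot leak the corpse task.
 */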
2256 
2257 /*
2258  *	task_terminate:
2259  *
2260  *	Terminate the specified task.  See comments on thread_terminate
2261  *	(kern/thread.c) about problems with terminating the "current task."
2262  */
2263 
2264 kern_return_t
2265 task_terminate(
2266 	task_t          task)
2267 {
2268 	if (task == TASK_NULL) {
2269 		return KERN_INVALID_ARGUMENT;
2270 	}
2271 
2272 	if (task->bsd_info) {
2273 		return KERN_FAILURE;
2274 	}
2275 
2276 	return task_terminate_internal(task);
2277 }
2278 
2279 #if MACH_ASSERT
2280 extern int proc_pid(struct proc *);
2281 extern void proc_name_kdp(struct proc *p, char *buf, int size);
2282 #endif /* MACH_ASSERT */
2283 
2284 #define VM_MAP_PARTIAL_REAP 0x54  /* 0x150 */
2285 static void
2286 __unused task_partial_reap(task_t task, __unused int pid)
2287 {
2288 	unsigned int    reclaimed_resident = 0;
2289 	unsigned int    reclaimed_compressed = 0;
2290 	uint64_t        task_page_count;
2291 
2292 	task_page_count = (get_task_phys_footprint(task) / PAGE_SIZE_64);
2293 
2294 	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_MAP_PARTIAL_REAP) | DBG_FUNC_START),
2295 	    pid, task_page_count, 0, 0, 0);
2296 
2297 	vm_map_partial_reap(task->map, &reclaimed_resident, &reclaimed_compressed);
2298 
2299 	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_MAP_PARTIAL_REAP) | DBG_FUNC_END),
2300 	    pid, reclaimed_resident, reclaimed_compressed, 0, 0);
2301 }
2302 
2303 /*
2304  * task_mark_corpse:
2305  *
2306  * Mark the task as a corpse. Called by crashing thread.
2307  */
2308 kern_return_t
2309 task_mark_corpse(task_t task)
2310 {
2311 	kern_return_t kr = KERN_SUCCESS;
2312 	thread_t self_thread;
2313 	(void) self_thread;
2314 	wait_interrupt_t wsave;
2315 #if CONFIG_MACF
2316 	struct label *crash_label = NULL;
2317 #endif
2318 
2319 	assert(task != kernel_task);
2320 	assert(task == current_task());
2321 	assert(!task_is_a_corpse(task));
2322 
2323 #if CONFIG_MACF
2324 	crash_label = mac_exc_create_label_for_proc((struct proc*)task->bsd_info);
2325 #endif
2326 
2327 	kr = task_collect_crash_info(task,
2328 #if CONFIG_MACF
2329 	    crash_label,
2330 #endif
2331 	    FALSE);
2332 	if (kr != KERN_SUCCESS) {
2333 		goto out;
2334 	}
2335 
2336 	self_thread = current_thread();
2337 
2338 	wsave = thread_interrupt_level(THREAD_UNINT);
2339 	task_lock(task);
2340 
2341 	/*
2342 	 * Check if any other thread called task_terminate_internal
2343 	 * and made the task inactive before we could mark it for
2344 	 * corpse pending report. Bail out if the task is inactive.
2345 	 */
2346 	if (!task->active) {
2347 		kcdata_descriptor_t crash_data_release = task->corpse_info;
2348 		void *crash_data_kernel_release = kcdata_memory_get_begin_addr(crash_data_release);
2349 
2350 		task->corpse_info = NULL;
2351 		task_unlock(task);
2352 
2353 		if (crash_data_release != NULL) {
2354 			task_crashinfo_destroy(crash_data_release);
2355 		}
2356 		kfree_data(crash_data_kernel_release, CORPSEINFO_ALLOCATION_SIZE);
2357 		return KERN_TERMINATED;
2358 	}
2359 
2360 	task_set_corpse_pending_report(task);
2361 	task_set_corpse(task);
2362 	task->crashed_thread_id = thread_tid(self_thread);
2363 
2364 	kr = task_start_halt_locked(task, TRUE);
2365 	assert(kr == KERN_SUCCESS);
2366 
2367 	task_set_uniqueid(task);
2368 
2369 	task_unlock(task);
2370 
2371 	/*
2372 	 * ipc_task_reset() moved to last thread_terminate_self(): rdar://75737960.
2373 	 * disable old ports here instead.
2374 	 *
2375 	 * The vm_map and ipc_space must exist until this function returns,
2376 	 * convert_port_to_{map,space}_with_flavor relies on this behavior.
2377 	 */
2378 	ipc_task_disable(task);
2379 
2380 	/* terminate the ipc space */
2381 	ipc_space_terminate(task->itk_space);
2382 
2383 	/* Add it to global corpse task list */
2384 	task_add_to_corpse_task_list(task);
2385 
2386 	thread_terminate_internal(self_thread);
2387 
2388 	(void) thread_interrupt_level(wsave);
2389 	assert(task->halting == TRUE);
2390 
2391 out:
2392 #if CONFIG_MACF
2393 	mac_exc_free_label(crash_label);
2394 #endif
2395 	return kr;
2396 }
2397 
2398 /*
2399  *	task_set_uniqueid
2400  *
2401  *	Set task uniqueid to systemwide unique 64 bit value
2402  */
2403 void
2404 task_set_uniqueid(task_t task)
2405 {
2406 	task->task_uniqueid = OSIncrementAtomic64(&next_taskuniqueid);
2407 }
2408 
2409 /*
2410  *	task_clear_corpse
2411  *
2412  *	Clears the corpse pending bit on task.
2413  *	Removes inspection bit on the threads.
2414  */
2415 void
2416 task_clear_corpse(task_t task)
2417 {
2418 	thread_t th_iter = NULL;
2419 
2420 	task_lock(task);
2421 	queue_iterate(&task->threads, th_iter, thread_t, task_threads)
2422 	{
2423 		thread_mtx_lock(th_iter);
2424 		th_iter->inspection = FALSE;
2425 		ipc_thread_disable(th_iter);
2426 		thread_mtx_unlock(th_iter);
2427 	}
2428 
2429 	thread_terminate_crashed_threads();
2430 	/* remove the pending corpse report flag */
2431 	task_clear_corpse_pending_report(task);
2432 
2433 	task_unlock(task);
2434 }
2435 
2436 /*
2437  *	task_port_no_senders
2438  *
2439  *	Called whenever the Mach port system detects no-senders on
2440  *	the task port of a corpse.
2441  *	Each notification that comes in should terminate the task (corpse).
2442  */
2443 static void
2444 task_port_no_senders(ipc_port_t port, __unused mach_port_mscount_t mscount)
2445 {
2446 	task_t task = ipc_kobject_get_locked(port, IKOT_TASK_CONTROL);
2447 
2448 	assert(task != TASK_NULL);
2449 	assert(task_is_a_corpse(task));
2450 
2451 	/* Remove the task from global corpse task list */
2452 	task_remove_from_corpse_task_list(task);
2453 
2454 	task_clear_corpse(task);
2455 	task_terminate_internal(task);
2456 }
2457 
2458 /*
2459  *	task_port_with_flavor_no_senders
2460  *
2461  *	Called whenever the Mach port system detects no-senders on
2462  *	the task inspect or read port. These ports are allocated lazily and
2463  *	should be deallocated here when there are no senders remaining.
2464  */
2465 static void
2466 task_port_with_flavor_no_senders(
2467 	ipc_port_t          port,
2468 	mach_port_mscount_t mscount __unused)
2469 {
2470 	task_t task;
2471 	mach_task_flavor_t flavor;
2472 	ipc_kobject_type_t kotype;
2473 
2474 	ip_mq_lock(port);
2475 	if (port->ip_srights > 0) {
2476 		ip_mq_unlock(port);
2477 		return;
2478 	}
2479 	kotype = ip_kotype(port);
2480 	assert((IKOT_TASK_READ == kotype) || (IKOT_TASK_INSPECT == kotype));
2481 	task = ipc_kobject_get_locked(port, kotype);
2482 	if (task != TASK_NULL) {
2483 		task_reference(task);
2484 	}
2485 	ip_mq_unlock(port);
2486 
2487 	if (task == TASK_NULL) {
2488 		/* The task is exiting or disabled; it will eventually deallocate the port */
2489 		return;
2490 	}
2491 
2492 	if (kotype == IKOT_TASK_READ) {
2493 		flavor = TASK_FLAVOR_READ;
2494 	} else {
2495 		flavor = TASK_FLAVOR_INSPECT;
2496 	}
2497 
2498 	itk_lock(task);
2499 	ip_mq_lock(port);
2500 
2501 	/*
2502 	 * If the port is no longer active, then ipc_task_terminate() ran
2503 	 * and destroyed the kobject already. Just deallocate the task
2504 	 * ref we took and go away.
2505 	 *
2506 	 * It is also possible that several nsrequests are in flight,
2507 	 * only one shall NULL-out the port entry, and this is the one
2508 	 * that gets to dealloc the port.
2509 	 *
2510 	 * Check for a stale no-senders notification. A call to any function
2511 	 * that vends out send rights to this port could resurrect it between
2512 	 * this notification being generated and actually being handled here.
2513 	 */
2514 	if (!ip_active(port) ||
2515 	    task->itk_task_ports[flavor] != port ||
2516 	    port->ip_srights > 0) {
2517 		ip_mq_unlock(port);
2518 		itk_unlock(task);
2519 		task_deallocate(task);
2520 		return;
2521 	}
2522 
2523 	assert(task->itk_task_ports[flavor] == port);
2524 	task->itk_task_ports[flavor] = IP_NULL;
2525 	itk_unlock(task);
2526 
2527 	ipc_kobject_dealloc_port_and_unlock(port, 0, kotype);
2528 
2529 	task_deallocate(task);
2530 }
2531 
2532 /*
2533  *	task_wait_till_threads_terminate_locked
2534  *
2535  *	Wait till all the threads in the task are terminated.
2536  *	Might release the task lock and re-acquire it.
2537  */
2538 void
2539 task_wait_till_threads_terminate_locked(task_t task)
2540 {
2541 	/* wait for all the threads in the task to terminate */
2542 	while (task->active_thread_count != 0) {
2543 		assert_wait((event_t)&task->active_thread_count, THREAD_UNINT);
2544 		task_unlock(task);
2545 		thread_block(THREAD_CONTINUE_NULL);
2546 
2547 		task_lock(task);
2548 	}
2549 }
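/*
 * Editor's sketch (assumption, not verbatim XNU): the wakeup side of the
 * wait above lives in the thread-termination path, roughly:
 *
 *	if (--task->active_thread_count == 0) {
 *		thread_wakeup((event_t)&task->active_thread_count);
 *	}
 */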
2550 
2551 /*
2552  *	task_duplicate_map_and_threads
2553  *
2554  *	Copy the vm_map of the source task.
2555  *	Copy active threads from the source task to the destination task.
2556  *	The source task is suspended for the duration of the copy.
2557  */
2558 kern_return_t
2559 task_duplicate_map_and_threads(
2560 	task_t task,
2561 	void *p,
2562 	task_t new_task,
2563 	thread_t *thread_ret,
2564 	uint64_t **udata_buffer,
2565 	int *size,
2566 	int *num_udata,
2567 	bool for_exception)
2568 {
2569 	kern_return_t kr = KERN_SUCCESS;
2570 	int active;
2571 	thread_t thread, self, thread_return = THREAD_NULL;
2572 	thread_t new_thread = THREAD_NULL, first_thread = THREAD_NULL;
2573 	thread_t *thread_array;
2574 	uint32_t active_thread_count = 0, array_count = 0, i;
2575 	vm_map_t oldmap;
2576 	uint64_t *buffer = NULL;
2577 	int buf_size = 0;
2578 	int est_knotes = 0, num_knotes = 0;
2579 
2580 	self = current_thread();
2581 
2582 	/*
2583 	 * Suspend the task to copy thread state; use the internal
2584 	 * variant so that no user-space process can resume
2585 	 * the task from under us
2586 	 */
2587 	kr = task_suspend_internal(task);
2588 	if (kr != KERN_SUCCESS) {
2589 		return kr;
2590 	}
2591 
2592 	if (task->map->disable_vmentry_reuse == TRUE) {
2593 		/*
2594 		 * Quite likely GuardMalloc (or some debugging tool)
2595 	 * is being used on this task, and it has gone through
2596 		 * its limit. Making a corpse will likely encounter
2597 		 * a lot of VM entries that will need COW.
2598 		 *
2599 		 * Skip it.
2600 		 */
2601 #if DEVELOPMENT || DEBUG
2602 		memorystatus_abort_vm_map_fork(task);
2603 #endif
2604 		task_resume_internal(task);
2605 		return KERN_FAILURE;
2606 	}
2607 
2608 	/* Check with VM if vm_map_fork is allowed for this task */
2609 	if (memorystatus_allowed_vm_map_fork(task)) {
2610 		/* Set up the new task's vm_map: switch from the parent task's map to its COW map */
2611 		oldmap = new_task->map;
2612 		new_task->map = vm_map_fork(new_task->ledger,
2613 		    task->map,
2614 		    (VM_MAP_FORK_SHARE_IF_INHERIT_NONE |
2615 		    VM_MAP_FORK_PRESERVE_PURGEABLE |
2616 		    VM_MAP_FORK_CORPSE_FOOTPRINT));
2617 		if (new_task->map) {
2618 			vm_map_deallocate(oldmap);
2619 
2620 			/* copy ledgers that impact the memory footprint */
2621 			vm_map_copy_footprint_ledgers(task, new_task);
2622 
2623 			/* Get all the udata pointers from kqueue */
2624 			est_knotes = kevent_proc_copy_uptrs(p, NULL, 0);
2625 			if (est_knotes > 0) {
2626 				buf_size = (est_knotes + 32) * sizeof(uint64_t);
2627 				buffer = kalloc_data(buf_size, Z_WAITOK);
2628 				num_knotes = kevent_proc_copy_uptrs(p, buffer, buf_size);
2629 				if (num_knotes > est_knotes + 32) {
2630 					num_knotes = est_knotes + 32;
2631 				}
2632 			}
2633 		} else {
2634 			new_task->map = oldmap;
2635 #if DEVELOPMENT || DEBUG
2636 			memorystatus_abort_vm_map_fork(task);
2637 #endif
2638 			task_resume_internal(task);
2639 			return KERN_NO_SPACE;
2640 		}
2641 	} else if (!for_exception) {
2642 #if DEVELOPMENT || DEBUG
2643 		memorystatus_abort_vm_map_fork(task);
2644 #endif
2645 		task_resume_internal(task);
2646 		return KERN_NO_SPACE;
2647 	}
2648 
2649 	active_thread_count = task->active_thread_count;
2650 	if (active_thread_count == 0) {
2651 		kfree_data(buffer, buf_size);
2652 		task_resume_internal(task);
2653 		return KERN_FAILURE;
2654 	}
2655 
2656 	thread_array = kalloc_type(thread_t, active_thread_count, Z_WAITOK);
2657 
2658 	/* Iterate all the threads and drop the task lock before calling thread_create_with_continuation */
2659 	task_lock(task);
2660 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
2661 		/* Skip inactive threads */
2662 		active = thread->active;
2663 		if (!active) {
2664 			continue;
2665 		}
2666 
2667 		if (array_count >= active_thread_count) {
2668 			break;
2669 		}
2670 
2671 		thread_array[array_count++] = thread;
2672 		thread_reference(thread);
2673 	}
2674 	task_unlock(task);
2675 
2676 	for (i = 0; i < array_count; i++) {
2677 		kr = thread_create_with_continuation(new_task, &new_thread, (thread_continue_t)thread_corpse_continue);
2678 		if (kr != KERN_SUCCESS) {
2679 			break;
2680 		}
2681 
2682 		/* Equivalent of current thread in corpse */
2683 		if (thread_array[i] == self) {
2684 			thread_return = new_thread;
2685 			new_task->crashed_thread_id = thread_tid(new_thread);
2686 		} else if (first_thread == NULL) {
2687 			first_thread = new_thread;
2688 		} else {
2689 			/* drop the extra ref returned by thread_create_with_continuation */
2690 			thread_deallocate(new_thread);
2691 		}
2692 
2693 		kr = thread_dup2(thread_array[i], new_thread);
2694 		if (kr != KERN_SUCCESS) {
2695 			thread_mtx_lock(new_thread);
2696 			new_thread->corpse_dup = TRUE;
2697 			thread_mtx_unlock(new_thread);
2698 			continue;
2699 		}
2700 
2701 		/* Copy thread name */
2702 		bsd_copythreadname(get_bsdthread_info(new_thread),
2703 		    get_bsdthread_info(thread_array[i]));
2704 		new_thread->thread_tag = thread_array[i]->thread_tag &
2705 		    ~THREAD_TAG_USER_JOIN;
2706 		thread_copy_resource_info(new_thread, thread_array[i]);
2707 	}
2708 
2709 	/* return the first thread if we couldn't find the equivalent of current */
2710 	if (thread_return == THREAD_NULL) {
2711 		thread_return = first_thread;
2712 	} else if (first_thread != THREAD_NULL) {
2713 		/* drop the extra ref returned by thread_create_with_continuation */
2714 		thread_deallocate(first_thread);
2715 	}
2716 
2717 	task_resume_internal(task);
2718 
2719 	for (i = 0; i < array_count; i++) {
2720 		thread_deallocate(thread_array[i]);
2721 	}
2722 	kfree_type(thread_t, active_thread_count, thread_array);
2723 
2724 	if (kr == KERN_SUCCESS) {
2725 		*thread_ret = thread_return;
2726 		*udata_buffer = buffer;
2727 		*size = buf_size;
2728 		*num_udata = num_knotes;
2729 	} else {
2730 		if (thread_return != THREAD_NULL) {
2731 			thread_deallocate(thread_return);
2732 		}
2733 		kfree_data(buffer, buf_size);
2734 	}
2735 
2736 	return kr;
2737 }
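/*
 * Editor's note: this routine is the heart of corpse creation. The corpse
 * task is first built with task_create_internal(..., TF_CORPSE_FORK, ...)
 * and a fresh map; this routine then swaps in a COW fork of the crashing
 * task's map and clones its active threads. The udata buffer returned to
 * the caller carries kqueue user pointers for the crash report. This
 * summary is the editor's reading of the function above.
 */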
2738 
2739 #if CONFIG_SECLUDED_MEMORY
2740 extern void task_set_can_use_secluded_mem_locked(
2741 	task_t          task,
2742 	boolean_t       can_use_secluded_mem);
2743 #endif /* CONFIG_SECLUDED_MEMORY */
2744 
2745 #if MACH_ASSERT
2746 int debug4k_panic_on_terminate = 0;
2747 #endif /* MACH_ASSERT */
2748 kern_return_t
2749 task_terminate_internal(
2750 	task_t                  task)
2751 {
2752 	thread_t                        thread, self;
2753 	task_t                          self_task;
2754 	boolean_t                       interrupt_save;
2755 	int                             pid = 0;
2756 
2757 	assert(task != kernel_task);
2758 
2759 	self = current_thread();
2760 	self_task = current_task();
2761 
2762 	/*
2763 	 *	Get the task locked and make sure that we are not racing
2764 	 *	with someone else trying to terminate us.
2765 	 */
2766 	if (task == self_task) {
2767 		task_lock(task);
2768 	} else if (task < self_task) {
2769 		task_lock(task);
2770 		task_lock(self_task);
2771 	} else {
2772 		task_lock(self_task);
2773 		task_lock(task);
2774 	}
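	/*
	 * Editor's note: taking the two task locks in ascending address
	 * order imposes a global lock hierarchy, so two threads terminating
	 * each other's tasks cannot deadlock on this pair of locks.
	 */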
2775 
2776 #if CONFIG_SECLUDED_MEMORY
2777 	if (task->task_can_use_secluded_mem) {
2778 		task_set_can_use_secluded_mem_locked(task, FALSE);
2779 	}
2780 	task->task_could_use_secluded_mem = FALSE;
2781 	task->task_could_also_use_secluded_mem = FALSE;
2782 
2783 	if (task->task_suppressed_secluded) {
2784 		stop_secluded_suppression(task);
2785 	}
2786 #endif /* CONFIG_SECLUDED_MEMORY */
2787 
2788 	if (!task->active) {
2789 		/*
2790 		 *	Task is already being terminated.
2791 		 *	Just return an error. If we are dying, this will
2792 		 *	just get us to our AST special handler and that
2793 		 *	will get us to finalize the termination of ourselves.
2794 		 */
2795 		task_unlock(task);
2796 		if (self_task != task) {
2797 			task_unlock(self_task);
2798 		}
2799 
2800 		return KERN_FAILURE;
2801 	}
2802 
2803 	if (task_corpse_pending_report(task)) {
2804 		/*
2805 		 *	Task is marked for reporting as corpse.
2806 		 *	Just return an error. This will
2807 		 *	just get us to our AST special handler and that
2808 		 *	will get us to finish the path to death
2809 		 */
2810 		task_unlock(task);
2811 		if (self_task != task) {
2812 			task_unlock(self_task);
2813 		}
2814 
2815 		return KERN_FAILURE;
2816 	}
2817 
2818 	if (self_task != task) {
2819 		task_unlock(self_task);
2820 	}
2821 
2822 	/*
2823 	 * Make sure the current thread does not get aborted out of
2824 	 * the waits inside these operations.
2825 	 */
2826 	interrupt_save = thread_interrupt_level(THREAD_UNINT);
2827 
2828 	/*
2829 	 *	Indicate that we want all the threads to stop executing
2830 	 *	at user space by holding the task (we would have held
2831 	 *	each thread independently in thread_terminate_internal -
2832 	 *	but this way we may be more likely to already find it
2833 	 *	held there).  Mark the task inactive, and prevent
2834 	 *	further task operations via the task port.
2835 	 *
2836 	 *	The vm_map and ipc_space must exist until this function returns,
2837 	 *	convert_port_to_{map,space}_with_flavor relies on this behavior.
2838 	 */
2839 	task_hold_locked(task);
2840 	task->active = FALSE;
2841 	ipc_task_disable(task);
2842 
2843 #if CONFIG_TELEMETRY
2844 	/*
2845 	 * Notify telemetry that this task is going away.
2846 	 */
2847 	telemetry_task_ctl_locked(task, TF_TELEMETRY, 0);
2848 #endif
2849 
2850 	/*
2851 	 *	Terminate each thread in the task.
2852 	 */
2853 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
2854 		thread_terminate_internal(thread);
2855 	}
2856 
2857 #ifdef MACH_BSD
2858 	if (task->bsd_info != NULL && !task_is_exec_copy(task)) {
2859 		pid = proc_pid(task->bsd_info);
2860 	}
2861 #endif /* MACH_BSD */
2862 
2863 	task_unlock(task);
2864 
2865 	proc_set_task_policy(task, TASK_POLICY_ATTRIBUTE,
2866 	    TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
2867 
2868 	/* Early object reap phase */
2869 
2870 // PR-17045188: Revisit implementation
2871 //        task_partial_reap(task, pid);
2872 
2873 #if CONFIG_TASKWATCH
2874 	/*
2875 	 * remove all task watchers
2876 	 */
2877 	task_removewatchers(task);
2878 
2879 #endif /* CONFIG_TASKWATCH */
2880 
2881 	/*
2882 	 *	Destroy all synchronizers owned by the task.
2883 	 */
2884 	task_synchronizer_destroy_all(task);
2885 
2886 	/*
2887 	 *	Clear the watchport boost on the task.
2888 	 */
2889 	task_remove_turnstile_watchports(task);
2890 
2891 	/*
2892 	 *	Destroy the IPC space, leaving just a reference for it.
2893 	 */
2894 	ipc_space_terminate(task->itk_space);
2895 
2896 #if 00
2897 	/* if some ledgers go negative on tear-down again... */
2898 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
2899 	    task_ledgers.phys_footprint);
2900 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
2901 	    task_ledgers.internal);
2902 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
2903 	    task_ledgers.iokit_mapped);
2904 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
2905 	    task_ledgers.alternate_accounting);
2906 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
2907 	    task_ledgers.alternate_accounting_compressed);
2908 #endif
2909 
2910 	/*
2911 	 * If the current thread is a member of the task
2912 	 * being terminated, then the last reference to
2913 	 * the task will not be dropped until the thread
2914 	 * is finally reaped.  To avoid incurring the
2915 	 * expense of removing the address space regions
2916 	 * at reap time, we do it explicitly here.
2917 	 */
2918 
2919 #if MACH_ASSERT
2920 	/*
2921 	 * Identify the pmap's process, in case the pmap ledgers drift
2922 	 * and we have to report it.
2923 	 */
2924 	char procname[17];
2925 	if (task->bsd_info && !task_is_exec_copy(task)) {
2926 		pid = proc_pid(task->bsd_info);
2927 		proc_name_kdp(task->bsd_info, procname, sizeof(procname));
2928 	} else {
2929 		pid = 0;
2930 		strlcpy(procname, "<unknown>", sizeof(procname));
2931 	}
2932 	pmap_set_process(task->map->pmap, pid, procname);
2933 	if (vm_map_page_shift(task->map) < (int)PAGE_SHIFT) {
2934 		DEBUG4K_LIFE("map %p procname: %s\n", task->map, procname);
2935 		if (debug4k_panic_on_terminate) {
2936 			panic("DEBUG4K: %s:%d %d[%s] map %p", __FUNCTION__, __LINE__, pid, procname, task->map);
2937 		}
2938 	}
2939 #endif /* MACH_ASSERT */
2940 
2941 	vm_map_terminate(task->map);
2942 
2943 	/* release our shared region */
2944 	vm_shared_region_set(task, NULL);
2945 
2946 #if __has_feature(ptrauth_calls)
2947 	task_set_shared_region_id(task, NULL);
2948 #endif /* __has_feature(ptrauth_calls) */
2949 
2950 	lck_mtx_lock(&tasks_threads_lock);
2951 	queue_remove(&tasks, task, task_t, tasks);
2952 	queue_enter(&terminated_tasks, task, task_t, tasks);
2953 	tasks_count--;
2954 	terminated_tasks_count++;
2955 	lck_mtx_unlock(&tasks_threads_lock);
2956 
2957 	/*
2958 	 * We no longer need to guard against being aborted, so restore
2959 	 * the previous interruptible state.
2960 	 */
2961 	thread_interrupt_level(interrupt_save);
2962 
2963 #if KPC
2964 	/* force the task to release all ctrs */
2965 	if (task->t_kpc & TASK_KPC_FORCED_ALL_CTRS) {
2966 		kpc_force_all_ctrs(task, 0);
2967 	}
2968 #endif /* KPC */
2969 
2970 #if CONFIG_COALITIONS
2971 	/*
2972 	 * Leave the coalition for a corpse task or a task that
2973 	 * never had any active threads (e.g. fork or exec failure).
2974 	 * For a task with active threads, the task will be removed
2975 	 * from the coalition by the last terminating thread.
2976 	 */
2977 	if (task->active_thread_count == 0) {
2978 		coalitions_remove_task(task);
2979 	}
2980 #endif
2981 
2982 #if CONFIG_FREEZE
2983 	extern int      vm_compressor_available;
2984 	if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE && vm_compressor_available) {
2985 		task_disown_frozen_csegs(task);
2986 		assert(queue_empty(&task->task_frozen_cseg_q));
2987 	}
2988 #endif /* CONFIG_FREEZE */
2989 
2990 
2991 	/*
2992 	 * Get rid of the task active reference on itself.
2993 	 */
2994 	task_deallocate_grp(task, TASK_GRP_INTERNAL);
2995 
2996 	return KERN_SUCCESS;
2997 }
2998 
2999 void
3000 tasks_system_suspend(boolean_t suspend)
3001 {
3002 	task_t task;
3003 
3004 	KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SUSPEND_USERSPACE) |
3005 	    (suspend ? DBG_FUNC_START : DBG_FUNC_END));
3006 
3007 	lck_mtx_lock(&tasks_threads_lock);
3008 	assert(tasks_suspend_state != suspend);
3009 	tasks_suspend_state = suspend;
3010 	queue_iterate(&tasks, task, task_t, tasks) {
3011 		if (task == kernel_task) {
3012 			continue;
3013 		}
3014 		suspend ? task_suspend_internal(task) : task_resume_internal(task);
3015 	}
3016 	lck_mtx_unlock(&tasks_threads_lock);
3017 }
3018 
3019 /*
3020  * task_start_halt:
3021  *
3022  *      Shut the current task down (except for the current thread) in
3023  *	preparation for dramatic changes to the task (probably exec).
3024  *	We hold the task and mark all other threads in the task for
3025  *	termination.
3026  */
3027 kern_return_t
3028 task_start_halt(task_t task)
3029 {
3030 	kern_return_t kr = KERN_SUCCESS;
3031 	task_lock(task);
3032 	kr = task_start_halt_locked(task, FALSE);
3033 	task_unlock(task);
3034 	return kr;
3035 }
3036 
3037 static kern_return_t
3038 task_start_halt_locked(task_t task, boolean_t should_mark_corpse)
3039 {
3040 	thread_t thread, self;
3041 	uint64_t dispatchqueue_offset;
3042 
3043 	assert(task != kernel_task);
3044 
3045 	self = current_thread();
3046 
3047 	if (task != get_threadtask(self) && !task_is_a_corpse_fork(task)) {
3048 		return KERN_INVALID_ARGUMENT;
3049 	}
3050 
3051 	if (!should_mark_corpse &&
3052 	    (task->halting || !task->active || !self->active)) {
3053 		/*
3054 		 * Task or current thread is already being terminated.
3055 		 * Hurry up and return out of the current kernel context
3056 		 * so that we run our AST special handler to terminate
3057 		 * creation might have raced with exec; let the corpse
3058 		 * creation continue. Once the current thread reaches AST,
3059 		 * the thread in exec will be woken up from task_complete_halt.
3060 		 * Exec will fail because the proc was marked for exit.
3061 		 * Exec will fail cause the proc was marked for exit.
3062 		 * Once the thread in exec reaches AST, it will call proc_exit
3063 		 * and deliver the EXC_CORPSE_NOTIFY.
3064 		 */
3065 		return KERN_FAILURE;
3066 	}
3067 
3068 	/* Thread creation will fail after this point of no return. */
3069 	task->halting = TRUE;
3070 
3071 	/*
3072 	 * Mark all the threads to keep them from starting any more
3073 	 * user-level execution. The thread_terminate_internal code
3074 	 * would do this on a thread by thread basis anyway, but this
3075 	 * gives us a better chance of not having to wait there.
3076 	 */
3077 	task_hold_locked(task);
3078 	dispatchqueue_offset = get_dispatchqueue_offset_from_proc(task->bsd_info);
3079 
3080 	/*
3081 	 * Terminate all the other threads in the task.
3082 	 */
3083 	queue_iterate(&task->threads, thread, thread_t, task_threads)
3084 	{
3085 		/*
3086 		 * Remove priority throttles so that threads terminate in a timely manner. This has
3087 		 * to be done after task_hold_locked() traps all threads to AST, but before
3088 		 * threads are marked inactive in thread_terminate_internal(). Takes thread
3089 		 * mutex lock.
3090 		 * See: thread_policy_update_tasklocked().
3091 		 */
3092 		proc_set_thread_policy(thread, TASK_POLICY_ATTRIBUTE,
3093 		    TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
3094 
3095 		if (should_mark_corpse) {
3096 			thread_mtx_lock(thread);
3097 			thread->inspection = TRUE;
3098 			thread_mtx_unlock(thread);
3099 		}
3100 		if (thread != self) {
3101 			thread_terminate_internal(thread);
3102 		}
3103 	}
3104 	task->dispatchqueue_offset = dispatchqueue_offset;
3105 
3106 	task_release_locked(task);
3107 
3108 	return KERN_SUCCESS;
3109 }
3110 
3111 
3112 /*
3113  * task_complete_halt:
3114  *
3115  *	Complete task halt by waiting for threads to terminate, then clean
3116  *	up task resources (VM, port namespace, etc...) and then let the
3117  *	current thread go in the (practically empty) task context.
3118  *
3119  *	Note: the task->halting flag is not cleared, in order to avoid the
3120  *	creation of a new thread in the old exec'ed task.
3121  */
3122 void
3123 task_complete_halt(task_t task)
3124 {
3125 	task_lock(task);
3126 	assert(task->halting);
3127 	assert(task == current_task());
3128 
3129 	/*
3130 	 *	Wait for the other threads to get shut down.
3131 	 *      When the last other thread is reaped, we'll be
3132 	 *	woken up.
3133 	 */
3134 	if (task->thread_count > 1) {
3135 		assert_wait((event_t)&task->halting, THREAD_UNINT);
3136 		task_unlock(task);
3137 		thread_block(THREAD_CONTINUE_NULL);
3138 	} else {
3139 		task_unlock(task);
3140 	}
3141 
3142 	/*
3143 	 *	Give the machine dependent code a chance
3144 	 *	to perform cleanup of task-level resources
3145 	 *	associated with the current thread before
3146 	 *	ripping apart the task.
3147 	 */
3148 	machine_task_terminate(task);
3149 
3150 	/*
3151 	 *	Destroy all synchronizers owned by the task.
3152 	 */
3153 	task_synchronizer_destroy_all(task);
3154 
3155 	/*
3156 	 *	Terminate the IPC space.  A long time ago,
3157 	 *	this used to be ipc_space_clean() which would
3158 	 *	keep the space active but hollow it.
3159 	 *
3160 	 *	We really do not need those semantics
3161 	 *	now that tasks die with exec.
3162 	 */
3163 	ipc_space_terminate(task->itk_space);
3164 
3165 	/*
3166 	 * Clean out the address space, as we are going to be
3167 	 * getting a new one.
3168 	 */
3169 	vm_map_terminate(task->map);
3170 
3171 	/*
3172 	 * Kick out any IOKitUser handles to the task. At best they're stale,
3173 	 * at worst someone is racing a SUID exec.
3174 	 */
3175 	iokit_task_terminate(task);
3176 }
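
/*
 * Illustrative sketch (not part of the original source): how the two halt
 * phases pair up on an exec-style path.  task_start_halt() marks the task
 * halting and terminates the other threads; task_complete_halt() later
 * reaps them and empties the old task.  The helper name is hypothetical,
 * and it assumes it runs on a thread of the task being halted.
 */
#if 0 /* illustration only */
static void
example_exec_halt(task_t task)
{
	if (task_start_halt(task) != KERN_SUCCESS) {
		return;	/* task or thread already terminating */
	}
	/* ... past the point of no return; set up the new image ... */
	task_complete_halt(task);	/* wait for threads, tear down VM/IPC */
}
#endif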
3177 
3178 /*
3179  *	task_hold_locked:
3180  *
3181  *	Suspend execution of the specified task.
3182  *	This is a recursive-style suspension of the task, a count of
3183  *	suspends is maintained.
3184  *
3185  *	CONDITIONS: the task is locked and active.
3186  */
3187 void
3188 task_hold_locked(
3189 	task_t          task)
3190 {
3191 	thread_t        thread;
3192 
3193 	assert(task->active);
3194 
3195 	if (task->suspend_count++ > 0) {
3196 		return;
3197 	}
3198 
3199 	if (task->bsd_info) {
3200 		workq_proc_suspended(task->bsd_info);
3201 	}
3202 
3203 	/*
3204 	 *	Iterate through all the threads and hold them.
3205 	 */
3206 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
3207 		thread_mtx_lock(thread);
3208 		thread_hold(thread);
3209 		thread_mtx_unlock(thread);
3210 	}
3211 }
3212 
3213 /*
3214  *	task_hold:
3215  *
3216  *	Same as the internal routine above, except that it must lock
3217  *	and verify that the task is active.  This differs from task_suspend
3218  *	in that it places a kernel hold on the task rather than just a
3219  *	user-level hold.  This keeps users from over-resuming and setting
3220  *	it running out from under the kernel.
3221  *
3222  *      CONDITIONS: the caller holds a reference on the task
3223  */
3224 kern_return_t
3225 task_hold(
3226 	task_t          task)
3227 {
3228 	if (task == TASK_NULL) {
3229 		return KERN_INVALID_ARGUMENT;
3230 	}
3231 
3232 	task_lock(task);
3233 
3234 	if (!task->active) {
3235 		task_unlock(task);
3236 
3237 		return KERN_FAILURE;
3238 	}
3239 
3240 	task_hold_locked(task);
3241 	task_unlock(task);
3242 
3243 	return KERN_SUCCESS;
3244 }
3245 
3246 kern_return_t
3247 task_wait(
3248 	task_t          task,
3249 	boolean_t       until_not_runnable)
3250 {
3251 	if (task == TASK_NULL) {
3252 		return KERN_INVALID_ARGUMENT;
3253 	}
3254 
3255 	task_lock(task);
3256 
3257 	if (!task->active) {
3258 		task_unlock(task);
3259 
3260 		return KERN_FAILURE;
3261 	}
3262 
3263 	task_wait_locked(task, until_not_runnable);
3264 	task_unlock(task);
3265 
3266 	return KERN_SUCCESS;
3267 }
3268 
3269 /*
3270  *	task_wait_locked:
3271  *
3272  *	Wait for all threads in task to stop.
3273  *
3274  * Conditions:
3275  *	Called with task locked, active, and held.
3276  */
3277 void
3278 task_wait_locked(
3279 	task_t          task,
3280 	boolean_t               until_not_runnable)
3281 {
3282 	thread_t        thread, self;
3283 
3284 	assert(task->active);
3285 	assert(task->suspend_count > 0);
3286 
3287 	self = current_thread();
3288 
3289 	/*
3290 	 *	Iterate through all the threads and wait for them to
3291 	 *	stop.  Do not wait for the current thread if it is within
3292 	 *	the task.
3293 	 */
3294 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
3295 		if (thread != self) {
3296 			thread_wait(thread, until_not_runnable);
3297 		}
3298 	}
3299 }
3300 
3301 boolean_t
3302 task_is_app_suspended(task_t task)
3303 {
3304 	return task->pidsuspended;
3305 }
3306 
3307 /*
3308  *	task_release_locked:
3309  *
3310  *	Release a kernel hold on a task.
3311  *
3312  *      CONDITIONS: the task is locked and active
3313  */
3314 void
3315 task_release_locked(
3316 	task_t          task)
3317 {
3318 	thread_t        thread;
3319 
3320 	assert(task->active);
3321 	assert(task->suspend_count > 0);
3322 
3323 	if (--task->suspend_count > 0) {
3324 		return;
3325 	}
3326 
3327 	if (task->bsd_info) {
3328 		workq_proc_resumed(task->bsd_info);
3329 	}
3330 
3331 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
3332 		thread_mtx_lock(thread);
3333 		thread_release(thread);
3334 		thread_mtx_unlock(thread);
3335 	}
3336 }
3337 
3338 /*
3339  *	task_release:
3340  *
3341  *	Same as the internal routine above, except that it must lock
3342  *	and verify that the task is active.
3343  *
3344  *      CONDITIONS: The caller holds a reference to the task
3345  */
3346 kern_return_t
3347 task_release(
3348 	task_t          task)
3349 {
3350 	if (task == TASK_NULL) {
3351 		return KERN_INVALID_ARGUMENT;
3352 	}
3353 
3354 	task_lock(task);
3355 
3356 	if (!task->active) {
3357 		task_unlock(task);
3358 
3359 		return KERN_FAILURE;
3360 	}
3361 
3362 	task_release_locked(task);
3363 	task_unlock(task);
3364 
3365 	return KERN_SUCCESS;
3366 }
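
/*
 * Illustrative sketch (not part of the original source): the intended
 * pairing of task_hold() / task_wait() / task_release() for a
 * debugger-style stop.  The helper name is hypothetical and error
 * handling is minimal.
 */
#if 0 /* illustration only */
static kern_return_t
example_quiesce_task(task_t task)
{
	kern_return_t kr;

	kr = task_hold(task);		/* kernel-level, recursive hold */
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	kr = task_wait(task, FALSE);	/* let the threads come to a stop */
	if (kr == KERN_SUCCESS) {
		/* ... inspect the stopped task here ... */
	}
	(void)task_release(task);	/* drop the kernel hold */
	return kr;
}
#endif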
3367 
3368 static kern_return_t
3369 task_threads_internal(
3370 	task_t                      task,
3371 	thread_act_array_t         *threads_out,
3372 	mach_msg_type_number_t     *countp,
3373 	mach_thread_flavor_t        flavor)
3374 {
3375 	mach_msg_type_number_t  actual, count, count_needed;
3376 	thread_t               *thread_list;
3377 	thread_t                thread;
3378 	unsigned int            i;
3379 
3380 	count = 0;
3381 	thread_list = NULL;
3382 
3383 	if (task == TASK_NULL) {
3384 		return KERN_INVALID_ARGUMENT;
3385 	}
3386 
3387 	assert(flavor <= THREAD_FLAVOR_INSPECT);
3388 
3389 	for (;;) {
3390 		task_lock(task);
3391 		if (!task->active) {
3392 			task_unlock(task);
3393 
3394 			kfree_type(thread_t, count, thread_list);
3395 			return KERN_FAILURE;
3396 		}
3397 
3398 		count_needed = actual = task->thread_count;
3399 		if (count_needed <= count) {
3400 			break;
3401 		}
3402 
3403 		/* unlock the task and allocate more memory */
3404 		task_unlock(task);
3405 
3406 		kfree_type(thread_t, count, thread_list);
3407 		count = count_needed;
3408 		thread_list = kalloc_type(thread_t, count, Z_WAITOK);
3409 
3410 		if (thread_list == NULL) {
3411 			return KERN_RESOURCE_SHORTAGE;
3412 		}
3413 	}
3414 
3415 	i = 0;
3416 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
3417 		assert(i < actual);
3418 		thread_reference(thread);
3419 		thread_list[i++] = thread;
3420 	}
3421 
3422 	count_needed = actual;
3423 
3424 	/* can unlock task now that we've got the thread refs */
3425 	task_unlock(task);
3426 
3427 	if (actual == 0) {
3428 		/* no threads, so return null pointer and deallocate memory */
3429 
3430 		*threads_out = NULL;
3431 		*countp = 0;
3432 		kfree_type(thread_t, count, thread_list);
3433 	} else {
3434 		/* if we allocated too much, must copy */
3435 		if (count_needed < count) {
3436 			void *newaddr;
3437 
3438 			newaddr = kalloc_type(thread_t, count_needed, Z_WAITOK);
3439 			if (newaddr == NULL) {
3440 				for (i = 0; i < actual; ++i) {
3441 					thread_deallocate(thread_list[i]);
3442 				}
3443 				kfree_type(thread_t, count, thread_list);
3444 				return KERN_RESOURCE_SHORTAGE;
3445 			}
3446 
3447 			bcopy(thread_list, newaddr, count_needed * sizeof(thread_t));
3448 			kfree_type(thread_t, count, thread_list);
3449 			thread_list = (thread_t *)newaddr;
3450 		}
3451 
3452 		*threads_out = thread_list;
3453 		*countp = actual;
3454 
3455 		/* do the conversion that Mig should handle */
3456 
3457 		switch (flavor) {
3458 		case THREAD_FLAVOR_CONTROL:
3459 			if (task == current_task()) {
3460 				for (i = 0; i < actual; ++i) {
3461 					((ipc_port_t *) thread_list)[i] = convert_thread_to_port_pinned(thread_list[i]);
3462 				}
3463 			} else {
3464 				for (i = 0; i < actual; ++i) {
3465 					((ipc_port_t *) thread_list)[i] = convert_thread_to_port(thread_list[i]);
3466 				}
3467 			}
3468 			break;
3469 		case THREAD_FLAVOR_READ:
3470 			for (i = 0; i < actual; ++i) {
3471 				((ipc_port_t *) thread_list)[i] = convert_thread_read_to_port(thread_list[i]);
3472 			}
3473 			break;
3474 		case THREAD_FLAVOR_INSPECT:
3475 			for (i = 0; i < actual; ++i) {
3476 				((ipc_port_t *) thread_list)[i] = convert_thread_inspect_to_port(thread_list[i]);
3477 			}
3478 			break;
3479 		}
3480 	}
3481 
3482 	return KERN_SUCCESS;
3483 }
3484 
3485 kern_return_t
3486 task_threads(
3487 	task_t                      task,
3488 	thread_act_array_t         *threads_out,
3489 	mach_msg_type_number_t     *count)
3490 {
3491 	return task_threads_internal(task, threads_out, count, THREAD_FLAVOR_CONTROL);
3492 }
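
/*
 * Illustrative user-space sketch (not part of the original source): the MIG
 * counterpart of task_threads() returns send rights on the thread ports plus
 * an out-of-line array in the caller's address space; both must be released.
 */
#if 0 /* illustration only; builds in user space against <mach/mach.h> */
#include <mach/mach.h>

static void
example_list_own_threads(void)
{
	thread_act_array_t threads;
	mach_msg_type_number_t count;

	if (task_threads(mach_task_self(), &threads, &count) != KERN_SUCCESS) {
		return;
	}
	for (mach_msg_type_number_t i = 0; i < count; i++) {
		/* drop the send right on each thread port */
		(void)mach_port_deallocate(mach_task_self(), threads[i]);
	}
	/* the array itself arrives as out-of-line VM; free it as well */
	(void)vm_deallocate(mach_task_self(), (vm_address_t)threads,
	    count * sizeof(threads[0]));
}
#endif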
3493 
3494 
3495 kern_return_t
3496 task_threads_from_user(
3497 	mach_port_t                 port,
3498 	thread_act_array_t         *threads_out,
3499 	mach_msg_type_number_t     *count)
3500 {
3501 	ipc_kobject_type_t kotype;
3502 	kern_return_t kr;
3503 
3504 	task_t task = convert_port_to_task_inspect_no_eval(port);
3505 
3506 	if (task == TASK_NULL) {
3507 		return KERN_INVALID_ARGUMENT;
3508 	}
3509 
3510 	kotype = ip_kotype(port);
3511 
3512 	switch (kotype) {
3513 	case IKOT_TASK_CONTROL:
3514 		kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_CONTROL);
3515 		break;
3516 	case IKOT_TASK_READ:
3517 		kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_READ);
3518 		break;
3519 	case IKOT_TASK_INSPECT:
3520 		kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_INSPECT);
3521 		break;
3522 	default:
3523 		panic("strange kobject type");
3524 		break;
3525 	}
3526 
3527 	task_deallocate(task);
3528 	return kr;
3529 }
3530 
3531 #define TASK_HOLD_NORMAL        0
3532 #define TASK_HOLD_PIDSUSPEND    1
3533 #define TASK_HOLD_LEGACY        2
3534 #define TASK_HOLD_LEGACY_ALL    3
3535 
3536 static kern_return_t
3537 place_task_hold(
3538 	task_t task,
3539 	int mode)
3540 {
3541 	if (!task->active && !task_is_a_corpse(task)) {
3542 		return KERN_FAILURE;
3543 	}
3544 
3545 	/* Return success for corpse task */
3546 	if (task_is_a_corpse(task)) {
3547 		return KERN_SUCCESS;
3548 	}
3549 
3550 	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_IPC, MACH_TASK_SUSPEND),
3551 	    task_pid(task),
3552 	    task->thread_count > 0 ?((thread_t)queue_first(&task->threads))->thread_id : 0,
3553 	    task->user_stop_count, task->user_stop_count + 1);
3554 
3555 #if MACH_ASSERT
3556 	current_task()->suspends_outstanding++;
3557 #endif
3558 
3559 	if (mode == TASK_HOLD_LEGACY) {
3560 		task->legacy_stop_count++;
3561 	}
3562 
3563 	if (task->user_stop_count++ > 0) {
3564 		/*
3565 		 *	If the stop count was positive, the task is
3566 		 *	already stopped and we can exit.
3567 		 */
3568 		return KERN_SUCCESS;
3569 	}
3570 
3571 	/*
3572 	 * Put a kernel-level hold on the threads in the task (all
3573 	 * user-level task suspensions added together represent a
3574 	 * single kernel-level hold).  We then wait for the threads
3575 	 * to stop executing user code.
3576 	 */
3577 	task_hold_locked(task);
3578 	task_wait_locked(task, FALSE);
3579 
3580 	return KERN_SUCCESS;
3581 }
3582 
3583 static kern_return_t
3584 release_task_hold(
3585 	task_t          task,
3586 	int                     mode)
3587 {
3588 	boolean_t release = FALSE;
3589 
3590 	if (!task->active && !task_is_a_corpse(task)) {
3591 		return KERN_FAILURE;
3592 	}
3593 
3594 	/* Return success for corpse task */
3595 	if (task_is_a_corpse(task)) {
3596 		return KERN_SUCCESS;
3597 	}
3598 
3599 	if (mode == TASK_HOLD_PIDSUSPEND) {
3600 		if (task->pidsuspended == FALSE) {
3601 			return KERN_FAILURE;
3602 		}
3603 		task->pidsuspended = FALSE;
3604 	}
3605 
3606 	if (task->user_stop_count > (task->pidsuspended ? 1 : 0)) {
3607 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3608 		    MACHDBG_CODE(DBG_MACH_IPC, MACH_TASK_RESUME) | DBG_FUNC_NONE,
3609 		    task_pid(task), ((thread_t)queue_first(&task->threads))->thread_id,
3610 		    task->user_stop_count, mode, task->legacy_stop_count);
3611 
3612 #if MACH_ASSERT
3613 		/*
3614 		 * This is obviously not robust; if we suspend one task and then resume a different one,
3615 		 * we'll fly under the radar. This is only meant to catch the common case of a crashed
3616 		 * or buggy suspender.
3617 		 */
3618 		current_task()->suspends_outstanding--;
3619 #endif
3620 
3621 		if (mode == TASK_HOLD_LEGACY_ALL) {
3622 			if (task->legacy_stop_count >= task->user_stop_count) {
3623 				task->user_stop_count = 0;
3624 				release = TRUE;
3625 			} else {
3626 				task->user_stop_count -= task->legacy_stop_count;
3627 			}
3628 			task->legacy_stop_count = 0;
3629 		} else {
3630 			if (mode == TASK_HOLD_LEGACY && task->legacy_stop_count > 0) {
3631 				task->legacy_stop_count--;
3632 			}
3633 			if (--task->user_stop_count == 0) {
3634 				release = TRUE;
3635 			}
3636 		}
3637 	} else {
3638 		return KERN_FAILURE;
3639 	}
3640 
3641 	/*
3642 	 *	Release the task if necessary.
3643 	 */
3644 	if (release) {
3645 		task_release_locked(task);
3646 	}
3647 
3648 	return KERN_SUCCESS;
3649 }
3650 
3651 boolean_t
3652 get_task_suspended(task_t task)
3653 {
3654 	return 0 != task->user_stop_count;
3655 }
3656 
3657 /*
3658  *	task_suspend:
3659  *
3660  *	Implement an (old-fashioned) user-level suspension on a task.
3661  *
3662  *	Because the user isn't expecting to have to manage a suspension
3663  *	token, we'll track it for him in the kernel in the form of a naked
3664  *	send right to the task's resume port.  All such send rights
3665  *	account for a single suspension against the task (unlike task_suspend2()
3666  *	where each caller gets a unique suspension count represented by a
3667  *	unique send-once right).
3668  *
3669  * Conditions:
3670  *      The caller holds a reference to the task
3671  */
3672 kern_return_t
3673 task_suspend(
3674 	task_t          task)
3675 {
3676 	kern_return_t                   kr;
3677 	mach_port_t                     port;
3678 	mach_port_name_t                name;
3679 
3680 	if (task == TASK_NULL || task == kernel_task) {
3681 		return KERN_INVALID_ARGUMENT;
3682 	}
3683 
3684 	/*
3685 	 * place a legacy hold on the task.
3686 	 */
3687 	task_lock(task);
3688 	kr = place_task_hold(task, TASK_HOLD_LEGACY);
3689 	task_unlock(task);
3690 
3691 	if (kr != KERN_SUCCESS) {
3692 		return kr;
3693 	}
3694 
3695 	/*
3696 	 * Claim a send right on the task resume port, and request a no-senders
3697 	 * notification on that port (if none outstanding).
3698 	 */
3699 	itk_lock(task);
3700 	(void)ipc_kobject_make_send_lazy_alloc_port((ipc_port_t *) &task->itk_resume,
3701 	    (ipc_kobject_t)task, IKOT_TASK_RESUME, IPC_KOBJECT_PTRAUTH_STORE,
3702 	    OS_PTRAUTH_DISCRIMINATOR("task.itk_resume"));
3703 	port = task->itk_resume; /* donates send right */
3704 	itk_unlock(task);
3705 
3706 	/*
3707 	 * Copyout the send right into the calling task's IPC space.  It won't know it is there,
3708 	 * but we'll look it up when calling a traditional resume.  Any IPC operations that
3709 	 * deallocate the send right will auto-release the suspension.
3710 	 */
3711 	if (IP_VALID(port)) {
3712 		kr = ipc_object_copyout(current_space(), ip_to_object(port),
3713 		    MACH_MSG_TYPE_MOVE_SEND, IPC_OBJECT_COPYOUT_FLAGS_NONE,
3714 		    NULL, NULL, &name);
3715 	} else {
3716 		kr = KERN_SUCCESS;
3717 	}
3718 	if (kr != KERN_SUCCESS) {
3719 		printf("warning: %s(%d) failed to copyout suspension "
3720 		    "token for pid %d with error: %d\n",
3721 		    proc_name_address(current_task()->bsd_info),
3722 		    proc_pid(current_task()->bsd_info),
3723 		    task_pid(task), kr);
3724 	}
3725 
3726 	return kr;
3727 }
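
/*
 * Illustrative user-space sketch (not part of the original source): legacy
 * suspensions stack, so each task_suspend() must be matched by a
 * task_resume().  Because the kernel tracks each stop as a send right on
 * the resume port (see above), deallocating that right has the same effect
 * as a resume.
 */
#if 0 /* illustration only */
#include <mach/mach.h>

static void
example_nested_suspend(task_t target)
{
	if (task_suspend(target) != KERN_SUCCESS) {
		return;
	}
	(void)task_suspend(target);	/* user_stop_count is now 2 */
	(void)task_resume(target);	/* still stopped */
	(void)task_resume(target);	/* runnable again */
}
#endif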
3728 
3729 /*
3730  *	task_resume:
3731  *		Release a user hold on a task.
3732  *
3733  * Conditions:
3734  *		The caller holds a reference to the task
3735  */
3736 kern_return_t
3737 task_resume(
3738 	task_t  task)
3739 {
3740 	kern_return_t    kr;
3741 	mach_port_name_t resume_port_name;
3742 	ipc_entry_t              resume_port_entry;
3743 	ipc_space_t              space = current_task()->itk_space;
3744 
3745 	if (task == TASK_NULL || task == kernel_task) {
3746 		return KERN_INVALID_ARGUMENT;
3747 	}
3748 
3749 	/* release a legacy task hold */
3750 	task_lock(task);
3751 	kr = release_task_hold(task, TASK_HOLD_LEGACY);
3752 	task_unlock(task);
3753 
3754 	itk_lock(task); /* for itk_resume */
3755 	is_write_lock(space); /* spin lock */
3756 	if (is_active(space) && IP_VALID(task->itk_resume) &&
3757 	    ipc_hash_lookup(space, ip_to_object(task->itk_resume), &resume_port_name, &resume_port_entry) == TRUE) {
3758 		/*
3759 		 * We found a suspension token in the caller's IPC space. Release a send right to indicate that
3760 		 * we are holding one less legacy hold on the task from this caller.  If the release failed,
3761 		 * go ahead and drop all the rights, as someone either already released our holds or the task
3762 		 * is gone.
3763 		 */
3764 		itk_unlock(task);
3765 		if (kr == KERN_SUCCESS) {
3766 			ipc_right_dealloc(space, resume_port_name, resume_port_entry);
3767 		} else {
3768 			ipc_right_destroy(space, resume_port_name, resume_port_entry, FALSE, 0);
3769 		}
3770 		/* space unlocked */
3771 	} else {
3772 		itk_unlock(task);
3773 		is_write_unlock(space);
3774 		if (kr == KERN_SUCCESS) {
3775 			printf("warning: %s(%d) performed out-of-band resume on pid %d\n",
3776 			    proc_name_address(current_task()->bsd_info), proc_pid(current_task()->bsd_info),
3777 			    task_pid(task));
3778 		}
3779 	}
3780 
3781 	return kr;
3782 }
3783 
3784 /*
3785  * Suspend the target task.
3786  * Making/holding a token/reference/port is the caller's responsibility.
3787  */
3788 kern_return_t
3789 task_suspend_internal(task_t task)
3790 {
3791 	kern_return_t    kr;
3792 
3793 	if (task == TASK_NULL || task == kernel_task) {
3794 		return KERN_INVALID_ARGUMENT;
3795 	}
3796 
3797 	task_lock(task);
3798 	kr = place_task_hold(task, TASK_HOLD_NORMAL);
3799 	task_unlock(task);
3800 	return kr;
3801 }
3802 
3803 /*
3804  * Suspend the target task, and return a suspension token. The token
3805  * represents a reference on the suspended task.
3806  */
3807 static kern_return_t
3808 task_suspend2_grp(
3809 	task_t                  task,
3810 	task_suspension_token_t *suspend_token,
3811 	task_grp_t              grp)
3812 {
3813 	kern_return_t    kr;
3814 
3815 	kr = task_suspend_internal(task);
3816 	if (kr != KERN_SUCCESS) {
3817 		*suspend_token = TASK_NULL;
3818 		return kr;
3819 	}
3820 
3821 	/*
3822 	 * Take a reference on the target task and return that to the caller
3823 	 * as a "suspension token," which can be converted into an SO right to
3824 	 * the now-suspended task's resume port.
3825 	 */
3826 	task_reference_grp(task, grp);
3827 	*suspend_token = task;
3828 
3829 	return KERN_SUCCESS;
3830 }
3831 
3832 kern_return_t
3833 task_suspend2_mig(
3834 	task_t                  task,
3835 	task_suspension_token_t *suspend_token)
3836 {
3837 	return task_suspend2_grp(task, suspend_token, TASK_GRP_MIG);
3838 }
3839 
3840 kern_return_t
3841 task_suspend2_external(
3842 	task_t                  task,
3843 	task_suspension_token_t *suspend_token)
3844 {
3845 	return task_suspend2_grp(task, suspend_token, TASK_GRP_EXTERNAL);
3846 }
3847 
3848 /*
3849  * Resume the task
3850  * (reference/token/port management is caller's responsibility).
3851  */
3852 kern_return_t
3853 task_resume_internal(
3854 	task_suspension_token_t         task)
3855 {
3856 	kern_return_t kr;
3857 
3858 	if (task == TASK_NULL || task == kernel_task) {
3859 		return KERN_INVALID_ARGUMENT;
3860 	}
3861 
3862 	task_lock(task);
3863 	kr = release_task_hold(task, TASK_HOLD_NORMAL);
3864 	task_unlock(task);
3865 	return kr;
3866 }
3867 
3868 /*
3869  * Resume the task using a suspension token. Consumes the token's ref.
3870  */
3871 static kern_return_t
3872 task_resume2_grp(
3873 	task_suspension_token_t         task,
3874 	task_grp_t                      grp)
3875 {
3876 	kern_return_t kr;
3877 
3878 	kr = task_resume_internal(task);
3879 	task_suspension_token_deallocate_grp(task, grp);
3880 
3881 	return kr;
3882 }
3883 
3884 kern_return_t
3885 task_resume2_mig(
3886 	task_suspension_token_t         task)
3887 {
3888 	return task_resume2_grp(task, TASK_GRP_MIG);
3889 }
3890 
3891 kern_return_t
3892 task_resume2_external(
3893 	task_suspension_token_t         task)
3894 {
3895 	return task_resume2_grp(task, TASK_GRP_EXTERNAL);
3896 }
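
/*
 * Illustrative user-space sketch (not part of the original source): the
 * token flavor reached via task_suspend2_mig()/task_resume2_mig() above.
 * Each caller gets a private suspension token, and task_resume2() consumes
 * it, so unrelated suspenders cannot release each other's holds.
 */
#if 0 /* illustration only */
#include <mach/mach.h>

static kern_return_t
example_token_suspend(task_t target)
{
	task_suspension_token_t token = MACH_PORT_NULL;
	kern_return_t kr;

	kr = task_suspend2(target, &token);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* ... target is stopped; the token is this caller's own hold ... */
	return task_resume2(token);	/* consumes the token */
}
#endif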
3897 
3898 static void
3899 task_suspension_no_senders(ipc_port_t port, mach_port_mscount_t mscount)
3900 {
3901 	task_t task = convert_port_to_task_suspension_token(port);
3902 	kern_return_t kr;
3903 
3904 	if (task == TASK_NULL) {
3905 		return;
3906 	}
3907 
3908 	if (task == kernel_task) {
3909 		task_suspension_token_deallocate(task);
3910 		return;
3911 	}
3912 
3913 	task_lock(task);
3914 
3915 	kr = ipc_kobject_nsrequest(port, mscount, NULL);
3916 	if (kr == KERN_FAILURE) {
3917 		/* release all the [remaining] outstanding legacy holds */
3918 		release_task_hold(task, TASK_HOLD_LEGACY_ALL);
3919 	}
3920 
3921 	task_unlock(task);
3922 
3923 	task_suspension_token_deallocate(task);         /* drop token reference */
3924 }
3925 
3926 /*
3927  * Fires when a send-once right made
3928  * by convert_task_suspension_token_to_port() dies.
3929  */
3930 void
3931 task_suspension_send_once(ipc_port_t port)
3932 {
3933 	task_t task = convert_port_to_task_suspension_token(port);
3934 
3935 	if (task == TASK_NULL || task == kernel_task) {
3936 		return;         /* nothing to do */
3937 	}
3938 
3939 	/* release the hold held by this specific send-once right */
3940 	task_lock(task);
3941 	release_task_hold(task, TASK_HOLD_NORMAL);
3942 	task_unlock(task);
3943 
3944 	task_suspension_token_deallocate(task);         /* drop token reference */
3945 }
3946 
3947 static kern_return_t
3948 task_pidsuspend_locked(task_t task)
3949 {
3950 	kern_return_t kr;
3951 
3952 	if (task->pidsuspended) {
3953 		kr = KERN_FAILURE;
3954 		goto out;
3955 	}
3956 
3957 	task->pidsuspended = TRUE;
3958 
3959 	kr = place_task_hold(task, TASK_HOLD_PIDSUSPEND);
3960 	if (kr != KERN_SUCCESS) {
3961 		task->pidsuspended = FALSE;
3962 	}
3963 out:
3964 	return kr;
3965 }
3966 
3967 
3968 /*
3969  *	task_pidsuspend:
3970  *
3971  *	Suspends a task by placing a hold on its threads.
3972  *
3973  * Conditions:
3974  *      The caller holds a reference to the task
3975  */
3976 kern_return_t
3977 task_pidsuspend(
3978 	task_t          task)
3979 {
3980 	kern_return_t    kr;
3981 
3982 	if (task == TASK_NULL || task == kernel_task) {
3983 		return KERN_INVALID_ARGUMENT;
3984 	}
3985 
3986 	task_lock(task);
3987 
3988 	kr = task_pidsuspend_locked(task);
3989 
3990 	task_unlock(task);
3991 
3992 	if ((KERN_SUCCESS == kr) && task->message_app_suspended) {
3993 		iokit_task_app_suspended_changed(task);
3994 	}
3995 
3996 	return kr;
3997 }
3998 
3999 /*
4000  *	task_pidresume:
4001  *		Resumes a previously suspended task.
4002  *
4003  * Conditions:
4004  *		The caller holds a reference to the task
4005  */
4006 kern_return_t
4007 task_pidresume(
4008 	task_t  task)
4009 {
4010 	kern_return_t    kr;
4011 
4012 	if (task == TASK_NULL || task == kernel_task) {
4013 		return KERN_INVALID_ARGUMENT;
4014 	}
4015 
4016 	task_lock(task);
4017 
4018 #if CONFIG_FREEZE
4019 
4020 	while (task->changing_freeze_state) {
4021 		assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
4022 		task_unlock(task);
4023 		thread_block(THREAD_CONTINUE_NULL);
4024 
4025 		task_lock(task);
4026 	}
4027 	task->changing_freeze_state = TRUE;
4028 #endif
4029 
4030 	kr = release_task_hold(task, TASK_HOLD_PIDSUSPEND);
4031 
4032 	task_unlock(task);
4033 
4034 	if ((KERN_SUCCESS == kr) && task->message_app_suspended) {
4035 		iokit_task_app_suspended_changed(task);
4036 	}
4037 
4038 #if CONFIG_FREEZE
4039 
4040 	task_lock(task);
4041 
4042 	if (kr == KERN_SUCCESS) {
4043 		task->frozen = FALSE;
4044 	}
4045 	task->changing_freeze_state = FALSE;
4046 	thread_wakeup(&task->changing_freeze_state);
4047 
4048 	task_unlock(task);
4049 #endif
4050 
4051 	return kr;
4052 }
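
/*
 * Illustrative kernel-side sketch (not part of the original source): the
 * pidsuspend hold is single-level, unlike the stacking legacy holds, so a
 * second task_pidsuspend() fails until task_pidresume() clears the first.
 * The helper name is hypothetical.
 */
#if 0 /* illustration only */
static kern_return_t
example_pidsuspend_cycle(task_t task)
{
	kern_return_t kr;

	kr = task_pidsuspend(task);
	if (kr != KERN_SUCCESS) {
		return kr;	/* fails if already pidsuspended */
	}
	/* ... all threads held; IOKit may be told the app is suspended ... */
	return task_pidresume(task);
}
#endif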
4053 
4054 os_refgrp_decl(static, task_watchports_refgrp, "task_watchports", NULL);
4055 
4056 /*
4057  *	task_add_turnstile_watchports:
4058  *		Setup watchports to boost the main thread of the task.
4059  *
4060  *	Arguments:
4061  *		task: task being spawned
4062  *		thread: main thread of task
4063  *		portwatch_ports: array of watchports
4064  *		portwatch_count: number of watchports
4065  *
4066  *	Conditions:
4067  *		Nothing locked.
4068  */
4069 void
4070 task_add_turnstile_watchports(
4071 	task_t          task,
4072 	thread_t        thread,
4073 	ipc_port_t      *portwatch_ports,
4074 	uint32_t        portwatch_count)
4075 {
4076 	struct task_watchports *watchports = NULL;
4077 	struct task_watchport_elem *previous_elem_array[TASK_MAX_WATCHPORT_COUNT] = {};
4078 	os_ref_count_t refs;
4079 
4080 	/* Check if the task has terminated */
4081 	if (!task->active) {
4082 		return;
4083 	}
4084 
4085 	assert(portwatch_count <= TASK_MAX_WATCHPORT_COUNT);
4086 
4087 	watchports = task_watchports_alloc_init(task, thread, portwatch_count);
4088 
4089 	/* Lock the ipc space */
4090 	is_write_lock(task->itk_space);
4091 
4092 	/* Setup watchports to boost the main thread */
4093 	refs = task_add_turnstile_watchports_locked(task,
4094 	    watchports, previous_elem_array, portwatch_ports,
4095 	    portwatch_count);
4096 
4097 	/* Drop the space lock */
4098 	is_write_unlock(task->itk_space);
4099 
4100 	if (refs == 0) {
4101 		task_watchports_deallocate(watchports);
4102 	}
4103 
4104 	/* Drop the ref on previous_elem_array */
4105 	for (uint32_t i = 0; i < portwatch_count && previous_elem_array[i] != NULL; i++) {
4106 		task_watchport_elem_deallocate(previous_elem_array[i]);
4107 	}
4108 }
4109 
4110 /*
4111  *	task_remove_turnstile_watchports:
4112  *		Clear all turnstile boost on the task from watchports.
4113  *
4114  *	Arguments:
4115  *		task: task being terminated
4116  *
4117  *	Conditions:
4118  *		Nothing locked.
4119  */
4120 void
4121 task_remove_turnstile_watchports(
4122 	task_t          task)
4123 {
4124 	os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4125 	struct task_watchports *watchports = NULL;
4126 	ipc_port_t port_freelist[TASK_MAX_WATCHPORT_COUNT] = {};
4127 	uint32_t portwatch_count;
4128 
4129 	/* Lock the ipc space */
4130 	is_write_lock(task->itk_space);
4131 
4132 	/* Check if watchport boost exist */
4133 	if (task->watchports == NULL) {
4134 		is_write_unlock(task->itk_space);
4135 		return;
4136 	}
4137 	watchports = task->watchports;
4138 	portwatch_count = watchports->tw_elem_array_count;
4139 
4140 	refs = task_remove_turnstile_watchports_locked(task, watchports,
4141 	    port_freelist);
4142 
4143 	is_write_unlock(task->itk_space);
4144 
4145 	/* Drop all the port references */
4146 	for (uint32_t i = 0; i < portwatch_count && port_freelist[i] != NULL; i++) {
4147 		ip_release(port_freelist[i]);
4148 	}
4149 
4150 	/* Clear the task and thread references for task_watchport */
4151 	if (refs == 0) {
4152 		task_watchports_deallocate(watchports);
4153 	}
4154 }
4155 
4156 /*
4157  *	task_transfer_turnstile_watchports:
4158  *		Transfer all watchport turnstile boost from old task to new task.
4159  *
4160  *	Arguments:
4161  *		old_task: task calling exec
4162  *		new_task: new exec'ed task
4163  *		thread: main thread of new task
4164  *
4165  *	Conditions:
4166  *		Nothing locked.
4167  */
4168 void
4169 task_transfer_turnstile_watchports(
4170 	task_t   old_task,
4171 	task_t   new_task,
4172 	thread_t new_thread)
4173 {
4174 	struct task_watchports *old_watchports = NULL;
4175 	struct task_watchports *new_watchports = NULL;
4176 	os_ref_count_t old_refs = TASK_MAX_WATCHPORT_COUNT;
4177 	os_ref_count_t new_refs = TASK_MAX_WATCHPORT_COUNT;
4178 	uint32_t portwatch_count;
4179 
4180 	if (old_task->watchports == NULL || !new_task->active) {
4181 		return;
4182 	}
4183 
4184 	/* Get the watch port count from the old task */
4185 	is_write_lock(old_task->itk_space);
4186 	if (old_task->watchports == NULL) {
4187 		is_write_unlock(old_task->itk_space);
4188 		return;
4189 	}
4190 
4191 	portwatch_count = old_task->watchports->tw_elem_array_count;
4192 	is_write_unlock(old_task->itk_space);
4193 
4194 	new_watchports = task_watchports_alloc_init(new_task, new_thread, portwatch_count);
4195 
4196 	/* Lock the ipc space for old task */
4197 	is_write_lock(old_task->itk_space);
4198 
4199 	/* Lock the ipc space for new task */
4200 	is_write_lock(new_task->itk_space);
4201 
4202 	/* Check if watchport boost exist */
4203 	if (old_task->watchports == NULL || !new_task->active) {
4204 		is_write_unlock(new_task->itk_space);
4205 		is_write_unlock(old_task->itk_space);
4206 		(void)task_watchports_release(new_watchports);
4207 		task_watchports_deallocate(new_watchports);
4208 		return;
4209 	}
4210 
4211 	old_watchports = old_task->watchports;
4212 	assert(portwatch_count == old_task->watchports->tw_elem_array_count);
4213 
4214 	/* Setup new task watchports */
4215 	new_task->watchports = new_watchports;
4216 
4217 	for (uint32_t i = 0; i < portwatch_count; i++) {
4218 		ipc_port_t port = old_watchports->tw_elem[i].twe_port;
4219 
4220 		if (port == NULL) {
4221 			task_watchport_elem_clear(&new_watchports->tw_elem[i]);
4222 			continue;
4223 		}
4224 
4225 		/* Lock the port and check if it has the entry */
4226 		ip_mq_lock(port);
4227 
4228 		task_watchport_elem_init(&new_watchports->tw_elem[i], new_task, port);
4229 
4230 		if (ipc_port_replace_watchport_elem_conditional_locked(port,
4231 		    &old_watchports->tw_elem[i], &new_watchports->tw_elem[i]) == KERN_SUCCESS) {
4232 			task_watchport_elem_clear(&old_watchports->tw_elem[i]);
4233 
4234 			task_watchports_retain(new_watchports);
4235 			old_refs = task_watchports_release(old_watchports);
4236 
4237 			/* Check if all ports are cleaned */
4238 			if (old_refs == 0) {
4239 				old_task->watchports = NULL;
4240 			}
4241 		} else {
4242 			task_watchport_elem_clear(&new_watchports->tw_elem[i]);
4243 		}
4244 		/* port unlocked by ipc_port_replace_watchport_elem_conditional_locked */
4245 	}
4246 
4247 	/* Drop the reference on new task_watchports struct returned by task_watchports_alloc_init */
4248 	new_refs = task_watchports_release(new_watchports);
4249 	if (new_refs == 0) {
4250 		new_task->watchports = NULL;
4251 	}
4252 
4253 	is_write_unlock(new_task->itk_space);
4254 	is_write_unlock(old_task->itk_space);
4255 
4256 	/* Clear the task and thread references for old_watchport */
4257 	if (old_refs == 0) {
4258 		task_watchports_deallocate(old_watchports);
4259 	}
4260 
4261 	/* Clear the task and thread references for new_watchport */
4262 	if (new_refs == 0) {
4263 		task_watchports_deallocate(new_watchports);
4264 	}
4265 }
4266 
4267 /*
4268  *	task_add_turnstile_watchports_locked:
4269  *		Setup watchports to boost the main thread of the task.
4270  *
4271  *	Arguments:
4272  *		task: task to boost
4273  *		watchports: watchport structure to be attached to the task
4274  *		previous_elem_array: an array of old watchport_elem to be returned to caller
4275  *		portwatch_ports: array of watchports
4276  *		portwatch_count: number of watchports
4277  *
4278  *	Conditions:
4279  *		ipc space of the task locked.
4280  *		returns array of old watchport_elem in previous_elem_array
4281  */
4282 static os_ref_count_t
4283 task_add_turnstile_watchports_locked(
4284 	task_t                      task,
4285 	struct task_watchports      *watchports,
4286 	struct task_watchport_elem  **previous_elem_array,
4287 	ipc_port_t                  *portwatch_ports,
4288 	uint32_t                    portwatch_count)
4289 {
4290 	os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4291 
4292 	/* Check if the task is still active */
4293 	if (!task->active) {
4294 		refs = task_watchports_release(watchports);
4295 		return refs;
4296 	}
4297 
4298 	assert(task->watchports == NULL);
4299 	task->watchports = watchports;
4300 
4301 	for (uint32_t i = 0, j = 0; i < portwatch_count; i++) {
4302 		ipc_port_t port = portwatch_ports[i];
4303 
4304 		task_watchport_elem_init(&watchports->tw_elem[i], task, port);
4305 		if (port == NULL) {
4306 			task_watchport_elem_clear(&watchports->tw_elem[i]);
4307 			continue;
4308 		}
4309 
4310 		ip_mq_lock(port);
4311 
4312 		/* Check if port is in valid state to be setup as watchport */
4313 		if (ipc_port_add_watchport_elem_locked(port, &watchports->tw_elem[i],
4314 		    &previous_elem_array[j]) != KERN_SUCCESS) {
4315 			task_watchport_elem_clear(&watchports->tw_elem[i]);
4316 			continue;
4317 		}
4318 		/* port unlocked on return */
4319 
4320 		ip_reference(port);
4321 		task_watchports_retain(watchports);
4322 		if (previous_elem_array[j] != NULL) {
4323 			j++;
4324 		}
4325 	}
4326 
4327 	/* Drop the reference on task_watchport struct returned by os_ref_init */
4328 	refs = task_watchports_release(watchports);
4329 	if (refs == 0) {
4330 		task->watchports = NULL;
4331 	}
4332 
4333 	return refs;
4334 }
4335 
4336 /*
4337  *	task_remove_turnstile_watchports_locked:
4338  *		Clear all turnstile boost on the task from watchports.
4339  *
4340  *	Arguments:
4341  *		task: task to remove watchports from
4342  *		watchports: watchports structure for the task
4343  *		port_freelist: array of ports returned with ref to caller
4344  *
4345  *
4346  *	Conditions:
4347  *		ipc space of the task locked.
4348  *		array of ports with refs are returned in port_freelist
4349  */
4350 static os_ref_count_t
4351 task_remove_turnstile_watchports_locked(
4352 	task_t                 task,
4353 	struct task_watchports *watchports,
4354 	ipc_port_t             *port_freelist)
4355 {
4356 	os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4357 
4358 	for (uint32_t i = 0, j = 0; i < watchports->tw_elem_array_count; i++) {
4359 		ipc_port_t port = watchports->tw_elem[i].twe_port;
4360 		if (port == NULL) {
4361 			continue;
4362 		}
4363 
4364 		/* Lock the port and check if it has the entry */
4365 		ip_mq_lock(port);
4366 		if (ipc_port_clear_watchport_elem_internal_conditional_locked(port,
4367 		    &watchports->tw_elem[i]) == KERN_SUCCESS) {
4368 			task_watchport_elem_clear(&watchports->tw_elem[i]);
4369 			port_freelist[j++] = port;
4370 			refs = task_watchports_release(watchports);
4371 
4372 			/* Check if all ports are cleaned */
4373 			if (refs == 0) {
4374 				task->watchports = NULL;
4375 				break;
4376 			}
4377 		}
4378 		/* mqueue and port unlocked by ipc_port_clear_watchport_elem_internal_conditional_locked */
4379 	}
4380 	return refs;
4381 }
4382 
4383 /*
4384  *	task_watchports_alloc_init:
4385  *		Allocate and initialize task watchport struct.
4386  *
4387  *	Conditions:
4388  *		Nothing locked.
4389  */
4390 static struct task_watchports *
4391 task_watchports_alloc_init(
4392 	task_t        task,
4393 	thread_t      thread,
4394 	uint32_t      count)
4395 {
4396 	struct task_watchports *watchports = kalloc_type(struct task_watchports,
4397 	    struct task_watchport_elem, count, Z_WAITOK | Z_ZERO | Z_NOFAIL);
4398 
4399 	task_reference(task);
4400 	thread_reference(thread);
4401 	watchports->tw_task = task;
4402 	watchports->tw_thread = thread;
4403 	watchports->tw_elem_array_count = count;
4404 	os_ref_init(&watchports->tw_refcount, &task_watchports_refgrp);
4405 
4406 	return watchports;
4407 }
4408 
4409 /*
4410  *	task_watchports_deallocate:
4411  *		Deallocate task watchport struct.
4412  *
4413  *	Conditions:
4414  *		Nothing locked.
4415  */
4416 static void
4417 task_watchports_deallocate(
4418 	struct task_watchports *watchports)
4419 {
4420 	uint32_t portwatch_count = watchports->tw_elem_array_count;
4421 
4422 	task_deallocate(watchports->tw_task);
4423 	thread_deallocate(watchports->tw_thread);
4424 	kfree_type(struct task_watchports, struct task_watchport_elem,
4425 	    portwatch_count, watchports);
4426 }
4427 
4428 /*
4429  *	task_watchport_elem_deallocate:
4430  *		Deallocate task watchport element and release its ref on task_watchport.
4431  *
4432  *	Conditions:
4433  *		Nothing locked.
4434  */
4435 void
4436 task_watchport_elem_deallocate(
4437 	struct task_watchport_elem *watchport_elem)
4438 {
4439 	os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4440 	task_t task = watchport_elem->twe_task;
4441 	struct task_watchports *watchports = NULL;
4442 	ipc_port_t port = NULL;
4443 
4444 	assert(task != NULL);
4445 
4446 	/* Take the space lock to modify the element */
4447 	is_write_lock(task->itk_space);
4448 
4449 	watchports = task->watchports;
4450 	assert(watchports != NULL);
4451 
4452 	port = watchport_elem->twe_port;
4453 	assert(port != NULL);
4454 
4455 	task_watchport_elem_clear(watchport_elem);
4456 	refs = task_watchports_release(watchports);
4457 
4458 	if (refs == 0) {
4459 		task->watchports = NULL;
4460 	}
4461 
4462 	is_write_unlock(task->itk_space);
4463 
4464 	ip_release(port);
4465 	if (refs == 0) {
4466 		task_watchports_deallocate(watchports);
4467 	}
4468 }
4469 
4470 /*
4471  *	task_has_watchports:
4472  *		Return TRUE if task has watchport boosts.
4473  *
4474  *	Conditions:
4475  *		Nothing locked.
4476  */
4477 boolean_t
4478 task_has_watchports(task_t task)
4479 {
4480 	return task->watchports != NULL;
4481 }
4482 
4483 #if DEVELOPMENT || DEBUG
4484 
4485 extern void IOSleep(int);
4486 
4487 kern_return_t
4488 task_disconnect_page_mappings(task_t task)
4489 {
4490 	int     n;
4491 
4492 	if (task == TASK_NULL || task == kernel_task) {
4493 		return KERN_INVALID_ARGUMENT;
4494 	}
4495 
4496 	/*
4497 	 * this function is used to strip all of the mappings from
4498 	 * the pmap for the specified task to force the task to
4499 	 * re-fault all of the pages it is actively using... this
4500 	 * allows us to approximate the true working set of the
4501 	 * specified task.  We only engage if at least 1 of the
4502 	 * threads in the task is runnable, but we want to continuously
4503 	 * sweep (at least for a while - I've arbitrarily set the limit at
4504 	 * 100 sweeps to be re-looked at as we gain experience) to get a better
4505 	 * view into what areas within a page are being visited (as opposed to only
4506 	 * seeing the first fault of a page after the task becomes
4507 	 * runnable)...  in the future I may
4508 	 * try to block until awakened by a thread in this task
4509 	 * being made runnable, but for now we'll periodically poll from the
4510 	 * user level debug tool driving the sysctl
4511 	 */
4512 	for (n = 0; n < 100; n++) {
4513 		thread_t        thread;
4514 		boolean_t       runnable;
4515 		boolean_t       do_unnest;
4516 		int             page_count;
4517 
4518 		runnable = FALSE;
4519 		do_unnest = FALSE;
4520 
4521 		task_lock(task);
4522 
4523 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
4524 			if (thread->state & TH_RUN) {
4525 				runnable = TRUE;
4526 				break;
4527 			}
4528 		}
4529 		if (n == 0) {
4530 			task->task_disconnected_count++;
4531 		}
4532 
4533 		if (task->task_unnested == FALSE) {
4534 			if (runnable == TRUE) {
4535 				task->task_unnested = TRUE;
4536 				do_unnest = TRUE;
4537 			}
4538 		}
4539 		task_unlock(task);
4540 
4541 		if (runnable == FALSE) {
4542 			break;
4543 		}
4544 
4545 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_START,
4546 		    task, do_unnest, task->task_disconnected_count, 0, 0);
4547 
4548 		page_count = vm_map_disconnect_page_mappings(task->map, do_unnest);
4549 
4550 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_END,
4551 		    task, page_count, 0, 0, 0);
4552 
4553 		if ((n % 5) == 4) {
4554 			IOSleep(1);
4555 		}
4556 	}
4557 	return KERN_SUCCESS;
4558 }
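
/*
 * Illustrative user-space sketch (not part of the original source): the
 * comment above anticipates a debug tool repeatedly poking a sysctl that
 * lands in this routine.  The sysctl node name below is made up purely for
 * illustration; only sysctlbyname() itself is a real interface.
 */
#if 0 /* illustration only */
#include <sys/sysctl.h>
#include <unistd.h>

static void
example_poll_working_set(int pid)
{
	/* "kern.example_disconnect_mappings" is a hypothetical node */
	while (sysctlbyname("kern.example_disconnect_mappings",
	    NULL, NULL, &pid, sizeof(pid)) == 0) {
		sleep(1);	/* periodic re-sweep, per the comment above */
	}
}
#endif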
4559 
4560 #endif
4561 
4562 
4563 #if CONFIG_FREEZE
4564 
4565 /*
4566  *	task_freeze:
4567  *
4568  *	Freeze a task.
4569  *
4570  * Conditions:
4571  *      The caller holds a reference to the task
4572  */
4573 extern void             vm_wake_compactor_swapper(void);
4574 extern queue_head_t     c_swapout_list_head;
4575 extern struct freezer_context freezer_context_global;
4576 
4577 kern_return_t
4578 task_freeze(
4579 	task_t    task,
4580 	uint32_t           *purgeable_count,
4581 	uint32_t           *wired_count,
4582 	uint32_t           *clean_count,
4583 	uint32_t           *dirty_count,
4584 	uint32_t           dirty_budget,
4585 	uint32_t           *shared_count,
4586 	int                *freezer_error_code,
4587 	boolean_t          eval_only)
4588 {
4589 	kern_return_t kr = KERN_SUCCESS;
4590 
4591 	if (task == TASK_NULL || task == kernel_task) {
4592 		return KERN_INVALID_ARGUMENT;
4593 	}
4594 
4595 	task_lock(task);
4596 
4597 	while (task->changing_freeze_state) {
4598 		assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
4599 		task_unlock(task);
4600 		thread_block(THREAD_CONTINUE_NULL);
4601 
4602 		task_lock(task);
4603 	}
4604 	if (task->frozen) {
4605 		task_unlock(task);
4606 		return KERN_FAILURE;
4607 	}
4608 	task->changing_freeze_state = TRUE;
4609 
4610 	freezer_context_global.freezer_ctx_task = task;
4611 
4612 	task_unlock(task);
4613 
4614 	kr = vm_map_freeze(task,
4615 	    purgeable_count,
4616 	    wired_count,
4617 	    clean_count,
4618 	    dirty_count,
4619 	    dirty_budget,
4620 	    shared_count,
4621 	    freezer_error_code,
4622 	    eval_only);
4623 
4624 	task_lock(task);
4625 
4626 	if ((kr == KERN_SUCCESS) && (eval_only == FALSE)) {
4627 		task->frozen = TRUE;
4628 
4629 		freezer_context_global.freezer_ctx_task = NULL;
4630 		freezer_context_global.freezer_ctx_uncompressed_pages = 0;
4631 
4632 		if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
4633 			/*
4634 			 * reset the counter tracking the # of swapped compressed pages
4635 			 * because we are now done with this freeze session and task.
4636 			 */
4637 
4638 			*dirty_count = (uint32_t) (freezer_context_global.freezer_ctx_swapped_bytes / PAGE_SIZE_64);         /*used to track pageouts*/
4639 		}
4640 
4641 		freezer_context_global.freezer_ctx_swapped_bytes = 0;
4642 	}
4643 
4644 	task->changing_freeze_state = FALSE;
4645 	thread_wakeup(&task->changing_freeze_state);
4646 
4647 	task_unlock(task);
4648 
4649 	if (VM_CONFIG_COMPRESSOR_IS_PRESENT &&
4650 	    (kr == KERN_SUCCESS) &&
4651 	    (eval_only == FALSE)) {
4652 		vm_wake_compactor_swapper();
4653 		/*
4654 		 * We do an explicit wakeup of the swapout thread here
4655 		 * because the compact_and_swap routines don't have
4656 		 * knowledge about these kind of "per-task packed c_segs"
4657 		 * and so will not be evaluating whether we need to do
4658 		 * a wakeup there.
4659 		 */
4660 		thread_wakeup((event_t)&c_swapout_list_head);
4661 	}
4662 
4663 	return kr;
4664 }
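
/*
 * Illustrative kernel-side sketch (not part of the original source): the
 * eval_only parameter supports a two-pass pattern, probing whether a task
 * is worth freezing before committing.  The helper name and the budget
 * handling are placeholders.
 */
#if 0 /* illustration only */
static kern_return_t
example_two_pass_freeze(task_t task, uint32_t budget)
{
	uint32_t purgeable, wired, clean, dirty, shared;
	int freezer_error = 0;
	kern_return_t kr;

	kr = task_freeze(task, &purgeable, &wired, &clean, &dirty,
	    budget, &shared, &freezer_error, TRUE);	/* evaluate only */
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	return task_freeze(task, &purgeable, &wired, &clean, &dirty,
	    budget, &shared, &freezer_error, FALSE);	/* really freeze */
}
#endif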
4665 
4666 /*
4667  *	task_thaw:
4668  *
4669  *	Thaw a currently frozen task.
4670  *
4671  * Conditions:
4672  *      The caller holds a reference to the task
4673  */
4674 kern_return_t
4675 task_thaw(
4676 	task_t          task)
4677 {
4678 	if (task == TASK_NULL || task == kernel_task) {
4679 		return KERN_INVALID_ARGUMENT;
4680 	}
4681 
4682 	task_lock(task);
4683 
4684 	while (task->changing_freeze_state) {
4685 		assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
4686 		task_unlock(task);
4687 		thread_block(THREAD_CONTINUE_NULL);
4688 
4689 		task_lock(task);
4690 	}
4691 	if (!task->frozen) {
4692 		task_unlock(task);
4693 		return KERN_FAILURE;
4694 	}
4695 	task->frozen = FALSE;
4696 
4697 	task_unlock(task);
4698 
4699 	return KERN_SUCCESS;
4700 }
4701 
4702 void
4703 task_update_frozen_to_swap_acct(task_t task, int64_t amount, freezer_acct_op_t op)
4704 {
4705 	/*
4706 	 * We don't assert that the task lock is held because we call this
4707 	 * routine from the decompression path and we won't be holding the
4708 	 * task lock. However, since we are in the context of the task we are
4709 	 * safe.
4710 	 * In the case of the task_freeze path, we call it from behind the task
4711 	 * lock but we don't need to because we have a reference on the proc
4712 	 * being frozen.
4713 	 */
4714 
4715 	assert(task);
4716 	if (amount == 0) {
4717 		return;
4718 	}
4719 
4720 	if (op == CREDIT_TO_SWAP) {
4721 		ledger_credit_nocheck(task->ledger, task_ledgers.frozen_to_swap, amount);
4722 	} else if (op == DEBIT_FROM_SWAP) {
4723 		ledger_debit_nocheck(task->ledger, task_ledgers.frozen_to_swap, amount);
4724 	} else {
4725 		panic("task_update_frozen_to_swap_acct: Invalid ledger op");
4726 	}
4727 }
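
/*
 * Illustrative sketch (not part of the original source): how the two ledger
 * ops pair across a swap-out/swap-in cycle of a frozen task's compressed
 * pages.  "task" and "bytes" are placeholders supplied by the caller.
 */
#if 0 /* illustration only */
/* when compressed bytes belonging to a frozen task are written to swap: */
task_update_frozen_to_swap_acct(task, (int64_t)bytes, CREDIT_TO_SWAP);
/* ... and when they are swapped back in and decompressed: */
task_update_frozen_to_swap_acct(task, (int64_t)bytes, DEBIT_FROM_SWAP);
#endif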
4728 #endif /* CONFIG_FREEZE */
4729 
4730 kern_return_t
4731 task_set_security_tokens(
4732 	task_t           task,
4733 	security_token_t sec_token,
4734 	audit_token_t    audit_token,
4735 	host_priv_t      host_priv)
4736 {
4737 	ipc_port_t       host_port;
4738 	kern_return_t    kr;
4739 
4740 	if (task == TASK_NULL) {
4741 		return KERN_INVALID_ARGUMENT;
4742 	}
4743 
4744 	task_lock(task);
4745 	task_set_tokens(task, &sec_token, &audit_token);
4746 	task_unlock(task);
4747 
4748 	if (host_priv != HOST_PRIV_NULL) {
4749 		kr = host_get_host_priv_port(host_priv, &host_port);
4750 	} else {
4751 		kr = host_get_host_port(host_priv_self(), &host_port);
4752 	}
4753 	assert(kr == KERN_SUCCESS);
4754 
4755 	kr = task_set_special_port_internal(task, TASK_HOST_PORT, host_port);
4756 	return kr;
4757 }
4758 
4759 kern_return_t
4760 task_send_trace_memory(
4761 	__unused task_t   target_task,
4762 	__unused uint32_t pid,
4763 	__unused uint64_t uniqueid)
4764 {
4765 	return KERN_INVALID_ARGUMENT;
4766 }
4767 
4768 /*
4769  * This routine was added, pretty much exclusively, for registering the
4770  * RPC glue vector for in-kernel short circuited tasks.  Rather than
4771  * removing it completely, I have only disabled that feature (which was
4772  * the only feature at the time).  It just appears that we are going to
4773  * want to add some user data to tasks in the future (i.e. bsd info,
4774  * task names, etc...), so I left it in the formal task interface.
4775  */
4776 kern_return_t
4777 task_set_info(
4778 	task_t          task,
4779 	task_flavor_t   flavor,
4780 	__unused task_info_t    task_info_in,           /* pointer to IN array */
4781 	__unused mach_msg_type_number_t task_info_count)
4782 {
4783 	if (task == TASK_NULL) {
4784 		return KERN_INVALID_ARGUMENT;
4785 	}
4786 	switch (flavor) {
4787 #if CONFIG_ATM
4788 	case TASK_TRACE_MEMORY_INFO:
4789 		return KERN_NOT_SUPPORTED;
4790 #endif // CONFIG_ATM
4791 	default:
4792 		return KERN_INVALID_ARGUMENT;
4793 	}
4794 }
4795 
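/*
 * Illustrative user-space sketch (not part of the original source): a
 * typical caller of the task_info() implementation below, using the
 * MACH_TASK_BASIC_INFO flavor handled further down.
 */
#if 0 /* illustration only */
#include <mach/mach.h>

static kern_return_t
example_query_basic_info(task_t target, mach_task_basic_info_data_t *out)
{
	mach_msg_type_number_t count = MACH_TASK_BASIC_INFO_COUNT;

	return task_info(target, MACH_TASK_BASIC_INFO,
	    (task_info_t)out, &count);
}
#endif
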
4796 int radar_20146450 = 1;
4797 kern_return_t
4798 task_info(
4799 	task_t                  task,
4800 	task_flavor_t           flavor,
4801 	task_info_t             task_info_out,
4802 	mach_msg_type_number_t  *task_info_count)
4803 {
4804 	kern_return_t error = KERN_SUCCESS;
4805 	mach_msg_type_number_t  original_task_info_count;
4806 	bool is_kernel_task = (task == kernel_task);
4807 
4808 	if (task == TASK_NULL) {
4809 		return KERN_INVALID_ARGUMENT;
4810 	}
4811 
4812 	original_task_info_count = *task_info_count;
4813 	task_lock(task);
4814 
4815 	if ((task != current_task()) && (!task->active)) {
4816 		task_unlock(task);
4817 		return KERN_INVALID_ARGUMENT;
4818 	}
4819 
4820 
4821 	switch (flavor) {
4822 	case TASK_BASIC_INFO_32:
4823 	case TASK_BASIC2_INFO_32:
4824 #if defined(__arm__) || defined(__arm64__)
4825 	case TASK_BASIC_INFO_64:
4826 #endif
4827 		{
4828 			task_basic_info_32_t    basic_info;
4829 			vm_map_t                                map;
4830 			clock_sec_t                             secs;
4831 			clock_usec_t                    usecs;
4832 			ledger_amount_t tmp;
4833 
4834 			if (*task_info_count < TASK_BASIC_INFO_32_COUNT) {
4835 				error = KERN_INVALID_ARGUMENT;
4836 				break;
4837 			}
4838 
4839 			basic_info = (task_basic_info_32_t)task_info_out;
4840 
4841 			map = (task == kernel_task)? kernel_map: task->map;
4842 			basic_info->virtual_size = (typeof(basic_info->virtual_size))vm_map_adjusted_size(map);
4843 			if (flavor == TASK_BASIC2_INFO_32) {
4844 				/*
4845 				 * The "BASIC2" flavor gets the maximum resident
4846 				 * size instead of the current resident size...
4847 				 */
4848 				ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, &tmp);
4849 			} else {
4850 				ledger_get_balance(task->ledger, task_ledgers.phys_mem, &tmp);
4851 			}
4852 			basic_info->resident_size = (natural_t) MIN((ledger_amount_t) UINT32_MAX, tmp);
4853 
4854 			basic_info->policy = ((task != kernel_task)?
4855 			    POLICY_TIMESHARE: POLICY_RR);
4856 			basic_info->suspend_count = task->user_stop_count;
4857 
4858 			absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
4859 			basic_info->user_time.seconds =
4860 			    (typeof(basic_info->user_time.seconds))secs;
4861 			basic_info->user_time.microseconds = usecs;
4862 
4863 			absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
4864 			basic_info->system_time.seconds =
4865 			    (typeof(basic_info->system_time.seconds))secs;
4866 			basic_info->system_time.microseconds = usecs;
4867 
4868 			*task_info_count = TASK_BASIC_INFO_32_COUNT;
4869 			break;
4870 		}
4871 
4872 #if defined(__arm__) || defined(__arm64__)
4873 	case TASK_BASIC_INFO_64_2:
4874 	{
4875 		task_basic_info_64_2_t  basic_info;
4876 		vm_map_t                                map;
4877 		clock_sec_t                             secs;
4878 		clock_usec_t                    usecs;
4879 
4880 		if (*task_info_count < TASK_BASIC_INFO_64_2_COUNT) {
4881 			error = KERN_INVALID_ARGUMENT;
4882 			break;
4883 		}
4884 
4885 		basic_info = (task_basic_info_64_2_t)task_info_out;
4886 
4887 		map = (task == kernel_task)? kernel_map: task->map;
4888 		basic_info->virtual_size  = vm_map_adjusted_size(map);
4889 		ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &basic_info->resident_size);
4890 
4891 		basic_info->policy = ((task != kernel_task)?
4892 		    POLICY_TIMESHARE: POLICY_RR);
4893 		basic_info->suspend_count = task->user_stop_count;
4894 
4895 		absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
4896 		basic_info->user_time.seconds =
4897 		    (typeof(basic_info->user_time.seconds))secs;
4898 		basic_info->user_time.microseconds = usecs;
4899 
4900 		absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
4901 		basic_info->system_time.seconds =
4902 		    (typeof(basic_info->system_time.seconds))secs;
4903 		basic_info->system_time.microseconds = usecs;
4904 
4905 		*task_info_count = TASK_BASIC_INFO_64_2_COUNT;
4906 		break;
4907 	}
4908 
4909 #else /* defined(__arm__) || defined(__arm64__) */
4910 	case TASK_BASIC_INFO_64:
4911 	{
4912 		task_basic_info_64_t    basic_info;
4913 		vm_map_t                                map;
4914 		clock_sec_t                             secs;
4915 		clock_usec_t                    usecs;
4916 
4917 		if (*task_info_count < TASK_BASIC_INFO_64_COUNT) {
4918 			error = KERN_INVALID_ARGUMENT;
4919 			break;
4920 		}
4921 
4922 		basic_info = (task_basic_info_64_t)task_info_out;
4923 
4924 		map = (task == kernel_task)? kernel_map: task->map;
4925 		basic_info->virtual_size  = vm_map_adjusted_size(map);
4926 		ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *)&basic_info->resident_size);
4927 
4928 		basic_info->policy = ((task != kernel_task)?
4929 		    POLICY_TIMESHARE: POLICY_RR);
4930 		basic_info->suspend_count = task->user_stop_count;
4931 
4932 		absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
4933 		basic_info->user_time.seconds =
4934 		    (typeof(basic_info->user_time.seconds))secs;
4935 		basic_info->user_time.microseconds = usecs;
4936 
4937 		absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
4938 		basic_info->system_time.seconds =
4939 		    (typeof(basic_info->system_time.seconds))secs;
4940 		basic_info->system_time.microseconds = usecs;
4941 
4942 		*task_info_count = TASK_BASIC_INFO_64_COUNT;
4943 		break;
4944 	}
4945 #endif /* defined(__arm__) || defined(__arm64__) */
4946 
4947 	case MACH_TASK_BASIC_INFO:
4948 	{
4949 		mach_task_basic_info_t  basic_info;
4950 		vm_map_t                map;
4951 		clock_sec_t             secs;
4952 		clock_usec_t            usecs;
4953 
4954 		if (*task_info_count < MACH_TASK_BASIC_INFO_COUNT) {
4955 			error = KERN_INVALID_ARGUMENT;
4956 			break;
4957 		}
4958 
4959 		basic_info = (mach_task_basic_info_t)task_info_out;
4960 
4961 		map = (task == kernel_task) ? kernel_map : task->map;
4962 
4963 		basic_info->virtual_size  = vm_map_adjusted_size(map);
4964 
4965 		ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &basic_info->resident_size);
4966 		ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &basic_info->resident_size_max);
4967 
4968 		basic_info->policy = ((task != kernel_task) ?
4969 		    POLICY_TIMESHARE : POLICY_RR);
4970 
4971 		basic_info->suspend_count = task->user_stop_count;
4972 
4973 		absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
4974 		basic_info->user_time.seconds =
4975 		    (typeof(basic_info->user_time.seconds))secs;
4976 		basic_info->user_time.microseconds = usecs;
4977 
4978 		absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
4979 		basic_info->system_time.seconds =
4980 		    (typeof(basic_info->system_time.seconds))secs;
4981 		basic_info->system_time.microseconds = usecs;
4982 
4983 		*task_info_count = MACH_TASK_BASIC_INFO_COUNT;
4984 		break;
4985 	}
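	/*
	 * Illustrative user-space sketch (not part of this file): the
	 * flavor handled above is typically queried like this.
	 *
	 *	mach_task_basic_info_data_t binfo;
	 *	mach_msg_type_number_t cnt = MACH_TASK_BASIC_INFO_COUNT;
	 *	if (task_info(mach_task_self(), MACH_TASK_BASIC_INFO,
	 *	    (task_info_t)&binfo, &cnt) == KERN_SUCCESS) {
	 *		// resident_size and resident_size_max are in bytes
	 *		printf("rss=%llu peak=%llu\n",
	 *		    binfo.resident_size, binfo.resident_size_max);
	 *	}
	 */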
4986 
4987 	case TASK_THREAD_TIMES_INFO:
4988 	{
4989 		task_thread_times_info_t        times_info;
4990 		thread_t                                        thread;
4991 
4992 		if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
4993 			error = KERN_INVALID_ARGUMENT;
4994 			break;
4995 		}
4996 
4997 		times_info = (task_thread_times_info_t) task_info_out;
4998 		times_info->user_time.seconds = 0;
4999 		times_info->user_time.microseconds = 0;
5000 		times_info->system_time.seconds = 0;
5001 		times_info->system_time.microseconds = 0;
5002 
5003 
5004 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
5005 			time_value_t    user_time, system_time;
5006 
5007 			if (thread->options & TH_OPT_IDLE_THREAD) {
5008 				continue;
5009 			}
5010 
5011 			thread_read_times(thread, &user_time, &system_time, NULL);
5012 
5013 			time_value_add(&times_info->user_time, &user_time);
5014 			time_value_add(&times_info->system_time, &system_time);
5015 		}
5016 
5017 		*task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
5018 		break;
5019 	}
5020 
5021 	case TASK_ABSOLUTETIME_INFO:
5022 	{
5023 		task_absolutetime_info_t        info;
5024 		thread_t                        thread;
5025 
5026 		if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT) {
5027 			error = KERN_INVALID_ARGUMENT;
5028 			break;
5029 		}
5030 
5031 		info = (task_absolutetime_info_t)task_info_out;
5032 		info->threads_user = info->threads_system = 0;
5033 
5034 
5035 		info->total_user = task->total_user_time;
5036 		info->total_system = task->total_system_time;
5037 
5038 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
5039 			uint64_t        tval;
5040 			spl_t           x;
5041 
5042 			if (thread->options & TH_OPT_IDLE_THREAD) {
5043 				continue;
5044 			}
5045 
5046 			x = splsched();
5047 			thread_lock(thread);
5048 
5049 			tval = timer_grab(&thread->user_timer);
5050 			info->threads_user += tval;
5051 			info->total_user += tval;
5052 
5053 			tval = timer_grab(&thread->system_timer);
5054 			if (thread->precise_user_kernel_time) {
5055 				info->threads_system += tval;
5056 				info->total_system += tval;
5057 			} else {
5058 				/* system_timer may represent either sys or user */
5059 				info->threads_user += tval;
5060 				info->total_user += tval;
5061 			}
5062 
5063 			thread_unlock(thread);
5064 			splx(x);
5065 		}
5066 
5067 
5068 		*task_info_count = TASK_ABSOLUTETIME_INFO_COUNT;
5069 		break;
5070 	}
5071 
5072 	case TASK_DYLD_INFO:
5073 	{
5074 		task_dyld_info_t info;
5075 
5076 		/*
5077 		 * We added the format field to TASK_DYLD_INFO output.  For
5078 		 * temporary backward compatibility, accept the fact that
5079 		 * clients may ask for the old version - distinguished by the
5080 		 * size of the expected result structure.
5081 		 */
5082 #define TASK_LEGACY_DYLD_INFO_COUNT \
5083 	        offsetof(struct task_dyld_info, all_image_info_format)/sizeof(natural_t)
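		/*
		 * Worked example (layout assumption, not normative): with the
		 * two 64-bit all_image_info_addr/all_image_info_size fields
		 * preceding all_image_info_format, the offset is 16 bytes, so
		 * the legacy count is 16 / sizeof(natural_t) = 4 elements,
		 * versus TASK_DYLD_INFO_COUNT == 5 for the full structure.
		 */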
5084 
5085 		if (*task_info_count < TASK_LEGACY_DYLD_INFO_COUNT) {
5086 			error = KERN_INVALID_ARGUMENT;
5087 			break;
5088 		}
5089 
5090 		info = (task_dyld_info_t)task_info_out;
5091 		info->all_image_info_addr = task->all_image_info_addr;
5092 		info->all_image_info_size = task->all_image_info_size;
5093 
5094 		/* only set format on output for those expecting it */
5095 		if (*task_info_count >= TASK_DYLD_INFO_COUNT) {
5096 			info->all_image_info_format = task_has_64Bit_addr(task) ?
5097 			    TASK_DYLD_ALL_IMAGE_INFO_64 :
5098 			    TASK_DYLD_ALL_IMAGE_INFO_32;
5099 			*task_info_count = TASK_DYLD_INFO_COUNT;
5100 		} else {
5101 			*task_info_count = TASK_LEGACY_DYLD_INFO_COUNT;
5102 		}
5103 		break;
5104 	}
5105 
5106 	case TASK_EXTMOD_INFO:
5107 	{
5108 		task_extmod_info_t info;
5109 		void *p;
5110 
5111 		if (*task_info_count < TASK_EXTMOD_INFO_COUNT) {
5112 			error = KERN_INVALID_ARGUMENT;
5113 			break;
5114 		}
5115 
5116 		info = (task_extmod_info_t)task_info_out;
5117 
5118 		p = get_bsdtask_info(task);
5119 		if (p) {
5120 			proc_getexecutableuuid(p, info->task_uuid, sizeof(info->task_uuid));
5121 		} else {
5122 			bzero(info->task_uuid, sizeof(info->task_uuid));
5123 		}
5124 		info->extmod_statistics = task->extmod_statistics;
5125 		*task_info_count = TASK_EXTMOD_INFO_COUNT;
5126 
5127 		break;
5128 	}
5129 
5130 	case TASK_KERNELMEMORY_INFO:
5131 	{
5132 		task_kernelmemory_info_t        tkm_info;
5133 		ledger_amount_t                 credit, debit;
5134 
5135 		if (*task_info_count < TASK_KERNELMEMORY_INFO_COUNT) {
5136 			error = KERN_INVALID_ARGUMENT;
5137 			break;
5138 		}
5139 
5140 		tkm_info = (task_kernelmemory_info_t) task_info_out;
5141 		tkm_info->total_palloc = 0;
5142 		tkm_info->total_pfree = 0;
5143 		tkm_info->total_salloc = 0;
5144 		tkm_info->total_sfree = 0;
5145 
5146 		if (task == kernel_task) {
5147 			/*
5148 			 * All shared allocs/frees from other tasks count against
5149 			 * the kernel private memory usage.  If we are looking up
5150 			 * info for the kernel task, gather from everywhere.
5151 			 */
5152 			task_unlock(task);
5153 
5154 			/* start by accounting for all the terminated tasks against the kernel */
5155 			tkm_info->total_palloc = tasks_tkm_private.alloc + tasks_tkm_shared.alloc;
5156 			tkm_info->total_pfree = tasks_tkm_private.free + tasks_tkm_shared.free;
5157 
5158 			/* count all other task/thread shared alloc/free against the kernel */
5159 			lck_mtx_lock(&tasks_threads_lock);
5160 
5161 			/* XXX this really shouldn't be using the function parameter 'task' as a local var! */
5162 			queue_iterate(&tasks, task, task_t, tasks) {
5163 				if (task == kernel_task) {
5164 					if (ledger_get_entries(task->ledger,
5165 					    task_ledgers.tkm_private, &credit,
5166 					    &debit) == KERN_SUCCESS) {
5167 						tkm_info->total_palloc += credit;
5168 						tkm_info->total_pfree += debit;
5169 					}
5170 				}
5171 				if (!ledger_get_entries(task->ledger,
5172 				    task_ledgers.tkm_shared, &credit, &debit)) {
5173 					tkm_info->total_palloc += credit;
5174 					tkm_info->total_pfree += debit;
5175 				}
5176 			}
5177 			lck_mtx_unlock(&tasks_threads_lock);
5178 		} else {
5179 			if (!ledger_get_entries(task->ledger,
5180 			    task_ledgers.tkm_private, &credit, &debit)) {
5181 				tkm_info->total_palloc = credit;
5182 				tkm_info->total_pfree = debit;
5183 			}
5184 			if (!ledger_get_entries(task->ledger,
5185 			    task_ledgers.tkm_shared, &credit, &debit)) {
5186 				tkm_info->total_salloc = credit;
5187 				tkm_info->total_sfree = debit;
5188 			}
5189 			task_unlock(task);
5190 		}
5191 
5192 		*task_info_count = TASK_KERNELMEMORY_INFO_COUNT;
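		/* task lock already dropped on both paths above; return instead of falling through to task_unlock() */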
5193 		return KERN_SUCCESS;
5194 	}
5195 
5196 	/* OBSOLETE */
5197 	case TASK_SCHED_FIFO_INFO:
5198 	{
5199 		if (*task_info_count < POLICY_FIFO_BASE_COUNT) {
5200 			error = KERN_INVALID_ARGUMENT;
5201 			break;
5202 		}
5203 
5204 		error = KERN_INVALID_POLICY;
5205 		break;
5206 	}
5207 
5208 	/* OBSOLETE */
5209 	case TASK_SCHED_RR_INFO:
5210 	{
5211 		policy_rr_base_t        rr_base;
5212 		uint32_t quantum_time;
5213 		uint64_t quantum_ns;
5214 
5215 		if (*task_info_count < POLICY_RR_BASE_COUNT) {
5216 			error = KERN_INVALID_ARGUMENT;
5217 			break;
5218 		}
5219 
5220 		rr_base = (policy_rr_base_t) task_info_out;
5221 
5222 		if (task != kernel_task) {
5223 			error = KERN_INVALID_POLICY;
5224 			break;
5225 		}
5226 
5227 		rr_base->base_priority = task->priority;
5228 
5229 		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
5230 		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);
5231 
5232 		rr_base->quantum = (uint32_t)(quantum_ns / 1000 / 1000);
5233 
5234 		*task_info_count = POLICY_RR_BASE_COUNT;
5235 		break;
5236 	}
5237 
5238 	/* OBSOLETE */
5239 	case TASK_SCHED_TIMESHARE_INFO:
5240 	{
5241 		policy_timeshare_base_t ts_base;
5242 
5243 		if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT) {
5244 			error = KERN_INVALID_ARGUMENT;
5245 			break;
5246 		}
5247 
5248 		ts_base = (policy_timeshare_base_t) task_info_out;
5249 
5250 		if (task == kernel_task) {
5251 			error = KERN_INVALID_POLICY;
5252 			break;
5253 		}
5254 
5255 		ts_base->base_priority = task->priority;
5256 
5257 		*task_info_count = POLICY_TIMESHARE_BASE_COUNT;
5258 		break;
5259 	}
5260 
5261 	case TASK_SECURITY_TOKEN:
5262 	{
5263 		security_token_t        *sec_token_p;
5264 
5265 		if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
5266 			error = KERN_INVALID_ARGUMENT;
5267 			break;
5268 		}
5269 
5270 		sec_token_p = (security_token_t *) task_info_out;
5271 
5272 		*sec_token_p = *task_get_sec_token(task);
5273 
5274 		*task_info_count = TASK_SECURITY_TOKEN_COUNT;
5275 		break;
5276 	}
5277 
5278 	case TASK_AUDIT_TOKEN:
5279 	{
5280 		audit_token_t   *audit_token_p;
5281 
5282 		if (*task_info_count < TASK_AUDIT_TOKEN_COUNT) {
5283 			error = KERN_INVALID_ARGUMENT;
5284 			break;
5285 		}
5286 
5287 		audit_token_p = (audit_token_t *) task_info_out;
5288 
5289 		*audit_token_p = *task_get_audit_token(task);
5290 
5291 		*task_info_count = TASK_AUDIT_TOKEN_COUNT;
5292 		break;
5293 	}
5294 
5295 	case TASK_SCHED_INFO:
5296 		error = KERN_INVALID_ARGUMENT;
5297 		break;
5298 
5299 	case TASK_EVENTS_INFO:
5300 	{
5301 		task_events_info_t      events_info;
5302 		thread_t                thread;
5303 		uint64_t                n_syscalls_mach, n_syscalls_unix, n_csw;
5304 
5305 		if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
5306 			error = KERN_INVALID_ARGUMENT;
5307 			break;
5308 		}
5309 
5310 		events_info = (task_events_info_t) task_info_out;
5311 
5312 
5313 		events_info->faults = (int32_t) MIN(counter_load(&task->faults), INT32_MAX);
5314 		events_info->pageins = (int32_t) MIN(counter_load(&task->pageins), INT32_MAX);
5315 		events_info->cow_faults = (int32_t) MIN(counter_load(&task->cow_faults), INT32_MAX);
5316 		events_info->messages_sent = (int32_t) MIN(counter_load(&task->messages_sent), INT32_MAX);
5317 		events_info->messages_received = (int32_t) MIN(counter_load(&task->messages_received), INT32_MAX);
5318 
5319 		n_syscalls_mach = task->syscalls_mach;
5320 		n_syscalls_unix = task->syscalls_unix;
5321 		n_csw = task->c_switch;
5322 
5323 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
5324 			n_csw           += thread->c_switch;
5325 			n_syscalls_mach += thread->syscalls_mach;
5326 			n_syscalls_unix += thread->syscalls_unix;
5327 		}
5328 
5329 		events_info->syscalls_mach = (int32_t) MIN(n_syscalls_mach, INT32_MAX);
5330 		events_info->syscalls_unix = (int32_t) MIN(n_syscalls_unix, INT32_MAX);
5331 		events_info->csw = (int32_t) MIN(n_csw, INT32_MAX);
5332 
5333 		*task_info_count = TASK_EVENTS_INFO_COUNT;
5334 		break;
5335 	}
5336 	case TASK_AFFINITY_TAG_INFO:
5337 	{
5338 		if (*task_info_count < TASK_AFFINITY_TAG_INFO_COUNT) {
5339 			error = KERN_INVALID_ARGUMENT;
5340 			break;
5341 		}
5342 
5343 		error = task_affinity_info(task, task_info_out, task_info_count);
5344 		break;
5345 	}
5346 	case TASK_POWER_INFO:
5347 	{
5348 		if (*task_info_count < TASK_POWER_INFO_COUNT) {
5349 			error = KERN_INVALID_ARGUMENT;
5350 			break;
5351 		}
5352 
5353 		task_power_info_locked(task, (task_power_info_t)task_info_out, NULL, NULL, NULL);
5354 		break;
5355 	}
5356 
5357 	case TASK_POWER_INFO_V2:
5358 	{
5359 		if (*task_info_count < TASK_POWER_INFO_V2_COUNT_OLD) {
5360 			error = KERN_INVALID_ARGUMENT;
5361 			break;
5362 		}
5363 		task_power_info_v2_t tpiv2 = (task_power_info_v2_t) task_info_out;
5364 		task_power_info_locked(task, &tpiv2->cpu_energy, &tpiv2->gpu_energy, tpiv2, NULL);
5365 		break;
5366 	}
5367 
5368 	case TASK_VM_INFO:
5369 	case TASK_VM_INFO_PURGEABLE:
5370 	{
5371 		task_vm_info_t          vm_info;
5372 		vm_map_t                map;
5373 		ledger_amount_t         tmp_amount;
5374 
5375 #if __arm64__
5376 		struct proc *p;
5377 		uint32_t platform, sdk;
5378 		p = current_proc();
5379 		platform = proc_platform(p);
5380 		sdk = proc_sdk(p);
5381 		if (original_task_info_count > TASK_VM_INFO_REV2_COUNT &&
5382 		    platform == PLATFORM_IOS &&
5383 		    sdk != 0 &&
5384 		    (sdk >> 16) <= 12) {
5385 			/*
5386 			 * Some iOS apps pass an incorrect value for
5387 			 * task_info_count, expressed in number of bytes
5388 			 * instead of number of "natural_t" elements.
5389 			 * For the sake of backwards binary compatibility
5390 			 * for apps built with an iOS12 or older SDK and using
5391 			 * the "rev2" data structure, let's fix task_info_count
5392 			 * for them, to avoid stomping past the actual end
5393 			 * of their buffer.
5394 			 */
5395 #if DEVELOPMENT || DEBUG
5396 			printf("%s:%d %d[%s] rdar://49484582 task_info_count %d -> %d platform %d sdk %d.%d.%d\n", __FUNCTION__, __LINE__, proc_pid(p), proc_name_address(p), original_task_info_count, TASK_VM_INFO_REV2_COUNT, platform, (sdk >> 16), ((sdk >> 8) & 0xff), (sdk & 0xff));
5397 #endif /* DEVELOPMENT || DEBUG */
5398 			DTRACE_VM4(workaround_task_vm_info_count,
5399 			    mach_msg_type_number_t, original_task_info_count,
5400 			    mach_msg_type_number_t, TASK_VM_INFO_REV2_COUNT,
5401 			    uint32_t, platform,
5402 			    uint32_t, sdk);
5403 			original_task_info_count = TASK_VM_INFO_REV2_COUNT;
5404 			*task_info_count = original_task_info_count;
5405 		}
5406 #endif /* __arm64__ */
5407 
5408 		if (*task_info_count < TASK_VM_INFO_REV0_COUNT) {
5409 			error = KERN_INVALID_ARGUMENT;
5410 			break;
5411 		}
5412 
5413 		vm_info = (task_vm_info_t)task_info_out;
5414 
5415 		/*
5416 		 * Do not hold both the task and map locks,
5417 		 * so convert the task lock into a map reference,
5418 		 * drop the task lock, then lock the map.
5419 		 */
5420 		if (is_kernel_task) {
5421 			map = kernel_map;
5422 			task_unlock(task);
5423 			/* no lock, no reference */
5424 		} else {
5425 			map = task->map;
5426 			vm_map_reference(map);
5427 			task_unlock(task);
5428 			vm_map_lock_read(map);
5429 		}
5430 
5431 		vm_info->virtual_size = (typeof(vm_info->virtual_size))vm_map_adjusted_size(map);
5432 		vm_info->region_count = map->hdr.nentries;
5433 		vm_info->page_size = vm_map_page_size(map);
5434 
5435 		ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &vm_info->resident_size);
5436 		ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &vm_info->resident_size_peak);
5437 
5438 		vm_info->device = 0;
5439 		vm_info->device_peak = 0;
5440 		ledger_get_balance(task->ledger, task_ledgers.external, (ledger_amount_t *) &vm_info->external);
5441 		ledger_get_lifetime_max(task->ledger, task_ledgers.external, (ledger_amount_t *) &vm_info->external_peak);
5442 		ledger_get_balance(task->ledger, task_ledgers.internal, (ledger_amount_t *) &vm_info->internal);
5443 		ledger_get_lifetime_max(task->ledger, task_ledgers.internal, (ledger_amount_t *) &vm_info->internal_peak);
5444 		ledger_get_balance(task->ledger, task_ledgers.reusable, (ledger_amount_t *) &vm_info->reusable);
5445 		ledger_get_lifetime_max(task->ledger, task_ledgers.reusable, (ledger_amount_t *) &vm_info->reusable_peak);
5446 		ledger_get_balance(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t*) &vm_info->compressed);
5447 		ledger_get_lifetime_max(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t*) &vm_info->compressed_peak);
5448 		ledger_get_entries(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t*) &vm_info->compressed_lifetime, &tmp_amount);
5449 
5450 		vm_info->purgeable_volatile_pmap = 0;
5451 		vm_info->purgeable_volatile_resident = 0;
5452 		vm_info->purgeable_volatile_virtual = 0;
5453 		if (is_kernel_task) {
5454 			/*
5455 			 * We do not maintain the detailed stats for the
5456 			 * kernel_pmap, so just count everything as
5457 			 * "internal"...
5458 			 */
5459 			vm_info->internal = vm_info->resident_size;
5460 			/*
5461 			 * ... but since the memory held by the VM compressor
5462 			 * in the kernel address space ought to be attributed
5463 			 * to user-space tasks, we subtract it from "internal"
5464 			 * to give memory reporting tools a more accurate idea
5465 			 * of what the kernel itself is actually using, instead
5466 			 * of making it look like the kernel is leaking memory
5467 			 * when the system is under memory pressure.
5468 			 */
5469 			vm_info->internal -= (VM_PAGE_COMPRESSOR_COUNT *
5470 			    PAGE_SIZE);
5471 		} else {
5472 			mach_vm_size_t  volatile_virtual_size;
5473 			mach_vm_size_t  volatile_resident_size;
5474 			mach_vm_size_t  volatile_compressed_size;
5475 			mach_vm_size_t  volatile_pmap_size;
5476 			mach_vm_size_t  volatile_compressed_pmap_size;
5477 			kern_return_t   kr;
5478 
5479 			if (flavor == TASK_VM_INFO_PURGEABLE) {
5480 				kr = vm_map_query_volatile(
5481 					map,
5482 					&volatile_virtual_size,
5483 					&volatile_resident_size,
5484 					&volatile_compressed_size,
5485 					&volatile_pmap_size,
5486 					&volatile_compressed_pmap_size);
5487 				if (kr == KERN_SUCCESS) {
5488 					vm_info->purgeable_volatile_pmap =
5489 					    volatile_pmap_size;
5490 					if (radar_20146450) {
5491 						vm_info->compressed -=
5492 						    volatile_compressed_pmap_size;
5493 					}
5494 					vm_info->purgeable_volatile_resident =
5495 					    volatile_resident_size;
5496 					vm_info->purgeable_volatile_virtual =
5497 					    volatile_virtual_size;
5498 				}
5499 			}
5500 		}
5501 		*task_info_count = TASK_VM_INFO_REV0_COUNT;
5502 
5503 		if (original_task_info_count >= TASK_VM_INFO_REV2_COUNT) {
5504 			/* must be captured while we still have the map lock */
5505 			vm_info->min_address = map->min_offset;
5506 			vm_info->max_address = map->max_offset;
5507 		}
5508 
5509 		/*
5510 		 * Done with vm map things, can drop the map lock and reference,
5511 		 * and take the task lock back.
5512 		 *
5513 		 * Re-validate that the task didn't die on us.
5514 		 */
5515 		if (!is_kernel_task) {
5516 			vm_map_unlock_read(map);
5517 			vm_map_deallocate(map);
5518 		}
5519 		map = VM_MAP_NULL;
5520 
5521 		task_lock(task);
5522 
5523 		if ((task != current_task()) && (!task->active)) {
5524 			error = KERN_INVALID_ARGUMENT;
5525 			break;
5526 		}
5527 
5528 		if (original_task_info_count >= TASK_VM_INFO_REV1_COUNT) {
5529 			vm_info->phys_footprint =
5530 			    (mach_vm_size_t) get_task_phys_footprint(task);
5531 			*task_info_count = TASK_VM_INFO_REV1_COUNT;
5532 		}
5533 		if (original_task_info_count >= TASK_VM_INFO_REV2_COUNT) {
5534 			/* data was captured above */
5535 			*task_info_count = TASK_VM_INFO_REV2_COUNT;
5536 		}
5537 
5538 		if (original_task_info_count >= TASK_VM_INFO_REV3_COUNT) {
5539 			ledger_get_lifetime_max(task->ledger,
5540 			    task_ledgers.phys_footprint,
5541 			    &vm_info->ledger_phys_footprint_peak);
5542 			ledger_get_balance(task->ledger,
5543 			    task_ledgers.purgeable_nonvolatile,
5544 			    &vm_info->ledger_purgeable_nonvolatile);
5545 			ledger_get_balance(task->ledger,
5546 			    task_ledgers.purgeable_nonvolatile_compressed,
5547 			    &vm_info->ledger_purgeable_novolatile_compressed);
5548 			ledger_get_balance(task->ledger,
5549 			    task_ledgers.purgeable_volatile,
5550 			    &vm_info->ledger_purgeable_volatile);
5551 			ledger_get_balance(task->ledger,
5552 			    task_ledgers.purgeable_volatile_compressed,
5553 			    &vm_info->ledger_purgeable_volatile_compressed);
5554 			ledger_get_balance(task->ledger,
5555 			    task_ledgers.network_nonvolatile,
5556 			    &vm_info->ledger_tag_network_nonvolatile);
5557 			ledger_get_balance(task->ledger,
5558 			    task_ledgers.network_nonvolatile_compressed,
5559 			    &vm_info->ledger_tag_network_nonvolatile_compressed);
5560 			ledger_get_balance(task->ledger,
5561 			    task_ledgers.network_volatile,
5562 			    &vm_info->ledger_tag_network_volatile);
5563 			ledger_get_balance(task->ledger,
5564 			    task_ledgers.network_volatile_compressed,
5565 			    &vm_info->ledger_tag_network_volatile_compressed);
5566 			ledger_get_balance(task->ledger,
5567 			    task_ledgers.media_footprint,
5568 			    &vm_info->ledger_tag_media_footprint);
5569 			ledger_get_balance(task->ledger,
5570 			    task_ledgers.media_footprint_compressed,
5571 			    &vm_info->ledger_tag_media_footprint_compressed);
5572 			ledger_get_balance(task->ledger,
5573 			    task_ledgers.media_nofootprint,
5574 			    &vm_info->ledger_tag_media_nofootprint);
5575 			ledger_get_balance(task->ledger,
5576 			    task_ledgers.media_nofootprint_compressed,
5577 			    &vm_info->ledger_tag_media_nofootprint_compressed);
5578 			ledger_get_balance(task->ledger,
5579 			    task_ledgers.graphics_footprint,
5580 			    &vm_info->ledger_tag_graphics_footprint);
5581 			ledger_get_balance(task->ledger,
5582 			    task_ledgers.graphics_footprint_compressed,
5583 			    &vm_info->ledger_tag_graphics_footprint_compressed);
5584 			ledger_get_balance(task->ledger,
5585 			    task_ledgers.graphics_nofootprint,
5586 			    &vm_info->ledger_tag_graphics_nofootprint);
5587 			ledger_get_balance(task->ledger,
5588 			    task_ledgers.graphics_nofootprint_compressed,
5589 			    &vm_info->ledger_tag_graphics_nofootprint_compressed);
5590 			ledger_get_balance(task->ledger,
5591 			    task_ledgers.neural_footprint,
5592 			    &vm_info->ledger_tag_neural_footprint);
5593 			ledger_get_balance(task->ledger,
5594 			    task_ledgers.neural_footprint_compressed,
5595 			    &vm_info->ledger_tag_neural_footprint_compressed);
5596 			ledger_get_balance(task->ledger,
5597 			    task_ledgers.neural_nofootprint,
5598 			    &vm_info->ledger_tag_neural_nofootprint);
5599 			ledger_get_balance(task->ledger,
5600 			    task_ledgers.neural_nofootprint_compressed,
5601 			    &vm_info->ledger_tag_neural_nofootprint_compressed);
5602 			*task_info_count = TASK_VM_INFO_REV3_COUNT;
5603 		}
5604 		if (original_task_info_count >= TASK_VM_INFO_REV4_COUNT) {
5605 			if (task->bsd_info) {
5606 				vm_info->limit_bytes_remaining =
5607 				    memorystatus_available_memory_internal(task->bsd_info);
5608 			} else {
5609 				vm_info->limit_bytes_remaining = 0;
5610 			}
5611 			*task_info_count = TASK_VM_INFO_REV4_COUNT;
5612 		}
5613 		if (original_task_info_count >= TASK_VM_INFO_REV5_COUNT) {
5614 			thread_t thread;
5615 			uint64_t total = task->decompressions;
5616 			queue_iterate(&task->threads, thread, thread_t, task_threads) {
5617 				total += thread->decompressions;
5618 			}
5619 			vm_info->decompressions = (int32_t) MIN(total, INT32_MAX);
5620 			*task_info_count = TASK_VM_INFO_REV5_COUNT;
5621 		}
5622 		if (original_task_info_count >= TASK_VM_INFO_REV6_COUNT) {
5623 			ledger_get_balance(task->ledger, task_ledgers.swapins,
5624 			    &vm_info->ledger_swapins);
5625 			*task_info_count = TASK_VM_INFO_REV6_COUNT;
5626 		}
5627 
5628 		break;
5629 	}
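	/*
	 * Illustrative user-space sketch: pass TASK_VM_INFO_COUNT to ask for
	 * the newest revision; the kernel trims *count to the revision it
	 * actually filled in, so check the returned count before reading
	 * newer fields such as phys_footprint.
	 *
	 *	task_vm_info_data_t vmi;
	 *	mach_msg_type_number_t cnt = TASK_VM_INFO_COUNT;
	 *	if (task_info(mach_task_self(), TASK_VM_INFO,
	 *	    (task_info_t)&vmi, &cnt) == KERN_SUCCESS &&
	 *	    cnt >= TASK_VM_INFO_REV1_COUNT) {
	 *		printf("phys_footprint=%llu\n", vmi.phys_footprint);
	 *	}
	 */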
5630 
5631 	case TASK_WAIT_STATE_INFO:
5632 	{
5633 		/*
5634 		 * Deprecated flavor. Currently allowing some results until all users
5635 		 * stop calling it. The results may not be accurate.
5636 		 */
5637 		task_wait_state_info_t  wait_state_info;
5638 		uint64_t total_sfi_ledger_val = 0;
5639 
5640 		if (*task_info_count < TASK_WAIT_STATE_INFO_COUNT) {
5641 			error = KERN_INVALID_ARGUMENT;
5642 			break;
5643 		}
5644 
5645 		wait_state_info = (task_wait_state_info_t) task_info_out;
5646 
5647 		wait_state_info->total_wait_state_time = 0;
5648 		bzero(wait_state_info->_reserved, sizeof(wait_state_info->_reserved));
5649 
5650 #if CONFIG_SCHED_SFI
5651 		int i, prev_lentry = -1;
5652 		int64_t  val_credit, val_debit;
5653 
5654 		for (i = 0; i < MAX_SFI_CLASS_ID; i++) {
5655 			val_credit = 0;
5656 			/*
5657 			 * Checking prev_lentry != entry ensures that adjacent classes
5658 			 * sharing the same ledger do not add their wait times twice.
5659 			 * Note: use the ledger call to get data for each individual SFI class.
5660 			 */
5661 			if (prev_lentry != task_ledgers.sfi_wait_times[i] &&
5662 			    KERN_SUCCESS == ledger_get_entries(task->ledger,
5663 			    task_ledgers.sfi_wait_times[i], &val_credit, &val_debit)) {
5664 				total_sfi_ledger_val += val_credit;
5665 			}
5666 			prev_lentry = task_ledgers.sfi_wait_times[i];
5667 		}
5668 
5669 #endif /* CONFIG_SCHED_SFI */
5670 		wait_state_info->total_wait_sfi_state_time = total_sfi_ledger_val;
5671 		*task_info_count = TASK_WAIT_STATE_INFO_COUNT;
5672 
5673 		break;
5674 	}
5675 	case TASK_VM_INFO_PURGEABLE_ACCOUNT:
5676 	{
5677 #if DEVELOPMENT || DEBUG
5678 		pvm_account_info_t      acnt_info;
5679 
5680 		if (*task_info_count < PVM_ACCOUNT_INFO_COUNT) {
5681 			error = KERN_INVALID_ARGUMENT;
5682 			break;
5683 		}
5684 
5685 		if (task_info_out == NULL) {
5686 			error = KERN_INVALID_ARGUMENT;
5687 			break;
5688 		}
5689 
5690 		acnt_info = (pvm_account_info_t) task_info_out;
5691 
5692 		error = vm_purgeable_account(task, acnt_info);
5693 
5694 		*task_info_count = PVM_ACCOUNT_INFO_COUNT;
5695 
5696 		break;
5697 #else /* DEVELOPMENT || DEBUG */
5698 		error = KERN_NOT_SUPPORTED;
5699 		break;
5700 #endif /* DEVELOPMENT || DEBUG */
5701 	}
5702 	case TASK_FLAGS_INFO:
5703 	{
5704 		task_flags_info_t               flags_info;
5705 
5706 		if (*task_info_count < TASK_FLAGS_INFO_COUNT) {
5707 			error = KERN_INVALID_ARGUMENT;
5708 			break;
5709 		}
5710 
5711 		flags_info = (task_flags_info_t)task_info_out;
5712 
5713 		/* only publish the 64-bit flag of the task */
5714 		flags_info->flags = task->t_flags & (TF_64B_ADDR | TF_64B_DATA);
5715 
5716 		*task_info_count = TASK_FLAGS_INFO_COUNT;
5717 		break;
5718 	}
5719 
5720 	case TASK_DEBUG_INFO_INTERNAL:
5721 	{
5722 #if DEVELOPMENT || DEBUG
5723 		task_debug_info_internal_t dbg_info;
5724 		ipc_space_t space = task->itk_space;
5725 		if (*task_info_count < TASK_DEBUG_INFO_INTERNAL_COUNT) {
5726 			error = KERN_NOT_SUPPORTED;
5727 			break;
5728 		}
5729 
5730 		if (task_info_out == NULL) {
5731 			error = KERN_INVALID_ARGUMENT;
5732 			break;
5733 		}
5734 		dbg_info = (task_debug_info_internal_t) task_info_out;
5735 		dbg_info->ipc_space_size = 0;
5736 
5737 		if (space) {
5738 #if MACH_LOCKFREE_SPACE
5739 			hazard_guard_t guard = hazard_guard_get(0);
5740 			ipc_entry_t table = hazard_guard_acquire(guard, &space->is_table);
5741 			if (table) {
5742 				dbg_info->ipc_space_size = table->ie_size;
5743 			}
5744 			hazard_guard_put(guard);
5745 #else
5746 			is_read_lock(space);
5747 			if (is_active(space)) {
5748 				dbg_info->ipc_space_size =
5749 				    is_active_table(space)->ie_size;
5750 			}
5751 			is_read_unlock(space);
5752 #endif
5753 		}
5754 
5755 		dbg_info->suspend_count = task->suspend_count;
5756 
5757 		error = KERN_SUCCESS;
5758 		*task_info_count = TASK_DEBUG_INFO_INTERNAL_COUNT;
5759 		break;
5760 #else /* DEVELOPMENT || DEBUG */
5761 		error = KERN_NOT_SUPPORTED;
5762 		break;
5763 #endif /* DEVELOPMENT || DEBUG */
5764 	}
5765 	default:
5766 		error = KERN_INVALID_ARGUMENT;
5767 	}
5768 
5769 	task_unlock(task);
5770 	return error;
5771 }
5772 
5773 /*
5774  * task_info_from_user
5775  *
5776  * When task_info is called from user space,
5777  * this function is executed on the MIG server side
5778  * instead of calling directly into task_info.
5779  * This makes it possible to perform additional security
5780  * checks on task_port.
5781  *
5782  * In the case of TASK_DYLD_INFO, we require the more
5783  * privileged task_read_port rather than the less-privileged task_name_port.
5784  *
5785  */
5786 kern_return_t
5787 task_info_from_user(
5788 	mach_port_t             task_port,
5789 	task_flavor_t           flavor,
5790 	task_info_t             task_info_out,
5791 	mach_msg_type_number_t  *task_info_count)
5792 {
5793 	task_t task;
5794 	kern_return_t ret;
5795 
5796 	if (flavor == TASK_DYLD_INFO) {
5797 		task = convert_port_to_task_read(task_port);
5798 	} else {
5799 		task = convert_port_to_task_name(task_port);
5800 	}
5801 
5802 	ret = task_info(task, flavor, task_info_out, task_info_count);
5803 
5804 	task_deallocate(task);
5805 
5806 	return ret;
5807 }
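/*
 * Illustrative user-space sketch (hedged): a task name port, e.g. from
 * task_name_for_pid(), suffices for most flavors, but TASK_DYLD_INFO needs
 * at least a read port; with only a name port the conversion above yields
 * TASK_NULL and task_info() returns KERN_INVALID_ARGUMENT.
 *
 *	struct task_dyld_info dyld_info;
 *	mach_msg_type_number_t cnt = TASK_DYLD_INFO_COUNT;
 *	kern_return_t kr = task_info(name_port, TASK_DYLD_INFO,
 *	    (task_info_t)&dyld_info, &cnt);
 *	// kr == KERN_INVALID_ARGUMENT for a bare name port
 */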
5808 
5809 /*
5810  * Routine: task_dyld_process_info_update_helper
5811  *
5812  * Release send rights in release_ports.
5813  *
5814  * If no active ports are found in the task's dyld notifier array, unset the
5815  * magic value in user space to indicate so.
5816  *
5817  * Condition:
5818  *      task's itk_lock is locked, and is unlocked upon return.
5819  *      Global g_dyldinfo_mtx is locked, and is unlocked upon return.
5820  */
5821 void
5822 task_dyld_process_info_update_helper(
5823 	task_t                  task,
5824 	size_t                  active_count,
5825 	vm_map_address_t        magic_addr,    /* a userspace address */
5826 	ipc_port_t             *release_ports,
5827 	size_t                  release_count)
5828 {
5829 	void *notifiers_ptr = NULL;
5830 
5831 	assert(release_count <= DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT);
5832 
5833 	if (active_count == 0) {
5834 		assert(task->itk_dyld_notify != NULL);
5835 		notifiers_ptr = task->itk_dyld_notify;
5836 		task->itk_dyld_notify = NULL;
5837 		itk_unlock(task);
5838 
5839 		kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
5840 		(void)copyoutmap_atomic32(task->map, MACH_PORT_NULL, magic_addr); /* unset magic */
5841 	} else {
5842 		itk_unlock(task);
5843 		(void)copyoutmap_atomic32(task->map, (mach_port_name_t)DYLD_PROCESS_INFO_NOTIFY_MAGIC,
5844 		    magic_addr);     /* reset magic */
5845 	}
5846 
5847 	lck_mtx_unlock(&g_dyldinfo_mtx);
5848 
5849 	for (size_t i = 0; i < release_count; i++) {
5850 		ipc_port_release_send(release_ports[i]);
5851 	}
5852 }
5853 
5854 /*
5855  * Routine: task_dyld_process_info_notify_register
5856  *
5857  * Insert a send right into the target task's itk_dyld_notify array. Allocate kernel
5858  * memory for the array if it's the first port to be registered. Also clean up
5859  * any dead rights found in the array.
5860  *
5861  * Consumes sright if returns KERN_SUCCESS, otherwise MIG will destroy it.
5862  *
5863  * Args:
5864  *     task:   Target task for the registration.
5865  *     sright: A send right.
5866  *
5867  * Returns:
5868  *     KERN_SUCCESS: Registration succeeded.
5869  *     KERN_INVALID_TASK: task is invalid.
5870  *     KERN_INVALID_RIGHT: sright is invalid.
5871  *     KERN_DENIED: Security policy denied this call.
5872  *     KERN_RESOURCE_SHORTAGE: Kernel memory allocation failed.
5873  *     KERN_NO_SPACE: No available notifier port slot left for this task.
5874  *     KERN_RIGHT_EXISTS: The notifier port is already registered and active.
5875  *
5876  *     Other error code see task_info().
5877  *
5878  * See Also:
5879  *     task_dyld_process_info_notify_get_trap() in mach_kernelrpc.c
5880  */
5881 kern_return_t
5882 task_dyld_process_info_notify_register(
5883 	task_t                  task,
5884 	ipc_port_t              sright)
5885 {
5886 	struct task_dyld_info dyld_info;
5887 	mach_msg_type_number_t info_count = TASK_DYLD_INFO_COUNT;
5888 	ipc_port_t release_ports[DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT];
5889 	uint32_t release_count = 0, active_count = 0;
5890 	mach_vm_address_t ports_addr; /* a user space address */
5891 	kern_return_t kr;
5892 	boolean_t right_exists = false;
5893 	ipc_port_t *notifiers_ptr = NULL;
5894 	ipc_port_t *portp;
5895 
5896 	if (task == TASK_NULL || task == kernel_task) {
5897 		return KERN_INVALID_TASK;
5898 	}
5899 
5900 	if (!IP_VALID(sright)) {
5901 		return KERN_INVALID_RIGHT;
5902 	}
5903 
5904 #if CONFIG_MACF
5905 	if (mac_task_check_dyld_process_info_notify_register()) {
5906 		return KERN_DENIED;
5907 	}
5908 #endif
5909 
5910 	kr = task_info(task, TASK_DYLD_INFO, (task_info_t)&dyld_info, &info_count);
5911 	if (kr) {
5912 		return kr;
5913 	}
5914 
5915 	if (dyld_info.all_image_info_format == TASK_DYLD_ALL_IMAGE_INFO_32) {
5916 		ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
5917 		    offsetof(struct user32_dyld_all_image_infos, notifyMachPorts));
5918 	} else {
5919 		ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
5920 		    offsetof(struct user64_dyld_all_image_infos, notifyMachPorts));
5921 	}
5922 
5923 	if (task->itk_dyld_notify == NULL) {
5924 		notifiers_ptr = kalloc_type(ipc_port_t,
5925 		    DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT,
5926 		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
5927 	}
5928 
5929 	lck_mtx_lock(&g_dyldinfo_mtx);
5930 	itk_lock(task);
5931 
5932 	if (task->itk_dyld_notify == NULL) {
5933 		task->itk_dyld_notify = notifiers_ptr;
5934 		notifiers_ptr = NULL;
5935 	}
5936 
5937 	assert(task->itk_dyld_notify != NULL);
5938 	/* First pass: clear dead names and check for duplicate registration */
5939 	for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
5940 		portp = &task->itk_dyld_notify[slot];
5941 		if (*portp != IPC_PORT_NULL && !ip_active(*portp)) {
5942 			release_ports[release_count++] = *portp;
5943 			*portp = IPC_PORT_NULL;
5944 		} else if (*portp == sright) {
5945 			/* the port is already registered and is active */
5946 			right_exists = true;
5947 		}
5948 
5949 		if (*portp != IPC_PORT_NULL) {
5950 			active_count++;
5951 		}
5952 	}
5953 
5954 	if (right_exists) {
5955 		/* skip second pass */
5956 		kr = KERN_RIGHT_EXISTS;
5957 		goto out;
5958 	}
5959 
5960 	/* Second pass: register the port */
5961 	kr = KERN_NO_SPACE;
5962 	for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
5963 		portp = &task->itk_dyld_notify[slot];
5964 		if (*portp == IPC_PORT_NULL) {
5965 			*portp = sright;
5966 			active_count++;
5967 			kr = KERN_SUCCESS;
5968 			break;
5969 		}
5970 	}
5971 
5972 out:
5973 	assert(active_count > 0);
5974 
5975 	task_dyld_process_info_update_helper(task, active_count,
5976 	    (vm_map_address_t)ports_addr, release_ports, release_count);
5977 	/* itk_lock, g_dyldinfo_mtx are unlocked upon return */
5978 
5979 	kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
5980 
5981 	return kr;
5982 }
5983 
5984 /*
5985  * Routine: task_dyld_process_info_notify_deregister
5986  *
5987  * Remove the send right in the target task's itk_dyld_notify array matching the receive
5988  * right name passed in. Deallocate kernel memory for the array if it's the last port to
5989  * be deregistered, or if all ports have died. Also clean up any dead rights found in the array.
5990  *
5991  * Does not consume any reference.
5992  *
5993  * Args:
5994  *     task: Target task for the deregistration.
5995  *     rcv_name: The name denoting the receive right in caller's space.
5996  *
5997  * Returns:
5998  *     KERN_SUCCESS: A matching entry was found and deregistration succeeded.
5999  *     KERN_INVALID_TASK: task is invalid.
6000  *     KERN_INVALID_NAME: name is invalid.
6001  *     KERN_DENIED: Security policy denied this call.
6002  *     KERN_FAILURE: A matching entry is not found.
6003  *     KERN_INVALID_RIGHT: The name passed in does not represent a valid rcv right.
6004  *
6005  *     Other error code see task_info().
6006  *
6007  * See Also:
6008  *     task_dyld_process_info_notify_get_trap() in mach_kernelrpc.c
6009  */
6010 kern_return_t
6011 task_dyld_process_info_notify_deregister(
6012 	task_t                  task,
6013 	mach_port_name_t        rcv_name)
6014 {
6015 	struct task_dyld_info dyld_info;
6016 	mach_msg_type_number_t info_count = TASK_DYLD_INFO_COUNT;
6017 	ipc_port_t release_ports[DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT];
6018 	uint32_t release_count = 0, active_count = 0;
6019 	boolean_t port_found = false;
6020 	mach_vm_address_t ports_addr; /* a user space address */
6021 	ipc_port_t sright;
6022 	kern_return_t kr;
6023 	ipc_port_t *portp;
6024 
6025 	if (task == TASK_NULL || task == kernel_task) {
6026 		return KERN_INVALID_TASK;
6027 	}
6028 
6029 	if (!MACH_PORT_VALID(rcv_name)) {
6030 		return KERN_INVALID_NAME;
6031 	}
6032 
6033 #if CONFIG_MACF
6034 	if (mac_task_check_dyld_process_info_notify_register()) {
6035 		return KERN_DENIED;
6036 	}
6037 #endif
6038 
6039 	kr = task_info(task, TASK_DYLD_INFO, (task_info_t)&dyld_info, &info_count);
6040 	if (kr) {
6041 		return kr;
6042 	}
6043 
6044 	if (dyld_info.all_image_info_format == TASK_DYLD_ALL_IMAGE_INFO_32) {
6045 		ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6046 		    offsetof(struct user32_dyld_all_image_infos, notifyMachPorts));
6047 	} else {
6048 		ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6049 		    offsetof(struct user64_dyld_all_image_infos, notifyMachPorts));
6050 	}
6051 
6052 	kr = ipc_port_translate_receive(current_space(), rcv_name, &sright); /* does not produce port ref */
6053 	if (kr) {
6054 		return KERN_INVALID_RIGHT;
6055 	}
6056 
6057 	ip_reference(sright);
6058 	ip_mq_unlock(sright);
6059 
6060 	assert(sright != IPC_PORT_NULL);
6061 
6062 	lck_mtx_lock(&g_dyldinfo_mtx);
6063 	itk_lock(task);
6064 
6065 	if (task->itk_dyld_notify == NULL) {
6066 		itk_unlock(task);
6067 		lck_mtx_unlock(&g_dyldinfo_mtx);
6068 		ip_release(sright);
6069 		return KERN_FAILURE;
6070 	}
6071 
6072 	for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
6073 		portp = &task->itk_dyld_notify[slot];
6074 		if (*portp == sright) {
6075 			release_ports[release_count++] = *portp;
6076 			*portp = IPC_PORT_NULL;
6077 			port_found = true;
6078 		} else if ((*portp != IPC_PORT_NULL && !ip_active(*portp))) {
6079 			release_ports[release_count++] = *portp;
6080 			*portp = IPC_PORT_NULL;
6081 		}
6082 
6083 		if (*portp != IPC_PORT_NULL) {
6084 			active_count++;
6085 		}
6086 	}
6087 
6088 	task_dyld_process_info_update_helper(task, active_count,
6089 	    (vm_map_address_t)ports_addr, release_ports, release_count);
6090 	/* itk_lock, g_dyldinfo_mtx are unlocked upon return */
6091 
6092 	ip_release(sright);
6093 
6094 	return port_found ? KERN_SUCCESS : KERN_FAILURE;
6095 }
6096 
6097 /*
6098  *	task_power_info
6099  *
6100  *	Returns power stats for the task.
6101  *	Note: Called with task locked.
6102  */
6103 void
6104 task_power_info_locked(
6105 	task_t                  task,
6106 	task_power_info_t       info,
6107 	gpu_energy_data_t       ginfo,
6108 	task_power_info_v2_t    infov2,
6109 	uint64_t                *runnable_time)
6110 {
6111 	thread_t                thread;
6112 	ledger_amount_t         tmp;
6113 
6114 	uint64_t                runnable_time_sum = 0;
6115 
6116 	task_lock_assert_owned(task);
6117 
6118 	ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
6119 	    (ledger_amount_t *)&info->task_interrupt_wakeups, &tmp);
6120 	ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
6121 	    (ledger_amount_t *)&info->task_platform_idle_wakeups, &tmp);
6122 
6123 	info->task_timer_wakeups_bin_1 = task->task_timer_wakeups_bin_1;
6124 	info->task_timer_wakeups_bin_2 = task->task_timer_wakeups_bin_2;
6125 
6126 	info->total_user = task->total_user_time;
6127 	info->total_system = task->total_system_time;
6128 	runnable_time_sum = task->total_runnable_time;
6129 
6130 #if defined(__arm__) || defined(__arm64__)
6131 	if (infov2) {
6132 		infov2->task_energy = task->task_energy;
6133 	}
6134 #endif /* defined(__arm__) || defined(__arm64__) */
6135 
6136 	if (ginfo) {
6137 		ginfo->task_gpu_utilisation = task->task_gpu_ns;
6138 	}
6139 
6140 	if (infov2) {
6141 		infov2->task_ptime = task->total_ptime;
6142 		infov2->task_pset_switches = task->ps_switch;
6143 	}
6144 
6145 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
6146 		uint64_t        tval;
6147 		spl_t           x;
6148 
6149 		if (thread->options & TH_OPT_IDLE_THREAD) {
6150 			continue;
6151 		}
6152 
6153 		x = splsched();
6154 		thread_lock(thread);
6155 
6156 		info->task_timer_wakeups_bin_1 += thread->thread_timer_wakeups_bin_1;
6157 		info->task_timer_wakeups_bin_2 += thread->thread_timer_wakeups_bin_2;
6158 
6159 #if defined(__arm__) || defined(__arm64__)
6160 		if (infov2) {
6161 			infov2->task_energy += ml_energy_stat(thread);
6162 		}
6163 #endif /* defined(__arm__) || defined(__arm64__) */
6164 
6165 		tval = timer_grab(&thread->user_timer);
6166 		info->total_user += tval;
6167 
6168 		if (infov2) {
6169 			tval = timer_grab(&thread->ptime);
6170 			infov2->task_ptime += tval;
6171 			infov2->task_pset_switches += thread->ps_switch;
6172 		}
6173 
6174 		tval = timer_grab(&thread->system_timer);
6175 		if (thread->precise_user_kernel_time) {
6176 			info->total_system += tval;
6177 		} else {
6178 			/* system_timer may represent either sys or user */
6179 			info->total_user += tval;
6180 		}
6181 
6182 		tval = timer_grab(&thread->runnable_timer);
6183 
6184 		runnable_time_sum += tval;
6185 
6186 		if (ginfo) {
6187 			ginfo->task_gpu_utilisation += ml_gpu_stat(thread);
6188 		}
6189 		thread_unlock(thread);
6190 		splx(x);
6191 	}
6192 
6193 	if (runnable_time) {
6194 		*runnable_time = runnable_time_sum;
6195 	}
6196 }
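/*
 * Illustrative user-space sketch (not kernel code): the data gathered above
 * is exposed through the TASK_POWER_INFO_V2 flavor of task_info().
 *
 *	task_power_info_v2_data_t pinfo;
 *	mach_msg_type_number_t cnt = TASK_POWER_INFO_V2_COUNT;
 *	if (task_info(mach_task_self(), TASK_POWER_INFO_V2,
 *	    (task_info_t)&pinfo, &cnt) == KERN_SUCCESS) {
 *		// interrupt wakeups accumulated from the ledger above
 *		printf("wakeups=%llu\n",
 *		    (uint64_t)pinfo.cpu_energy.task_interrupt_wakeups);
 *	}
 */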
6197 
6198 /*
6199  *	task_gpu_utilisation
6200  *
6201  *	Returns the total GPU time used by all the threads of the task
6202  *	(both dead and alive).
6203  */
6204 uint64_t
6205 task_gpu_utilisation(
6206 	task_t  task)
6207 {
6208 	uint64_t gpu_time = 0;
6209 #if defined(__x86_64__)
6210 	thread_t thread;
6211 
6212 	task_lock(task);
6213 	gpu_time += task->task_gpu_ns;
6214 
6215 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
6216 		spl_t x;
6217 		x = splsched();
6218 		thread_lock(thread);
6219 		gpu_time += ml_gpu_stat(thread);
6220 		thread_unlock(thread);
6221 		splx(x);
6222 	}
6223 
6224 	task_unlock(task);
6225 #else /* defined(__x86_64__) */
6226 	/* silence compiler warning */
6227 	(void)task;
6228 #endif /* defined(__x86_64__) */
6229 	return gpu_time;
6230 }
6231 
6232 /*
6233  *	task_energy
6234  *
6235  *	Returns the total energy used by all the threads of the task
6236  *	(both dead and alive).
6237  */
6238 uint64_t
6239 task_energy(
6240 	task_t  task)
6241 {
6242 	uint64_t energy = 0;
6243 	thread_t thread;
6244 
6245 	task_lock(task);
6246 	energy += task->task_energy;
6247 
6248 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
6249 		spl_t x;
6250 		x = splsched();
6251 		thread_lock(thread);
6252 		energy += ml_energy_stat(thread);
6253 		thread_unlock(thread);
6254 		splx(x);
6255 	}
6256 
6257 	task_unlock(task);
6258 	return energy;
6259 }
6260 
6261 #if __AMP__
6262 
6263 uint64_t
6264 task_cpu_ptime(
6265 	task_t  task)
6266 {
6267 	uint64_t cpu_ptime = 0;
6268 	thread_t thread;
6269 
6270 	task_lock(task);
6271 	cpu_ptime += task->total_ptime;
6272 
6273 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
6274 		if (thread->options & TH_OPT_IDLE_THREAD) {
6275 			continue;
6276 		}
6277 		cpu_ptime += timer_grab(&thread->ptime);
6278 	}
6279 
6280 	task_unlock(task);
6281 	return cpu_ptime;
6282 }
6283 
6284 #else /* __AMP__ */
6285 
6286 uint64_t
6287 task_cpu_ptime(
6288 	__unused task_t  task)
6289 {
6290 	return 0;
6291 }
6292 
6293 #endif /* __AMP__ */
6294 
6295 /* This function updates the CPU time in the arrays for each
6296  * effective and requested QoS class.
6297  */
6298 void
6299 task_update_cpu_time_qos_stats(
6300 	task_t  task,
6301 	uint64_t *eqos_stats,
6302 	uint64_t *rqos_stats)
6303 {
6304 	if (!eqos_stats && !rqos_stats) {
6305 		return;
6306 	}
6307 
6308 	task_lock(task);
6309 	thread_t thread;
6310 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
6311 		if (thread->options & TH_OPT_IDLE_THREAD) {
6312 			continue;
6313 		}
6314 
6315 		thread_update_qos_cpu_time(thread);
6316 	}
6317 
6318 	if (eqos_stats) {
6319 		eqos_stats[THREAD_QOS_DEFAULT] += task->cpu_time_eqos_stats.cpu_time_qos_default;
6320 		eqos_stats[THREAD_QOS_MAINTENANCE] += task->cpu_time_eqos_stats.cpu_time_qos_maintenance;
6321 		eqos_stats[THREAD_QOS_BACKGROUND] += task->cpu_time_eqos_stats.cpu_time_qos_background;
6322 		eqos_stats[THREAD_QOS_UTILITY] += task->cpu_time_eqos_stats.cpu_time_qos_utility;
6323 		eqos_stats[THREAD_QOS_LEGACY] += task->cpu_time_eqos_stats.cpu_time_qos_legacy;
6324 		eqos_stats[THREAD_QOS_USER_INITIATED] += task->cpu_time_eqos_stats.cpu_time_qos_user_initiated;
6325 		eqos_stats[THREAD_QOS_USER_INTERACTIVE] += task->cpu_time_eqos_stats.cpu_time_qos_user_interactive;
6326 	}
6327 
6328 	if (rqos_stats) {
6329 		rqos_stats[THREAD_QOS_DEFAULT] += task->cpu_time_rqos_stats.cpu_time_qos_default;
6330 		rqos_stats[THREAD_QOS_MAINTENANCE] += task->cpu_time_rqos_stats.cpu_time_qos_maintenance;
6331 		rqos_stats[THREAD_QOS_BACKGROUND] += task->cpu_time_rqos_stats.cpu_time_qos_background;
6332 		rqos_stats[THREAD_QOS_UTILITY] += task->cpu_time_rqos_stats.cpu_time_qos_utility;
6333 		rqos_stats[THREAD_QOS_LEGACY] += task->cpu_time_rqos_stats.cpu_time_qos_legacy;
6334 		rqos_stats[THREAD_QOS_USER_INITIATED] += task->cpu_time_rqos_stats.cpu_time_qos_user_initiated;
6335 		rqos_stats[THREAD_QOS_USER_INTERACTIVE] += task->cpu_time_rqos_stats.cpu_time_qos_user_interactive;
6336 	}
6337 
6338 	task_unlock(task);
6339 }
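/*
 * Minimal caller sketch (assumes THREAD_QOS_LAST from <mach/thread_policy.h>
 * bounds the QoS indices used above):
 *
 *	uint64_t eqos[THREAD_QOS_LAST] = { 0 };
 *	uint64_t rqos[THREAD_QOS_LAST] = { 0 };
 *	task_update_cpu_time_qos_stats(task, eqos, rqos);
 *	// eqos[THREAD_QOS_UTILITY] now holds the utility-class CPU time
 */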
6340 
6341 kern_return_t
6342 task_purgable_info(
6343 	task_t                  task,
6344 	task_purgable_info_t    *stats)
6345 {
6346 	if (task == TASK_NULL || stats == NULL) {
6347 		return KERN_INVALID_ARGUMENT;
6348 	}
6349 	/* Take task reference */
6350 	task_reference(task);
6351 	vm_purgeable_stats((vm_purgeable_info_t)stats, task);
6352 	/* Drop task reference */
6353 	task_deallocate(task);
6354 	return KERN_SUCCESS;
6355 }
6356 
6357 void
6358 task_vtimer_set(
6359 	task_t          task,
6360 	integer_t       which)
6361 {
6362 	thread_t        thread;
6363 	spl_t           x;
6364 
6365 	task_lock(task);
6366 
6367 	task->vtimers |= which;
6368 
6369 	switch (which) {
6370 	case TASK_VTIMER_USER:
6371 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
6372 			x = splsched();
6373 			thread_lock(thread);
6374 			if (thread->precise_user_kernel_time) {
6375 				thread->vtimer_user_save = timer_grab(&thread->user_timer);
6376 			} else {
6377 				thread->vtimer_user_save = timer_grab(&thread->system_timer);
6378 			}
6379 			thread_unlock(thread);
6380 			splx(x);
6381 		}
6382 		break;
6383 
6384 	case TASK_VTIMER_PROF:
6385 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
6386 			x = splsched();
6387 			thread_lock(thread);
6388 			thread->vtimer_prof_save = timer_grab(&thread->user_timer);
6389 			thread->vtimer_prof_save += timer_grab(&thread->system_timer);
6390 			thread_unlock(thread);
6391 			splx(x);
6392 		}
6393 		break;
6394 
6395 	case TASK_VTIMER_RLIM:
6396 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
6397 			x = splsched();
6398 			thread_lock(thread);
6399 			thread->vtimer_rlim_save = timer_grab(&thread->user_timer);
6400 			thread->vtimer_rlim_save += timer_grab(&thread->system_timer);
6401 			thread_unlock(thread);
6402 			splx(x);
6403 		}
6404 		break;
6405 	}
6406 
6407 	task_unlock(task);
6408 }
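/*
 * Context (hedged assumption): the BSD layer is the expected caller of the
 * vtimer routines, e.g. setitimer(ITIMER_VIRTUAL) maps to TASK_VTIMER_USER,
 * setitimer(ITIMER_PROF) to TASK_VTIMER_PROF, and the RLIMIT_CPU limit to
 * TASK_VTIMER_RLIM.
 */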
6409 
6410 void
6411 task_vtimer_clear(
6412 	task_t          task,
6413 	integer_t       which)
6414 {
6415 	assert(task == current_task());
6416 
6417 	task_lock(task);
6418 
6419 	task->vtimers &= ~which;
6420 
6421 	task_unlock(task);
6422 }
6423 
6424 void
6425 task_vtimer_update(
6426 	__unused
6427 	task_t          task,
6428 	integer_t       which,
6429 	uint32_t        *microsecs)
6430 {
6431 	thread_t        thread = current_thread();
6432 	uint32_t        tdelt = 0;
6433 	clock_sec_t     secs = 0;
6434 	uint64_t        tsum;
6435 
6436 	assert(task == current_task());
6437 
6438 	spl_t s = splsched();
6439 	thread_lock(thread);
6440 
6441 	if ((task->vtimers & which) != (uint32_t)which) {
6442 		thread_unlock(thread);
6443 		splx(s);
6444 		return;
6445 	}
6446 
6447 	switch (which) {
6448 	case TASK_VTIMER_USER:
6449 		if (thread->precise_user_kernel_time) {
6450 			tdelt = (uint32_t)timer_delta(&thread->user_timer,
6451 			    &thread->vtimer_user_save);
6452 		} else {
6453 			tdelt = (uint32_t)timer_delta(&thread->system_timer,
6454 			    &thread->vtimer_user_save);
6455 		}
6456 		absolutetime_to_microtime(tdelt, &secs, microsecs);
6457 		break;
6458 
6459 	case TASK_VTIMER_PROF:
6460 		tsum = timer_grab(&thread->user_timer);
6461 		tsum += timer_grab(&thread->system_timer);
6462 		tdelt = (uint32_t)(tsum - thread->vtimer_prof_save);
6463 		absolutetime_to_microtime(tdelt, &secs, microsecs);
6464 		/* if the time delta is smaller than a usec, ignore */
6465 		if (*microsecs != 0) {
6466 			thread->vtimer_prof_save = tsum;
6467 		}
6468 		break;
6469 
6470 	case TASK_VTIMER_RLIM:
6471 		tsum = timer_grab(&thread->user_timer);
6472 		tsum += timer_grab(&thread->system_timer);
6473 		tdelt = (uint32_t)(tsum - thread->vtimer_rlim_save);
6474 		thread->vtimer_rlim_save = tsum;
6475 		absolutetime_to_microtime(tdelt, &secs, microsecs);
6476 		break;
6477 	}
6478 
6479 	thread_unlock(thread);
6480 	splx(s);
6481 }
6482 
6483 /*
6484  *	task_assign:
6485  *
6486  *	Change the assigned processor set for the task
6487  */
6488 kern_return_t
6489 task_assign(
6490 	__unused task_t         task,
6491 	__unused processor_set_t        new_pset,
6492 	__unused boolean_t      assign_threads)
6493 {
6494 	return KERN_FAILURE;
6495 }
6496 
6497 /*
6498  *	task_assign_default:
6499  *
6500  *	Version of task_assign to assign to default processor set.
6501  */
6502 kern_return_t
6503 task_assign_default(
6504 	task_t          task,
6505 	boolean_t       assign_threads)
6506 {
6507 	return task_assign(task, &pset0, assign_threads);
6508 }
6509 
6510 /*
6511  *	task_get_assignment
6512  *
6513  *	Return name of processor set that task is assigned to.
6514  */
6515 kern_return_t
6516 task_get_assignment(
6517 	task_t          task,
6518 	processor_set_t *pset)
6519 {
6520 	if (!task || !task->active) {
6521 		return KERN_FAILURE;
6522 	}
6523 
6524 	*pset = &pset0;
6525 
6526 	return KERN_SUCCESS;
6527 }
6528 
6529 uint64_t
6530 get_task_dispatchqueue_offset(
6531 	task_t          task)
6532 {
6533 	return task->dispatchqueue_offset;
6534 }
6535 
6536 /*
6537  *      task_policy
6538  *
6539  *	Set scheduling policy and parameters, both base and limit, for
6540  *	the given task. Policy must be a policy which is enabled for the
6541  *	processor set. Change contained threads if requested.
6542  */
6543 kern_return_t
6544 task_policy(
6545 	__unused task_t                 task,
6546 	__unused policy_t                       policy_id,
6547 	__unused policy_base_t          base,
6548 	__unused mach_msg_type_number_t count,
6549 	__unused boolean_t                      set_limit,
6550 	__unused boolean_t                      change)
6551 {
6552 	return KERN_FAILURE;
6553 }
6554 
6555 /*
6556  *	task_set_policy
6557  *
6558  *	Set scheduling policy and parameters, both base and limit, for
6559  *	the given task. Policy can be any policy implemented by the
6560  *	processor set, whether enabled or not. Change contained threads
6561  *	if requested.
6562  */
6563 kern_return_t
6564 task_set_policy(
6565 	__unused task_t                 task,
6566 	__unused processor_set_t                pset,
6567 	__unused policy_t                       policy_id,
6568 	__unused policy_base_t          base,
6569 	__unused mach_msg_type_number_t base_count,
6570 	__unused policy_limit_t         limit,
6571 	__unused mach_msg_type_number_t limit_count,
6572 	__unused boolean_t                      change)
6573 {
6574 	return KERN_FAILURE;
6575 }
6576 
6577 kern_return_t
6578 task_set_ras_pc(
6579 	__unused task_t task,
6580 	__unused vm_offset_t    pc,
6581 	__unused vm_offset_t    endpc)
6582 {
6583 	return KERN_FAILURE;
6584 }
6585 
6586 void
6587 task_synchronizer_destroy_all(task_t task)
6588 {
6589 	/*
6590 	 *  Destroy owned semaphores
6591 	 */
6592 	semaphore_destroy_all(task);
6593 }
6594 
6595 /*
6596  * Install default (machine-dependent) initial thread state
6597  * on the task.  Subsequent thread creation will have this initial
6598  * state set on the thread by machine_thread_inherit_taskwide().
6599  * Flavors and structures are exactly the same as those passed to thread_set_state().
6600  */
6601 kern_return_t
6602 task_set_state(
6603 	task_t task,
6604 	int flavor,
6605 	thread_state_t state,
6606 	mach_msg_type_number_t state_count)
6607 {
6608 	kern_return_t ret;
6609 
6610 	if (task == TASK_NULL) {
6611 		return KERN_INVALID_ARGUMENT;
6612 	}
6613 
6614 	task_lock(task);
6615 
6616 	if (!task->active) {
6617 		task_unlock(task);
6618 		return KERN_FAILURE;
6619 	}
6620 
6621 	ret = machine_task_set_state(task, flavor, state, state_count);
6622 
6623 	task_unlock(task);
6624 	return ret;
6625 }
6626 
6627 /*
6628  * Examine the default (machine-dependent) initial thread state
6629  * on the task, as set by task_set_state().  Flavors and structures
6630  * are exactly the same as those passed to thread_get_state().
6631  */
6632 kern_return_t
6633 task_get_state(
6634 	task_t  task,
6635 	int     flavor,
6636 	thread_state_t state,
6637 	mach_msg_type_number_t *state_count)
6638 {
6639 	kern_return_t ret;
6640 
6641 	if (task == TASK_NULL) {
6642 		return KERN_INVALID_ARGUMENT;
6643 	}
6644 
6645 	task_lock(task);
6646 
6647 	if (!task->active) {
6648 		task_unlock(task);
6649 		return KERN_FAILURE;
6650 	}
6651 
6652 	ret = machine_task_get_state(task, flavor, state, state_count);
6653 
6654 	task_unlock(task);
6655 	return ret;
6656 }
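/*
 * Illustrative sketch, not part of this file: one way a kernel caller
 * might round-trip the default thread state through task_get_state()
 * and task_set_state() above. The flavor value and the state layout
 * are machine-dependent; "example_copy_default_state" is a
 * hypothetical helper, and THREAD_STATE_MAX is assumed from
 * <mach/thread_status.h>.
 */
#if 0
static kern_return_t
example_copy_default_state(task_t src, task_t dst, int flavor)
{
	natural_t state[THREAD_STATE_MAX];
	mach_msg_type_number_t count = THREAD_STATE_MAX;
	kern_return_t kr;

	/* read the default register state recorded on the source task */
	kr = task_get_state(src, flavor, (thread_state_t)state, &count);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* install it as the default for threads later created in dst */
	return task_set_state(dst, flavor, (thread_state_t)state, count);
}
#endif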
6657 
6658 
6659 static kern_return_t __attribute__((noinline, not_tail_called))
6660 PROC_VIOLATED_GUARD__SEND_EXC_GUARD_AND_SUSPEND(
6661 	mach_exception_code_t code,
6662 	mach_exception_subcode_t subcode,
6663 	void *reason)
6664 {
6665 #ifdef MACH_BSD
6666 	if (1 == proc_selfpid()) {
6667 		return KERN_NOT_SUPPORTED;              // initproc is immune
6668 	}
6669 #endif
6670 	mach_exception_data_type_t codes[EXCEPTION_CODE_MAX] = {
6671 		[0] = code,
6672 		[1] = subcode,
6673 	};
6674 	task_t task = current_task();
6675 	kern_return_t kr;
6676 
6677 	/* (See jetsam-related comments below) */
6678 
6679 	proc_memstat_skip(task->bsd_info, TRUE);
6680 	kr = task_enqueue_exception_with_corpse(task, EXC_GUARD, codes, 2, reason);
6681 	proc_memstat_skip(task->bsd_info, FALSE);
6682 	return kr;
6683 }
6684 
6685 kern_return_t
6686 task_violated_guard(
6687 	mach_exception_code_t code,
6688 	mach_exception_subcode_t subcode,
6689 	void *reason)
6690 {
6691 	return PROC_VIOLATED_GUARD__SEND_EXC_GUARD_AND_SUSPEND(code, subcode, reason);
6692 }
6693 
6694 
6695 #if CONFIG_MEMORYSTATUS
6696 
6697 boolean_t
6698 task_get_memlimit_is_active(task_t task)
6699 {
6700 	assert(task != NULL);
6701 
6702 	if (task->memlimit_is_active == 1) {
6703 		return TRUE;
6704 	} else {
6705 		return FALSE;
6706 	}
6707 }
6708 
6709 void
6710 task_set_memlimit_is_active(task_t task, boolean_t memlimit_is_active)
6711 {
6712 	assert(task != NULL);
6713 
6714 	if (memlimit_is_active) {
6715 		task->memlimit_is_active = 1;
6716 	} else {
6717 		task->memlimit_is_active = 0;
6718 	}
6719 }
6720 
6721 boolean_t
6722 task_get_memlimit_is_fatal(task_t task)
6723 {
6724 	assert(task != NULL);
6725 
6726 	if (task->memlimit_is_fatal == 1) {
6727 		return TRUE;
6728 	} else {
6729 		return FALSE;
6730 	}
6731 }
6732 
6733 void
6734 task_set_memlimit_is_fatal(task_t task, boolean_t memlimit_is_fatal)
6735 {
6736 	assert(task != NULL);
6737 
6738 	if (memlimit_is_fatal) {
6739 		task->memlimit_is_fatal = 1;
6740 	} else {
6741 		task->memlimit_is_fatal = 0;
6742 	}
6743 }
6744 
6745 uint64_t
6746 task_get_dirty_start(task_t task)
6747 {
6748 	return task->memstat_dirty_start;
6749 }
6750 
6751 void
6752 task_set_dirty_start(task_t task, uint64_t start)
6753 {
6754 	task_lock(task);
6755 	task->memstat_dirty_start = start;
6756 	task_unlock(task);
6757 }
6758 
6759 boolean_t
6760 task_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active)
6761 {
6762 	boolean_t triggered = FALSE;
6763 
6764 	assert(task == current_task());
6765 
6766 	/*
6767 	 * Returns true if the task has already triggered an exc_resource exception.
6768 	 */
6769 
6770 	if (memlimit_is_active) {
6771 		triggered = (task->memlimit_active_exc_resource ? TRUE : FALSE);
6772 	} else {
6773 		triggered = (task->memlimit_inactive_exc_resource ? TRUE : FALSE);
6774 	}
6775 
6776 	return triggered;
6777 }
6778 
6779 void
6780 task_mark_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active)
6781 {
6782 	assert(task == current_task());
6783 
6784 	/*
6785 	 * We allow one exc_resource per process per active/inactive limit.
6786 	 * The limit's fatal attribute does not come into play.
6787 	 */
6788 
6789 	if (memlimit_is_active) {
6790 		task->memlimit_active_exc_resource = 1;
6791 	} else {
6792 		task->memlimit_inactive_exc_resource = 1;
6793 	}
6794 }
6795 
6796 #define HWM_USERCORE_MINSPACE 250 // free space (in MB) required *after* core file creation
6797 
6798 void __attribute__((noinline))
6799 PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, boolean_t is_fatal)
6800 {
6801 	task_t                                          task            = current_task();
6802 	int                                                     pid         = 0;
6803 	const char                                      *procname       = "unknown";
6804 	mach_exception_data_type_t      code[EXCEPTION_CODE_MAX];
6805 	boolean_t send_sync_exc_resource = FALSE;
6806 
6807 #ifdef MACH_BSD
6808 	pid = proc_selfpid();
6809 
6810 	if (pid == 1) {
6811 		/*
6812 		 * Cannot have ReportCrash analyzing
6813 		 * a suspended initproc.
6814 		 */
6815 		return;
6816 	}
6817 
6818 	if (task->bsd_info != NULL) {
6819 		procname = proc_name_address(current_task()->bsd_info);
6820 		send_sync_exc_resource = proc_send_synchronous_EXC_RESOURCE(current_task()->bsd_info);
6821 	}
6822 #endif
6823 #if CONFIG_COREDUMP
6824 	if (hwm_user_cores) {
6825 		int                             error;
6826 		uint64_t                starttime, end;
6827 		clock_sec_t             secs = 0;
6828 		uint32_t                microsecs = 0;
6829 
6830 		starttime = mach_absolute_time();
6831 		/*
6832 		 * Trigger a coredump of this process. Don't proceed unless we know we won't
6833 		 * be filling up the disk; and ignore the core size resource limit for this
6834 		 * core file.
6835 		 */
6836 		if ((error = coredump(current_task()->bsd_info, HWM_USERCORE_MINSPACE, COREDUMP_IGNORE_ULIMIT)) != 0) {
6837 			printf("couldn't take coredump of %s[%d]: %d\n", procname, pid, error);
6838 		}
6839 		/*
6840 		 * coredump() leaves the task suspended.
6841 		 */
6842 		task_resume_internal(current_task());
6843 
6844 		end = mach_absolute_time();
6845 		absolutetime_to_microtime(end - starttime, &secs, &microsecs);
6846 		printf("coredump of %s[%d] taken in %d secs %d microsecs\n",
6847 		    proc_name_address(current_task()->bsd_info), pid, (int)secs, microsecs);
6848 	}
6849 #endif /* CONFIG_COREDUMP */
6850 
6851 	if (disable_exc_resource) {
6852 		printf("process %s[%d] crossed memory high watermark (%d MB); EXC_RESOURCE "
6853 		    "suppressed by a boot-arg.\n", procname, pid, max_footprint_mb);
6854 		return;
6855 	}
6856 
6857 	/*
6858 	 * A task that has triggered an EXC_RESOURCE should not be
6859 	 * jetsammed when the device is under memory pressure.  Here
6860 	 * we set the P_MEMSTAT_SKIP flag so that the process
6861 	 * will be skipped if the memorystatus_thread wakes up.
6862 	 *
6863 	 * This is a debugging aid to ensure we can get a corpse before
6864 	 * the jetsam thread kills the process.
6865 	 * Note that proc_memstat_skip is a no-op on release kernels.
6866 	 */
6867 	proc_memstat_skip(current_task()->bsd_info, TRUE);
6868 
6869 	code[0] = code[1] = 0;
6870 	EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_MEMORY);
6871 	EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_HIGH_WATERMARK);
6872 	EXC_RESOURCE_HWM_ENCODE_LIMIT(code[0], max_footprint_mb);
6873 
6874 	/*
6875 	 * Do not generate a corpse fork if the violation is a fatal one
6876 	 * or the process wants synchronous EXC_RESOURCE exceptions.
6877 	 */
6878 	if (is_fatal || send_sync_exc_resource || !exc_via_corpse_forking) {
6879 		/* Do not send an EXC_RESOURCE if corpse_for_fatal_memkill is set */
6880 		if (send_sync_exc_resource || !corpse_for_fatal_memkill) {
6881 			/*
6882 			 * Use the _internal_ variant so that no user-space
6883 			 * process can resume our task from under us.
6884 			 */
6885 			task_suspend_internal(task);
6886 			exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
6887 			task_resume_internal(task);
6888 		}
6889 	} else {
6890 		if (audio_active) {
6891 			printf("process %s[%d] crossed memory high watermark (%d MB); EXC_RESOURCE "
6892 			    "suppressed due to audio playback.\n", procname, pid, max_footprint_mb);
6893 		} else {
6894 			task_enqueue_exception_with_corpse(task, EXC_RESOURCE,
6895 			    code, EXCEPTION_CODE_MAX, NULL);
6896 		}
6897 	}
6898 
6899 	/*
6900 	 * After the EXC_RESOURCE has been handled, we must clear the
6901 	 * P_MEMSTAT_SKIP flag so that the process can again be
6902 	 * considered for jetsam if the memorystatus_thread wakes up.
6903 	 */
6904 	proc_memstat_skip(current_task()->bsd_info, FALSE);         /* clear the flag */
6905 }
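/*
 * Illustrative sketch, not part of this file: code[0] assembled above
 * can be unpacked with the decode counterparts of the encode macros
 * (EXC_RESOURCE_DECODE_RESOURCE_TYPE and friends, declared alongside
 * the encoders); "example_decode_hwm_code" is a hypothetical helper.
 */
#if 0
static void
example_decode_hwm_code(mach_exception_data_type_t code0)
{
	/* RESOURCE_TYPE_MEMORY for the high-watermark case above */
	uint64_t type = EXC_RESOURCE_DECODE_RESOURCE_TYPE(code0);
	/* FLAVOR_HIGH_WATERMARK */
	uint64_t flavor = EXC_RESOURCE_DECODE_FLAVOR(code0);
	/* the footprint limit, in MB, that was crossed */
	uint64_t limit_mb = EXC_RESOURCE_HWM_DECODE_LIMIT(code0);

	printf("EXC_RESOURCE type %llu flavor %llu limit %llu MB\n",
	    type, flavor, limit_mb);
}
#endif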
6906 
6907 /*
6908  * Callback invoked when a task exceeds its physical footprint limit.
6909  */
6910 void
6911 task_footprint_exceeded(int warning, __unused const void *param0, __unused const void *param1)
6912 {
6913 	ledger_amount_t max_footprint, max_footprint_mb;
6914 	task_t task;
6915 	boolean_t is_warning;
6916 	boolean_t memlimit_is_active;
6917 	boolean_t memlimit_is_fatal;
6918 
6919 	if (warning == LEDGER_WARNING_DIPPED_BELOW) {
6920 		/*
6921 		 * Task memory limits only provide a warning on the way up.
6922 		 */
6923 		return;
6924 	} else if (warning == LEDGER_WARNING_ROSE_ABOVE) {
6925 		/*
6926 		 * This task is in danger of violating a memory limit:
6927 		 * it has exceeded a percentage level of the limit.
6928 		 */
6929 		is_warning = TRUE;
6930 	} else {
6931 		/*
6932 		 * The task has exceeded the physical footprint limit.
6933 		 * This is not a warning but a true limit violation.
6934 		 */
6935 		is_warning = FALSE;
6936 	}
6937 
6938 	task = current_task();
6939 
6940 	ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &max_footprint);
6941 	max_footprint_mb = max_footprint >> 20;
6942 
6943 	memlimit_is_active = task_get_memlimit_is_active(task);
6944 	memlimit_is_fatal = task_get_memlimit_is_fatal(task);
6945 
6946 	/*
6947 	 * If this is an actual violation (not a warning), then generate EXC_RESOURCE exception.
6948 	 * We only generate the exception once per process per memlimit (active/inactive limit).
6949 	 * To enforce this, we monitor state based on the memlimit's active/inactive attribute
6950 	 * and we disable it by marking that memlimit as exception triggered.
6951 	 */
6952 	if ((is_warning == FALSE) && (!task_has_triggered_exc_resource(task, memlimit_is_active))) {
6953 		PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND((int)max_footprint_mb, memlimit_is_fatal);
6954 		memorystatus_log_exception((int)max_footprint_mb, memlimit_is_active, memlimit_is_fatal);
6955 		task_mark_has_triggered_exc_resource(task, memlimit_is_active);
6956 	}
6957 
6958 	memorystatus_on_ledger_footprint_exceeded(is_warning, memlimit_is_active, memlimit_is_fatal);
6959 }
6960 
6961 extern int proc_check_footprint_priv(void);
6962 
6963 kern_return_t
6964 task_set_phys_footprint_limit(
6965 	task_t task,
6966 	int new_limit_mb,
6967 	int *old_limit_mb)
6968 {
6969 	kern_return_t error;
6970 
6971 	boolean_t memlimit_is_active;
6972 	boolean_t memlimit_is_fatal;
6973 
6974 	if ((error = proc_check_footprint_priv())) {
6975 		return KERN_NO_ACCESS;
6976 	}
6977 
6978 	/*
6979 	 * This call should probably be obsoleted.
6980 	 * But for now, we default to current state.
6981 	 */
6982 	memlimit_is_active = task_get_memlimit_is_active(task);
6983 	memlimit_is_fatal = task_get_memlimit_is_fatal(task);
6984 
6985 	return task_set_phys_footprint_limit_internal(task, new_limit_mb, old_limit_mb, memlimit_is_active, memlimit_is_fatal);
6986 }
6987 
6988 kern_return_t
6989 task_convert_phys_footprint_limit(
6990 	int limit_mb,
6991 	int *converted_limit_mb)
6992 {
6993 	if (limit_mb == -1) {
6994 		/*
6995 		 * No limit
6996 		 */
6997 		if (max_task_footprint != 0) {
6998 			*converted_limit_mb = (int)(max_task_footprint / 1024 / 1024);         /* bytes to MB */
6999 		} else {
7000 			*converted_limit_mb = (int)(LEDGER_LIMIT_INFINITY >> 20);
7001 		}
7002 	} else {
7003 		/* nothing to convert */
7004 		*converted_limit_mb = limit_mb;
7005 	}
7006 	return KERN_SUCCESS;
7007 }
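/*
 * Illustrative sketch, not part of this file: -1 MB is the "no limit"
 * convention; it converts to the boot-arg default when
 * max_task_footprint is set, else to the ledger infinity expressed in
 * MB. "example_effective_limit_mb" is a hypothetical helper.
 */
#if 0
static int
example_effective_limit_mb(void)
{
	int mb = 0;

	/* always returns KERN_SUCCESS */
	(void)task_convert_phys_footprint_limit(-1, &mb);
	return mb;	/* the effective "unlimited" value, in MB */
}
#endif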
7008 
7009 
7010 kern_return_t
7011 task_set_phys_footprint_limit_internal(
7012 	task_t task,
7013 	int new_limit_mb,
7014 	int *old_limit_mb,
7015 	boolean_t memlimit_is_active,
7016 	boolean_t memlimit_is_fatal)
7017 {
7018 	ledger_amount_t old;
7019 	kern_return_t ret;
7020 
7021 	ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &old);
7022 
7023 	if (ret != KERN_SUCCESS) {
7024 		return ret;
7025 	}
7026 
7027 	/*
7028 	 * Check that limit >> 20 will not give an "unexpected" 32-bit
7029 	 * result. There are, however, implicit assumptions that -1 mb limit
7030 	 * equates to LEDGER_LIMIT_INFINITY.
7031 	 */
7032 	assert(((old & 0xFFF0000000000000LL) == 0) || (old == LEDGER_LIMIT_INFINITY));
7033 
7034 	if (old_limit_mb) {
7035 		*old_limit_mb = (int)(old >> 20);
7036 	}
7037 
7038 	if (new_limit_mb == -1) {
7039 		/*
7040 		 * Caller wishes to remove the limit.
7041 		 */
7042 		ledger_set_limit(task->ledger, task_ledgers.phys_footprint,
7043 		    max_task_footprint ? max_task_footprint : LEDGER_LIMIT_INFINITY,
7044 		    max_task_footprint ? (uint8_t)max_task_footprint_warning_level : 0);
7045 
7046 		task_lock(task);
7047 		task_set_memlimit_is_active(task, memlimit_is_active);
7048 		task_set_memlimit_is_fatal(task, memlimit_is_fatal);
7049 		task_unlock(task);
7050 
7051 		return KERN_SUCCESS;
7052 	}
7053 
7054 #ifdef CONFIG_NOMONITORS
7055 	return KERN_SUCCESS;
7056 #endif /* CONFIG_NOMONITORS */
7057 
7058 	task_lock(task);
7059 
7060 	if ((memlimit_is_active == task_get_memlimit_is_active(task)) &&
7061 	    (memlimit_is_fatal == task_get_memlimit_is_fatal(task)) &&
7062 	    (((ledger_amount_t)new_limit_mb << 20) == old)) {
7063 		/*
7064 		 * memlimit state is not changing
7065 		 */
7066 		task_unlock(task);
7067 		return KERN_SUCCESS;
7068 	}
7069 
7070 	task_set_memlimit_is_active(task, memlimit_is_active);
7071 	task_set_memlimit_is_fatal(task, memlimit_is_fatal);
7072 
7073 	ledger_set_limit(task->ledger, task_ledgers.phys_footprint,
7074 	    (ledger_amount_t)new_limit_mb << 20, PHYS_FOOTPRINT_WARNING_LEVEL);
7075 
7076 	if (task == current_task()) {
7077 		ledger_check_new_balance(current_thread(), task->ledger,
7078 		    task_ledgers.phys_footprint);
7079 	}
7080 
7081 	task_unlock(task);
7082 
7083 	return KERN_SUCCESS;
7084 }
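/*
 * Illustrative sketch, not part of this file: raising a task's
 * footprint limit while preserving its current active/fatal
 * attributes, saving the old limit so it can be restored later.
 * "example_bump_footprint_limit" is a hypothetical helper.
 */
#if 0
static kern_return_t
example_bump_footprint_limit(task_t task, int new_limit_mb, int *saved_mb)
{
	/* passing -1 instead would remove the limit, per above */
	return task_set_phys_footprint_limit_internal(task, new_limit_mb,
	           saved_mb,
	           task_get_memlimit_is_active(task),
	           task_get_memlimit_is_fatal(task));
}
#endif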
7085 
7086 kern_return_t
7087 task_get_phys_footprint_limit(
7088 	task_t task,
7089 	int *limit_mb)
7090 {
7091 	ledger_amount_t limit;
7092 	kern_return_t ret;
7093 
7094 	ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &limit);
7095 	if (ret != KERN_SUCCESS) {
7096 		return ret;
7097 	}
7098 
7099 	/*
7100 	 * Check that limit >> 20 will not give an "unexpected" signed, 32-bit
7101 	 * result. There are, however, implicit assumptions that -1 mb limit
7102 	 * equates to LEDGER_LIMIT_INFINITY.
7103 	 */
7104 	assert(((limit & 0xFFF0000000000000LL) == 0) || (limit == LEDGER_LIMIT_INFINITY));
7105 	*limit_mb = (int)(limit >> 20);
7106 
7107 	return KERN_SUCCESS;
7108 }
7109 #else /* CONFIG_MEMORYSTATUS */
7110 kern_return_t
7111 task_set_phys_footprint_limit(
7112 	__unused task_t task,
7113 	__unused int new_limit_mb,
7114 	__unused int *old_limit_mb)
7115 {
7116 	return KERN_FAILURE;
7117 }
7118 
7119 kern_return_t
7120 task_get_phys_footprint_limit(
7121 	__unused task_t task,
7122 	__unused int *limit_mb)
7123 {
7124 	return KERN_FAILURE;
7125 }
7126 #endif /* CONFIG_MEMORYSTATUS */
7127 
7128 security_token_t *
7129 task_get_sec_token(task_t task)
7130 {
7131 	return &task_get_ro(task)->task_tokens.sec_token;
7132 }
7133 
7134 void
7135 task_set_sec_token(task_t task, security_token_t *token)
7136 {
7137 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7138 	    task_tokens.sec_token, token);
7139 }
7140 
7141 audit_token_t *
7142 task_get_audit_token(task_t task)
7143 {
7144 	return &task_get_ro(task)->task_tokens.audit_token;
7145 }
7146 
7147 void
7148 task_set_audit_token(task_t task, audit_token_t *token)
7149 {
7150 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7151 	    task_tokens.audit_token, token);
7152 }
7153 
7154 void
7155 task_set_tokens(task_t task, security_token_t *sec_token, audit_token_t *audit_token)
7156 {
7157 	struct task_token_ro_data tokens;
7158 
7159 	tokens = task_get_ro(task)->task_tokens;
7160 	tokens.sec_token = *sec_token;
7161 	tokens.audit_token = *audit_token;
7162 
7163 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task), task_tokens,
7164 	    &tokens);
7165 }
7166 
7167 boolean_t
7168 task_is_privileged(task_t task)
7169 {
7170 	return task_get_sec_token(task)->val[0] == 0;
7171 }
7172 
7173 #ifdef CONFIG_MACF
7174 uint8_t *
7175 task_get_mach_trap_filter_mask(task_t task)
7176 {
7177 	return task_get_ro(task)->task_filters.mach_trap_filter_mask;
7178 }
7179 
7180 void
7181 task_set_mach_trap_filter_mask(task_t task, uint8_t *mask)
7182 {
7183 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7184 	    task_filters.mach_trap_filter_mask, &mask);
7185 }
7186 
7187 uint8_t *
7188 task_get_mach_kobj_filter_mask(task_t task)
7189 {
7190 	return task_get_ro(task)->task_filters.mach_kobj_filter_mask;
7191 }
7192 
7193 void
7194 task_set_mach_kobj_filter_mask(task_t task, uint8_t *mask)
7195 {
7196 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7197 	    task_filters.mach_kobj_filter_mask, &mask);
7198 }
7199 
7200 void
7201 task_copy_filter_masks(task_t new_task, task_t old_task)
7202 {
7203 	struct task_filter_ro_data filters;
7204 
7205 	filters = task_get_ro(new_task)->task_filters;
7206 	filters.mach_trap_filter_mask = task_get_mach_trap_filter_mask(old_task);
7207 	filters.mach_kobj_filter_mask = task_get_mach_kobj_filter_mask(old_task);
7208 
7209 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(new_task),
7210 	    task_filters, &filters);
7211 }
7212 #endif /* CONFIG_MACF */
7213 
7214 void
7215 task_set_thread_limit(task_t task, uint16_t thread_limit)
7216 {
7217 	assert(task != kernel_task);
7218 	if (thread_limit <= TASK_MAX_THREAD_LIMIT) {
7219 		task_lock(task);
7220 		task->task_thread_limit = thread_limit;
7221 		task_unlock(task);
7222 	}
7223 }
7224 
7225 #if CONFIG_PROC_RESOURCE_LIMITS
7226 kern_return_t
7227 task_set_port_space_limits(task_t task, uint32_t soft_limit, uint32_t hard_limit)
7228 {
7229 	return ipc_space_set_table_size_limits(task->itk_space, soft_limit, hard_limit);
7230 }
7231 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
7232 
7233 #if XNU_TARGET_OS_OSX
7234 boolean_t
7235 task_has_system_version_compat_enabled(task_t task)
7236 {
7237 	boolean_t enabled = FALSE;
7238 
7239 	task_lock(task);
7240 	enabled = (task->t_flags & TF_SYS_VERSION_COMPAT);
7241 	task_unlock(task);
7242 
7243 	return enabled;
7244 }
7245 
7246 void
7247 task_set_system_version_compat_enabled(task_t task, boolean_t enable_system_version_compat)
7248 {
7249 	assert(task == current_task());
7250 	assert(task != kernel_task);
7251 
7252 	task_lock(task);
7253 	if (enable_system_version_compat) {
7254 		task->t_flags |= TF_SYS_VERSION_COMPAT;
7255 	} else {
7256 		task->t_flags &= ~TF_SYS_VERSION_COMPAT;
7257 	}
7258 	task_unlock(task);
7259 }
7260 #endif /* XNU_TARGET_OS_OSX */
7261 
7262 /*
7263  * We need to export some functions to other components that
7264  * are currently implemented in macros within the osfmk
7265  * component.  Just export them as functions of the same name.
7266  */
7267 boolean_t
7268 is_kerneltask(task_t t)
7269 {
7270 	if (t == kernel_task) {
7271 		return TRUE;
7272 	}
7273 
7274 	return FALSE;
7275 }
7276 
7277 boolean_t
7278 is_corpsetask(task_t t)
7279 {
7280 	return task_is_a_corpse(t);
7281 }
7282 
7283 boolean_t
7284 is_corpsefork(task_t t)
7285 {
7286 	return task_is_a_corpse_fork(t);
7287 }
7288 
7289 task_t
7290 current_task_early(void)
7291 {
7292 	if (__improbable(startup_phase < STARTUP_SUB_EARLY_BOOT)) {
7293 		if (current_thread()->t_tro == NULL) {
7294 			return TASK_NULL;
7295 		}
7296 	}
7297 	return get_threadtask(current_thread());
7298 }
7299 
7300 task_t
7301 current_task(void)
7302 {
7303 	return get_threadtask(current_thread());
7304 }
7305 
7306 /* defined in bsd/kern/kern_prot.c */
7307 extern int get_audit_token_pid(audit_token_t *audit_token);
7308 
7309 int
7310 task_pid(task_t task)
7311 {
7312 	if (task) {
7313 		return get_audit_token_pid(task_get_audit_token(task));
7314 	}
7315 	return -1;
7316 }
7317 
7318 #if __has_feature(ptrauth_calls)
7319 /*
7320  * Get the shared region id and jop signing key for the task.
7321  * The function will allocate a kalloc buffer and return
7322  * it to caller, the caller needs to free it. This is used
7323  * for getting the information via task port.
7324  */
7325 char *
7326 task_get_vm_shared_region_id_and_jop_pid(task_t task, uint64_t *jop_pid)
7327 {
7328 	size_t len;
7329 	char *shared_region_id = NULL;
7330 
7331 	task_lock(task);
7332 	if (task->shared_region_id == NULL) {
7333 		task_unlock(task);
7334 		return NULL;
7335 	}
7336 	len = strlen(task->shared_region_id) + 1;
7337 
7338 	/* don't hold task lock while allocating */
7339 	task_unlock(task);
7340 	shared_region_id = kalloc_data(len, Z_WAITOK);
7341 	task_lock(task);
7342 
7343 	if (task->shared_region_id == NULL) {
7344 		task_unlock(task);
7345 		kfree_data(shared_region_id, len);
7346 		return NULL;
7347 	}
7348 	assert(len == strlen(task->shared_region_id) + 1);         /* should never change */
7349 	strlcpy(shared_region_id, task->shared_region_id, len);
7350 	task_unlock(task);
7351 
7352 	/* find key from its auth pager */
7353 	if (jop_pid != NULL) {
7354 		*jop_pid = shared_region_find_key(shared_region_id);
7355 	}
7356 
7357 	return shared_region_id;
7358 }
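/*
 * Illustrative sketch, not part of this file: the caller owns the
 * returned buffer and must release it with kfree_data() when done.
 * "example_lookup_jop_pid" is a hypothetical helper.
 */
#if 0
static uint64_t
example_lookup_jop_pid(task_t task)
{
	uint64_t jop_pid = 0;
	char *id = task_get_vm_shared_region_id_and_jop_pid(task, &jop_pid);

	if (id != NULL) {
		kfree_data(id, strlen(id) + 1);	/* free the kalloc'ed copy */
	}
	return jop_pid;	/* 0 if the task had no shared region id */
}
#endif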
7359 
7360 /*
7361  * set the shared region id for a task
7362  */
7363 void
7364 task_set_shared_region_id(task_t task, char *id)
7365 {
7366 	char *old_id;
7367 
7368 	task_lock(task);
7369 	old_id = task->shared_region_id;
7370 	task->shared_region_id = id;
7371 	task->shared_region_auth_remapped = FALSE;
7372 	task_unlock(task);
7373 
7374 	/* free any pre-existing shared region id */
7375 	if (old_id != NULL) {
7376 		shared_region_key_dealloc(old_id);
7377 		kfree_data(old_id, strlen(old_id) + 1);
7378 	}
7379 }
7380 #endif /* __has_feature(ptrauth_calls) */
7381 
7382 /*
7383  * This routine finds a thread in a task by its unique id
7384  * Returns a referenced thread or THREAD_NULL if the thread was not found
7385  *
7386  * TODO: This is super inefficient - it's an O(threads in task) list walk!
7387  *       We should make a tid hash, or transition all tid clients to thread ports
7388  *
7389  * Precondition: No locks held (will take task lock)
7390  */
7391 thread_t
7392 task_findtid(task_t task, uint64_t tid)
7393 {
7394 	thread_t self           = current_thread();
7395 	thread_t found_thread   = THREAD_NULL;
7396 	thread_t iter_thread    = THREAD_NULL;
7397 
7398 	/* Short-circuit the lookup if we're looking up ourselves */
7399 	if (tid == self->thread_id || tid == TID_NULL) {
7400 		assert(get_threadtask(self) == task);
7401 
7402 		thread_reference(self);
7403 
7404 		return self;
7405 	}
7406 
7407 	task_lock(task);
7408 
7409 	queue_iterate(&task->threads, iter_thread, thread_t, task_threads) {
7410 		if (iter_thread->thread_id == tid) {
7411 			found_thread = iter_thread;
7412 			thread_reference(found_thread);
7413 			break;
7414 		}
7415 	}
7416 
7417 	task_unlock(task);
7418 
7419 	return found_thread;
7420 }
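/*
 * Illustrative sketch, not part of this file: task_findtid() returns
 * a referenced thread, so callers must balance it with
 * thread_deallocate(). "example_tid_exists" is a hypothetical helper.
 */
#if 0
static boolean_t
example_tid_exists(task_t task, uint64_t tid)
{
	thread_t thread = task_findtid(task, tid);

	if (thread == THREAD_NULL) {
		return FALSE;
	}
	thread_deallocate(thread);	/* drop the reference taken above */
	return TRUE;
}
#endif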
7421 
7422 int
7423 pid_from_task(task_t task)
7424 {
7425 	int pid = -1;
7426 
7427 	if (task->bsd_info) {
7428 		pid = proc_pid(task->bsd_info);
7429 	} else {
7430 		pid = task_pid(task);
7431 	}
7432 
7433 	return pid;
7434 }
7435 
7436 /*
7437  * Control the CPU usage monitor for a task.
7438  */
7439 kern_return_t
7440 task_cpu_usage_monitor_ctl(task_t task, uint32_t *flags)
7441 {
7442 	int error = KERN_SUCCESS;
7443 
7444 	if (*flags & CPUMON_MAKE_FATAL) {
7445 		task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_FATAL_CPUMON;
7446 	} else {
7447 		error = KERN_INVALID_ARGUMENT;
7448 	}
7449 
7450 	return error;
7451 }
7452 
7453 /*
7454  * Control the wakeups monitor for a task.
7455  */
7456 kern_return_t
7457 task_wakeups_monitor_ctl(task_t task, uint32_t *flags, int32_t *rate_hz)
7458 {
7459 	ledger_t ledger = task->ledger;
7460 
7461 	task_lock(task);
7462 	if (*flags & WAKEMON_GET_PARAMS) {
7463 		ledger_amount_t limit;
7464 		uint64_t                period;
7465 
7466 		ledger_get_limit(ledger, task_ledgers.interrupt_wakeups, &limit);
7467 		ledger_get_period(ledger, task_ledgers.interrupt_wakeups, &period);
7468 
7469 		if (limit != LEDGER_LIMIT_INFINITY) {
7470 			/*
7471 			 * An active limit means the wakeups monitor is enabled.
7472 			 */
7473 			*rate_hz = (int32_t)(limit / (int64_t)(period / NSEC_PER_SEC));
7474 			*flags = WAKEMON_ENABLE;
7475 			if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON) {
7476 				*flags |= WAKEMON_MAKE_FATAL;
7477 			}
7478 		} else {
7479 			*flags = WAKEMON_DISABLE;
7480 			*rate_hz = -1;
7481 		}
7482 
7483 		/*
7484 		 * If WAKEMON_GET_PARAMS is present in flags, all other flags are ignored.
7485 		 */
7486 		task_unlock(task);
7487 		return KERN_SUCCESS;
7488 	}
7489 
7490 	if (*flags & WAKEMON_ENABLE) {
7491 		if (*flags & WAKEMON_SET_DEFAULTS) {
7492 			*rate_hz = task_wakeups_monitor_rate;
7493 		}
7494 
7495 #ifndef CONFIG_NOMONITORS
7496 		if (*flags & WAKEMON_MAKE_FATAL) {
7497 			task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON;
7498 		}
7499 #endif /* CONFIG_NOMONITORS */
7500 
7501 		if (*rate_hz <= 0) {
7502 			task_unlock(task);
7503 			return KERN_INVALID_ARGUMENT;
7504 		}
7505 
7506 #ifndef CONFIG_NOMONITORS
7507 		ledger_set_limit(ledger, task_ledgers.interrupt_wakeups, *rate_hz * task_wakeups_monitor_interval,
7508 		    (uint8_t)task_wakeups_monitor_ustackshots_trigger_pct);
7509 		ledger_set_period(ledger, task_ledgers.interrupt_wakeups, task_wakeups_monitor_interval * NSEC_PER_SEC);
7510 		ledger_enable_callback(ledger, task_ledgers.interrupt_wakeups);
7511 #endif /* CONFIG_NOMONITORS */
7512 	} else if (*flags & WAKEMON_DISABLE) {
7513 		/*
7514 		 * Caller wishes to disable wakeups monitor on the task.
7515 		 *
7516 		 * Disable telemetry if it was triggered by the wakeups monitor, and
7517 		 * remove the limit & callback on the wakeups ledger entry.
7518 		 */
7519 #if CONFIG_TELEMETRY
7520 		telemetry_task_ctl_locked(task, TF_WAKEMON_WARNING, 0);
7521 #endif
7522 		ledger_disable_refill(ledger, task_ledgers.interrupt_wakeups);
7523 		ledger_disable_callback(ledger, task_ledgers.interrupt_wakeups);
7524 	}
7525 
7526 	task_unlock(task);
7527 	return KERN_SUCCESS;
7528 }
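/*
 * Illustrative sketch, not part of this file: enabling the wakeups
 * monitor at the system default rate and making violations fatal,
 * using only the flags handled above. "example_enable_wakemon" is a
 * hypothetical helper.
 */
#if 0
static kern_return_t
example_enable_wakemon(task_t task)
{
	uint32_t flags = WAKEMON_ENABLE | WAKEMON_SET_DEFAULTS | WAKEMON_MAKE_FATAL;
	int32_t rate_hz = 0;	/* replaced by the default via WAKEMON_SET_DEFAULTS */

	return task_wakeups_monitor_ctl(task, &flags, &rate_hz);
}
#endif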
7529 
7530 void
7531 task_wakeups_rate_exceeded(int warning, __unused const void *param0, __unused const void *param1)
7532 {
7533 	if (warning == LEDGER_WARNING_ROSE_ABOVE) {
7534 #if CONFIG_TELEMETRY
7535 		/*
7536 		 * This task is in danger of violating the wakeups monitor. Enable telemetry on this task
7537 		 * so there are micro-stackshots available if and when EXC_RESOURCE is triggered.
7538 		 */
7539 		telemetry_task_ctl(current_task(), TF_WAKEMON_WARNING, 1);
7540 #endif
7541 		return;
7542 	}
7543 
7544 #if CONFIG_TELEMETRY
7545 	/*
7546 	 * If the balance has dipped below the warning level (LEDGER_WARNING_DIPPED_BELOW) or
7547 	 * exceeded the limit, turn telemetry off for the task.
7548 	 */
7549 	telemetry_task_ctl(current_task(), TF_WAKEMON_WARNING, 0);
7550 #endif
7551 
7552 	if (warning == 0) {
7553 		SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS();
7554 	}
7555 }
7556 
7557 void __attribute__((noinline))
7558 SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void)
7559 {
7560 	task_t                      task        = current_task();
7561 	int                         pid         = 0;
7562 	const char                  *procname   = "unknown";
7563 	boolean_t                   fatal;
7564 	kern_return_t               kr;
7565 #ifdef EXC_RESOURCE_MONITORS
7566 	mach_exception_data_type_t  code[EXCEPTION_CODE_MAX];
7567 #endif /* EXC_RESOURCE_MONITORS */
7568 	struct ledger_entry_info    lei;
7569 
7570 #ifdef MACH_BSD
7571 	pid = proc_selfpid();
7572 	if (task->bsd_info != NULL) {
7573 		procname = proc_name_address(current_task()->bsd_info);
7574 	}
7575 #endif
7576 
7577 	ledger_get_entry_info(task->ledger, task_ledgers.interrupt_wakeups, &lei);
7578 
7579 	/*
7580 	 * Disable the exception notification so we don't overwhelm
7581 	 * the listener with an endless stream of redundant exceptions.
7582 	 * TODO: detect whether another thread is already reporting the violation.
7583 	 */
7584 	uint32_t flags = WAKEMON_DISABLE;
7585 	task_wakeups_monitor_ctl(task, &flags, NULL);
7586 
7587 	fatal = task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON;
7588 	trace_resource_violation(RMON_CPUWAKES_VIOLATED, &lei);
7589 	os_log(OS_LOG_DEFAULT, "process %s[%d] caught waking the CPU %llu times "
7590 	    "over ~%llu seconds, averaging %llu wakes / second and "
7591 	    "violating a %slimit of %llu wakes over %llu seconds.\n",
7592 	    procname, pid,
7593 	    lei.lei_balance, lei.lei_last_refill / NSEC_PER_SEC,
7594 	    lei.lei_last_refill == 0 ? 0 :
7595 	    (NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill),
7596 	    fatal ? "FATAL " : "",
7597 	    lei.lei_limit, lei.lei_refill_period / NSEC_PER_SEC);
7598 
7599 	kr = send_resource_violation(send_cpu_wakes_violation, task, &lei,
7600 	    fatal ? kRNFatalLimitFlag : 0);
7601 	if (kr) {
7602 		printf("send_resource_violation(CPU wakes, ...): error %#x\n", kr);
7603 	}
7604 
7605 #ifdef EXC_RESOURCE_MONITORS
7606 	if (disable_exc_resource) {
7607 		printf("process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
7608 		    "suppressed by a boot-arg\n", procname, pid);
7609 		return;
7610 	}
7611 	if (audio_active) {
7612 		os_log(OS_LOG_DEFAULT, "process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
7613 		    "suppressed due to audio playback\n", procname, pid);
7614 		return;
7615 	}
7616 	if (lei.lei_last_refill == 0) {
7617 		os_log(OS_LOG_DEFAULT, "process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
7618 		    "suppressed due to lei.lei_last_refill = 0\n", procname, pid);
7619 	}
7620 
7621 	code[0] = code[1] = 0;
7622 	EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_WAKEUPS);
7623 	EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_WAKEUPS_MONITOR);
7624 	EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_PERMITTED(code[0],
7625 	    NSEC_PER_SEC * lei.lei_limit / lei.lei_refill_period);
7626 	EXC_RESOURCE_CPUMONITOR_ENCODE_OBSERVATION_INTERVAL(code[0],
7627 	    lei.lei_last_refill);
7628 	EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_OBSERVED(code[1],
7629 	    NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill);
7630 	exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
7631 #endif /* EXC_RESOURCE_MONITORS */
7632 
7633 	if (fatal) {
7634 		task_terminate_internal(task);
7635 	}
7636 }
7637 
7638 static boolean_t
7639 global_update_logical_writes(int64_t io_delta, int64_t *global_write_count)
7640 {
7641 	int64_t old_count, new_count;
7642 	boolean_t needs_telemetry;
7643 
7644 	do {
7645 		new_count = old_count = *global_write_count;
7646 		new_count += io_delta;
7647 		if (new_count >= io_telemetry_limit) {
7648 			new_count = 0;
7649 			needs_telemetry = TRUE;
7650 		} else {
7651 			needs_telemetry = FALSE;
7652 		}
7653 	} while (!OSCompareAndSwap64(old_count, new_count, global_write_count));
7654 	return needs_telemetry;
7655 }
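/*
 * Illustrative sketch, not part of this file: the loop above is the
 * standard lock-free read-modify-write idiom; if another CPU updates
 * the counter between the snapshot and the CAS, the CAS fails and the
 * update is recomputed from a fresh snapshot. "example_atomic_add" is
 * a hypothetical helper showing the same shape without the telemetry
 * threshold.
 */
#if 0
static void
example_atomic_add(int64_t delta, int64_t *counter)
{
	int64_t old_count, new_count;

	do {
		old_count = *counter;           /* snapshot */
		new_count = old_count + delta;  /* compute the update */
	} while (!OSCompareAndSwap64(old_count, new_count, counter));
}
#endif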
7656 
7657 void
7658 task_update_physical_writes(__unused task_t task, __unused task_physical_write_flavor_t flavor, __unused uint64_t io_size, __unused task_balance_flags_t flags)
7659 {
7660 #if CONFIG_PHYS_WRITE_ACCT
7661 	if (!io_size) {
7662 		return;
7663 	}
7664 
7665 	/*
7666 	 * task == NULL means that we have to update kernel_task ledgers
7667 	 */
7668 	if (!task) {
7669 		task = kernel_task;
7670 	}
7671 
7672 	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PHYS_WRITE_ACCT)) | DBG_FUNC_NONE,
7673 	    task_pid(task), flavor, io_size, flags, 0);
7674 	DTRACE_IO4(physical_writes, struct task *, task, task_physical_write_flavor_t, flavor, uint64_t, io_size, task_balance_flags_t, flags);
7675 
7676 	if (flags & TASK_BALANCE_CREDIT) {
7677 		if (flavor == TASK_PHYSICAL_WRITE_METADATA) {
7678 			OSAddAtomic64(io_size, (SInt64 *)&(task->task_fs_metadata_writes));
7679 			ledger_credit_nocheck(task->ledger, task_ledgers.fs_metadata_writes, io_size);
7680 		}
7681 	} else if (flags & TASK_BALANCE_DEBIT) {
7682 		if (flavor == TASK_PHYSICAL_WRITE_METADATA) {
7683 			OSAddAtomic64(-1 * io_size, (SInt64 *)&(task->task_fs_metadata_writes));
7684 			ledger_debit_nocheck(task->ledger, task_ledgers.fs_metadata_writes, io_size);
7685 		}
7686 	}
7687 #endif /* CONFIG_PHYS_WRITE_ACCT */
7688 }
7689 
7690 void
7691 task_update_logical_writes(task_t task, uint32_t io_size, int flags, void *vp)
7692 {
7693 	int64_t io_delta = 0;
7694 	int64_t * global_counter_to_update;
7695 	boolean_t needs_telemetry = FALSE;
7696 	boolean_t is_external_device = FALSE;
7697 	int ledger_to_update = 0;
7698 	struct task_writes_counters * writes_counters_to_update;
7699 
7700 	if ((!task) || (!io_size) || (!vp)) {
7701 		return;
7702 	}
7703 
7704 	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_DATA_WRITE)) | DBG_FUNC_NONE,
7705 	    task_pid(task), io_size, flags, (uintptr_t)VM_KERNEL_ADDRPERM(vp), 0);
7706 	DTRACE_IO4(logical_writes, struct task *, task, uint32_t, io_size, int, flags, vnode *, vp);
7707 
7708 	// Is the drive backing this vnode internal or external to the system?
7709 	if (vnode_isonexternalstorage(vp) == false) {
7710 		global_counter_to_update = &global_logical_writes_count;
7711 		ledger_to_update = task_ledgers.logical_writes;
7712 		writes_counters_to_update = &task->task_writes_counters_internal;
7713 		is_external_device = FALSE;
7714 	} else {
7715 		global_counter_to_update = &global_logical_writes_to_external_count;
7716 		ledger_to_update = task_ledgers.logical_writes_to_external;
7717 		writes_counters_to_update = &task->task_writes_counters_external;
7718 		is_external_device = TRUE;
7719 	}
7720 
7721 	switch (flags) {
7722 	case TASK_WRITE_IMMEDIATE:
7723 		OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_immediate_writes));
7724 		ledger_credit(task->ledger, ledger_to_update, io_size);
7725 		if (!is_external_device) {
7726 			coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
7727 		}
7728 		break;
7729 	case TASK_WRITE_DEFERRED:
7730 		OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_deferred_writes));
7731 		ledger_credit(task->ledger, ledger_to_update, io_size);
7732 		if (!is_external_device) {
7733 			coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
7734 		}
7735 		break;
7736 	case TASK_WRITE_INVALIDATED:
7737 		OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_invalidated_writes));
7738 		ledger_debit(task->ledger, ledger_to_update, io_size);
7739 		if (!is_external_device) {
7740 			coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, FALSE, io_size);
7741 		}
7742 		break;
7743 	case TASK_WRITE_METADATA:
7744 		OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_metadata_writes));
7745 		ledger_credit(task->ledger, ledger_to_update, io_size);
7746 		if (!is_external_device) {
7747 			coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
7748 		}
7749 		break;
7750 	}
7751 
7752 	io_delta = (flags == TASK_WRITE_INVALIDATED) ? ((int64_t)io_size * -1ll) : ((int64_t)io_size);
7753 	if (io_telemetry_limit != 0) {
7754 		/* If io_telemetry_limit is 0, disable global updates and I/O telemetry */
7755 		needs_telemetry = global_update_logical_writes(io_delta, global_counter_to_update);
7756 		if (needs_telemetry && !is_external_device) {
7757 			act_set_io_telemetry_ast(current_thread());
7758 		}
7759 	}
7760 }
7761 
7762 /*
7763  * Control the I/O monitor for a task.
7764  */
7765 kern_return_t
7766 task_io_monitor_ctl(task_t task, uint32_t *flags)
7767 {
7768 	ledger_t ledger = task->ledger;
7769 
7770 	task_lock(task);
7771 	if (*flags & IOMON_ENABLE) {
7772 		/* Configure the physical I/O ledger */
7773 		ledger_set_limit(ledger, task_ledgers.physical_writes, (task_iomon_limit_mb * 1024 * 1024), 0);
7774 		ledger_set_period(ledger, task_ledgers.physical_writes, (task_iomon_interval_secs * NSEC_PER_SEC));
7775 	} else if (*flags & IOMON_DISABLE) {
7776 		/*
7777 		 * Caller wishes to disable I/O monitor on the task.
7778 		 */
7779 		ledger_disable_refill(ledger, task_ledgers.physical_writes);
7780 		ledger_disable_callback(ledger, task_ledgers.physical_writes);
7781 	}
7782 
7783 	task_unlock(task);
7784 	return KERN_SUCCESS;
7785 }
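/*
 * Illustrative sketch, not part of this file: flipping the
 * physical-writes monitor configured above on or off for a task.
 * "example_toggle_iomon" is a hypothetical helper.
 */
#if 0
static kern_return_t
example_toggle_iomon(task_t task, boolean_t enable)
{
	uint32_t flags = enable ? IOMON_ENABLE : IOMON_DISABLE;

	return task_io_monitor_ctl(task, &flags);
}
#endif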
7786 
7787 void
7788 task_io_rate_exceeded(int warning, const void *param0, __unused const void *param1)
7789 {
7790 	if (warning == 0) {
7791 		SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO((int)param0);
7792 	}
7793 }
7794 
7795 void __attribute__((noinline))
7796 SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor)
7797 {
7798 	int                             pid = 0;
7799 	task_t                          task = current_task();
7800 #ifdef EXC_RESOURCE_MONITORS
7801 	mach_exception_data_type_t      code[EXCEPTION_CODE_MAX];
7802 #endif /* EXC_RESOURCE_MONITORS */
7803 	struct ledger_entry_info        lei = {};
7804 	kern_return_t                   kr;
7805 
7806 #ifdef MACH_BSD
7807 	pid = proc_selfpid();
7808 #endif
7809 	/*
7810 	 * Get the ledger entry info. We need to do this before disabling the exception
7811 	 * to get correct values for all fields.
7812 	 */
7813 	switch (flavor) {
7814 	case FLAVOR_IO_PHYSICAL_WRITES:
7815 		ledger_get_entry_info(task->ledger, task_ledgers.physical_writes, &lei);
7816 		break;
7817 	}
7818 
7819 
7820 	/*
7821 	 * Disable the exception notification so we don't overwhelm
7822 	 * the listener with an endless stream of redundant exceptions.
7823 	 * TODO: detect whether another thread is already reporting the violation.
7824 	 */
7825 	uint32_t flags = IOMON_DISABLE;
7826 	task_io_monitor_ctl(task, &flags);
7827 
7828 	if (flavor == FLAVOR_IO_LOGICAL_WRITES) {
7829 		trace_resource_violation(RMON_LOGWRITES_VIOLATED, &lei);
7830 	}
7831 	os_log(OS_LOG_DEFAULT, "process [%d] caught causing excessive I/O (flavor: %d). Task I/O: %lld MB. [Limit : %lld MB per %lld secs]\n",
7832 	    pid, flavor, (lei.lei_balance / (1024 * 1024)), (lei.lei_limit / (1024 * 1024)), (lei.lei_refill_period / NSEC_PER_SEC));
7833 
7834 	kr = send_resource_violation(send_disk_writes_violation, task, &lei, kRNFlagsNone);
7835 	if (kr) {
7836 		printf("send_resource_violation(disk_writes, ...): error %#x\n", kr);
7837 	}
7838 
7839 #ifdef EXC_RESOURCE_MONITORS
7840 	code[0] = code[1] = 0;
7841 	EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_IO);
7842 	EXC_RESOURCE_ENCODE_FLAVOR(code[0], flavor);
7843 	EXC_RESOURCE_IO_ENCODE_INTERVAL(code[0], (lei.lei_refill_period / NSEC_PER_SEC));
7844 	EXC_RESOURCE_IO_ENCODE_LIMIT(code[0], (lei.lei_limit / (1024 * 1024)));
7845 	EXC_RESOURCE_IO_ENCODE_OBSERVED(code[1], (lei.lei_balance / (1024 * 1024)));
7846 	exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
7847 #endif /* EXC_RESOURCE_MONITORS */
7848 }
7849 
7850 void
7851 task_port_space_ast(__unused task_t task)
7852 {
7853 	uint32_t current_size, soft_limit, hard_limit;
7854 	assert(task == current_task());
7855 	kern_return_t ret = ipc_space_get_table_size_and_limits(task->itk_space,
7856 	    &current_size, &soft_limit, &hard_limit);
7857 	if (ret == KERN_SUCCESS) {
7858 		SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_MACH_PORTS(task, current_size, soft_limit, hard_limit);
7859 	}
7860 }
7861 
7862 #if CONFIG_PROC_RESOURCE_LIMITS
7863 static mach_port_t
7864 task_allocate_fatal_port(void)
7865 {
7866 	mach_port_t task_fatal_port = MACH_PORT_NULL;
7867 	task_id_token_t token;
7868 
7869 	kern_return_t kr = task_create_identity_token(current_task(), &token); /* Takes a reference on the token */
7870 	if (kr) {
7871 		return MACH_PORT_NULL;
7872 	}
7873 	task_fatal_port = ipc_kobject_alloc_port((ipc_kobject_t)token, IKOT_TASK_FATAL,
7874 	    IPC_KOBJECT_ALLOC_NSREQUEST | IPC_KOBJECT_ALLOC_MAKE_SEND);
7875 
7876 	task_id_token_set_port(token, task_fatal_port);
7877 
7878 	return task_fatal_port;
7879 }
7880 
7881 static void
7882 task_fatal_port_no_senders(ipc_port_t port, __unused mach_port_mscount_t mscount)
7883 {
7884 	task_t task = TASK_NULL;
7885 	kern_return_t kr;
7886 
7887 	task_id_token_t token = ipc_kobject_get_stable(port, IKOT_TASK_FATAL);
7888 
7889 	assert(token != NULL);
7890 	if (token) {
7891 		kr = task_identity_token_get_task_grp(token, &task, TASK_GRP_KERNEL); /* takes a reference on task */
7892 		if (task) {
7893 			task_bsdtask_kill(task);
7894 			task_deallocate(task);
7895 		}
7896 		task_id_token_release(token); /* consumes ref given by notification */
7897 	}
7898 }
7899 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
7900 
7901 void __attribute__((noinline))
7902 SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_MACH_PORTS(task_t task, uint32_t current_size, uint32_t soft_limit, uint32_t hard_limit)
7903 {
7904 	int pid = 0;
7905 	char *procname = (char *) "unknown";
7906 	__unused kern_return_t kr;
7907 	__unused resource_notify_flags_t flags = kRNFlagsNone;
7908 	__unused uint32_t limit;
7909 	__unused mach_port_t task_fatal_port = MACH_PORT_NULL;
7910 	mach_exception_data_type_t      code[EXCEPTION_CODE_MAX];
7911 
7912 #ifdef MACH_BSD
7913 	pid = proc_selfpid();
7914 	if (task->bsd_info != NULL) {
7915 		procname = proc_name_address(task->bsd_info);
7916 	}
7917 #endif
7918 	/*
7919 	 * Only kernel_task and launchd are allowed to
7920 	 * have a really large ipc space.
7921 	 */
7922 	if (pid == 0 || pid == 1) {
7923 		return;
7924 	}
7925 
7926 	os_log(OS_LOG_DEFAULT, "process %s[%d] caught allocating too many mach ports. \
7927 	    Num of ports allocated %u; \n", procname, pid, current_size);
7928 
7929 	/* Abort the process if it has hit the system-wide limit for ipc port table size */
7930 	if (!hard_limit && !soft_limit) {
7931 		code[0] = code[1] = 0;
7932 		EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_PORTS);
7933 		EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_PORT_SPACE_FULL);
7934 		EXC_RESOURCE_PORTS_ENCODE_PORTS(code[0], current_size);
7935 
7936 		exit_with_port_space_exception(current_proc(), code[0], code[1]);
7937 
7938 		return;
7939 	}
7940 
7941 #if CONFIG_PROC_RESOURCE_LIMITS
7942 	if (hard_limit > 0) {
7943 		flags |= kRNHardLimitFlag;
7944 		limit = hard_limit;
7945 		task_fatal_port = task_allocate_fatal_port();
7946 		if (!task_fatal_port) {
7947 			os_log(OS_LOG_DEFAULT, "process %s[%d] Unable to create task token ident object", procname, pid);
7948 			task_bsdtask_kill(task);
7949 		}
7950 	} else {
7951 		flags |= kRNSoftLimitFlag;
7952 		limit = soft_limit;
7953 	}
7954 
7955 	kr = send_resource_violation_with_fatal_port(send_port_space_violation, task, (int64_t)current_size, (int64_t)limit, task_fatal_port, flags);
7956 	if (kr) {
7957 		os_log(OS_LOG_DEFAULT, "send_resource_violation(ports, ...): error %#x\n", kr);
7958 	}
7959 	if (task_fatal_port) {
7960 		ipc_port_release_send(task_fatal_port);
7961 	}
7962 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
7963 }
7964 
7965 void
7966 task_filedesc_ast(__unused task_t task, __unused int current_size, __unused int soft_limit, __unused int hard_limit)
7967 {
7968 #if CONFIG_PROC_RESOURCE_LIMITS
7969 	assert(task == current_task());
7970 	SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task, current_size, soft_limit, hard_limit);
7971 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
7972 }
7973 
7974 #if CONFIG_PROC_RESOURCE_LIMITS
7975 void __attribute__((noinline))
7976 SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task_t task, int current_size, int soft_limit, int hard_limit)
7977 {
7978 	int pid = 0;
7979 	char *procname = (char *) "unknown";
7980 	kern_return_t kr;
7981 	resource_notify_flags_t flags = kRNFlagsNone;
7982 	int limit;
7983 	mach_port_t task_fatal_port = MACH_PORT_NULL;
7984 
7985 #ifdef MACH_BSD
7986 	pid = proc_selfpid();
7987 	if (task->bsd_info != NULL) {
7988 		procname = proc_name_address(task->bsd_info);
7989 	}
7990 #endif
7991 	/*
7992 	 * Only kernel_task and launchd are allowed to
7993 	 * have a really large ipc space.
7994 	 */
7995 	if (pid == 0 || pid == 1) {
7996 		return;
7997 	}
7998 
7999 	os_log(OS_LOG_DEFAULT, "process %s[%d] caught allocating too many file descriptors. \
8000 	    Num of fds allocated %u; \n", procname, pid, current_size);
8001 
8002 	if (hard_limit > 0) {
8003 		flags |= kRNHardLimitFlag;
8004 		limit = hard_limit;
8005 		task_fatal_port = task_allocate_fatal_port();
8006 		if (!task_fatal_port) {
8007 			os_log(OS_LOG_DEFAULT, "process %s[%d] Unable to create task token ident object", procname, pid);
8008 			task_bsdtask_kill(task);
8009 		}
8010 	} else {
8011 		flags |= kRNSoftLimitFlag;
8012 		limit = soft_limit;
8013 	}
8014 
8015 	kr = send_resource_violation_with_fatal_port(send_file_descriptors_violation, task, (int64_t)current_size, (int64_t)limit, task_fatal_port, flags);
8016 	if (kr) {
8017 		os_log(OS_LOG_DEFAULT, "send_resource_violation_with_fatal_port(filedesc, ...): error %#x\n", kr);
8018 	}
8019 	if (task_fatal_port) {
8020 		ipc_port_release_send(task_fatal_port);
8021 	}
8022 }
8023 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
8024 
8025 /* Placeholders for the task set/get voucher interfaces */
8026 kern_return_t
8027 task_get_mach_voucher(
8028 	task_t                  task,
8029 	mach_voucher_selector_t __unused which,
8030 	ipc_voucher_t           *voucher)
8031 {
8032 	if (TASK_NULL == task) {
8033 		return KERN_INVALID_TASK;
8034 	}
8035 
8036 	*voucher = NULL;
8037 	return KERN_SUCCESS;
8038 }
8039 
8040 kern_return_t
8041 task_set_mach_voucher(
8042 	task_t                  task,
8043 	ipc_voucher_t           __unused voucher)
8044 {
8045 	if (TASK_NULL == task) {
8046 		return KERN_INVALID_TASK;
8047 	}
8048 
8049 	return KERN_SUCCESS;
8050 }
8051 
8052 kern_return_t
8053 task_swap_mach_voucher(
8054 	__unused task_t         task,
8055 	__unused ipc_voucher_t  new_voucher,
8056 	ipc_voucher_t          *in_out_old_voucher)
8057 {
8058 	/*
8059 	 * Currently this function is only called from a MIG generated
8060 	 * routine which doesn't release the reference on the voucher
8061 	 * addressed by in_out_old_voucher. To avoid leaking this reference,
8062 	 * a call to release it has been added here.
8063 	 */
8064 	ipc_voucher_release(*in_out_old_voucher);
8065 	OS_ANALYZER_SUPPRESS("81787115") return KERN_NOT_SUPPORTED;
8066 }
8067 
8068 void
8069 task_set_gpu_denied(task_t task, boolean_t denied)
8070 {
8071 	task_lock(task);
8072 
8073 	if (denied) {
8074 		task->t_flags |= TF_GPU_DENIED;
8075 	} else {
8076 		task->t_flags &= ~TF_GPU_DENIED;
8077 	}
8078 
8079 	task_unlock(task);
8080 }
8081 
8082 boolean_t
8083 task_is_gpu_denied(task_t task)
8084 {
8085 	/* We don't need the lock to read this flag */
8086 	return (task->t_flags & TF_GPU_DENIED) ? TRUE : FALSE;
8087 }
8088 
8089 
8090 uint64_t
8091 get_task_memory_region_count(task_t task)
8092 {
8093 	vm_map_t map;
8094 	map = (task == kernel_task) ? kernel_map: task->map;
8095 	return (uint64_t)get_map_nentries(map);
8096 }
8097 
8098 static void
8099 kdebug_trace_dyld_internal(uint32_t base_code,
8100     struct dyld_kernel_image_info *info)
8101 {
8102 	static_assert(sizeof(info->uuid) >= 16);
8103 
8104 #if defined(__LP64__)
8105 	uint64_t *uuid = (uint64_t *)&(info->uuid);
8106 
8107 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8108 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code), uuid[0],
8109 	    uuid[1], info->load_addr,
8110 	    (uint64_t)info->fsid.val[0] | ((uint64_t)info->fsid.val[1] << 32),
8111 	    0);
8112 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8113 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 1),
8114 	    (uint64_t)info->fsobjid.fid_objno |
8115 	    ((uint64_t)info->fsobjid.fid_generation << 32),
8116 	    0, 0, 0, 0);
8117 #else /* defined(__LP64__) */
8118 	uint32_t *uuid = (uint32_t *)&(info->uuid);
8119 
8120 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8121 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 2), uuid[0],
8122 	    uuid[1], uuid[2], uuid[3], 0);
8123 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8124 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 3),
8125 	    (uint32_t)info->load_addr, info->fsid.val[0], info->fsid.val[1],
8126 	    info->fsobjid.fid_objno, 0);
8127 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8128 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 4),
8129 	    info->fsobjid.fid_generation, 0, 0, 0, 0);
8130 #endif /* !defined(__LP64__) */
8131 }
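/*
 * Illustrative sketch, not part of this file: on LP64 the tracepoints
 * above pack two 32-bit fsid words into one kdebug argument; a trace
 * consumer can undo that packing as shown. "example_unpack_fsid" is a
 * hypothetical helper.
 */
#if 0
static void
example_unpack_fsid(uint64_t arg, int32_t *val0, int32_t *val1)
{
	*val0 = (int32_t)(arg & 0xffffffffULL);	/* low word:  fsid.val[0] */
	*val1 = (int32_t)(arg >> 32);		/* high word: fsid.val[1] */
}
#endif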
8132 
8133 static kern_return_t
8134 kdebug_trace_dyld(task_t task, uint32_t base_code,
8135     vm_map_copy_t infos_copy, mach_msg_type_number_t infos_len)
8136 {
8137 	kern_return_t kr;
8138 	dyld_kernel_image_info_array_t infos;
8139 	vm_map_offset_t map_data;
8140 	vm_offset_t data;
8141 
8142 	if (!infos_copy) {
8143 		return KERN_INVALID_ADDRESS;
8144 	}
8145 
8146 	if (!kdebug_enable ||
8147 	    !kdebug_debugid_enabled(KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, 0))) {
8148 		vm_map_copy_discard(infos_copy);
8149 		return KERN_SUCCESS;
8150 	}
8151 
8152 	if (task == NULL || task != current_task()) {
8153 		return KERN_INVALID_TASK;
8154 	}
8155 
8156 	kr = vm_map_copyout(ipc_kernel_map, &map_data, (vm_map_copy_t)infos_copy);
8157 	if (kr != KERN_SUCCESS) {
8158 		return kr;
8159 	}
8160 
8161 	infos = CAST_DOWN(dyld_kernel_image_info_array_t, map_data);
8162 
8163 	for (mach_msg_type_number_t i = 0; i < infos_len; i++) {
8164 		kdebug_trace_dyld_internal(base_code, &(infos[i]));
8165 	}
8166 
8167 	data = CAST_DOWN(vm_offset_t, map_data);
8168 	mach_vm_deallocate(ipc_kernel_map, data, infos_len * sizeof(infos[0]));
8169 	return KERN_SUCCESS;
8170 }
8171 
8172 kern_return_t
8173 task_register_dyld_image_infos(task_t task,
8174     dyld_kernel_image_info_array_t infos_copy,
8175     mach_msg_type_number_t infos_len)
8176 {
8177 	return kdebug_trace_dyld(task, DBG_DYLD_UUID_MAP_A,
8178 	           (vm_map_copy_t)infos_copy, infos_len);
8179 }
8180 
8181 kern_return_t
8182 task_unregister_dyld_image_infos(task_t task,
8183     dyld_kernel_image_info_array_t infos_copy,
8184     mach_msg_type_number_t infos_len)
8185 {
8186 	return kdebug_trace_dyld(task, DBG_DYLD_UUID_UNMAP_A,
8187 	           (vm_map_copy_t)infos_copy, infos_len);
8188 }
8189 
8190 kern_return_t
8191 task_get_dyld_image_infos(__unused task_t task,
8192     __unused dyld_kernel_image_info_array_t * dyld_images,
8193     __unused mach_msg_type_number_t * dyld_imagesCnt)
8194 {
8195 	return KERN_NOT_SUPPORTED;
8196 }
8197 
8198 kern_return_t
8199 task_register_dyld_shared_cache_image_info(task_t task,
8200     dyld_kernel_image_info_t cache_img,
8201     __unused boolean_t no_cache,
8202     __unused boolean_t private_cache)
8203 {
8204 	if (task == NULL || task != current_task()) {
8205 		return KERN_INVALID_TASK;
8206 	}
8207 
8208 	kdebug_trace_dyld_internal(DBG_DYLD_UUID_SHARED_CACHE_A, &cache_img);
8209 	return KERN_SUCCESS;
8210 }
8211 
8212 kern_return_t
8213 task_register_dyld_set_dyld_state(__unused task_t task,
8214     __unused uint8_t dyld_state)
8215 {
8216 	return KERN_NOT_SUPPORTED;
8217 }
8218 
8219 kern_return_t
8220 task_register_dyld_get_process_state(__unused task_t task,
8221     __unused dyld_kernel_process_info_t * dyld_process_state)
8222 {
8223 	return KERN_NOT_SUPPORTED;
8224 }
8225 
8226 kern_return_t
8227 task_inspect(task_inspect_t task_insp, task_inspect_flavor_t flavor,
8228     task_inspect_info_t info_out, mach_msg_type_number_t *size_in_out)
8229 {
8230 #if MONOTONIC
8231 	task_t task = (task_t)task_insp;
8232 	kern_return_t kr = KERN_SUCCESS;
8233 	mach_msg_type_number_t size;
8234 
8235 	if (task == TASK_NULL) {
8236 		return KERN_INVALID_ARGUMENT;
8237 	}
8238 
8239 	size = *size_in_out;
8240 
8241 	switch (flavor) {
8242 	case TASK_INSPECT_BASIC_COUNTS: {
8243 		struct task_inspect_basic_counts *bc;
8244 		uint64_t task_counts[MT_CORE_NFIXED] = { 0 };
8245 
8246 		if (size < TASK_INSPECT_BASIC_COUNTS_COUNT) {
8247 			kr = KERN_INVALID_ARGUMENT;
8248 			break;
8249 		}
8250 
8251 		mt_fixed_task_counts(task, task_counts);
8252 		bc = (struct task_inspect_basic_counts *)info_out;
8253 #ifdef MT_CORE_INSTRS
8254 		bc->instructions = task_counts[MT_CORE_INSTRS];
8255 #else /* defined(MT_CORE_INSTRS) */
8256 		bc->instructions = 0;
8257 #endif /* !defined(MT_CORE_INSTRS) */
8258 		bc->cycles = task_counts[MT_CORE_CYCLES];
8259 		size = TASK_INSPECT_BASIC_COUNTS_COUNT;
8260 		break;
8261 	}
8262 	default:
8263 		kr = KERN_INVALID_ARGUMENT;
8264 		break;
8265 	}
8266 
8267 	if (kr == KERN_SUCCESS) {
8268 		*size_in_out = size;
8269 	}
8270 	return kr;
8271 #else /* MONOTONIC */
8272 #pragma unused(task_insp, flavor, info_out, size_in_out)
8273 	return KERN_NOT_SUPPORTED;
8274 #endif /* !MONOTONIC */
8275 }
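
/*
 * A hypothetical user-space caller of the MIG routine backed by
 * task_inspect() could look like the sketch below (modeled on the
 * kernel-side signature above; error handling trimmed):
 */
#if 0 /* illustrative sketch, not compiled */
	kern_return_t kr;
	struct task_inspect_basic_counts counts = { 0 };
	mach_msg_type_number_t count = TASK_INSPECT_BASIC_COUNTS_COUNT;

	kr = task_inspect(mach_task_self(), TASK_INSPECT_BASIC_COUNTS,
	    (task_inspect_info_t)&counts, &count);
	/*
	 * On success, counts.cycles/counts.instructions hold the task-wide
	 * sums; instructions is 0 on CPUs without a fixed instruction counter.
	 */
#endif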
8276 
8277 #if CONFIG_SECLUDED_MEMORY
8278 int num_tasks_can_use_secluded_mem = 0;
8279 
8280 void
8281 task_set_can_use_secluded_mem(
8282 	task_t          task,
8283 	boolean_t       can_use_secluded_mem)
8284 {
8285 	if (!task->task_could_use_secluded_mem) {
8286 		return;
8287 	}
8288 	task_lock(task);
8289 	task_set_can_use_secluded_mem_locked(task, can_use_secluded_mem);
8290 	task_unlock(task);
8291 }
8292 
8293 void
8294 task_set_can_use_secluded_mem_locked(
8295 	task_t          task,
8296 	boolean_t       can_use_secluded_mem)
8297 {
8298 	assert(task->task_could_use_secluded_mem);
8299 	if (can_use_secluded_mem &&
8300 	    secluded_for_apps &&         /* global boot-arg */
8301 	    !task->task_can_use_secluded_mem) {
8302 		assert(num_tasks_can_use_secluded_mem >= 0);
8303 		OSAddAtomic(+1,
8304 		    (volatile SInt32 *)&num_tasks_can_use_secluded_mem);
8305 		task->task_can_use_secluded_mem = TRUE;
8306 	} else if (!can_use_secluded_mem &&
8307 	    task->task_can_use_secluded_mem) {
8308 		assert(num_tasks_can_use_secluded_mem > 0);
8309 		OSAddAtomic(-1,
8310 		    (volatile SInt32 *)&num_tasks_can_use_secluded_mem);
8311 		task->task_can_use_secluded_mem = FALSE;
8312 	}
8313 }
8314 
8315 void
8316 task_set_could_use_secluded_mem(
8317 	task_t          task,
8318 	boolean_t       could_use_secluded_mem)
8319 {
8320 	task->task_could_use_secluded_mem = !!could_use_secluded_mem;
8321 }
8322 
8323 void
8324 task_set_could_also_use_secluded_mem(
8325 	task_t          task,
8326 	boolean_t       could_also_use_secluded_mem)
8327 {
8328 	task->task_could_also_use_secluded_mem = !!could_also_use_secluded_mem;
8329 }
8330 
8331 boolean_t
8332 task_can_use_secluded_mem(
8333 	task_t          task,
8334 	boolean_t       is_alloc)
8335 {
8336 	if (task->task_can_use_secluded_mem) {
8337 		assert(task->task_could_use_secluded_mem);
8338 		assert(num_tasks_can_use_secluded_mem > 0);
8339 		return TRUE;
8340 	}
8341 	if (task->task_could_also_use_secluded_mem &&
8342 	    num_tasks_can_use_secluded_mem > 0) {
8343 		assert(num_tasks_can_use_secluded_mem > 0);
8344 		return TRUE;
8345 	}
8346 
8347 	/*
8348 	 * If a single task is using more than some large amount of
8349 	 * memory (i.e. secluded_shutoff_trigger) and is approaching
8350 	 * its task limit, allow it to dip into secluded and begin
8351 	 * suppression of rebuilding secluded memory until that task exits.
8352 	 */
8353 	if (is_alloc && secluded_shutoff_trigger != 0) {
8354 		uint64_t phys_used = get_task_phys_footprint(task);
8355 		uint64_t limit = get_task_phys_footprint_limit(task);
8356 		if (phys_used > secluded_shutoff_trigger &&
8357 		    limit > secluded_shutoff_trigger &&
8358 		    phys_used > limit - secluded_shutoff_headroom) {
8359 			start_secluded_suppression(task);
8360 			return TRUE;
8361 		}
8362 	}
8363 
8364 	return FALSE;
8365 }
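
/*
 * The dip-into-secluded check above reduces to a predicate on the task's
 * physical footprint; restated standalone for clarity (same math, same
 * globals; the function name is hypothetical):
 */
#if 0 /* illustrative sketch, not compiled */
static bool
near_limit_with_large_footprint(uint64_t phys_used, uint64_t limit)
{
	return phys_used > secluded_shutoff_trigger &&
	    limit > secluded_shutoff_trigger &&
	    phys_used > limit - secluded_shutoff_headroom;
}
#endif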
8366 
8367 boolean_t
8368 task_could_use_secluded_mem(
8369 	task_t  task)
8370 {
8371 	return task->task_could_use_secluded_mem;
8372 }
8373 
8374 boolean_t
8375 task_could_also_use_secluded_mem(
8376 	task_t  task)
8377 {
8378 	return task->task_could_also_use_secluded_mem;
8379 }
8380 #endif /* CONFIG_SECLUDED_MEMORY */
8381 
8382 queue_head_t *
8383 task_io_user_clients(task_t task)
8384 {
8385 	return &task->io_user_clients;
8386 }
8387 
8388 void
8389 task_set_message_app_suspended(task_t task, boolean_t enable)
8390 {
8391 	task->message_app_suspended = enable;
8392 }
8393 
8394 void
8395 task_copy_fields_for_exec(task_t dst_task, task_t src_task)
8396 {
8397 	dst_task->vtimers = src_task->vtimers;
8398 }
8399 
8400 #if DEVELOPMENT || DEBUG
8401 int vm_region_footprint = 0;
8402 #endif /* DEVELOPMENT || DEBUG */
8403 
8404 boolean_t
8405 task_self_region_footprint(void)
8406 {
8407 #if DEVELOPMENT || DEBUG
8408 	if (vm_region_footprint) {
8409 		/* system-wide override */
8410 		return TRUE;
8411 	}
8412 #endif /* DEVELOPMENT || DEBUG */
8413 	return current_task()->task_region_footprint;
8414 }
8415 
8416 void
8417 task_self_region_footprint_set(
8418 	boolean_t newval)
8419 {
8420 	task_t  curtask;
8421 
8422 	curtask = current_task();
8423 	task_lock(curtask);
8424 	if (newval) {
8425 		curtask->task_region_footprint = TRUE;
8426 	} else {
8427 		curtask->task_region_footprint = FALSE;
8428 	}
8429 	task_unlock(curtask);
8430 }
8431 
8432 void
8433 task_set_darkwake_mode(task_t task, boolean_t set_mode)
8434 {
8435 	assert(task);
8436 
8437 	task_lock(task);
8438 
8439 	if (set_mode) {
8440 		task->t_flags |= TF_DARKWAKE_MODE;
8441 	} else {
8442 		task->t_flags &= ~(TF_DARKWAKE_MODE);
8443 	}
8444 
8445 	task_unlock(task);
8446 }
8447 
8448 boolean_t
8449 task_get_darkwake_mode(task_t task)
8450 {
8451 	assert(task);
8452 	return (task->t_flags & TF_DARKWAKE_MODE) != 0;
8453 }
8454 
8455 /*
8456  * Set the default behavior for the task's control port and for the
8457  * EXC_GUARD variants that have settable behavior.
8458  *
8459  * Platform binaries typically have one behavior, third parties another -
8460  * but there are special exceptions we may need to account for.
8461  */
8462 void
8463 task_set_exc_guard_ctrl_port_default(
8464 	task_t task,
8465 	thread_t main_thread,
8466 	const char *name,
8467 	unsigned int namelen,
8468 	boolean_t is_simulated,
8469 	uint32_t platform,
8470 	uint32_t sdk)
8471 {
8472 	if (task->t_flags & TF_PLATFORM) {
8473 		/* set exc guard default behavior for first-party code */
8474 		task->task_exc_guard = (task_exc_guard_default & TASK_EXC_GUARD_ALL);
8475 
8476 		if (1 == task_pid(task)) {
8477 			/* special flags for inittask - deliver every instance as a corpse */
8478 			task->task_exc_guard = _TASK_EXC_GUARD_ALL_CORPSE;
8479 		} else if (task_exc_guard_default & TASK_EXC_GUARD_HONOR_NAMED_DEFAULTS) {
8480 			/* honor by-name default setting overrides */
8481 
8482 			int count = sizeof(task_exc_guard_named_defaults) / sizeof(struct task_exc_guard_named_default);
8483 
8484 			for (int i = 0; i < count; i++) {
8485 				const struct task_exc_guard_named_default *named_default =
8486 				    &task_exc_guard_named_defaults[i];
8487 				if (strncmp(named_default->name, name, namelen) == 0 &&
8488 				    strlen(named_default->name) == namelen) {
8489 					task->task_exc_guard = named_default->behavior;
8490 					break;
8491 				}
8492 			}
8493 		}
8494 
8495 		/* set control port options for 1p code, inherited from parent task by default */
8496 		task->task_control_port_options = (ipc_control_port_options & ICP_OPTIONS_1P_MASK);
8497 	} else {
8498 		/* set exc guard default behavior for third-party code */
8499 		task->task_exc_guard = ((task_exc_guard_default >> TASK_EXC_GUARD_THIRD_PARTY_DEFAULT_SHIFT) & TASK_EXC_GUARD_ALL);
8500 		/* set control port options for 3p code, inherited from parent task by default */
8501 		task->task_control_port_options = (ipc_control_port_options & ICP_OPTIONS_3P_MASK) >> ICP_OPTIONS_3P_SHIFT;
8502 	}
8503 
8504 	if (is_simulated) {
8505 		/* If simulated and built against a pre-iOS 15 / tvOS 15 / watchOS 8 SDK, disable all EXC_GUARD */
8506 		if ((platform == PLATFORM_IOSSIMULATOR && sdk < 0xf0000) ||
8507 		    (platform == PLATFORM_TVOSSIMULATOR && sdk < 0xf0000) ||
8508 		    (platform == PLATFORM_WATCHOSSIMULATOR && sdk < 0x80000)) {
8509 			task->task_exc_guard = TASK_EXC_GUARD_NONE;
8510 		}
8511 		/* Disable protection for control ports for simulated binaries */
8512 		task->task_control_port_options = TASK_CONTROL_PORT_OPTIONS_NONE;
8513 	}
8514 
8515 
8516 	task_set_immovable_pinned(task);
8517 	main_thread_set_immovable_pinned(main_thread);
8518 }
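
/*
 * The by-name override table consulted above is defined elsewhere in this
 * file; a hedged sketch of its shape (the entry below is hypothetical,
 * not an actual XNU default):
 */
#if 0 /* illustrative sketch, not compiled */
static const struct task_exc_guard_named_default task_exc_guard_named_defaults[] = {
	{ .name = "example_daemon", .behavior = TASK_EXC_GUARD_MP_DELIVER },
};
#endif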
8519 
8520 kern_return_t
8521 task_get_exc_guard_behavior(
8522 	task_t task,
8523 	task_exc_guard_behavior_t *behaviorp)
8524 {
8525 	if (task == TASK_NULL) {
8526 		return KERN_INVALID_TASK;
8527 	}
8528 	*behaviorp = task->task_exc_guard;
8529 	return KERN_SUCCESS;
8530 }
8531 
8532 kern_return_t
8533 task_set_exc_guard_behavior(
8534 	task_t task,
8535 	task_exc_guard_behavior_t new_behavior)
8536 {
8537 	if (task == TASK_NULL) {
8538 		return KERN_INVALID_TASK;
8539 	}
8540 	if (new_behavior & ~TASK_EXC_GUARD_ALL) {
8541 		return KERN_INVALID_VALUE;
8542 	}
8543 
8544 	/* limit setting to that allowed for this config */
8545 	new_behavior = new_behavior & task_exc_guard_config_mask;
8546 
8547 #if !defined (DEBUG) && !defined (DEVELOPMENT)
8548 	/* On release kernels, only allow _upgrading_ exc guard behavior */
8549 	task_exc_guard_behavior_t cur_behavior;
8550 
8551 	os_atomic_rmw_loop(&task->task_exc_guard, cur_behavior, new_behavior, relaxed, {
8552 		if ((cur_behavior & task_exc_guard_no_unset_mask) & ~(new_behavior & task_exc_guard_no_unset_mask)) {
8553 		        os_atomic_rmw_loop_give_up(return KERN_DENIED);
8554 		}
8555 
8556 		if ((new_behavior & task_exc_guard_no_set_mask) & ~(cur_behavior & task_exc_guard_no_set_mask)) {
8557 		        os_atomic_rmw_loop_give_up(return KERN_DENIED);
8558 		}
8559 
8560 		/* no restrictions on CORPSE bit */
8561 	});
8562 #else
8563 	task->task_exc_guard = new_behavior;
8564 #endif
8565 	return KERN_SUCCESS;
8566 }
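
/*
 * On release kernels the rmw loop above makes the setting effectively
 * monotonic: bits covered by task_exc_guard_no_unset_mask may only be
 * added, and bits covered by task_exc_guard_no_set_mask may only be
 * carried over.  A worked example (mask and behavior values hypothetical):
 */
#if 0 /* illustrative sketch, not compiled */
	/* cur_behavior = 0x5, task_exc_guard_no_unset_mask = 0x4 */
	kr = task_set_exc_guard_behavior(task, 0x1);
	/* 0x1 would drop protected bit 0x4 => KERN_DENIED on RELEASE */
#endif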
8567 
8568 kern_return_t
8569 task_set_corpse_forking_behavior(task_t task, task_corpse_forking_behavior_t behavior)
8570 {
8571 #if DEVELOPMENT || DEBUG
8572 	if (task == TASK_NULL) {
8573 		return KERN_INVALID_TASK;
8574 	}
8575 
8576 	task_lock(task);
8577 	if (behavior & TASK_CORPSE_FORKING_DISABLED_MEM_DIAG) {
8578 		task->t_flags |= TF_NO_CORPSE_FORKING;
8579 	} else {
8580 		task->t_flags &= ~TF_NO_CORPSE_FORKING;
8581 	}
8582 	task_unlock(task);
8583 
8584 	return KERN_SUCCESS;
8585 #else
8586 	(void)task;
8587 	(void)behavior;
8588 	return KERN_NOT_SUPPORTED;
8589 #endif
8590 }
8591 
8592 boolean_t
8593 task_corpse_forking_disabled(task_t task)
8594 {
8595 	boolean_t disabled = FALSE;
8596 
8597 	task_lock(task);
8598 	disabled = (task->t_flags & TF_NO_CORPSE_FORKING);
8599 	task_unlock(task);
8600 
8601 	return disabled;
8602 }
8603 
8604 #if __arm64__
8605 extern int legacy_footprint_entitlement_mode;
8606 extern void memorystatus_act_on_legacy_footprint_entitlement(struct proc *, boolean_t);
8607 extern void memorystatus_act_on_ios13extended_footprint_entitlement(struct proc *);
8608 
8609 
8610 void
8611 task_set_legacy_footprint(
8612 	task_t task)
8613 {
8614 	task_lock(task);
8615 	task->task_legacy_footprint = TRUE;
8616 	task_unlock(task);
8617 }
8618 
8619 void
8620 task_set_extra_footprint_limit(
8621 	task_t task)
8622 {
8623 	if (task->task_extra_footprint_limit) {
8624 		return;
8625 	}
8626 	task_lock(task);
8627 	if (task->task_extra_footprint_limit) {
8628 		task_unlock(task);
8629 		return;
8630 	}
8631 	task->task_extra_footprint_limit = TRUE;
8632 	task_unlock(task);
8633 	memorystatus_act_on_legacy_footprint_entitlement(task->bsd_info, TRUE);
8634 }
8635 
8636 void
8637 task_set_ios13extended_footprint_limit(
8638 	task_t task)
8639 {
8640 	if (task->task_ios13extended_footprint_limit) {
8641 		return;
8642 	}
8643 	task_lock(task);
8644 	if (task->task_ios13extended_footprint_limit) {
8645 		task_unlock(task);
8646 		return;
8647 	}
8648 	task->task_ios13extended_footprint_limit = TRUE;
8649 	task_unlock(task);
8650 	memorystatus_act_on_ios13extended_footprint_entitlement(task->bsd_info);
8651 }
8652 #endif /* __arm64__ */
8653 
8654 static inline ledger_amount_t
8655 task_ledger_get_balance(
8656 	ledger_t        ledger,
8657 	int             ledger_idx)
8658 {
8659 	ledger_amount_t amount;
8660 	amount = 0;
8661 	ledger_get_balance(ledger, ledger_idx, &amount);
8662 	return amount;
8663 }
8664 
8665 /*
8666  * Gather the amount of memory counted in a task's footprint due to
8667  * being in a specific set of ledgers.
8668  */
8669 void
8670 task_ledgers_footprint(
8671 	ledger_t        ledger,
8672 	ledger_amount_t *ledger_resident,
8673 	ledger_amount_t *ledger_compressed)
8674 {
8675 	*ledger_resident = 0;
8676 	*ledger_compressed = 0;
8677 
8678 	/* purgeable non-volatile memory */
8679 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.purgeable_nonvolatile);
8680 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.purgeable_nonvolatile_compressed);
8681 
8682 	/* "default" tagged memory */
8683 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.tagged_footprint);
8684 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.tagged_footprint_compressed);
8685 
8686 	/* "network" currently never counts in the footprint... */
8687 
8688 	/* "media" tagged memory */
8689 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.media_footprint);
8690 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.media_footprint_compressed);
8691 
8692 	/* "graphics" tagged memory */
8693 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.graphics_footprint);
8694 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.graphics_footprint_compressed);
8695 
8696 	/* "neural" tagged memory */
8697 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.neural_footprint);
8698 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.neural_footprint_compressed);
8699 }
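
/*
 * A typical consumer adds the two out-parameters to get the total
 * ledger-tracked footprint; minimal sketch, assuming a valid task:
 */
#if 0 /* illustrative sketch, not compiled */
	ledger_amount_t resident, compressed;

	task_ledgers_footprint(get_task_ledger(task), &resident, &compressed);
	ledger_amount_t total_footprint = resident + compressed;
#endif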
8700 
8701 #if CONFIG_MEMORYSTATUS
8702 /*
8703  * Credit any outstanding task dirty time to the ledger.
8704  * memstat_dirty_start is pushed forward to prevent any possibility of double
8705  * counting, making it safe to call this as often as necessary to ensure that
8706  * anyone reading the ledger gets up-to-date information.
8707  */
8708 void
8709 task_ledger_settle_dirty_time(task_t t)
8710 {
8711 	task_lock(t);
8712 
8713 	uint64_t start = t->memstat_dirty_start;
8714 	if (start) {
8715 		uint64_t now = mach_absolute_time();
8716 
8717 		uint64_t duration;
8718 		absolutetime_to_nanoseconds(now - start, &duration);
8719 
8720 		ledger_t ledger = get_task_ledger(t);
8721 		ledger_credit(ledger, task_ledgers.memorystatus_dirty_time, duration);
8722 
8723 		t->memstat_dirty_start = now;
8724 	}
8725 
8726 	task_unlock(t);
8727 }
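
/*
 * Readers are expected to settle before sampling so the balance reflects
 * dirty time up to "now"; hedged sketch of such a reader:
 */
#if 0 /* illustrative sketch, not compiled */
	task_ledger_settle_dirty_time(t);
	ledger_amount_t dirty_ns = task_ledger_get_balance(get_task_ledger(t),
	    task_ledgers.memorystatus_dirty_time);
#endif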
8728 #endif /* CONFIG_MEMORYSTATUS */
8729 
8730 void
8731 task_set_memory_ownership_transfer(
8732 	task_t    task,
8733 	boolean_t value)
8734 {
8735 	task_lock(task);
8736 	task->task_can_transfer_memory_ownership = !!value;
8737 	task_unlock(task);
8738 }
8739 
8740 #if DEVELOPMENT || DEBUG
8741 
8742 void
8743 task_set_no_footprint_for_debug(task_t task, boolean_t value)
8744 {
8745 	task_lock(task);
8746 	task->task_no_footprint_for_debug = !!value;
8747 	task_unlock(task);
8748 }
8749 
8750 int
8751 task_get_no_footprint_for_debug(task_t task)
8752 {
8753 	return task->task_no_footprint_for_debug;
8754 }
8755 
8756 #endif /* DEVELOPMENT || DEBUG */
8757 
8758 void
8759 task_copy_vmobjects(task_t task, vm_object_query_t query, size_t len, size_t *num)
8760 {
8761 	vm_object_t find_vmo;
8762 	size_t size = 0;
8763 
8764 	task_objq_lock(task);
8765 	if (query != NULL) {
8766 		queue_iterate(&task->task_objq, find_vmo, vm_object_t, task_objq)
8767 		{
8768 			vm_object_query_t p = &query[size++];
8769 
8770 			/* make sure not to overrun the caller's buffer */
8771 			if (size * sizeof(vm_object_query_data_t) > len) {
8772 				--size;
8773 				break;
8774 			}
8775 
8776 			bzero(p, sizeof(*p));
8777 			p->object_id = (vm_object_id_t) VM_KERNEL_ADDRPERM(find_vmo);
8778 			p->virtual_size = find_vmo->internal ? find_vmo->vo_size : 0;
8779 			p->resident_size = find_vmo->resident_page_count * PAGE_SIZE;
8780 			p->wired_size = find_vmo->wired_page_count * PAGE_SIZE;
8781 			p->reusable_size = find_vmo->reusable_page_count * PAGE_SIZE;
8782 			p->vo_no_footprint = find_vmo->vo_no_footprint;
8783 			p->vo_ledger_tag = find_vmo->vo_ledger_tag;
8784 			p->purgable = find_vmo->purgable;
8785 
8786 			if (find_vmo->internal && find_vmo->pager_created && find_vmo->pager != NULL) {
8787 				p->compressed_size = vm_compressor_pager_get_count(find_vmo->pager) * PAGE_SIZE;
8788 			} else {
8789 				p->compressed_size = 0;
8790 			}
8791 		}
8792 	} else {
8793 		size = (size_t)task->task_owned_objects;
8794 	}
8795 	task_objq_unlock(task);
8796 
8797 	*num = size;
8798 }
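
/*
 * Note the sizing convention above: `len` is a byte budget, so at most
 * len / sizeof(vm_object_query_data_t) entries are stored, and the count
 * actually written is returned through `num`.  Sketch of a caller (the
 * buffer size is hypothetical):
 */
#if 0 /* illustrative sketch, not compiled */
	vm_object_query_data_t queries[64];
	size_t written = 0;

	task_copy_vmobjects(task, &queries[0], sizeof(queries), &written);
#endif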
8799 
8800 void
8801 task_get_owned_vmobjects(task_t task, size_t buffer_size, vmobject_list_output_t buffer, size_t* output_size, size_t* entries)
8802 {
8803 	assert(output_size);
8804 	assert(entries);
8805 
8806 	/* copy the vmobjects and vmobject data out of the task */
8807 	if (buffer_size == 0) {
8808 		task_copy_vmobjects(task, NULL, 0, entries);
8809 		*output_size = (*entries > 0) ? *entries * sizeof(vm_object_query_data_t) + sizeof(*buffer) : 0;
8810 	} else {
8811 		assert(buffer);
8812 		task_copy_vmobjects(task, &buffer->data[0], buffer_size - sizeof(*buffer), entries);
8813 		buffer->entries = (uint64_t)*entries;
8814 		*output_size = *entries * sizeof(vm_object_query_data_t) + sizeof(*buffer);
8815 	}
8816 }
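
/*
 * The output buffer is a vmobject_list_output header (the 64-bit entry
 * count) followed by `entries` vm_object_query_data_t records, hence the
 * "+ sizeof(*buffer)" in the size math above.  Sketch of walking a
 * previously filled buffer:
 */
#if 0 /* illustrative sketch, not compiled */
	for (uint64_t i = 0; i < buffer->entries; i++) {
		vm_object_query_data_t *q = &buffer->data[i];
		/* q->object_id, q->resident_size, q->compressed_size, ... */
	}
#endif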
8817 
8818 void
8819 task_store_owned_vmobject_info(task_t to_task, task_t from_task)
8820 {
8821 	size_t buffer_size;
8822 	vmobject_list_output_t buffer;
8823 	size_t output_size;
8824 	size_t entries;
8825 
8826 	assert(to_task != from_task);
8827 
8828 	/* get the size, allocate a buffer, and populate */
8829 	entries = 0;
8830 	output_size = 0;
8831 	task_get_owned_vmobjects(from_task, 0, NULL, &output_size, &entries);
8832 
8833 	if (output_size) {
8834 		buffer_size = output_size;
8835 		buffer = kalloc_data(buffer_size, Z_WAITOK);
8836 
8837 		if (buffer) {
8838 			entries = 0;
8839 			output_size = 0;
8840 
8841 			task_get_owned_vmobjects(from_task, buffer_size, buffer, &output_size, &entries);
8842 
8843 			if (entries) {
8844 				to_task->corpse_vmobject_list = buffer;
8845 				to_task->corpse_vmobject_list_size = buffer_size;
8846 			}
8847 		}
8848 	}
8849 }
8850 
8851 void
8852 task_set_filter_msg_flag(
8853 	task_t task,
8854 	boolean_t flag)
8855 {
8856 	assert(task != TASK_NULL);
8857 
8858 	task_lock(task);
8859 	if (flag) {
8860 		task->t_flags |= TF_FILTER_MSG;
8861 	} else {
8862 		task->t_flags &= ~TF_FILTER_MSG;
8863 	}
8864 	task_unlock(task);
8865 }
8866 
8867 boolean_t
8868 task_get_filter_msg_flag(
8869 	task_t task)
8870 {
8871 	uint32_t flags = 0;
8872 
8873 	if (!task) {
8874 		return false;
8875 	}
8876 
8877 	flags = os_atomic_load(&task->t_flags, relaxed);
8878 	return (flags & TF_FILTER_MSG) ? TRUE : FALSE;
8879 }
8880 bool
8881 task_is_exotic(
8882 	task_t task)
8883 {
8884 	if (task == TASK_NULL) {
8885 		return false;
8886 	}
8887 	return vm_map_is_exotic(get_task_map(task));
8888 }
8889 
8890 bool
8891 task_is_alien(
8892 	task_t task)
8893 {
8894 	if (task == TASK_NULL) {
8895 		return false;
8896 	}
8897 	return vm_map_is_alien(get_task_map(task));
8898 }
8899 
8900 
8901 
8902 #if CONFIG_MACF
8903 /* Set the filter mask for Mach traps. */
8904 void
8905 mac_task_set_mach_filter_mask(task_t task, uint8_t *maskptr)
8906 {
8907 	assert(task);
8908 
8909 	task_set_mach_trap_filter_mask(task, maskptr);
8910 }
8911 
8912 /* Set the filter mask for kobject msgs. */
8913 void
8914 mac_task_set_kobj_filter_mask(task_t task, uint8_t *maskptr)
8915 {
8916 	assert(task);
8917 
8918 	task_set_mach_kobj_filter_mask(task, maskptr);
8919 }
8920 
8921 /* Hook for mach trap/sc filter evaluation policy. */
8922 mac_task_mach_filter_cbfunc_t mac_task_mach_trap_evaluate = NULL;
8923 
8924 /* Hook for kobj message filter evaluation policy. */
8925 mac_task_kobj_filter_cbfunc_t mac_task_kobj_msg_evaluate = NULL;
8926 
8927 /* Set the callback hooks for the filtering policy. */
8928 int
8929 mac_task_register_filter_callbacks(
8930 	const mac_task_mach_filter_cbfunc_t mach_cbfunc,
8931 	const mac_task_kobj_filter_cbfunc_t kobj_cbfunc)
8932 {
8933 	if (mach_cbfunc != NULL) {
8934 		if (mac_task_mach_trap_evaluate != NULL) {
8935 			return KERN_FAILURE;
8936 		}
8937 		mac_task_mach_trap_evaluate = mach_cbfunc;
8938 	}
8939 	if (kobj_cbfunc != NULL) {
8940 		if (mac_task_kobj_msg_evaluate != NULL) {
8941 			return KERN_FAILURE;
8942 		}
8943 		mac_task_kobj_msg_evaluate = kobj_cbfunc;
8944 	}
8945 
8946 	return KERN_SUCCESS;
8947 }
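
/*
 * A filtering policy registers its evaluators once, early in boot; a
 * second registration for the same hook fails.  Hedged sketch of a
 * hypothetical policy init (the callback names are placeholders):
 */
#if 0 /* illustrative sketch, not compiled */
	if (mac_task_register_filter_callbacks(my_trap_evaluate,
	    my_kobj_msg_evaluate) != KERN_SUCCESS) {
		/* another policy already owns one of the hooks */
	}
#endif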
8948 #endif /* CONFIG_MACF */
8949 
8950 void
8951 task_transfer_mach_filter_bits(
8952 	task_t new_task,
8953 	task_t old_task)
8954 {
8955 #if CONFIG_MACF
8956 	/* Copy mach trap and kernel object mask pointers to new task. */
8957 	task_copy_filter_masks(new_task, old_task);
8958 #endif /* CONFIG_MACF */
8959 	/* If the filter message flag is set on the old task, set it on the new task. */
8960 	if (task_get_filter_msg_flag(old_task)) {
8961 		new_task->t_flags |= TF_FILTER_MSG;
8962 	}
8963 }
8964 
8965 
8966 #if __has_feature(ptrauth_calls)
8967 /* Tasks with this entitlement have all PAC violations delivered as fatal
8968  * exceptions, irrespective of the enable_pac_exception boot-arg value.
8969  */
8970 #define PAC_EXCEPTION_ENTITLEMENT "com.apple.private.pac.exception"
8971 /*
8972  * When the enable_pac_exception boot-arg is set to true, a process
8973  * can opt into non-fatal PAC exception delivery by carrying
8974  * this entitlement.
8975  */
8976 #define SKIP_PAC_EXCEPTION_ENTITLEMENT "com.apple.private.skip.pac.exception"
8977 
8978 void
8979 task_set_pac_exception_fatal_flag(
8980 	task_t task)
8981 {
8982 	assert(task != TASK_NULL);
8983 	bool pac_entitlement = false;
8984 
8985 	if (enable_pac_exception && IOTaskHasEntitlement(task, SKIP_PAC_EXCEPTION_ENTITLEMENT)) {
8986 		return;
8987 	}
8988 
8989 	if (IOTaskHasEntitlement(task, PAC_EXCEPTION_ENTITLEMENT)) {
8990 		pac_entitlement = true;
8991 	}
8992 
8993 	task_lock(task);
8994 
8995 	if (pac_entitlement) {
8996 		task->t_flags |= TF_PAC_ENFORCE_USER_STATE;
8997 	}
8998 	if (pac_entitlement || (enable_pac_exception && task->t_flags & TF_PLATFORM)) {
8999 		task->t_flags |= TF_PAC_EXC_FATAL;
9000 	}
9001 	task_unlock(task);
9002 }
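
/*
 * Net effect of the checks above:
 *   enable_pac_exception + SKIP_PAC_EXCEPTION_ENTITLEMENT
 *       -> neither flag set (early return)
 *   PAC_EXCEPTION_ENTITLEMENT
 *       -> TF_PAC_ENFORCE_USER_STATE and TF_PAC_EXC_FATAL
 *   enable_pac_exception + platform binary (TF_PLATFORM)
 *       -> TF_PAC_EXC_FATAL
 */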
9003 
9004 bool
9005 task_is_pac_exception_fatal(
9006 	task_t task)
9007 {
9008 	uint32_t flags = 0;
9009 
9010 	assert(task != TASK_NULL);
9011 
9012 	flags = os_atomic_load(&task->t_flags, relaxed);
9013 	return (bool)(flags & TF_PAC_EXC_FATAL);
9014 }
9015 #endif /* __has_feature(ptrauth_calls) */
9016 
9017 bool
9018 task_needs_user_signed_thread_state(
9019 	task_t task)
9020 {
9021 	uint32_t flags = 0;
9022 
9023 	assert(task != TASK_NULL);
9024 
9025 	flags = os_atomic_load(&task->t_flags, relaxed);
9026 	return !!(flags & TF_PAC_ENFORCE_USER_STATE);
9027 }
9028 
9029 void
9030 task_set_tecs(task_t task)
9031 {
9032 	if (task == TASK_NULL) {
9033 		task = current_task();
9034 	}
9035 
9036 	if (!machine_csv(CPUVN_CI)) {
9037 		return;
9038 	}
9039 
9040 	LCK_MTX_ASSERT(&task->lock, LCK_MTX_ASSERT_NOTOWNED);
9041 
9042 	task_lock(task);
9043 
9044 	task->t_flags |= TF_TECS;
9045 
9046 	thread_t thread;
9047 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
9048 		machine_tecs(thread);
9049 	}
9050 	task_unlock(task);
9051 }
9052 
9053 kern_return_t
9054 task_test_sync_upcall(
9055 	task_t     task,
9056 	ipc_port_t send_port)
9057 {
9058 #if DEVELOPMENT || DEBUG
9059 	if (task != current_task() || !IPC_PORT_VALID(send_port)) {
9060 		return KERN_INVALID_ARGUMENT;
9061 	}
9062 
9063 	/* Block on sync kernel upcall on the given send port */
9064 	mach_test_sync_upcall(send_port);
9065 
9066 	ipc_port_release_send(send_port);
9067 	return KERN_SUCCESS;
9068 #else
9069 	(void)task;
9070 	(void)send_port;
9071 	return KERN_NOT_SUPPORTED;
9072 #endif
9073 }
9074 
9075 kern_return_t
9076 task_test_async_upcall_propagation(
9077 	task_t      task,
9078 	ipc_port_t  send_port,
9079 	int         qos,
9080 	int         iotier)
9081 {
9082 #if DEVELOPMENT || DEBUG
9083 	kern_return_t kr;
9084 
9085 	if (task != current_task() || !IPC_PORT_VALID(send_port)) {
9086 		return KERN_INVALID_ARGUMENT;
9087 	}
9088 
9089 	if (qos < THREAD_QOS_DEFAULT || qos > THREAD_QOS_USER_INTERACTIVE ||
9090 	    iotier < THROTTLE_LEVEL_START || iotier > THROTTLE_LEVEL_END) {
9091 		return KERN_INVALID_ARGUMENT;
9092 	}
9093 
9094 	struct thread_attr_for_ipc_propagation attr = {
9095 		.tafip_iotier = iotier,
9096 		.tafip_qos = qos
9097 	};
9098 
9099 	/* Apply propagate attr to port */
9100 	kr = ipc_port_propagate_thread_attr(send_port, attr);
9101 	if (kr != KERN_SUCCESS) {
9102 		return kr;
9103 	}
9104 
9105 	thread_enable_send_importance(current_thread(), TRUE);
9106 
9107 	/* Perform an async kernel upcall on the given send port */
9108 	mach_test_async_upcall(send_port);
9109 	thread_enable_send_importance(current_thread(), FALSE);
9110 
9111 	ipc_port_release_send(send_port);
9112 	return KERN_SUCCESS;
9113 #else
9114 	(void)task;
9115 	(void)send_port;
9116 	(void)qos;
9117 	(void)iotier;
9118 	return KERN_NOT_SUPPORTED;
9119 #endif
9120 }
9121 
9122 #if CONFIG_PROC_RESOURCE_LIMITS
9123 mach_port_name_t
9124 current_task_get_fatal_port_name(void)
9125 {
9126 	mach_port_t task_fatal_port = MACH_PORT_NULL;
9127 	mach_port_name_t port_name = 0;
9128 
9129 	task_fatal_port = task_allocate_fatal_port();
9130 
9131 	if (task_fatal_port) {
9132 		ipc_object_copyout(current_space(), ip_to_object(task_fatal_port), MACH_MSG_TYPE_PORT_SEND,
9133 		    IPC_OBJECT_COPYOUT_FLAGS_NONE, NULL, NULL, &port_name);
9134 	}
9135 
9136 	return port_name;
9137 }
9138 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
9139 
9140 #if defined(__x86_64__)
9141 bool
9142 curtask_get_insn_copy_optout(void)
9143 {
9144 	bool optout;
9145 	task_t cur_task = current_task();
9146 
9147 	task_lock(cur_task);
9148 	optout = (cur_task->t_flags & TF_INSN_COPY_OPTOUT) ? true : false;
9149 	task_unlock(cur_task);
9150 
9151 	return optout;
9152 }
9153 
9154 void
9155 curtask_set_insn_copy_optout(void)
9156 {
9157 	task_t cur_task = current_task();
9158 
9159 	task_lock(cur_task);
9160 
9161 	cur_task->t_flags |= TF_INSN_COPY_OPTOUT;
9162 
9163 	thread_t thread;
9164 	queue_iterate(&cur_task->threads, thread, thread_t, task_threads) {
9165 		machine_thread_set_insn_copy_optout(thread);
9166 	}
9167 	task_unlock(cur_task);
9168 }
9169 #endif /* defined(__x86_64__) */
9170 
9171 void
9172 task_get_corpse_vmobject_list(task_t task, vmobject_list_output_t* list, size_t* list_size)
9173 {
9174 	assert(task);
9175 	assert(list_size);
9176 
9177 	*list = task->corpse_vmobject_list;
9178 	*list_size = (size_t)task->corpse_vmobject_list_size;
9179 }
9180 
9181 __abortlike
9182 static void
9183 panic_proc_ro_task_backref_mismatch(task_t t, proc_ro_t ro)
9184 {
9185 	panic("proc_ro->task backref mismatch: t=%p, ro=%p, "
9186 	    "proc_ro_task(ro)=%p", t, ro, proc_ro_task(ro));
9187 }
9188 
9189 proc_ro_t
9190 task_get_ro(task_t t)
9191 {
9192 	proc_ro_t ro = (proc_ro_t)t->bsd_info_ro;
9193 
9194 	zone_require_ro(ZONE_ID_PROC_RO, sizeof(struct proc_ro), ro);
9195 	if (__improbable(proc_ro_task(ro) != t)) {
9196 		panic_proc_ro_task_backref_mismatch(t, ro);
9197 	}
9198 
9199 	return ro;
9200 }
9201