/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	kern/task.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
 *		David Black
 *
 *	Task management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to [email protected] any
 * improvements that they make and grant CSL redistribution rights.
 *
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 * Copyright (c) 2005 SPARTA, Inc.
 */

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_priv.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <mach/mach_vm.h>
#include <mach/semaphore.h>
#include <mach/task_info.h>
#include <mach/task_inspect.h>
#include <mach/task_special_ports.h>
#include <mach/sdt.h>
#include <mach/mach_test_upcall.h>

#include <ipc/ipc_importance.h>
#include <ipc/ipc_types.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_hash.h>
#include <ipc/ipc_init.h>

#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/coalition.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/kern_cdata.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>    /* for thread_wakeup */
#include <kern/ipc_tt.h>
#include <kern/host.h>
#include <kern/clock.h>
#include <kern/timer.h>
#include <kern/assert.h>
#include <kern/affinity.h>
#include <kern/exc_resource.h>
#include <kern/machine.h>
#include <kern/policy_internal.h>
#include <kern/restartable.h>
#include <kern/ipc_kobject.h>

#include <corpses/task_corpse.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif

#if MONOTONIC
#include <kern/monotonic.h>
#include <machine/monotonic.h>
#endif /* MONOTONIC */

#include <os/log.h>

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>         /* for kernel_map, ipc_kernel_map */
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_compressor_pager.h>

#include <sys/proc_ro.h>
#include <sys/resource.h>
#include <sys/signalvar.h> /* for coredump */
#include <sys/bsdtask_info.h>
/*
 * Exported interfaces
 */

#include <mach/task_server.h>
#include <mach/mach_host_server.h>
#include <mach/mach_port_server.h>

#include <vm/vm_shared_region.h>

#include <libkern/OSDebug.h>
#include <libkern/OSAtomic.h>
#include <libkern/section_keywords.h>

#include <mach-o/loader.h>
#include <kdp/kdp_dyld.h>

#include <kern/sfi.h>           /* picks up ledger.h */

#if CONFIG_MACF
#include <security/mac_mach_internal.h>
#endif

#include <IOKit/IOBSD.h>

#if KPERF
extern int kpc_force_all_ctrs(task_t, int);
#endif

SECURITY_READ_ONLY_LATE(task_t) kernel_task;

int64_t         next_taskuniqueid = 0;

ZONE_DEFINE_ID(ZONE_ID_TASK, "tasks", struct task, ZC_ZFREE_CLEARMEM);

extern uint32_t ipc_control_port_options;

extern boolean_t proc_send_synchronous_EXC_RESOURCE(void *p);
extern void task_disown_frozen_csegs(task_t owner_task);

static void task_port_no_senders(ipc_port_t, mach_msg_type_number_t);
static void task_port_with_flavor_no_senders(ipc_port_t, mach_msg_type_number_t);
static void task_suspension_no_senders(ipc_port_t, mach_msg_type_number_t);

IPC_KOBJECT_DEFINE(IKOT_TASK_NAME);
IPC_KOBJECT_DEFINE(IKOT_TASK_CONTROL,
    .iko_op_no_senders = task_port_no_senders);
IPC_KOBJECT_DEFINE(IKOT_TASK_READ,
    .iko_op_no_senders = task_port_with_flavor_no_senders);
IPC_KOBJECT_DEFINE(IKOT_TASK_INSPECT,
    .iko_op_no_senders = task_port_with_flavor_no_senders);
IPC_KOBJECT_DEFINE(IKOT_TASK_RESUME,
    .iko_op_no_senders = task_suspension_no_senders);

#if CONFIG_PROC_RESOURCE_LIMITS
static void task_fatal_port_no_senders(ipc_port_t, mach_msg_type_number_t);
static mach_port_t task_allocate_fatal_port(void);

IPC_KOBJECT_DEFINE(IKOT_TASK_FATAL,
    .iko_op_stable     = true,
    .iko_op_no_senders = task_fatal_port_no_senders);

extern void task_id_token_set_port(task_id_token_t token, ipc_port_t port);
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

/* Flag set by core audio when audio is playing. Used to stifle EXC_RESOURCE generation when active. */
int audio_active = 0;

/*
 *	structure for tracking zone usage
 *	Used either one per task/thread for all zones or <per-task,per-zone>.
 */
typedef struct zinfo_usage_store_t {
	/* These fields may be updated atomically, and so must be 8 byte aligned */
	uint64_t        alloc __attribute__((aligned(8)));              /* allocation counter */
	uint64_t        free __attribute__((aligned(8)));               /* free counter */
} zinfo_usage_store_t;
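
/*
 * Illustrative sketch (not part of the original source): because both fields
 * are 8-byte aligned, the counters can be maintained lock-free from the
 * allocation paths, e.g. with the os_atomic helpers. "size" below is a
 * stand-in for the size of the allocation being recorded.
 */
#if 0
	zinfo_usage_store_t *u = &tasks_tkm_private;   /* declared just below */
	os_atomic_add(&u->alloc, (uint64_t)size, relaxed);  /* on allocation */
	os_atomic_add(&u->free, (uint64_t)size, relaxed);   /* on free */
#endif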

zinfo_usage_store_t tasks_tkm_private;
zinfo_usage_store_t tasks_tkm_shared;

/* A container to accumulate statistics for expired tasks */
expired_task_statistics_t               dead_task_statistics;
LCK_SPIN_DECLARE_ATTR(dead_task_statistics_lock, &task_lck_grp, &task_lck_attr);

ledger_template_t task_ledger_template = NULL;

/* global lock for task_dyld_process_info_notify_{register, deregister, get_trap} */
LCK_GRP_DECLARE(g_dyldinfo_mtx_grp, "g_dyldinfo");
LCK_MTX_DECLARE(g_dyldinfo_mtx, &g_dyldinfo_mtx_grp);
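
/*
 * Illustrative usage (assumed, not taken from the surrounding code): the
 * register/deregister/get_trap paths are expected to bracket their shared
 * dyld notification state with this mutex:
 *
 *	lck_mtx_lock(&g_dyldinfo_mtx);
 *	... examine or update notification registration state ...
 *	lck_mtx_unlock(&g_dyldinfo_mtx);
 */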

SECURITY_READ_ONLY_LATE(struct _task_ledger_indices) task_ledgers __attribute__((used)) =
{.cpu_time = -1,
 .tkm_private = -1,
 .tkm_shared = -1,
 .phys_mem = -1,
 .wired_mem = -1,
 .internal = -1,
 .iokit_mapped = -1,
 .external = -1,
 .reusable = -1,
 .alternate_accounting = -1,
 .alternate_accounting_compressed = -1,
 .page_table = -1,
 .phys_footprint = -1,
 .internal_compressed = -1,
 .purgeable_volatile = -1,
 .purgeable_nonvolatile = -1,
 .purgeable_volatile_compressed = -1,
 .purgeable_nonvolatile_compressed = -1,
 .tagged_nofootprint = -1,
 .tagged_footprint = -1,
 .tagged_nofootprint_compressed = -1,
 .tagged_footprint_compressed = -1,
 .network_volatile = -1,
 .network_nonvolatile = -1,
 .network_volatile_compressed = -1,
 .network_nonvolatile_compressed = -1,
 .media_nofootprint = -1,
 .media_footprint = -1,
 .media_nofootprint_compressed = -1,
 .media_footprint_compressed = -1,
 .graphics_nofootprint = -1,
 .graphics_footprint = -1,
 .graphics_nofootprint_compressed = -1,
 .graphics_footprint_compressed = -1,
 .neural_nofootprint = -1,
 .neural_footprint = -1,
 .neural_nofootprint_compressed = -1,
 .neural_footprint_compressed = -1,
 .platform_idle_wakeups = -1,
 .interrupt_wakeups = -1,
#if CONFIG_SCHED_SFI
 .sfi_wait_times = { 0 /* initialized at runtime */},
#endif /* CONFIG_SCHED_SFI */
 .cpu_time_billed_to_me = -1,
 .cpu_time_billed_to_others = -1,
 .physical_writes = -1,
 .logical_writes = -1,
 .logical_writes_to_external = -1,
#if DEBUG || DEVELOPMENT
 .pages_grabbed = -1,
 .pages_grabbed_kern = -1,
 .pages_grabbed_iopl = -1,
 .pages_grabbed_upl = -1,
#endif
#if CONFIG_FREEZE
 .frozen_to_swap = -1,
#endif /* CONFIG_FREEZE */
 .energy_billed_to_me = -1,
 .energy_billed_to_others = -1,
#if CONFIG_PHYS_WRITE_ACCT
 .fs_metadata_writes = -1,
#endif /* CONFIG_PHYS_WRITE_ACCT */
#if CONFIG_MEMORYSTATUS
 .memorystatus_dirty_time = -1,
#endif /* CONFIG_MEMORYSTATUS */
 .swapins = -1, };

/* System sleep state */
boolean_t tasks_suspend_state;


void init_task_ledgers(void);
void task_footprint_exceeded(int warning, __unused const void *param0, __unused const void *param1);
void task_wakeups_rate_exceeded(int warning, __unused const void *param0, __unused const void *param1);
void task_io_rate_exceeded(int warning, const void *param0, __unused const void *param1);
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void);
void __attribute__((noinline)) PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, boolean_t is_fatal);
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor);
#if CONFIG_PROC_RESOURCE_LIMITS
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task_t task, int current_size, int soft_limit, int hard_limit);
mach_port_name_t current_task_get_fatal_port_name(void);
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

kern_return_t task_suspend_internal(task_t);
kern_return_t task_resume_internal(task_t);
static kern_return_t task_start_halt_locked(task_t task, boolean_t should_mark_corpse);

extern kern_return_t iokit_task_terminate(task_t task);
extern void          iokit_task_app_suspended_changed(task_t task);

extern kern_return_t exception_deliver(thread_t, exception_type_t, mach_exception_data_t, mach_msg_type_number_t, struct exception_action *, lck_mtx_t *);
extern void bsd_copythreadname(void *dst_uth, void *src_uth);
extern kern_return_t thread_resume(thread_t thread);

extern int exit_with_port_space_exception(void *proc, mach_exception_code_t code, mach_exception_subcode_t subcode);

// Warn tasks when they hit 80% of their memory limit.
#define PHYS_FOOTPRINT_WARNING_LEVEL 80

#define TASK_WAKEUPS_MONITOR_DEFAULT_LIMIT              150 /* wakeups per second */
#define TASK_WAKEUPS_MONITOR_DEFAULT_INTERVAL   300 /* in seconds. */

/*
 * Level (in terms of percentage of the limit) at which the wakeups monitor triggers telemetry.
 *
 * (ie when the task's wakeups rate exceeds 70% of the limit, start taking user
 *  stacktraces, aka micro-stackshots)
 */
#define TASK_WAKEUPS_MONITOR_DEFAULT_USTACKSHOTS_TRIGGER        70
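
/*
 * Example with the defaults above: a limit of 150 wakeups/sec and a 70%
 * trigger means micro-stackshot telemetry starts once a task is observed
 * exceeding 105 wakeups/sec.
 */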

int task_wakeups_monitor_interval; /* In seconds. Time period over which wakeups rate is observed */
int task_wakeups_monitor_rate;     /* In hz. Maximum allowable wakeups per task before EXC_RESOURCE is sent */

unsigned int task_wakeups_monitor_ustackshots_trigger_pct; /* Percentage. Level at which we start gathering telemetry. */

int disable_exc_resource; /* Global override to suppress EXC_RESOURCE for resource monitor violations. */

ledger_amount_t max_task_footprint = 0;  /* Per-task limit on physical memory consumption in bytes     */
unsigned int max_task_footprint_warning_level = 0;  /* Per-task limit warning percentage */
int max_task_footprint_mb = 0;  /* Per-task limit on physical memory consumption in megabytes */

/* I/O Monitor Limits */
#define IOMON_DEFAULT_LIMIT                     (20480ull)      /* MB of logical/physical I/O */
#define IOMON_DEFAULT_INTERVAL                  (86400ull)      /* in seconds */

uint64_t task_iomon_limit_mb;           /* Per-task I/O monitor limit in MBs */
uint64_t task_iomon_interval_secs;      /* Per-task I/O monitor interval in secs */

#define IO_TELEMETRY_DEFAULT_LIMIT              (10ll * 1024ll * 1024ll)
int64_t io_telemetry_limit;                     /* Threshold to take a microstackshot (0 indicates I/O telemetry is turned off) */
int64_t global_logical_writes_count = 0;        /* Global count for logical writes */
int64_t global_logical_writes_to_external_count = 0;        /* Global count for logical writes to external storage */
static boolean_t global_update_logical_writes(int64_t, int64_t*);

#define TASK_MAX_THREAD_LIMIT 256

#if MACH_ASSERT
int pmap_ledgers_panic = 1;
int pmap_ledgers_panic_leeway = 3;
#endif /* MACH_ASSERT */

int task_max = CONFIG_TASK_MAX; /* Max number of tasks */

#if CONFIG_COREDUMP
int hwm_user_cores = 0; /* high watermark violations generate user core files */
#endif

#ifdef MACH_BSD
extern uint32_t proc_platform(const struct proc *);
extern uint32_t proc_sdk(struct proc *);
extern void     proc_getexecutableuuid(void *, unsigned char *, unsigned long);
extern int      proc_pid(struct proc *p);
extern int      proc_selfpid(void);
extern struct proc *current_proc(void);
extern char     *proc_name_address(struct proc *p);
extern uint64_t get_dispatchqueue_offset_from_proc(void *);
extern int kevent_proc_copy_uptrs(void *proc, uint64_t *buf, uint32_t bufsize);
extern void workq_proc_suspended(struct proc *p);
extern void workq_proc_resumed(struct proc *p);

#if CONFIG_MEMORYSTATUS
extern void     proc_memstat_skip(struct proc* p, boolean_t set);
extern void     memorystatus_on_ledger_footprint_exceeded(int warning, boolean_t memlimit_is_active, boolean_t memlimit_is_fatal);
extern void     memorystatus_log_exception(const int max_footprint_mb, boolean_t memlimit_is_active, boolean_t memlimit_is_fatal);
extern boolean_t memorystatus_allowed_vm_map_fork(task_t task);
extern uint64_t  memorystatus_available_memory_internal(struct proc *p);

#if DEVELOPMENT || DEBUG
extern void memorystatus_abort_vm_map_fork(task_t);
#endif

#endif /* CONFIG_MEMORYSTATUS */

#endif /* MACH_BSD */

#if DEVELOPMENT || DEBUG
int exc_resource_threads_enabled;
#endif /* DEVELOPMENT || DEBUG */

/* Boot-arg that turns on fatal pac exception delivery for all first-party apps */
static TUNABLE(bool, enable_pac_exception, "enable_pac_exception", false);

/*
 * Defaults for controllable EXC_GUARD behaviors
 *
 * Internal builds are fatal by default (except BRIDGE).
 * Create an alternate set of defaults for special processes by name.
 */
struct task_exc_guard_named_default {
	char *name;
	uint32_t behavior;
};
#define _TASK_EXC_GUARD_MP_CORPSE  (TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_MP_CORPSE)
#define _TASK_EXC_GUARD_MP_ONCE    (_TASK_EXC_GUARD_MP_CORPSE | TASK_EXC_GUARD_MP_ONCE)
#define _TASK_EXC_GUARD_MP_FATAL   (TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_MP_FATAL)

#define _TASK_EXC_GUARD_VM_CORPSE  (TASK_EXC_GUARD_VM_DELIVER | TASK_EXC_GUARD_VM_CORPSE)
#define _TASK_EXC_GUARD_VM_ONCE    (_TASK_EXC_GUARD_VM_CORPSE | TASK_EXC_GUARD_VM_ONCE)
#define _TASK_EXC_GUARD_VM_FATAL   (TASK_EXC_GUARD_VM_DELIVER | TASK_EXC_GUARD_VM_FATAL)

#define _TASK_EXC_GUARD_ALL_CORPSE (_TASK_EXC_GUARD_MP_CORPSE | _TASK_EXC_GUARD_VM_CORPSE)
#define _TASK_EXC_GUARD_ALL_ONCE   (_TASK_EXC_GUARD_MP_ONCE | _TASK_EXC_GUARD_VM_ONCE)
#define _TASK_EXC_GUARD_ALL_FATAL  (_TASK_EXC_GUARD_MP_FATAL | _TASK_EXC_GUARD_VM_FATAL)
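
/*
 * Expanding the helpers above: _TASK_EXC_GUARD_ALL_ONCE, for example, is
 * (MP_DELIVER | MP_CORPSE | MP_ONCE) | (VM_DELIVER | VM_CORPSE | VM_ONCE),
 * i.e. deliver the exception and generate a corpse, at most once, for both
 * mach-port and VM guard violations.
 */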

/* cannot turn off FATAL and DELIVER bit if set */
uint32_t task_exc_guard_no_unset_mask = TASK_EXC_GUARD_MP_FATAL | TASK_EXC_GUARD_VM_FATAL |
    TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_VM_DELIVER;
/* cannot turn on ONCE bit if unset */
uint32_t task_exc_guard_no_set_mask = TASK_EXC_GUARD_MP_ONCE | TASK_EXC_GUARD_VM_ONCE;

#if !defined(XNU_TARGET_OS_BRIDGE)

uint32_t task_exc_guard_default = _TASK_EXC_GUARD_ALL_FATAL;
uint32_t task_exc_guard_config_mask = TASK_EXC_GUARD_MP_ALL | TASK_EXC_GUARD_VM_ALL;
/*
 * These "by-process-name" default overrides are intended to be a short-term fix to
 * quickly get over races between changes introducing new EXC_GUARD raising behaviors
 * in some process and a change in default behavior for same. We should ship with
 * these lists empty (by fixing the bugs, or explicitly changing the task's EXC_GUARD
 * exception behavior via task_set_exc_guard_behavior()).
 *
 * XXX Remember to add/remove TASK_EXC_GUARD_HONOR_NAMED_DEFAULTS back to
 * task_exc_guard_default when transitioning this list between empty and
 * non-empty.
 */
static struct task_exc_guard_named_default task_exc_guard_named_defaults[] = {};

#else /* !defined(XNU_TARGET_OS_BRIDGE) */

uint32_t task_exc_guard_default = _TASK_EXC_GUARD_ALL_ONCE;
uint32_t task_exc_guard_config_mask = TASK_EXC_GUARD_MP_ALL | TASK_EXC_GUARD_VM_ALL;
static struct task_exc_guard_named_default task_exc_guard_named_defaults[] = {};

#endif /* !defined(XNU_TARGET_OS_BRIDGE) */

/* Forwards */

static void task_hold_locked(task_t task);
static void task_wait_locked(task_t task, boolean_t until_not_runnable);
static void task_release_locked(task_t task);

static void task_synchronizer_destroy_all(task_t task);
static os_ref_count_t
task_add_turnstile_watchports_locked(
	task_t                      task,
	struct task_watchports      *watchports,
	struct task_watchport_elem  **previous_elem_array,
	ipc_port_t                  *portwatch_ports,
	uint32_t                    portwatch_count);

static os_ref_count_t
task_remove_turnstile_watchports_locked(
	task_t                 task,
	struct task_watchports *watchports,
	ipc_port_t             *port_freelist);

static struct task_watchports *
task_watchports_alloc_init(
	task_t        task,
	thread_t      thread,
	uint32_t      count);

static void
task_watchports_deallocate(
	struct task_watchports *watchports);

void
task_set_64bit(
	task_t task,
	boolean_t is_64bit,
	boolean_t is_64bit_data)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__arm64__)
	thread_t thread;
#endif /* defined(__i386__) || defined(__x86_64__) || defined(__arm64__) */

	task_lock(task);

	/*
	 * Switching to/from 64-bit address spaces
	 */
	if (is_64bit) {
		if (!task_has_64Bit_addr(task)) {
			task_set_64Bit_addr(task);
		}
	} else {
		if (task_has_64Bit_addr(task)) {
			task_clear_64Bit_addr(task);
		}
	}

	/*
	 * Switching to/from 64-bit register state.
	 */
	if (is_64bit_data) {
		if (task_has_64Bit_data(task)) {
			goto out;
		}

		task_set_64Bit_data(task);
	} else {
		if (!task_has_64Bit_data(task)) {
			goto out;
		}

		task_clear_64Bit_data(task);
	}

	/* FIXME: On x86, the thread save state flavor can diverge from the
	 * task's 64-bit feature flag due to the 32-bit/64-bit register save
	 * state dichotomy. Since we can be pre-empted in this interval,
	 * certain routines may observe the thread as being in an inconsistent
	 * state with respect to its task's 64-bitness.
	 */

#if defined(__x86_64__) || defined(__arm64__)
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		thread_mtx_lock(thread);
		machine_thread_switch_addrmode(thread);
		thread_mtx_unlock(thread);
	}
#endif /* defined(__x86_64__) || defined(__arm64__) */

out:
	task_unlock(task);
}

bool
task_get_64bit_addr(task_t task)
{
	return task_has_64Bit_addr(task);
}

bool
task_get_64bit_data(task_t task)
{
	return task_has_64Bit_data(task);
}

void
task_set_platform_binary(
	task_t task,
	boolean_t is_platform)
{
	task_lock(task);
	if (is_platform) {
		task->t_flags |= TF_PLATFORM;
	} else {
		task->t_flags &= ~(TF_PLATFORM);
	}
	task_unlock(task);
}

void
task_set_immovable_pinned(task_t task)
{
	ipc_task_set_immovable_pinned(task);
}

/*
 * Set or clear per-task TF_CA_CLIENT_WI flag according to specified argument.
 * Returns "false" if flag is already set, and "true" in other cases.
 */
bool
task_set_ca_client_wi(
	task_t task,
	boolean_t set_or_clear)
{
	bool ret = true;
	task_lock(task);
	if (set_or_clear) {
		/* Tasks can have only one CA_CLIENT work interval */
		if (task->t_flags & TF_CA_CLIENT_WI) {
			ret = false;
		} else {
			task->t_flags |= TF_CA_CLIENT_WI;
		}
	} else {
		task->t_flags &= ~TF_CA_CLIENT_WI;
	}
	task_unlock(task);
	return ret;
}
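
/*
 * Hypothetical caller sketch (not from this file), showing how the return
 * value above is meant to be consumed: a second CA_CLIENT work interval on
 * the same task is refused.
 */
#if 0
	if (!task_set_ca_client_wi(task, TRUE)) {
		/* task already owns a CA_CLIENT work interval; reject this one */
		return KERN_FAILURE;
	}
#endif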

void
task_set_dyld_info(
	task_t task,
	mach_vm_address_t addr,
	mach_vm_size_t size)
{
	task_lock(task);
	task->all_image_info_addr = addr;
	task->all_image_info_size = size;
	task_unlock(task);
}

void
task_set_mach_header_address(
	task_t task,
	mach_vm_address_t addr)
{
	task_lock(task);
	task->mach_header_vm_address = addr;
	task_unlock(task);
}

void
task_bank_reset(__unused task_t task)
{
	if (task->bank_context != NULL) {
		bank_task_destroy(task);
	}
}

/*
 * NOTE: This should only be called when the P_LINTRANSIT
 *	 flag is set (the proc_trans lock is held) on the
 *	 proc associated with the task.
 */
void
task_bank_init(__unused task_t task)
{
	if (task->bank_context != NULL) {
		panic("Task bank init called with non null bank context for task: %p and bank_context: %p", task, task->bank_context);
	}
	bank_task_initialize(task);
}

void
task_set_did_exec_flag(task_t task)
{
	task->t_procflags |= TPF_DID_EXEC;
}

void
task_clear_exec_copy_flag(task_t task)
{
	task->t_procflags &= ~TPF_EXEC_COPY;
}

event_t
task_get_return_wait_event(task_t task)
{
	return (event_t)&task->returnwait_inheritor;
}

void
task_clear_return_wait(task_t task, uint32_t flags)
{
	if (flags & TCRW_CLEAR_INITIAL_WAIT) {
		thread_wakeup(task_get_return_wait_event(task));
	}

	if (flags & TCRW_CLEAR_FINAL_WAIT) {
		is_write_lock(task->itk_space);

		task->t_returnwaitflags &= ~TRW_LRETURNWAIT;
		task->returnwait_inheritor = NULL;

		if (task->t_returnwaitflags & TRW_LRETURNWAITER) {
			struct turnstile *turnstile = turnstile_prepare((uintptr_t) task_get_return_wait_event(task),
			    NULL, TURNSTILE_NULL, TURNSTILE_ULOCK);

			waitq_wakeup64_all(&turnstile->ts_waitq,
			    CAST_EVENT64_T(task_get_return_wait_event(task)),
			    THREAD_AWAKENED, 0);

			turnstile_update_inheritor(turnstile, NULL,
			    TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD);
			turnstile_update_inheritor_complete(turnstile, TURNSTILE_INTERLOCK_HELD);

			turnstile_complete((uintptr_t) task_get_return_wait_event(task), NULL, NULL, TURNSTILE_ULOCK);
			turnstile_cleanup();
			task->t_returnwaitflags &= ~TRW_LRETURNWAITER;
		}
		is_write_unlock(task->itk_space);
	}
}

void __attribute__((noreturn))
task_wait_to_return(void)
{
	task_t task = current_task();

	is_write_lock(task->itk_space);

	if (task->t_returnwaitflags & TRW_LRETURNWAIT) {
		struct turnstile *turnstile = turnstile_prepare((uintptr_t) task_get_return_wait_event(task),
		    NULL, TURNSTILE_NULL, TURNSTILE_ULOCK);

		do {
			task->t_returnwaitflags |= TRW_LRETURNWAITER;
			turnstile_update_inheritor(turnstile, task->returnwait_inheritor,
			    (TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_THREAD));

			waitq_assert_wait64(&turnstile->ts_waitq,
			    CAST_EVENT64_T(task_get_return_wait_event(task)),
			    THREAD_UNINT, TIMEOUT_WAIT_FOREVER);

			is_write_unlock(task->itk_space);

			turnstile_update_inheritor_complete(turnstile, TURNSTILE_INTERLOCK_NOT_HELD);

			thread_block(THREAD_CONTINUE_NULL);

			is_write_lock(task->itk_space);
		} while (task->t_returnwaitflags & TRW_LRETURNWAIT);

		turnstile_complete((uintptr_t) task_get_return_wait_event(task), NULL, NULL, TURNSTILE_ULOCK);
	}

	is_write_unlock(task->itk_space);
	turnstile_cleanup();


#if CONFIG_MACF
	/*
	 * Before jumping to userspace and allowing this process to execute any code,
	 * notify any interested parties.
	 */
	mac_proc_notify_exec_complete(current_proc());
#endif

	thread_bootstrap_return();
}
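
/*
 * Reading aid for the two routines above (a summary, not original text):
 * the new task's first thread parks in task_wait_to_return(), donating its
 * priority to returnwait_inheritor through the turnstile. When setup
 * finishes, the creator calls task_clear_return_wait(task,
 * TCRW_CLEAR_FINAL_WAIT), which clears TRW_LRETURNWAIT and wakes the waiter;
 * the thread then falls through to thread_bootstrap_return() and userspace.
 */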

#ifdef CONFIG_32BIT_TELEMETRY
boolean_t
task_consume_32bit_log_flag(task_t task)
{
	if ((task->t_procflags & TPF_LOG_32BIT_TELEMETRY) != 0) {
		task->t_procflags &= ~TPF_LOG_32BIT_TELEMETRY;
		return TRUE;
	} else {
		return FALSE;
	}
}

void
task_set_32bit_log_flag(task_t task)
{
	task->t_procflags |= TPF_LOG_32BIT_TELEMETRY;
}
#endif /* CONFIG_32BIT_TELEMETRY */

boolean_t
task_is_exec_copy(task_t task)
{
	return task_is_exec_copy_internal(task);
}

boolean_t
task_did_exec(task_t task)
{
	return task_did_exec_internal(task);
}

boolean_t
task_is_active(task_t task)
{
	return task->active;
}

boolean_t
task_is_halting(task_t task)
{
	return task->halting;
}

void
task_init(void)
{
	/*
	 * Configure per-task memory limit.
	 * The boot-arg is interpreted as Megabytes,
	 * and takes precedence over the device tree.
	 * Setting the boot-arg to 0 disables task limits.
	 */
	if (!PE_parse_boot_argn("max_task_pmem", &max_task_footprint_mb,
	    sizeof(max_task_footprint_mb))) {
		/*
		 * No limit was found in boot-args, so go look in the device tree.
		 */
		if (!PE_get_default("kern.max_task_pmem", &max_task_footprint_mb,
		    sizeof(max_task_footprint_mb))) {
			/*
			 * No limit was found in device tree.
			 */
			max_task_footprint_mb = 0;
		}
	}

	if (max_task_footprint_mb != 0) {
#if CONFIG_MEMORYSTATUS
		if (max_task_footprint_mb < 50) {
			printf("Warning: max_task_pmem %d below minimum.\n",
			    max_task_footprint_mb);
			max_task_footprint_mb = 50;
		}
		printf("Limiting task physical memory footprint to %d MB\n",
		    max_task_footprint_mb);

		max_task_footprint = (ledger_amount_t)max_task_footprint_mb * 1024 * 1024;         // Convert MB to bytes

		/*
		 * Configure the per-task memory limit warning level.
		 * This is computed as a percentage.
		 */
		max_task_footprint_warning_level = 0;

		if (max_mem < 0x40000000) {
			/*
			 * On devices with < 1GB of memory:
			 *    -- set warnings to 50MB below the per-task limit.
			 */
			if (max_task_footprint_mb > 50) {
				max_task_footprint_warning_level = ((max_task_footprint_mb - 50) * 100) / max_task_footprint_mb;
			}
		} else {
			/*
			 * On devices with >= 1GB of memory:
			 *    -- set warnings to 100MB below the per-task limit.
			 */
			if (max_task_footprint_mb > 100) {
				max_task_footprint_warning_level = ((max_task_footprint_mb - 100) * 100) / max_task_footprint_mb;
			}
		}

		/*
		 * Never allow warning level to land below the default.
		 */
		if (max_task_footprint_warning_level < PHYS_FOOTPRINT_WARNING_LEVEL) {
			max_task_footprint_warning_level = PHYS_FOOTPRINT_WARNING_LEVEL;
		}

		printf("Limiting task physical memory warning to %d%%\n", max_task_footprint_warning_level);

#else
		printf("Warning: max_task_pmem specified, but jetsam not configured; ignoring.\n");
#endif /* CONFIG_MEMORYSTATUS */
	}
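
	/*
	 * Worked example for the computation above: with max_task_pmem=500 on a
	 * device with >= 1GB of memory, the warning level computes to
	 * ((500 - 100) * 100) / 500 = 80%, exactly the PHYS_FOOTPRINT_WARNING_LEVEL
	 * floor; smaller limits yield a lower raw percentage and are clamped up to it.
	 */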

#if DEVELOPMENT || DEBUG
	if (!PE_parse_boot_argn("exc_resource_threads",
	    &exc_resource_threads_enabled,
	    sizeof(exc_resource_threads_enabled))) {
		exc_resource_threads_enabled = 1;
	}
	PE_parse_boot_argn("task_exc_guard_default",
	    &task_exc_guard_default,
	    sizeof(task_exc_guard_default));
#endif /* DEVELOPMENT || DEBUG */

#if CONFIG_COREDUMP
	if (!PE_parse_boot_argn("hwm_user_cores", &hwm_user_cores,
	    sizeof(hwm_user_cores))) {
		hwm_user_cores = 0;
	}
#endif

	proc_init_cpumon_params();

	if (!PE_parse_boot_argn("task_wakeups_monitor_rate", &task_wakeups_monitor_rate, sizeof(task_wakeups_monitor_rate))) {
		task_wakeups_monitor_rate = TASK_WAKEUPS_MONITOR_DEFAULT_LIMIT;
	}

	if (!PE_parse_boot_argn("task_wakeups_monitor_interval", &task_wakeups_monitor_interval, sizeof(task_wakeups_monitor_interval))) {
		task_wakeups_monitor_interval = TASK_WAKEUPS_MONITOR_DEFAULT_INTERVAL;
	}

	if (!PE_parse_boot_argn("task_wakeups_monitor_ustackshots_trigger_pct", &task_wakeups_monitor_ustackshots_trigger_pct,
	    sizeof(task_wakeups_monitor_ustackshots_trigger_pct))) {
		task_wakeups_monitor_ustackshots_trigger_pct = TASK_WAKEUPS_MONITOR_DEFAULT_USTACKSHOTS_TRIGGER;
	}

	if (!PE_parse_boot_argn("disable_exc_resource", &disable_exc_resource,
	    sizeof(disable_exc_resource))) {
		disable_exc_resource = 0;
	}

	if (!PE_parse_boot_argn("task_iomon_limit_mb", &task_iomon_limit_mb, sizeof(task_iomon_limit_mb))) {
		task_iomon_limit_mb = IOMON_DEFAULT_LIMIT;
	}

	if (!PE_parse_boot_argn("task_iomon_interval_secs", &task_iomon_interval_secs, sizeof(task_iomon_interval_secs))) {
		task_iomon_interval_secs = IOMON_DEFAULT_INTERVAL;
	}

	if (!PE_parse_boot_argn("io_telemetry_limit", &io_telemetry_limit, sizeof(io_telemetry_limit))) {
		io_telemetry_limit = IO_TELEMETRY_DEFAULT_LIMIT;
	}

/*
 * If we have coalitions, coalition_init() will call init_task_ledgers() as it
 * sets up the ledgers for the default coalition. If we don't have coalitions,
 * then we have to call it now.
 */
#if CONFIG_COALITIONS
	assert(task_ledger_template);
#else /* CONFIG_COALITIONS */
	init_task_ledgers();
#endif /* CONFIG_COALITIONS */

	task_ref_init();

	/*
	 * Create the kernel task as the first task.
	 */
#ifdef __LP64__
	if (task_create_internal(TASK_NULL, NULL, NULL, FALSE, TRUE, TRUE, TF_NONE, TPF_NONE, TWF_NONE, &kernel_task) != KERN_SUCCESS)
#else
	if (task_create_internal(TASK_NULL, NULL, NULL, FALSE, FALSE, FALSE, TF_NONE, TPF_NONE, TWF_NONE, &kernel_task) != KERN_SUCCESS)
#endif
	{ panic("task_init");}

#if defined(HAS_APPLE_PAC)
	kernel_task->rop_pid = ml_default_rop_pid();
	kernel_task->jop_pid = ml_default_jop_pid();
	// kernel_task never runs at EL0, but machine_thread_state_convert_from/to_user() relies on
	// disable_user_jop to be false for kernel threads (e.g. in exception delivery on thread_exception_daemon)
	ml_task_set_disable_user_jop(kernel_task, FALSE);
#endif

	vm_map_deallocate(kernel_task->map);
	kernel_task->map = kernel_map;
}

/*
 * Create a task running in the kernel address space.  It may
 * have its own map of size mem_size and may have ipc privileges.
 */
kern_return_t
kernel_task_create(
	__unused task_t         parent_task,
	__unused vm_offset_t            map_base,
	__unused vm_size_t              map_size,
	__unused task_t         *child_task)
{
	return KERN_INVALID_ARGUMENT;
}

kern_return_t
task_create(
	task_t                          parent_task,
	__unused ledger_port_array_t    ledger_ports,
	__unused mach_msg_type_number_t num_ledger_ports,
	__unused boolean_t              inherit_memory,
	__unused task_t                 *child_task)        /* OUT */
{
	if (parent_task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * No longer supported: too many calls assume that a task has a valid
	 * process attached.
	 */
	return KERN_FAILURE;
}

/*
 * Task ledgers
 * ------------
 *
 * phys_footprint
 *   Physical footprint: This is the sum of:
 *     + (internal - alternate_accounting)
 *     + (internal_compressed - alternate_accounting_compressed)
 *     + iokit_mapped
 *     + purgeable_nonvolatile
 *     + purgeable_nonvolatile_compressed
 *     + page_table
 *
 * internal
 *   The task's anonymous memory, which on iOS is always resident.
 *
 * internal_compressed
 *   Amount of this task's internal memory which is held by the compressor.
 *   Such memory is no longer actually resident for the task [i.e., resident in its pmap],
 *   and could be either decompressed back into memory, or paged out to storage, depending
 *   on our implementation.
 *
 * iokit_mapped
 *   IOKit mappings: The total size of all IOKit mappings in this task [regardless of
 *    clean/dirty or internal/external state].
 *
 * alternate_accounting
 *   The number of internal dirty pages which are part of IOKit mappings. By definition, these pages
 *   are counted in both internal *and* iokit_mapped, so we must subtract them from the total to avoid
 *   double counting.
 *
 * pages_grabbed
 *   pages_grabbed counts all page grabs in a task.  It is also broken out into three subtypes
 *   which track UPL, IOPL and Kernel page grabs.
 */
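
/*
 * Minimal sketch (not part of the original source) of recomputing the
 * documented phys_footprint sum from a live task's ledger, using only the
 * entries described above and the existing ledger_get_balance() interface:
 */
#if 0
static ledger_amount_t
task_phys_footprint_recomputed(task_t task)
{
	ledger_amount_t internal, internal_comp, alt, alt_comp;
	ledger_amount_t iokit, purg_nv, purg_nv_comp, page_table;

	ledger_get_balance(task->ledger, task_ledgers.internal, &internal);
	ledger_get_balance(task->ledger, task_ledgers.internal_compressed, &internal_comp);
	ledger_get_balance(task->ledger, task_ledgers.alternate_accounting, &alt);
	ledger_get_balance(task->ledger, task_ledgers.alternate_accounting_compressed, &alt_comp);
	ledger_get_balance(task->ledger, task_ledgers.iokit_mapped, &iokit);
	ledger_get_balance(task->ledger, task_ledgers.purgeable_nonvolatile, &purg_nv);
	ledger_get_balance(task->ledger, task_ledgers.purgeable_nonvolatile_compressed, &purg_nv_comp);
	ledger_get_balance(task->ledger, task_ledgers.page_table, &page_table);

	/* the sum the phys_footprint entry is documented to track */
	return (internal - alt) + (internal_comp - alt_comp) +
	       iokit + purg_nv + purg_nv_comp + page_table;
}
#endif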
void
init_task_ledgers(void)
{
	ledger_template_t t;

	assert(task_ledger_template == NULL);
	assert(kernel_task == TASK_NULL);

#if MACH_ASSERT
	PE_parse_boot_argn("pmap_ledgers_panic",
	    &pmap_ledgers_panic,
	    sizeof(pmap_ledgers_panic));
	PE_parse_boot_argn("pmap_ledgers_panic_leeway",
	    &pmap_ledgers_panic_leeway,
	    sizeof(pmap_ledgers_panic_leeway));
#endif /* MACH_ASSERT */

	if ((t = ledger_template_create("Per-task ledger")) == NULL) {
		panic("couldn't create task ledger template");
	}

	task_ledgers.cpu_time = ledger_entry_add(t, "cpu_time", "sched", "ns");
	task_ledgers.tkm_private = ledger_entry_add(t, "tkm_private",
	    "physmem", "bytes");
	task_ledgers.tkm_shared = ledger_entry_add(t, "tkm_shared", "physmem",
	    "bytes");
	task_ledgers.phys_mem = ledger_entry_add(t, "phys_mem", "physmem",
	    "bytes");
	task_ledgers.wired_mem = ledger_entry_add(t, "wired_mem", "physmem",
	    "bytes");
	task_ledgers.internal = ledger_entry_add(t, "internal", "physmem",
	    "bytes");
	task_ledgers.iokit_mapped = ledger_entry_add_with_flags(t, "iokit_mapped", "mappings",
	    "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.alternate_accounting = ledger_entry_add_with_flags(t, "alternate_accounting", "physmem",
	    "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.alternate_accounting_compressed = ledger_entry_add_with_flags(t, "alternate_accounting_compressed", "physmem",
	    "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.page_table = ledger_entry_add_with_flags(t, "page_table", "physmem",
	    "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.phys_footprint = ledger_entry_add(t, "phys_footprint", "physmem",
	    "bytes");
	task_ledgers.internal_compressed = ledger_entry_add(t, "internal_compressed", "physmem",
	    "bytes");
	task_ledgers.reusable = ledger_entry_add(t, "reusable", "physmem", "bytes");
	task_ledgers.external = ledger_entry_add(t, "external", "physmem", "bytes");
	task_ledgers.purgeable_volatile = ledger_entry_add_with_flags(t, "purgeable_volatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.purgeable_nonvolatile = ledger_entry_add_with_flags(t, "purgeable_nonvolatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.purgeable_volatile_compressed = ledger_entry_add_with_flags(t, "purgeable_volatile_compress", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.purgeable_nonvolatile_compressed = ledger_entry_add_with_flags(t, "purgeable_nonvolatile_compress", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
#if DEBUG || DEVELOPMENT
	task_ledgers.pages_grabbed = ledger_entry_add_with_flags(t, "pages_grabbed", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.pages_grabbed_kern = ledger_entry_add_with_flags(t, "pages_grabbed_kern", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.pages_grabbed_iopl = ledger_entry_add_with_flags(t, "pages_grabbed_iopl", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.pages_grabbed_upl = ledger_entry_add_with_flags(t, "pages_grabbed_upl", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
#endif
	task_ledgers.tagged_nofootprint = ledger_entry_add_with_flags(t, "tagged_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.tagged_footprint = ledger_entry_add_with_flags(t, "tagged_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.tagged_nofootprint_compressed = ledger_entry_add_with_flags(t, "tagged_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.tagged_footprint_compressed = ledger_entry_add_with_flags(t, "tagged_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.network_volatile = ledger_entry_add_with_flags(t, "network_volatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.network_nonvolatile = ledger_entry_add_with_flags(t, "network_nonvolatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.network_volatile_compressed = ledger_entry_add_with_flags(t, "network_volatile_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.network_nonvolatile_compressed = ledger_entry_add_with_flags(t, "network_nonvolatile_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.media_nofootprint = ledger_entry_add_with_flags(t, "media_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.media_footprint = ledger_entry_add_with_flags(t, "media_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.media_nofootprint_compressed = ledger_entry_add_with_flags(t, "media_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.media_footprint_compressed = ledger_entry_add_with_flags(t, "media_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.graphics_nofootprint = ledger_entry_add_with_flags(t, "graphics_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.graphics_footprint = ledger_entry_add_with_flags(t, "graphics_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.graphics_nofootprint_compressed = ledger_entry_add_with_flags(t, "graphics_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.graphics_footprint_compressed = ledger_entry_add_with_flags(t, "graphics_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.neural_nofootprint = ledger_entry_add_with_flags(t, "neural_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.neural_footprint = ledger_entry_add_with_flags(t, "neural_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.neural_nofootprint_compressed = ledger_entry_add_with_flags(t, "neural_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
	task_ledgers.neural_footprint_compressed = ledger_entry_add_with_flags(t, "neural_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);

#if CONFIG_FREEZE
	task_ledgers.frozen_to_swap = ledger_entry_add(t, "frozen_to_swap", "physmem", "bytes");
#endif /* CONFIG_FREEZE */

	task_ledgers.platform_idle_wakeups = ledger_entry_add(t, "platform_idle_wakeups", "power",
	    "count");
	task_ledgers.interrupt_wakeups = ledger_entry_add(t, "interrupt_wakeups", "power",
	    "count");

#if CONFIG_SCHED_SFI
	sfi_class_id_t class_id, ledger_alias;
	for (class_id = SFI_CLASS_UNSPECIFIED; class_id < MAX_SFI_CLASS_ID; class_id++) {
		task_ledgers.sfi_wait_times[class_id] = -1;
	}

	/* don't account for UNSPECIFIED */
	for (class_id = SFI_CLASS_UNSPECIFIED + 1; class_id < MAX_SFI_CLASS_ID; class_id++) {
		ledger_alias = sfi_get_ledger_alias_for_class(class_id);
		if (ledger_alias != SFI_CLASS_UNSPECIFIED) {
			/* Check to see if alias has been registered yet */
			if (task_ledgers.sfi_wait_times[ledger_alias] != -1) {
				task_ledgers.sfi_wait_times[class_id] = task_ledgers.sfi_wait_times[ledger_alias];
			} else {
				/* Otherwise, initialize it first */
				task_ledgers.sfi_wait_times[class_id] = task_ledgers.sfi_wait_times[ledger_alias] = sfi_ledger_entry_add(t, ledger_alias);
			}
		} else {
			task_ledgers.sfi_wait_times[class_id] = sfi_ledger_entry_add(t, class_id);
		}

		if (task_ledgers.sfi_wait_times[class_id] < 0) {
			panic("couldn't create entries for task ledger template for SFI class 0x%x", class_id);
		}
	}

	assert(task_ledgers.sfi_wait_times[MAX_SFI_CLASS_ID - 1] != -1);
#endif /* CONFIG_SCHED_SFI */

	task_ledgers.cpu_time_billed_to_me = ledger_entry_add(t, "cpu_time_billed_to_me", "sched", "ns");
	task_ledgers.cpu_time_billed_to_others = ledger_entry_add(t, "cpu_time_billed_to_others", "sched", "ns");
	task_ledgers.physical_writes = ledger_entry_add(t, "physical_writes", "res", "bytes");
	task_ledgers.logical_writes = ledger_entry_add(t, "logical_writes", "res", "bytes");
	task_ledgers.logical_writes_to_external = ledger_entry_add(t, "logical_writes_to_external", "res", "bytes");
#if CONFIG_PHYS_WRITE_ACCT
	task_ledgers.fs_metadata_writes = ledger_entry_add(t, "fs_metadata_writes", "res", "bytes");
#endif /* CONFIG_PHYS_WRITE_ACCT */
	task_ledgers.energy_billed_to_me = ledger_entry_add(t, "energy_billed_to_me", "power", "nj");
	task_ledgers.energy_billed_to_others = ledger_entry_add(t, "energy_billed_to_others", "power", "nj");

#if CONFIG_MEMORYSTATUS
	task_ledgers.memorystatus_dirty_time = ledger_entry_add(t, "memorystatus_dirty_time", "physmem", "ns");
#endif /* CONFIG_MEMORYSTATUS */

	task_ledgers.swapins = ledger_entry_add_with_flags(t, "swapins", "physmem", "bytes",
	    LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);

	if ((task_ledgers.cpu_time < 0) ||
	    (task_ledgers.tkm_private < 0) ||
	    (task_ledgers.tkm_shared < 0) ||
	    (task_ledgers.phys_mem < 0) ||
	    (task_ledgers.wired_mem < 0) ||
	    (task_ledgers.internal < 0) ||
	    (task_ledgers.external < 0) ||
	    (task_ledgers.reusable < 0) ||
	    (task_ledgers.iokit_mapped < 0) ||
	    (task_ledgers.alternate_accounting < 0) ||
	    (task_ledgers.alternate_accounting_compressed < 0) ||
	    (task_ledgers.page_table < 0) ||
	    (task_ledgers.phys_footprint < 0) ||
	    (task_ledgers.internal_compressed < 0) ||
	    (task_ledgers.purgeable_volatile < 0) ||
	    (task_ledgers.purgeable_nonvolatile < 0) ||
	    (task_ledgers.purgeable_volatile_compressed < 0) ||
	    (task_ledgers.purgeable_nonvolatile_compressed < 0) ||
	    (task_ledgers.tagged_nofootprint < 0) ||
	    (task_ledgers.tagged_footprint < 0) ||
	    (task_ledgers.tagged_nofootprint_compressed < 0) ||
	    (task_ledgers.tagged_footprint_compressed < 0) ||
#if CONFIG_FREEZE
	    (task_ledgers.frozen_to_swap < 0) ||
#endif /* CONFIG_FREEZE */
	    (task_ledgers.network_volatile < 0) ||
	    (task_ledgers.network_nonvolatile < 0) ||
	    (task_ledgers.network_volatile_compressed < 0) ||
	    (task_ledgers.network_nonvolatile_compressed < 0) ||
	    (task_ledgers.media_nofootprint < 0) ||
	    (task_ledgers.media_footprint < 0) ||
	    (task_ledgers.media_nofootprint_compressed < 0) ||
	    (task_ledgers.media_footprint_compressed < 0) ||
	    (task_ledgers.graphics_nofootprint < 0) ||
	    (task_ledgers.graphics_footprint < 0) ||
	    (task_ledgers.graphics_nofootprint_compressed < 0) ||
	    (task_ledgers.graphics_footprint_compressed < 0) ||
	    (task_ledgers.neural_nofootprint < 0) ||
	    (task_ledgers.neural_footprint < 0) ||
	    (task_ledgers.neural_nofootprint_compressed < 0) ||
	    (task_ledgers.neural_footprint_compressed < 0) ||
	    (task_ledgers.platform_idle_wakeups < 0) ||
	    (task_ledgers.interrupt_wakeups < 0) ||
	    (task_ledgers.cpu_time_billed_to_me < 0) || (task_ledgers.cpu_time_billed_to_others < 0) ||
	    (task_ledgers.physical_writes < 0) ||
	    (task_ledgers.logical_writes < 0) ||
	    (task_ledgers.logical_writes_to_external < 0) ||
#if CONFIG_PHYS_WRITE_ACCT
	    (task_ledgers.fs_metadata_writes < 0) ||
#endif /* CONFIG_PHYS_WRITE_ACCT */
#if CONFIG_MEMORYSTATUS
	    (task_ledgers.memorystatus_dirty_time < 0) ||
#endif /* CONFIG_MEMORYSTATUS */
	    (task_ledgers.energy_billed_to_me < 0) ||
	    (task_ledgers.energy_billed_to_others < 0) ||
	    (task_ledgers.swapins < 0)
	    ) {
		panic("couldn't create entries for task ledger template");
	}

	ledger_track_credit_only(t, task_ledgers.phys_footprint);
	ledger_track_credit_only(t, task_ledgers.internal);
	ledger_track_credit_only(t, task_ledgers.external);
	ledger_track_credit_only(t, task_ledgers.reusable);

	ledger_track_maximum(t, task_ledgers.phys_footprint, 60);
	ledger_track_maximum(t, task_ledgers.phys_mem, 60);
	ledger_track_maximum(t, task_ledgers.internal, 60);
	ledger_track_maximum(t, task_ledgers.internal_compressed, 60);
	ledger_track_maximum(t, task_ledgers.reusable, 60);
	ledger_track_maximum(t, task_ledgers.external, 60);
#if MACH_ASSERT
	if (pmap_ledgers_panic) {
		ledger_panic_on_negative(t, task_ledgers.phys_footprint);
		ledger_panic_on_negative(t, task_ledgers.page_table);
		ledger_panic_on_negative(t, task_ledgers.internal);
		ledger_panic_on_negative(t, task_ledgers.iokit_mapped);
		ledger_panic_on_negative(t, task_ledgers.alternate_accounting);
		ledger_panic_on_negative(t, task_ledgers.alternate_accounting_compressed);
		ledger_panic_on_negative(t, task_ledgers.purgeable_volatile);
		ledger_panic_on_negative(t, task_ledgers.purgeable_nonvolatile);
		ledger_panic_on_negative(t, task_ledgers.purgeable_volatile_compressed);
		ledger_panic_on_negative(t, task_ledgers.purgeable_nonvolatile_compressed);
#if CONFIG_PHYS_WRITE_ACCT
		ledger_panic_on_negative(t, task_ledgers.fs_metadata_writes);
#endif /* CONFIG_PHYS_WRITE_ACCT */

		ledger_panic_on_negative(t, task_ledgers.tagged_nofootprint);
		ledger_panic_on_negative(t, task_ledgers.tagged_footprint);
		ledger_panic_on_negative(t, task_ledgers.tagged_nofootprint_compressed);
		ledger_panic_on_negative(t, task_ledgers.tagged_footprint_compressed);
		ledger_panic_on_negative(t, task_ledgers.network_volatile);
		ledger_panic_on_negative(t, task_ledgers.network_nonvolatile);
		ledger_panic_on_negative(t, task_ledgers.network_volatile_compressed);
		ledger_panic_on_negative(t, task_ledgers.network_nonvolatile_compressed);
		ledger_panic_on_negative(t, task_ledgers.media_nofootprint);
		ledger_panic_on_negative(t, task_ledgers.media_footprint);
		ledger_panic_on_negative(t, task_ledgers.media_nofootprint_compressed);
		ledger_panic_on_negative(t, task_ledgers.media_footprint_compressed);
		ledger_panic_on_negative(t, task_ledgers.graphics_nofootprint);
		ledger_panic_on_negative(t, task_ledgers.graphics_footprint);
		ledger_panic_on_negative(t, task_ledgers.graphics_nofootprint_compressed);
		ledger_panic_on_negative(t, task_ledgers.graphics_footprint_compressed);
		ledger_panic_on_negative(t, task_ledgers.neural_nofootprint);
		ledger_panic_on_negative(t, task_ledgers.neural_footprint);
		ledger_panic_on_negative(t, task_ledgers.neural_nofootprint_compressed);
		ledger_panic_on_negative(t, task_ledgers.neural_footprint_compressed);
	}
#endif /* MACH_ASSERT */

#if CONFIG_MEMORYSTATUS
	ledger_set_callback(t, task_ledgers.phys_footprint, task_footprint_exceeded, NULL, NULL);
#endif /* CONFIG_MEMORYSTATUS */

	ledger_set_callback(t, task_ledgers.interrupt_wakeups,
	    task_wakeups_rate_exceeded, NULL, NULL);
	ledger_set_callback(t, task_ledgers.physical_writes, task_io_rate_exceeded, (void *)FLAVOR_IO_PHYSICAL_WRITES, NULL);

#if XNU_MONITOR
	ledger_template_complete_secure_alloc(t);
#else /* XNU_MONITOR */
	ledger_template_complete(t);
#endif /* XNU_MONITOR */
	task_ledger_template = t;
}
1301 
1302 kern_return_t
task_create_internal(task_t parent_task,proc_ro_t proc_ro,coalition_t * parent_coalitions __unused,boolean_t inherit_memory,boolean_t is_64bit __unused,boolean_t is_64bit_data,uint32_t t_flags,uint32_t t_procflags,uint8_t t_returnwaitflags,task_t * child_task)1303 task_create_internal(
1304 	task_t             parent_task,            /* Null-able */
1305 	proc_ro_t          proc_ro,
1306 	coalition_t        *parent_coalitions __unused,
1307 	boolean_t          inherit_memory,
1308 	boolean_t          is_64bit __unused,
1309 	boolean_t          is_64bit_data,
1310 	uint32_t           t_flags,
1311 	uint32_t           t_procflags,
1312 	uint8_t            t_returnwaitflags,
1313 	task_t             *child_task)            /* OUT */
1314 {
1315 	task_t                  new_task;
1316 	vm_shared_region_t      shared_region;
1317 	ledger_t                ledger = NULL;
1318 	struct task_ro_data     task_ro_data = {};
1319 
1320 	*child_task = NULL;
1321 	new_task = zalloc_id(ZONE_ID_TASK, Z_WAITOK | Z_NOFAIL);
1322 
1323 	if (task_ref_count_init(new_task) != KERN_SUCCESS) {
1324 		zfree_id(ZONE_ID_TASK, new_task);
1325 		return KERN_RESOURCE_SHORTAGE;
1326 	}
1327 
1328 	/* allocate with active entries */
1329 	assert(task_ledger_template != NULL);
1330 	ledger = ledger_instantiate(task_ledger_template, LEDGER_CREATE_ACTIVE_ENTRIES);
1331 	if (ledger == NULL) {
1332 		task_ref_count_fini(new_task);
1333 		zfree_id(ZONE_ID_TASK, new_task);
1334 		return KERN_RESOURCE_SHORTAGE;
1335 	}
1336 
1337 	counter_alloc(&(new_task->faults));
1338 
1339 #if defined(HAS_APPLE_PAC)
1340 	ml_task_set_rop_pid(new_task, parent_task, inherit_memory);
1341 	ml_task_set_jop_pid(new_task, parent_task, inherit_memory);
1342 	ml_task_set_disable_user_jop(new_task, inherit_memory ? parent_task->disable_user_jop : FALSE);
1343 #endif
1344 
1345 
1346 	new_task->ledger = ledger;
1347 
1348 	/* if inherit_memory is true, parent_task MUST not be NULL */
1349 	if (!(t_flags & TF_CORPSE_FORK) && inherit_memory) {
1350 		new_task->map = vm_map_fork(ledger, parent_task->map, 0);
1351 	} else {
1352 		unsigned int pmap_flags = is_64bit ? PMAP_CREATE_64BIT : 0;
1353 		pmap_t pmap = pmap_create_options(ledger, 0, pmap_flags);
1354 		if (pmap == NULL) {
1355 			counter_free(&new_task->faults);
1356 			ledger_dereference(ledger);
1357 			task_ref_count_fini(new_task);
1358 			zfree_id(ZONE_ID_TASK, new_task);
1359 			return KERN_RESOURCE_SHORTAGE;
1360 		}
1361 		new_task->map = vm_map_create_options(pmap,
1362 		    (vm_map_offset_t)(VM_MIN_ADDRESS),
1363 		    (vm_map_offset_t)(VM_MAX_ADDRESS),
1364 		    VM_MAP_CREATE_PAGEABLE);
1365 	}
1366 
1367 	if (new_task->map == NULL) {
1368 		counter_free(&new_task->faults);
1369 		ledger_dereference(ledger);
1370 		task_ref_count_fini(new_task);
1371 		zfree_id(ZONE_ID_TASK, new_task);
1372 		return KERN_RESOURCE_SHORTAGE;
1373 	}
1374 
1375 #if defined(CONFIG_SCHED_MULTIQ)
1376 	new_task->sched_group = sched_group_create();
1377 #endif
1378 
1379 	/* Inherit address space and memlock limit from parent */
1380 	if (parent_task) {
1381 		vm_map_set_size_limit(new_task->map, parent_task->map->size_limit);
1382 		vm_map_set_data_limit(new_task->map, parent_task->map->data_limit);
1383 		vm_map_set_user_wire_limit(new_task->map, (vm_size_t)parent_task->map->user_wire_limit);
1384 	}
1385 
1386 	lck_mtx_init(&new_task->lock, &task_lck_grp, &task_lck_attr);
1387 	queue_init(&new_task->threads);
1388 	new_task->suspend_count = 0;
1389 	new_task->thread_count = 0;
1390 	new_task->active_thread_count = 0;
1391 	new_task->user_stop_count = 0;
1392 	new_task->legacy_stop_count = 0;
1393 	new_task->active = TRUE;
1394 	new_task->halting = FALSE;
1395 	new_task->priv_flags = 0;
1396 	new_task->t_flags = t_flags;
1397 	new_task->t_procflags = t_procflags;
1398 	new_task->t_returnwaitflags = t_returnwaitflags;
1399 	new_task->returnwait_inheritor = current_thread();
1400 	new_task->importance = 0;
1401 	new_task->crashed_thread_id = 0;
1402 	new_task->exec_token = 0;
1403 	new_task->watchports = NULL;
1404 	new_task->restartable_ranges = NULL;
1405 
1406 	new_task->bank_context = NULL;
1407 
1408 #ifdef MACH_BSD
1409 	new_task->bsd_info = NULL;
1410 	new_task->corpse_info = NULL;
1411 #endif /* MACH_BSD */
1412 
1413 	/* The kernel task, which is not created by this function, has unique id 0; ids assigned here start at 1. */
1414 	task_set_uniqueid(new_task);
1415 
1416 #if CONFIG_MACF
1417 	set_task_crash_label(new_task, NULL);
1418 
1419 	task_ro_data.task_filters.mach_trap_filter_mask = NULL;
1420 	task_ro_data.task_filters.mach_kobj_filter_mask = NULL;
1421 #endif
1422 
1423 #if CONFIG_MEMORYSTATUS
1424 	if (max_task_footprint != 0) {
1425 		ledger_set_limit(ledger, task_ledgers.phys_footprint, max_task_footprint, PHYS_FOOTPRINT_WARNING_LEVEL);
1426 	}
1427 #endif /* CONFIG_MEMORYSTATUS */
1428 
1429 	if (task_wakeups_monitor_rate != 0) {
1430 		uint32_t flags = WAKEMON_ENABLE | WAKEMON_SET_DEFAULTS;
1431 		int32_t  rate;        // Ignored because of WAKEMON_SET_DEFAULTS
1432 		task_wakeups_monitor_ctl(new_task, &flags, &rate);
1433 	}
1434 
1435 #if CONFIG_IO_ACCOUNTING
1436 	uint32_t flags = IOMON_ENABLE;
1437 	task_io_monitor_ctl(new_task, &flags);
1438 #endif /* CONFIG_IO_ACCOUNTING */
1439 
1440 	machine_task_init(new_task, parent_task, inherit_memory);
1441 
1442 	new_task->task_debug = NULL;
1443 
1444 #if DEVELOPMENT || DEBUG
1445 	new_task->task_unnested = FALSE;
1446 	new_task->task_disconnected_count = 0;
1447 #endif
1448 	queue_init(&new_task->semaphore_list);
1449 	new_task->semaphores_owned = 0;
1450 
1451 	ipc_task_init(new_task, parent_task);
1452 
1453 	new_task->vtimers = 0;
1454 
1455 	new_task->shared_region = NULL;
1456 
1457 	new_task->affinity_space = NULL;
1458 
1459 	new_task->t_kpc = 0;
1460 
1461 	new_task->pidsuspended = FALSE;
1462 	new_task->frozen = FALSE;
1463 	new_task->changing_freeze_state = FALSE;
1464 	new_task->rusage_cpu_flags = 0;
1465 	new_task->rusage_cpu_percentage = 0;
1466 	new_task->rusage_cpu_interval = 0;
1467 	new_task->rusage_cpu_deadline = 0;
1468 	new_task->rusage_cpu_callt = NULL;
1469 #if MACH_ASSERT
1470 	new_task->suspends_outstanding = 0;
1471 #endif
1472 
1473 #if HYPERVISOR
1474 	new_task->hv_task_target = NULL;
1475 #endif /* HYPERVISOR */
1476 
1477 #if CONFIG_TASKWATCH
1478 	queue_init(&new_task->task_watchers);
1479 	new_task->num_taskwatchers  = 0;
1480 	new_task->watchapplying  = 0;
1481 #endif /* CONFIG_TASKWATCH */
1482 
1483 	new_task->mem_notify_reserved = 0;
1484 	new_task->memlimit_attrs_reserved = 0;
1485 
1486 	new_task->requested_policy = default_task_requested_policy;
1487 	new_task->effective_policy = default_task_effective_policy;
1488 
1489 	new_task->task_shared_region_slide = -1;
1490 
1491 	if (parent_task != NULL) {
1492 		task_ro_data.task_tokens.sec_token = *task_get_sec_token(parent_task);
1493 		task_ro_data.task_tokens.audit_token = *task_get_audit_token(parent_task);
1494 	} else {
1495 		task_ro_data.task_tokens.sec_token = KERNEL_SECURITY_TOKEN;
1496 		task_ro_data.task_tokens.audit_token = KERNEL_AUDIT_TOKEN;
1497 	}
1498 
1499 	/* must be set before task_importance_init_from_parent: */
1500 	if (proc_ro != NULL) {
1501 		new_task->bsd_info_ro = proc_ro_ref_task(proc_ro, new_task, &task_ro_data);
1502 	} else {
1503 		new_task->bsd_info_ro = proc_ro_alloc(NULL, NULL, new_task, &task_ro_data);
1504 	}
1505 
1506 	task_importance_init_from_parent(new_task, parent_task);
1507 
1508 	new_task->corpse_vmobject_list = NULL;
1509 
1510 	if (parent_task != TASK_NULL) {
1511 		/* inherit the parent's shared region */
1512 		shared_region = vm_shared_region_get(parent_task);
1513 		vm_shared_region_set(new_task, shared_region);
1514 
1515 #if __has_feature(ptrauth_calls)
1516 		/* use parent's shared_region_id */
1517 		char *shared_region_id = task_get_vm_shared_region_id_and_jop_pid(parent_task, NULL);
1518 		if (shared_region_id != NULL) {
1519 			shared_region_key_alloc(shared_region_id, FALSE, 0);         /* get a reference */
1520 		}
1521 		task_set_shared_region_id(new_task, shared_region_id);
1522 #endif /* __has_feature(ptrauth_calls) */
1523 
1524 		if (task_has_64Bit_addr(parent_task)) {
1525 			task_set_64Bit_addr(new_task);
1526 		}
1527 
1528 		if (task_has_64Bit_data(parent_task)) {
1529 			task_set_64Bit_data(new_task);
1530 		}
1531 
1532 		new_task->all_image_info_addr = parent_task->all_image_info_addr;
1533 		new_task->all_image_info_size = parent_task->all_image_info_size;
1534 		new_task->mach_header_vm_address = 0;
1535 
1536 		if (inherit_memory && parent_task->affinity_space) {
1537 			task_affinity_create(parent_task, new_task);
1538 		}
1539 
1540 		new_task->pset_hint = parent_task->pset_hint = task_choose_pset(parent_task);
1541 
1542 		new_task->task_exc_guard = parent_task->task_exc_guard;
1543 		/* only inherit the option bits, no effect until task_set_immovable_pinned() */
1544 		new_task->task_control_port_options = parent_task->task_control_port_options;
1545 
1546 		if (parent_task->t_flags & TF_NO_SMT) {
1547 			new_task->t_flags |= TF_NO_SMT;
1548 		}
1549 
1550 		if (parent_task->t_flags & TF_USE_PSET_HINT_CLUSTER_TYPE) {
1551 			new_task->t_flags |= TF_USE_PSET_HINT_CLUSTER_TYPE;
1552 		}
1553 
1554 		if (parent_task->t_flags & TF_TECS) {
1555 			new_task->t_flags |= TF_TECS;
1556 		}
1557 
1558 		if (parent_task->t_flags & TF_FILTER_MSG) {
1559 			new_task->t_flags |= TF_FILTER_MSG;
1560 		}
1561 
1562 #if defined(__x86_64__)
1563 		if (parent_task->t_flags & TF_INSN_COPY_OPTOUT) {
1564 			new_task->t_flags |= TF_INSN_COPY_OPTOUT;
1565 		}
1566 #endif
1567 		new_task->priority = BASEPRI_DEFAULT;
1568 		new_task->max_priority = MAXPRI_USER;
1569 
1570 		task_policy_create(new_task, parent_task);
1571 	} else {
1572 #ifdef __LP64__
1573 		if (is_64bit) {
1574 			task_set_64Bit_addr(new_task);
1575 		}
1576 #endif
1577 
1578 		if (is_64bit_data) {
1579 			task_set_64Bit_data(new_task);
1580 		}
1581 
1582 		new_task->all_image_info_addr = (mach_vm_address_t)0;
1583 		new_task->all_image_info_size = (mach_vm_size_t)0;
1584 
1585 		new_task->pset_hint = PROCESSOR_SET_NULL;
1586 
1587 		new_task->task_exc_guard = TASK_EXC_GUARD_NONE;
1588 		new_task->task_control_port_options = TASK_CONTROL_PORT_OPTIONS_NONE;
1589 
1590 		if (kernel_task == TASK_NULL) {
1591 			new_task->priority = BASEPRI_KERNEL;
1592 			new_task->max_priority = MAXPRI_KERNEL;
1593 		} else {
1594 			new_task->priority = BASEPRI_DEFAULT;
1595 			new_task->max_priority = MAXPRI_USER;
1596 		}
1597 	}
1598 
1599 	bzero(new_task->coalition, sizeof(new_task->coalition));
1600 	for (int i = 0; i < COALITION_NUM_TYPES; i++) {
1601 		queue_chain_init(new_task->task_coalition[i]);
1602 	}
1603 
1604 	/* Allocate I/O Statistics */
1605 	new_task->task_io_stats = kalloc_data(sizeof(struct io_stat_info),
1606 	    Z_WAITOK | Z_ZERO | Z_NOFAIL);
1607 
1608 	bzero(&(new_task->cpu_time_eqos_stats), sizeof(new_task->cpu_time_eqos_stats));
1609 	bzero(&(new_task->cpu_time_rqos_stats), sizeof(new_task->cpu_time_rqos_stats));
1610 
1611 	bzero(&new_task->extmod_statistics, sizeof(new_task->extmod_statistics));
1612 
1613 	counter_alloc(&(new_task->pageins));
1614 	counter_alloc(&(new_task->cow_faults));
1615 	counter_alloc(&(new_task->messages_sent));
1616 	counter_alloc(&(new_task->messages_received));
1617 
1618 	/* Copy resource accounting info from the parent for a corpse-forked task. */
1619 	if (parent_task != NULL && (t_flags & TF_CORPSE_FORK)) {
1620 		task_rollup_accounting_info(new_task, parent_task);
1621 		task_store_owned_vmobject_info(new_task, parent_task);
1622 	} else {
1623 		/* Initialize to zero for standard fork/spawn case */
1624 		new_task->total_user_time = 0;
1625 		new_task->total_system_time = 0;
1626 		new_task->total_ptime = 0;
1627 		new_task->total_runnable_time = 0;
1628 		new_task->syscalls_mach = 0;
1629 		new_task->syscalls_unix = 0;
1630 		new_task->c_switch = 0;
1631 		new_task->p_switch = 0;
1632 		new_task->ps_switch = 0;
1633 		new_task->decompressions = 0;
1634 		new_task->low_mem_notified_warn = 0;
1635 		new_task->low_mem_notified_critical = 0;
1636 		new_task->purged_memory_warn = 0;
1637 		new_task->purged_memory_critical = 0;
1638 		new_task->low_mem_privileged_listener = 0;
1639 		new_task->memlimit_is_active = 0;
1640 		new_task->memlimit_is_fatal = 0;
1641 		new_task->memlimit_active_exc_resource = 0;
1642 		new_task->memlimit_inactive_exc_resource = 0;
1643 		new_task->task_timer_wakeups_bin_1 = 0;
1644 		new_task->task_timer_wakeups_bin_2 = 0;
1645 		new_task->task_gpu_ns = 0;
1646 		new_task->task_writes_counters_internal.task_immediate_writes = 0;
1647 		new_task->task_writes_counters_internal.task_deferred_writes = 0;
1648 		new_task->task_writes_counters_internal.task_invalidated_writes = 0;
1649 		new_task->task_writes_counters_internal.task_metadata_writes = 0;
1650 		new_task->task_writes_counters_external.task_immediate_writes = 0;
1651 		new_task->task_writes_counters_external.task_deferred_writes = 0;
1652 		new_task->task_writes_counters_external.task_invalidated_writes = 0;
1653 		new_task->task_writes_counters_external.task_metadata_writes = 0;
1654 #if CONFIG_PHYS_WRITE_ACCT
1655 		new_task->task_fs_metadata_writes = 0;
1656 #endif /* CONFIG_PHYS_WRITE_ACCT */
1657 
1658 		new_task->task_energy = 0;
1659 #if MONOTONIC
1660 		memset(&new_task->task_monotonic, 0, sizeof(new_task->task_monotonic));
1661 #endif /* MONOTONIC */
1662 	}
1663 
1664 
1665 #if CONFIG_COALITIONS
1666 	if (!(t_flags & TF_CORPSE_FORK)) {
1667 		/* TODO: there is no graceful failure path here... */
1668 		if (parent_coalitions && parent_coalitions[COALITION_TYPE_RESOURCE]) {
1669 			coalitions_adopt_task(parent_coalitions, new_task);
1670 		} else if (parent_task && parent_task->coalition[COALITION_TYPE_RESOURCE]) {
1671 			/*
1672 			 * all tasks at least have a resource coalition, so
1673 			 * if the parent has one then inherit all coalitions
1674 			 * the parent is a part of
1675 			 */
1676 			coalitions_adopt_task(parent_task->coalition, new_task);
1677 		} else {
1678 			/* TODO: assert that new_task will be PID 1 (launchd) */
1679 			coalitions_adopt_init_task(new_task);
1680 		}
1681 		/*
1682 		 * on exec, we need to transfer the coalition roles from the
1683 		 * parent task to the exec copy task.
1684 		 */
1685 		if (parent_task && (t_procflags & TPF_EXEC_COPY)) {
1686 			int coal_roles[COALITION_NUM_TYPES];
1687 			task_coalition_roles(parent_task, coal_roles);
1688 			(void)coalitions_set_roles(new_task->coalition, new_task, coal_roles);
1689 		}
1690 	} else {
1691 		coalitions_adopt_corpse_task(new_task);
1692 	}
1693 
1694 	if (new_task->coalition[COALITION_TYPE_RESOURCE] == COALITION_NULL) {
1695 		panic("created task is not a member of a resource coalition");
1696 	}
1697 	task_set_coalition_member(new_task);
1698 #endif /* CONFIG_COALITIONS */
1699 
1700 	new_task->dispatchqueue_offset = 0;
1701 	if (parent_task != NULL) {
1702 		new_task->dispatchqueue_offset = parent_task->dispatchqueue_offset;
1703 	}
1704 
1705 	new_task->task_can_transfer_memory_ownership = FALSE;
1706 	new_task->task_volatile_objects = 0;
1707 	new_task->task_nonvolatile_objects = 0;
1708 	new_task->task_objects_disowning = FALSE;
1709 	new_task->task_objects_disowned = FALSE;
1710 	new_task->task_owned_objects = 0;
1711 	queue_init(&new_task->task_objq);
1712 
1713 #if CONFIG_FREEZE
1714 	queue_init(&new_task->task_frozen_cseg_q);
1715 #endif /* CONFIG_FREEZE */
1716 
1717 	task_objq_lock_init(new_task);
1718 
1719 #if __arm64__
1720 	new_task->task_legacy_footprint = FALSE;
1721 	new_task->task_extra_footprint_limit = FALSE;
1722 	new_task->task_ios13extended_footprint_limit = FALSE;
1723 #endif /* __arm64__ */
1724 	new_task->task_region_footprint = FALSE;
1725 	new_task->task_has_crossed_thread_limit = FALSE;
1726 	new_task->task_thread_limit = 0;
1727 #if CONFIG_SECLUDED_MEMORY
1728 	new_task->task_can_use_secluded_mem = FALSE;
1729 	new_task->task_could_use_secluded_mem = FALSE;
1730 	new_task->task_could_also_use_secluded_mem = FALSE;
1731 	new_task->task_suppressed_secluded = FALSE;
1732 #endif /* CONFIG_SECLUDED_MEMORY */
1733 
1734 	/*
1735 	 * t_flags is set up above. But since we don't
1736 	 * support darkwake mode being set that way
1737 	 * currently, we clear it out here explicitly.
1738 	 */
1739 	new_task->t_flags &= ~(TF_DARKWAKE_MODE);
1740 
1741 	queue_init(&new_task->io_user_clients);
1742 	new_task->loadTag = 0;
1743 
1744 	ipc_task_enable(new_task);
1745 
1746 	lck_mtx_lock(&tasks_threads_lock);
1747 	queue_enter(&tasks, new_task, task_t, tasks);
1748 	tasks_count++;
1749 	if (tasks_suspend_state) {
1750 		task_suspend_internal(new_task);
1751 	}
1752 	lck_mtx_unlock(&tasks_threads_lock);
1753 
1754 	*child_task = new_task;
1755 	return KERN_SUCCESS;
1756 }
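/*
 * Non-normative usage sketch: callers of task_create_internal() own the
 * reference returned through *child_task and are responsible for
 * eventually dropping it. The names below are hypothetical.
 *
 *	task_t child = TASK_NULL;
 *	kern_return_t kr = task_create_internal(parent, NULL, NULL,
 *	    FALSE, TRUE, TRUE, 0, 0, 0, &child);
 *	if (kr != KERN_SUCCESS) {
 *		return kr;      // nothing to clean up on failure
 *	}
 *	// ... set up the child, then balance the reference:
 *	task_deallocate(child);
 */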
1757 
1758 /*
1759  *	task_rollup_accounting_info
1760  *
1761  *	Roll up accounting stats. Used to rollup stats
1762  *	for exec copy task and corpse fork.
1763  */
1764 void
1765 task_rollup_accounting_info(task_t to_task, task_t from_task)
1766 {
1767 	assert(from_task != to_task);
1768 
1769 	to_task->total_user_time = from_task->total_user_time;
1770 	to_task->total_system_time = from_task->total_system_time;
1771 	to_task->total_ptime = from_task->total_ptime;
1772 	to_task->total_runnable_time = from_task->total_runnable_time;
1773 	counter_add(&to_task->faults, counter_load(&from_task->faults));
1774 	counter_add(&to_task->pageins, counter_load(&from_task->pageins));
1775 	counter_add(&to_task->cow_faults, counter_load(&from_task->cow_faults));
1776 	counter_add(&to_task->messages_sent, counter_load(&from_task->messages_sent));
1777 	counter_add(&to_task->messages_received, counter_load(&from_task->messages_received));
1778 	to_task->decompressions = from_task->decompressions;
1779 	to_task->syscalls_mach = from_task->syscalls_mach;
1780 	to_task->syscalls_unix = from_task->syscalls_unix;
1781 	to_task->c_switch = from_task->c_switch;
1782 	to_task->p_switch = from_task->p_switch;
1783 	to_task->ps_switch = from_task->ps_switch;
1784 	to_task->extmod_statistics = from_task->extmod_statistics;
1785 	to_task->low_mem_notified_warn = from_task->low_mem_notified_warn;
1786 	to_task->low_mem_notified_critical = from_task->low_mem_notified_critical;
1787 	to_task->purged_memory_warn = from_task->purged_memory_warn;
1788 	to_task->purged_memory_critical = from_task->purged_memory_critical;
1789 	to_task->low_mem_privileged_listener = from_task->low_mem_privileged_listener;
1790 	*to_task->task_io_stats = *from_task->task_io_stats;
1791 	to_task->cpu_time_eqos_stats = from_task->cpu_time_eqos_stats;
1792 	to_task->cpu_time_rqos_stats = from_task->cpu_time_rqos_stats;
1793 	to_task->task_timer_wakeups_bin_1 = from_task->task_timer_wakeups_bin_1;
1794 	to_task->task_timer_wakeups_bin_2 = from_task->task_timer_wakeups_bin_2;
1795 	to_task->task_gpu_ns = from_task->task_gpu_ns;
1796 	to_task->task_writes_counters_internal.task_immediate_writes = from_task->task_writes_counters_internal.task_immediate_writes;
1797 	to_task->task_writes_counters_internal.task_deferred_writes = from_task->task_writes_counters_internal.task_deferred_writes;
1798 	to_task->task_writes_counters_internal.task_invalidated_writes = from_task->task_writes_counters_internal.task_invalidated_writes;
1799 	to_task->task_writes_counters_internal.task_metadata_writes = from_task->task_writes_counters_internal.task_metadata_writes;
1800 	to_task->task_writes_counters_external.task_immediate_writes = from_task->task_writes_counters_external.task_immediate_writes;
1801 	to_task->task_writes_counters_external.task_deferred_writes = from_task->task_writes_counters_external.task_deferred_writes;
1802 	to_task->task_writes_counters_external.task_invalidated_writes = from_task->task_writes_counters_external.task_invalidated_writes;
1803 	to_task->task_writes_counters_external.task_metadata_writes = from_task->task_writes_counters_external.task_metadata_writes;
1804 #if CONFIG_PHYS_WRITE_ACCT
1805 	to_task->task_fs_metadata_writes = from_task->task_fs_metadata_writes;
1806 #endif /* CONFIG_PHYS_WRITE_ACCT */
1807 	to_task->task_energy = from_task->task_energy;
1808 
1809 #if CONFIG_MEMORYSTATUS
1810 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.memorystatus_dirty_time);
1811 #endif /* CONFIG_MEMORYSTATUS */
1812 
1813 	/* Roll up the remaining entries; memory accounting entries are deliberately skipped */
1814 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time);
1815 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.platform_idle_wakeups);
1816 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.interrupt_wakeups);
1817 #if CONFIG_SCHED_SFI
1818 	for (sfi_class_id_t class_id = SFI_CLASS_UNSPECIFIED; class_id < MAX_SFI_CLASS_ID; class_id++) {
1819 		ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.sfi_wait_times[class_id]);
1820 	}
1821 #endif
1822 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time_billed_to_me);
1823 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time_billed_to_others);
1824 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.physical_writes);
1825 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.logical_writes);
1826 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.energy_billed_to_me);
1827 	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.energy_billed_to_others);
1828 }
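/*
 * Illustrative semantics of the ledger_rollup_entry() calls above
 * (pseudocode sketch, not the actual ledger implementation): both sides
 * of each entry are accumulated into the destination, so the balance
 * carries over to the exec-copy or corpse-fork task:
 *
 *	to_entry.credit += from_entry.credit;
 *	to_entry.debit  += from_entry.debit;
 */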
1829 
1830 /*
1831  *	task_deallocate_internal:
1832  *
1833  *	Drop a reference on a task.
1834  *	Don't call this directly.
1835  */
1836 extern void task_deallocate_internal(task_t task, os_ref_count_t refs);
1837 void
1838 task_deallocate_internal(
1839 	task_t          task,
1840 	os_ref_count_t  refs)
1841 {
1842 	ledger_amount_t credit, debit, interrupt_wakeups, platform_idle_wakeups;
1843 
1844 	if (task == TASK_NULL) {
1845 		return;
1846 	}
1847 
1848 #if IMPORTANCE_INHERITANCE
1849 	if (refs == 1) {
1850 		/*
1851 		 * If last ref potentially comes from the task's importance,
1852 		 * disconnect it.  But more task refs may be added before
1853 		 * that completes, so wait for the reference to go to zero
1854 		 * naturally (it may happen on a recursive task_deallocate()
1855 		 * from the ipc_importance_disconnect_task() call).
1856 		 */
1857 		if (IIT_NULL != task->task_imp_base) {
1858 			ipc_importance_disconnect_task(task);
1859 		}
1860 		return;
1861 	}
1862 #endif /* IMPORTANCE_INHERITANCE */
1863 
1864 	if (refs > 0) {
1865 		return;
1866 	}
1867 
1868 	/*
1869 	 * The task should be dead at this point. Ensure other resources
1870 	 * like threads are gone before we trash the world.
1871 	 */
1872 	assert(queue_empty(&task->threads));
1873 	assert(task->bsd_info == NULL);
1874 	assert(!is_active(task->itk_space));
1875 	assert(!task->active);
1876 	assert(task->active_thread_count == 0);
1877 
1878 	lck_mtx_lock(&tasks_threads_lock);
1879 	assert(terminated_tasks_count > 0);
1880 	queue_remove(&terminated_tasks, task, task_t, tasks);
1881 	terminated_tasks_count--;
1882 	lck_mtx_unlock(&tasks_threads_lock);
1883 
1884 	/*
1885 	 * remove the reference on bank context
1886 	 */
1887 	task_bank_reset(task);
1888 
1889 	kfree_data(task->task_io_stats, sizeof(struct io_stat_info));
1890 
1891 	/*
1892 	 *	Give the machine dependent code a chance
1893 	 *	to perform cleanup before ripping apart
1894 	 *	the task.
1895 	 */
1896 	machine_task_terminate(task);
1897 
1898 	ipc_task_terminate(task);
1899 
1900 	/* let iokit know */
1901 	iokit_task_terminate(task);
1902 
1903 	if (task->affinity_space) {
1904 		task_affinity_deallocate(task);
1905 	}
1906 
1907 #if MACH_ASSERT
1908 	if (task->ledger != NULL &&
1909 	    task->map != NULL &&
1910 	    task->map->pmap != NULL &&
1911 	    task->map->pmap->ledger != NULL) {
1912 		assert(task->ledger == task->map->pmap->ledger);
1913 	}
1914 #endif /* MACH_ASSERT */
1915 
1916 	vm_owned_objects_disown(task);
1917 	assert(task->task_objects_disowned);
1918 	if (task->task_owned_objects != 0) {
1919 		panic("task_deallocate(%p): "
1920 		    "volatile_objects=%d nonvolatile_objects=%d owned=%d\n",
1921 		    task,
1922 		    task->task_volatile_objects,
1923 		    task->task_nonvolatile_objects,
1924 		    task->task_owned_objects);
1925 	}
1926 
1927 	vm_map_deallocate(task->map);
1928 	is_release(task->itk_space);
1929 	if (task->restartable_ranges) {
1930 		restartable_ranges_release(task->restartable_ranges);
1931 	}
1932 
1933 	ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
1934 	    &interrupt_wakeups, &debit);
1935 	ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
1936 	    &platform_idle_wakeups, &debit);
1937 
1938 #if defined(CONFIG_SCHED_MULTIQ)
1939 	sched_group_destroy(task->sched_group);
1940 #endif
1941 
1942 	/* Accumulate statistics for dead tasks */
1943 	lck_spin_lock(&dead_task_statistics_lock);
1944 	dead_task_statistics.total_user_time += task->total_user_time;
1945 	dead_task_statistics.total_system_time += task->total_system_time;
1946 
1947 	dead_task_statistics.task_interrupt_wakeups += interrupt_wakeups;
1948 	dead_task_statistics.task_platform_idle_wakeups += platform_idle_wakeups;
1949 
1950 	dead_task_statistics.task_timer_wakeups_bin_1 += task->task_timer_wakeups_bin_1;
1951 	dead_task_statistics.task_timer_wakeups_bin_2 += task->task_timer_wakeups_bin_2;
1952 	dead_task_statistics.total_ptime += task->total_ptime;
1953 	dead_task_statistics.total_pset_switches += task->ps_switch;
1954 	dead_task_statistics.task_gpu_ns += task->task_gpu_ns;
1955 	dead_task_statistics.task_energy += task->task_energy;
1956 
1957 	lck_spin_unlock(&dead_task_statistics_lock);
1958 	lck_mtx_destroy(&task->lock, &task_lck_grp);
1959 
1960 	if (!ledger_get_entries(task->ledger, task_ledgers.tkm_private, &credit,
1961 	    &debit)) {
1962 		OSAddAtomic64(credit, (int64_t *)&tasks_tkm_private.alloc);
1963 		OSAddAtomic64(debit, (int64_t *)&tasks_tkm_private.free);
1964 	}
1965 	if (!ledger_get_entries(task->ledger, task_ledgers.tkm_shared, &credit,
1966 	    &debit)) {
1967 		OSAddAtomic64(credit, (int64_t *)&tasks_tkm_shared.alloc);
1968 		OSAddAtomic64(debit, (int64_t *)&tasks_tkm_shared.free);
1969 	}
1970 	ledger_dereference(task->ledger);
1971 
1972 	counter_free(&task->faults);
1973 	counter_free(&task->pageins);
1974 	counter_free(&task->cow_faults);
1975 	counter_free(&task->messages_sent);
1976 	counter_free(&task->messages_received);
1977 
1978 #if CONFIG_COALITIONS
1979 	task_release_coalitions(task);
1980 #endif /* CONFIG_COALITIONS */
1981 
1982 	bzero(task->coalition, sizeof(task->coalition));
1983 
1984 #if MACH_BSD
1985 	/* clean up collected information since last reference to task is gone */
1986 	if (task->corpse_info) {
1987 		void *corpse_info_kernel = kcdata_memory_get_begin_addr(task->corpse_info);
1988 		task_crashinfo_destroy(task->corpse_info);
1989 		task->corpse_info = NULL;
1990 		kfree_data(corpse_info_kernel, CORPSEINFO_ALLOCATION_SIZE);
1991 	}
1992 #endif
1993 
1994 #if CONFIG_MACF
1995 	if (get_task_crash_label(task)) {
1996 		mac_exc_free_label(get_task_crash_label(task));
1997 		set_task_crash_label(task, NULL);
1998 	}
1999 #endif
2000 
2001 	assert(queue_empty(&task->task_objq));
2002 	task_objq_lock_destroy(task);
2003 
2004 	if (task->corpse_vmobject_list) {
2005 		kfree_data(task->corpse_vmobject_list,
2006 		    (vm_size_t)task->corpse_vmobject_list_size);
2007 	}
2008 
2009 	task_ref_count_fini(task);
2010 
2011 	task->bsd_info_ro = proc_ro_release_task((proc_ro_t)task->bsd_info_ro);
2012 
2013 	if (task->bsd_info_ro != NULL) {
2014 		proc_ro_free(task->bsd_info_ro);
2015 		task->bsd_info_ro = NULL;
2016 	}
2017 
2018 	zfree_id(ZONE_ID_TASK, task);
2019 }
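/*
 * Illustrative reference-counting contract (non-normative): every
 * task_reference() must be balanced by a task_deallocate(); the
 * destruction path in task_deallocate_internal() above only runs once
 * the reference count drops to zero.
 *
 *	task_reference(task);
 *	// ... task cannot be freed while this reference is held ...
 *	task_deallocate(task);
 */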
2020 
2021 /*
2022  *	task_name_deallocate_mig:
2023  *
2024  *	Drop a reference on a task name.
2025  */
2026 void
2027 task_name_deallocate_mig(
2028 	task_name_t             task_name)
2029 {
2030 	return task_deallocate_grp((task_t)task_name, TASK_GRP_MIG);
2031 }
2032 
2033 /*
2034  *	task_policy_set_deallocate_mig:
2035  *
2036  *	Drop a reference on a task type.
2037  *	Drop a reference on a task policy set port.
2038 void
2039 task_policy_set_deallocate_mig(task_policy_set_t task_policy_set)
2040 {
2041 	return task_deallocate_grp((task_t)task_policy_set, TASK_GRP_MIG);
2042 }
2043 
2044 /*
2045  *	task_policy_get_deallocate_mig:
2046  *
2047  *	Drop a reference on a task type.
2048  *	Drop a reference on a task policy get port.
2049 void
2050 task_policy_get_deallocate_mig(task_policy_get_t task_policy_get)
2051 {
2052 	return task_deallocate_grp((task_t)task_policy_get, TASK_GRP_MIG);
2053 }
2054 
2055 /*
2056  *	task_inspect_deallocate_mig:
2057  *
2058  *	Drop a task inspection reference.
2059  */
2060 void
2061 task_inspect_deallocate_mig(
2062 	task_inspect_t          task_inspect)
2063 {
2064 	return task_deallocate_grp((task_t)task_inspect, TASK_GRP_MIG);
2065 }
2066 
2067 /*
2068  *	task_read_deallocate_mig:
2069  *
2070  *	Drop a reference on task read port.
2071  */
2072 void
2073 task_read_deallocate_mig(
2074 	task_read_t          task_read)
2075 {
2076 	return task_deallocate_grp((task_t)task_read, TASK_GRP_MIG);
2077 }
2078 
2079 /*
2080  *	task_suspension_token_deallocate:
2081  *
2082  *	Drop a reference on a task suspension token.
2083  */
2084 void
2085 task_suspension_token_deallocate(
2086 	task_suspension_token_t         token)
2087 {
2088 	return task_deallocate((task_t)token);
2089 }
2090 
2091 void
2092 task_suspension_token_deallocate_grp(
2093 	task_suspension_token_t         token,
2094 	task_grp_t                      grp)
2095 {
2096 	return task_deallocate_grp((task_t)token, grp);
2097 }
2098 
2099 /*
2100  * task_collect_crash_info:
2101  *
2102  * Collect crash info from BSD and Mach based data.
2103  */
2104 kern_return_t
2105 task_collect_crash_info(
2106 	task_t task,
2107 #ifdef CONFIG_MACF
2108 	struct label *crash_label,
2109 #endif
2110 	int is_corpse_fork)
2111 {
2112 	kern_return_t kr = KERN_SUCCESS;
2113 
2114 	kcdata_descriptor_t crash_data = NULL;
2115 	kcdata_descriptor_t crash_data_release = NULL;
2116 	mach_msg_type_number_t size = CORPSEINFO_ALLOCATION_SIZE;
2117 	mach_vm_offset_t crash_data_ptr = 0;
2118 	void *crash_data_kernel = NULL;
2119 	void *crash_data_kernel_release = NULL;
2120 #if CONFIG_MACF
2121 	struct label *label, *free_label;
2122 #endif
2123 
2124 	if (!corpses_enabled()) {
2125 		return KERN_NOT_SUPPORTED;
2126 	}
2127 
2128 #if CONFIG_MACF
2129 	free_label = label = mac_exc_create_label(NULL);
2130 #endif
2131 
2132 	task_lock(task);
2133 
2134 	assert(is_corpse_fork || task->bsd_info != NULL);
2135 	if (task->corpse_info == NULL && (is_corpse_fork || task->bsd_info != NULL)) {
2136 #if CONFIG_MACF
2137 		/* Set the crash label, used by the exception delivery mac hook */
2138 		free_label = get_task_crash_label(task);         // Most likely NULL.
2139 		set_task_crash_label(task, label);
2140 		mac_exc_update_task_crash_label(task, crash_label);
2141 #endif
2142 		task_unlock(task);
2143 
2144 		crash_data_kernel = kalloc_data(CORPSEINFO_ALLOCATION_SIZE,
2145 		    Z_WAITOK | Z_ZERO);
2146 		if (crash_data_kernel == NULL) {
2147 			kr = KERN_RESOURCE_SHORTAGE;
2148 			goto out_no_lock;
2149 		}
2150 		crash_data_ptr = (mach_vm_offset_t) crash_data_kernel;
2151 
2152 		/* Do not get a corpse ref for corpse fork */
2153 		crash_data = task_crashinfo_alloc_init((mach_vm_address_t)crash_data_ptr, size,
2154 		    is_corpse_fork ? 0 : CORPSE_CRASHINFO_HAS_REF,
2155 		    KCFLAG_USE_MEMCOPY);
2156 		if (crash_data) {
2157 			task_lock(task);
2158 			crash_data_release = task->corpse_info;
2159 			crash_data_kernel_release = kcdata_memory_get_begin_addr(crash_data_release);
2160 			task->corpse_info = crash_data;
2161 
2162 			task_unlock(task);
2163 			kr = KERN_SUCCESS;
2164 		} else {
2165 			kfree_data(crash_data_kernel,
2166 			    CORPSEINFO_ALLOCATION_SIZE);
2167 			kr = KERN_FAILURE;
2168 		}
2169 
2170 		if (crash_data_release != NULL) {
2171 			task_crashinfo_destroy(crash_data_release);
2172 		}
2173 		kfree_data(crash_data_kernel_release, CORPSEINFO_ALLOCATION_SIZE);
2174 	} else {
2175 		task_unlock(task);
2176 	}
2177 
2178 out_no_lock:
2179 #if CONFIG_MACF
2180 	if (free_label != NULL) {
2181 		mac_exc_free_label(free_label);
2182 	}
2183 #endif
2184 	return kr;
2185 }
2186 
2187 /*
2188  * task_deliver_crash_notification:
2189  *
2190  * Makes outcall to registered host port for a corpse.
2191  */
2192 kern_return_t
2193 task_deliver_crash_notification(
2194 	task_t corpse, /* corpse or corpse fork */
2195 	thread_t thread,
2196 	exception_type_t etype,
2197 	mach_exception_subcode_t subcode)
2198 {
2199 	kcdata_descriptor_t crash_info = corpse->corpse_info;
2200 	thread_t th_iter = NULL;
2201 	kern_return_t kr = KERN_SUCCESS;
2202 	wait_interrupt_t wsave;
2203 	mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
2204 	ipc_port_t corpse_port;
2205 
2206 	if (crash_info == NULL) {
2207 		return KERN_FAILURE;
2208 	}
2209 
2210 	assert(task_is_a_corpse(corpse));
2211 
2212 	task_lock(corpse);
2213 
2214 	/*
2215 	 * Always populate code[0] as the effective exception type for EXC_CORPSE_NOTIFY.
2216 	 * Crash reporters should derive whether it's fatal from the corpse blob.
2217 	 */
2218 	code[0] = etype;
2219 	code[1] = subcode;
2220 
2221 	queue_iterate(&corpse->threads, th_iter, thread_t, task_threads)
2222 	{
2223 		if (th_iter->corpse_dup == FALSE) {
2224 			ipc_thread_reset(th_iter);
2225 		}
2226 	}
2227 	task_unlock(corpse);
2228 
2229 	/* Arm the no-sender notification for taskport */
2230 	task_reference(corpse);
2231 	corpse_port = convert_corpse_to_port_and_nsrequest(corpse);
2232 
2233 	wsave = thread_interrupt_level(THREAD_UNINT);
2234 	kr = exception_triage_thread(EXC_CORPSE_NOTIFY, code, EXCEPTION_CODE_MAX, thread);
2235 	if (kr != KERN_SUCCESS) {
2236 		printf("Failed to send exception EXC_CORPSE_NOTIFY. error code: %d for pid %d\n", kr, task_pid(corpse));
2237 	}
2238 
2239 	(void)thread_interrupt_level(wsave);
2240 
2241 	/*
2242 	 * Drop the send right on the corpse port; this will fire the
2243 	 * no-senders notification if exception delivery failed.
2244 	 */
2245 	ipc_port_release_send(corpse_port);
2246 	return kr;
2247 }
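/*
 * Hypothetical user-space counterpart (not part of this file): a crash
 * reporter registered for EXC_CORPSE_NOTIFY with MACH_EXCEPTION_CODES
 * observes the values populated above as code[0] == effective exception
 * type and code[1] == subcode, roughly:
 *
 *	kern_return_t
 *	catch_mach_exception_raise(mach_port_t exc_port, mach_port_t thread,
 *	    mach_port_t task, exception_type_t exception,
 *	    mach_exception_data_t code, mach_msg_type_number_t code_count)
 *	{
 *		if (exception == EXC_CORPSE_NOTIFY && code_count >= 2) {
 *			exception_type_t etype = (exception_type_t)code[0];
 *			// read the corpse kcdata blob via the task port
 *		}
 *		return KERN_SUCCESS;
 *	}
 */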
2248 
2249 /*
2250  *	task_terminate:
2251  *
2252  *	Terminate the specified task.  See comments on thread_terminate
2253  *	(kern/thread.c) about problems with terminating the "current task."
2254  */
2255 
2256 kern_return_t
2257 task_terminate(
2258 	task_t          task)
2259 {
2260 	if (task == TASK_NULL) {
2261 		return KERN_INVALID_ARGUMENT;
2262 	}
2263 
2264 	if (task->bsd_info) {
2265 		return KERN_FAILURE;
2266 	}
2267 
2268 	return task_terminate_internal(task);
2269 }
2270 
2271 #if MACH_ASSERT
2272 extern int proc_pid(struct proc *);
2273 extern void proc_name_kdp(struct proc *p, char *buf, int size);
2274 #endif /* MACH_ASSERT */
2275 
2276 #define VM_MAP_PARTIAL_REAP 0x54  /* 0x150 */
2277 static void
2278 __unused task_partial_reap(task_t task, __unused int pid)
2279 {
2280 	unsigned int    reclaimed_resident = 0;
2281 	unsigned int    reclaimed_compressed = 0;
2282 	uint64_t        task_page_count;
2283 
2284 	task_page_count = (get_task_phys_footprint(task) / PAGE_SIZE_64);
2285 
2286 	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_MAP_PARTIAL_REAP) | DBG_FUNC_START),
2287 	    pid, task_page_count, 0, 0, 0);
2288 
2289 	vm_map_partial_reap(task->map, &reclaimed_resident, &reclaimed_compressed);
2290 
2291 	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_MAP_PARTIAL_REAP) | DBG_FUNC_END),
2292 	    pid, reclaimed_resident, reclaimed_compressed, 0, 0);
2293 }
2294 
2295 /*
2296  * task_mark_corpse:
2297  *
2298  * Mark the task as a corpse. Called by crashing thread.
2299  */
2300 kern_return_t
2301 task_mark_corpse(task_t task)
2302 {
2303 	kern_return_t kr = KERN_SUCCESS;
2304 	thread_t self_thread;
2305 	(void) self_thread;
2306 	wait_interrupt_t wsave;
2307 #if CONFIG_MACF
2308 	struct label *crash_label = NULL;
2309 #endif
2310 
2311 	assert(task != kernel_task);
2312 	assert(task == current_task());
2313 	assert(!task_is_a_corpse(task));
2314 
2315 #if CONFIG_MACF
2316 	crash_label = mac_exc_create_label_for_proc((struct proc*)task->bsd_info);
2317 #endif
2318 
2319 	kr = task_collect_crash_info(task,
2320 #if CONFIG_MACF
2321 	    crash_label,
2322 #endif
2323 	    FALSE);
2324 	if (kr != KERN_SUCCESS) {
2325 		goto out;
2326 	}
2327 
2328 	self_thread = current_thread();
2329 
2330 	wsave = thread_interrupt_level(THREAD_UNINT);
2331 	task_lock(task);
2332 
2333 	/*
2334 	 * Check if any other thread called task_terminate_internal
2335 	 * and made the task inactive before we could mark it for
2336 	 * corpse pending report. Bail out if the task is inactive.
2337 	 */
2338 	if (!task->active) {
2339 		kcdata_descriptor_t crash_data_release = task->corpse_info;
2340 		void *crash_data_kernel_release = kcdata_memory_get_begin_addr(crash_data_release);
2341 
2342 		task->corpse_info = NULL;
2343 		task_unlock(task);
2344 
2345 		if (crash_data_release != NULL) {
2346 			task_crashinfo_destroy(crash_data_release);
2347 		}
2348 		kfree_data(crash_data_kernel_release, CORPSEINFO_ALLOCATION_SIZE);
2349 		return KERN_TERMINATED;
2350 	}
2351 
2352 	task_set_corpse_pending_report(task);
2353 	task_set_corpse(task);
2354 	task->crashed_thread_id = thread_tid(self_thread);
2355 
2356 	kr = task_start_halt_locked(task, TRUE);
2357 	assert(kr == KERN_SUCCESS);
2358 
2359 	task_set_uniqueid(task);
2360 
2361 	task_unlock(task);
2362 
2363 	/*
2364 	 * ipc_task_reset() moved to last thread_terminate_self(): rdar://75737960.
2365 	 * disable old ports here instead.
2366 	 *
2367 	 * The vm_map and ipc_space must exist until this function returns,
2368 	 * convert_port_to_{map,space}_with_flavor relies on this behavior.
2369 	 */
2370 	ipc_task_disable(task);
2371 
2372 	/* terminate the ipc space */
2373 	ipc_space_terminate(task->itk_space);
2374 
2375 	/* Add it to global corpse task list */
2376 	task_add_to_corpse_task_list(task);
2377 
2378 	thread_terminate_internal(self_thread);
2379 
2380 	(void) thread_interrupt_level(wsave);
2381 	assert(task->halting == TRUE);
2382 
2383 out:
2384 #if CONFIG_MACF
2385 	mac_exc_free_label(crash_label);
2386 #endif
2387 	return kr;
2388 }
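/*
 * Descriptive recap of the sequence above: collect the crash info blob,
 * mark the task corpse-pending under the task lock, start the halt,
 * disable the old ports and terminate the IPC space, publish the task
 * on the global corpse list, then terminate the calling (crashing)
 * thread. The corpse is later reaped through the no-senders path (see
 * task_port_no_senders() below).
 */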
2389 
2390 /*
2391  *	task_set_uniqueid
2392  *
2393  *	Set task uniqueid to systemwide unique 64 bit value
2394  */
2395 void
2396 task_set_uniqueid(task_t task)
2397 {
2398 	task->task_uniqueid = OSIncrementAtomic64(&next_taskuniqueid);
2399 }
2400 
2401 /*
2402  *	task_clear_corpse
2403  *
2404  *	Clears the corpse pending bit on task.
2405  *	Removes inspection bit on the threads.
2406  */
2407 void
2408 task_clear_corpse(task_t task)
2409 {
2410 	thread_t th_iter = NULL;
2411 
2412 	task_lock(task);
2413 	queue_iterate(&task->threads, th_iter, thread_t, task_threads)
2414 	{
2415 		thread_mtx_lock(th_iter);
2416 		th_iter->inspection = FALSE;
2417 		ipc_thread_disable(th_iter);
2418 		thread_mtx_unlock(th_iter);
2419 	}
2420 
2421 	thread_terminate_crashed_threads();
2422 	/* remove the pending corpse report flag */
2423 	task_clear_corpse_pending_report(task);
2424 
2425 	task_unlock(task);
2426 }
2427 
2428 /*
2429  *	task_port_no_senders
2430  *
2431  *	Called whenever the Mach port system detects no-senders on
2432  *	the task port of a corpse.
2433  *	Each notification that comes in should terminate the task (corpse).
2434  */
2435 static void
2436 task_port_no_senders(ipc_port_t port, __unused mach_port_mscount_t mscount)
2437 {
2438 	task_t task = ipc_kobject_get_locked(port, IKOT_TASK_CONTROL);
2439 
2440 	assert(task != TASK_NULL);
2441 	assert(task_is_a_corpse(task));
2442 
2443 	/* Remove the task from global corpse task list */
2444 	task_remove_from_corpse_task_list(task);
2445 
2446 	task_clear_corpse(task);
2447 	task_terminate_internal(task);
2448 }
2449 
2450 /*
2451  *	task_port_with_flavor_no_senders
2452  *
2453  *	Called whenever the Mach port system detects no-senders on
2454  *	the task inspect or read port. These ports are allocated lazily and
2455  *	should be deallocated here when there are no senders remaining.
2456  */
2457 static void
2458 task_port_with_flavor_no_senders(
2459 	ipc_port_t          port,
2460 	mach_port_mscount_t mscount __unused)
2461 {
2462 	task_t task;
2463 	mach_task_flavor_t flavor;
2464 	ipc_kobject_type_t kotype;
2465 
2466 	ip_mq_lock(port);
2467 	if (port->ip_srights > 0) {
2468 		ip_mq_unlock(port);
2469 		return;
2470 	}
2471 	kotype = ip_kotype(port);
2472 	assert((IKOT_TASK_READ == kotype) || (IKOT_TASK_INSPECT == kotype));
2473 	task = ipc_kobject_get_locked(port, kotype);
2474 	if (task != TASK_NULL) {
2475 		task_reference(task);
2476 	}
2477 	ip_mq_unlock(port);
2478 
2479 	if (task == TASK_NULL) {
2480 		/* The task is exiting or disabled; it will eventually deallocate the port */
2481 		return;
2482 	}
2483 
2484 	if (kotype == IKOT_TASK_READ) {
2485 		flavor = TASK_FLAVOR_READ;
2486 	} else {
2487 		flavor = TASK_FLAVOR_INSPECT;
2488 	}
2489 
2490 	itk_lock(task);
2491 	ip_mq_lock(port);
2492 
2493 	/*
2494 	 * If the port is no longer active, then ipc_task_terminate() ran
2495 	 * and destroyed the kobject already. Just deallocate the task
2496 	 * ref we took and go away.
2497 	 *
2498 	 * It is also possible that several nsrequests are in flight,
2499 	 * only one shall NULL-out the port entry, and this is the one
2500 	 * that gets to dealloc the port.
2501 	 *
2502 	 * Check for a stale no-senders notification. A call to any function
2503 	 * that vends out send rights to this port could resurrect it between
2504 	 * this notification being generated and actually being handled here.
2505 	 */
2506 	if (!ip_active(port) ||
2507 	    task->itk_task_ports[flavor] != port ||
2508 	    port->ip_srights > 0) {
2509 		ip_mq_unlock(port);
2510 		itk_unlock(task);
2511 		task_deallocate(task);
2512 		return;
2513 	}
2514 
2515 	assert(task->itk_task_ports[flavor] == port);
2516 	task->itk_task_ports[flavor] = IP_NULL;
2517 	itk_unlock(task);
2518 
2519 	ipc_kobject_dealloc_port_and_unlock(port, 0, kotype);
2520 
2521 	task_deallocate(task);
2522 }
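/*
 * Worked example of the stale-notification race handled above
 * (illustrative timeline):
 *
 *	1. the last send right to a task read port is dropped and a
 *	   no-senders notification is queued;
 *	2. before the notification is processed, another call vends a
 *	   fresh send right for the same flavor, resurrecting the port;
 *	3. this handler then runs, sees ip_srights > 0 (or a changed
 *	   itk_task_ports[flavor] slot), and bails out rather than
 *	   deallocating a port that is live again.
 */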
2523 
2524 /*
2525  *	task_wait_till_threads_terminate_locked
2526  *
2527  *	Wait till all the threads in the task are terminated.
2528  *	Might release the task lock and re-acquire it.
2529  */
2530 void
2531 task_wait_till_threads_terminate_locked(task_t task)
2532 {
2533 	/* wait for all the threads in the task to terminate */
2534 	while (task->active_thread_count != 0) {
2535 		assert_wait((event_t)&task->active_thread_count, THREAD_UNINT);
2536 		task_unlock(task);
2537 		thread_block(THREAD_CONTINUE_NULL);
2538 
2539 		task_lock(task);
2540 	}
2541 }
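/*
 * Sketch of the wait/wakeup pairing used above (non-normative): the
 * thread-termination side elsewhere in the kernel is expected to issue,
 * conceptually:
 *
 *	if (--task->active_thread_count == 0) {
 *		thread_wakeup((event_t)&task->active_thread_count);
 *	}
 *
 * assert_wait() registers the waiter before the task lock is dropped,
 * so a wakeup arriving between task_unlock() and thread_block() is not
 * lost.
 */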
2542 
2543 /*
2544  *	task_duplicate_map_and_threads
2545  *
2546  *	Copy the vm_map of the source task.
2547  *	Copy active threads from the source task to the destination task.
2548  *	The source task is suspended for the duration of the copy.
2549  */
2550 kern_return_t
2551 task_duplicate_map_and_threads(
2552 	task_t task,
2553 	void *p,
2554 	task_t new_task,
2555 	thread_t *thread_ret,
2556 	uint64_t **udata_buffer,
2557 	int *size,
2558 	int *num_udata,
2559 	bool for_exception)
2560 {
2561 	kern_return_t kr = KERN_SUCCESS;
2562 	int active;
2563 	thread_t thread, self, thread_return = THREAD_NULL;
2564 	thread_t new_thread = THREAD_NULL, first_thread = THREAD_NULL;
2565 	thread_t *thread_array;
2566 	uint32_t active_thread_count = 0, array_count = 0, i;
2567 	vm_map_t oldmap;
2568 	uint64_t *buffer = NULL;
2569 	int buf_size = 0;
2570 	int est_knotes = 0, num_knotes = 0;
2571 
2572 	self = current_thread();
2573 
2574 	/*
2575 	 * Suspend the task to copy thread state; use the internal
2576 	 * variant so that no user-space process can resume
2577 	 * the task from under us.
2578 	 */
2579 	kr = task_suspend_internal(task);
2580 	if (kr != KERN_SUCCESS) {
2581 		return kr;
2582 	}
2583 
2584 	if (task->map->disable_vmentry_reuse == TRUE) {
2585 		/*
2586 		 * Quite likely GuardMalloc (or some debugging tool)
2587 		 * is being used on this task. And it has gone through
2588 		 * its limit. Making a corpse will likely encounter
2589 		 * a lot of VM entries that will need COW.
2590 		 *
2591 		 * Skip it.
2592 		 */
2593 #if DEVELOPMENT || DEBUG
2594 		memorystatus_abort_vm_map_fork(task);
2595 #endif
2596 		task_resume_internal(task);
2597 		return KERN_FAILURE;
2598 	}
2599 
2600 	/* Check with VM if vm_map_fork is allowed for this task */
2601 	if (memorystatus_allowed_vm_map_fork(task)) {
2602 		/* Set up the new task's vm_map: switch from the parent task's map to its COW copy */
2603 		oldmap = new_task->map;
2604 		new_task->map = vm_map_fork(new_task->ledger,
2605 		    task->map,
2606 		    (VM_MAP_FORK_SHARE_IF_INHERIT_NONE |
2607 		    VM_MAP_FORK_PRESERVE_PURGEABLE |
2608 		    VM_MAP_FORK_CORPSE_FOOTPRINT));
2609 		if (new_task->map) {
2610 			vm_map_deallocate(oldmap);
2611 
2612 			/* copy ledgers that impact the memory footprint */
2613 			vm_map_copy_footprint_ledgers(task, new_task);
2614 
2615 			/* Get all the udata pointers from kqueue */
2616 			est_knotes = kevent_proc_copy_uptrs(p, NULL, 0);
2617 			if (est_knotes > 0) {
2618 				buf_size = (est_knotes + 32) * sizeof(uint64_t);
2619 				buffer = kalloc_data(buf_size, Z_WAITOK);
2620 				num_knotes = kevent_proc_copy_uptrs(p, buffer, buf_size);
2621 				if (num_knotes > est_knotes + 32) {
2622 					num_knotes = est_knotes + 32;
2623 				}
2624 			}
2625 		} else {
2626 			new_task->map = oldmap;
2627 #if DEVELOPMENT || DEBUG
2628 			memorystatus_abort_vm_map_fork(task);
2629 #endif
2630 			task_resume_internal(task);
2631 			return KERN_NO_SPACE;
2632 		}
2633 	} else if (!for_exception) {
2634 #if DEVELOPMENT || DEBUG
2635 		memorystatus_abort_vm_map_fork(task);
2636 #endif
2637 		task_resume_internal(task);
2638 		return KERN_NO_SPACE;
2639 	}
2640 
2641 	active_thread_count = task->active_thread_count;
2642 	if (active_thread_count == 0) {
2643 		kfree_data(buffer, buf_size);
2644 		task_resume_internal(task);
2645 		return KERN_FAILURE;
2646 	}
2647 
2648 	thread_array = kalloc_type(thread_t, active_thread_count, Z_WAITOK);
2649 
2650 	/* Collect the threads under the task lock, then drop it before calling thread_create_with_continuation */
2651 	task_lock(task);
2652 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
2653 		/* Skip inactive threads */
2654 		active = thread->active;
2655 		if (!active) {
2656 			continue;
2657 		}
2658 
2659 		if (array_count >= active_thread_count) {
2660 			break;
2661 		}
2662 
2663 		thread_array[array_count++] = thread;
2664 		thread_reference(thread);
2665 	}
2666 	task_unlock(task);
2667 
2668 	for (i = 0; i < array_count; i++) {
2669 		kr = thread_create_with_continuation(new_task, &new_thread, (thread_continue_t)thread_corpse_continue);
2670 		if (kr != KERN_SUCCESS) {
2671 			break;
2672 		}
2673 
2674 		/* Equivalent of current thread in corpse */
2675 		if (thread_array[i] == self) {
2676 			thread_return = new_thread;
2677 			new_task->crashed_thread_id = thread_tid(new_thread);
2678 		} else if (first_thread == NULL) {
2679 			first_thread = new_thread;
2680 		} else {
2681 			/* drop the extra ref returned by thread_create_with_continuation */
2682 			thread_deallocate(new_thread);
2683 		}
2684 
2685 		kr = thread_dup2(thread_array[i], new_thread);
2686 		if (kr != KERN_SUCCESS) {
2687 			thread_mtx_lock(new_thread);
2688 			new_thread->corpse_dup = TRUE;
2689 			thread_mtx_unlock(new_thread);
2690 			continue;
2691 		}
2692 
2693 		/* Copy thread name */
2694 		bsd_copythreadname(get_bsdthread_info(new_thread),
2695 		    get_bsdthread_info(thread_array[i]));
2696 		new_thread->thread_tag = thread_array[i]->thread_tag &
2697 		    ~THREAD_TAG_USER_JOIN;
2698 		thread_copy_resource_info(new_thread, thread_array[i]);
2699 	}
2700 
2701 	/* return the first thread if we couldn't find the equivalent of current */
2702 	if (thread_return == THREAD_NULL) {
2703 		thread_return = first_thread;
2704 	} else if (first_thread != THREAD_NULL) {
2705 		/* drop the extra ref returned by thread_create_with_continuation */
2706 		thread_deallocate(first_thread);
2707 	}
2708 
2709 	task_resume_internal(task);
2710 
2711 	for (i = 0; i < array_count; i++) {
2712 		thread_deallocate(thread_array[i]);
2713 	}
2714 	kfree_type(thread_t, active_thread_count, thread_array);
2715 
2716 	if (kr == KERN_SUCCESS) {
2717 		*thread_ret = thread_return;
2718 		*udata_buffer = buffer;
2719 		*size = buf_size;
2720 		*num_udata = num_knotes;
2721 	} else {
2722 		if (thread_return != THREAD_NULL) {
2723 			thread_deallocate(thread_return);
2724 		}
2725 		kfree_data(buffer, buf_size);
2726 	}
2727 
2728 	return kr;
2729 }
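/*
 * Non-normative caller contract sketch: on KERN_SUCCESS the caller owns
 * one reference on *thread_ret plus the kalloc'd *udata_buffer (of
 * *size bytes) and is expected to release both, conceptually:
 *
 *	thread_deallocate(thread_ret);
 *	kfree_data(udata_buffer, size);
 */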
2730 
2731 #if CONFIG_SECLUDED_MEMORY
2732 extern void task_set_can_use_secluded_mem_locked(
2733 	task_t          task,
2734 	boolean_t       can_use_secluded_mem);
2735 #endif /* CONFIG_SECLUDED_MEMORY */
2736 
2737 #if MACH_ASSERT
2738 int debug4k_panic_on_terminate = 0;
2739 #endif /* MACH_ASSERT */
2740 kern_return_t
2741 task_terminate_internal(
2742 	task_t                  task)
2743 {
2744 	thread_t                        thread, self;
2745 	task_t                          self_task;
2746 	boolean_t                       interrupt_save;
2747 	int                             pid = 0;
2748 
2749 	assert(task != kernel_task);
2750 
2751 	self = current_thread();
2752 	self_task = current_task();
2753 
2754 	/*
2755 	 *	Get the task locked and make sure that we are not racing
2756 	 *	with someone else trying to terminate us.
2757 	 */
2758 	if (task == self_task) {
2759 		task_lock(task);
2760 	} else if (task < self_task) {
2761 		task_lock(task);
2762 		task_lock(self_task);
2763 	} else {
2764 		task_lock(self_task);
2765 		task_lock(task);
2766 	}
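	/*
	 * Note (illustrative): taking the two task locks in ascending
	 * address order imposes a global lock order, so two threads
	 * terminating each other's tasks cannot deadlock. Generic form
	 * of the idiom:
	 *
	 *	lock(MIN(a, b)); lock(MAX(a, b));
	 */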
2767 
2768 #if CONFIG_SECLUDED_MEMORY
2769 	if (task->task_can_use_secluded_mem) {
2770 		task_set_can_use_secluded_mem_locked(task, FALSE);
2771 	}
2772 	task->task_could_use_secluded_mem = FALSE;
2773 	task->task_could_also_use_secluded_mem = FALSE;
2774 
2775 	if (task->task_suppressed_secluded) {
2776 		stop_secluded_suppression(task);
2777 	}
2778 #endif /* CONFIG_SECLUDED_MEMORY */
2779 
2780 	if (!task->active) {
2781 		/*
2782 		 *	Task is already being terminated.
2783 		 *	Just return an error. If we are dying, this will
2784 		 *	just get us to our AST special handler and that
2785 		 *	will get us to finalize the termination of ourselves.
2786 		 */
2787 		task_unlock(task);
2788 		if (self_task != task) {
2789 			task_unlock(self_task);
2790 		}
2791 
2792 		return KERN_FAILURE;
2793 	}
2794 
2795 	if (task_corpse_pending_report(task)) {
2796 		/*
2797 		 *	Task is marked for reporting as corpse.
2798 		 *	Just return an error. This will
2799 		 *	just get us to our AST special handler and that
2800 		 *	will get us to finish the path to death.
2801 		 */
2802 		task_unlock(task);
2803 		if (self_task != task) {
2804 			task_unlock(self_task);
2805 		}
2806 
2807 		return KERN_FAILURE;
2808 	}
2809 
2810 	if (self_task != task) {
2811 		task_unlock(self_task);
2812 	}
2813 
2814 	/*
2815 	 * Make sure the current thread does not get aborted out of
2816 	 * the waits inside these operations.
2817 	 */
2818 	interrupt_save = thread_interrupt_level(THREAD_UNINT);
2819 
2820 	/*
2821 	 *	Indicate that we want all the threads to stop executing
2822 	 *	at user space by holding the task (we would have held
2823 	 *	each thread independently in thread_terminate_internal -
2824 	 *	but this way we may be more likely to already find it
2825 	 *	held there).  Mark the task inactive, and prevent
2826 	 *	further task operations via the task port.
2827 	 *
2828 	 *	The vm_map and ipc_space must exist until this function returns,
2829 	 *	convert_port_to_{map,space}_with_flavor relies on this behavior.
2830 	 */
2831 	task_hold_locked(task);
2832 	task->active = FALSE;
2833 	ipc_task_disable(task);
2834 
2835 #if CONFIG_TELEMETRY
2836 	/*
2837 	 * Notify telemetry that this task is going away.
2838 	 */
2839 	telemetry_task_ctl_locked(task, TF_TELEMETRY, 0);
2840 #endif
2841 
2842 	/*
2843 	 *	Terminate each thread in the task.
2844 	 */
2845 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
2846 		thread_terminate_internal(thread);
2847 	}
2848 
2849 #ifdef MACH_BSD
2850 	if (task->bsd_info != NULL && !task_is_exec_copy(task)) {
2851 		pid = proc_pid(task->bsd_info);
2852 	}
2853 #endif /* MACH_BSD */
2854 
2855 	task_unlock(task);
2856 
2857 	proc_set_task_policy(task, TASK_POLICY_ATTRIBUTE,
2858 	    TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
2859 
2860 	/* Early object reap phase */
2861 
2862 // PR-17045188: Revisit implementation
2863 //        task_partial_reap(task, pid);
2864 
2865 #if CONFIG_TASKWATCH
2866 	/*
2867 	 * remove all task watchers
2868 	 */
2869 	task_removewatchers(task);
2870 
2871 #endif /* CONFIG_TASKWATCH */
2872 
2873 	/*
2874 	 *	Destroy all synchronizers owned by the task.
2875 	 */
2876 	task_synchronizer_destroy_all(task);
2877 
2878 	/*
2879 	 *	Clear the watchport boost on the task.
2880 	 */
2881 	task_remove_turnstile_watchports(task);
2882 
2883 	/*
2884 	 *	Destroy the IPC space, leaving just a reference for it.
2885 	 */
2886 	ipc_space_terminate(task->itk_space);
2887 
2888 #if 00
2889 	/* if some ledgers go negative on tear-down again... */
2890 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
2891 	    task_ledgers.phys_footprint);
2892 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
2893 	    task_ledgers.internal);
2894 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
2895 	    task_ledgers.iokit_mapped);
2896 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
2897 	    task_ledgers.alternate_accounting);
2898 	ledger_disable_panic_on_negative(task->map->pmap->ledger,
2899 	    task_ledgers.alternate_accounting_compressed);
2900 #endif
2901 
2902 	/*
2903 	 * If the current thread is a member of the task
2904 	 * being terminated, then the last reference to
2905 	 * the task will not be dropped until the thread
2906 	 * is finally reaped.  To avoid incurring the
2907 	 * expense of removing the address space regions
2908 	 * at reap time, we do it explicitly here.
2909 	 */
2910 
2911 	vm_map_lock(task->map);
2912 	vm_map_disable_hole_optimization(task->map);
2913 	vm_map_unlock(task->map);
2914 
2915 #if MACH_ASSERT
2916 	/*
2917 	 * Identify the pmap's process, in case the pmap ledgers drift
2918 	 * and we have to report it.
2919 	 */
2920 	char procname[17];
2921 	if (task->bsd_info && !task_is_exec_copy(task)) {
2922 		pid = proc_pid(task->bsd_info);
2923 		proc_name_kdp(task->bsd_info, procname, sizeof(procname));
2924 	} else {
2925 		pid = 0;
2926 		strlcpy(procname, "<unknown>", sizeof(procname));
2927 	}
2928 	pmap_set_process(task->map->pmap, pid, procname);
2929 	if (vm_map_page_shift(task->map) < (int)PAGE_SHIFT) {
2930 		DEBUG4K_LIFE("map %p procname: %s\n", task->map, procname);
2931 		if (debug4k_panic_on_terminate) {
2932 			panic("DEBUG4K: %s:%d %d[%s] map %p", __FUNCTION__, __LINE__, pid, procname, task->map);
2933 		}
2934 	}
2935 #endif /* MACH_ASSERT */
2936 
2937 	vm_map_terminate(task->map);
2938 
2939 	/* release our shared region */
2940 	vm_shared_region_set(task, NULL);
2941 
2942 #if __has_feature(ptrauth_calls)
2943 	task_set_shared_region_id(task, NULL);
2944 #endif /* __has_feature(ptrauth_calls) */
2945 
2946 	lck_mtx_lock(&tasks_threads_lock);
2947 	queue_remove(&tasks, task, task_t, tasks);
2948 	queue_enter(&terminated_tasks, task, task_t, tasks);
2949 	tasks_count--;
2950 	terminated_tasks_count++;
2951 	lck_mtx_unlock(&tasks_threads_lock);
2952 
2953 	/*
2954 	 * We no longer need to guard against being aborted, so restore
2955 	 * the previous interruptible state.
2956 	 */
2957 	thread_interrupt_level(interrupt_save);
2958 
2959 #if KPC
2960 	/* force the task to release all ctrs */
2961 	if (task->t_kpc & TASK_KPC_FORCED_ALL_CTRS) {
2962 		kpc_force_all_ctrs(task, 0);
2963 	}
2964 #endif /* KPC */
2965 
2966 #if CONFIG_COALITIONS
2967 	/*
2968 	 * Leave the coalition for corpse task or task that
2969 	 * never had any active threads (e.g. fork, exec failure).
2970 	 * For task with active threads, the task will be removed
2971 	 * from coalition by last terminating thread.
2972 	 */
2973 	if (task->active_thread_count == 0) {
2974 		coalitions_remove_task(task);
2975 	}
2976 #endif
2977 
2978 #if CONFIG_FREEZE
2979 	extern int      vm_compressor_available;
2980 	if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE && vm_compressor_available) {
2981 		task_disown_frozen_csegs(task);
2982 		assert(queue_empty(&task->task_frozen_cseg_q));
2983 	}
2984 #endif /* CONFIG_FREEZE */
2985 
2986 
2987 	/*
2988 	 * Get rid of the task active reference on itself.
2989 	 */
2990 	task_deallocate_grp(task, TASK_GRP_INTERNAL);
2991 
2992 	return KERN_SUCCESS;
2993 }
2994 
2995 void
2996 tasks_system_suspend(boolean_t suspend)
2997 {
2998 	task_t task;
2999 
3000 	KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SUSPEND_USERSPACE) |
3001 	    (suspend ? DBG_FUNC_START : DBG_FUNC_END));
3002 
3003 	lck_mtx_lock(&tasks_threads_lock);
3004 	assert(tasks_suspend_state != suspend);
3005 	tasks_suspend_state = suspend;
3006 	queue_iterate(&tasks, task, task_t, tasks) {
3007 		if (task == kernel_task) {
3008 			continue;
3009 		}
3010 		suspend ? task_suspend_internal(task) : task_resume_internal(task);
3011 	}
3012 	lck_mtx_unlock(&tasks_threads_lock);
3013 }
3014 
3015 /*
3016  * task_start_halt:
3017  *
3018  *      Shut the current task down (except for the current thread) in
3019  *	preparation for dramatic changes to the task (probably exec).
3020  *	We hold the task and mark all other threads in the task for
3021  *	termination.
3022  */
3023 kern_return_t
3024 task_start_halt(task_t task)
3025 {
3026 	kern_return_t kr = KERN_SUCCESS;
3027 	task_lock(task);
3028 	kr = task_start_halt_locked(task, FALSE);
3029 	task_unlock(task);
3030 	return kr;
3031 }
3032 
3033 static kern_return_t
3034 task_start_halt_locked(task_t task, boolean_t should_mark_corpse)
3035 {
3036 	thread_t thread, self;
3037 	uint64_t dispatchqueue_offset;
3038 
3039 	assert(task != kernel_task);
3040 
3041 	self = current_thread();
3042 
3043 	if (task != get_threadtask(self) && !task_is_a_corpse_fork(task)) {
3044 		return KERN_INVALID_ARGUMENT;
3045 	}
3046 
3047 	if (!should_mark_corpse &&
3048 	    (task->halting || !task->active || !self->active)) {
3049 		/*
3050 		 * The task or current thread is already being terminated.
3051 		 * Hurry up and return out of the current kernel context
3052 		 * so that we run our AST special handler to terminate
3053 		 * ourselves. If should_mark_corpse is set, corpse
3054 		 * creation might have raced with exec; let the corpse
3055 		 * creation continue. Once the current thread reaches AST,
3056 		 * the thread in exec will be woken up from task_complete_halt.
3057 		 * Exec will fail because the proc was marked for exit.
3058 		 * Once the thread in exec reaches AST, it will call proc_exit
3059 		 * and deliver the EXC_CORPSE_NOTIFY.
3060 		 */
3061 		return KERN_FAILURE;
3062 	}
3063 
3064 	/* Thread creation will fail after this point of no return. */
3065 	task->halting = TRUE;
3066 
3067 	/*
3068 	 * Mark all the threads to keep them from starting any more
3069 	 * user-level execution. The thread_terminate_internal code
3070 	 * would do this on a thread-by-thread basis anyway, but this
3071 	 * gives us a better chance of not having to wait there.
3072 	 */
3073 	task_hold_locked(task);
3074 	dispatchqueue_offset = get_dispatchqueue_offset_from_proc(task->bsd_info);
3075 
3076 	/*
3077 	 * Terminate all the other threads in the task.
3078 	 */
3079 	queue_iterate(&task->threads, thread, thread_t, task_threads)
3080 	{
3081 		/*
3082 		 * Remove priority throttles so threads can terminate in a timely fashion. This has
3083 		 * to be done after task_hold_locked() traps all threads to AST, but before
3084 		 * threads are marked inactive in thread_terminate_internal(). Takes thread
3085 		 * mutex lock.
3086 		 * See: thread_policy_update_tasklocked().
3087 		 */
3088 		proc_set_thread_policy(thread, TASK_POLICY_ATTRIBUTE,
3089 		    TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
3090 
3091 		if (should_mark_corpse) {
3092 			thread_mtx_lock(thread);
3093 			thread->inspection = TRUE;
3094 			thread_mtx_unlock(thread);
3095 		}
3096 		if (thread != self) {
3097 			thread_terminate_internal(thread);
3098 		}
3099 	}
3100 	task->dispatchqueue_offset = dispatchqueue_offset;
3101 
3102 	task_release_locked(task);
3103 
3104 	return KERN_SUCCESS;
3105 }
3106 
3107 
3108 /*
3109  * task_complete_halt:
3110  *
3111  *	Complete task halt by waiting for threads to terminate, then clean
3112  *	up task resources (VM, port namespace, etc...) and then let the
3113  *	current thread go in the (practically empty) task context.
3114  *
3115  *	Note: the task->halting flag is not cleared, in order to avoid
3116  *	creation of a new thread in the old exec'ed task.
3117  */
3118 void
3119 task_complete_halt(task_t task)
3120 {
3121 	task_lock(task);
3122 	assert(task->halting);
3123 	assert(task == current_task());
3124 
3125 	/*
3126 	 *	Wait for the other threads to get shut down.
3127 	 *      When the last other thread is reaped, we'll be
3128 	 *	woken up.
3129 	 */
3130 	if (task->thread_count > 1) {
3131 		assert_wait((event_t)&task->halting, THREAD_UNINT);
3132 		task_unlock(task);
3133 		thread_block(THREAD_CONTINUE_NULL);
3134 	} else {
3135 		task_unlock(task);
3136 	}
3137 
3138 	/*
3139 	 *	Give the machine dependent code a chance
3140 	 *	to perform cleanup of task-level resources
3141 	 *	associated with the current thread before
3142 	 *	ripping apart the task.
3143 	 */
3144 	machine_task_terminate(task);
3145 
3146 	/*
3147 	 *	Destroy all synchronizers owned by the task.
3148 	 */
3149 	task_synchronizer_destroy_all(task);
3150 
3151 	/*
3152 	 *	Terminate the IPC space.  A long time ago,
3153 	 *	this used to be ipc_space_clean() which would
3154 	 *	keep the space active but hollow it.
3155 	 *
3156 	 *	We really do not need this semantics given
3157 	 *	We really do not need those semantics given that
3158 	 */
3159 	ipc_space_terminate(task->itk_space);
3160 
3161 	/*
3162 	 * Clean out the address space, as we are going to be
3163 	 * getting a new one.
3164 	 */
3165 	vm_map_remove(task->map, task->map->min_offset,
3166 	    task->map->max_offset,
3167 	    /*
3168 	     * Final cleanup:
3169 	     * + no unnesting
3170 	     * + remove immutable mappings
3171 	     * + allow gaps in the range
3172 	     */
3173 	    (VM_MAP_REMOVE_NO_UNNESTING |
3174 	    VM_MAP_REMOVE_IMMUTABLE |
3175 	    VM_MAP_REMOVE_GAPS_OK));
3176 
3177 	/*
3178 	 * Kick out any IOKitUser handles to the task. At best they're stale,
3179 	 * at worst someone is racing a SUID exec.
3180 	 */
3181 	iokit_task_terminate(task);
3182 }
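/*
 * A minimal sketch of how the two halves pair up, assuming the exec
 * path drives both calls (the names are real; the ordering shown is
 * the expected protocol, not verbatim exec code):
 *
 *	if (task_start_halt(task) == KERN_SUCCESS) {
 *		...other threads are marked and terminating...
 *		task_complete_halt(task);
 *	}
 *
 * task->halting stays TRUE throughout, so no new threads can be
 * created in the old task, and the caller's thread is left alone in
 * a hollowed-out task.
 */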
3183 
3184 /*
3185  *	task_hold_locked:
3186  *
3187  *	Suspend execution of the specified task.
3188  *	This is a recursive-style suspension of the task, a count of
3189  *	suspends is maintained.
3190  *
3191  *	CONDITIONS: the task is locked and active.
3192  */
3193 void
3194 task_hold_locked(
3195 	task_t          task)
3196 {
3197 	thread_t        thread;
3198 
3199 	assert(task->active);
3200 
3201 	if (task->suspend_count++ > 0) {
3202 		return;
3203 	}
3204 
3205 	if (task->bsd_info) {
3206 		workq_proc_suspended(task->bsd_info);
3207 	}
3208 
3209 	/*
3210 	 *	Iterate through all the threads and hold them.
3211 	 */
3212 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
3213 		thread_mtx_lock(thread);
3214 		thread_hold(thread);
3215 		thread_mtx_unlock(thread);
3216 	}
3217 }
3218 
3219 /*
3220  *	task_hold:
3221  *
3222  *	Same as the internal routine above, except that it must lock
3223  *	and verify that the task is active.  This differs from task_suspend
3224  *	in that it places a kernel hold on the task rather than just a
3225  *	user-level hold.  This keeps users from over-resuming and setting
3226  *	it running out from under the kernel.
3227  *
3228  *      CONDITIONS: the caller holds a reference on the task
3229  */
3230 kern_return_t
3231 task_hold(
3232 	task_t          task)
3233 {
3234 	if (task == TASK_NULL) {
3235 		return KERN_INVALID_ARGUMENT;
3236 	}
3237 
3238 	task_lock(task);
3239 
3240 	if (!task->active) {
3241 		task_unlock(task);
3242 
3243 		return KERN_FAILURE;
3244 	}
3245 
3246 	task_hold_locked(task);
3247 	task_unlock(task);
3248 
3249 	return KERN_SUCCESS;
3250 }
3251 
3252 kern_return_t
3253 task_wait(
3254 	task_t          task,
3255 	boolean_t       until_not_runnable)
3256 {
3257 	if (task == TASK_NULL) {
3258 		return KERN_INVALID_ARGUMENT;
3259 	}
3260 
3261 	task_lock(task);
3262 
3263 	if (!task->active) {
3264 		task_unlock(task);
3265 
3266 		return KERN_FAILURE;
3267 	}
3268 
3269 	task_wait_locked(task, until_not_runnable);
3270 	task_unlock(task);
3271 
3272 	return KERN_SUCCESS;
3273 }
3274 
3275 /*
3276  *	task_wait_locked:
3277  *
3278  *	Wait for all threads in task to stop.
3279  *
3280  * Conditions:
3281  *	Called with task locked, active, and held.
3282  */
3283 void
3284 task_wait_locked(
3285 	task_t          task,
3286 	boolean_t               until_not_runnable)
3287 {
3288 	thread_t        thread, self;
3289 
3290 	assert(task->active);
3291 	assert(task->suspend_count > 0);
3292 
3293 	self = current_thread();
3294 
3295 	/*
3296 	 *	Iterate through all the threads and wait for them to
3297 	 *	stop.  Do not wait for the current thread if it is within
3298 	 *	the task.
3299 	 */
3300 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
3301 		if (thread != self) {
3302 			thread_wait(thread, until_not_runnable);
3303 		}
3304 	}
3305 }
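/*
 * A sketch of the kernel-internal quiesce pattern these primitives
 * compose into (illustrative; it mirrors what place_task_hold() does
 * below rather than copying any single caller):
 *
 *	task_lock(task);
 *	task_hold_locked(task);		...stop further user execution...
 *	task_wait_locked(task, FALSE);	...wait for threads to stop...
 *	...inspect or manipulate the stopped task...
 *	task_release_locked(task);	...balance the hold...
 *	task_unlock(task);
 */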
3306 
3307 boolean_t
3308 task_is_app_suspended(task_t task)
3309 {
3310 	return task->pidsuspended;
3311 }
3312 
3313 /*
3314  *	task_release_locked:
3315  *
3316  *	Release a kernel hold on a task.
3317  *
3318  *      CONDITIONS: the task is locked and active
3319  */
3320 void
3321 task_release_locked(
3322 	task_t          task)
3323 {
3324 	thread_t        thread;
3325 
3326 	assert(task->active);
3327 	assert(task->suspend_count > 0);
3328 
3329 	if (--task->suspend_count > 0) {
3330 		return;
3331 	}
3332 
3333 	if (task->bsd_info) {
3334 		workq_proc_resumed(task->bsd_info);
3335 	}
3336 
3337 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
3338 		thread_mtx_lock(thread);
3339 		thread_release(thread);
3340 		thread_mtx_unlock(thread);
3341 	}
3342 }
3343 
3344 /*
3345  *	task_release:
3346  *
3347  *	Same as the internal routine above, except that it must lock
3348  *	and verify that the task is active.
3349  *
3350  *      CONDITIONS: The caller holds a reference to the task
3351  */
3352 kern_return_t
3353 task_release(
3354 	task_t          task)
3355 {
3356 	if (task == TASK_NULL) {
3357 		return KERN_INVALID_ARGUMENT;
3358 	}
3359 
3360 	task_lock(task);
3361 
3362 	if (!task->active) {
3363 		task_unlock(task);
3364 
3365 		return KERN_FAILURE;
3366 	}
3367 
3368 	task_release_locked(task);
3369 	task_unlock(task);
3370 
3371 	return KERN_SUCCESS;
3372 }
3373 
3374 static kern_return_t
3375 task_threads_internal(
3376 	task_t                      task,
3377 	thread_act_array_t         *threads_out,
3378 	mach_msg_type_number_t     *countp,
3379 	mach_thread_flavor_t        flavor)
3380 {
3381 	mach_msg_type_number_t  actual, count, count_needed;
3382 	thread_t               *thread_list;
3383 	thread_t                thread;
3384 	unsigned int            i;
3385 
3386 	count = 0;
3387 	thread_list = NULL;
3388 
3389 	if (task == TASK_NULL) {
3390 		return KERN_INVALID_ARGUMENT;
3391 	}
3392 
3393 	assert(flavor <= THREAD_FLAVOR_INSPECT);
3394 
3395 	for (;;) {
3396 		task_lock(task);
3397 		if (!task->active) {
3398 			task_unlock(task);
3399 
3400 			kfree_type(thread_t, count, thread_list);
3401 			return KERN_FAILURE;
3402 		}
3403 
3404 		count_needed = actual = task->thread_count;
3405 		if (count_needed <= count) {
3406 			break;
3407 		}
3408 
3409 		/* unlock the task and allocate more memory */
3410 		task_unlock(task);
3411 
3412 		kfree_type(thread_t, count, thread_list);
3413 		count = count_needed;
3414 		thread_list = kalloc_type(thread_t, count, Z_WAITOK);
3415 
3416 		if (thread_list == NULL) {
3417 			return KERN_RESOURCE_SHORTAGE;
3418 		}
3419 	}
3420 
3421 	i = 0;
3422 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
3423 		assert(i < actual);
3424 		thread_reference(thread);
3425 		thread_list[i++] = thread;
3426 	}
3427 
3428 	count_needed = actual;
3429 
3430 	/* can unlock task now that we've got the thread refs */
3431 	task_unlock(task);
3432 
3433 	if (actual == 0) {
3434 		/* no threads, so return null pointer and deallocate memory */
3435 
3436 		*threads_out = NULL;
3437 		*countp = 0;
3438 		kfree_type(thread_t, count, thread_list);
3439 	} else {
3440 		/* if we allocated too much, must copy */
3441 		if (count_needed < count) {
3442 			void *newaddr;
3443 
3444 			newaddr = kalloc_type(thread_t, count_needed, Z_WAITOK);
3445 			if (newaddr == NULL) {
3446 				for (i = 0; i < actual; ++i) {
3447 					thread_deallocate(thread_list[i]);
3448 				}
3449 				kfree_type(thread_t, count, thread_list);
3450 				return KERN_RESOURCE_SHORTAGE;
3451 			}
3452 
3453 			bcopy(thread_list, newaddr, count_needed * sizeof(thread_t));
3454 			kfree_type(thread_t, count, thread_list);
3455 			thread_list = (thread_t *)newaddr;
3456 		}
3457 
3458 		*threads_out = thread_list;
3459 		*countp = actual;
3460 
3461 		/* do the conversion that Mig should handle */
3462 		/* do the conversion that MIG should handle */
3463 		switch (flavor) {
3464 		case THREAD_FLAVOR_CONTROL:
3465 			if (task == current_task()) {
3466 				for (i = 0; i < actual; ++i) {
3467 					((ipc_port_t *) thread_list)[i] = convert_thread_to_port_pinned(thread_list[i]);
3468 				}
3469 			} else {
3470 				for (i = 0; i < actual; ++i) {
3471 					((ipc_port_t *) thread_list)[i] = convert_thread_to_port(thread_list[i]);
3472 				}
3473 			}
3474 			break;
3475 		case THREAD_FLAVOR_READ:
3476 			for (i = 0; i < actual; ++i) {
3477 				((ipc_port_t *) thread_list)[i] = convert_thread_read_to_port(thread_list[i]);
3478 			}
3479 			break;
3480 		case THREAD_FLAVOR_INSPECT:
3481 			for (i = 0; i < actual; ++i) {
3482 				((ipc_port_t *) thread_list)[i] = convert_thread_inspect_to_port(thread_list[i]);
3483 			}
3484 			break;
3485 		}
3486 	}
3487 
3488 	return KERN_SUCCESS;
3489 }
3490 
3491 kern_return_t
3492 task_threads(
3493 	task_t                      task,
3494 	thread_act_array_t         *threads_out,
3495 	mach_msg_type_number_t     *count)
3496 {
3497 	return task_threads_internal(task, threads_out, count, THREAD_FLAVOR_CONTROL);
3498 }
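/*
 * User-space usage sketch (an assumption about the MIG client side,
 * not code from this file): the caller owns both the returned thread
 * ports and the out-of-line array backing them.
 *
 *	thread_act_array_t threads;
 *	mach_msg_type_number_t count;
 *
 *	if (task_threads(mach_task_self(), &threads, &count) == KERN_SUCCESS) {
 *		for (mach_msg_type_number_t i = 0; i < count; i++)
 *			mach_port_deallocate(mach_task_self(), threads[i]);
 *		vm_deallocate(mach_task_self(), (vm_address_t)threads,
 *		    count * sizeof(threads[0]));
 *	}
 */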
3499 
3500 
3501 kern_return_t
3502 task_threads_from_user(
3503 	mach_port_t                 port,
3504 	thread_act_array_t         *threads_out,
3505 	mach_msg_type_number_t     *count)
3506 {
3507 	ipc_kobject_type_t kotype;
3508 	kern_return_t kr;
3509 
3510 	task_t task = convert_port_to_task_inspect_no_eval(port);
3511 
3512 	if (task == TASK_NULL) {
3513 		return KERN_INVALID_ARGUMENT;
3514 	}
3515 
3516 	kotype = ip_kotype(port);
3517 
3518 	switch (kotype) {
3519 	case IKOT_TASK_CONTROL:
3520 		kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_CONTROL);
3521 		break;
3522 	case IKOT_TASK_READ:
3523 		kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_READ);
3524 		break;
3525 	case IKOT_TASK_INSPECT:
3526 		kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_INSPECT);
3527 		break;
3528 	default:
3529 		panic("strange kobject type");
3530 		break;
3531 	}
3532 
3533 	task_deallocate(task);
3534 	return kr;
3535 }
3536 
3537 #define TASK_HOLD_NORMAL        0
3538 #define TASK_HOLD_PIDSUSPEND    1
3539 #define TASK_HOLD_LEGACY        2
3540 #define TASK_HOLD_LEGACY_ALL    3
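/*
 * How the hold modes map onto the entry points below:
 *	TASK_HOLD_NORMAL	task_suspend_internal() / task_resume_internal()
 *	TASK_HOLD_PIDSUSPEND	task_pidsuspend() / task_pidresume()
 *	TASK_HOLD_LEGACY	task_suspend() / task_resume()
 *	TASK_HOLD_LEGACY_ALL	task_suspension_no_senders(), which drops
 *				all remaining legacy holds at once
 */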
3541 
3542 static kern_return_t
3543 place_task_hold(
3544 	task_t task,
3545 	int mode)
3546 {
3547 	if (!task->active && !task_is_a_corpse(task)) {
3548 		return KERN_FAILURE;
3549 	}
3550 
3551 	/* Return success for corpse task */
3552 	if (task_is_a_corpse(task)) {
3553 		return KERN_SUCCESS;
3554 	}
3555 
3556 	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_IPC, MACH_TASK_SUSPEND),
3557 	    task_pid(task),
3558 	    task->thread_count > 0 ?((thread_t)queue_first(&task->threads))->thread_id : 0,
3559 	    task->user_stop_count, task->user_stop_count + 1);
3560 
3561 #if MACH_ASSERT
3562 	current_task()->suspends_outstanding++;
3563 #endif
3564 
3565 	if (mode == TASK_HOLD_LEGACY) {
3566 		task->legacy_stop_count++;
3567 	}
3568 
3569 	if (task->user_stop_count++ > 0) {
3570 		/*
3571 		 *	If the stop count was positive, the task is
3572 		 *	already stopped and we can exit.
3573 		 */
3574 		return KERN_SUCCESS;
3575 	}
3576 
3577 	/*
3578 	 * Put a kernel-level hold on the threads in the task (all
3579 	 * user-level task suspensions added together represent a
3580 	 * single kernel-level hold).  We then wait for the threads
3581 	 * to stop executing user code.
3582 	 */
3583 	task_hold_locked(task);
3584 	task_wait_locked(task, FALSE);
3585 
3586 	return KERN_SUCCESS;
3587 }
3588 
3589 static kern_return_t
3590 release_task_hold(
3591 	task_t          task,
3592 	int                     mode)
3593 {
3594 	boolean_t release = FALSE;
3595 
3596 	if (!task->active && !task_is_a_corpse(task)) {
3597 		return KERN_FAILURE;
3598 	}
3599 
3600 	/* Return success for corpse task */
3601 	if (task_is_a_corpse(task)) {
3602 		return KERN_SUCCESS;
3603 	}
3604 
3605 	if (mode == TASK_HOLD_PIDSUSPEND) {
3606 		if (task->pidsuspended == FALSE) {
3607 			return KERN_FAILURE;
3608 		}
3609 		task->pidsuspended = FALSE;
3610 	}
3611 
3612 	if (task->user_stop_count > (task->pidsuspended ? 1 : 0)) {
3613 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3614 		    MACHDBG_CODE(DBG_MACH_IPC, MACH_TASK_RESUME) | DBG_FUNC_NONE,
3615 		    task_pid(task), ((thread_t)queue_first(&task->threads))->thread_id,
3616 		    task->user_stop_count, mode, task->legacy_stop_count);
3617 
3618 #if MACH_ASSERT
3619 		/*
3620 		 * This is obviously not robust; if we suspend one task and then resume a different one,
3621 		 * we'll fly under the radar. This is only meant to catch the common case of a crashed
3622 		 * or buggy suspender.
3623 		 */
3624 		current_task()->suspends_outstanding--;
3625 #endif
3626 
3627 		if (mode == TASK_HOLD_LEGACY_ALL) {
3628 			if (task->legacy_stop_count >= task->user_stop_count) {
3629 				task->user_stop_count = 0;
3630 				release = TRUE;
3631 			} else {
3632 				task->user_stop_count -= task->legacy_stop_count;
3633 			}
3634 			task->legacy_stop_count = 0;
3635 		} else {
3636 			if (mode == TASK_HOLD_LEGACY && task->legacy_stop_count > 0) {
3637 				task->legacy_stop_count--;
3638 			}
3639 			if (--task->user_stop_count == 0) {
3640 				release = TRUE;
3641 			}
3642 		}
3643 	} else {
3644 		return KERN_FAILURE;
3645 	}
3646 
3647 	/*
3648 	 *	Release the task if necessary.
3649 	 */
3650 	if (release) {
3651 		task_release_locked(task);
3652 	}
3653 
3654 	return KERN_SUCCESS;
3655 }
3656 
3657 boolean_t
3658 get_task_suspended(task_t task)
3659 {
3660 	return 0 != task->user_stop_count;
3661 }
3662 
3663 /*
3664  *	task_suspend:
3665  *
3666  *	Implement an (old-fashioned) user-level suspension on a task.
3667  *
3668  *	Because the user isn't expecting to have to manage a suspension
3669  *	token, we'll track it for him in the kernel in the form of a naked
3670  *	send right to the task's resume port.  All such send rights
3671  *	account for a single suspension against the task (unlike task_suspend2()
3672  *	where each caller gets a unique suspension count represented by a
3673  *	unique send-once right).
3674  *
3675  * Conditions:
3676  *      The caller holds a reference to the task
3677  */
3678 kern_return_t
3679 task_suspend(
3680 	task_t          task)
3681 {
3682 	kern_return_t                   kr;
3683 	mach_port_t                     port;
3684 	mach_port_name_t                name;
3685 
3686 	if (task == TASK_NULL || task == kernel_task) {
3687 		return KERN_INVALID_ARGUMENT;
3688 	}
3689 
3690 	/*
3691 	 * place a legacy hold on the task.
3692 	 */
3693 	task_lock(task);
3694 	kr = place_task_hold(task, TASK_HOLD_LEGACY);
3695 	task_unlock(task);
3696 
3697 	if (kr != KERN_SUCCESS) {
3698 		return kr;
3699 	}
3700 
3701 	/*
3702 	 * Claim a send right on the task resume port, and request a no-senders
3703 	 * notification on that port (if none outstanding).
3704 	 */
3705 	itk_lock(task);
3706 	(void)ipc_kobject_make_send_lazy_alloc_port((ipc_port_t *) &task->itk_resume,
3707 	    (ipc_kobject_t)task, IKOT_TASK_RESUME, IPC_KOBJECT_PTRAUTH_STORE,
3708 	    OS_PTRAUTH_DISCRIMINATOR("task.itk_resume"));
3709 	port = task->itk_resume; /* donates send right */
3710 	itk_unlock(task);
3711 
3712 	/*
3713 	 * Copyout the send right into the calling task's IPC space.  It won't know it is there,
3714 	 * but we'll look it up when calling a traditional resume.  Any IPC operations that
3715 	 * deallocate the send right will auto-release the suspension.
3716 	 */
3717 	if (IP_VALID(port)) {
3718 		kr = ipc_object_copyout(current_space(), ip_to_object(port),
3719 		    MACH_MSG_TYPE_MOVE_SEND, IPC_OBJECT_COPYOUT_FLAGS_NONE,
3720 		    NULL, NULL, &name);
3721 	} else {
3722 		kr = KERN_SUCCESS;
3723 	}
3724 	if (kr != KERN_SUCCESS) {
3725 		printf("warning: %s(%d) failed to copyout suspension "
3726 		    "token for pid %d with error: %d\n",
3727 		    proc_name_address(current_task()->bsd_info),
3728 		    proc_pid(current_task()->bsd_info),
3729 		    task_pid(task), kr);
3730 	}
3731 
3732 	return kr;
3733 }
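/*
 * Caller-side sketch (hedged; this shows the expected balance, not
 * code from this file):
 *
 *	kern_return_t kr = task_suspend(target);
 *	if (kr == KERN_SUCCESS) {
 *		...examine the stopped task...
 *		kr = task_resume(target);
 *	}
 *
 * Each successful task_suspend() is balanced by one task_resume().
 * Because the kernel parks a send right to the resume port in the
 * caller's IPC space, a caller that exits without resuming triggers
 * the no-senders path and the outstanding holds are released.
 */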
3734 
3735 /*
3736  *	task_resume:
3737  *		Release a user hold on a task.
3738  *
3739  * Conditions:
3740  *		The caller holds a reference to the task
3741  */
3742 kern_return_t
3743 task_resume(
3744 	task_t  task)
3745 {
3746 	kern_return_t    kr;
3747 	mach_port_name_t resume_port_name;
3748 	ipc_entry_t              resume_port_entry;
3749 	ipc_space_t              space = current_task()->itk_space;
3750 
3751 	if (task == TASK_NULL || task == kernel_task) {
3752 		return KERN_INVALID_ARGUMENT;
3753 	}
3754 
3755 	/* release a legacy task hold */
3756 	task_lock(task);
3757 	kr = release_task_hold(task, TASK_HOLD_LEGACY);
3758 	task_unlock(task);
3759 
3760 	itk_lock(task); /* for itk_resume */
3761 	is_write_lock(space); /* spin lock */
3762 	if (is_active(space) && IP_VALID(task->itk_resume) &&
3763 	    ipc_hash_lookup(space, ip_to_object(task->itk_resume), &resume_port_name, &resume_port_entry) == TRUE) {
3764 		/*
3765 		 * We found a suspension token in the caller's IPC space. Release a send right to indicate that
3766 		 * we are holding one less legacy hold on the task from this caller.  If the release failed,
3767 		 * go ahead and drop all the rights, as someone either already released our holds or the task
3768 		 * is gone.
3769 		 */
3770 		itk_unlock(task);
3771 		if (kr == KERN_SUCCESS) {
3772 			ipc_right_dealloc(space, resume_port_name, resume_port_entry);
3773 		} else {
3774 			ipc_right_destroy(space, resume_port_name, resume_port_entry, FALSE, 0);
3775 		}
3776 		/* space unlocked */
3777 	} else {
3778 		itk_unlock(task);
3779 		is_write_unlock(space);
3780 		if (kr == KERN_SUCCESS) {
3781 			printf("warning: %s(%d) performed out-of-band resume on pid %d\n",
3782 			    proc_name_address(current_task()->bsd_info), proc_pid(current_task()->bsd_info),
3783 			    task_pid(task));
3784 		}
3785 	}
3786 
3787 	return kr;
3788 }
3789 
3790 /*
3791  * Suspend the target task.
3792  * Making/holding a token/reference/port is the caller's responsibility.
3793  */
3794 kern_return_t
3795 task_suspend_internal(task_t task)
3796 {
3797 	kern_return_t    kr;
3798 
3799 	if (task == TASK_NULL || task == kernel_task) {
3800 		return KERN_INVALID_ARGUMENT;
3801 	}
3802 
3803 	task_lock(task);
3804 	kr = place_task_hold(task, TASK_HOLD_NORMAL);
3805 	task_unlock(task);
3806 	return kr;
3807 }
3808 
3809 /*
3810  * Suspend the target task, and return a suspension token. The token
3811  * represents a reference on the suspended task.
3812  */
3813 static kern_return_t
3814 task_suspend2_grp(
3815 	task_t                  task,
3816 	task_suspension_token_t *suspend_token,
3817 	task_grp_t              grp)
3818 {
3819 	kern_return_t    kr;
3820 
3821 	kr = task_suspend_internal(task);
3822 	if (kr != KERN_SUCCESS) {
3823 		*suspend_token = TASK_NULL;
3824 		return kr;
3825 	}
3826 
3827 	/*
3828 	 * Take a reference on the target task and return that to the caller
3829 	 * as a "suspension token," which can be converted into an SO right to
3830 	 * the now-suspended task's resume port.
3831 	 */
3832 	task_reference_grp(task, grp);
3833 	*suspend_token = task;
3834 
3835 	return KERN_SUCCESS;
3836 }
3837 
3838 kern_return_t
3839 task_suspend2_mig(
3840 	task_t                  task,
3841 	task_suspension_token_t *suspend_token)
3842 {
3843 	return task_suspend2_grp(task, suspend_token, TASK_GRP_MIG);
3844 }
3845 
3846 kern_return_t
3847 task_suspend2_external(
3848 	task_t                  task,
3849 	task_suspension_token_t *suspend_token)
3850 {
3851 	return task_suspend2_grp(task, suspend_token, TASK_GRP_EXTERNAL);
3852 }
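/*
 * Token-flavor sketch (hedged; it illustrates the contract described
 * above, not verbatim client code):
 *
 *	task_suspension_token_t token;
 *
 *	if (task_suspend2(target, &token) == KERN_SUCCESS) {
 *		...target is stopped...
 *		task_resume2(token);	...consumes the token's reference...
 *	}
 *
 * Unlike task_suspend(), every caller gets its own token, so one
 * client's resume cannot consume another client's hold.
 */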
3853 
3854 /*
3855  * Resume the task
3856  * (reference/token/port management is caller's responsibility).
3857  */
3858 kern_return_t
3859 task_resume_internal(
3860 	task_suspension_token_t         task)
3861 {
3862 	kern_return_t kr;
3863 
3864 	if (task == TASK_NULL || task == kernel_task) {
3865 		return KERN_INVALID_ARGUMENT;
3866 	}
3867 
3868 	task_lock(task);
3869 	kr = release_task_hold(task, TASK_HOLD_NORMAL);
3870 	task_unlock(task);
3871 	return kr;
3872 }
3873 
3874 /*
3875  * Resume the task using a suspension token. Consumes the token's ref.
3876  */
3877 static kern_return_t
3878 task_resume2_grp(
3879 	task_suspension_token_t         task,
3880 	task_grp_t                      grp)
3881 {
3882 	kern_return_t kr;
3883 
3884 	kr = task_resume_internal(task);
3885 	task_suspension_token_deallocate_grp(task, grp);
3886 
3887 	return kr;
3888 }
3889 
3890 kern_return_t
3891 task_resume2_mig(
3892 	task_suspension_token_t         task)
3893 {
3894 	return task_resume2_grp(task, TASK_GRP_MIG);
3895 }
3896 
3897 kern_return_t
3898 task_resume2_external(
3899 	task_suspension_token_t         task)
3900 {
3901 	return task_resume2_grp(task, TASK_GRP_EXTERNAL);
3902 }
3903 
3904 static void
3905 task_suspension_no_senders(ipc_port_t port, mach_port_mscount_t mscount)
3906 {
3907 	task_t task = convert_port_to_task_suspension_token(port);
3908 	kern_return_t kr;
3909 
3910 	if (task == TASK_NULL) {
3911 		return;
3912 	}
3913 
3914 	if (task == kernel_task) {
3915 		task_suspension_token_deallocate(task);
3916 		return;
3917 	}
3918 
3919 	task_lock(task);
3920 
3921 	kr = ipc_kobject_nsrequest(port, mscount, NULL);
3922 	if (kr == KERN_FAILURE) {
3923 		/* release all the [remaining] outstanding legacy holds */
3924 		release_task_hold(task, TASK_HOLD_LEGACY_ALL);
3925 	}
3926 
3927 	task_unlock(task);
3928 
3929 	task_suspension_token_deallocate(task);         /* drop token reference */
3930 }
3931 
3932 /*
3933  * Fires when a send-once right made
3934  * by convert_task_suspension_token_to_port() dies.
3935  */
3936 void
3937 task_suspension_send_once(ipc_port_t port)
3938 {
3939 	task_t task = convert_port_to_task_suspension_token(port);
3940 
3941 	if (task == TASK_NULL || task == kernel_task) {
3942 		return;         /* nothing to do */
3943 	}
3944 
3945 	/* release the hold held by this specific send-once right */
3946 	task_lock(task);
3947 	release_task_hold(task, TASK_HOLD_NORMAL);
3948 	task_unlock(task);
3949 
3950 	task_suspension_token_deallocate(task);         /* drop token reference */
3951 }
3952 
3953 static kern_return_t
3954 task_pidsuspend_locked(task_t task)
3955 {
3956 	kern_return_t kr;
3957 
3958 	if (task->pidsuspended) {
3959 		kr = KERN_FAILURE;
3960 		goto out;
3961 	}
3962 
3963 	task->pidsuspended = TRUE;
3964 
3965 	kr = place_task_hold(task, TASK_HOLD_PIDSUSPEND);
3966 	if (kr != KERN_SUCCESS) {
3967 		task->pidsuspended = FALSE;
3968 	}
3969 out:
3970 	return kr;
3971 }
3972 
3973 
3974 /*
3975  *	task_pidsuspend:
3976  *
3977  *	Suspends a task by placing a hold on its threads.
3978  *
3979  * Conditions:
3980  *      The caller holds a reference to the task
3981  */
3982 kern_return_t
3983 task_pidsuspend(
3984 	task_t          task)
3985 {
3986 	kern_return_t    kr;
3987 
3988 	if (task == TASK_NULL || task == kernel_task) {
3989 		return KERN_INVALID_ARGUMENT;
3990 	}
3991 
3992 	task_lock(task);
3993 
3994 	kr = task_pidsuspend_locked(task);
3995 
3996 	task_unlock(task);
3997 
3998 	if ((KERN_SUCCESS == kr) && task->message_app_suspended) {
3999 		iokit_task_app_suspended_changed(task);
4000 	}
4001 
4002 	return kr;
4003 }
4004 
4005 /*
4006  *	task_pidresume:
4007  *		Resumes a previously suspended task.
4008  *
4009  * Conditions:
4010  *		The caller holds a reference to the task
4011  */
4012 kern_return_t
4013 task_pidresume(
4014 	task_t  task)
4015 {
4016 	kern_return_t    kr;
4017 
4018 	if (task == TASK_NULL || task == kernel_task) {
4019 		return KERN_INVALID_ARGUMENT;
4020 	}
4021 
4022 	task_lock(task);
4023 
4024 #if CONFIG_FREEZE
4025 
4026 	while (task->changing_freeze_state) {
4027 		assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
4028 		task_unlock(task);
4029 		thread_block(THREAD_CONTINUE_NULL);
4030 
4031 		task_lock(task);
4032 	}
4033 	task->changing_freeze_state = TRUE;
4034 #endif
4035 
4036 	kr = release_task_hold(task, TASK_HOLD_PIDSUSPEND);
4037 
4038 	task_unlock(task);
4039 
4040 	if ((KERN_SUCCESS == kr) && task->message_app_suspended) {
4041 		iokit_task_app_suspended_changed(task);
4042 	}
4043 
4044 #if CONFIG_FREEZE
4045 
4046 	task_lock(task);
4047 
4048 	if (kr == KERN_SUCCESS) {
4049 		task->frozen = FALSE;
4050 	}
4051 	task->changing_freeze_state = FALSE;
4052 	thread_wakeup(&task->changing_freeze_state);
4053 
4054 	task_unlock(task);
4055 #endif
4056 
4057 	return kr;
4058 }
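/*
 * Note (a summary of the code above): task_pidsuspend() and
 * task_pidresume() maintain at most one TASK_HOLD_PIDSUSPEND hold per
 * task, independent of the legacy and token holds, and both notify
 * IOKit via iokit_task_app_suspended_changed() when the task has
 * opted into message_app_suspended.
 */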
4059 
4060 os_refgrp_decl(static, task_watchports_refgrp, "task_watchports", NULL);
4061 
4062 /*
4063  *	task_add_turnstile_watchports:
4064  *		Set up watchports to boost the main thread of the task.
4065  *
4066  *	Arguments:
4067  *		task: task being spawned
4068  *		thread: main thread of task
4069  *		portwatch_ports: array of watchports
4070  *		portwatch_count: number of watchports
4071  *
4072  *	Conditions:
4073  *		Nothing locked.
4074  */
4075 void
4076 task_add_turnstile_watchports(
4077 	task_t          task,
4078 	thread_t        thread,
4079 	ipc_port_t      *portwatch_ports,
4080 	uint32_t        portwatch_count)
4081 {
4082 	struct task_watchports *watchports = NULL;
4083 	struct task_watchport_elem *previous_elem_array[TASK_MAX_WATCHPORT_COUNT] = {};
4084 	os_ref_count_t refs;
4085 
4086 	/* Check if the task has terminated */
4087 	if (!task->active) {
4088 		return;
4089 	}
4090 
4091 	assert(portwatch_count <= TASK_MAX_WATCHPORT_COUNT);
4092 
4093 	watchports = task_watchports_alloc_init(task, thread, portwatch_count);
4094 
4095 	/* Lock the ipc space */
4096 	is_write_lock(task->itk_space);
4097 
4098 	/* Setup watchports to boost the main thread */
4099 	refs = task_add_turnstile_watchports_locked(task,
4100 	    watchports, previous_elem_array, portwatch_ports,
4101 	    portwatch_count);
4102 
4103 	/* Drop the space lock */
4104 	is_write_unlock(task->itk_space);
4105 
4106 	if (refs == 0) {
4107 		task_watchports_deallocate(watchports);
4108 	}
4109 
4110 	/* Drop the ref on previous_elem_array */
4111 	for (uint32_t i = 0; i < portwatch_count && previous_elem_array[i] != NULL; i++) {
4112 		task_watchport_elem_deallocate(previous_elem_array[i]);
4113 	}
4114 }
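/*
 * Reference-counting sketch for struct task_watchports (a summary of
 * the logic in task_add_turnstile_watchports_locked() below):
 * os_ref_init() starts tw_refcount at 1, each element successfully
 * armed on a port takes one more reference, and the initial reference
 * is dropped once arming is complete.  A count that reaches zero
 * therefore means no element remains armed and the structure can be
 * torn down via task_watchports_deallocate().
 */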
4115 
4116 /*
4117  *	task_remove_turnstile_watchports:
4118  *		Clear all turnstile boost on the task from watchports.
4119  *
4120  *	Arguments:
4121  *		task: task being terminated
4122  *
4123  *	Conditions:
4124  *		Nothing locked.
4125  */
4126 void
4127 task_remove_turnstile_watchports(
4128 	task_t          task)
4129 {
4130 	os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4131 	struct task_watchports *watchports = NULL;
4132 	ipc_port_t port_freelist[TASK_MAX_WATCHPORT_COUNT] = {};
4133 	uint32_t portwatch_count;
4134 
4135 	/* Lock the ipc space */
4136 	is_write_lock(task->itk_space);
4137 
4138 	/* Check if a watchport boost exists */
4139 	if (task->watchports == NULL) {
4140 		is_write_unlock(task->itk_space);
4141 		return;
4142 	}
4143 	watchports = task->watchports;
4144 	portwatch_count = watchports->tw_elem_array_count;
4145 
4146 	refs = task_remove_turnstile_watchports_locked(task, watchports,
4147 	    port_freelist);
4148 
4149 	is_write_unlock(task->itk_space);
4150 
4151 	/* Drop all the port references */
4152 	for (uint32_t i = 0; i < portwatch_count && port_freelist[i] != NULL; i++) {
4153 		ip_release(port_freelist[i]);
4154 	}
4155 
4156 	/* Clear the task and thread references for task_watchport */
4157 	if (refs == 0) {
4158 		task_watchports_deallocate(watchports);
4159 	}
4160 }
4161 
4162 /*
4163  *	task_transfer_turnstile_watchports:
4164  *		Transfer all watchport turnstile boost from old task to new task.
4165  *
4166  *	Arguments:
4167  *		old_task: task calling exec
4168  *		new_task: new exec'ed task
4169  *		thread: main thread of new task
4170  *
4171  *	Conditions:
4172  *		Nothing locked.
4173  */
4174 void
4175 task_transfer_turnstile_watchports(
4176 	task_t   old_task,
4177 	task_t   new_task,
4178 	thread_t new_thread)
4179 {
4180 	struct task_watchports *old_watchports = NULL;
4181 	struct task_watchports *new_watchports = NULL;
4182 	os_ref_count_t old_refs = TASK_MAX_WATCHPORT_COUNT;
4183 	os_ref_count_t new_refs = TASK_MAX_WATCHPORT_COUNT;
4184 	uint32_t portwatch_count;
4185 
4186 	if (old_task->watchports == NULL || !new_task->active) {
4187 		return;
4188 	}
4189 
4190 	/* Get the watch port count from the old task */
4191 	is_write_lock(old_task->itk_space);
4192 	if (old_task->watchports == NULL) {
4193 		is_write_unlock(old_task->itk_space);
4194 		return;
4195 	}
4196 
4197 	portwatch_count = old_task->watchports->tw_elem_array_count;
4198 	is_write_unlock(old_task->itk_space);
4199 
4200 	new_watchports = task_watchports_alloc_init(new_task, new_thread, portwatch_count);
4201 
4202 	/* Lock the ipc space for old task */
4203 	is_write_lock(old_task->itk_space);
4204 
4205 	/* Lock the ipc space for new task */
4206 	is_write_lock(new_task->itk_space);
4207 
4208 	/* Check if a watchport boost exists */
4209 	if (old_task->watchports == NULL || !new_task->active) {
4210 		is_write_unlock(new_task->itk_space);
4211 		is_write_unlock(old_task->itk_space);
4212 		(void)task_watchports_release(new_watchports);
4213 		task_watchports_deallocate(new_watchports);
4214 		return;
4215 	}
4216 
4217 	old_watchports = old_task->watchports;
4218 	assert(portwatch_count == old_task->watchports->tw_elem_array_count);
4219 
4220 	/* Setup new task watchports */
4221 	new_task->watchports = new_watchports;
4222 
4223 	for (uint32_t i = 0; i < portwatch_count; i++) {
4224 		ipc_port_t port = old_watchports->tw_elem[i].twe_port;
4225 
4226 		if (port == NULL) {
4227 			task_watchport_elem_clear(&new_watchports->tw_elem[i]);
4228 			continue;
4229 		}
4230 
4231 		/* Lock the port and check if it has the entry */
4232 		ip_mq_lock(port);
4233 
4234 		task_watchport_elem_init(&new_watchports->tw_elem[i], new_task, port);
4235 
4236 		if (ipc_port_replace_watchport_elem_conditional_locked(port,
4237 		    &old_watchports->tw_elem[i], &new_watchports->tw_elem[i]) == KERN_SUCCESS) {
4238 			task_watchport_elem_clear(&old_watchports->tw_elem[i]);
4239 
4240 			task_watchports_retain(new_watchports);
4241 			old_refs = task_watchports_release(old_watchports);
4242 
4243 			/* Check if all ports are cleaned */
4244 			if (old_refs == 0) {
4245 				old_task->watchports = NULL;
4246 			}
4247 		} else {
4248 			task_watchport_elem_clear(&new_watchports->tw_elem[i]);
4249 		}
4250 		/* port unlocked by ipc_port_replace_watchport_elem_conditional_locked */
4251 	}
4252 
4253 	/* Drop the reference on new task_watchports struct returned by task_watchports_alloc_init */
4254 	new_refs = task_watchports_release(new_watchports);
4255 	if (new_refs == 0) {
4256 		new_task->watchports = NULL;
4257 	}
4258 
4259 	is_write_unlock(new_task->itk_space);
4260 	is_write_unlock(old_task->itk_space);
4261 
4262 	/* Clear the task and thread references for old_watchport */
4263 	if (old_refs == 0) {
4264 		task_watchports_deallocate(old_watchports);
4265 	}
4266 
4267 	/* Clear the task and thread references for new_watchport */
4268 	if (new_refs == 0) {
4269 		task_watchports_deallocate(new_watchports);
4270 	}
4271 }
4272 
4273 /*
4274  *	task_add_turnstile_watchports_locked:
4275  *		Set up watchports to boost the main thread of the task.
4276  *
4277  *	Arguments:
4278  *		task: task to boost
4279  *		watchports: watchport structure to be attached to the task
4280  *		previous_elem_array: an array of old watchport_elem to be returned to caller
4281  *		portwatch_ports: array of watchports
4282  *		portwatch_count: number of watchports
4283  *
4284  *	Conditions:
4285  *		ipc space of the task locked.
4286  *		returns array of old watchport_elem in previous_elem_array
4287  */
4288 static os_ref_count_t
4289 task_add_turnstile_watchports_locked(
4290 	task_t                      task,
4291 	struct task_watchports      *watchports,
4292 	struct task_watchport_elem  **previous_elem_array,
4293 	ipc_port_t                  *portwatch_ports,
4294 	uint32_t                    portwatch_count)
4295 {
4296 	os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4297 
4298 	/* Check if the task is still active */
4299 	if (!task->active) {
4300 		refs = task_watchports_release(watchports);
4301 		return refs;
4302 	}
4303 
4304 	assert(task->watchports == NULL);
4305 	task->watchports = watchports;
4306 
4307 	for (uint32_t i = 0, j = 0; i < portwatch_count; i++) {
4308 		ipc_port_t port = portwatch_ports[i];
4309 
4310 		task_watchport_elem_init(&watchports->tw_elem[i], task, port);
4311 		if (port == NULL) {
4312 			task_watchport_elem_clear(&watchports->tw_elem[i]);
4313 			continue;
4314 		}
4315 
4316 		ip_mq_lock(port);
4317 
4318 		/* Check if port is in valid state to be setup as watchport */
4319 		if (ipc_port_add_watchport_elem_locked(port, &watchports->tw_elem[i],
4320 		    &previous_elem_array[j]) != KERN_SUCCESS) {
4321 			task_watchport_elem_clear(&watchports->tw_elem[i]);
4322 			continue;
4323 		}
4324 		/* port unlocked on return */
4325 
4326 		ip_reference(port);
4327 		task_watchports_retain(watchports);
4328 		if (previous_elem_array[j] != NULL) {
4329 			j++;
4330 		}
4331 	}
4332 
4333 	/* Drop the reference on task_watchport struct returned by os_ref_init */
4334 	refs = task_watchports_release(watchports);
4335 	if (refs == 0) {
4336 		task->watchports = NULL;
4337 	}
4338 
4339 	return refs;
4340 }
4341 
4342 /*
4343  *	task_remove_turnstile_watchports_locked:
4344  *		Clear all turnstile boost on the task from watchports.
4345  *
4346  *	Arguments:
4347  *		task: task to remove watchports from
4348  *		watchports: watchports structure for the task
4349  *		port_freelist: array of ports returned with ref to caller
4350  *
4351  *
4352  *	Conditions:
4353  *		ipc space of the task locked.
4354  *		array of ports with refs are returned in port_freelist
4355  */
4356 static os_ref_count_t
4357 task_remove_turnstile_watchports_locked(
4358 	task_t                 task,
4359 	struct task_watchports *watchports,
4360 	ipc_port_t             *port_freelist)
4361 {
4362 	os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4363 
4364 	for (uint32_t i = 0, j = 0; i < watchports->tw_elem_array_count; i++) {
4365 		ipc_port_t port = watchports->tw_elem[i].twe_port;
4366 		if (port == NULL) {
4367 			continue;
4368 		}
4369 
4370 		/* Lock the port and check if it has the entry */
4371 		ip_mq_lock(port);
4372 		if (ipc_port_clear_watchport_elem_internal_conditional_locked(port,
4373 		    &watchports->tw_elem[i]) == KERN_SUCCESS) {
4374 			task_watchport_elem_clear(&watchports->tw_elem[i]);
4375 			port_freelist[j++] = port;
4376 			refs = task_watchports_release(watchports);
4377 
4378 			/* Check if all ports are cleaned */
4379 			if (refs == 0) {
4380 				task->watchports = NULL;
4381 				break;
4382 			}
4383 		}
4384 		/* mqueue and port unlocked by ipc_port_clear_watchport_elem_internal_conditional_locked */
4385 	}
4386 	return refs;
4387 }
4388 
4389 /*
4390  *	task_watchports_alloc_init:
4391  *		Allocate and initialize task watchport struct.
4392  *
4393  *	Conditions:
4394  *		Nothing locked.
4395  */
4396 static struct task_watchports *
4397 task_watchports_alloc_init(
4398 	task_t        task,
4399 	thread_t      thread,
4400 	uint32_t      count)
4401 {
4402 	struct task_watchports *watchports = kalloc_type(struct task_watchports,
4403 	    struct task_watchport_elem, count, Z_WAITOK | Z_ZERO | Z_NOFAIL);
4404 
4405 	task_reference(task);
4406 	thread_reference(thread);
4407 	watchports->tw_task = task;
4408 	watchports->tw_thread = thread;
4409 	watchports->tw_elem_array_count = count;
4410 	os_ref_init(&watchports->tw_refcount, &task_watchports_refgrp);
4411 
4412 	return watchports;
4413 }
4414 
4415 /*
4416  *	task_watchports_deallocate:
4417  *		Deallocate task watchport struct.
4418  *
4419  *	Conditions:
4420  *		Nothing locked.
4421  */
4422 static void
4423 task_watchports_deallocate(
4424 	struct task_watchports *watchports)
4425 {
4426 	uint32_t portwatch_count = watchports->tw_elem_array_count;
4427 
4428 	task_deallocate(watchports->tw_task);
4429 	thread_deallocate(watchports->tw_thread);
4430 	kfree_type(struct task_watchports, struct task_watchport_elem,
4431 	    portwatch_count, watchports);
4432 }
4433 
4434 /*
4435  *	task_watchport_elem_deallocate:
4436  *		Deallocate task watchport element and release its ref on task_watchport.
4437  *
4438  *	Conditions:
4439  *		Nothing locked.
4440  */
4441 void
4442 task_watchport_elem_deallocate(
4443 	struct task_watchport_elem *watchport_elem)
4444 {
4445 	os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4446 	task_t task = watchport_elem->twe_task;
4447 	struct task_watchports *watchports = NULL;
4448 	ipc_port_t port = NULL;
4449 
4450 	assert(task != NULL);
4451 
4452 	/* Take the space lock to modify the element */
4453 	is_write_lock(task->itk_space);
4454 
4455 	watchports = task->watchports;
4456 	assert(watchports != NULL);
4457 
4458 	port = watchport_elem->twe_port;
4459 	assert(port != NULL);
4460 
4461 	task_watchport_elem_clear(watchport_elem);
4462 	refs = task_watchports_release(watchports);
4463 
4464 	if (refs == 0) {
4465 		task->watchports = NULL;
4466 	}
4467 
4468 	is_write_unlock(task->itk_space);
4469 
4470 	ip_release(port);
4471 	if (refs == 0) {
4472 		task_watchports_deallocate(watchports);
4473 	}
4474 }
4475 
4476 /*
4477  *	task_has_watchports:
4478  *		Return TRUE if task has watchport boosts.
4479  *
4480  *	Conditions:
4481  *		Nothing locked.
4482  */
4483 boolean_t
4484 task_has_watchports(task_t task)
4485 {
4486 	return task->watchports != NULL;
4487 }
4488 
4489 #if DEVELOPMENT || DEBUG
4490 
4491 extern void IOSleep(int);
4492 
4493 kern_return_t
4494 task_disconnect_page_mappings(task_t task)
4495 {
4496 	int     n;
4497 
4498 	if (task == TASK_NULL || task == kernel_task) {
4499 		return KERN_INVALID_ARGUMENT;
4500 	}
4501 
4502 	/*
4503 	 * This function strips all of the mappings from the pmap for
4504 	 * the specified task, forcing the task to re-fault all of the
4505 	 * pages it is actively using.  This allows us to approximate
4506 	 * the true working set of the specified task.  We only engage
4507 	 * if at least one of the threads in the task is runnable, but
4508 	 * we want to sweep continuously, at least for a while; the
4509 	 * limit is arbitrarily set at 100 sweeps and should be
4510 	 * revisited as we gain experience.  Sweeping gives a better
4511 	 * view into which areas within a page are being visited, as
4512 	 * opposed to only seeing the first fault of a page after the
4513 	 * task becomes runnable.  In the future we may try to block
4514 	 * until awakened by a thread in this task being made runnable,
4515 	 * but for now we periodically poll from the user-level debug
4516 	 * tool driving the sysctl.
4517 	 */
4518 	for (n = 0; n < 100; n++) {
4519 		thread_t        thread;
4520 		boolean_t       runnable;
4521 		boolean_t       do_unnest;
4522 		int             page_count;
4523 
4524 		runnable = FALSE;
4525 		do_unnest = FALSE;
4526 
4527 		task_lock(task);
4528 
4529 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
4530 			if (thread->state & TH_RUN) {
4531 				runnable = TRUE;
4532 				break;
4533 			}
4534 		}
4535 		if (n == 0) {
4536 			task->task_disconnected_count++;
4537 		}
4538 
4539 		if (task->task_unnested == FALSE) {
4540 			if (runnable == TRUE) {
4541 				task->task_unnested = TRUE;
4542 				do_unnest = TRUE;
4543 			}
4544 		}
4545 		task_unlock(task);
4546 
4547 		if (runnable == FALSE) {
4548 			break;
4549 		}
4550 
4551 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_START,
4552 		    task, do_unnest, task->task_disconnected_count, 0, 0);
4553 
4554 		page_count = vm_map_disconnect_page_mappings(task->map, do_unnest);
4555 
4556 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_END,
4557 		    task, page_count, 0, 0, 0);
4558 
4559 		if ((n % 5) == 4) {
4560 			IOSleep(1);
4561 		}
4562 	}
4563 	return KERN_SUCCESS;
4564 }
4565 
4566 #endif
4567 
4568 
4569 #if CONFIG_FREEZE
4570 
4571 /*
4572  *	task_freeze:
4573  *
4574  *	Freeze a task.
4575  *
4576  * Conditions:
4577  *      The caller holds a reference to the task
4578  */
4579 extern void             vm_wake_compactor_swapper(void);
4580 extern queue_head_t     c_swapout_list_head;
4581 extern struct freezer_context freezer_context_global;
4582 
4583 kern_return_t
4584 task_freeze(
4585 	task_t    task,
4586 	uint32_t           *purgeable_count,
4587 	uint32_t           *wired_count,
4588 	uint32_t           *clean_count,
4589 	uint32_t           *dirty_count,
4590 	uint32_t           dirty_budget,
4591 	uint32_t           *shared_count,
4592 	int                *freezer_error_code,
4593 	boolean_t          eval_only)
4594 {
4595 	kern_return_t kr = KERN_SUCCESS;
4596 
4597 	if (task == TASK_NULL || task == kernel_task) {
4598 		return KERN_INVALID_ARGUMENT;
4599 	}
4600 
4601 	task_lock(task);
4602 
4603 	while (task->changing_freeze_state) {
4604 		assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
4605 		task_unlock(task);
4606 		thread_block(THREAD_CONTINUE_NULL);
4607 
4608 		task_lock(task);
4609 	}
4610 	if (task->frozen) {
4611 		task_unlock(task);
4612 		return KERN_FAILURE;
4613 	}
4614 	task->changing_freeze_state = TRUE;
4615 
4616 	freezer_context_global.freezer_ctx_task = task;
4617 
4618 	task_unlock(task);
4619 
4620 	kr = vm_map_freeze(task,
4621 	    purgeable_count,
4622 	    wired_count,
4623 	    clean_count,
4624 	    dirty_count,
4625 	    dirty_budget,
4626 	    shared_count,
4627 	    freezer_error_code,
4628 	    eval_only);
4629 
4630 	task_lock(task);
4631 
4632 	if ((kr == KERN_SUCCESS) && (eval_only == FALSE)) {
4633 		task->frozen = TRUE;
4634 
4635 		freezer_context_global.freezer_ctx_task = NULL;
4636 		freezer_context_global.freezer_ctx_uncompressed_pages = 0;
4637 
4638 		if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
4639 			/*
4640 			 * reset the counter tracking the # of swapped compressed pages
4641 			 * because we are now done with this freeze session and task.
4642 			 */
4643 
4644 			*dirty_count = (uint32_t) (freezer_context_global.freezer_ctx_swapped_bytes / PAGE_SIZE_64);         /* used to track pageouts */
4645 		}
4646 
4647 		freezer_context_global.freezer_ctx_swapped_bytes = 0;
4648 	}
4649 
4650 	task->changing_freeze_state = FALSE;
4651 	thread_wakeup(&task->changing_freeze_state);
4652 
4653 	task_unlock(task);
4654 
4655 	if (VM_CONFIG_COMPRESSOR_IS_PRESENT &&
4656 	    (kr == KERN_SUCCESS) &&
4657 	    (eval_only == FALSE)) {
4658 		vm_wake_compactor_swapper();
4659 		/*
4660 		 * We do an explicit wakeup of the swapout thread here
4661 		 * because the compact_and_swap routines don't have
4662 		 * knowledge about these kinds of "per-task packed c_segs"
4663 		 * and so will not be evaluating whether we need to do
4664 		 * a wakeup there.
4665 		 */
4666 		thread_wakeup((event_t)&c_swapout_list_head);
4667 	}
4668 
4669 	return kr;
4670 }
4671 
4672 /*
4673  *	task_thaw:
4674  *
4675  *	Thaw a currently frozen task.
4676  *
4677  * Conditions:
4678  *      The caller holds a reference to the task
4679  */
4680 kern_return_t
4681 task_thaw(
4682 	task_t          task)
4683 {
4684 	if (task == TASK_NULL || task == kernel_task) {
4685 		return KERN_INVALID_ARGUMENT;
4686 	}
4687 
4688 	task_lock(task);
4689 
4690 	while (task->changing_freeze_state) {
4691 		assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
4692 		task_unlock(task);
4693 		thread_block(THREAD_CONTINUE_NULL);
4694 
4695 		task_lock(task);
4696 	}
4697 	if (!task->frozen) {
4698 		task_unlock(task);
4699 		return KERN_FAILURE;
4700 	}
4701 	task->frozen = FALSE;
4702 
4703 	task_unlock(task);
4704 
4705 	return KERN_SUCCESS;
4706 }
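/*
 * Freezer-side sketch (hedged; it assumes the caller already holds a
 * task reference, per the conditions above):
 *
 *	if (task_freeze(task, &purgeable, &wired, &clean, &dirty,
 *	    budget, &shared, &err, FALSE) == KERN_SUCCESS) {
 *		...task->frozen is now TRUE...
 *		task_thaw(task);
 *	}
 *
 * task_pidresume() also clears task->frozen on success, so a frozen,
 * pid-suspended task is thawed implicitly when it is resumed.
 */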
4707 
4708 void
4709 task_update_frozen_to_swap_acct(task_t task, int64_t amount, freezer_acct_op_t op)
4710 {
4711 	/*
4712 	 * We don't assert that the task lock is held because we call this
4713 	 * routine from the decompression path and we won't be holding the
4714 	 * task lock. However, since we are in the context of the task we are
4715 	 * safe.
4716 	 * In the case of the task_freeze path, we call it from behind the task
4717 	 * lock but we don't need to because we have a reference on the proc
4718 	 * being frozen.
4719 	 */
4720 
4721 	assert(task);
4722 	if (amount == 0) {
4723 		return;
4724 	}
4725 
4726 	if (op == CREDIT_TO_SWAP) {
4727 		ledger_credit_nocheck(task->ledger, task_ledgers.frozen_to_swap, amount);
4728 	} else if (op == DEBIT_FROM_SWAP) {
4729 		ledger_debit_nocheck(task->ledger, task_ledgers.frozen_to_swap, amount);
4730 	} else {
4731 		panic("task_update_frozen_to_swap_acct: Invalid ledger op");
4732 	}
4733 }
4734 #endif /* CONFIG_FREEZE */
4735 
4736 kern_return_t
4737 task_set_security_tokens(
4738 	task_t           task,
4739 	security_token_t sec_token,
4740 	audit_token_t    audit_token,
4741 	host_priv_t      host_priv)
4742 {
4743 	ipc_port_t       host_port;
4744 	kern_return_t    kr;
4745 
4746 	if (task == TASK_NULL) {
4747 		return KERN_INVALID_ARGUMENT;
4748 	}
4749 
4750 	task_lock(task);
4751 	task_set_tokens(task, &sec_token, &audit_token);
4752 	task_unlock(task);
4753 
4754 	if (host_priv != HOST_PRIV_NULL) {
4755 		kr = host_get_host_priv_port(host_priv, &host_port);
4756 	} else {
4757 		kr = host_get_host_port(host_priv_self(), &host_port);
4758 	}
4759 	assert(kr == KERN_SUCCESS);
4760 
4761 	kr = task_set_special_port_internal(task, TASK_HOST_PORT, host_port);
4762 	return kr;
4763 }
4764 
4765 kern_return_t
4766 task_send_trace_memory(
4767 	__unused task_t   target_task,
4768 	__unused uint32_t pid,
4769 	__unused uint64_t uniqueid)
4770 {
4771 	return KERN_INVALID_ARGUMENT;
4772 }
4773 
4774 /*
4775  * This routine was added, pretty much exclusively, for registering the
4776  * RPC glue vector for in-kernel short circuited tasks.  Rather than
4777  * removing it completely, I have only disabled that feature (which was
4778  * the only feature at the time).  It just appears that we are going to
4779  * want to add some user data to tasks in the future (e.g. bsd info,
4780  * task names, etc...), so I left it in the formal task interface.
4781  */
4782 kern_return_t
4783 task_set_info(
4784 	task_t          task,
4785 	task_flavor_t   flavor,
4786 	__unused task_info_t    task_info_in,           /* pointer to IN array */
4787 	__unused mach_msg_type_number_t task_info_count)
4788 {
4789 	if (task == TASK_NULL) {
4790 		return KERN_INVALID_ARGUMENT;
4791 	}
4792 	switch (flavor) {
4793 #if CONFIG_ATM
4794 	case TASK_TRACE_MEMORY_INFO:
4795 		return KERN_NOT_SUPPORTED;
4796 #endif // CONFIG_ATM
4797 	default:
4798 		return KERN_INVALID_ARGUMENT;
4799 	}
4800 }
4801 
4802 int radar_20146450 = 1;
4803 kern_return_t
4804 task_info(
4805 	task_t                  task,
4806 	task_flavor_t           flavor,
4807 	task_info_t             task_info_out,
4808 	mach_msg_type_number_t  *task_info_count)
4809 {
4810 	kern_return_t error = KERN_SUCCESS;
4811 	mach_msg_type_number_t  original_task_info_count;
4812 	bool is_kernel_task = (task == kernel_task);
4813 
4814 	if (task == TASK_NULL) {
4815 		return KERN_INVALID_ARGUMENT;
4816 	}
4817 
4818 	original_task_info_count = *task_info_count;
4819 	task_lock(task);
4820 
4821 	if ((task != current_task()) && (!task->active)) {
4822 		task_unlock(task);
4823 		return KERN_INVALID_ARGUMENT;
4824 	}
4825 
4826 
4827 	switch (flavor) {
4828 	case TASK_BASIC_INFO_32:
4829 	case TASK_BASIC2_INFO_32:
4830 #if defined(__arm__) || defined(__arm64__)
4831 	case TASK_BASIC_INFO_64:
4832 #endif
4833 		{
4834 			task_basic_info_32_t    basic_info;
4835 			vm_map_t                                map;
4836 			clock_sec_t                             secs;
4837 			clock_usec_t                    usecs;
4838 			ledger_amount_t tmp;
4839 
4840 			if (*task_info_count < TASK_BASIC_INFO_32_COUNT) {
4841 				error = KERN_INVALID_ARGUMENT;
4842 				break;
4843 			}
4844 
4845 			basic_info = (task_basic_info_32_t)task_info_out;
4846 
4847 			map = (task == kernel_task)? kernel_map: task->map;
4848 			basic_info->virtual_size = (typeof(basic_info->virtual_size))vm_map_adjusted_size(map);
4849 			if (flavor == TASK_BASIC2_INFO_32) {
4850 				/*
4851 				 * The "BASIC2" flavor gets the maximum resident
4852 				 * size instead of the current resident size...
4853 				 */
4854 				ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, &tmp);
4855 			} else {
4856 				ledger_get_balance(task->ledger, task_ledgers.phys_mem, &tmp);
4857 			}
4858 			basic_info->resident_size = (natural_t) MIN((ledger_amount_t) UINT32_MAX, tmp);
4859 
4860 			basic_info->policy = ((task != kernel_task)?
4861 			    POLICY_TIMESHARE: POLICY_RR);
4862 			basic_info->suspend_count = task->user_stop_count;
4863 
4864 			absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
4865 			basic_info->user_time.seconds =
4866 			    (typeof(basic_info->user_time.seconds))secs;
4867 			basic_info->user_time.microseconds = usecs;
4868 
4869 			absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
4870 			basic_info->system_time.seconds =
4871 			    (typeof(basic_info->system_time.seconds))secs;
4872 			basic_info->system_time.microseconds = usecs;
4873 
4874 			*task_info_count = TASK_BASIC_INFO_32_COUNT;
4875 			break;
4876 		}
4877 
4878 #if defined(__arm__) || defined(__arm64__)
4879 	case TASK_BASIC_INFO_64_2:
4880 	{
4881 		task_basic_info_64_2_t  basic_info;
4882 		vm_map_t                                map;
4883 		clock_sec_t                             secs;
4884 		clock_usec_t                    usecs;
4885 
4886 		if (*task_info_count < TASK_BASIC_INFO_64_2_COUNT) {
4887 			error = KERN_INVALID_ARGUMENT;
4888 			break;
4889 		}
4890 
4891 		basic_info = (task_basic_info_64_2_t)task_info_out;
4892 
4893 		map = (task == kernel_task)? kernel_map: task->map;
4894 		basic_info->virtual_size  = vm_map_adjusted_size(map);
4895 		ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &basic_info->resident_size);
4896 
4897 		basic_info->policy = ((task != kernel_task)?
4898 		    POLICY_TIMESHARE: POLICY_RR);
4899 		basic_info->suspend_count = task->user_stop_count;
4900 
4901 		absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
4902 		basic_info->user_time.seconds =
4903 		    (typeof(basic_info->user_time.seconds))secs;
4904 		basic_info->user_time.microseconds = usecs;
4905 
4906 		absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
4907 		basic_info->system_time.seconds =
4908 		    (typeof(basic_info->system_time.seconds))secs;
4909 		basic_info->system_time.microseconds = usecs;
4910 
4911 		*task_info_count = TASK_BASIC_INFO_64_2_COUNT;
4912 		break;
4913 	}
4914 
4915 #else /* defined(__arm__) || defined(__arm64__) */
4916 	case TASK_BASIC_INFO_64:
4917 	{
4918 		task_basic_info_64_t    basic_info;
4919 		vm_map_t                                map;
4920 		clock_sec_t                             secs;
4921 		clock_usec_t                    usecs;
4922 
4923 		if (*task_info_count < TASK_BASIC_INFO_64_COUNT) {
4924 			error = KERN_INVALID_ARGUMENT;
4925 			break;
4926 		}
4927 
4928 		basic_info = (task_basic_info_64_t)task_info_out;
4929 
4930 		map = (task == kernel_task)? kernel_map: task->map;
4931 		basic_info->virtual_size  = vm_map_adjusted_size(map);
4932 		ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *)&basic_info->resident_size);
4933 
4934 		basic_info->policy = ((task != kernel_task)?
4935 		    POLICY_TIMESHARE: POLICY_RR);
4936 		basic_info->suspend_count = task->user_stop_count;
4937 
4938 		absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
4939 		basic_info->user_time.seconds =
4940 		    (typeof(basic_info->user_time.seconds))secs;
4941 		basic_info->user_time.microseconds = usecs;
4942 
4943 		absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
4944 		basic_info->system_time.seconds =
4945 		    (typeof(basic_info->system_time.seconds))secs;
4946 		basic_info->system_time.microseconds = usecs;
4947 
4948 		*task_info_count = TASK_BASIC_INFO_64_COUNT;
4949 		break;
4950 	}
4951 #endif /* defined(__arm__) || defined(__arm64__) */
4952 
4953 	case MACH_TASK_BASIC_INFO:
4954 	{
4955 		mach_task_basic_info_t  basic_info;
4956 		vm_map_t                map;
4957 		clock_sec_t             secs;
4958 		clock_usec_t            usecs;
4959 
4960 		if (*task_info_count < MACH_TASK_BASIC_INFO_COUNT) {
4961 			error = KERN_INVALID_ARGUMENT;
4962 			break;
4963 		}
4964 
4965 		basic_info = (mach_task_basic_info_t)task_info_out;
4966 
4967 		map = (task == kernel_task) ? kernel_map : task->map;
4968 
4969 		basic_info->virtual_size  = vm_map_adjusted_size(map);
4970 
4971 		ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &basic_info->resident_size);
4972 		ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &basic_info->resident_size_max);
4973 
4974 		basic_info->policy = ((task != kernel_task) ?
4975 		    POLICY_TIMESHARE : POLICY_RR);
4976 
4977 		basic_info->suspend_count = task->user_stop_count;
4978 
4979 		absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
4980 		basic_info->user_time.seconds =
4981 		    (typeof(basic_info->user_time.seconds))secs;
4982 		basic_info->user_time.microseconds = usecs;
4983 
4984 		absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
4985 		basic_info->system_time.seconds =
4986 		    (typeof(basic_info->system_time.seconds))secs;
4987 		basic_info->system_time.microseconds = usecs;
4988 
4989 		*task_info_count = MACH_TASK_BASIC_INFO_COUNT;
4990 		break;
4991 	}
4992 
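/*
 * [Editorial sketch, not in the original source] A minimal user-space
 * query of the MACH_TASK_BASIC_INFO flavor handled above; assumes only
 * the standard Mach user headers:
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_error.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		mach_task_basic_info_data_t info;
 *		mach_msg_type_number_t count = MACH_TASK_BASIC_INFO_COUNT;
 *		kern_return_t kr;
 *
 *		kr = task_info(mach_task_self(), MACH_TASK_BASIC_INFO,
 *		    (task_info_t)&info, &count);
 *		if (kr != KERN_SUCCESS) {
 *			fprintf(stderr, "task_info: %s\n", mach_error_string(kr));
 *			return 1;
 *		}
 *		printf("resident %llu bytes (peak %llu), suspended %d times\n",
 *		    (unsigned long long)info.resident_size,
 *		    (unsigned long long)info.resident_size_max,
 *		    info.suspend_count);
 *		return 0;
 *	}
 */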
4993 	case TASK_THREAD_TIMES_INFO:
4994 	{
4995 		task_thread_times_info_t        times_info;
4996 		thread_t                                        thread;
4997 
4998 		if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
4999 			error = KERN_INVALID_ARGUMENT;
5000 			break;
5001 		}
5002 
5003 		times_info = (task_thread_times_info_t) task_info_out;
5004 		times_info->user_time.seconds = 0;
5005 		times_info->user_time.microseconds = 0;
5006 		times_info->system_time.seconds = 0;
5007 		times_info->system_time.microseconds = 0;
5008 
5009 
5010 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
5011 			time_value_t    user_time, system_time;
5012 
5013 			if (thread->options & TH_OPT_IDLE_THREAD) {
5014 				continue;
5015 			}
5016 
5017 			thread_read_times(thread, &user_time, &system_time, NULL);
5018 
5019 			time_value_add(&times_info->user_time, &user_time);
5020 			time_value_add(&times_info->system_time, &system_time);
5021 		}
5022 
5023 		*task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
5024 		break;
5025 	}
5026 
5027 	case TASK_ABSOLUTETIME_INFO:
5028 	{
5029 		task_absolutetime_info_t        info;
5030 		thread_t                        thread;
5031 
5032 		if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT) {
5033 			error = KERN_INVALID_ARGUMENT;
5034 			break;
5035 		}
5036 
5037 		info = (task_absolutetime_info_t)task_info_out;
5038 		info->threads_user = info->threads_system = 0;
5039 
5040 
5041 		info->total_user = task->total_user_time;
5042 		info->total_system = task->total_system_time;
5043 
5044 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
5045 			uint64_t        tval;
5046 			spl_t           x;
5047 
5048 			if (thread->options & TH_OPT_IDLE_THREAD) {
5049 				continue;
5050 			}
5051 
5052 			x = splsched();
5053 			thread_lock(thread);
5054 
5055 			tval = timer_grab(&thread->user_timer);
5056 			info->threads_user += tval;
5057 			info->total_user += tval;
5058 
5059 			tval = timer_grab(&thread->system_timer);
5060 			if (thread->precise_user_kernel_time) {
5061 				info->threads_system += tval;
5062 				info->total_system += tval;
5063 			} else {
5064 				/* system_timer may represent either sys or user */
5065 				info->threads_user += tval;
5066 				info->total_user += tval;
5067 			}
5068 
5069 			thread_unlock(thread);
5070 			splx(x);
5071 		}
5072 
5073 
5074 		*task_info_count = TASK_ABSOLUTETIME_INFO_COUNT;
5075 		break;
5076 	}
5077 
5078 	case TASK_DYLD_INFO:
5079 	{
5080 		task_dyld_info_t info;
5081 
5082 		/*
5083 		 * We added the format field to TASK_DYLD_INFO output.  For
5084 		 * temporary backward compatibility, accept the fact that
5085 		 * clients may ask for the old version - distinguished by the
5086 		 * size of the expected result structure.
5087 		 */
5088 #define TASK_LEGACY_DYLD_INFO_COUNT \
5089 	        offsetof(struct task_dyld_info, all_image_info_format)/sizeof(natural_t)
5090 
5091 		if (*task_info_count < TASK_LEGACY_DYLD_INFO_COUNT) {
5092 			error = KERN_INVALID_ARGUMENT;
5093 			break;
5094 		}
5095 
5096 		info = (task_dyld_info_t)task_info_out;
5097 		info->all_image_info_addr = task->all_image_info_addr;
5098 		info->all_image_info_size = task->all_image_info_size;
5099 
5100 		/* only set format on output for those expecting it */
5101 		if (*task_info_count >= TASK_DYLD_INFO_COUNT) {
5102 			info->all_image_info_format = task_has_64Bit_addr(task) ?
5103 			    TASK_DYLD_ALL_IMAGE_INFO_64 :
5104 			    TASK_DYLD_ALL_IMAGE_INFO_32;
5105 			*task_info_count = TASK_DYLD_INFO_COUNT;
5106 		} else {
5107 			*task_info_count = TASK_LEGACY_DYLD_INFO_COUNT;
5108 		}
5109 		break;
5110 	}
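/*
 * [Editorial sketch, not in the original source] The legacy-count handling
 * above means a caller passing the modern TASK_DYLD_INFO_COUNT also gets
 * all_image_info_format filled in.  A hedged user-space sketch, assuming
 * <mach/mach.h> and <mach-o/dyld_images.h>:
 *
 *	task_dyld_info_data_t dyld_info;
 *	mach_msg_type_number_t count = TASK_DYLD_INFO_COUNT;
 *
 *	if (task_info(mach_task_self(), TASK_DYLD_INFO,
 *	    (task_info_t)&dyld_info, &count) == KERN_SUCCESS &&
 *	    count == TASK_DYLD_INFO_COUNT) {
 *		// In-process, the address can be dereferenced directly.
 *		struct dyld_all_image_infos *infos =
 *		    (struct dyld_all_image_infos *)
 *		    (uintptr_t)dyld_info.all_image_info_addr;
 *		(void)infos;
 *	}
 */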
5111 
5112 	case TASK_EXTMOD_INFO:
5113 	{
5114 		task_extmod_info_t info;
5115 		void *p;
5116 
5117 		if (*task_info_count < TASK_EXTMOD_INFO_COUNT) {
5118 			error = KERN_INVALID_ARGUMENT;
5119 			break;
5120 		}
5121 
5122 		info = (task_extmod_info_t)task_info_out;
5123 
5124 		p = get_bsdtask_info(task);
5125 		if (p) {
5126 			proc_getexecutableuuid(p, info->task_uuid, sizeof(info->task_uuid));
5127 		} else {
5128 			bzero(info->task_uuid, sizeof(info->task_uuid));
5129 		}
5130 		info->extmod_statistics = task->extmod_statistics;
5131 		*task_info_count = TASK_EXTMOD_INFO_COUNT;
5132 
5133 		break;
5134 	}
5135 
5136 	case TASK_KERNELMEMORY_INFO:
5137 	{
5138 		task_kernelmemory_info_t        tkm_info;
5139 		ledger_amount_t                 credit, debit;
5140 
5141 		if (*task_info_count < TASK_KERNELMEMORY_INFO_COUNT) {
5142 			error = KERN_INVALID_ARGUMENT;
5143 			break;
5144 		}
5145 
5146 		tkm_info = (task_kernelmemory_info_t) task_info_out;
5147 		tkm_info->total_palloc = 0;
5148 		tkm_info->total_pfree = 0;
5149 		tkm_info->total_salloc = 0;
5150 		tkm_info->total_sfree = 0;
5151 
5152 		if (task == kernel_task) {
5153 			/*
5154 			 * All shared allocs/frees from other tasks count against
5155 			 * the kernel private memory usage.  If we are looking up
5156 			 * info for the kernel task, gather from everywhere.
5157 			 */
5158 			task_unlock(task);
5159 
5160 			/* start by accounting for all the terminated tasks against the kernel */
5161 			tkm_info->total_palloc = tasks_tkm_private.alloc + tasks_tkm_shared.alloc;
5162 			tkm_info->total_pfree = tasks_tkm_private.free + tasks_tkm_shared.free;
5163 
5164 			/* count all other task/thread shared alloc/free against the kernel */
5165 			lck_mtx_lock(&tasks_threads_lock);
5166 
5167 			/* XXX this really shouldn't be using the function parameter 'task' as a local var! */
5168 			queue_iterate(&tasks, task, task_t, tasks) {
5169 				if (task == kernel_task) {
5170 					if (ledger_get_entries(task->ledger,
5171 					    task_ledgers.tkm_private, &credit,
5172 					    &debit) == KERN_SUCCESS) {
5173 						tkm_info->total_palloc += credit;
5174 						tkm_info->total_pfree += debit;
5175 					}
5176 				}
5177 				if (!ledger_get_entries(task->ledger,
5178 				    task_ledgers.tkm_shared, &credit, &debit)) {
5179 					tkm_info->total_palloc += credit;
5180 					tkm_info->total_pfree += debit;
5181 				}
5182 			}
5183 			lck_mtx_unlock(&tasks_threads_lock);
5184 		} else {
5185 			if (!ledger_get_entries(task->ledger,
5186 			    task_ledgers.tkm_private, &credit, &debit)) {
5187 				tkm_info->total_palloc = credit;
5188 				tkm_info->total_pfree = debit;
5189 			}
5190 			if (!ledger_get_entries(task->ledger,
5191 			    task_ledgers.tkm_shared, &credit, &debit)) {
5192 				tkm_info->total_salloc = credit;
5193 				tkm_info->total_sfree = debit;
5194 			}
5195 			task_unlock(task);
5196 		}
5197 
5198 		*task_info_count = TASK_KERNELMEMORY_INFO_COUNT;
5199 		return KERN_SUCCESS;
5200 	}
5201 
5202 	/* OBSOLETE */
5203 	case TASK_SCHED_FIFO_INFO:
5204 	{
5205 		if (*task_info_count < POLICY_FIFO_BASE_COUNT) {
5206 			error = KERN_INVALID_ARGUMENT;
5207 			break;
5208 		}
5209 
5210 		error = KERN_INVALID_POLICY;
5211 		break;
5212 	}
5213 
5214 	/* OBSOLETE */
5215 	case TASK_SCHED_RR_INFO:
5216 	{
5217 		policy_rr_base_t        rr_base;
5218 		uint32_t quantum_time;
5219 		uint64_t quantum_ns;
5220 
5221 		if (*task_info_count < POLICY_RR_BASE_COUNT) {
5222 			error = KERN_INVALID_ARGUMENT;
5223 			break;
5224 		}
5225 
5226 		rr_base = (policy_rr_base_t) task_info_out;
5227 
5228 		if (task != kernel_task) {
5229 			error = KERN_INVALID_POLICY;
5230 			break;
5231 		}
5232 
5233 		rr_base->base_priority = task->priority;
5234 
5235 		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
5236 		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);
5237 
5238 		rr_base->quantum = (uint32_t)(quantum_ns / 1000 / 1000);
5239 
5240 		*task_info_count = POLICY_RR_BASE_COUNT;
5241 		break;
5242 	}
5243 
5244 	/* OBSOLETE */
5245 	case TASK_SCHED_TIMESHARE_INFO:
5246 	{
5247 		policy_timeshare_base_t ts_base;
5248 
5249 		if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT) {
5250 			error = KERN_INVALID_ARGUMENT;
5251 			break;
5252 		}
5253 
5254 		ts_base = (policy_timeshare_base_t) task_info_out;
5255 
5256 		if (task == kernel_task) {
5257 			error = KERN_INVALID_POLICY;
5258 			break;
5259 		}
5260 
5261 		ts_base->base_priority = task->priority;
5262 
5263 		*task_info_count = POLICY_TIMESHARE_BASE_COUNT;
5264 		break;
5265 	}
5266 
5267 	case TASK_SECURITY_TOKEN:
5268 	{
5269 		security_token_t        *sec_token_p;
5270 
5271 		if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
5272 			error = KERN_INVALID_ARGUMENT;
5273 			break;
5274 		}
5275 
5276 		sec_token_p = (security_token_t *) task_info_out;
5277 
5278 		*sec_token_p = *task_get_sec_token(task);
5279 
5280 		*task_info_count = TASK_SECURITY_TOKEN_COUNT;
5281 		break;
5282 	}
5283 
5284 	case TASK_AUDIT_TOKEN:
5285 	{
5286 		audit_token_t   *audit_token_p;
5287 
5288 		if (*task_info_count < TASK_AUDIT_TOKEN_COUNT) {
5289 			error = KERN_INVALID_ARGUMENT;
5290 			break;
5291 		}
5292 
5293 		audit_token_p = (audit_token_t *) task_info_out;
5294 
5295 		*audit_token_p = *task_get_audit_token(task);
5296 
5297 		*task_info_count = TASK_AUDIT_TOKEN_COUNT;
5298 		break;
5299 	}
5300 
5301 	case TASK_SCHED_INFO:
5302 		error = KERN_INVALID_ARGUMENT;
5303 		break;
5304 
5305 	case TASK_EVENTS_INFO:
5306 	{
5307 		task_events_info_t      events_info;
5308 		thread_t                thread;
5309 		uint64_t                n_syscalls_mach, n_syscalls_unix, n_csw;
5310 
5311 		if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
5312 			error = KERN_INVALID_ARGUMENT;
5313 			break;
5314 		}
5315 
5316 		events_info = (task_events_info_t) task_info_out;
5317 
5318 
5319 		events_info->faults = (int32_t) MIN(counter_load(&task->faults), INT32_MAX);
5320 		events_info->pageins = (int32_t) MIN(counter_load(&task->pageins), INT32_MAX);
5321 		events_info->cow_faults = (int32_t) MIN(counter_load(&task->cow_faults), INT32_MAX);
5322 		events_info->messages_sent = (int32_t) MIN(counter_load(&task->messages_sent), INT32_MAX);
5323 		events_info->messages_received = (int32_t) MIN(counter_load(&task->messages_received), INT32_MAX);
5324 
5325 		n_syscalls_mach = task->syscalls_mach;
5326 		n_syscalls_unix = task->syscalls_unix;
5327 		n_csw = task->c_switch;
5328 
5329 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
5330 			n_csw           += thread->c_switch;
5331 			n_syscalls_mach += thread->syscalls_mach;
5332 			n_syscalls_unix += thread->syscalls_unix;
5333 		}
5334 
5335 		events_info->syscalls_mach = (int32_t) MIN(n_syscalls_mach, INT32_MAX);
5336 		events_info->syscalls_unix = (int32_t) MIN(n_syscalls_unix, INT32_MAX);
5337 		events_info->csw = (int32_t) MIN(n_csw, INT32_MAX);
5338 
5339 		*task_info_count = TASK_EVENTS_INFO_COUNT;
5340 		break;
5341 	}
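/*
 * [Editorial sketch, not in the original source] Reading the saturated
 * 32-bit counters produced by the TASK_EVENTS_INFO case above; assumes
 * <mach/mach.h> plus <stdio.h> for printf:
 *
 *	task_events_info_data_t events;
 *	mach_msg_type_number_t count = TASK_EVENTS_INFO_COUNT;
 *
 *	if (task_info(mach_task_self(), TASK_EVENTS_INFO,
 *	    (task_info_t)&events, &count) == KERN_SUCCESS) {
 *		// Each field is clamped to INT32_MAX by the kernel.
 *		printf("faults %d pageins %d csw %d\n",
 *		    events.faults, events.pageins, events.csw);
 *	}
 */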
5342 	case TASK_AFFINITY_TAG_INFO:
5343 	{
5344 		if (*task_info_count < TASK_AFFINITY_TAG_INFO_COUNT) {
5345 			error = KERN_INVALID_ARGUMENT;
5346 			break;
5347 		}
5348 
5349 		error = task_affinity_info(task, task_info_out, task_info_count);
5350 		break;
5351 	}
5352 	case TASK_POWER_INFO:
5353 	{
5354 		if (*task_info_count < TASK_POWER_INFO_COUNT) {
5355 			error = KERN_INVALID_ARGUMENT;
5356 			break;
5357 		}
5358 
5359 		task_power_info_locked(task, (task_power_info_t)task_info_out, NULL, NULL, NULL);
5360 		break;
5361 	}
5362 
5363 	case TASK_POWER_INFO_V2:
5364 	{
5365 		if (*task_info_count < TASK_POWER_INFO_V2_COUNT_OLD) {
5366 			error = KERN_INVALID_ARGUMENT;
5367 			break;
5368 		}
5369 		task_power_info_v2_t tpiv2 = (task_power_info_v2_t) task_info_out;
5370 		task_power_info_locked(task, &tpiv2->cpu_energy, &tpiv2->gpu_energy, tpiv2, NULL);
5371 		break;
5372 	}
5373 
5374 	case TASK_VM_INFO:
5375 	case TASK_VM_INFO_PURGEABLE:
5376 	{
5377 		task_vm_info_t          vm_info;
5378 		vm_map_t                map;
5379 		ledger_amount_t         tmp_amount;
5380 
5381 #if __arm64__
5382 		struct proc *p;
5383 		uint32_t platform, sdk;
5384 		p = current_proc();
5385 		platform = proc_platform(p);
5386 		sdk = proc_sdk(p);
5387 		if (original_task_info_count > TASK_VM_INFO_REV2_COUNT &&
5388 		    platform == PLATFORM_IOS &&
5389 		    sdk != 0 &&
5390 		    (sdk >> 16) <= 12) {
5391 			/*
5392 			 * Some iOS apps pass an incorrect value for
5393 			 * task_info_count, expressed in number of bytes
5394 			 * instead of number of "natural_t" elements.
5395 			 * For the sake of backwards binary compatibility
5396 			 * for apps built with an iOS12 or older SDK and using
5397 			 * the "rev2" data structure, let's fix task_info_count
5398 			 * for them, to avoid stomping past the actual end
5399 			 * of their buffer.
5400 			 */
5401 #if DEVELOPMENT || DEBUG
5402 			printf("%s:%d %d[%s] rdar://49484582 task_info_count %d -> %d platform %d sdk %d.%d.%d\n", __FUNCTION__, __LINE__, proc_pid(p), proc_name_address(p), original_task_info_count, TASK_VM_INFO_REV2_COUNT, platform, (sdk >> 16), ((sdk >> 8) & 0xff), (sdk & 0xff));
5403 #endif /* DEVELOPMENT || DEBUG */
5404 			DTRACE_VM4(workaround_task_vm_info_count,
5405 			    mach_msg_type_number_t, original_task_info_count,
5406 			    mach_msg_type_number_t, TASK_VM_INFO_REV2_COUNT,
5407 			    uint32_t, platform,
5408 			    uint32_t, sdk);
5409 			original_task_info_count = TASK_VM_INFO_REV2_COUNT;
5410 			*task_info_count = original_task_info_count;
5411 		}
5412 #endif /* __arm64__ */
5413 
5414 		if (*task_info_count < TASK_VM_INFO_REV0_COUNT) {
5415 			error = KERN_INVALID_ARGUMENT;
5416 			break;
5417 		}
5418 
5419 		vm_info = (task_vm_info_t)task_info_out;
5420 
5421 		/*
5422 		 * Do not hold both the task and map locks,
5423 		 * so convert the task lock into a map reference,
5424 		 * drop the task lock, then lock the map.
5425 		 */
5426 		if (is_kernel_task) {
5427 			map = kernel_map;
5428 			task_unlock(task);
5429 			/* no lock, no reference */
5430 		} else {
5431 			map = task->map;
5432 			vm_map_reference(map);
5433 			task_unlock(task);
5434 			vm_map_lock_read(map);
5435 		}
5436 
5437 		vm_info->virtual_size = (typeof(vm_info->virtual_size))vm_map_adjusted_size(map);
5438 		vm_info->region_count = map->hdr.nentries;
5439 		vm_info->page_size = vm_map_page_size(map);
5440 
5441 		ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &vm_info->resident_size);
5442 		ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &vm_info->resident_size_peak);
5443 
5444 		vm_info->device = 0;
5445 		vm_info->device_peak = 0;
5446 		ledger_get_balance(task->ledger, task_ledgers.external, (ledger_amount_t *) &vm_info->external);
5447 		ledger_get_lifetime_max(task->ledger, task_ledgers.external, (ledger_amount_t *) &vm_info->external_peak);
5448 		ledger_get_balance(task->ledger, task_ledgers.internal, (ledger_amount_t *) &vm_info->internal);
5449 		ledger_get_lifetime_max(task->ledger, task_ledgers.internal, (ledger_amount_t *) &vm_info->internal_peak);
5450 		ledger_get_balance(task->ledger, task_ledgers.reusable, (ledger_amount_t *) &vm_info->reusable);
5451 		ledger_get_lifetime_max(task->ledger, task_ledgers.reusable, (ledger_amount_t *) &vm_info->reusable_peak);
5452 		ledger_get_balance(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t*) &vm_info->compressed);
5453 		ledger_get_lifetime_max(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t*) &vm_info->compressed_peak);
5454 		ledger_get_entries(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t*) &vm_info->compressed_lifetime, &tmp_amount);
5455 
5456 		vm_info->purgeable_volatile_pmap = 0;
5457 		vm_info->purgeable_volatile_resident = 0;
5458 		vm_info->purgeable_volatile_virtual = 0;
5459 		if (is_kernel_task) {
5460 			/*
5461 			 * We do not maintain the detailed stats for the
5462 			 * kernel_pmap, so just count everything as
5463 			 * "internal"...
5464 			 */
5465 			vm_info->internal = vm_info->resident_size;
5466 			/*
5467 			 * ... but since the memory held by the VM compressor
5468 			 * in the kernel address space ought to be attributed
5469 			 * to user-space tasks, we subtract it from "internal"
5470 			 * to give memory reporting tools a more accurate idea
5471 			 * of what the kernel itself is actually using, instead
5472 			 * of making it look like the kernel is leaking memory
5473 			 * when the system is under memory pressure.
5474 			 */
5475 			vm_info->internal -= (VM_PAGE_COMPRESSOR_COUNT *
5476 			    PAGE_SIZE);
5477 		} else {
5478 			mach_vm_size_t  volatile_virtual_size;
5479 			mach_vm_size_t  volatile_resident_size;
5480 			mach_vm_size_t  volatile_compressed_size;
5481 			mach_vm_size_t  volatile_pmap_size;
5482 			mach_vm_size_t  volatile_compressed_pmap_size;
5483 			kern_return_t   kr;
5484 
5485 			if (flavor == TASK_VM_INFO_PURGEABLE) {
5486 				kr = vm_map_query_volatile(
5487 					map,
5488 					&volatile_virtual_size,
5489 					&volatile_resident_size,
5490 					&volatile_compressed_size,
5491 					&volatile_pmap_size,
5492 					&volatile_compressed_pmap_size);
5493 				if (kr == KERN_SUCCESS) {
5494 					vm_info->purgeable_volatile_pmap =
5495 					    volatile_pmap_size;
5496 					if (radar_20146450) {
5497 						vm_info->compressed -=
5498 						    volatile_compressed_pmap_size;
5499 					}
5500 					vm_info->purgeable_volatile_resident =
5501 					    volatile_resident_size;
5502 					vm_info->purgeable_volatile_virtual =
5503 					    volatile_virtual_size;
5504 				}
5505 			}
5506 		}
5507 		*task_info_count = TASK_VM_INFO_REV0_COUNT;
5508 
5509 		if (original_task_info_count >= TASK_VM_INFO_REV2_COUNT) {
5510 			/* must be captured while we still have the map lock */
5511 			vm_info->min_address = map->min_offset;
5512 			vm_info->max_address = map->max_offset;
5513 		}
5514 
5515 		/*
5516 		 * Done with vm map things, can drop the map lock and reference,
5517 		 * and take the task lock back.
5518 		 *
5519 		 * Re-validate that the task didn't die on us.
5520 		 */
5521 		if (!is_kernel_task) {
5522 			vm_map_unlock_read(map);
5523 			vm_map_deallocate(map);
5524 		}
5525 		map = VM_MAP_NULL;
5526 
5527 		task_lock(task);
5528 
5529 		if ((task != current_task()) && (!task->active)) {
5530 			error = KERN_INVALID_ARGUMENT;
5531 			break;
5532 		}
5533 
5534 		if (original_task_info_count >= TASK_VM_INFO_REV1_COUNT) {
5535 			vm_info->phys_footprint =
5536 			    (mach_vm_size_t) get_task_phys_footprint(task);
5537 			*task_info_count = TASK_VM_INFO_REV1_COUNT;
5538 		}
5539 		if (original_task_info_count >= TASK_VM_INFO_REV2_COUNT) {
5540 			/* data was captured above */
5541 			*task_info_count = TASK_VM_INFO_REV2_COUNT;
5542 		}
5543 
5544 		if (original_task_info_count >= TASK_VM_INFO_REV3_COUNT) {
5545 			ledger_get_lifetime_max(task->ledger,
5546 			    task_ledgers.phys_footprint,
5547 			    &vm_info->ledger_phys_footprint_peak);
5548 			ledger_get_balance(task->ledger,
5549 			    task_ledgers.purgeable_nonvolatile,
5550 			    &vm_info->ledger_purgeable_nonvolatile);
5551 			ledger_get_balance(task->ledger,
5552 			    task_ledgers.purgeable_nonvolatile_compressed,
5553 			    &vm_info->ledger_purgeable_novolatile_compressed);
5554 			ledger_get_balance(task->ledger,
5555 			    task_ledgers.purgeable_volatile,
5556 			    &vm_info->ledger_purgeable_volatile);
5557 			ledger_get_balance(task->ledger,
5558 			    task_ledgers.purgeable_volatile_compressed,
5559 			    &vm_info->ledger_purgeable_volatile_compressed);
5560 			ledger_get_balance(task->ledger,
5561 			    task_ledgers.network_nonvolatile,
5562 			    &vm_info->ledger_tag_network_nonvolatile);
5563 			ledger_get_balance(task->ledger,
5564 			    task_ledgers.network_nonvolatile_compressed,
5565 			    &vm_info->ledger_tag_network_nonvolatile_compressed);
5566 			ledger_get_balance(task->ledger,
5567 			    task_ledgers.network_volatile,
5568 			    &vm_info->ledger_tag_network_volatile);
5569 			ledger_get_balance(task->ledger,
5570 			    task_ledgers.network_volatile_compressed,
5571 			    &vm_info->ledger_tag_network_volatile_compressed);
5572 			ledger_get_balance(task->ledger,
5573 			    task_ledgers.media_footprint,
5574 			    &vm_info->ledger_tag_media_footprint);
5575 			ledger_get_balance(task->ledger,
5576 			    task_ledgers.media_footprint_compressed,
5577 			    &vm_info->ledger_tag_media_footprint_compressed);
5578 			ledger_get_balance(task->ledger,
5579 			    task_ledgers.media_nofootprint,
5580 			    &vm_info->ledger_tag_media_nofootprint);
5581 			ledger_get_balance(task->ledger,
5582 			    task_ledgers.media_nofootprint_compressed,
5583 			    &vm_info->ledger_tag_media_nofootprint_compressed);
5584 			ledger_get_balance(task->ledger,
5585 			    task_ledgers.graphics_footprint,
5586 			    &vm_info->ledger_tag_graphics_footprint);
5587 			ledger_get_balance(task->ledger,
5588 			    task_ledgers.graphics_footprint_compressed,
5589 			    &vm_info->ledger_tag_graphics_footprint_compressed);
5590 			ledger_get_balance(task->ledger,
5591 			    task_ledgers.graphics_nofootprint,
5592 			    &vm_info->ledger_tag_graphics_nofootprint);
5593 			ledger_get_balance(task->ledger,
5594 			    task_ledgers.graphics_nofootprint_compressed,
5595 			    &vm_info->ledger_tag_graphics_nofootprint_compressed);
5596 			ledger_get_balance(task->ledger,
5597 			    task_ledgers.neural_footprint,
5598 			    &vm_info->ledger_tag_neural_footprint);
5599 			ledger_get_balance(task->ledger,
5600 			    task_ledgers.neural_footprint_compressed,
5601 			    &vm_info->ledger_tag_neural_footprint_compressed);
5602 			ledger_get_balance(task->ledger,
5603 			    task_ledgers.neural_nofootprint,
5604 			    &vm_info->ledger_tag_neural_nofootprint);
5605 			ledger_get_balance(task->ledger,
5606 			    task_ledgers.neural_nofootprint_compressed,
5607 			    &vm_info->ledger_tag_neural_nofootprint_compressed);
5608 			*task_info_count = TASK_VM_INFO_REV3_COUNT;
5609 		}
5610 		if (original_task_info_count >= TASK_VM_INFO_REV4_COUNT) {
5611 			if (task->bsd_info) {
5612 				vm_info->limit_bytes_remaining =
5613 				    memorystatus_available_memory_internal(task->bsd_info);
5614 			} else {
5615 				vm_info->limit_bytes_remaining = 0;
5616 			}
5617 			*task_info_count = TASK_VM_INFO_REV4_COUNT;
5618 		}
5619 		if (original_task_info_count >= TASK_VM_INFO_REV5_COUNT) {
5620 			thread_t thread;
5621 			uint64_t total = task->decompressions;
5622 			queue_iterate(&task->threads, thread, thread_t, task_threads) {
5623 				total += thread->decompressions;
5624 			}
5625 			vm_info->decompressions = (int32_t) MIN(total, INT32_MAX);
5626 			*task_info_count = TASK_VM_INFO_REV5_COUNT;
5627 		}
5628 		if (original_task_info_count >= TASK_VM_INFO_REV6_COUNT) {
5629 			ledger_get_balance(task->ledger, task_ledgers.swapins,
5630 			    &vm_info->ledger_swapins);
5631 			*task_info_count = TASK_VM_INFO_REV6_COUNT;
5632 		}
5633 
5634 		break;
5635 	}
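/*
 * [Editorial sketch, not in the original source] Because the kernel trims
 * *task_info_count to the newest revision both sides understand, callers
 * should check the returned count before touching newer fields.  Assumes
 * <mach/mach.h> (the REV*_COUNT constants live in <mach/task_info.h>)
 * plus <stdio.h> for printf:
 *
 *	task_vm_info_data_t vm;
 *	mach_msg_type_number_t count = TASK_VM_INFO_COUNT;
 *
 *	if (task_info(mach_task_self(), TASK_VM_INFO,
 *	    (task_info_t)&vm, &count) == KERN_SUCCESS) {
 *		if (count >= TASK_VM_INFO_REV1_COUNT) {
 *			// phys_footprint is only valid from rev1 on
 *			printf("footprint %llu\n",
 *			    (unsigned long long)vm.phys_footprint);
 *		}
 *	}
 */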
5636 
5637 	case TASK_WAIT_STATE_INFO:
5638 	{
5639 		/*
5640 		 * Deprecated flavor. Some results are still returned until all
5641 		 * users stop calling it; the results may not be accurate.
5642 		 */
5643 		task_wait_state_info_t  wait_state_info;
5644 		uint64_t total_sfi_ledger_val = 0;
5645 
5646 		if (*task_info_count < TASK_WAIT_STATE_INFO_COUNT) {
5647 			error = KERN_INVALID_ARGUMENT;
5648 			break;
5649 		}
5650 
5651 		wait_state_info = (task_wait_state_info_t) task_info_out;
5652 
5653 		wait_state_info->total_wait_state_time = 0;
5654 		bzero(wait_state_info->_reserved, sizeof(wait_state_info->_reserved));
5655 
5656 #if CONFIG_SCHED_SFI
5657 		int i, prev_lentry = -1;
5658 		int64_t  val_credit, val_debit;
5659 
5660 		for (i = 0; i < MAX_SFI_CLASS_ID; i++) {
5661 			val_credit = 0;
5662 			/*
5663 			 * checking with prev_lentry != entry ensures adjacent classes
5664 			 * which share the same ledger do not add wait times twice.
5665 			 * Note: Use ledger() call to get data for each individual sfi class.
5666 			 */
5667 			if (prev_lentry != task_ledgers.sfi_wait_times[i] &&
5668 			    KERN_SUCCESS == ledger_get_entries(task->ledger,
5669 			    task_ledgers.sfi_wait_times[i], &val_credit, &val_debit)) {
5670 				total_sfi_ledger_val += val_credit;
5671 			}
5672 			prev_lentry = task_ledgers.sfi_wait_times[i];
5673 		}
5674 
5675 #endif /* CONFIG_SCHED_SFI */
5676 		wait_state_info->total_wait_sfi_state_time = total_sfi_ledger_val;
5677 		*task_info_count = TASK_WAIT_STATE_INFO_COUNT;
5678 
5679 		break;
5680 	}
5681 	case TASK_VM_INFO_PURGEABLE_ACCOUNT:
5682 	{
5683 #if DEVELOPMENT || DEBUG
5684 		pvm_account_info_t      acnt_info;
5685 
5686 		if (*task_info_count < PVM_ACCOUNT_INFO_COUNT) {
5687 			error = KERN_INVALID_ARGUMENT;
5688 			break;
5689 		}
5690 
5691 		if (task_info_out == NULL) {
5692 			error = KERN_INVALID_ARGUMENT;
5693 			break;
5694 		}
5695 
5696 		acnt_info = (pvm_account_info_t) task_info_out;
5697 
5698 		error = vm_purgeable_account(task, acnt_info);
5699 
5700 		*task_info_count = PVM_ACCOUNT_INFO_COUNT;
5701 
5702 		break;
5703 #else /* DEVELOPMENT || DEBUG */
5704 		error = KERN_NOT_SUPPORTED;
5705 		break;
5706 #endif /* DEVELOPMENT || DEBUG */
5707 	}
5708 	case TASK_FLAGS_INFO:
5709 	{
5710 		task_flags_info_t               flags_info;
5711 
5712 		if (*task_info_count < TASK_FLAGS_INFO_COUNT) {
5713 			error = KERN_INVALID_ARGUMENT;
5714 			break;
5715 		}
5716 
5717 		flags_info = (task_flags_info_t)task_info_out;
5718 
5719 		/* only publish the 64-bit flag of the task */
5720 		flags_info->flags = task->t_flags & (TF_64B_ADDR | TF_64B_DATA);
5721 
5722 		*task_info_count = TASK_FLAGS_INFO_COUNT;
5723 		break;
5724 	}
5725 
5726 	case TASK_DEBUG_INFO_INTERNAL:
5727 	{
5728 #if DEVELOPMENT || DEBUG
5729 		task_debug_info_internal_t dbg_info;
5730 		ipc_space_t space = task->itk_space;
5731 		if (*task_info_count < TASK_DEBUG_INFO_INTERNAL_COUNT) {
5732 			error = KERN_NOT_SUPPORTED;
5733 			break;
5734 		}
5735 
5736 		if (task_info_out == NULL) {
5737 			error = KERN_INVALID_ARGUMENT;
5738 			break;
5739 		}
5740 		dbg_info = (task_debug_info_internal_t) task_info_out;
5741 		dbg_info->ipc_space_size = 0;
5742 
5743 		if (space) {
5744 #if MACH_LOCKFREE_SPACE
5745 			hazard_guard_t guard = hazard_guard_get(0);
5746 			ipc_entry_t table = hazard_guard_acquire(guard, &space->is_table);
5747 			if (table) {
5748 				dbg_info->ipc_space_size = table->ie_size;
5749 			}
5750 			hazard_guard_put(guard);
5751 #else
5752 			is_read_lock(space);
5753 			if (is_active(space)) {
5754 				dbg_info->ipc_space_size =
5755 				    is_active_table(space)->ie_size;
5756 			}
5757 			is_read_unlock(space);
5758 #endif
5759 		}
5760 
5761 		dbg_info->suspend_count = task->suspend_count;
5762 
5763 		error = KERN_SUCCESS;
5764 		*task_info_count = TASK_DEBUG_INFO_INTERNAL_COUNT;
5765 		break;
5766 #else /* DEVELOPMENT || DEBUG */
5767 		error = KERN_NOT_SUPPORTED;
5768 		break;
5769 #endif /* DEVELOPMENT || DEBUG */
5770 	}
5771 	default:
5772 		error = KERN_INVALID_ARGUMENT;
5773 	}
5774 
5775 	task_unlock(task);
5776 	return error;
5777 }
5778 
5779 /*
5780  * task_info_from_user
5781  *
5782  * When task_info is called from user space,
5783  * this function runs as the MIG server-side routine
5784  * instead of calling directly into task_info.
5785  * This makes it possible to perform additional security
5786  * checks on task_port.
5787  *
5788  * In the case of TASK_DYLD_INFO, we require the more
5789  * privileged task_read_port, not the less-privileged task_name_port.
5790  *
5791  */
5792 kern_return_t
5793 task_info_from_user(
5794 	mach_port_t             task_port,
5795 	task_flavor_t           flavor,
5796 	task_info_t             task_info_out,
5797 	mach_msg_type_number_t  *task_info_count)
5798 {
5799 	task_t task;
5800 	kern_return_t ret;
5801 
5802 	if (flavor == TASK_DYLD_INFO) {
5803 		task = convert_port_to_task_read(task_port);
5804 	} else {
5805 		task = convert_port_to_task_name(task_port);
5806 	}
5807 
5808 	ret = task_info(task, flavor, task_info_out, task_info_count);
5809 
5810 	task_deallocate(task);
5811 
5812 	return ret;
5813 }
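/*
 * [Editorial sketch, not in the original source] The practical effect of
 * the check above: a mere task-name port is not enough for TASK_DYLD_INFO.
 * Hedged sketch, assuming task_name_for_pid() from <mach/mach_traps.h>
 * and a pid obtained elsewhere:
 *
 *	mach_port_name_t name_port = MACH_PORT_NULL;
 *
 *	if (task_name_for_pid(mach_task_self(), pid, &name_port) ==
 *	    KERN_SUCCESS) {
 *		task_dyld_info_data_t info;
 *		mach_msg_type_number_t count = TASK_DYLD_INFO_COUNT;
 *		kern_return_t kr = task_info(name_port, TASK_DYLD_INFO,
 *		    (task_info_t)&info, &count);
 *		// Expect KERN_INVALID_ARGUMENT: the name port does not
 *		// convert to a read port, so task_info() sees TASK_NULL.
 *	}
 */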
5814 
5815 /*
5816  * Routine: task_dyld_process_info_update_helper
5817  *
5818  * Release send rights in release_ports.
5819  *
5820  * If no active ports are found in the task's dyld notifier array, unset the
5821  * magic value in user space to indicate so.
5822  *
5823  * Condition:
5824  *      task's itk_lock is locked, and is unlocked upon return.
5825  *      Global g_dyldinfo_mtx is locked, and is unlocked upon return.
5826  */
5827 void
5828 task_dyld_process_info_update_helper(
5829 	task_t                  task,
5830 	size_t                  active_count,
5831 	vm_map_address_t        magic_addr,    /* a userspace address */
5832 	ipc_port_t             *release_ports,
5833 	size_t                  release_count)
5834 {
5835 	void *notifiers_ptr = NULL;
5836 
5837 	assert(release_count <= DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT);
5838 
5839 	if (active_count == 0) {
5840 		assert(task->itk_dyld_notify != NULL);
5841 		notifiers_ptr = task->itk_dyld_notify;
5842 		task->itk_dyld_notify = NULL;
5843 		itk_unlock(task);
5844 
5845 		kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
5846 		(void)copyoutmap_atomic32(task->map, MACH_PORT_NULL, magic_addr); /* unset magic */
5847 	} else {
5848 		itk_unlock(task);
5849 		(void)copyoutmap_atomic32(task->map, (mach_port_name_t)DYLD_PROCESS_INFO_NOTIFY_MAGIC,
5850 		    magic_addr);     /* reset magic */
5851 	}
5852 
5853 	lck_mtx_unlock(&g_dyldinfo_mtx);
5854 
5855 	for (size_t i = 0; i < release_count; i++) {
5856 		ipc_port_release_send(release_ports[i]);
5857 	}
5858 }
5859 
5860 /*
5861  * Routine: task_dyld_process_info_notify_register
5862  *
5863  * Insert a send right into the target task's itk_dyld_notify array. Allocate
5864  * kernel memory for the array if this is the first port to be registered.
5865  * Also clean up any dead rights found in the array.
5866  *
5867  * Consumes sright if returns KERN_SUCCESS, otherwise MIG will destroy it.
5868  *
5869  * Args:
5870  *     task:   Target task for the registration.
5871  *     sright: A send right.
5872  *
5873  * Returns:
5874  *     KERN_SUCCESS: Registration succeeded.
5875  *     KERN_INVALID_TASK: task is invalid.
5876  *     KERN_INVALID_RIGHT: sright is invalid.
5877  *     KERN_DENIED: Security policy denied this call.
5878  *     KERN_RESOURCE_SHORTAGE: Kernel memory allocation failed.
5879  *     KERN_NO_SPACE: No available notifier port slot left for this task.
5880  *     KERN_RIGHT_EXISTS: The notifier port is already registered and active.
5881  *
5882  *     Other error code see task_info().
5883  *
5884  * See Also:
5885  *     task_dyld_process_info_notify_get_trap() in mach_kernelrpc.c
5886  */
5887 kern_return_t
5888 task_dyld_process_info_notify_register(
5889 	task_t                  task,
5890 	ipc_port_t              sright)
5891 {
5892 	struct task_dyld_info dyld_info;
5893 	mach_msg_type_number_t info_count = TASK_DYLD_INFO_COUNT;
5894 	ipc_port_t release_ports[DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT];
5895 	uint32_t release_count = 0, active_count = 0;
5896 	mach_vm_address_t ports_addr; /* a user space address */
5897 	kern_return_t kr;
5898 	boolean_t right_exists = false;
5899 	ipc_port_t *notifiers_ptr = NULL;
5900 	ipc_port_t *portp;
5901 
5902 	if (task == TASK_NULL || task == kernel_task) {
5903 		return KERN_INVALID_TASK;
5904 	}
5905 
5906 	if (!IP_VALID(sright)) {
5907 		return KERN_INVALID_RIGHT;
5908 	}
5909 
5910 #if CONFIG_MACF
5911 	if (mac_task_check_dyld_process_info_notify_register()) {
5912 		return KERN_DENIED;
5913 	}
5914 #endif
5915 
5916 	kr = task_info(task, TASK_DYLD_INFO, (task_info_t)&dyld_info, &info_count);
5917 	if (kr) {
5918 		return kr;
5919 	}
5920 
5921 	if (dyld_info.all_image_info_format == TASK_DYLD_ALL_IMAGE_INFO_32) {
5922 		ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
5923 		    offsetof(struct user32_dyld_all_image_infos, notifyMachPorts));
5924 	} else {
5925 		ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
5926 		    offsetof(struct user64_dyld_all_image_infos, notifyMachPorts));
5927 	}
5928 
5929 	if (task->itk_dyld_notify == NULL) {
5930 		notifiers_ptr = kalloc_type(ipc_port_t,
5931 		    DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT,
5932 		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
5933 	}
5934 
5935 	lck_mtx_lock(&g_dyldinfo_mtx);
5936 	itk_lock(task);
5937 
5938 	if (task->itk_dyld_notify == NULL) {
5939 		task->itk_dyld_notify = notifiers_ptr;
5940 		notifiers_ptr = NULL;
5941 	}
5942 
5943 	assert(task->itk_dyld_notify != NULL);
5944 	/* First pass: clear dead names and check for duplicate registration */
5945 	for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
5946 		portp = &task->itk_dyld_notify[slot];
5947 		if (*portp != IPC_PORT_NULL && !ip_active(*portp)) {
5948 			release_ports[release_count++] = *portp;
5949 			*portp = IPC_PORT_NULL;
5950 		} else if (*portp == sright) {
5951 			/* the port is already registered and is active */
5952 			right_exists = true;
5953 		}
5954 
5955 		if (*portp != IPC_PORT_NULL) {
5956 			active_count++;
5957 		}
5958 	}
5959 
5960 	if (right_exists) {
5961 		/* skip second pass */
5962 		kr = KERN_RIGHT_EXISTS;
5963 		goto out;
5964 	}
5965 
5966 	/* Second pass: register the port */
5967 	kr = KERN_NO_SPACE;
5968 	for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
5969 		portp = &task->itk_dyld_notify[slot];
5970 		if (*portp == IPC_PORT_NULL) {
5971 			*portp = sright;
5972 			active_count++;
5973 			kr = KERN_SUCCESS;
5974 			break;
5975 		}
5976 	}
5977 
5978 out:
5979 	assert(active_count > 0);
5980 
5981 	task_dyld_process_info_update_helper(task, active_count,
5982 	    (vm_map_address_t)ports_addr, release_ports, release_count);
5983 	/* itk_lock, g_dyldinfo_mtx are unlocked upon return */
5984 
5985 	kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
5986 
5987 	return kr;
5988 }
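/*
 * [Editorial sketch, not in the original source] A caller would typically
 * allocate a receive right, make a send right, and hand the send right to
 * this routine (which consumes it on success).  Sketch only; assumes the
 * MIG user stub for task_dyld_process_info_notify_register and a
 * target_task port obtained elsewhere:
 *
 *	mach_port_t rcv = MACH_PORT_NULL;
 *
 *	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &rcv);
 *	mach_port_insert_right(mach_task_self(), rcv, rcv,
 *	    MACH_MSG_TYPE_MAKE_SEND);
 *	kern_return_t kr =
 *	    task_dyld_process_info_notify_register(target_task, rcv);
 *	// KERN_NO_SPACE once all DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT
 *	// slots are taken; KERN_RIGHT_EXISTS on double registration.
 */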
5989 
5990 /*
5991  * Routine: task_dyld_process_info_notify_deregister
5992  *
5993  * Remove the send right in the target task's itk_dyld_notify array matching the
5994  * receive right name passed in. Deallocate kernel memory for the array if it's the last
5995  * port to be deregistered or all ports have died. Also clean up any dead rights in the array.
5996  *
5997  * Does not consume any reference.
5998  *
5999  * Args:
6000  *     task: Target task for the deregistration.
6001  *     rcv_name: The name denoting the receive right in caller's space.
6002  *
6003  * Returns:
6004  *     KERN_SUCCESS: A matching entry was found and deregistration succeeded.
6005  *     KERN_INVALID_TASK: task is invalid.
6006  *     KERN_INVALID_NAME: name is invalid.
6007  *     KERN_DENIED: Security policy denied this call.
6008  *     KERN_FAILURE: A matching entry is not found.
6009  *     KERN_INVALID_RIGHT: The name passed in does not represent a valid rcv right.
6010  *
6011  *     Other error code see task_info().
6012  *
6013  * See Also:
6014  *     task_dyld_process_info_notify_get_trap() in mach_kernelrpc.c
6015  */
6016 kern_return_t
6017 task_dyld_process_info_notify_deregister(
6018 	task_t                  task,
6019 	mach_port_name_t        rcv_name)
6020 {
6021 	struct task_dyld_info dyld_info;
6022 	mach_msg_type_number_t info_count = TASK_DYLD_INFO_COUNT;
6023 	ipc_port_t release_ports[DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT];
6024 	uint32_t release_count = 0, active_count = 0;
6025 	boolean_t port_found = false;
6026 	mach_vm_address_t ports_addr; /* a user space address */
6027 	ipc_port_t sright;
6028 	kern_return_t kr;
6029 	ipc_port_t *portp;
6030 
6031 	if (task == TASK_NULL || task == kernel_task) {
6032 		return KERN_INVALID_TASK;
6033 	}
6034 
6035 	if (!MACH_PORT_VALID(rcv_name)) {
6036 		return KERN_INVALID_NAME;
6037 	}
6038 
6039 #if CONFIG_MACF
6040 	if (mac_task_check_dyld_process_info_notify_register()) {
6041 		return KERN_DENIED;
6042 	}
6043 #endif
6044 
6045 	kr = task_info(task, TASK_DYLD_INFO, (task_info_t)&dyld_info, &info_count);
6046 	if (kr) {
6047 		return kr;
6048 	}
6049 
6050 	if (dyld_info.all_image_info_format == TASK_DYLD_ALL_IMAGE_INFO_32) {
6051 		ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6052 		    offsetof(struct user32_dyld_all_image_infos, notifyMachPorts));
6053 	} else {
6054 		ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6055 		    offsetof(struct user64_dyld_all_image_infos, notifyMachPorts));
6056 	}
6057 
6058 	kr = ipc_port_translate_receive(current_space(), rcv_name, &sright); /* does not produce port ref */
6059 	if (kr) {
6060 		return KERN_INVALID_RIGHT;
6061 	}
6062 
6063 	ip_reference(sright);
6064 	ip_mq_unlock(sright);
6065 
6066 	assert(sright != IPC_PORT_NULL);
6067 
6068 	lck_mtx_lock(&g_dyldinfo_mtx);
6069 	itk_lock(task);
6070 
6071 	if (task->itk_dyld_notify == NULL) {
6072 		itk_unlock(task);
6073 		lck_mtx_unlock(&g_dyldinfo_mtx);
6074 		ip_release(sright);
6075 		return KERN_FAILURE;
6076 	}
6077 
6078 	for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
6079 		portp = &task->itk_dyld_notify[slot];
6080 		if (*portp == sright) {
6081 			release_ports[release_count++] = *portp;
6082 			*portp = IPC_PORT_NULL;
6083 			port_found = true;
6084 		} else if ((*portp != IPC_PORT_NULL && !ip_active(*portp))) {
6085 			release_ports[release_count++] = *portp;
6086 			*portp = IPC_PORT_NULL;
6087 		}
6088 
6089 		if (*portp != IPC_PORT_NULL) {
6090 			active_count++;
6091 		}
6092 	}
6093 
6094 	task_dyld_process_info_update_helper(task, active_count,
6095 	    (vm_map_address_t)ports_addr, release_ports, release_count);
6096 	/* itk_lock, g_dyldinfo_mtx are unlocked upon return */
6097 
6098 	ip_release(sright);
6099 
6100 	return port_found ? KERN_SUCCESS : KERN_FAILURE;
6101 }
6102 
6103 /*
6104  *	task_power_info
6105  *
6106  *	Returns power stats for the task.
6107  *	Note: Called with task locked.
6108  */
6109 void
6110 task_power_info_locked(
6111 	task_t                  task,
6112 	task_power_info_t       info,
6113 	gpu_energy_data_t       ginfo,
6114 	task_power_info_v2_t    infov2,
6115 	uint64_t                *runnable_time)
6116 {
6117 	thread_t                thread;
6118 	ledger_amount_t         tmp;
6119 
6120 	uint64_t                runnable_time_sum = 0;
6121 
6122 	task_lock_assert_owned(task);
6123 
6124 	ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
6125 	    (ledger_amount_t *)&info->task_interrupt_wakeups, &tmp);
6126 	ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
6127 	    (ledger_amount_t *)&info->task_platform_idle_wakeups, &tmp);
6128 
6129 	info->task_timer_wakeups_bin_1 = task->task_timer_wakeups_bin_1;
6130 	info->task_timer_wakeups_bin_2 = task->task_timer_wakeups_bin_2;
6131 
6132 	info->total_user = task->total_user_time;
6133 	info->total_system = task->total_system_time;
6134 	runnable_time_sum = task->total_runnable_time;
6135 
6136 #if defined(__arm__) || defined(__arm64__)
6137 	if (infov2) {
6138 		infov2->task_energy = task->task_energy;
6139 	}
6140 #endif /* defined(__arm__) || defined(__arm64__) */
6141 
6142 	if (ginfo) {
6143 		ginfo->task_gpu_utilisation = task->task_gpu_ns;
6144 	}
6145 
6146 	if (infov2) {
6147 		infov2->task_ptime = task->total_ptime;
6148 		infov2->task_pset_switches = task->ps_switch;
6149 	}
6150 
6151 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
6152 		uint64_t        tval;
6153 		spl_t           x;
6154 
6155 		if (thread->options & TH_OPT_IDLE_THREAD) {
6156 			continue;
6157 		}
6158 
6159 		x = splsched();
6160 		thread_lock(thread);
6161 
6162 		info->task_timer_wakeups_bin_1 += thread->thread_timer_wakeups_bin_1;
6163 		info->task_timer_wakeups_bin_2 += thread->thread_timer_wakeups_bin_2;
6164 
6165 #if defined(__arm__) || defined(__arm64__)
6166 		if (infov2) {
6167 			infov2->task_energy += ml_energy_stat(thread);
6168 		}
6169 #endif /* defined(__arm__) || defined(__arm64__) */
6170 
6171 		tval = timer_grab(&thread->user_timer);
6172 		info->total_user += tval;
6173 
6174 		if (infov2) {
6175 			tval = timer_grab(&thread->ptime);
6176 			infov2->task_ptime += tval;
6177 			infov2->task_pset_switches += thread->ps_switch;
6178 		}
6179 
6180 		tval = timer_grab(&thread->system_timer);
6181 		if (thread->precise_user_kernel_time) {
6182 			info->total_system += tval;
6183 		} else {
6184 			/* system_timer may represent either sys or user */
6185 			info->total_user += tval;
6186 		}
6187 
6188 		tval = timer_grab(&thread->runnable_timer);
6189 
6190 		runnable_time_sum += tval;
6191 
6192 		if (ginfo) {
6193 			ginfo->task_gpu_utilisation += ml_gpu_stat(thread);
6194 		}
6195 		thread_unlock(thread);
6196 		splx(x);
6197 	}
6198 
6199 	if (runnable_time) {
6200 		*runnable_time = runnable_time_sum;
6201 	}
6202 }
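/*
 * [Editorial sketch, not in the original source] task_power_info_locked()
 * backs the TASK_POWER_INFO / TASK_POWER_INFO_V2 flavors of task_info().
 * A user-space sketch, assuming <mach/mach.h> plus <stdio.h> for printf:
 *
 *	task_power_info_v2_data_t power;
 *	mach_msg_type_number_t count = TASK_POWER_INFO_V2_COUNT;
 *
 *	if (task_info(mach_task_self(), TASK_POWER_INFO_V2,
 *	    (task_info_t)&power, &count) == KERN_SUCCESS) {
 *		printf("interrupt wakeups %llu, ptime %llu\n",
 *		    (unsigned long long)power.cpu_energy.task_interrupt_wakeups,
 *		    (unsigned long long)power.task_ptime);
 *	}
 */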
6203 
6204 /*
6205  *	task_gpu_utilisation
6206  *
6207  *	Returns the total GPU time used by all the threads of the task
6208  *	(both dead and alive).
6209  */
6210 uint64_t
6211 task_gpu_utilisation(
6212 	task_t  task)
6213 {
6214 	uint64_t gpu_time = 0;
6215 #if defined(__x86_64__)
6216 	thread_t thread;
6217 
6218 	task_lock(task);
6219 	gpu_time += task->task_gpu_ns;
6220 
6221 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
6222 		spl_t x;
6223 		x = splsched();
6224 		thread_lock(thread);
6225 		gpu_time += ml_gpu_stat(thread);
6226 		thread_unlock(thread);
6227 		splx(x);
6228 	}
6229 
6230 	task_unlock(task);
6231 #else /* defined(__x86_64__) */
6232 	/* silence compiler warning */
6233 	(void)task;
6234 #endif /* defined(__x86_64__) */
6235 	return gpu_time;
6236 }
6237 
6238 /*
6239  *	task_energy
6240  *
6241  *	Returns the total energy used by all the threads of the task
6242  *	(both dead and alive).
6243  */
6244 uint64_t
6245 task_energy(
6246 	task_t  task)
6247 {
6248 	uint64_t energy = 0;
6249 	thread_t thread;
6250 
6251 	task_lock(task);
6252 	energy += task->task_energy;
6253 
6254 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
6255 		spl_t x;
6256 		x = splsched();
6257 		thread_lock(thread);
6258 		energy += ml_energy_stat(thread);
6259 		thread_unlock(thread);
6260 		splx(x);
6261 	}
6262 
6263 	task_unlock(task);
6264 	return energy;
6265 }
6266 
6267 #if __AMP__
6268 
6269 uint64_t
6270 task_cpu_ptime(
6271 	task_t  task)
6272 {
6273 	uint64_t cpu_ptime = 0;
6274 	thread_t thread;
6275 
6276 	task_lock(task);
6277 	cpu_ptime += task->total_ptime;
6278 
6279 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
6280 		if (thread->options & TH_OPT_IDLE_THREAD) {
6281 			continue;
6282 		}
6283 		cpu_ptime += timer_grab(&thread->ptime);
6284 	}
6285 
6286 	task_unlock(task);
6287 	return cpu_ptime;
6288 }
6289 
6290 #else /* __AMP__ */
6291 
6292 uint64_t
6293 task_cpu_ptime(
6294 	__unused task_t  task)
6295 {
6296 	return 0;
6297 }
6298 
6299 #endif /* __AMP__ */
6300 
6301 /* This function updates the CPU time in the arrays for each
6302  * effective and requested QoS class.
6303  */
6304 void
6305 task_update_cpu_time_qos_stats(
6306 	task_t  task,
6307 	uint64_t *eqos_stats,
6308 	uint64_t *rqos_stats)
6309 {
6310 	if (!eqos_stats && !rqos_stats) {
6311 		return;
6312 	}
6313 
6314 	task_lock(task);
6315 	thread_t thread;
6316 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
6317 		if (thread->options & TH_OPT_IDLE_THREAD) {
6318 			continue;
6319 		}
6320 
6321 		thread_update_qos_cpu_time(thread);
6322 	}
6323 
6324 	if (eqos_stats) {
6325 		eqos_stats[THREAD_QOS_DEFAULT] += task->cpu_time_eqos_stats.cpu_time_qos_default;
6326 		eqos_stats[THREAD_QOS_MAINTENANCE] += task->cpu_time_eqos_stats.cpu_time_qos_maintenance;
6327 		eqos_stats[THREAD_QOS_BACKGROUND] += task->cpu_time_eqos_stats.cpu_time_qos_background;
6328 		eqos_stats[THREAD_QOS_UTILITY] += task->cpu_time_eqos_stats.cpu_time_qos_utility;
6329 		eqos_stats[THREAD_QOS_LEGACY] += task->cpu_time_eqos_stats.cpu_time_qos_legacy;
6330 		eqos_stats[THREAD_QOS_USER_INITIATED] += task->cpu_time_eqos_stats.cpu_time_qos_user_initiated;
6331 		eqos_stats[THREAD_QOS_USER_INTERACTIVE] += task->cpu_time_eqos_stats.cpu_time_qos_user_interactive;
6332 	}
6333 
6334 	if (rqos_stats) {
6335 		rqos_stats[THREAD_QOS_DEFAULT] += task->cpu_time_rqos_stats.cpu_time_qos_default;
6336 		rqos_stats[THREAD_QOS_MAINTENANCE] += task->cpu_time_rqos_stats.cpu_time_qos_maintenance;
6337 		rqos_stats[THREAD_QOS_BACKGROUND] += task->cpu_time_rqos_stats.cpu_time_qos_background;
6338 		rqos_stats[THREAD_QOS_UTILITY] += task->cpu_time_rqos_stats.cpu_time_qos_utility;
6339 		rqos_stats[THREAD_QOS_LEGACY] += task->cpu_time_rqos_stats.cpu_time_qos_legacy;
6340 		rqos_stats[THREAD_QOS_USER_INITIATED] += task->cpu_time_rqos_stats.cpu_time_qos_user_initiated;
6341 		rqos_stats[THREAD_QOS_USER_INTERACTIVE] += task->cpu_time_rqos_stats.cpu_time_qos_user_interactive;
6342 	}
6343 
6344 	task_unlock(task);
6345 }
6346 
6347 kern_return_t
6348 task_purgable_info(
6349 	task_t                  task,
6350 	task_purgable_info_t    *stats)
6351 {
6352 	if (task == TASK_NULL || stats == NULL) {
6353 		return KERN_INVALID_ARGUMENT;
6354 	}
6355 	/* Take task reference */
6356 	task_reference(task);
6357 	vm_purgeable_stats((vm_purgeable_info_t)stats, task);
6358 	/* Drop task reference */
6359 	task_deallocate(task);
6360 	return KERN_SUCCESS;
6361 }
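/*
 * [Editorial sketch, not in the original source] task_purgable_info() only
 * needs a task port and a stats buffer; the field layout is filled in by
 * vm_purgeable_stats(), so this sketch just checks the return code.
 * Assumes <mach/mach.h>:
 *
 *	task_purgable_info_t stats;
 *
 *	if (task_purgable_info(mach_task_self(), &stats) == KERN_SUCCESS) {
 *		// stats now holds per-volatility-bucket object/byte counts
 *	}
 */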
6362 
6363 void
6364 task_vtimer_set(
6365 	task_t          task,
6366 	integer_t       which)
6367 {
6368 	thread_t        thread;
6369 	spl_t           x;
6370 
6371 	task_lock(task);
6372 
6373 	task->vtimers |= which;
6374 
6375 	switch (which) {
6376 	case TASK_VTIMER_USER:
6377 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
6378 			x = splsched();
6379 			thread_lock(thread);
6380 			if (thread->precise_user_kernel_time) {
6381 				thread->vtimer_user_save = timer_grab(&thread->user_timer);
6382 			} else {
6383 				thread->vtimer_user_save = timer_grab(&thread->system_timer);
6384 			}
6385 			thread_unlock(thread);
6386 			splx(x);
6387 		}
6388 		break;
6389 
6390 	case TASK_VTIMER_PROF:
6391 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
6392 			x = splsched();
6393 			thread_lock(thread);
6394 			thread->vtimer_prof_save = timer_grab(&thread->user_timer);
6395 			thread->vtimer_prof_save += timer_grab(&thread->system_timer);
6396 			thread_unlock(thread);
6397 			splx(x);
6398 		}
6399 		break;
6400 
6401 	case TASK_VTIMER_RLIM:
6402 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
6403 			x = splsched();
6404 			thread_lock(thread);
6405 			thread->vtimer_rlim_save = timer_grab(&thread->user_timer);
6406 			thread->vtimer_rlim_save += timer_grab(&thread->system_timer);
6407 			thread_unlock(thread);
6408 			splx(x);
6409 		}
6410 		break;
6411 	}
6412 
6413 	task_unlock(task);
6414 }
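/*
 * [Editorial note, not in the original source] The usual way these vtimers
 * get armed is the BSD setitimer() path: ITIMER_VIRTUAL maps to
 * TASK_VTIMER_USER, ITIMER_PROF to TASK_VTIMER_PROF, and RLIMIT_CPU
 * enforcement uses TASK_VTIMER_RLIM.  A user-space sketch, assuming
 * <sys/time.h>:
 *
 *	struct itimerval itv = {
 *		.it_interval = { 0, 0 },
 *		.it_value    = { 1, 0 },   // fire after 1s of user CPU time
 *	};
 *
 *	setitimer(ITIMER_VIRTUAL, &itv, NULL);
 */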
6415 
6416 void
6417 task_vtimer_clear(
6418 	task_t          task,
6419 	integer_t       which)
6420 {
6421 	assert(task == current_task());
6422 
6423 	task_lock(task);
6424 
6425 	task->vtimers &= ~which;
6426 
6427 	task_unlock(task);
6428 }
6429 
6430 void
6431 task_vtimer_update(
6432 	__unused
6433 	task_t          task,
6434 	integer_t       which,
6435 	uint32_t        *microsecs)
6436 {
6437 	thread_t        thread = current_thread();
6438 	uint32_t        tdelt = 0;
6439 	clock_sec_t     secs = 0;
6440 	uint64_t        tsum;
6441 
6442 	assert(task == current_task());
6443 
6444 	spl_t s = splsched();
6445 	thread_lock(thread);
6446 
6447 	if ((task->vtimers & which) != (uint32_t)which) {
6448 		thread_unlock(thread);
6449 		splx(s);
6450 		return;
6451 	}
6452 
6453 	switch (which) {
6454 	case TASK_VTIMER_USER:
6455 		if (thread->precise_user_kernel_time) {
6456 			tdelt = (uint32_t)timer_delta(&thread->user_timer,
6457 			    &thread->vtimer_user_save);
6458 		} else {
6459 			tdelt = (uint32_t)timer_delta(&thread->system_timer,
6460 			    &thread->vtimer_user_save);
6461 		}
6462 		absolutetime_to_microtime(tdelt, &secs, microsecs);
6463 		break;
6464 
6465 	case TASK_VTIMER_PROF:
6466 		tsum = timer_grab(&thread->user_timer);
6467 		tsum += timer_grab(&thread->system_timer);
6468 		tdelt = (uint32_t)(tsum - thread->vtimer_prof_save);
6469 		absolutetime_to_microtime(tdelt, &secs, microsecs);
6470 		/* if the time delta is smaller than a usec, ignore */
6471 		if (*microsecs != 0) {
6472 			thread->vtimer_prof_save = tsum;
6473 		}
6474 		break;
6475 
6476 	case TASK_VTIMER_RLIM:
6477 		tsum = timer_grab(&thread->user_timer);
6478 		tsum += timer_grab(&thread->system_timer);
6479 		tdelt = (uint32_t)(tsum - thread->vtimer_rlim_save);
6480 		thread->vtimer_rlim_save = tsum;
6481 		absolutetime_to_microtime(tdelt, &secs, microsecs);
6482 		break;
6483 	}
6484 
6485 	thread_unlock(thread);
6486 	splx(s);
6487 }
6488 
6489 /*
6490  *	task_assign:
6491  *
6492  *	Change the assigned processor set for the task
6493  */
6494 kern_return_t
6495 task_assign(
6496 	__unused task_t         task,
6497 	__unused processor_set_t        new_pset,
6498 	__unused boolean_t      assign_threads)
6499 {
6500 	return KERN_FAILURE;
6501 }
6502 
6503 /*
6504  *	task_assign_default:
6505  *
6506  *	Version of task_assign to assign to default processor set.
6507  */
6508 kern_return_t
6509 task_assign_default(
6510 	task_t          task,
6511 	boolean_t       assign_threads)
6512 {
6513 	return task_assign(task, &pset0, assign_threads);
6514 }
6515 
6516 /*
6517  *	task_get_assignment
6518  *
6519  *	Return name of processor set that task is assigned to.
6520  */
6521 kern_return_t
6522 task_get_assignment(
6523 	task_t          task,
6524 	processor_set_t *pset)
6525 {
6526 	if (!task || !task->active) {
6527 		return KERN_FAILURE;
6528 	}
6529 
6530 	*pset = &pset0;
6531 
6532 	return KERN_SUCCESS;
6533 }
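/*
 * [Editorial sketch, not in the original source] Since xnu exposes a single
 * processor set, task_get_assignment() always reports the default pset
 * (pset0) for a live task.  Assumes <mach/mach.h>:
 *
 *	processor_set_name_t pset = PROCESSOR_SET_NULL;
 *
 *	if (task_get_assignment(mach_task_self(), &pset) == KERN_SUCCESS) {
 *		// pset names the default processor set
 *	}
 */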
6534 
6535 uint64_t
6536 get_task_dispatchqueue_offset(
6537 	task_t          task)
6538 {
6539 	return task->dispatchqueue_offset;
6540 }
6541 
6542 /*
6543  *      task_policy
6544  *
6545  *	Set scheduling policy and parameters, both base and limit, for
6546  *	the given task. Policy must be a policy which is enabled for the
6547  *	processor set. Change contained threads if requested.
6548  */
6549 kern_return_t
6550 task_policy(
6551 	__unused task_t                 task,
6552 	__unused policy_t                       policy_id,
6553 	__unused policy_base_t          base,
6554 	__unused mach_msg_type_number_t count,
6555 	__unused boolean_t                      set_limit,
6556 	__unused boolean_t                      change)
6557 {
6558 	return KERN_FAILURE;
6559 }
6560 
6561 /*
6562  *	task_set_policy
6563  *
6564  *	Set scheduling policy and parameters, both base and limit, for
6565  *	the given task. Policy can be any policy implemented by the
6566  *	processor set, whether enabled or not. Change contained threads
6567  *	if requested.
6568  */
6569 kern_return_t
6570 task_set_policy(
6571 	__unused task_t                 task,
6572 	__unused processor_set_t                pset,
6573 	__unused policy_t                       policy_id,
6574 	__unused policy_base_t          base,
6575 	__unused mach_msg_type_number_t base_count,
6576 	__unused policy_limit_t         limit,
6577 	__unused mach_msg_type_number_t limit_count,
6578 	__unused boolean_t                      change)
6579 {
6580 	return KERN_FAILURE;
6581 }
6582 
6583 kern_return_t
6584 task_set_ras_pc(
6585 	__unused task_t task,
6586 	__unused vm_offset_t    pc,
6587 	__unused vm_offset_t    endpc)
6588 {
6589 	return KERN_FAILURE;
6590 }
6591 
6592 void
6593 task_synchronizer_destroy_all(task_t task)
6594 {
6595 	/*
6596 	 *  Destroy owned semaphores
6597 	 */
6598 	semaphore_destroy_all(task);
6599 }
6600 
6601 /*
6602  * Install default (machine-dependent) initial thread state
6603  * on the task.  Subsequent thread creation will have this initial
6604  * state set on the thread by machine_thread_inherit_taskwide().
6605  * Flavors and structures are exactly the same as those passed to thread_set_state().
6606  */
6607 kern_return_t
6608 task_set_state(
6609 	task_t task,
6610 	int flavor,
6611 	thread_state_t state,
6612 	mach_msg_type_number_t state_count)
6613 {
6614 	kern_return_t ret;
6615 
6616 	if (task == TASK_NULL) {
6617 		return KERN_INVALID_ARGUMENT;
6618 	}
6619 
6620 	task_lock(task);
6621 
6622 	if (!task->active) {
6623 		task_unlock(task);
6624 		return KERN_FAILURE;
6625 	}
6626 
6627 	ret = machine_task_set_state(task, flavor, state, state_count);
6628 
6629 	task_unlock(task);
6630 	return ret;
6631 }
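/*
 * Editor's sketch (hypothetical in-kernel caller, not part of this
 * file): seed every subsequently created thread of a task with a
 * state template.  The debug-state flavor and count below are
 * illustrative assumptions; any thread_set_state() flavor works.
 *
 *	arm_debug_state64_t dbg = { 0 };
 *	kern_return_t kr = task_set_state(task, ARM_DEBUG_STATE64,
 *	    (thread_state_t)&dbg, ARM_DEBUG_STATE64_COUNT);
 */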
6632 
6633 /*
6634  * Examine the default (machine-dependent) initial thread state
6635  * on the task, as set by task_set_state().  Flavors and structures
6636  * are exactly the same as those passed to thread_get_state().
6637  */
6638 kern_return_t
6639 task_get_state(
6640 	task_t  task,
6641 	int     flavor,
6642 	thread_state_t state,
6643 	mach_msg_type_number_t *state_count)
6644 {
6645 	kern_return_t ret;
6646 
6647 	if (task == TASK_NULL) {
6648 		return KERN_INVALID_ARGUMENT;
6649 	}
6650 
6651 	task_lock(task);
6652 
6653 	if (!task->active) {
6654 		task_unlock(task);
6655 		return KERN_FAILURE;
6656 	}
6657 
6658 	ret = machine_task_get_state(task, flavor, state, state_count);
6659 
6660 	task_unlock(task);
6661 	return ret;
6662 }
6663 
6664 
6665 static kern_return_t __attribute__((noinline, not_tail_called))
6666 PROC_VIOLATED_GUARD__SEND_EXC_GUARD_AND_SUSPEND(
6667 	mach_exception_code_t code,
6668 	mach_exception_subcode_t subcode,
6669 	void *reason)
6670 {
6671 #ifdef MACH_BSD
6672 	if (1 == proc_selfpid()) {
6673 		return KERN_NOT_SUPPORTED;              // initproc is immune
6674 	}
6675 #endif
6676 	mach_exception_data_type_t codes[EXCEPTION_CODE_MAX] = {
6677 		[0] = code,
6678 		[1] = subcode,
6679 	};
6680 	task_t task = current_task();
6681 	kern_return_t kr;
6682 
6683 	/* (See jetsam-related comments below) */
6684 
6685 	proc_memstat_skip(task->bsd_info, TRUE);
6686 	kr = task_enqueue_exception_with_corpse(task, EXC_GUARD, codes, 2, reason);
6687 	proc_memstat_skip(task->bsd_info, FALSE);
6688 	return kr;
6689 }
6690 
6691 kern_return_t
6692 task_violated_guard(
6693 	mach_exception_code_t code,
6694 	mach_exception_subcode_t subcode,
6695 	void *reason)
6696 {
6697 	return PROC_VIOLATED_GUARD__SEND_EXC_GUARD_AND_SUSPEND(code, subcode, reason);
6698 }
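/*
 * Editor's sketch (hypothetical caller): raise a guard violation for
 * the current task.  The encoding macro and guard type here are
 * illustrative assumptions; real encodings are guard-type specific.
 *
 *	mach_exception_code_t code = 0;
 *	EXC_GUARD_ENCODE_TYPE(code, GUARD_TYPE_MACH_PORT);
 *	(void)task_violated_guard(code, 0, NULL);
 */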
6699 
6700 
6701 #if CONFIG_MEMORYSTATUS
6702 
6703 boolean_t
6704 task_get_memlimit_is_active(task_t task)
6705 {
6706 	assert(task != NULL);
6707 
6708 	if (task->memlimit_is_active == 1) {
6709 		return TRUE;
6710 	} else {
6711 		return FALSE;
6712 	}
6713 }
6714 
6715 void
6716 task_set_memlimit_is_active(task_t task, boolean_t memlimit_is_active)
6717 {
6718 	assert(task != NULL);
6719 
6720 	if (memlimit_is_active) {
6721 		task->memlimit_is_active = 1;
6722 	} else {
6723 		task->memlimit_is_active = 0;
6724 	}
6725 }
6726 
6727 boolean_t
6728 task_get_memlimit_is_fatal(task_t task)
6729 {
6730 	assert(task != NULL);
6731 
6732 	if (task->memlimit_is_fatal == 1) {
6733 		return TRUE;
6734 	} else {
6735 		return FALSE;
6736 	}
6737 }
6738 
6739 void
6740 task_set_memlimit_is_fatal(task_t task, boolean_t memlimit_is_fatal)
6741 {
6742 	assert(task != NULL);
6743 
6744 	if (memlimit_is_fatal) {
6745 		task->memlimit_is_fatal = 1;
6746 	} else {
6747 		task->memlimit_is_fatal = 0;
6748 	}
6749 }
6750 
6751 uint64_t
6752 task_get_dirty_start(task_t task)
6753 {
6754 	return task->memstat_dirty_start;
6755 }
6756 
6757 void
6758 task_set_dirty_start(task_t task, uint64_t start)
6759 {
6760 	task_lock(task);
6761 	task->memstat_dirty_start = start;
6762 	task_unlock(task);
6763 }
6764 
6765 boolean_t
6766 task_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active)
6767 {
6768 	boolean_t triggered = FALSE;
6769 
6770 	assert(task == current_task());
6771 
6772 	/*
6773 	 * Returns TRUE if the task has already triggered an exc_resource exception.
6774 	 */
6775 
6776 	if (memlimit_is_active) {
6777 		triggered = (task->memlimit_active_exc_resource ? TRUE : FALSE);
6778 	} else {
6779 		triggered = (task->memlimit_inactive_exc_resource ? TRUE : FALSE);
6780 	}
6781 
6782 	return triggered;
6783 }
6784 
6785 void
6786 task_mark_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active)
6787 {
6788 	assert(task == current_task());
6789 
6790 	/*
6791 	 * We allow one exc_resource per process per active/inactive limit.
6792 	 * The limit's fatal attribute does not come into play.
6793 	 */
6794 
6795 	if (memlimit_is_active) {
6796 		task->memlimit_active_exc_resource = 1;
6797 	} else {
6798 		task->memlimit_inactive_exc_resource = 1;
6799 	}
6800 }
6801 
6802 #define HWM_USERCORE_MINSPACE 250 // free space (in MB) required *after* core file creation
6803 
6804 void __attribute__((noinline))
6805 PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, boolean_t is_fatal)
6806 {
6807 	task_t                                          task            = current_task();
6808 	int                                                     pid         = 0;
6809 	const char                                      *procname       = "unknown";
6810 	mach_exception_data_type_t      code[EXCEPTION_CODE_MAX];
6811 	boolean_t send_sync_exc_resource = FALSE;
6812 
6813 #ifdef MACH_BSD
6814 	pid = proc_selfpid();
6815 
6816 	if (pid == 1) {
6817 		/*
6818 		 * Cannot have ReportCrash analyzing
6819 		 * a suspended initproc.
6820 		 */
6821 		return;
6822 	}
6823 
6824 	if (task->bsd_info != NULL) {
6825 		procname = proc_name_address(current_task()->bsd_info);
6826 		send_sync_exc_resource = proc_send_synchronous_EXC_RESOURCE(current_task()->bsd_info);
6827 	}
6828 #endif
6829 #if CONFIG_COREDUMP
6830 	if (hwm_user_cores) {
6831 		int                             error;
6832 		uint64_t                starttime, end;
6833 		clock_sec_t             secs = 0;
6834 		uint32_t                microsecs = 0;
6835 
6836 		starttime = mach_absolute_time();
6837 		/*
6838 		 * Trigger a coredump of this process. Don't proceed unless we know we won't
6839 		 * be filling up the disk, and ignore the core size resource limit for this
6840 		 * core file.
6841 		 */
6842 		if ((error = coredump(current_task()->bsd_info, HWM_USERCORE_MINSPACE, COREDUMP_IGNORE_ULIMIT)) != 0) {
6843 			printf("couldn't take coredump of %s[%d]: %d\n", procname, pid, error);
6844 		}
6845 		/*
6846 		 * coredump() leaves the task suspended.
6847 		 */
6848 		task_resume_internal(current_task());
6849 
6850 		end = mach_absolute_time();
6851 		absolutetime_to_microtime(end - starttime, &secs, &microsecs);
6852 		printf("coredump of %s[%d] taken in %d secs %d microsecs\n",
6853 		    proc_name_address(current_task()->bsd_info), pid, (int)secs, microsecs);
6854 	}
6855 #endif /* CONFIG_COREDUMP */
6856 
6857 	if (disable_exc_resource) {
6858 		printf("process %s[%d] crossed memory high watermark (%d MB); EXC_RESOURCE "
6859 		    "suppressed by a boot-arg.\n", procname, pid, max_footprint_mb);
6860 		return;
6861 	}
6862 
6863 	/*
6864 	 * A task that has triggered an EXC_RESOURCE should not be
6865 	 * jetsammed when the device is under memory pressure.  Here
6866 	 * we set the P_MEMSTAT_SKIP flag so that the process
6867 	 * will be skipped if the memorystatus_thread wakes up.
6868 	 *
6869 	 * This is a debugging aid to ensure we can get a corpse before
6870 	 * the jetsam thread kills the process.
6871 	 * Note that proc_memstat_skip is a no-op on release kernels.
6872 	 */
6873 	proc_memstat_skip(current_task()->bsd_info, TRUE);
6874 
6875 	code[0] = code[1] = 0;
6876 	EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_MEMORY);
6877 	EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_HIGH_WATERMARK);
6878 	EXC_RESOURCE_HWM_ENCODE_LIMIT(code[0], max_footprint_mb);
6879 
6880 	/*
6881 	 * Do not generate a corpse fork if the violation is a fatal one
6882 	 * or the process wants synchronous EXC_RESOURCE exceptions.
6883 	 */
6884 	if (is_fatal || send_sync_exc_resource || !exc_via_corpse_forking) {
6885 		/* Do not send a EXC_RESOURCE if corpse_for_fatal_memkill is set */
6886 		if (send_sync_exc_resource || !corpse_for_fatal_memkill) {
6887 			/*
6888 			 * Use the _internal_ variant so that no user-space
6889 			 * process can resume our task from under us.
6890 			 */
6891 			task_suspend_internal(task);
6892 			exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
6893 			task_resume_internal(task);
6894 		}
6895 	} else {
6896 		if (audio_active) {
6897 			printf("process %s[%d] crossed memory high watermark (%d MB); EXC_RESOURCE "
6898 			    "suppressed due to audio playback.\n", procname, pid, max_footprint_mb);
6899 		} else {
6900 			task_enqueue_exception_with_corpse(task, EXC_RESOURCE,
6901 			    code, EXCEPTION_CODE_MAX, NULL);
6902 		}
6903 	}
6904 
6905 	/*
6906 	 * After the EXC_RESOURCE has been handled, we must clear the
6907 	 * P_MEMSTAT_SKIP flag so that the process can again be
6908 	 * considered for jetsam if the memorystatus_thread wakes up.
6909 	 */
6910 	proc_memstat_skip(current_task()->bsd_info, FALSE);         /* clear the flag */
6911 }
6912 
6913 /*
6914  * Callback invoked when a task exceeds its physical footprint limit.
6915  */
6916 void
6917 task_footprint_exceeded(int warning, __unused const void *param0, __unused const void *param1)
6918 {
6919 	ledger_amount_t max_footprint, max_footprint_mb;
6920 	task_t task;
6921 	boolean_t is_warning;
6922 	boolean_t memlimit_is_active;
6923 	boolean_t memlimit_is_fatal;
6924 
6925 	if (warning == LEDGER_WARNING_DIPPED_BELOW) {
6926 		/*
6927 		 * Task memory limits only provide a warning on the way up.
6928 		 */
6929 		return;
6930 	} else if (warning == LEDGER_WARNING_ROSE_ABOVE) {
6931 		/*
6932 		 * This task is in danger of violating a memory limit;
6933 		 * it has exceeded a warning percentage of the limit.
6934 		 */
6935 		is_warning = TRUE;
6936 	} else {
6937 		/*
6938 		 * The task has exceeded the physical footprint limit.
6939 		 * This is not a warning but a true limit violation.
6940 		 */
6941 		is_warning = FALSE;
6942 	}
6943 
6944 	task = current_task();
6945 
6946 	ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &max_footprint);
6947 	max_footprint_mb = max_footprint >> 20;
6948 
6949 	memlimit_is_active = task_get_memlimit_is_active(task);
6950 	memlimit_is_fatal = task_get_memlimit_is_fatal(task);
6951 
6952 	/*
6953 	 * If this is an actual violation (not a warning), generate an EXC_RESOURCE exception.
6954 	 * We only generate the exception once per process per memlimit (active/inactive limit).
6955 	 * To enforce this, we monitor state based on the memlimit's active/inactive attribute
6956 	 * and disable further exceptions by marking that memlimit as exception-triggered.
6957 	 */
6958 	if ((is_warning == FALSE) && (!task_has_triggered_exc_resource(task, memlimit_is_active))) {
6959 		PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND((int)max_footprint_mb, memlimit_is_fatal);
6960 		memorystatus_log_exception((int)max_footprint_mb, memlimit_is_active, memlimit_is_fatal);
6961 		task_mark_has_triggered_exc_resource(task, memlimit_is_active);
6962 	}
6963 
6964 	memorystatus_on_ledger_footprint_exceeded(is_warning, memlimit_is_active, memlimit_is_fatal);
6965 }
6966 
6967 extern int proc_check_footprint_priv(void);
6968 
6969 kern_return_t
6970 task_set_phys_footprint_limit(
6971 	task_t task,
6972 	int new_limit_mb,
6973 	int *old_limit_mb)
6974 {
6975 	kern_return_t error;
6976 
6977 	boolean_t memlimit_is_active;
6978 	boolean_t memlimit_is_fatal;
6979 
6980 	if ((error = proc_check_footprint_priv())) {
6981 		return KERN_NO_ACCESS;
6982 	}
6983 
6984 	/*
6985 	 * This call should probably be obsoleted.
6986 	 * But for now, we default to current state.
6987 	 */
6988 	memlimit_is_active = task_get_memlimit_is_active(task);
6989 	memlimit_is_fatal = task_get_memlimit_is_fatal(task);
6990 
6991 	return task_set_phys_footprint_limit_internal(task, new_limit_mb, old_limit_mb, memlimit_is_active, memlimit_is_fatal);
6992 }
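/*
 * Editor's sketch (hypothetical caller): cap the current task at a
 * 512 MB footprint, remembering the previous cap.  Passing -1 as the
 * new limit would remove the cap again (see below).
 *
 *	int old_limit_mb = 0;
 *	kern_return_t kr = task_set_phys_footprint_limit(current_task(),
 *	    512, &old_limit_mb);
 */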
6993 
6994 kern_return_t
6995 task_convert_phys_footprint_limit(
6996 	int limit_mb,
6997 	int *converted_limit_mb)
6998 {
6999 	if (limit_mb == -1) {
7000 		/*
7001 		 * No limit
7002 		 */
7003 		if (max_task_footprint != 0) {
7004 			*converted_limit_mb = (int)(max_task_footprint / 1024 / 1024);         /* bytes to MB */
7005 		} else {
7006 			*converted_limit_mb = (int)(LEDGER_LIMIT_INFINITY >> 20);
7007 		}
7008 	} else {
7009 		/* nothing to convert */
7010 		*converted_limit_mb = limit_mb;
7011 	}
7012 	return KERN_SUCCESS;
7013 }
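/*
 * Editor's note, worked example: with no global max_task_footprint
 * configured, task_convert_phys_footprint_limit(-1, &mb) yields
 * mb == (int)(LEDGER_LIMIT_INFINITY >> 20), i.e. "no limit"; any
 * other input is returned unchanged.
 */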
7014 
7015 
7016 kern_return_t
7017 task_set_phys_footprint_limit_internal(
7018 	task_t task,
7019 	int new_limit_mb,
7020 	int *old_limit_mb,
7021 	boolean_t memlimit_is_active,
7022 	boolean_t memlimit_is_fatal)
7023 {
7024 	ledger_amount_t old;
7025 	kern_return_t ret;
7026 
7027 	ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &old);
7028 
7029 	if (ret != KERN_SUCCESS) {
7030 		return ret;
7031 	}
7032 
7033 	/*
7034 	 * Check that limit >> 20 will not give an "unexpected" 32-bit
7035 	 * result. There are, however, implicit assumptions that a -1 MB limit
7036 	 * equates to LEDGER_LIMIT_INFINITY.
7037 	 */
7038 	assert(((old & 0xFFF0000000000000LL) == 0) || (old == LEDGER_LIMIT_INFINITY));
7039 
7040 	if (old_limit_mb) {
7041 		*old_limit_mb = (int)(old >> 20);
7042 	}
7043 
7044 	if (new_limit_mb == -1) {
7045 		/*
7046 		 * Caller wishes to remove the limit.
7047 		 */
7048 		ledger_set_limit(task->ledger, task_ledgers.phys_footprint,
7049 		    max_task_footprint ? max_task_footprint : LEDGER_LIMIT_INFINITY,
7050 		    max_task_footprint ? (uint8_t)max_task_footprint_warning_level : 0);
7051 
7052 		task_lock(task);
7053 		task_set_memlimit_is_active(task, memlimit_is_active);
7054 		task_set_memlimit_is_fatal(task, memlimit_is_fatal);
7055 		task_unlock(task);
7056 
7057 		return KERN_SUCCESS;
7058 	}
7059 
7060 #ifdef CONFIG_NOMONITORS
7061 	return KERN_SUCCESS;
7062 #endif /* CONFIG_NOMONITORS */
7063 
7064 	task_lock(task);
7065 
7066 	if ((memlimit_is_active == task_get_memlimit_is_active(task)) &&
7067 	    (memlimit_is_fatal == task_get_memlimit_is_fatal(task)) &&
7068 	    (((ledger_amount_t)new_limit_mb << 20) == old)) {
7069 		/*
7070 		 * memlimit state is not changing
7071 		 */
7072 		task_unlock(task);
7073 		return KERN_SUCCESS;
7074 	}
7075 
7076 	task_set_memlimit_is_active(task, memlimit_is_active);
7077 	task_set_memlimit_is_fatal(task, memlimit_is_fatal);
7078 
7079 	ledger_set_limit(task->ledger, task_ledgers.phys_footprint,
7080 	    (ledger_amount_t)new_limit_mb << 20, PHYS_FOOTPRINT_WARNING_LEVEL);
7081 
7082 	if (task == current_task()) {
7083 		ledger_check_new_balance(current_thread(), task->ledger,
7084 		    task_ledgers.phys_footprint);
7085 	}
7086 
7087 	task_unlock(task);
7088 
7089 	return KERN_SUCCESS;
7090 }
7091 
7092 kern_return_t
7093 task_get_phys_footprint_limit(
7094 	task_t task,
7095 	int *limit_mb)
7096 {
7097 	ledger_amount_t limit;
7098 	kern_return_t ret;
7099 
7100 	ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &limit);
7101 	if (ret != KERN_SUCCESS) {
7102 		return ret;
7103 	}
7104 
7105 	/*
7106 	 * Check that limit >> 20 will not give an "unexpected" signed, 32-bit
7107 	 * result. There are, however, implicit assumptions that a -1 MB limit
7108 	 * equates to LEDGER_LIMIT_INFINITY.
7109 	 */
7110 	assert(((limit & 0xFFF0000000000000LL) == 0) || (limit == LEDGER_LIMIT_INFINITY));
7111 	*limit_mb = (int)(limit >> 20);
7112 
7113 	return KERN_SUCCESS;
7114 }
7115 #else /* CONFIG_MEMORYSTATUS */
7116 kern_return_t
7117 task_set_phys_footprint_limit(
7118 	__unused task_t task,
7119 	__unused int new_limit_mb,
7120 	__unused int *old_limit_mb)
7121 {
7122 	return KERN_FAILURE;
7123 }
7124 
7125 kern_return_t
7126 task_get_phys_footprint_limit(
7127 	__unused task_t task,
7128 	__unused int *limit_mb)
7129 {
7130 	return KERN_FAILURE;
7131 }
7132 #endif /* CONFIG_MEMORYSTATUS */
7133 
7134 security_token_t *
7135 task_get_sec_token(task_t task)
7136 {
7137 	return &task_get_ro(task)->task_tokens.sec_token;
7138 }
7139 
7140 void
7141 task_set_sec_token(task_t task, security_token_t *token)
7142 {
7143 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7144 	    task_tokens.sec_token, token);
7145 }
7146 
7147 audit_token_t *
7148 task_get_audit_token(task_t task)
7149 {
7150 	return &task_get_ro(task)->task_tokens.audit_token;
7151 }
7152 
7153 void
7154 task_set_audit_token(task_t task, audit_token_t *token)
7155 {
7156 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7157 	    task_tokens.audit_token, token);
7158 }
7159 
7160 void
7161 task_set_tokens(task_t task, security_token_t *sec_token, audit_token_t *audit_token)
7162 {
7163 	struct task_token_ro_data tokens;
7164 
7165 	tokens = task_get_ro(task)->task_tokens;
7166 	tokens.sec_token = *sec_token;
7167 	tokens.audit_token = *audit_token;
7168 
7169 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task), task_tokens,
7170 	    &tokens);
7171 }
7172 
7173 boolean_t
7174 task_is_privileged(task_t task)
7175 {
7176 	return task_get_sec_token(task)->val[0] == 0;
7177 }
7178 
7179 #ifdef CONFIG_MACF
7180 uint8_t *
7181 task_get_mach_trap_filter_mask(task_t task)
7182 {
7183 	return task_get_ro(task)->task_filters.mach_trap_filter_mask;
7184 }
7185 
7186 void
7187 task_set_mach_trap_filter_mask(task_t task, uint8_t *mask)
7188 {
7189 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7190 	    task_filters.mach_trap_filter_mask, &mask);
7191 }
7192 
7193 uint8_t *
7194 task_get_mach_kobj_filter_mask(task_t task)
7195 {
7196 	return task_get_ro(task)->task_filters.mach_kobj_filter_mask;
7197 }
7198 
7199 void
7200 task_set_mach_kobj_filter_mask(task_t task, uint8_t *mask)
7201 {
7202 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7203 	    task_filters.mach_kobj_filter_mask, &mask);
7204 }
7205 
7206 void
7207 task_copy_filter_masks(task_t new_task, task_t old_task)
7208 {
7209 	struct task_filter_ro_data filters;
7210 
7211 	filters = task_get_ro(new_task)->task_filters;
7212 	filters.mach_trap_filter_mask = task_get_mach_trap_filter_mask(old_task);
7213 	filters.mach_kobj_filter_mask = task_get_mach_kobj_filter_mask(old_task);
7214 
7215 	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(new_task),
7216 	    task_filters, &filters);
7217 }
7218 #endif /* CONFIG_MACF */
7219 
7220 void
7221 task_set_thread_limit(task_t task, uint16_t thread_limit)
7222 {
7223 	assert(task != kernel_task);
7224 	if (thread_limit <= TASK_MAX_THREAD_LIMIT) {
7225 		task_lock(task);
7226 		task->task_thread_limit = thread_limit;
7227 		task_unlock(task);
7228 	}
7229 }
7230 
7231 #if CONFIG_PROC_RESOURCE_LIMITS
7232 kern_return_t
7233 task_set_port_space_limits(task_t task, uint32_t soft_limit, uint32_t hard_limit)
7234 {
7235 	return ipc_space_set_table_size_limits(task->itk_space, soft_limit, hard_limit);
7236 }
7237 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
7238 
7239 #if XNU_TARGET_OS_OSX
7240 boolean_t
7241 task_has_system_version_compat_enabled(task_t task)
7242 {
7243 	boolean_t enabled = FALSE;
7244 
7245 	task_lock(task);
7246 	enabled = (task->t_flags & TF_SYS_VERSION_COMPAT);
7247 	task_unlock(task);
7248 
7249 	return enabled;
7250 }
7251 
7252 void
7253 task_set_system_version_compat_enabled(task_t task, boolean_t enable_system_version_compat)
7254 {
7255 	assert(task == current_task());
7256 	assert(task != kernel_task);
7257 
7258 	task_lock(task);
7259 	if (enable_system_version_compat) {
7260 		task->t_flags |= TF_SYS_VERSION_COMPAT;
7261 	} else {
7262 		task->t_flags &= ~TF_SYS_VERSION_COMPAT;
7263 	}
7264 	task_unlock(task);
7265 }
7266 #endif /* XNU_TARGET_OS_OSX */
7267 
7268 /*
7269  * We need to export some functions to other components that
7270  * are currently implemented in macros within the osfmk
7271  * component.  Just export them as functions of the same name.
7272  */
7273 boolean_t
7274 is_kerneltask(task_t t)
7275 {
7276 	if (t == kernel_task) {
7277 		return TRUE;
7278 	}
7279 
7280 	return FALSE;
7281 }
7282 
7283 boolean_t
7284 is_corpsetask(task_t t)
7285 {
7286 	return task_is_a_corpse(t);
7287 }
7288 
7289 boolean_t
7290 is_corpsefork(task_t t)
7291 {
7292 	return task_is_a_corpse_fork(t);
7293 }
7294 
7295 task_t
7296 current_task_early(void)
7297 {
7298 	if (__improbable(startup_phase < STARTUP_SUB_EARLY_BOOT)) {
7299 		if (current_thread()->t_tro == NULL) {
7300 			return TASK_NULL;
7301 		}
7302 	}
7303 	return get_threadtask(current_thread());
7304 }
7305 
7306 task_t
7307 current_task(void)
7308 {
7309 	return get_threadtask(current_thread());
7310 }
7311 
7312 /* defined in bsd/kern/kern_prot.c */
7313 extern int get_audit_token_pid(audit_token_t *audit_token);
7314 
7315 int
7316 task_pid(task_t task)
7317 {
7318 	if (task) {
7319 		return get_audit_token_pid(task_get_audit_token(task));
7320 	}
7321 	return -1;
7322 }
7323 
7324 #if __has_feature(ptrauth_calls)
7325 /*
7326  * Get the shared region id and jop signing key for the task.
7327  * The function will allocate a kalloc buffer and return
7328  * it to the caller; the caller must free it. This is used
7329  * for getting the information via the task port.
7330  */
7331 char *
7332 task_get_vm_shared_region_id_and_jop_pid(task_t task, uint64_t *jop_pid)
7333 {
7334 	size_t len;
7335 	char *shared_region_id = NULL;
7336 
7337 	task_lock(task);
7338 	if (task->shared_region_id == NULL) {
7339 		task_unlock(task);
7340 		return NULL;
7341 	}
7342 	len = strlen(task->shared_region_id) + 1;
7343 
7344 	/* don't hold task lock while allocating */
7345 	task_unlock(task);
7346 	shared_region_id = kalloc_data(len, Z_WAITOK);
7347 	task_lock(task);
7348 
7349 	if (task->shared_region_id == NULL) {
7350 		task_unlock(task);
7351 		kfree_data(shared_region_id, len);
7352 		return NULL;
7353 	}
7354 	assert(len == strlen(task->shared_region_id) + 1);         /* should never change */
7355 	strlcpy(shared_region_id, task->shared_region_id, len);
7356 	task_unlock(task);
7357 
7358 	/* find key from its auth pager */
7359 	if (jop_pid != NULL) {
7360 		*jop_pid = shared_region_find_key(shared_region_id);
7361 	}
7362 
7363 	return shared_region_id;
7364 }
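/*
 * Editor's sketch (hypothetical caller): the returned id is a
 * kalloc'ed buffer the caller owns and must free.
 *
 *	uint64_t jop_pid = 0;
 *	char *id = task_get_vm_shared_region_id_and_jop_pid(task, &jop_pid);
 *	if (id != NULL) {
 *		...
 *		kfree_data(id, strlen(id) + 1);
 *	}
 */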
7365 
7366 /*
7367  * set the shared region id for a task
7368  */
7369 void
7370 task_set_shared_region_id(task_t task, char *id)
7371 {
7372 	char *old_id;
7373 
7374 	task_lock(task);
7375 	old_id = task->shared_region_id;
7376 	task->shared_region_id = id;
7377 	task->shared_region_auth_remapped = FALSE;
7378 	task_unlock(task);
7379 
7380 	/* free any pre-existing shared region id */
7381 	if (old_id != NULL) {
7382 		shared_region_key_dealloc(old_id);
7383 		kfree_data(old_id, strlen(old_id) + 1);
7384 	}
7385 }
7386 #endif /* __has_feature(ptrauth_calls) */
7387 
7388 /*
7389  * This routine finds a thread in a task by its unique id
7390  * Returns a referenced thread or THREAD_NULL if the thread was not found
7391  *
7392  * TODO: This is super inefficient - it's an O(threads in task) list walk!
7393  *       We should make a tid hash, or transition all tid clients to thread ports
7394  *
7395  * Precondition: No locks held (will take task lock)
7396  */
7397 thread_t
7398 task_findtid(task_t task, uint64_t tid)
7399 {
7400 	thread_t self           = current_thread();
7401 	thread_t found_thread   = THREAD_NULL;
7402 	thread_t iter_thread    = THREAD_NULL;
7403 
7404 	/* Short-circuit the lookup if we're looking up ourselves */
7405 	if (tid == self->thread_id || tid == TID_NULL) {
7406 		assert(get_threadtask(self) == task);
7407 
7408 		thread_reference(self);
7409 
7410 		return self;
7411 	}
7412 
7413 	task_lock(task);
7414 
7415 	queue_iterate(&task->threads, iter_thread, thread_t, task_threads) {
7416 		if (iter_thread->thread_id == tid) {
7417 			found_thread = iter_thread;
7418 			thread_reference(found_thread);
7419 			break;
7420 		}
7421 	}
7422 
7423 	task_unlock(task);
7424 
7425 	return found_thread;
7426 }
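/*
 * Editor's sketch (hypothetical caller): task_findtid() returns a
 * referenced thread, so the caller must drop the reference when done.
 *
 *	thread_t th = task_findtid(task, tid);
 *	if (th != THREAD_NULL) {
 *		...
 *		thread_deallocate(th);
 *	}
 */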
7427 
7428 int
7429 pid_from_task(task_t task)
7430 {
7431 	int pid = -1;
7432 
7433 	if (task->bsd_info) {
7434 		pid = proc_pid(task->bsd_info);
7435 	} else {
7436 		pid = task_pid(task);
7437 	}
7438 
7439 	return pid;
7440 }
7441 
7442 /*
7443  * Control the CPU usage monitor for a task.
7444  */
7445 kern_return_t
7446 task_cpu_usage_monitor_ctl(task_t task, uint32_t *flags)
7447 {
7448 	int error = KERN_SUCCESS;
7449 
7450 	if (*flags & CPUMON_MAKE_FATAL) {
7451 		task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_FATAL_CPUMON;
7452 	} else {
7453 		error = KERN_INVALID_ARGUMENT;
7454 	}
7455 
7456 	return error;
7457 }
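/*
 * Editor's sketch (hypothetical caller): the only control this
 * routine accepts is making an existing CPU usage monitor fatal.
 *
 *	uint32_t cpumon_flags = CPUMON_MAKE_FATAL;
 *	(void)task_cpu_usage_monitor_ctl(task, &cpumon_flags);
 */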
7458 
7459 /*
7460  * Control the wakeups monitor for a task.
7461  */
7462 kern_return_t
7463 task_wakeups_monitor_ctl(task_t task, uint32_t *flags, int32_t *rate_hz)
7464 {
7465 	ledger_t ledger = task->ledger;
7466 
7467 	task_lock(task);
7468 	if (*flags & WAKEMON_GET_PARAMS) {
7469 		ledger_amount_t limit;
7470 		uint64_t                period;
7471 
7472 		ledger_get_limit(ledger, task_ledgers.interrupt_wakeups, &limit);
7473 		ledger_get_period(ledger, task_ledgers.interrupt_wakeups, &period);
7474 
7475 		if (limit != LEDGER_LIMIT_INFINITY) {
7476 			/*
7477 			 * An active limit means the wakeups monitor is enabled.
7478 			 */
7479 			*rate_hz = (int32_t)(limit / (int64_t)(period / NSEC_PER_SEC));
7480 			*flags = WAKEMON_ENABLE;
7481 			if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON) {
7482 				*flags |= WAKEMON_MAKE_FATAL;
7483 			}
7484 		} else {
7485 			*flags = WAKEMON_DISABLE;
7486 			*rate_hz = -1;
7487 		}
7488 
7489 		/*
7490 		 * If WAKEMON_GET_PARAMS is present in flags, all other flags are ignored.
7491 		 */
7492 		task_unlock(task);
7493 		return KERN_SUCCESS;
7494 	}
7495 
7496 	if (*flags & WAKEMON_ENABLE) {
7497 		if (*flags & WAKEMON_SET_DEFAULTS) {
7498 			*rate_hz = task_wakeups_monitor_rate;
7499 		}
7500 
7501 #ifndef CONFIG_NOMONITORS
7502 		if (*flags & WAKEMON_MAKE_FATAL) {
7503 			task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON;
7504 		}
7505 #endif /* CONFIG_NOMONITORS */
7506 
7507 		if (*rate_hz <= 0) {
7508 			task_unlock(task);
7509 			return KERN_INVALID_ARGUMENT;
7510 		}
7511 
7512 #ifndef CONFIG_NOMONITORS
7513 		ledger_set_limit(ledger, task_ledgers.interrupt_wakeups, *rate_hz * task_wakeups_monitor_interval,
7514 		    (uint8_t)task_wakeups_monitor_ustackshots_trigger_pct);
7515 		ledger_set_period(ledger, task_ledgers.interrupt_wakeups, task_wakeups_monitor_interval * NSEC_PER_SEC);
7516 		ledger_enable_callback(ledger, task_ledgers.interrupt_wakeups);
7517 #endif /* CONFIG_NOMONITORS */
7518 	} else if (*flags & WAKEMON_DISABLE) {
7519 		/*
7520 		 * Caller wishes to disable wakeups monitor on the task.
7521 		 *
7522 		 * Disable telemetry if it was triggered by the wakeups monitor, and
7523 		 * remove the limit & callback on the wakeups ledger entry.
7524 		 */
7525 #if CONFIG_TELEMETRY
7526 		telemetry_task_ctl_locked(task, TF_WAKEMON_WARNING, 0);
7527 #endif
7528 		ledger_disable_refill(ledger, task_ledgers.interrupt_wakeups);
7529 		ledger_disable_callback(ledger, task_ledgers.interrupt_wakeups);
7530 	}
7531 
7532 	task_unlock(task);
7533 	return KERN_SUCCESS;
7534 }
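/*
 * Editor's sketch (hypothetical caller): arm the wakeups monitor at
 * the system default rate; WAKEMON_SET_DEFAULTS overwrites rate_hz.
 *
 *	uint32_t wakemon_flags = WAKEMON_ENABLE | WAKEMON_SET_DEFAULTS;
 *	int32_t rate_hz = 0;
 *	(void)task_wakeups_monitor_ctl(task, &wakemon_flags, &rate_hz);
 */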
7535 
7536 void
7537 task_wakeups_rate_exceeded(int warning, __unused const void *param0, __unused const void *param1)
7538 {
7539 	if (warning == LEDGER_WARNING_ROSE_ABOVE) {
7540 #if CONFIG_TELEMETRY
7541 		/*
7542 		 * This task is in danger of violating the wakeups monitor. Enable telemetry on this task
7543 		 * so there are micro-stackshots available if and when EXC_RESOURCE is triggered.
7544 		 */
7545 		telemetry_task_ctl(current_task(), TF_WAKEMON_WARNING, 1);
7546 #endif
7547 		return;
7548 	}
7549 
7550 #if CONFIG_TELEMETRY
7551 	/*
7552 	 * If the balance has dipped below the warning level (LEDGER_WARNING_DIPPED_BELOW) or
7553 	 * exceeded the limit, turn telemetry off for the task.
7554 	 */
7555 	telemetry_task_ctl(current_task(), TF_WAKEMON_WARNING, 0);
7556 #endif
7557 
7558 	if (warning == 0) {
7559 		SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS();
7560 	}
7561 }
7562 
7563 void __attribute__((noinline))
7564 SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void)
7565 {
7566 	task_t                      task        = current_task();
7567 	int                         pid         = 0;
7568 	const char                  *procname   = "unknown";
7569 	boolean_t                   fatal;
7570 	kern_return_t               kr;
7571 #ifdef EXC_RESOURCE_MONITORS
7572 	mach_exception_data_type_t  code[EXCEPTION_CODE_MAX];
7573 #endif /* EXC_RESOURCE_MONITORS */
7574 	struct ledger_entry_info    lei;
7575 
7576 #ifdef MACH_BSD
7577 	pid = proc_selfpid();
7578 	if (task->bsd_info != NULL) {
7579 		procname = proc_name_address(current_task()->bsd_info);
7580 	}
7581 #endif
7582 
7583 	ledger_get_entry_info(task->ledger, task_ledgers.interrupt_wakeups, &lei);
7584 
7585 	/*
7586 	 * Disable the exception notification so we don't overwhelm
7587 	 * the listener with an endless stream of redundant exceptions.
7588 	 * TODO: detect whether another thread is already reporting the violation.
7589 	 */
7590 	uint32_t flags = WAKEMON_DISABLE;
7591 	task_wakeups_monitor_ctl(task, &flags, NULL);
7592 
7593 	fatal = task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON;
7594 	trace_resource_violation(RMON_CPUWAKES_VIOLATED, &lei);
7595 	os_log(OS_LOG_DEFAULT, "process %s[%d] caught waking the CPU %llu times "
7596 	    "over ~%llu seconds, averaging %llu wakes / second and "
7597 	    "violating a %slimit of %llu wakes over %llu seconds.\n",
7598 	    procname, pid,
7599 	    lei.lei_balance, lei.lei_last_refill / NSEC_PER_SEC,
7600 	    lei.lei_last_refill == 0 ? 0 :
7601 	    (NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill),
7602 	    fatal ? "FATAL " : "",
7603 	    lei.lei_limit, lei.lei_refill_period / NSEC_PER_SEC);
7604 
7605 	kr = send_resource_violation(send_cpu_wakes_violation, task, &lei,
7606 	    fatal ? kRNFatalLimitFlag : 0);
7607 	if (kr) {
7608 		printf("send_resource_violation(CPU wakes, ...): error %#x\n", kr);
7609 	}
7610 
7611 #ifdef EXC_RESOURCE_MONITORS
7612 	if (disable_exc_resource) {
7613 		printf("process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
7614 		    "suppressed by a boot-arg\n", procname, pid);
7615 		return;
7616 	}
7617 	if (audio_active) {
7618 		os_log(OS_LOG_DEFAULT, "process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
7619 		    "suppressed due to audio playback\n", procname, pid);
7620 		return;
7621 	}
7622 	if (lei.lei_last_refill == 0) {
7623 		os_log(OS_LOG_DEFAULT, "process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
7624 		    "suppressed due to lei.lei_last_refill = 0\n", procname, pid);
7625 	}
7626 
7627 	code[0] = code[1] = 0;
7628 	EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_WAKEUPS);
7629 	EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_WAKEUPS_MONITOR);
7630 	EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_PERMITTED(code[0],
7631 	    NSEC_PER_SEC * lei.lei_limit / lei.lei_refill_period);
7632 	EXC_RESOURCE_CPUMONITOR_ENCODE_OBSERVATION_INTERVAL(code[0],
7633 	    lei.lei_last_refill);
7634 	EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_OBSERVED(code[1],
7635 	    NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill);
7636 	exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
7637 #endif /* EXC_RESOURCE_MONITORS */
7638 
7639 	if (fatal) {
7640 		task_terminate_internal(task);
7641 	}
7642 }
7643 
7644 static boolean_t
7645 global_update_logical_writes(int64_t io_delta, int64_t *global_write_count)
7646 {
7647 	int64_t old_count, new_count;
7648 	boolean_t needs_telemetry;
7649 
7650 	do {
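	/*
	 * Editor's note: lock-free accumulation.  The compare-and-swap
	 * retries whenever another CPU updated the counter between our
	 * read and write; crossing io_telemetry_limit resets the count
	 * and asks the caller to emit telemetry.
	 */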
7651 		new_count = old_count = *global_write_count;
7652 		new_count += io_delta;
7653 		if (new_count >= io_telemetry_limit) {
7654 			new_count = 0;
7655 			needs_telemetry = TRUE;
7656 		} else {
7657 			needs_telemetry = FALSE;
7658 		}
7659 	} while (!OSCompareAndSwap64(old_count, new_count, global_write_count));
7660 	return needs_telemetry;
7661 }
7662 
7663 void
7664 task_update_physical_writes(__unused task_t task, __unused task_physical_write_flavor_t flavor, __unused uint64_t io_size, __unused task_balance_flags_t flags)
7665 {
7666 #if CONFIG_PHYS_WRITE_ACCT
7667 	if (!io_size) {
7668 		return;
7669 	}
7670 
7671 	/*
7672 	 * task == NULL means that we have to update kernel_task ledgers
7673 	 */
7674 	if (!task) {
7675 		task = kernel_task;
7676 	}
7677 
7678 	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PHYS_WRITE_ACCT)) | DBG_FUNC_NONE,
7679 	    task_pid(task), flavor, io_size, flags, 0);
7680 	DTRACE_IO4(physical_writes, struct task *, task, task_physical_write_flavor_t, flavor, uint64_t, io_size, task_balance_flags_t, flags);
7681 
7682 	if (flags & TASK_BALANCE_CREDIT) {
7683 		if (flavor == TASK_PHYSICAL_WRITE_METADATA) {
7684 			OSAddAtomic64(io_size, (SInt64 *)&(task->task_fs_metadata_writes));
7685 			ledger_credit_nocheck(task->ledger, task_ledgers.fs_metadata_writes, io_size);
7686 		}
7687 	} else if (flags & TASK_BALANCE_DEBIT) {
7688 		if (flavor == TASK_PHYSICAL_WRITE_METADATA) {
7689 			OSAddAtomic64(-1 * io_size, (SInt64 *)&(task->task_fs_metadata_writes));
7690 			ledger_debit_nocheck(task->ledger, task_ledgers.fs_metadata_writes, io_size);
7691 		}
7692 	}
7693 #endif /* CONFIG_PHYS_WRITE_ACCT */
7694 }
7695 
7696 void
7697 task_update_logical_writes(task_t task, uint32_t io_size, int flags, void *vp)
7698 {
7699 	int64_t io_delta = 0;
7700 	int64_t * global_counter_to_update;
7701 	boolean_t needs_telemetry = FALSE;
7702 	boolean_t is_external_device = FALSE;
7703 	int ledger_to_update = 0;
7704 	struct task_writes_counters * writes_counters_to_update;
7705 
7706 	if ((!task) || (!io_size) || (!vp)) {
7707 		return;
7708 	}
7709 
7710 	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_DATA_WRITE)) | DBG_FUNC_NONE,
7711 	    task_pid(task), io_size, flags, (uintptr_t)VM_KERNEL_ADDRPERM(vp), 0);
7712 	DTRACE_IO4(logical_writes, struct task *, task, uint32_t, io_size, int, flags, vnode *, vp);
7713 
7714 	// Is the drive backing this vnode internal or external to the system?
7715 	if (vnode_isonexternalstorage(vp) == false) {
7716 		global_counter_to_update = &global_logical_writes_count;
7717 		ledger_to_update = task_ledgers.logical_writes;
7718 		writes_counters_to_update = &task->task_writes_counters_internal;
7719 		is_external_device = FALSE;
7720 	} else {
7721 		global_counter_to_update = &global_logical_writes_to_external_count;
7722 		ledger_to_update = task_ledgers.logical_writes_to_external;
7723 		writes_counters_to_update = &task->task_writes_counters_external;
7724 		is_external_device = TRUE;
7725 	}
7726 
7727 	switch (flags) {
7728 	case TASK_WRITE_IMMEDIATE:
7729 		OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_immediate_writes));
7730 		ledger_credit(task->ledger, ledger_to_update, io_size);
7731 		if (!is_external_device) {
7732 			coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
7733 		}
7734 		break;
7735 	case TASK_WRITE_DEFERRED:
7736 		OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_deferred_writes));
7737 		ledger_credit(task->ledger, ledger_to_update, io_size);
7738 		if (!is_external_device) {
7739 			coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
7740 		}
7741 		break;
7742 	case TASK_WRITE_INVALIDATED:
7743 		OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_invalidated_writes));
7744 		ledger_debit(task->ledger, ledger_to_update, io_size);
7745 		if (!is_external_device) {
7746 			coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, FALSE, io_size);
7747 		}
7748 		break;
7749 	case TASK_WRITE_METADATA:
7750 		OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_metadata_writes));
7751 		ledger_credit(task->ledger, ledger_to_update, io_size);
7752 		if (!is_external_device) {
7753 			coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
7754 		}
7755 		break;
7756 	}
7757 
7758 	io_delta = (flags == TASK_WRITE_INVALIDATED) ? ((int64_t)io_size * -1ll) : ((int64_t)io_size);
7759 	if (io_telemetry_limit != 0) {
7760 		/* If io_telemetry_limit is 0, disable global updates and I/O telemetry */
7761 		needs_telemetry = global_update_logical_writes(io_delta, global_counter_to_update);
7762 		if (needs_telemetry && !is_external_device) {
7763 			act_set_io_telemetry_ast(current_thread());
7764 		}
7765 	}
7766 }
7767 
7768 /*
7769  * Control the I/O monitor for a task.
7770  */
7771 kern_return_t
7772 task_io_monitor_ctl(task_t task, uint32_t *flags)
7773 {
7774 	ledger_t ledger = task->ledger;
7775 
7776 	task_lock(task);
7777 	if (*flags & IOMON_ENABLE) {
7778 		/* Configure the physical I/O ledger */
7779 		ledger_set_limit(ledger, task_ledgers.physical_writes, (task_iomon_limit_mb * 1024 * 1024), 0);
7780 		ledger_set_period(ledger, task_ledgers.physical_writes, (task_iomon_interval_secs * NSEC_PER_SEC));
7781 	} else if (*flags & IOMON_DISABLE) {
7782 		/*
7783 		 * Caller wishes to disable I/O monitor on the task.
7784 		 */
7785 		ledger_disable_refill(ledger, task_ledgers.physical_writes);
7786 		ledger_disable_callback(ledger, task_ledgers.physical_writes);
7787 	}
7788 
7789 	task_unlock(task);
7790 	return KERN_SUCCESS;
7791 }
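/*
 * Editor's sketch (hypothetical caller): arm the physical-writes
 * monitor with the boot-time limits configured above.
 *
 *	uint32_t iomon_flags = IOMON_ENABLE;
 *	(void)task_io_monitor_ctl(task, &iomon_flags);
 */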
7792 
7793 void
7794 task_io_rate_exceeded(int warning, const void *param0, __unused const void *param1)
7795 {
7796 	if (warning == 0) {
7797 		SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO((int)param0);
7798 	}
7799 }
7800 
7801 void __attribute__((noinline))
7802 SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor)
7803 {
7804 	int                             pid = 0;
7805 	task_t                          task = current_task();
7806 #ifdef EXC_RESOURCE_MONITORS
7807 	mach_exception_data_type_t      code[EXCEPTION_CODE_MAX];
7808 #endif /* EXC_RESOURCE_MONITORS */
7809 	struct ledger_entry_info        lei = {};
7810 	kern_return_t                   kr;
7811 
7812 #ifdef MACH_BSD
7813 	pid = proc_selfpid();
7814 #endif
7815 	/*
7816 	 * Get the ledger entry info. We need to do this before disabling the exception
7817 	 * to get correct values for all fields.
7818 	 */
7819 	switch (flavor) {
7820 	case FLAVOR_IO_PHYSICAL_WRITES:
7821 		ledger_get_entry_info(task->ledger, task_ledgers.physical_writes, &lei);
7822 		break;
7823 	}
7824 
7825 
7826 	/*
7827 	 * Disable the exception notification so we don't overwhelm
7828 	 * the listener with an endless stream of redundant exceptions.
7829 	 * TODO: detect whether another thread is already reporting the violation.
7830 	 */
7831 	uint32_t flags = IOMON_DISABLE;
7832 	task_io_monitor_ctl(task, &flags);
7833 
7834 	if (flavor == FLAVOR_IO_LOGICAL_WRITES) {
7835 		trace_resource_violation(RMON_LOGWRITES_VIOLATED, &lei);
7836 	}
7837 	os_log(OS_LOG_DEFAULT, "process [%d] caught causing excessive I/O (flavor: %d). Task I/O: %lld MB. [Limit : %lld MB per %lld secs]\n",
7838 	    pid, flavor, (lei.lei_balance / (1024 * 1024)), (lei.lei_limit / (1024 * 1024)), (lei.lei_refill_period / NSEC_PER_SEC));
7839 
7840 	kr = send_resource_violation(send_disk_writes_violation, task, &lei, kRNFlagsNone);
7841 	if (kr) {
7842 		printf("send_resource_violation(disk_writes, ...): error %#x\n", kr);
7843 	}
7844 
7845 #ifdef EXC_RESOURCE_MONITORS
7846 	code[0] = code[1] = 0;
7847 	EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_IO);
7848 	EXC_RESOURCE_ENCODE_FLAVOR(code[0], flavor);
7849 	EXC_RESOURCE_IO_ENCODE_INTERVAL(code[0], (lei.lei_refill_period / NSEC_PER_SEC));
7850 	EXC_RESOURCE_IO_ENCODE_LIMIT(code[0], (lei.lei_limit / (1024 * 1024)));
7851 	EXC_RESOURCE_IO_ENCODE_OBSERVED(code[1], (lei.lei_balance / (1024 * 1024)));
7852 	exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
7853 #endif /* EXC_RESOURCE_MONITORS */
7854 }
7855 
7856 void
7857 task_port_space_ast(__unused task_t task)
7858 {
7859 	uint32_t current_size, soft_limit, hard_limit;
7860 	assert(task == current_task());
7861 	kern_return_t ret = ipc_space_get_table_size_and_limits(task->itk_space,
7862 	    &current_size, &soft_limit, &hard_limit);
7863 	if (ret == KERN_SUCCESS) {
7864 		SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_MACH_PORTS(task, current_size, soft_limit, hard_limit);
7865 	}
7866 }
7867 
7868 #if CONFIG_PROC_RESOURCE_LIMITS
7869 static mach_port_t
7870 task_allocate_fatal_port(void)
7871 {
7872 	mach_port_t task_fatal_port = MACH_PORT_NULL;
7873 	task_id_token_t token;
7874 
7875 	kern_return_t kr = task_create_identity_token(current_task(), &token); /* Takes a reference on the token */
7876 	if (kr) {
7877 		return MACH_PORT_NULL;
7878 	}
7879 	task_fatal_port = ipc_kobject_alloc_port((ipc_kobject_t)token, IKOT_TASK_FATAL,
7880 	    IPC_KOBJECT_ALLOC_NSREQUEST | IPC_KOBJECT_ALLOC_MAKE_SEND);
7881 
7882 	task_id_token_set_port(token, task_fatal_port);
7883 
7884 	return task_fatal_port;
7885 }
7886 
7887 static void
7888 task_fatal_port_no_senders(ipc_port_t port, __unused mach_port_mscount_t mscount)
7889 {
7890 	task_t task = TASK_NULL;
7891 	kern_return_t kr;
7892 
7893 	task_id_token_t token = ipc_kobject_get_stable(port, IKOT_TASK_FATAL);
7894 
7895 	assert(token != NULL);
7896 	if (token) {
7897 		kr = task_identity_token_get_task_grp(token, &task, TASK_GRP_KERNEL); /* takes a reference on task */
7898 		if (task) {
7899 			task_bsdtask_kill(task);
7900 			task_deallocate(task);
7901 		}
7902 		task_id_token_release(token); /* consumes ref given by notification */
7903 	}
7904 }
7905 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
7906 
7907 void __attribute__((noinline))
7908 SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_MACH_PORTS(task_t task, uint32_t current_size, uint32_t soft_limit, uint32_t hard_limit)
7909 {
7910 	int pid = 0;
7911 	char *procname = (char *) "unknown";
7912 	__unused kern_return_t kr;
7913 	__unused resource_notify_flags_t flags = kRNFlagsNone;
7914 	__unused uint32_t limit;
7915 	__unused mach_port_t task_fatal_port = MACH_PORT_NULL;
7916 	mach_exception_data_type_t      code[EXCEPTION_CODE_MAX];
7917 
7918 #ifdef MACH_BSD
7919 	pid = proc_selfpid();
7920 	if (task->bsd_info != NULL) {
7921 		procname = proc_name_address(task->bsd_info);
7922 	}
7923 #endif
7924 	/*
7925 	 * Only kernel_task and launchd are allowed to
7926 	 * have a really large IPC space.
7927 	 */
7928 	if (pid == 0 || pid == 1) {
7929 		return;
7930 	}
7931 
7932 	os_log(OS_LOG_DEFAULT, "process %s[%d] caught allocating too many mach ports. \
7933 	    Num of ports allocated %u; \n", procname, pid, current_size);
7934 
7935 	/* Abort the process if it has hit the system-wide limit for ipc port table size */
7936 	if (!hard_limit && !soft_limit) {
7937 		code[0] = code[1] = 0;
7938 		EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_PORTS);
7939 		EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_PORT_SPACE_FULL);
7940 		EXC_RESOURCE_PORTS_ENCODE_PORTS(code[0], current_size);
7941 
7942 		exit_with_port_space_exception(current_proc(), code[0], code[1]);
7943 
7944 		return;
7945 	}
7946 
7947 #if CONFIG_PROC_RESOURCE_LIMITS
7948 	if (hard_limit > 0) {
7949 		flags |= kRNHardLimitFlag;
7950 		limit = hard_limit;
7951 		task_fatal_port = task_allocate_fatal_port();
7952 		if (!task_fatal_port) {
7953 			os_log(OS_LOG_DEFAULT, "process %s[%d] Unable to create task token ident object", procname, pid);
7954 			task_bsdtask_kill(task);
7955 		}
7956 	} else {
7957 		flags |= kRNSoftLimitFlag;
7958 		limit = soft_limit;
7959 	}
7960 
7961 	kr = send_resource_violation_with_fatal_port(send_port_space_violation, task, (int64_t)current_size, (int64_t)limit, task_fatal_port, flags);
7962 	if (kr) {
7963 		os_log(OS_LOG_DEFAULT, "send_resource_violation(ports, ...): error %#x\n", kr);
7964 	}
7965 	if (task_fatal_port) {
7966 		ipc_port_release_send(task_fatal_port);
7967 	}
7968 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
7969 }
7970 
7971 void
7972 task_filedesc_ast(__unused task_t task, __unused int current_size, __unused int soft_limit, __unused int hard_limit)
7973 {
7974 #if CONFIG_PROC_RESOURCE_LIMITS
7975 	assert(task == current_task());
7976 	SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task, current_size, soft_limit, hard_limit);
7977 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
7978 }
7979 
7980 #if CONFIG_PROC_RESOURCE_LIMITS
7981 void __attribute__((noinline))
7982 SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task_t task, int current_size, int soft_limit, int hard_limit)
7983 {
7984 	int pid = 0;
7985 	char *procname = (char *) "unknown";
7986 	kern_return_t kr;
7987 	resource_notify_flags_t flags = kRNFlagsNone;
7988 	int limit;
7989 	mach_port_t task_fatal_port = MACH_PORT_NULL;
7990 
7991 #ifdef MACH_BSD
7992 	pid = proc_selfpid();
7993 	if (task->bsd_info != NULL) {
7994 		procname = proc_name_address(task->bsd_info);
7995 	}
7996 #endif
7997 	/*
7998 	 * Only kernel_task and launchd are allowed to
7999 	 * have a really large file descriptor table.
8000 	 */
8001 	if (pid == 0 || pid == 1) {
8002 		return;
8003 	}
8004 
8005 	os_log(OS_LOG_DEFAULT, "process %s[%d] caught allocating too many file descriptors. \
8006 	    Num of fds allocated %u; \n", procname, pid, current_size);
8007 
8008 	if (hard_limit > 0) {
8009 		flags |= kRNHardLimitFlag;
8010 		limit = hard_limit;
8011 		task_fatal_port = task_allocate_fatal_port();
8012 		if (!task_fatal_port) {
8013 			os_log(OS_LOG_DEFAULT, "process %s[%d] Unable to create task token ident object", procname, pid);
8014 			task_bsdtask_kill(task);
8015 		}
8016 	} else {
8017 		flags |= kRNSoftLimitFlag;
8018 		limit = soft_limit;
8019 	}
8020 
8021 	kr = send_resource_violation_with_fatal_port(send_file_descriptors_violation, task, (int64_t)current_size, (int64_t)limit, task_fatal_port, flags);
8022 	if (kr) {
8023 		os_log(OS_LOG_DEFAULT, "send_resource_violation_with_fatal_port(filedesc, ...): error %#x\n", kr);
8024 	}
8025 	if (task_fatal_port) {
8026 		ipc_port_release_send(task_fatal_port);
8027 	}
8028 }
8029 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
8030 
8031 /* Placeholders for the task set/get voucher interfaces */
8032 kern_return_t
8033 task_get_mach_voucher(
8034 	task_t                  task,
8035 	mach_voucher_selector_t __unused which,
8036 	ipc_voucher_t           *voucher)
8037 {
8038 	if (TASK_NULL == task) {
8039 		return KERN_INVALID_TASK;
8040 	}
8041 
8042 	*voucher = NULL;
8043 	return KERN_SUCCESS;
8044 }
8045 
8046 kern_return_t
8047 task_set_mach_voucher(
8048 	task_t                  task,
8049 	ipc_voucher_t           __unused voucher)
8050 {
8051 	if (TASK_NULL == task) {
8052 		return KERN_INVALID_TASK;
8053 	}
8054 
8055 	return KERN_SUCCESS;
8056 }
8057 
8058 kern_return_t
8059 task_swap_mach_voucher(
8060 	__unused task_t         task,
8061 	__unused ipc_voucher_t  new_voucher,
8062 	ipc_voucher_t          *in_out_old_voucher)
8063 {
8064 	/*
8065 	 * Currently this function is only called from a MIG generated
8066 	 * routine which doesn't release the reference on the voucher
8067 	 * addressed by in_out_old_voucher. To avoid leaking this reference,
8068 	 * a call to release it has been added here.
8069 	 */
8070 	ipc_voucher_release(*in_out_old_voucher);
8071 	OS_ANALYZER_SUPPRESS("81787115") return KERN_NOT_SUPPORTED;
8072 }
8073 
8074 void
8075 task_set_gpu_denied(task_t task, boolean_t denied)
8076 {
8077 	task_lock(task);
8078 
8079 	if (denied) {
8080 		task->t_flags |= TF_GPU_DENIED;
8081 	} else {
8082 		task->t_flags &= ~TF_GPU_DENIED;
8083 	}
8084 
8085 	task_unlock(task);
8086 }
8087 
8088 boolean_t
8089 task_is_gpu_denied(task_t task)
8090 {
8091 	/* We don't need the lock to read this flag */
8092 	return (task->t_flags & TF_GPU_DENIED) ? TRUE : FALSE;
8093 }
8094 
8095 
8096 uint64_t
8097 get_task_memory_region_count(task_t task)
8098 {
8099 	vm_map_t map;
8100 	map = (task == kernel_task) ? kernel_map: task->map;
8101 	return (uint64_t)get_map_nentries(map);
8102 }
8103 
8104 static void
8105 kdebug_trace_dyld_internal(uint32_t base_code,
8106     struct dyld_kernel_image_info *info)
8107 {
8108 	static_assert(sizeof(info->uuid) >= 16);
8109 
8110 #if defined(__LP64__)
8111 	uint64_t *uuid = (uint64_t *)&(info->uuid);
8112 
8113 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8114 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code), uuid[0],
8115 	    uuid[1], info->load_addr,
8116 	    (uint64_t)info->fsid.val[0] | ((uint64_t)info->fsid.val[1] << 32),
8117 	    0);
8118 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8119 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 1),
8120 	    (uint64_t)info->fsobjid.fid_objno |
8121 	    ((uint64_t)info->fsobjid.fid_generation << 32),
8122 	    0, 0, 0, 0);
8123 #else /* defined(__LP64__) */
8124 	uint32_t *uuid = (uint32_t *)&(info->uuid);
8125 
8126 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8127 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 2), uuid[0],
8128 	    uuid[1], uuid[2], uuid[3], 0);
8129 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8130 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 3),
8131 	    (uint32_t)info->load_addr, info->fsid.val[0], info->fsid.val[1],
8132 	    info->fsobjid.fid_objno, 0);
8133 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8134 	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 4),
8135 	    info->fsobjid.fid_generation, 0, 0, 0, 0);
8136 #endif /* !defined(__LP64__) */
8137 }
8138 
8139 static kern_return_t
8140 kdebug_trace_dyld(task_t task, uint32_t base_code,
8141     vm_map_copy_t infos_copy, mach_msg_type_number_t infos_len)
8142 {
8143 	kern_return_t kr;
8144 	dyld_kernel_image_info_array_t infos;
8145 	vm_map_offset_t map_data;
8146 	vm_offset_t data;
8147 
8148 	if (!infos_copy) {
8149 		return KERN_INVALID_ADDRESS;
8150 	}
8151 
8152 	if (!kdebug_enable ||
8153 	    !kdebug_debugid_enabled(KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, 0))) {
8154 		vm_map_copy_discard(infos_copy);
8155 		return KERN_SUCCESS;
8156 	}
8157 
8158 	if (task == NULL || task != current_task()) {
8159 		return KERN_INVALID_TASK;
8160 	}
8161 
8162 	kr = vm_map_copyout(ipc_kernel_map, &map_data, (vm_map_copy_t)infos_copy);
8163 	if (kr != KERN_SUCCESS) {
8164 		return kr;
8165 	}
8166 
8167 	infos = CAST_DOWN(dyld_kernel_image_info_array_t, map_data);
8168 
8169 	for (mach_msg_type_number_t i = 0; i < infos_len; i++) {
8170 		kdebug_trace_dyld_internal(base_code, &(infos[i]));
8171 	}
8172 
8173 	data = CAST_DOWN(vm_offset_t, map_data);
8174 	mach_vm_deallocate(ipc_kernel_map, data, infos_len * sizeof(infos[0]));
8175 	return KERN_SUCCESS;
8176 }
8177 
8178 kern_return_t
8179 task_register_dyld_image_infos(task_t task,
8180     dyld_kernel_image_info_array_t infos_copy,
8181     mach_msg_type_number_t infos_len)
8182 {
8183 	return kdebug_trace_dyld(task, DBG_DYLD_UUID_MAP_A,
8184 	           (vm_map_copy_t)infos_copy, infos_len);
8185 }
8186 
8187 kern_return_t
8188 task_unregister_dyld_image_infos(task_t task,
8189     dyld_kernel_image_info_array_t infos_copy,
8190     mach_msg_type_number_t infos_len)
8191 {
8192 	return kdebug_trace_dyld(task, DBG_DYLD_UUID_UNMAP_A,
8193 	           (vm_map_copy_t)infos_copy, infos_len);
8194 }
8195 
8196 kern_return_t
8197 task_get_dyld_image_infos(__unused task_t task,
8198     __unused dyld_kernel_image_info_array_t * dyld_images,
8199     __unused mach_msg_type_number_t * dyld_imagesCnt)
8200 {
8201 	return KERN_NOT_SUPPORTED;
8202 }
8203 
8204 kern_return_t
8205 task_register_dyld_shared_cache_image_info(task_t task,
8206     dyld_kernel_image_info_t cache_img,
8207     __unused boolean_t no_cache,
8208     __unused boolean_t private_cache)
8209 {
8210 	if (task == NULL || task != current_task()) {
8211 		return KERN_INVALID_TASK;
8212 	}
8213 
8214 	kdebug_trace_dyld_internal(DBG_DYLD_UUID_SHARED_CACHE_A, &cache_img);
8215 	return KERN_SUCCESS;
8216 }
8217 
8218 kern_return_t
8219 task_register_dyld_set_dyld_state(__unused task_t task,
8220     __unused uint8_t dyld_state)
8221 {
8222 	return KERN_NOT_SUPPORTED;
8223 }
8224 
8225 kern_return_t
8226 task_register_dyld_get_process_state(__unused task_t task,
8227     __unused dyld_kernel_process_info_t * dyld_process_state)
8228 {
8229 	return KERN_NOT_SUPPORTED;
8230 }
8231 
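/*
 * Illustrative user-space use of the inspect interface, assuming the
 * MIG-generated stub and a MONOTONIC kernel (sketch only):
 *
 *	struct task_inspect_basic_counts counts = { 0 };
 *	mach_msg_type_number_t size = TASK_INSPECT_BASIC_COUNTS_COUNT;
 *	kern_return_t kr = task_inspect(inspect_port,
 *	    TASK_INSPECT_BASIC_COUNTS, (task_inspect_info_t)&counts, &size);
 *
 * On success, counts.instructions and counts.cycles hold the task's
 * accumulated fixed-counter totals (instructions may read 0 where the
 * hardware lacks a fixed instruction counter, per the #ifdef below).
 */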
8232 kern_return_t
8233 task_inspect(task_inspect_t task_insp, task_inspect_flavor_t flavor,
8234     task_inspect_info_t info_out, mach_msg_type_number_t *size_in_out)
8235 {
8236 #if MONOTONIC
8237 	task_t task = (task_t)task_insp;
8238 	kern_return_t kr = KERN_SUCCESS;
8239 	mach_msg_type_number_t size;
8240 
8241 	if (task == TASK_NULL) {
8242 		return KERN_INVALID_ARGUMENT;
8243 	}
8244 
8245 	size = *size_in_out;
8246 
8247 	switch (flavor) {
8248 	case TASK_INSPECT_BASIC_COUNTS: {
8249 		struct task_inspect_basic_counts *bc;
8250 		uint64_t task_counts[MT_CORE_NFIXED] = { 0 };
8251 
8252 		if (size < TASK_INSPECT_BASIC_COUNTS_COUNT) {
8253 			kr = KERN_INVALID_ARGUMENT;
8254 			break;
8255 		}
8256 
8257 		mt_fixed_task_counts(task, task_counts);
8258 		bc = (struct task_inspect_basic_counts *)info_out;
8259 #ifdef MT_CORE_INSTRS
8260 		bc->instructions = task_counts[MT_CORE_INSTRS];
8261 #else /* defined(MT_CORE_INSTRS) */
8262 		bc->instructions = 0;
8263 #endif /* !defined(MT_CORE_INSTRS) */
8264 		bc->cycles = task_counts[MT_CORE_CYCLES];
8265 		size = TASK_INSPECT_BASIC_COUNTS_COUNT;
8266 		break;
8267 	}
8268 	default:
8269 		kr = KERN_INVALID_ARGUMENT;
8270 		break;
8271 	}
8272 
8273 	if (kr == KERN_SUCCESS) {
8274 		*size_in_out = size;
8275 	}
8276 	return kr;
8277 #else /* MONOTONIC */
8278 #pragma unused(task_insp, flavor, info_out, size_in_out)
8279 	return KERN_NOT_SUPPORTED;
8280 #endif /* !MONOTONIC */
8281 }
8282 
8283 #if CONFIG_SECLUDED_MEMORY
8284 int num_tasks_can_use_secluded_mem = 0;
8285 
8286 void
8287 task_set_can_use_secluded_mem(
8288 	task_t          task,
8289 	boolean_t       can_use_secluded_mem)
8290 {
8291 	if (!task->task_could_use_secluded_mem) {
8292 		return;
8293 	}
8294 	task_lock(task);
8295 	task_set_can_use_secluded_mem_locked(task, can_use_secluded_mem);
8296 	task_unlock(task);
8297 }
8298 
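/*
 * Invariant maintained below, under the task lock: a task contributes
 * exactly once to num_tasks_can_use_secluded_mem while its
 * task_can_use_secluded_mem flag is set, so the global counter tracks
 * how many tasks currently have secluded-memory access enabled.
 */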
8299 void
8300 task_set_can_use_secluded_mem_locked(
8301 	task_t          task,
8302 	boolean_t       can_use_secluded_mem)
8303 {
8304 	assert(task->task_could_use_secluded_mem);
8305 	if (can_use_secluded_mem &&
8306 	    secluded_for_apps &&         /* global boot-arg */
8307 	    !task->task_can_use_secluded_mem) {
8308 		assert(num_tasks_can_use_secluded_mem >= 0);
8309 		OSAddAtomic(+1,
8310 		    (volatile SInt32 *)&num_tasks_can_use_secluded_mem);
8311 		task->task_can_use_secluded_mem = TRUE;
8312 	} else if (!can_use_secluded_mem &&
8313 	    task->task_can_use_secluded_mem) {
8314 		assert(num_tasks_can_use_secluded_mem > 0);
8315 		OSAddAtomic(-1,
8316 		    (volatile SInt32 *)&num_tasks_can_use_secluded_mem);
8317 		task->task_can_use_secluded_mem = FALSE;
8318 	}
8319 }
8320 
8321 void
8322 task_set_could_use_secluded_mem(
8323 	task_t          task,
8324 	boolean_t       could_use_secluded_mem)
8325 {
8326 	task->task_could_use_secluded_mem = !!could_use_secluded_mem;
8327 }
8328 
8329 void
8330 task_set_could_also_use_secluded_mem(
8331 	task_t          task,
8332 	boolean_t       could_also_use_secluded_mem)
8333 {
8334 	task->task_could_also_use_secluded_mem = !!could_also_use_secluded_mem;
8335 }
8336 
8337 boolean_t
8338 task_can_use_secluded_mem(
8339 	task_t          task,
8340 	boolean_t       is_alloc)
8341 {
8342 	if (task->task_can_use_secluded_mem) {
8343 		assert(task->task_could_use_secluded_mem);
8344 		assert(num_tasks_can_use_secluded_mem > 0);
8345 		return TRUE;
8346 	}
8347 	if (task->task_could_also_use_secluded_mem &&
8348 	    num_tasks_can_use_secluded_mem > 0) {
8349 		assert(num_tasks_can_use_secluded_mem > 0);
8350 		return TRUE;
8351 	}
8352 
8353 	/*
8354 	 * If a single task is using more than some large amount of memory
8355 	 * (i.e. more than secluded_shutoff_trigger) and is approaching its
8356 	 * task limit, allow it to dip into the secluded pool and suppress
8357 	 * rebuilding of secluded memory until that task exits.
8358 	 */
8359 	if (is_alloc && secluded_shutoff_trigger != 0) {
8360 		uint64_t phys_used = get_task_phys_footprint(task);
8361 		uint64_t limit = get_task_phys_footprint_limit(task);
8362 		if (phys_used > secluded_shutoff_trigger &&
8363 		    limit > secluded_shutoff_trigger &&
8364 		    phys_used > limit - secluded_shutoff_headroom) {
8365 			start_secluded_suppression(task);
8366 			return TRUE;
8367 		}
8368 	}
8369 
8370 	return FALSE;
8371 }
8372 
8373 boolean_t
8374 task_could_use_secluded_mem(
8375 	task_t  task)
8376 {
8377 	return task->task_could_use_secluded_mem;
8378 }
8379 
8380 boolean_t
8381 task_could_also_use_secluded_mem(
8382 	task_t  task)
8383 {
8384 	return task->task_could_also_use_secluded_mem;
8385 }
8386 #endif /* CONFIG_SECLUDED_MEMORY */
8387 
8388 queue_head_t *
8389 task_io_user_clients(task_t task)
8390 {
8391 	return &task->io_user_clients;
8392 }
8393 
8394 void
8395 task_set_message_app_suspended(task_t task, boolean_t enable)
8396 {
8397 	task->message_app_suspended = enable;
8398 }
8399 
8400 void
8401 task_copy_fields_for_exec(task_t dst_task, task_t src_task)
8402 {
8403 	dst_task->vtimers = src_task->vtimers;
8404 }
8405 
8406 #if DEVELOPMENT || DEBUG
8407 int vm_region_footprint = 0;
8408 #endif /* DEVELOPMENT || DEBUG */
8409 
8410 boolean_t
8411 task_self_region_footprint(void)
8412 {
8413 #if DEVELOPMENT || DEBUG
8414 	if (vm_region_footprint) {
8415 		/* system-wide override */
8416 		return TRUE;
8417 	}
8418 #endif /* DEVELOPMENT || DEBUG */
8419 	return current_task()->task_region_footprint;
8420 }
8421 
8422 void
8423 task_self_region_footprint_set(
8424 	boolean_t newval)
8425 {
8426 	task_t  curtask;
8427 
8428 	curtask = current_task();
8429 	task_lock(curtask);
8430 	if (newval) {
8431 		curtask->task_region_footprint = TRUE;
8432 	} else {
8433 		curtask->task_region_footprint = FALSE;
8434 	}
8435 	task_unlock(curtask);
8436 }
8437 
8438 void
8439 task_set_darkwake_mode(task_t task, boolean_t set_mode)
8440 {
8441 	assert(task);
8442 
8443 	task_lock(task);
8444 
8445 	if (set_mode) {
8446 		task->t_flags |= TF_DARKWAKE_MODE;
8447 	} else {
8448 		task->t_flags &= ~(TF_DARKWAKE_MODE);
8449 	}
8450 
8451 	task_unlock(task);
8452 }
8453 
8454 boolean_t
8455 task_get_darkwake_mode(task_t task)
8456 {
8457 	assert(task);
8458 	return (task->t_flags & TF_DARKWAKE_MODE) != 0;
8459 }
8460 
8461 /*
8462  * Set default behavior for task's control port and EXC_GUARD variants that have
8463  * settable behavior.
8464  *
8465  * Platform binaries typically have one behavior, third parties another -
8466  * but there are special exceptions we may need to account for.
8467  */
8468 void
8469 task_set_exc_guard_ctrl_port_default(
8470 	task_t task,
8471 	thread_t main_thread,
8472 	const char *name,
8473 	unsigned int namelen,
8474 	boolean_t is_simulated,
8475 	uint32_t platform,
8476 	uint32_t sdk)
8477 {
8478 	if (task->t_flags & TF_PLATFORM) {
8479 		/* set exc guard default behavior for first-party code */
8480 		task->task_exc_guard = (task_exc_guard_default & TASK_EXC_GUARD_ALL);
8481 
8482 		if (1 == task_pid(task)) {
8483 			/* special flags for inittask - deliver every instance as a corpse */
8484 			task->task_exc_guard = _TASK_EXC_GUARD_ALL_CORPSE;
8485 		} else if (task_exc_guard_default & TASK_EXC_GUARD_HONOR_NAMED_DEFAULTS) {
8486 			/* honor by-name default setting overrides */
8487 
8488 			int count = sizeof(task_exc_guard_named_defaults) / sizeof(struct task_exc_guard_named_default);
8489 
8490 			for (int i = 0; i < count; i++) {
8491 				const struct task_exc_guard_named_default *named_default =
8492 				    &task_exc_guard_named_defaults[i];
8493 				if (strncmp(named_default->name, name, namelen) == 0 &&
8494 				    strlen(named_default->name) == namelen) {
8495 					task->task_exc_guard = named_default->behavior;
8496 					break;
8497 				}
8498 			}
8499 		}
8500 
8501 		/* set control port options for 1p code, inherited from parent task by default */
8502 		task->task_control_port_options = (ipc_control_port_options & ICP_OPTIONS_1P_MASK);
8503 	} else {
8504 		/* set exc guard default behavior for third-party code */
8505 		task->task_exc_guard = ((task_exc_guard_default >> TASK_EXC_GUARD_THIRD_PARTY_DEFAULT_SHIFT) & TASK_EXC_GUARD_ALL);
8506 		/* set control port options for 3p code, inherited from parent task by default */
8507 		task->task_control_port_options = (ipc_control_port_options & ICP_OPTIONS_3P_MASK) >> ICP_OPTIONS_3P_SHIFT;
8508 	}
8509 
8510 	if (is_simulated) {
8511 		/* If simulated and built against a pre-iOS 15 (pre-watchOS 8) SDK, disable all EXC_GUARD */
8512 		if ((platform == PLATFORM_IOSSIMULATOR && sdk < 0xf0000) ||
8513 		    (platform == PLATFORM_TVOSSIMULATOR && sdk < 0xf0000) ||
8514 		    (platform == PLATFORM_WATCHOSSIMULATOR && sdk < 0x80000)) {
8515 			task->task_exc_guard = TASK_EXC_GUARD_NONE;
8516 		}
8517 		/* Disable protection for control ports for simulated binaries */
8518 		task->task_control_port_options = TASK_CONTROL_PORT_OPTIONS_NONE;
8519 	}
8520 
8521 
8522 	task_set_immovable_pinned(task);
8523 	main_thread_set_immovable_pinned(main_thread);
8524 }
8525 
8526 kern_return_t
8527 task_get_exc_guard_behavior(
8528 	task_t task,
8529 	task_exc_guard_behavior_t *behaviorp)
8530 {
8531 	if (task == TASK_NULL) {
8532 		return KERN_INVALID_TASK;
8533 	}
8534 	*behaviorp = task->task_exc_guard;
8535 	return KERN_SUCCESS;
8536 }
8537 
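/*
 * On RELEASE kernels the update below is effectively a one-way ratchet:
 * bits covered by task_exc_guard_no_unset_mask cannot be cleared and
 * bits covered by task_exc_guard_no_set_mask cannot be newly set, while
 * the CORPSE bit may change freely. DEBUG/DEVELOPMENT kernels accept any
 * transition.
 */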
8538 kern_return_t
8539 task_set_exc_guard_behavior(
8540 	task_t task,
8541 	task_exc_guard_behavior_t new_behavior)
8542 {
8543 	if (task == TASK_NULL) {
8544 		return KERN_INVALID_TASK;
8545 	}
8546 	if (new_behavior & ~TASK_EXC_GUARD_ALL) {
8547 		return KERN_INVALID_VALUE;
8548 	}
8549 
8550 	/* limit setting to that allowed for this config */
8551 	new_behavior = new_behavior & task_exc_guard_config_mask;
8552 
8553 #if !defined (DEBUG) && !defined (DEVELOPMENT)
8554 	/* On release kernels, only allow _upgrading_ exc guard behavior */
8555 	task_exc_guard_behavior_t cur_behavior;
8556 
8557 	os_atomic_rmw_loop(&task->task_exc_guard, cur_behavior, new_behavior, relaxed, {
8558 		if ((cur_behavior & task_exc_guard_no_unset_mask) & ~(new_behavior & task_exc_guard_no_unset_mask)) {
8559 		        os_atomic_rmw_loop_give_up(return KERN_DENIED);
8560 		}
8561 
8562 		if ((new_behavior & task_exc_guard_no_set_mask) & ~(cur_behavior & task_exc_guard_no_set_mask)) {
8563 		        os_atomic_rmw_loop_give_up(return KERN_DENIED);
8564 		}
8565 
8566 		/* no restrictions on CORPSE bit */
8567 	});
8568 #else
8569 	task->task_exc_guard = new_behavior;
8570 #endif
8571 	return KERN_SUCCESS;
8572 }
8573 
8574 kern_return_t
8575 task_set_corpse_forking_behavior(task_t task, task_corpse_forking_behavior_t behavior)
8576 {
8577 #if DEVELOPMENT || DEBUG
8578 	if (task == TASK_NULL) {
8579 		return KERN_INVALID_TASK;
8580 	}
8581 
8582 	task_lock(task);
8583 	if (behavior & TASK_CORPSE_FORKING_DISABLED_MEM_DIAG) {
8584 		task->t_flags |= TF_NO_CORPSE_FORKING;
8585 	} else {
8586 		task->t_flags &= ~TF_NO_CORPSE_FORKING;
8587 	}
8588 	task_unlock(task);
8589 
8590 	return KERN_SUCCESS;
8591 #else
8592 	(void)task;
8593 	(void)behavior;
8594 	return KERN_NOT_SUPPORTED;
8595 #endif
8596 }
8597 
8598 boolean_t
8599 task_corpse_forking_disabled(task_t task)
8600 {
8601 	boolean_t disabled = FALSE;
8602 
8603 	task_lock(task);
8604 	disabled = (task->t_flags & TF_NO_CORPSE_FORKING);
8605 	task_unlock(task);
8606 
8607 	return disabled;
8608 }
8609 
8610 #if __arm64__
8611 extern int legacy_footprint_entitlement_mode;
8612 extern void memorystatus_act_on_legacy_footprint_entitlement(struct proc *, boolean_t);
8613 extern void memorystatus_act_on_ios13extended_footprint_entitlement(struct proc *);
8614 
8615 
8616 void
8617 task_set_legacy_footprint(
8618 	task_t task)
8619 {
8620 	task_lock(task);
8621 	task->task_legacy_footprint = TRUE;
8622 	task_unlock(task);
8623 }
8624 
8625 void
8626 task_set_extra_footprint_limit(
8627 	task_t task)
8628 {
8629 	if (task->task_extra_footprint_limit) {
8630 		return;
8631 	}
8632 	task_lock(task);
8633 	if (task->task_extra_footprint_limit) {
8634 		task_unlock(task);
8635 		return;
8636 	}
8637 	task->task_extra_footprint_limit = TRUE;
8638 	task_unlock(task);
8639 	memorystatus_act_on_legacy_footprint_entitlement(task->bsd_info, TRUE);
8640 }
8641 
8642 void
8643 task_set_ios13extended_footprint_limit(
8644 	task_t task)
8645 {
8646 	if (task->task_ios13extended_footprint_limit) {
8647 		return;
8648 	}
8649 	task_lock(task);
8650 	if (task->task_ios13extended_footprint_limit) {
8651 		task_unlock(task);
8652 		return;
8653 	}
8654 	task->task_ios13extended_footprint_limit = TRUE;
8655 	task_unlock(task);
8656 	memorystatus_act_on_ios13extended_footprint_entitlement(task->bsd_info);
8657 }
8658 #endif /* __arm64__ */
8659 
8660 static inline ledger_amount_t
8661 task_ledger_get_balance(
8662 	ledger_t        ledger,
8663 	int             ledger_idx)
8664 {
8665 	ledger_amount_t amount;
8666 	amount = 0;
8667 	ledger_get_balance(ledger, ledger_idx, &amount);
8668 	return amount;
8669 }
8670 
8671 /*
8672  * Gather the amount of memory counted in a task's footprint due to
8673  * being in a specific set of ledgers.
8674  */
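/*
 * Typical caller pattern (sketch; the ledger handle comes from the
 * owning task):
 *
 *	ledger_amount_t resident = 0, compressed = 0;
 *	task_ledgers_footprint(task->ledger, &resident, &compressed);
 *
 * The outputs are the sums of the per-tag resident and compressed
 * balances enumerated in the body.
 */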
8675 void
8676 task_ledgers_footprint(
8677 	ledger_t        ledger,
8678 	ledger_amount_t *ledger_resident,
8679 	ledger_amount_t *ledger_compressed)
8680 {
8681 	*ledger_resident = 0;
8682 	*ledger_compressed = 0;
8683 
8684 	/* purgeable non-volatile memory */
8685 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.purgeable_nonvolatile);
8686 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.purgeable_nonvolatile_compressed);
8687 
8688 	/* "default" tagged memory */
8689 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.tagged_footprint);
8690 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.tagged_footprint_compressed);
8691 
8692 	/* "network" currently never counts in the footprint... */
8693 
8694 	/* "media" tagged memory */
8695 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.media_footprint);
8696 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.media_footprint_compressed);
8697 
8698 	/* "graphics" tagged memory */
8699 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.graphics_footprint);
8700 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.graphics_footprint_compressed);
8701 
8702 	/* "neural" tagged memory */
8703 	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.neural_footprint);
8704 	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.neural_footprint_compressed);
8705 }
8706 
8707 #if CONFIG_MEMORYSTATUS
8708 /*
8709  * Credit any outstanding task dirty time to the ledger.
8710  * memstat_dirty_start is pushed forward to prevent any possibility of double
8711  * counting, making it safe to call this as often as necessary to ensure that
8712  * anyone reading the ledger gets up-to-date information.
8713  */
8714 void
8715 task_ledger_settle_dirty_time(task_t t)
8716 {
8717 	task_lock(t);
8718 
8719 	uint64_t start = t->memstat_dirty_start;
8720 	if (start) {
8721 		uint64_t now = mach_absolute_time();
8722 
8723 		uint64_t duration;
8724 		absolutetime_to_nanoseconds(now - start, &duration);
8725 
8726 		ledger_t ledger = get_task_ledger(t);
8727 		ledger_credit(ledger, task_ledgers.memorystatus_dirty_time, duration);
8728 
8729 		t->memstat_dirty_start = now;
8730 	}
8731 
8732 	task_unlock(t);
8733 }
8734 #endif /* CONFIG_MEMORYSTATUS */
8735 
8736 void
8737 task_set_memory_ownership_transfer(
8738 	task_t    task,
8739 	boolean_t value)
8740 {
8741 	task_lock(task);
8742 	task->task_can_transfer_memory_ownership = !!value;
8743 	task_unlock(task);
8744 }
8745 
8746 #if DEVELOPMENT || DEBUG
8747 
8748 void
8749 task_set_no_footprint_for_debug(task_t task, boolean_t value)
8750 {
8751 	task_lock(task);
8752 	task->task_no_footprint_for_debug = !!value;
8753 	task_unlock(task);
8754 }
8755 
8756 int
8757 task_get_no_footprint_for_debug(task_t task)
8758 {
8759 	return task->task_no_footprint_for_debug;
8760 }
8761 
8762 #endif /* DEVELOPMENT || DEBUG */
8763 
8764 void
8765 task_copy_vmobjects(task_t task, vm_object_query_t query, size_t len, size_t *num)
8766 {
8767 	vm_object_t find_vmo;
8768 	size_t size = 0;
8769 
8770 	task_objq_lock(task);
8771 	if (query != NULL) {
8772 		queue_iterate(&task->task_objq, find_vmo, vm_object_t, task_objq)
8773 		{
8774 			vm_object_query_t p = &query[size++];
8775 
8776 			/* make sure not to overrun the caller's buffer */
8777 			if (size * sizeof(vm_object_query_data_t) > len) {
8778 				--size;
8779 				break;
8780 			}
8781 
8782 			bzero(p, sizeof(*p));
8783 			p->object_id = (vm_object_id_t) VM_KERNEL_ADDRPERM(find_vmo);
8784 			p->virtual_size = find_vmo->internal ? find_vmo->vo_size : 0;
8785 			p->resident_size = find_vmo->resident_page_count * PAGE_SIZE;
8786 			p->wired_size = find_vmo->wired_page_count * PAGE_SIZE;
8787 			p->reusable_size = find_vmo->reusable_page_count * PAGE_SIZE;
8788 			p->vo_no_footprint = find_vmo->vo_no_footprint;
8789 			p->vo_ledger_tag = find_vmo->vo_ledger_tag;
8790 			p->purgable = find_vmo->purgable;
8791 
8792 			if (find_vmo->internal && find_vmo->pager_created && find_vmo->pager != NULL) {
8793 				p->compressed_size = vm_compressor_pager_get_count(find_vmo->pager) * PAGE_SIZE;
8794 			} else {
8795 				p->compressed_size = 0;
8796 			}
8797 		}
8798 	} else {
8799 		size = (size_t)task->task_owned_objects;
8800 	}
8801 	task_objq_unlock(task);
8802 
8803 	*num = size;
8804 }
8805 
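/*
 * task_get_owned_vmobjects() implements a two-call protocol: pass
 * buffer_size == 0 to learn the required size in *output_size, then call
 * again with an allocated buffer. A sketch of the pattern, which is
 * exactly what task_store_owned_vmobject_info() further below does:
 *
 *	size_t out_size = 0, entries = 0;
 *	task_get_owned_vmobjects(task, 0, NULL, &out_size, &entries);
 *	vmobject_list_output_t buf = kalloc_data(out_size, Z_WAITOK);
 *	task_get_owned_vmobjects(task, out_size, buf, &out_size, &entries);
 */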
8806 void
8807 task_get_owned_vmobjects(task_t task, size_t buffer_size, vmobject_list_output_t buffer, size_t* output_size, size_t* entries)
8808 {
8809 	assert(output_size);
8810 	assert(entries);
8811 
8812 	/* copy the vmobjects and vmobject data out of the task */
8813 	if (buffer_size == 0) {
8814 		task_copy_vmobjects(task, NULL, 0, entries);
8815 		*output_size = (*entries > 0) ? *entries * sizeof(vm_object_query_data_t) + sizeof(*buffer) : 0;
8816 	} else {
8817 		assert(buffer);
8818 		task_copy_vmobjects(task, &buffer->data[0], buffer_size - sizeof(*buffer), entries);
8819 		buffer->entries = (uint64_t)*entries;
8820 		*output_size = *entries * sizeof(vm_object_query_data_t) + sizeof(*buffer);
8821 	}
8822 }
8823 
8824 void
8825 task_store_owned_vmobject_info(task_t to_task, task_t from_task)
8826 {
8827 	size_t buffer_size;
8828 	vmobject_list_output_t buffer;
8829 	size_t output_size;
8830 	size_t entries;
8831 
8832 	assert(to_task != from_task);
8833 
8834 	/* get the size, allocate a buffer, and populate */
8835 	entries = 0;
8836 	output_size = 0;
8837 	task_get_owned_vmobjects(from_task, 0, NULL, &output_size, &entries);
8838 
8839 	if (output_size) {
8840 		buffer_size = output_size;
8841 		buffer = kalloc_data(buffer_size, Z_WAITOK);
8842 
8843 		if (buffer) {
8844 			entries = 0;
8845 			output_size = 0;
8846 
8847 			task_get_owned_vmobjects(from_task, buffer_size, buffer, &output_size, &entries);
8848 
8849 			if (entries) {
8850 				to_task->corpse_vmobject_list = buffer;
8851 				to_task->corpse_vmobject_list_size = buffer_size;
8852 			}
8853 		}
8854 	}
8855 }
8856 
8857 void
8858 task_set_filter_msg_flag(
8859 	task_t task,
8860 	boolean_t flag)
8861 {
8862 	assert(task != TASK_NULL);
8863 
8864 	task_lock(task);
8865 	if (flag) {
8866 		task->t_flags |= TF_FILTER_MSG;
8867 	} else {
8868 		task->t_flags &= ~TF_FILTER_MSG;
8869 	}
8870 	task_unlock(task);
8871 }
8872 
8873 boolean_t
8874 task_get_filter_msg_flag(
8875 	task_t task)
8876 {
8877 	uint32_t flags = 0;
8878 
8879 	if (!task) {
8880 		return false;
8881 	}
8882 
8883 	flags = os_atomic_load(&task->t_flags, relaxed);
8884 	return (flags & TF_FILTER_MSG) ? TRUE : FALSE;
8885 }
8886 bool
8887 task_is_exotic(
8888 	task_t task)
8889 {
8890 	if (task == TASK_NULL) {
8891 		return false;
8892 	}
8893 	return vm_map_is_exotic(get_task_map(task));
8894 }
8895 
8896 bool
8897 task_is_alien(
8898 	task_t task)
8899 {
8900 	if (task == TASK_NULL) {
8901 		return false;
8902 	}
8903 	return vm_map_is_alien(get_task_map(task));
8904 }
8905 
8906 
8907 
8908 #if CONFIG_MACF
8909 /* Set the filter mask for Mach traps. */
8910 void
8911 mac_task_set_mach_filter_mask(task_t task, uint8_t *maskptr)
8912 {
8913 	assert(task);
8914 
8915 	task_set_mach_trap_filter_mask(task, maskptr);
8916 }
8917 
8918 /* Set the filter mask for kobject msgs. */
8919 void
8920 mac_task_set_kobj_filter_mask(task_t task, uint8_t *maskptr)
8921 {
8922 	assert(task);
8923 
8924 	task_set_mach_kobj_filter_mask(task, maskptr);
8925 }
8926 
8927 /* Hook for mach trap/sc filter evaluation policy. */
8928 mac_task_mach_filter_cbfunc_t mac_task_mach_trap_evaluate = NULL;
8929 
8930 /* Hook for kobj message filter evaluation policy. */
8931 mac_task_kobj_filter_cbfunc_t mac_task_kobj_msg_evaluate = NULL;
8932 
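/*
 * Registration is one-shot per hook: a callback can be installed only
 * while the corresponding slot is still NULL, so a second policy that
 * tries to register receives KERN_FAILURE and the first registration
 * wins.
 */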
8933 /* Set the callback hooks for the filtering policy. */
8934 int
8935 mac_task_register_filter_callbacks(
8936 	const mac_task_mach_filter_cbfunc_t mach_cbfunc,
8937 	const mac_task_kobj_filter_cbfunc_t kobj_cbfunc)
8938 {
8939 	if (mach_cbfunc != NULL) {
8940 		if (mac_task_mach_trap_evaluate != NULL) {
8941 			return KERN_FAILURE;
8942 		}
8943 		mac_task_mach_trap_evaluate = mach_cbfunc;
8944 	}
8945 	if (kobj_cbfunc != NULL) {
8946 		if (mac_task_kobj_msg_evaluate != NULL) {
8947 			return KERN_FAILURE;
8948 		}
8949 		mac_task_kobj_msg_evaluate = kobj_cbfunc;
8950 	}
8951 
8952 	return KERN_SUCCESS;
8953 }
8954 #endif /* CONFIG_MACF */
8955 
8956 void
8957 task_transfer_mach_filter_bits(
8958 	task_t new_task,
8959 	task_t old_task)
8960 {
8961 #if CONFIG_MACF
8962 	/* Copy mach trap and kernel object mask pointers to new task. */
8963 	task_copy_filter_masks(new_task, old_task);
8964 #endif
8965 	/* If filter message flag is set then set it in the new task. */
8966 	if (task_get_filter_msg_flag(old_task)) {
8967 		new_task->t_flags |= TF_FILTER_MSG;
8968 	}
8969 }
8970 
8971 
8972 #if __has_feature(ptrauth_calls)
8973 /* Tasks holding this entitlement have all PAC violations delivered as fatal
8974  * exceptions, irrespective of the enable_pac_exception boot-arg value.
8975  */
8976 #define PAC_EXCEPTION_ENTITLEMENT "com.apple.private.pac.exception"
8977 /*
8978  * When the enable_pac_exception boot-arg is set to true, processes
8979  * can opt into non-fatal PAC exception delivery by setting
8980  * this entitlement.
8981  */
8982 #define SKIP_PAC_EXCEPTION_ENTITLEMENT "com.apple.private.skip.pac.exception"
8983 
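/*
 * Net effect of task_set_pac_exception_fatal_flag(), in order of
 * precedence:
 *
 *	1. enable_pac_exception + skip entitlement   -> not fatal
 *	2. pac exception entitlement                 -> fatal
 *	3. enable_pac_exception + platform binary    -> fatal
 *	4. otherwise                                 -> not fatal
 */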
8984 void
8985 task_set_pac_exception_fatal_flag(
8986 	task_t task)
8987 {
8988 	assert(task != TASK_NULL);
8989 	bool pac_entitlement = false;
8990 
8991 	if (enable_pac_exception && IOTaskHasEntitlement(task, SKIP_PAC_EXCEPTION_ENTITLEMENT)) {
8992 		return;
8993 	}
8994 
8995 	if (IOTaskHasEntitlement(task, PAC_EXCEPTION_ENTITLEMENT)) {
8996 		pac_entitlement = true;
8997 	}
8998 
8999 	task_lock(task);
9000 	if (pac_entitlement || (enable_pac_exception && task->t_flags & TF_PLATFORM)) {
9001 		task->t_flags |= TF_PAC_EXC_FATAL;
9002 	}
9003 	task_unlock(task);
9004 }
9005 
9006 bool
9007 task_is_pac_exception_fatal(
9008 	task_t task)
9009 {
9010 	uint32_t flags = 0;
9011 
9012 	assert(task != TASK_NULL);
9013 
9014 	flags = os_atomic_load(&task->t_flags, relaxed);
9015 	return (bool)(flags & TF_PAC_EXC_FATAL);
9016 }
9017 #endif /* __has_feature(ptrauth_calls) */
9018 
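/*
 * Opt a task into the TECS mitigation: when machine_csv(CPUVN_CI)
 * reports that the mitigation is required, mark the task with TF_TECS
 * and apply machine_tecs() to every existing thread.
 */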
9019 void
9020 task_set_tecs(task_t task)
9021 {
9022 	if (task == TASK_NULL) {
9023 		task = current_task();
9024 	}
9025 
9026 	if (!machine_csv(CPUVN_CI)) {
9027 		return;
9028 	}
9029 
9030 	LCK_MTX_ASSERT(&task->lock, LCK_MTX_ASSERT_NOTOWNED);
9031 
9032 	task_lock(task);
9033 
9034 	task->t_flags |= TF_TECS;
9035 
9036 	thread_t thread;
9037 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
9038 		machine_tecs(thread);
9039 	}
9040 	task_unlock(task);
9041 }
9042 
9043 kern_return_t
9044 task_test_sync_upcall(
9045 	task_t     task,
9046 	ipc_port_t send_port)
9047 {
9048 #if DEVELOPMENT || DEBUG
9049 	if (task != current_task() || !IPC_PORT_VALID(send_port)) {
9050 		return KERN_INVALID_ARGUMENT;
9051 	}
9052 
9053 	/* Block on sync kernel upcall on the given send port */
9054 	mach_test_sync_upcall(send_port);
9055 
9056 	ipc_port_release_send(send_port);
9057 	return KERN_SUCCESS;
9058 #else
9059 	(void)task;
9060 	(void)send_port;
9061 	return KERN_NOT_SUPPORTED;
9062 #endif
9063 }
9064 
9065 kern_return_t
9066 task_test_async_upcall_propagation(
9067 	task_t      task,
9068 	ipc_port_t  send_port,
9069 	int         qos,
9070 	int         iotier)
9071 {
9072 #if DEVELOPMENT || DEBUG
9073 	kern_return_t kr;
9074 
9075 	if (task != current_task() || !IPC_PORT_VALID(send_port)) {
9076 		return KERN_INVALID_ARGUMENT;
9077 	}
9078 
9079 	if (qos < THREAD_QOS_DEFAULT || qos > THREAD_QOS_USER_INTERACTIVE ||
9080 	    iotier < THROTTLE_LEVEL_START || iotier > THROTTLE_LEVEL_END) {
9081 		return KERN_INVALID_ARGUMENT;
9082 	}
9083 
9084 	struct thread_attr_for_ipc_propagation attr = {
9085 		.tafip_iotier = iotier,
9086 		.tafip_qos = qos
9087 	};
9088 
9089 	/* Apply propagate attr to port */
9090 	kr = ipc_port_propagate_thread_attr(send_port, attr);
9091 	if (kr != KERN_SUCCESS) {
9092 		return kr;
9093 	}
9094 
9095 	thread_enable_send_importance(current_thread(), TRUE);
9096 
9097 	/* Perform an async kernel upcall on the given send port */
9098 	mach_test_async_upcall(send_port);
9099 	thread_enable_send_importance(current_thread(), FALSE);
9100 
9101 	ipc_port_release_send(send_port);
9102 	return KERN_SUCCESS;
9103 #else
9104 	(void)task;
9105 	(void)send_port;
9106 	(void)qos;
9107 	(void)iotier;
9108 	return KERN_NOT_SUPPORTED;
9109 #endif
9110 }
9111 
9112 #if CONFIG_PROC_RESOURCE_LIMITS
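/*
 * Allocate the calling task's resource-limit fatal port and copy a send
 * right into the caller's IPC space, returning the new name (0 if the
 * port could not be allocated).
 */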
9113 mach_port_name_t
9114 current_task_get_fatal_port_name(void)
9115 {
9116 	mach_port_t task_fatal_port = MACH_PORT_NULL;
9117 	mach_port_name_t port_name = 0;
9118 
9119 	task_fatal_port = task_allocate_fatal_port();
9120 
9121 	if (task_fatal_port) {
9122 		ipc_object_copyout(current_space(), ip_to_object(task_fatal_port), MACH_MSG_TYPE_PORT_SEND,
9123 		    IPC_OBJECT_COPYOUT_FLAGS_NONE, NULL, NULL, &port_name);
9124 	}
9125 
9126 	return port_name;
9127 }
9128 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
9129 
9130 #if defined(__x86_64__)
9131 bool
9132 curtask_get_insn_copy_optout(void)
9133 {
9134 	bool optout;
9135 	task_t cur_task = current_task();
9136 
9137 	task_lock(cur_task);
9138 	optout = (cur_task->t_flags & TF_INSN_COPY_OPTOUT) ? true : false;
9139 	task_unlock(cur_task);
9140 
9141 	return optout;
9142 }
9143 
9144 void
9145 curtask_set_insn_copy_optout(void)
9146 {
9147 	task_t cur_task = current_task();
9148 
9149 	task_lock(cur_task);
9150 
9151 	cur_task->t_flags |= TF_INSN_COPY_OPTOUT;
9152 
9153 	thread_t thread;
9154 	queue_iterate(&cur_task->threads, thread, thread_t, task_threads) {
9155 		machine_thread_set_insn_copy_optout(thread);
9156 	}
9157 	task_unlock(cur_task);
9158 }
9159 #endif /* defined(__x86_64__) */
9160 
9161 void
9162 task_get_corpse_vmobject_list(task_t task, vmobject_list_output_t* list, size_t* list_size)
9163 {
9164 	assert(task);
9165 	assert(list_size);
9166 
9167 	*list = task->corpse_vmobject_list;
9168 	*list_size = (size_t)task->corpse_vmobject_list_size;
9169 }
9170 
9171 __abortlike
9172 static void
9173 panic_proc_ro_task_backref_mismatch(task_t t, proc_ro_t ro)
9174 {
9175 	panic("proc_ro->task backref mismatch: t=%p, ro=%p, "
9176 	    "proc_ro_task(ro)=%p", t, ro, proc_ro_task(ro));
9177 }
9178 
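/*
 * Validated accessor for the read-only proc data: the pointer must live
 * in the PROC_RO zone and its task back-reference must point at this
 * task, otherwise we take the __abortlike panic above. This defends
 * against forged or stale bsd_info_ro pointers.
 */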
9179 proc_ro_t
9180 task_get_ro(task_t t)
9181 {
9182 	proc_ro_t ro = (proc_ro_t)t->bsd_info_ro;
9183 
9184 	zone_require_ro(ZONE_ID_PROC_RO, sizeof(struct proc_ro), ro);
9185 	if (__improbable(proc_ro_task(ro) != t)) {
9186 		panic_proc_ro_task_backref_mismatch(t, ro);
9187 	}
9188 
9189 	return ro;
9190 }
9191