/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * File:	kern/task.c
 * Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
 *		David Black
 *
 * Task management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to [email protected] any
 * improvements that they make and grant CSL redistribution rights.
 *
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 * Copyright (c) 2005 SPARTA, Inc.
 */

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_priv.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <mach/mach_vm.h>
#include <mach/semaphore.h>
#include <mach/task_info.h>
#include <mach/task_inspect.h>
#include <mach/task_special_ports.h>
#include <mach/sdt.h>
#include <mach/mach_test_upcall.h>

#include <ipc/ipc_importance.h>
#include <ipc/ipc_types.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_hash.h>
#include <ipc/ipc_init.h>

#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/coalition.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/kern_cdata.h>
#include <kern/processor.h>
#include <kern/recount.h>
#include <kern/sched_prim.h>    /* for thread_wakeup */
#include <kern/ipc_tt.h>
#include <kern/host.h>
#include <kern/clock.h>
#include <kern/timer.h>
#include <kern/assert.h>
#include <kern/affinity.h>
#include <kern/exc_resource.h>
#include <kern/machine.h>
#include <kern/policy_internal.h>
#include <kern/restartable.h>
#include <kern/ipc_kobject.h>

#include <corpses/task_corpse.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif

#if CONFIG_PERVASIVE_CPI
#include <kern/monotonic.h>
#include <machine/monotonic.h>
#endif /* CONFIG_PERVASIVE_CPI */

#if CONFIG_EXCLAVES
#include "exclaves_boot.h"
#include "exclaves_resource.h"
146 #include "exclaves_boot.h"
147 #include "exclaves_inspection.h"
148 #include "exclaves_conclave.h"
149 #endif /* CONFIG_EXCLAVES */
150
151 #include <os/log.h>
152
153 #include <vm/pmap.h>
154 #include <vm/vm_map_xnu.h>
155 #include <vm/vm_kern_xnu.h> /* for kernel_map, ipc_kernel_map */
156 #include <vm/vm_pageout_xnu.h>
157 #include <vm/vm_protos.h>
158 #include <vm/vm_purgeable_xnu.h>
159 #include <vm/vm_compressor_pager_xnu.h>
160 #include <vm/vm_reclaim_xnu.h>
161 #include <vm/vm_compressor_xnu.h>
162
163 #include <sys/kdebug.h>
164 #include <sys/proc_ro.h>
165 #include <sys/resource.h>
166 #include <sys/signalvar.h> /* for coredump */
167 #include <sys/bsdtask_info.h>
168 #include <sys/kdebug_triage.h>
169 #include <sys/code_signing.h> /* for is_address_space_debugged */
170 #include <sys/reason.h>
171
172 /*
173 * Exported interfaces
174 */
175
176 #include <mach/task_server.h>
177 #include <mach/mach_host_server.h>
178 #include <mach/mach_port_server.h>
179
180 #include <vm/vm_shared_region_xnu.h>
181
182 #include <libkern/OSDebug.h>
183 #include <libkern/OSAtomic.h>
184 #include <libkern/section_keywords.h>
185
186 #include <mach-o/loader.h>
187 #include <kdp/kdp_dyld.h>
188
189 #include <kern/sfi.h> /* picks up ledger.h */
190
191 #if CONFIG_MACF
192 #include <security/mac_mach_internal.h>
193 #endif
194
195 #include <IOKit/IOBSD.h>
196 #include <kdp/processor_core.h>
197
198 #if defined (__arm64__)
199 #include <pexpert/arm64/board_config.h>
200 #endif
201
202 #include <string.h>
203
204 #if KPERF
205 extern int kpc_force_all_ctrs(task_t, int);
206 #endif
207
208 SECURITY_READ_ONLY_LATE(task_t) kernel_task;
209
210 int64_t next_taskuniqueid = 0;
211 const size_t task_alignment = _Alignof(struct task);
212 extern const size_t proc_alignment;
213 extern size_t proc_struct_size;
214 extern size_t proc_and_task_size;
215 size_t task_struct_size;
216
217 extern uint32_t ipc_control_port_options;
218
219 extern int large_corpse_count;
220
221 extern boolean_t proc_send_synchronous_EXC_RESOURCE(void *p);
222 extern boolean_t proc_is_simulated(const proc_t);
223
224 static void task_port_no_senders(ipc_port_t, mach_msg_type_number_t);
225 static void task_port_with_flavor_no_senders(ipc_port_t, mach_msg_type_number_t);
226 static void task_suspension_no_senders(ipc_port_t, mach_msg_type_number_t);
227 static inline void task_zone_init(void);
228
229 static void task_store_owned_vmobject_info(task_t to_task, task_t from_task);
230
231 #if CONFIG_EXCLAVES
232 static bool task_should_panic_on_exit_due_to_conclave_taint(task_t task);
233 static bool task_is_conclave_tainted(task_t task);
234 static void task_set_conclave_taint(task_t task);
235 kern_return_t task_crash_info_conclave_upcall(task_t task,
236 const struct conclave_sharedbuffer_t *shared_buf, uint32_t length);
237 #endif /* CONFIG_EXCLAVES */
238
239 IPC_KOBJECT_DEFINE(IKOT_TASK_NAME);
240 IPC_KOBJECT_DEFINE(IKOT_TASK_CONTROL,
241 .iko_op_no_senders = task_port_no_senders);
242 IPC_KOBJECT_DEFINE(IKOT_TASK_READ,
243 .iko_op_no_senders = task_port_with_flavor_no_senders);
244 IPC_KOBJECT_DEFINE(IKOT_TASK_INSPECT,
245 .iko_op_no_senders = task_port_with_flavor_no_senders);
246 IPC_KOBJECT_DEFINE(IKOT_TASK_RESUME,
247 .iko_op_no_senders = task_suspension_no_senders);
248
249 #if CONFIG_PROC_RESOURCE_LIMITS
250 static void task_fatal_port_no_senders(ipc_port_t, mach_msg_type_number_t);
251 static mach_port_t task_allocate_fatal_port(void);
252
253 IPC_KOBJECT_DEFINE(IKOT_TASK_FATAL,
254 .iko_op_stable = true,
255 .iko_op_no_senders = task_fatal_port_no_senders);
256
257 extern void task_id_token_set_port(task_id_token_t token, ipc_port_t port);
258 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
259
260 /* Flag set by core audio when audio is playing. Used to stifle EXC_RESOURCE generation when active. */
261 int audio_active = 0;
262
263 /*
264 * structure for tracking zone usage
265 * Used either one per task/thread for all zones or <per-task,per-zone>.
266 */
267 typedef struct zinfo_usage_store_t {
268 /* These fields may be updated atomically, and so must be 8 byte aligned */
269 uint64_t alloc __attribute__((aligned(8))); /* allocation counter */
270 uint64_t free __attribute__((aligned(8))); /* free counter */
271 } zinfo_usage_store_t;
272
273 /**
274 * Return codes related to diag threshold and memory limit
275 */
276 __options_decl(diagthreshold_check_return, int, {
277 THRESHOLD_IS_SAME_AS_LIMIT_FLAG_DISABLED = 0,
278 THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED = 1,
279 THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED = 2,
280 THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_ENABLED = 3,
281 });
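/*
 * The four values above encode two independent facts: whether the requested
 * threshold equals the memory limit, and whether the diagnostics threshold
 * flag is enabled.
 */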

/**
 * Return codes related to diag threshold and memory limit
 */
__options_decl(current_, int, {
    THRESHOLD_IS_SAME_AS_LIMIT = 0,
    THRESHOLD_IS_NOT_SAME_AS_LIMIT = 1
});

zinfo_usage_store_t tasks_tkm_private;
zinfo_usage_store_t tasks_tkm_shared;

/* A container to accumulate statistics for expired tasks */
expired_task_statistics_t dead_task_statistics;
LCK_SPIN_DECLARE_ATTR(dead_task_statistics_lock, &task_lck_grp, &task_lck_attr);

ledger_template_t task_ledger_template = NULL;

/* global lock for task_dyld_process_info_notify_{register, deregister, get_trap} */
LCK_GRP_DECLARE(g_dyldinfo_mtx_grp, "g_dyldinfo");
LCK_MTX_DECLARE(g_dyldinfo_mtx, &g_dyldinfo_mtx_grp);

SECURITY_READ_ONLY_LATE(struct _task_ledger_indices) task_ledgers __attribute__((used)) =
{.cpu_time = -1,
 .tkm_private = -1,
 .tkm_shared = -1,
 .phys_mem = -1,
 .wired_mem = -1,
 .internal = -1,
 .iokit_mapped = -1,
 .external = -1,
 .reusable = -1,
 .alternate_accounting = -1,
 .alternate_accounting_compressed = -1,
 .page_table = -1,
 .phys_footprint = -1,
 .internal_compressed = -1,
 .purgeable_volatile = -1,
 .purgeable_nonvolatile = -1,
 .purgeable_volatile_compressed = -1,
 .purgeable_nonvolatile_compressed = -1,
 .tagged_nofootprint = -1,
 .tagged_footprint = -1,
 .tagged_nofootprint_compressed = -1,
 .tagged_footprint_compressed = -1,
 .network_volatile = -1,
 .network_nonvolatile = -1,
 .network_volatile_compressed = -1,
 .network_nonvolatile_compressed = -1,
 .media_nofootprint = -1,
 .media_footprint = -1,
 .media_nofootprint_compressed = -1,
 .media_footprint_compressed = -1,
 .graphics_nofootprint = -1,
 .graphics_footprint = -1,
 .graphics_nofootprint_compressed = -1,
 .graphics_footprint_compressed = -1,
 .neural_nofootprint = -1,
 .neural_footprint = -1,
 .neural_nofootprint_compressed = -1,
 .neural_footprint_compressed = -1,
 .neural_nofootprint_total = -1,
 .platform_idle_wakeups = -1,
 .interrupt_wakeups = -1,
#if CONFIG_SCHED_SFI
 .sfi_wait_times = { 0 /* initialized at runtime */},
#endif /* CONFIG_SCHED_SFI */
 .cpu_time_billed_to_me = -1,
 .cpu_time_billed_to_others = -1,
 .physical_writes = -1,
 .logical_writes = -1,
 .logical_writes_to_external = -1,
 .pages_grabbed = -1,
 .pages_grabbed_kern = -1,
 .pages_grabbed_iopl = -1,
 .pages_grabbed_upl = -1,
#if CONFIG_FREEZE
 .frozen_to_swap = -1,
#endif /* CONFIG_FREEZE */
 .energy_billed_to_me = -1,
 .energy_billed_to_others = -1,
#if CONFIG_PHYS_WRITE_ACCT
 .fs_metadata_writes = -1,
#endif /* CONFIG_PHYS_WRITE_ACCT */
#if CONFIG_MEMORYSTATUS
 .memorystatus_dirty_time = -1,
#endif /* CONFIG_MEMORYSTATUS */
 .swapins = -1,
 .conclave_mem = -1, };

/* System sleep state */
boolean_t tasks_suspend_state;

__options_decl(send_exec_resource_is_fatal, bool, {
    IS_NOT_FATAL = false,
    IS_FATAL = true
});

__options_decl(send_exec_resource_is_diagnostics, bool, {
    IS_NOT_DIAGNOSTICS = false,
    IS_DIAGNOSTICS = true
});

__options_decl(send_exec_resource_is_warning, bool, {
    IS_NOT_WARNING = false,
    IS_WARNING = true
});

__options_decl(send_exec_resource_options_t, uint8_t, {
    EXEC_RESOURCE_FATAL = 0x01,
    EXEC_RESOURCE_DIAGNOSTIC = 0x02,
    EXEC_RESOURCE_WARNING = 0x04,
});

/**
 * Actions to take when a process has reached the memory limit or the diagnostics threshold limits
 */
static inline void task_process_crossed_limit_no_diag(task_t task, ledger_amount_t ledger_limit_size, bool memlimit_is_fatal, bool memlimit_is_active, send_exec_resource_is_warning is_warning);
#if DEBUG || DEVELOPMENT
static inline void task_process_crossed_limit_diag(ledger_amount_t ledger_limit_size);
#endif
void init_task_ledgers(void);
void task_footprint_exceeded(int warning, __unused const void *param0, __unused const void *param1);
void task_wakeups_rate_exceeded(int warning, __unused const void *param0, __unused const void *param1);
void task_io_rate_exceeded(int warning, const void *param0, __unused const void *param1);
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void);
void __attribute__((noinline)) PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, send_exec_resource_options_t exception_options);
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor);
#if CONFIG_PROC_RESOURCE_LIMITS
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task_t task, int current_size, int soft_limit, int hard_limit);
mach_port_name_t current_task_get_fatal_port_name(void);
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_KQWORKLOOPS(task_t task, int current_size, int soft_limit, int hard_limit);
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

kern_return_t task_suspend_internal(task_t);
kern_return_t task_resume_internal(task_t);
static kern_return_t task_start_halt_locked(task_t task, boolean_t should_mark_corpse);

extern kern_return_t iokit_task_terminate(task_t task, int phase);
extern void iokit_task_app_suspended_changed(task_t task);

extern kern_return_t exception_deliver(thread_t, exception_type_t, mach_exception_data_t, mach_msg_type_number_t, struct exception_action *, lck_mtx_t *);
extern void bsd_copythreadname(void *dst_uth, void *src_uth);
extern kern_return_t thread_resume(thread_t thread);

// Condition to include diag footprints
#define RESETTABLE_DIAG_FOOTPRINT_LIMITS ((DEBUG || DEVELOPMENT) && CONFIG_MEMORYSTATUS)

// Warn tasks when they hit 80% of their memory limit.
#define PHYS_FOOTPRINT_WARNING_LEVEL 80

#define TASK_WAKEUPS_MONITOR_DEFAULT_LIMIT      150 /* wakeups per second */
#define TASK_WAKEUPS_MONITOR_DEFAULT_INTERVAL   300 /* in seconds. */

/*
 * Level (in terms of percentage of the limit) at which the wakeups monitor triggers telemetry.
 *
 * (ie when the task's wakeups rate exceeds 70% of the limit, start taking user
 * stacktraces, aka micro-stackshots)
 */
#define TASK_WAKEUPS_MONITOR_DEFAULT_USTACKSHOTS_TRIGGER        70
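/*
 * With the default limit of 150 wakeups/sec and the 70% trigger above,
 * micro-stackshot telemetry therefore begins at roughly 105 wakeups/sec.
 */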

int task_wakeups_monitor_interval; /* In seconds. Time period over which wakeups rate is observed */
int task_wakeups_monitor_rate;     /* In hz. Maximum allowable wakeups per task before EXC_RESOURCE is sent */

unsigned int task_wakeups_monitor_ustackshots_trigger_pct; /* Percentage. Level at which we start gathering telemetry. */

TUNABLE(bool, disable_exc_resource, "disable_exc_resource", false); /* Global override to suppress EXC_RESOURCE for resource monitor violations. */
TUNABLE(bool, disable_exc_resource_during_audio, "disable_exc_resource_during_audio", true); /* Global override to suppress EXC_RESOURCE while audio is active */

ledger_amount_t max_task_footprint = 0;             /* Per-task limit on physical memory consumption in bytes */
unsigned int max_task_footprint_warning_level = 0;  /* Per-task limit warning percentage */

/*
 * Configure per-task memory limit.
 * The boot-arg is interpreted as Megabytes,
 * and takes precedence over the device tree.
 * Setting the boot-arg to 0 disables task limits.
 */
TUNABLE_DT_WRITEABLE(int, max_task_footprint_mb, "/defaults", "kern.max_task_pmem", "max_task_pmem", 0, TUNABLE_DT_NONE);
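/* Example: booting with "max_task_pmem=2048" caps each task at 2048 MB (2 GB) of physical footprint. */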

/* I/O Monitor Limits */
#define IOMON_DEFAULT_LIMIT             (20480ull)      /* MB of logical/physical I/O */
#define IOMON_DEFAULT_INTERVAL          (86400ull)      /* in seconds */
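/* The defaults above amount to 20480 MB (20 GB) of I/O per 86400-second (24-hour) window. */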

uint64_t task_iomon_limit_mb;           /* Per-task I/O monitor limit in MBs */
uint64_t task_iomon_interval_secs;      /* Per-task I/O monitor interval in secs */

#define IO_TELEMETRY_DEFAULT_LIMIT      (10ll * 1024ll * 1024ll)
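/* i.e. a 10 MiB default threshold between I/O telemetry microstackshots. */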
int64_t io_telemetry_limit;                     /* Threshold to take a microstackshot (0 indicates I/O telemetry is turned off) */
int64_t global_logical_writes_count = 0;        /* Global count for logical writes */
int64_t global_logical_writes_to_external_count = 0;    /* Global count for logical writes to external storage */
static boolean_t global_update_logical_writes(int64_t, int64_t*);

#if DEBUG || DEVELOPMENT
static diagthreshold_check_return task_check_memorythreshold_is_valid(task_t task, uint64_t new_limit, bool is_diagnostics_value);
#endif
#define TASK_MAX_THREAD_LIMIT 256

#if MACH_ASSERT
int pmap_ledgers_panic = 1;
int pmap_ledgers_panic_leeway = 3;
#endif /* MACH_ASSERT */

int task_max = CONFIG_TASK_MAX; /* Max number of tasks */

#if CONFIG_COREDUMP
int hwm_user_cores = 0; /* high watermark violations generate user core files */
#endif

#ifdef MACH_BSD
extern uint32_t proc_platform(const struct proc *);
extern uint32_t proc_sdk(struct proc *);
extern void proc_getexecutableuuid(void *, unsigned char *, unsigned long);
extern int proc_pid(struct proc *p);
extern int proc_selfpid(void);
extern struct proc *current_proc(void);
extern char *proc_name_address(struct proc *p);
extern uint64_t get_dispatchqueue_offset_from_proc(void *);
extern int kevent_proc_copy_uptrs(void *proc, uint64_t *buf, uint32_t bufsize);
extern void workq_proc_suspended(struct proc *p);
extern void workq_proc_resumed(struct proc *p);
extern struct proc *kernproc;

#if CONFIG_MEMORYSTATUS
extern void proc_memstat_skip(struct proc* p, boolean_t set);
extern void memorystatus_on_ledger_footprint_exceeded(int warning, bool memlimit_is_active, bool memlimit_is_fatal);
extern void memorystatus_log_exception(const int max_footprint_mb, bool memlimit_is_active, bool memlimit_is_fatal);
extern void memorystatus_log_diag_threshold_exception(const int diag_threshold_value);
extern boolean_t memorystatus_allowed_vm_map_fork(task_t task, bool *is_large);
extern uint64_t memorystatus_available_memory_internal(struct proc *p);

#if DEVELOPMENT || DEBUG
extern void memorystatus_abort_vm_map_fork(task_t);
#endif

#endif /* CONFIG_MEMORYSTATUS */

#endif /* MACH_BSD */

/* Boot-arg that turns on fatal pac exception delivery for all first-party apps */
static TUNABLE(bool, enable_pac_exception, "enable_pac_exception", false);

/*
 * Defaults for controllable EXC_GUARD behaviors
 *
 * Internal builds are fatal by default (except BRIDGE).
 * Create an alternate set of defaults for special processes by name.
 */
struct task_exc_guard_named_default {
    char *name;
    uint32_t behavior;
};
#define _TASK_EXC_GUARD_MP_CORPSE  (TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_MP_CORPSE)
#define _TASK_EXC_GUARD_MP_ONCE    (_TASK_EXC_GUARD_MP_CORPSE | TASK_EXC_GUARD_MP_ONCE)
#define _TASK_EXC_GUARD_MP_FATAL   (TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_MP_FATAL)

#define _TASK_EXC_GUARD_VM_CORPSE  (TASK_EXC_GUARD_VM_DELIVER | TASK_EXC_GUARD_VM_CORPSE)
#define _TASK_EXC_GUARD_VM_ONCE    (_TASK_EXC_GUARD_VM_CORPSE | TASK_EXC_GUARD_VM_ONCE)
#define _TASK_EXC_GUARD_VM_FATAL   (TASK_EXC_GUARD_VM_DELIVER | TASK_EXC_GUARD_VM_FATAL)

#define _TASK_EXC_GUARD_ALL_CORPSE (_TASK_EXC_GUARD_MP_CORPSE | _TASK_EXC_GUARD_VM_CORPSE)
#define _TASK_EXC_GUARD_ALL_ONCE   (_TASK_EXC_GUARD_MP_ONCE | _TASK_EXC_GUARD_VM_ONCE)
#define _TASK_EXC_GUARD_ALL_FATAL  (_TASK_EXC_GUARD_MP_FATAL | _TASK_EXC_GUARD_VM_FATAL)
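/*
 * Composition of the composites above: _CORPSE adds corpse generation to
 * delivery, _ONCE further restricts that to a single delivery, and _FATAL
 * pairs delivery with fatal treatment of the violation.
 */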

/* cannot turn off FATAL and DELIVER bit if set */
uint32_t task_exc_guard_no_unset_mask = TASK_EXC_GUARD_MP_FATAL | TASK_EXC_GUARD_VM_FATAL |
    TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_VM_DELIVER;
/* cannot turn on ONCE bit if unset */
uint32_t task_exc_guard_no_set_mask = TASK_EXC_GUARD_MP_ONCE | TASK_EXC_GUARD_VM_ONCE;

#if !defined(XNU_TARGET_OS_BRIDGE)

uint32_t task_exc_guard_default = _TASK_EXC_GUARD_ALL_FATAL;
uint32_t task_exc_guard_config_mask = TASK_EXC_GUARD_MP_ALL | TASK_EXC_GUARD_VM_ALL;
/*
 * These "by-process-name" default overrides are intended to be a short-term fix to
 * quickly get over races between changes introducing new EXC_GUARD raising behaviors
 * in some process and a change in default behavior for same. We should ship with
 * these lists empty (by fixing the bugs, or explicitly changing the task's EXC_GUARD
 * exception behavior via task_set_exc_guard_behavior()).
 *
 * XXX Remember to add/remove TASK_EXC_GUARD_HONOR_NAMED_DEFAULTS back to
 * task_exc_guard_default when transitioning this list between empty and
 * non-empty.
 */
static struct task_exc_guard_named_default task_exc_guard_named_defaults[] = {};

#else /* !defined(XNU_TARGET_OS_BRIDGE) */

uint32_t task_exc_guard_default = _TASK_EXC_GUARD_ALL_ONCE;
uint32_t task_exc_guard_config_mask = TASK_EXC_GUARD_MP_ALL | TASK_EXC_GUARD_VM_ALL;
static struct task_exc_guard_named_default task_exc_guard_named_defaults[] = {};

#endif /* !defined(XNU_TARGET_OS_BRIDGE) */

/* Forwards */

static bool task_hold_locked(task_t task);
static void task_wait_locked(task_t task, boolean_t until_not_runnable);
static void task_release_locked(task_t task);
extern task_t proc_get_task_raw(void *proc);
extern void task_ref_hold_proc_task_struct(task_t task);
extern void task_release_proc_task_struct(task_t task, proc_ro_t proc_ro);

static void task_synchronizer_destroy_all(task_t task);
static os_ref_count_t
task_add_turnstile_watchports_locked(
    task_t          task,
    struct task_watchports *watchports,
    struct task_watchport_elem **previous_elem_array,
    ipc_port_t      *portwatch_ports,
    uint32_t        portwatch_count);

static os_ref_count_t
task_remove_turnstile_watchports_locked(
    task_t          task,
    struct task_watchports *watchports,
    ipc_port_t      *port_freelist);

static struct task_watchports *
task_watchports_alloc_init(
    task_t          task,
    thread_t        thread,
    uint32_t        count);

static void
task_watchports_deallocate(
    struct task_watchports *watchports);

void
task_set_64bit(
    task_t          task,
    boolean_t       is_64bit,
    boolean_t       is_64bit_data)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__arm64__)
    thread_t thread;
#endif /* defined(__i386__) || defined(__x86_64__) || defined(__arm64__) */

    task_lock(task);

    /*
     * Switching to/from 64-bit address spaces
     */
    if (is_64bit) {
        if (!task_has_64Bit_addr(task)) {
            task_set_64Bit_addr(task);
        }
    } else {
        if (task_has_64Bit_addr(task)) {
            task_clear_64Bit_addr(task);
        }
    }

    /*
     * Switching to/from 64-bit register state.
     */
    if (is_64bit_data) {
        if (task_has_64Bit_data(task)) {
            goto out;
        }

        task_set_64Bit_data(task);
    } else {
        if (!task_has_64Bit_data(task)) {
            goto out;
        }

        task_clear_64Bit_data(task);
    }

    /* FIXME: On x86, the thread save state flavor can diverge from the
     * task's 64-bit feature flag due to the 32-bit/64-bit register save
     * state dichotomy. Since we can be pre-empted in this interval,
     * certain routines may observe the thread as being in an inconsistent
     * state with respect to its task's 64-bitness.
     */

#if defined(__x86_64__) || defined(__arm64__)
    queue_iterate(&task->threads, thread, thread_t, task_threads) {
        thread_mtx_lock(thread);
        machine_thread_switch_addrmode(thread);
        thread_mtx_unlock(thread);
    }
#endif /* defined(__x86_64__) || defined(__arm64__) */

out:
    task_unlock(task);
}

bool
task_get_64bit_addr(task_t task)
{
    return task_has_64Bit_addr(task);
}

bool
task_get_64bit_data(task_t task)
{
    return task_has_64Bit_data(task);
}

void
task_set_platform_binary(
    task_t          task,
    boolean_t       is_platform)
{
    if (is_platform) {
        task_ro_flags_set(task, TFRO_PLATFORM);
    } else {
        task_ro_flags_clear(task, TFRO_PLATFORM);
    }
}

#if XNU_TARGET_OS_OSX
#if DEVELOPMENT || DEBUG
SECURITY_READ_ONLY_LATE(bool) AMFI_bootarg_disable_mach_hardening = false;
#endif /* DEVELOPMENT || DEBUG */

void
task_disable_mach_hardening(task_t task)
{
    task_ro_flags_set(task, TFRO_MACH_HARDENING_OPT_OUT);
}

bool
task_opted_out_mach_hardening(task_t task)
{
    return task_ro_flags_get(task) & TFRO_MACH_HARDENING_OPT_OUT;
}
#endif /* XNU_TARGET_OS_OSX */

/*
 * Use the `task_is_hardened_binary` macro below
 * when applying new security policies.
 *
 * Kernel security policies now generally apply to
 * "hardened binaries" - which are platform binaries, and
 * third party binaries who adopt hardened runtime on ios.
 */
boolean_t
task_get_platform_binary(task_t task)
{
    return (task_ro_flags_get(task) & TFRO_PLATFORM) != 0;
}

static boolean_t
task_get_hardened_runtime(task_t task)
{
    return (task_ro_flags_get(task) & TFRO_HARDENED) != 0;
}

boolean_t
task_is_hardened_binary(task_t task)
{
    return task_get_platform_binary(task) ||
           task_get_hardened_runtime(task);
}

void
task_set_hardened_runtime(
    task_t task,
    bool is_hardened)
{
    if (is_hardened) {
        task_ro_flags_set(task, TFRO_HARDENED);
    } else {
        task_ro_flags_clear(task, TFRO_HARDENED);
    }
}

boolean_t
task_is_a_corpse(task_t task)
{
    return (task_ro_flags_get(task) & TFRO_CORPSE) != 0;
}

boolean_t
task_is_ipc_active(task_t task)
{
    return task->ipc_active;
}

void
task_set_corpse(task_t task)
{
    return task_ro_flags_set(task, TFRO_CORPSE);
}

void
task_set_immovable_pinned(task_t task)
{
    ipc_task_set_immovable_pinned(task);
}

/*
 * Set or clear per-task TF_CA_CLIENT_WI flag according to specified argument.
 * Returns "false" if flag is already set, and "true" in other cases.
 */
bool
task_set_ca_client_wi(
    task_t task,
    boolean_t set_or_clear)
{
    bool ret = true;
    task_lock(task);
    if (set_or_clear) {
        /* Tasks can have only one CA_CLIENT work interval */
        if (task->t_flags & TF_CA_CLIENT_WI) {
            ret = false;
        } else {
            task->t_flags |= TF_CA_CLIENT_WI;
        }
    } else {
        task->t_flags &= ~TF_CA_CLIENT_WI;
    }
    task_unlock(task);
    return ret;
}

/*
 * task_set_dyld_info() is called at most three times:
 * 1) at task struct creation, to set addr/size to zero.
 * 2) in mach_loader.c, to set the location of the __all_image_info section in the loaded dyld
 * 3) from dyld itself, to update the location of all_image_info
 * For security, any calls after that are ignored. The TF_DYLD_ALL_IMAGE_FINAL bit is used to determine state.
 */
kern_return_t
task_set_dyld_info(
    task_t            task,
    mach_vm_address_t addr,
    mach_vm_size_t    size,
    bool              finalize_value)
{
    mach_vm_address_t end;
    if (os_add_overflow(addr, size, &end)) {
        return KERN_FAILURE;
    }

    task_lock(task);
    /* don't accept updates if all_image_info_addr is final */
    if ((task->t_flags & TF_DYLD_ALL_IMAGE_FINAL) == 0) {
        bool inputNonZero = ((addr != 0) || (size != 0));
        bool currentNonZero = ((task->all_image_info_addr != 0) || (task->all_image_info_size != 0));
        task->all_image_info_addr = addr;
        task->all_image_info_size = size;
        /* can only change from a non-zero value to another non-zero once */
        if ((inputNonZero && currentNonZero) || finalize_value) {
            task->t_flags |= TF_DYLD_ALL_IMAGE_FINAL;
        }
        task_unlock(task);
        return KERN_SUCCESS;
    } else {
        task_unlock(task);
        return KERN_FAILURE;
    }
}

bool
task_donates_own_pages(
    task_t task)
{
    return task->donates_own_pages;
}

void
task_set_mach_header_address(
    task_t task,
    mach_vm_address_t addr)
{
    task_lock(task);
    task->mach_header_vm_address = addr;
    task_unlock(task);
}

void
task_bank_reset(__unused task_t task)
{
    if (task->bank_context != NULL) {
        bank_task_destroy(task);
    }
}

/*
 * NOTE: This should only be called when the P_LINTRANSIT
 *       flag is set (the proc_trans lock is held) on the
 *       proc associated with the task.
 */
void
task_bank_init(__unused task_t task)
{
    if (task->bank_context != NULL) {
        panic("Task bank init called with non null bank context for task: %p and bank_context: %p", task, task->bank_context);
    }
    bank_task_initialize(task);
}

void
task_set_did_exec_flag(task_t task)
{
    task->t_procflags |= TPF_DID_EXEC;
}

void
task_clear_exec_copy_flag(task_t task)
{
    task->t_procflags &= ~TPF_EXEC_COPY;
}

event_t
task_get_return_wait_event(task_t task)
{
    return (event_t)&task->returnwait_inheritor;
}
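
/*
 * Wake the other side of the return-wait handshake. TCRW_CLEAR_INITIAL_WAIT
 * just issues the bootstrap wakeup; TCRW_CLEAR_FINAL_WAIT clears
 * TRW_LRETURNWAIT under the space lock and, if a waiter is parked
 * (TRW_LRETURNWAITER), wakes all threads on the turnstile and completes the
 * inheritor hand-off.
 */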
void
task_clear_return_wait(task_t task, uint32_t flags)
{
    if (flags & TCRW_CLEAR_INITIAL_WAIT) {
        thread_wakeup(task_get_return_wait_event(task));
    }

    if (flags & TCRW_CLEAR_FINAL_WAIT) {
        is_write_lock(task->itk_space);

        task->t_returnwaitflags &= ~TRW_LRETURNWAIT;
        task->returnwait_inheritor = NULL;

        if (flags & TCRW_CLEAR_EXEC_COMPLETE) {
            task->t_returnwaitflags &= ~TRW_LEXEC_COMPLETE;
        }

        if (task->t_returnwaitflags & TRW_LRETURNWAITER) {
            struct turnstile *turnstile = turnstile_prepare_hash((uintptr_t) task_get_return_wait_event(task),
                TURNSTILE_ULOCK);

            waitq_wakeup64_all(&turnstile->ts_waitq,
                CAST_EVENT64_T(task_get_return_wait_event(task)),
                THREAD_AWAKENED, WAITQ_UPDATE_INHERITOR);

            turnstile_update_inheritor_complete(turnstile, TURNSTILE_INTERLOCK_HELD);

            turnstile_complete_hash((uintptr_t) task_get_return_wait_event(task), TURNSTILE_ULOCK);
            turnstile_cleanup();
            task->t_returnwaitflags &= ~TRW_LRETURNWAITER;
        }
        is_write_unlock(task->itk_space);
    }
}
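
/*
 * Park the calling thread (the main thread of a newly spawned or exec'ed
 * task) on the return-wait turnstile until task_clear_return_wait() clears
 * TRW_LRETURNWAIT, pushing priority on the recorded returnwait_inheritor
 * while blocked.
 */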
void __attribute__((noreturn))
task_wait_to_return(void)
{
    task_t task = current_task();
    uint8_t returnwaitflags;

    is_write_lock(task->itk_space);

    if (task->t_returnwaitflags & TRW_LRETURNWAIT) {
        struct turnstile *turnstile = turnstile_prepare_hash((uintptr_t) task_get_return_wait_event(task),
            TURNSTILE_ULOCK);

        do {
            task->t_returnwaitflags |= TRW_LRETURNWAITER;
            turnstile_update_inheritor(turnstile, task->returnwait_inheritor,
                (TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_THREAD));

            waitq_assert_wait64(&turnstile->ts_waitq,
                CAST_EVENT64_T(task_get_return_wait_event(task)),
                THREAD_UNINT, TIMEOUT_WAIT_FOREVER);

            is_write_unlock(task->itk_space);

            turnstile_update_inheritor_complete(turnstile, TURNSTILE_INTERLOCK_NOT_HELD);

            thread_block(THREAD_CONTINUE_NULL);

            is_write_lock(task->itk_space);
        } while (task->t_returnwaitflags & TRW_LRETURNWAIT);

        turnstile_complete_hash((uintptr_t) task_get_return_wait_event(task), TURNSTILE_ULOCK);
    }

    returnwaitflags = task->t_returnwaitflags;
    is_write_unlock(task->itk_space);
    turnstile_cleanup();

    /**
     * In posix_spawn() path, process_signature() is guaranteed to complete
     * when the "second wait" is cleared. Call out to execute whatever depends
     * on the result of that before we return to EL0.
     */
    task_post_signature_processing_hook(task);
#if CONFIG_MACF
    /*
     * Before jumping to userspace and allowing this process
     * to execute any code, make sure its credentials are cached,
     * and notify any interested parties.
     */
    extern void current_cached_proc_cred_update(void);

    current_cached_proc_cred_update();
    if (returnwaitflags & TRW_LEXEC_COMPLETE) {
        mac_proc_notify_exec_complete(current_proc());
    }
#endif

    thread_bootstrap_return();
}

/**
 * A callout by task_wait_to_return on the main thread of a newly spawned task
 * after process_signature() is completed by the parent task.
 *
 * @param task The newly spawned task
 */
void
task_post_signature_processing_hook(task_t task)
{
    ml_task_post_signature_processing_hook(task);
}

boolean_t
task_is_exec_copy(task_t task)
{
    return task_is_exec_copy_internal(task);
}

boolean_t
task_did_exec(task_t task)
{
    return task_did_exec_internal(task);
}

boolean_t
task_is_active(task_t task)
{
    return task->active;
}

boolean_t
task_is_halting(task_t task)
{
    return task->halting;
}

void
task_init(void)
{
    if (max_task_footprint_mb != 0) {
#if CONFIG_MEMORYSTATUS
        if (max_task_footprint_mb < 50) {
            printf("Warning: max_task_pmem %d below minimum.\n",
                max_task_footprint_mb);
            max_task_footprint_mb = 50;
        }
        printf("Limiting task physical memory footprint to %d MB\n",
            max_task_footprint_mb);

        max_task_footprint = (ledger_amount_t)max_task_footprint_mb * 1024 * 1024;      // Convert MB to bytes

        /*
         * Configure the per-task memory limit warning level.
         * This is computed as a percentage.
         */
        max_task_footprint_warning_level = 0;

        if (max_mem < 0x40000000) {
            /*
             * On devices with < 1GB of memory:
             * -- set warnings to 50MB below the per-task limit.
             */
            if (max_task_footprint_mb > 50) {
                max_task_footprint_warning_level = ((max_task_footprint_mb - 50) * 100) / max_task_footprint_mb;
            }
        } else {
            /*
             * On devices with >= 1GB of memory:
             * -- set warnings to 100MB below the per-task limit.
             */
            if (max_task_footprint_mb > 100) {
                max_task_footprint_warning_level = ((max_task_footprint_mb - 100) * 100) / max_task_footprint_mb;
            }
        }
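        /*
         * Example: on a >= 1GB device with max_task_pmem=500, the level
         * computes to ((500 - 100) * 100) / 500 = 80, i.e. warn at 400MB.
         */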

        /*
         * Never allow warning level to land below the default.
         */
        if (max_task_footprint_warning_level < PHYS_FOOTPRINT_WARNING_LEVEL) {
            max_task_footprint_warning_level = PHYS_FOOTPRINT_WARNING_LEVEL;
        }

        printf("Limiting task physical memory warning to %d%%\n", max_task_footprint_warning_level);

#else
        printf("Warning: max_task_pmem specified, but jetsam not configured; ignoring.\n");
#endif /* CONFIG_MEMORYSTATUS */
    }

#if DEVELOPMENT || DEBUG
    PE_parse_boot_argn("task_exc_guard_default",
        &task_exc_guard_default,
        sizeof(task_exc_guard_default));
#endif /* DEVELOPMENT || DEBUG */

#if CONFIG_COREDUMP
    if (!PE_parse_boot_argn("hwm_user_cores", &hwm_user_cores,
        sizeof(hwm_user_cores))) {
        hwm_user_cores = 0;
    }
#endif

    proc_init_cpumon_params();

    if (!PE_parse_boot_argn("task_wakeups_monitor_rate", &task_wakeups_monitor_rate, sizeof(task_wakeups_monitor_rate))) {
        task_wakeups_monitor_rate = TASK_WAKEUPS_MONITOR_DEFAULT_LIMIT;
    }

    if (!PE_parse_boot_argn("task_wakeups_monitor_interval", &task_wakeups_monitor_interval, sizeof(task_wakeups_monitor_interval))) {
        task_wakeups_monitor_interval = TASK_WAKEUPS_MONITOR_DEFAULT_INTERVAL;
    }

    if (!PE_parse_boot_argn("task_wakeups_monitor_ustackshots_trigger_pct", &task_wakeups_monitor_ustackshots_trigger_pct,
        sizeof(task_wakeups_monitor_ustackshots_trigger_pct))) {
        task_wakeups_monitor_ustackshots_trigger_pct = TASK_WAKEUPS_MONITOR_DEFAULT_USTACKSHOTS_TRIGGER;
    }

    if (!PE_parse_boot_argn("task_iomon_limit_mb", &task_iomon_limit_mb, sizeof(task_iomon_limit_mb))) {
        task_iomon_limit_mb = IOMON_DEFAULT_LIMIT;
    }

    if (!PE_parse_boot_argn("task_iomon_interval_secs", &task_iomon_interval_secs, sizeof(task_iomon_interval_secs))) {
        task_iomon_interval_secs = IOMON_DEFAULT_INTERVAL;
    }

    if (!PE_parse_boot_argn("io_telemetry_limit", &io_telemetry_limit, sizeof(io_telemetry_limit))) {
        io_telemetry_limit = IO_TELEMETRY_DEFAULT_LIMIT;
    }

    /*
     * If we have coalitions, coalition_init() will call init_task_ledgers() as it
     * sets up the ledgers for the default coalition. If we don't have coalitions,
     * then we have to call it now.
     */
#if CONFIG_COALITIONS
    assert(task_ledger_template);
#else /* CONFIG_COALITIONS */
    init_task_ledgers();
#endif /* CONFIG_COALITIONS */

    task_ref_init();
    task_zone_init();

#ifdef __LP64__
    boolean_t is_64bit = TRUE;
#else
    boolean_t is_64bit = FALSE;
#endif

    kernproc = (struct proc *)zalloc_flags(proc_task_zone, Z_WAITOK | Z_ZERO);
    kernel_task = proc_get_task_raw(kernproc);

    /*
     * Create the kernel task as the first task.
     */
    if (task_create_internal(TASK_NULL, NULL, NULL, FALSE, is_64bit,
        is_64bit, TF_NONE, TF_NONE, TPF_NONE, TWF_NONE, kernel_task) != KERN_SUCCESS) {
        panic("task_init");
    }


    vm_map_setup(get_task_map(kernel_task), kernel_task);
    ipc_task_enable(kernel_task);

#if defined(HAS_APPLE_PAC)
    kernel_task->rop_pid = ml_default_rop_pid();
    kernel_task->jop_pid = ml_default_jop_pid();
    // kernel_task never runs at EL0, but machine_thread_state_convert_from/to_user() relies on
    // disable_user_jop to be false for kernel threads (e.g. in exception delivery on thread_exception_daemon)
    ml_task_set_disable_user_jop(kernel_task, FALSE);
#endif

    vm_map_deallocate(kernel_task->map);
    kernel_task->map = kernel_map;
}

static inline void
task_zone_init(void)
{
    proc_struct_size = roundup(proc_struct_size, task_alignment);
    task_struct_size = roundup(sizeof(struct task), proc_alignment);
    proc_and_task_size = proc_struct_size + task_struct_size;
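    /*
     * Each zone element co-allocates the proc and task: the proc struct,
     * rounded up to task alignment, is immediately followed by the task
     * struct, so both are reachable from a single allocation.
     */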

    proc_task_zone = zone_create_ext("proc_task", proc_and_task_size,
        ZC_ZFREE_CLEARMEM | ZC_SEQUESTER, ZONE_ID_PROC_TASK, NULL); /* sequester is needed for proc_rele() */
}

/*
 * Task ledgers
 * ------------
 *
 * phys_footprint
 *   Physical footprint: This is the sum of:
 *     + (internal - alternate_accounting)
 *     + (internal_compressed - alternate_accounting_compressed)
 *     + iokit_mapped
 *     + purgeable_nonvolatile
 *     + purgeable_nonvolatile_compressed
 *     + page_table
 *
 * internal
 *   The task's anonymous memory, which on iOS is always resident.
 *
 * internal_compressed
 *   Amount of this task's internal memory which is held by the compressor.
 *   Such memory is no longer actually resident for the task [i.e., resident in its pmap],
 *   and could be either decompressed back into memory, or paged out to storage, depending
 *   on our implementation.
 *
 * iokit_mapped
 *   IOKit mappings: The total size of all IOKit mappings in this task [regardless of
 *   clean/dirty or internal/external state].
 *
 * alternate_accounting
 *   The number of internal dirty pages which are part of IOKit mappings. By definition, these pages
 *   are counted in both internal *and* iokit_mapped, so we must subtract them from the total to avoid
 *   double counting.
 *
 * pages_grabbed
 *   pages_grabbed counts all page grabs in a task. It is also broken out into three subtypes
 *   which track UPL, IOPL and Kernel page grabs.
 */
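/*
 * Illustrative example: a task with 100MB internal (20MB of it
 * alternate_accounting), 30MB internal_compressed, 40MB iokit_mapped,
 * 10MB purgeable_nonvolatile and 5MB of page_table has
 * phys_footprint = (100 - 20) + (30 - 0) + 40 + 10 + 0 + 5 = 165MB.
 */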
1215 void
init_task_ledgers(void)1216 init_task_ledgers(void)
1217 {
1218 ledger_template_t t;
1219
1220 assert(task_ledger_template == NULL);
1221 assert(kernel_task == TASK_NULL);
1222
1223 #if MACH_ASSERT
1224 PE_parse_boot_argn("pmap_ledgers_panic",
1225 &pmap_ledgers_panic,
1226 sizeof(pmap_ledgers_panic));
1227 PE_parse_boot_argn("pmap_ledgers_panic_leeway",
1228 &pmap_ledgers_panic_leeway,
1229 sizeof(pmap_ledgers_panic_leeway));
1230 #endif /* MACH_ASSERT */
1231
1232 if ((t = ledger_template_create("Per-task ledger")) == NULL) {
1233 panic("couldn't create task ledger template");
1234 }
1235
1236 task_ledgers.cpu_time = ledger_entry_add(t, "cpu_time", "sched", "ns");
1237 task_ledgers.tkm_private = ledger_entry_add(t, "tkm_private",
1238 "physmem", "bytes");
1239 task_ledgers.tkm_shared = ledger_entry_add(t, "tkm_shared", "physmem",
1240 "bytes");
1241 task_ledgers.phys_mem = ledger_entry_add(t, "phys_mem", "physmem",
1242 "bytes");
1243 task_ledgers.wired_mem = ledger_entry_add(t, "wired_mem", "physmem",
1244 "bytes");
1245 task_ledgers.conclave_mem = ledger_entry_add_with_flags(t, "conclave_mem", "physmem", "bytes",
1246 LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE | LEDGER_ENTRY_ALLOW_DEBIT);
1247 task_ledgers.internal = ledger_entry_add(t, "internal", "physmem",
1248 "bytes");
1249 task_ledgers.iokit_mapped = ledger_entry_add_with_flags(t, "iokit_mapped", "mappings",
1250 "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1251 task_ledgers.alternate_accounting = ledger_entry_add_with_flags(t, "alternate_accounting", "physmem",
1252 "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1253 task_ledgers.alternate_accounting_compressed = ledger_entry_add_with_flags(t, "alternate_accounting_compressed", "physmem",
1254 "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1255 task_ledgers.page_table = ledger_entry_add_with_flags(t, "page_table", "physmem",
1256 "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1257 task_ledgers.phys_footprint = ledger_entry_add(t, "phys_footprint", "physmem",
1258 "bytes");
1259 task_ledgers.internal_compressed = ledger_entry_add(t, "internal_compressed", "physmem",
1260 "bytes");
1261 task_ledgers.reusable = ledger_entry_add(t, "reusable", "physmem", "bytes");
1262 task_ledgers.external = ledger_entry_add(t, "external", "physmem", "bytes");
1263 task_ledgers.purgeable_volatile = ledger_entry_add_with_flags(t, "purgeable_volatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1264 task_ledgers.purgeable_nonvolatile = ledger_entry_add_with_flags(t, "purgeable_nonvolatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1265 task_ledgers.purgeable_volatile_compressed = ledger_entry_add_with_flags(t, "purgeable_volatile_compress", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1266 task_ledgers.purgeable_nonvolatile_compressed = ledger_entry_add_with_flags(t, "purgeable_nonvolatile_compress", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1267 task_ledgers.pages_grabbed = ledger_entry_add_with_flags(t, "pages_grabbed", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1268 task_ledgers.pages_grabbed_kern = ledger_entry_add_with_flags(t, "pages_grabbed_kern", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1269 task_ledgers.pages_grabbed_iopl = ledger_entry_add_with_flags(t, "pages_grabbed_iopl", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1270 task_ledgers.pages_grabbed_upl = ledger_entry_add_with_flags(t, "pages_grabbed_upl", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1271 task_ledgers.tagged_nofootprint = ledger_entry_add_with_flags(t, "tagged_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1272 task_ledgers.tagged_footprint = ledger_entry_add_with_flags(t, "tagged_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1273 task_ledgers.tagged_nofootprint_compressed = ledger_entry_add_with_flags(t, "tagged_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1274 task_ledgers.tagged_footprint_compressed = ledger_entry_add_with_flags(t, "tagged_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1275 task_ledgers.network_volatile = ledger_entry_add_with_flags(t, "network_volatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1276 task_ledgers.network_nonvolatile = ledger_entry_add_with_flags(t, "network_nonvolatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1277 task_ledgers.network_volatile_compressed = ledger_entry_add_with_flags(t, "network_volatile_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1278 task_ledgers.network_nonvolatile_compressed = ledger_entry_add_with_flags(t, "network_nonvolatile_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1279 task_ledgers.media_nofootprint = ledger_entry_add_with_flags(t, "media_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1280 task_ledgers.media_footprint = ledger_entry_add_with_flags(t, "media_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1281 task_ledgers.media_nofootprint_compressed = ledger_entry_add_with_flags(t, "media_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1282 task_ledgers.media_footprint_compressed = ledger_entry_add_with_flags(t, "media_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1283 task_ledgers.graphics_nofootprint = ledger_entry_add_with_flags(t, "graphics_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1284 task_ledgers.graphics_footprint = ledger_entry_add_with_flags(t, "graphics_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1285 task_ledgers.graphics_nofootprint_compressed = ledger_entry_add_with_flags(t, "graphics_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1286 task_ledgers.graphics_footprint_compressed = ledger_entry_add_with_flags(t, "graphics_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1287 task_ledgers.neural_nofootprint = ledger_entry_add_with_flags(t, "neural_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1288 task_ledgers.neural_footprint = ledger_entry_add_with_flags(t, "neural_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1289 task_ledgers.neural_nofootprint_compressed = ledger_entry_add_with_flags(t, "neural_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1290 task_ledgers.neural_footprint_compressed = ledger_entry_add_with_flags(t, "neural_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1291 task_ledgers.neural_nofootprint_total = ledger_entry_add(t, "neural_nofootprint_total", "physmem", "bytes");
1292
1293 #if CONFIG_FREEZE
1294 task_ledgers.frozen_to_swap = ledger_entry_add(t, "frozen_to_swap", "physmem", "bytes");
1295 #endif /* CONFIG_FREEZE */
1296
1297 task_ledgers.platform_idle_wakeups = ledger_entry_add(t, "platform_idle_wakeups", "power",
1298 "count");
1299 task_ledgers.interrupt_wakeups = ledger_entry_add(t, "interrupt_wakeups", "power",
1300 "count");
1301
1302 #if CONFIG_SCHED_SFI
1303 sfi_class_id_t class_id, ledger_alias;
1304 for (class_id = SFI_CLASS_UNSPECIFIED; class_id < MAX_SFI_CLASS_ID; class_id++) {
1305 task_ledgers.sfi_wait_times[class_id] = -1;
1306 }
1307
1308 /* don't account for UNSPECIFIED */
1309 for (class_id = SFI_CLASS_UNSPECIFIED + 1; class_id < MAX_SFI_CLASS_ID; class_id++) {
1310 ledger_alias = sfi_get_ledger_alias_for_class(class_id);
1311 if (ledger_alias != SFI_CLASS_UNSPECIFIED) {
1312 /* Check to see if alias has been registered yet */
1313 if (task_ledgers.sfi_wait_times[ledger_alias] != -1) {
1314 task_ledgers.sfi_wait_times[class_id] = task_ledgers.sfi_wait_times[ledger_alias];
1315 } else {
1316 /* Otherwise, initialize it first */
1317 task_ledgers.sfi_wait_times[class_id] = task_ledgers.sfi_wait_times[ledger_alias] = sfi_ledger_entry_add(t, ledger_alias);
1318 }
1319 } else {
1320 task_ledgers.sfi_wait_times[class_id] = sfi_ledger_entry_add(t, class_id);
1321 }
1322
1323 if (task_ledgers.sfi_wait_times[class_id] < 0) {
1324 panic("couldn't create entries for task ledger template for SFI class 0x%x", class_id);
1325 }
1326 }
1327
1328 assert(task_ledgers.sfi_wait_times[MAX_SFI_CLASS_ID - 1] != -1);
1329 #endif /* CONFIG_SCHED_SFI */
1330
1331 task_ledgers.cpu_time_billed_to_me = ledger_entry_add(t, "cpu_time_billed_to_me", "sched", "ns");
1332 task_ledgers.cpu_time_billed_to_others = ledger_entry_add(t, "cpu_time_billed_to_others", "sched", "ns");
1333 task_ledgers.physical_writes = ledger_entry_add(t, "physical_writes", "res", "bytes");
1334 task_ledgers.logical_writes = ledger_entry_add(t, "logical_writes", "res", "bytes");
1335 task_ledgers.logical_writes_to_external = ledger_entry_add(t, "logical_writes_to_external", "res", "bytes");
1336 #if CONFIG_PHYS_WRITE_ACCT
1337 task_ledgers.fs_metadata_writes = ledger_entry_add(t, "fs_metadata_writes", "res", "bytes");
1338 #endif /* CONFIG_PHYS_WRITE_ACCT */
1339 task_ledgers.energy_billed_to_me = ledger_entry_add(t, "energy_billed_to_me", "power", "nj");
1340 task_ledgers.energy_billed_to_others = ledger_entry_add(t, "energy_billed_to_others", "power", "nj");
1341
1342 #if CONFIG_MEMORYSTATUS
1343 task_ledgers.memorystatus_dirty_time = ledger_entry_add(t, "memorystatus_dirty_time", "physmem", "ns");
1344 #endif /* CONFIG_MEMORYSTATUS */
1345
1346 task_ledgers.swapins = ledger_entry_add_with_flags(t, "swapins", "physmem", "bytes",
1347 LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1348
1349 if ((task_ledgers.cpu_time < 0) ||
1350 (task_ledgers.tkm_private < 0) ||
1351 (task_ledgers.tkm_shared < 0) ||
1352 (task_ledgers.phys_mem < 0) ||
1353 (task_ledgers.wired_mem < 0) ||
1354 (task_ledgers.conclave_mem < 0) ||
1355 (task_ledgers.internal < 0) ||
1356 (task_ledgers.external < 0) ||
1357 (task_ledgers.reusable < 0) ||
1358 (task_ledgers.iokit_mapped < 0) ||
1359 (task_ledgers.alternate_accounting < 0) ||
1360 (task_ledgers.alternate_accounting_compressed < 0) ||
1361 (task_ledgers.page_table < 0) ||
1362 (task_ledgers.phys_footprint < 0) ||
1363 (task_ledgers.internal_compressed < 0) ||
1364 (task_ledgers.purgeable_volatile < 0) ||
1365 (task_ledgers.purgeable_nonvolatile < 0) ||
1366 (task_ledgers.purgeable_volatile_compressed < 0) ||
1367 (task_ledgers.purgeable_nonvolatile_compressed < 0) ||
1368 (task_ledgers.tagged_nofootprint < 0) ||
1369 (task_ledgers.tagged_footprint < 0) ||
1370 (task_ledgers.tagged_nofootprint_compressed < 0) ||
1371 (task_ledgers.tagged_footprint_compressed < 0) ||
1372 #if CONFIG_FREEZE
1373 (task_ledgers.frozen_to_swap < 0) ||
1374 #endif /* CONFIG_FREEZE */
1375 (task_ledgers.network_volatile < 0) ||
1376 (task_ledgers.network_nonvolatile < 0) ||
1377 (task_ledgers.network_volatile_compressed < 0) ||
1378 (task_ledgers.network_nonvolatile_compressed < 0) ||
1379 (task_ledgers.media_nofootprint < 0) ||
1380 (task_ledgers.media_footprint < 0) ||
1381 (task_ledgers.media_nofootprint_compressed < 0) ||
1382 (task_ledgers.media_footprint_compressed < 0) ||
1383 (task_ledgers.graphics_nofootprint < 0) ||
1384 (task_ledgers.graphics_footprint < 0) ||
1385 (task_ledgers.graphics_nofootprint_compressed < 0) ||
1386 (task_ledgers.graphics_footprint_compressed < 0) ||
1387 (task_ledgers.neural_nofootprint < 0) ||
1388 (task_ledgers.neural_footprint < 0) ||
1389 (task_ledgers.neural_nofootprint_compressed < 0) ||
1390 (task_ledgers.neural_footprint_compressed < 0) ||
1391 (task_ledgers.neural_nofootprint_total < 0) ||
1392 (task_ledgers.platform_idle_wakeups < 0) ||
1393 (task_ledgers.interrupt_wakeups < 0) ||
1394 (task_ledgers.cpu_time_billed_to_me < 0) || (task_ledgers.cpu_time_billed_to_others < 0) ||
1395 (task_ledgers.physical_writes < 0) ||
1396 (task_ledgers.logical_writes < 0) ||
1397 (task_ledgers.logical_writes_to_external < 0) ||
1398 #if CONFIG_PHYS_WRITE_ACCT
1399 (task_ledgers.fs_metadata_writes < 0) ||
1400 #endif /* CONFIG_PHYS_WRITE_ACCT */
1401 #if CONFIG_MEMORYSTATUS
1402 (task_ledgers.memorystatus_dirty_time < 0) ||
1403 #endif /* CONFIG_MEMORYSTATUS */
1404 (task_ledgers.energy_billed_to_me < 0) ||
1405 (task_ledgers.energy_billed_to_others < 0) ||
1406 (task_ledgers.swapins < 0)
1407 ) {
1408 panic("couldn't create entries for task ledger template");
1409 }
1410
1411 ledger_track_credit_only(t, task_ledgers.phys_footprint);
1412 ledger_track_credit_only(t, task_ledgers.internal);
1413 ledger_track_credit_only(t, task_ledgers.external);
1414 ledger_track_credit_only(t, task_ledgers.reusable);
1415
1416 ledger_track_maximum(t, task_ledgers.phys_footprint, 60);
1417 ledger_track_maximum(t, task_ledgers.phys_mem, 60);
1418 ledger_track_maximum(t, task_ledgers.internal, 60);
1419 ledger_track_maximum(t, task_ledgers.internal_compressed, 60);
1420 ledger_track_maximum(t, task_ledgers.reusable, 60);
1421 ledger_track_maximum(t, task_ledgers.external, 60);
1422 ledger_track_maximum(t, task_ledgers.neural_nofootprint_total, 60);
1423 #if MACH_ASSERT
1424 if (pmap_ledgers_panic) {
1425 ledger_panic_on_negative(t, task_ledgers.phys_footprint);
1426 ledger_panic_on_negative(t, task_ledgers.conclave_mem);
1427 ledger_panic_on_negative(t, task_ledgers.page_table);
1428 ledger_panic_on_negative(t, task_ledgers.internal);
1429 ledger_panic_on_negative(t, task_ledgers.iokit_mapped);
1430 ledger_panic_on_negative(t, task_ledgers.alternate_accounting);
1431 ledger_panic_on_negative(t, task_ledgers.alternate_accounting_compressed);
1432 ledger_panic_on_negative(t, task_ledgers.purgeable_volatile);
1433 ledger_panic_on_negative(t, task_ledgers.purgeable_nonvolatile);
1434 ledger_panic_on_negative(t, task_ledgers.purgeable_volatile_compressed);
1435 ledger_panic_on_negative(t, task_ledgers.purgeable_nonvolatile_compressed);
1436 #if CONFIG_PHYS_WRITE_ACCT
1437 ledger_panic_on_negative(t, task_ledgers.fs_metadata_writes);
1438 #endif /* CONFIG_PHYS_WRITE_ACCT */
1439
1440 ledger_panic_on_negative(t, task_ledgers.tagged_nofootprint);
1441 ledger_panic_on_negative(t, task_ledgers.tagged_footprint);
1442 ledger_panic_on_negative(t, task_ledgers.tagged_nofootprint_compressed);
1443 ledger_panic_on_negative(t, task_ledgers.tagged_footprint_compressed);
1444 ledger_panic_on_negative(t, task_ledgers.network_volatile);
1445 ledger_panic_on_negative(t, task_ledgers.network_nonvolatile);
1446 ledger_panic_on_negative(t, task_ledgers.network_volatile_compressed);
1447 ledger_panic_on_negative(t, task_ledgers.network_nonvolatile_compressed);
1448 ledger_panic_on_negative(t, task_ledgers.media_nofootprint);
1449 ledger_panic_on_negative(t, task_ledgers.media_footprint);
1450 ledger_panic_on_negative(t, task_ledgers.media_nofootprint_compressed);
1451 ledger_panic_on_negative(t, task_ledgers.media_footprint_compressed);
1452 ledger_panic_on_negative(t, task_ledgers.graphics_nofootprint);
1453 ledger_panic_on_negative(t, task_ledgers.graphics_footprint);
1454 ledger_panic_on_negative(t, task_ledgers.graphics_nofootprint_compressed);
1455 ledger_panic_on_negative(t, task_ledgers.graphics_footprint_compressed);
1456 ledger_panic_on_negative(t, task_ledgers.neural_nofootprint);
1457 ledger_panic_on_negative(t, task_ledgers.neural_footprint);
1458 ledger_panic_on_negative(t, task_ledgers.neural_nofootprint_compressed);
1459 ledger_panic_on_negative(t, task_ledgers.neural_footprint_compressed);
1460 }
1461 #endif /* MACH_ASSERT */
1462
1463 #if CONFIG_MEMORYSTATUS
1464 ledger_set_callback(t, task_ledgers.phys_footprint, task_footprint_exceeded, NULL, NULL);
1465 #endif /* CONFIG_MEMORYSTATUS */
1466
1467 ledger_set_callback(t, task_ledgers.interrupt_wakeups,
1468 task_wakeups_rate_exceeded, NULL, NULL);
1469 ledger_set_callback(t, task_ledgers.physical_writes, task_io_rate_exceeded, (void *)FLAVOR_IO_PHYSICAL_WRITES, NULL);
1470
1471 #if CONFIG_SPTM || !XNU_MONITOR
1472 ledger_template_complete(t);
1473 #else /* CONFIG_SPTM || !XNU_MONITOR */
1474 ledger_template_complete_secure_alloc(t);
1475 #endif /* CONFIG_SPTM || !XNU_MONITOR */
1476 task_ledger_template = t;
1477 }
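
/*
 * Illustrative sketch (not compiled): how the template built above is
 * consumed. task_create_internal() below follows this pattern; the helper
 * name here is hypothetical, but ledger_instantiate() and
 * ledger_get_entries() are the calls used elsewhere in this file.
 */
#if 0
static ledger_t
example_task_ledger(void)
{
	/* every entry registered above is live as soon as the ledger exists */
	ledger_t l = ledger_instantiate(task_ledger_template,
	    LEDGER_CREATE_ACTIVE_ENTRIES);

	if (l != NULL) {
		ledger_amount_t credit, debit;
		/* entry indices such as task_ledgers.phys_footprint index into l */
		ledger_get_entries(l, task_ledgers.phys_footprint, &credit, &debit);
	}
	return l;
}
#endif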
1478
1479 /* Create a task, but leave the task ports disabled */
1480 kern_return_t
1481 task_create_internal(
1482 task_t parent_task, /* Null-able */
1483 proc_ro_t proc_ro,
1484 coalition_t *parent_coalitions __unused,
1485 boolean_t inherit_memory,
1486 boolean_t is_64bit,
1487 boolean_t is_64bit_data,
1488 uint32_t t_flags,
1489 uint32_t t_flags_ro,
1490 uint32_t t_procflags,
1491 uint8_t t_returnwaitflags,
1492 task_t child_task)
1493 {
1494 task_t new_task;
1495 vm_shared_region_t shared_region;
1496 ledger_t ledger = NULL;
1497 struct task_ro_data task_ro_data = {};
1498 uint32_t parent_t_flags_ro = 0;
1499
1500 new_task = child_task;
1501
1502 if (task_ref_count_init(new_task) != KERN_SUCCESS) {
1503 return KERN_RESOURCE_SHORTAGE;
1504 }
1505
1506 /* allocate with active entries */
1507 assert(task_ledger_template != NULL);
1508 ledger = ledger_instantiate(task_ledger_template, LEDGER_CREATE_ACTIVE_ENTRIES);
1509 if (ledger == NULL) {
1510 task_ref_count_fini(new_task);
1511 return KERN_RESOURCE_SHORTAGE;
1512 }
1513
1514 counter_alloc(&(new_task->faults));
1515
1516 #if defined(HAS_APPLE_PAC)
1517 const uint8_t disable_user_jop = inherit_memory ? parent_task->disable_user_jop : FALSE;
1518 ml_task_set_rop_pid(new_task, parent_task, inherit_memory);
1519 ml_task_set_jop_pid(new_task, parent_task, inherit_memory, disable_user_jop);
1520 ml_task_set_disable_user_jop(new_task, disable_user_jop);
1521 #endif
1522
1523
1524 new_task->ledger = ledger;
1525
1526 /* if inherit_memory is true, parent_task MUST not be NULL */
1527 if (!(t_flags & TF_CORPSE_FORK) && inherit_memory) {
1528 #if CONFIG_DEFERRED_RECLAIM
1529 if (parent_task->deferred_reclamation_metadata) {
1530 /*
1531 * Prevent concurrent reclaims while we're forking the parent_task's map,
1532 * so that the child's map is in sync with the forked reclamation
1533 * metadata.
1534 */
1535 vm_deferred_reclamation_ring_own(
1536 parent_task->deferred_reclamation_metadata);
1537 }
1538 #endif /* CONFIG_DEFERRED_RECLAIM */
1539 new_task->map = vm_map_fork(ledger, parent_task->map, 0);
1540 #if CONFIG_DEFERRED_RECLAIM
1541 if (new_task->map != NULL &&
1542 parent_task->deferred_reclamation_metadata) {
1543 new_task->deferred_reclamation_metadata =
1544 vm_deferred_reclamation_task_fork(new_task,
1545 parent_task->deferred_reclamation_metadata);
1546 }
1547 if (parent_task->deferred_reclamation_metadata) {
1548 vm_deferred_reclamation_ring_disown(
1549 parent_task->deferred_reclamation_metadata);
1550 }
1551 #endif /* CONFIG_DEFERRED_RECLAIM */
1552 } else {
1553 unsigned int pmap_flags = is_64bit ? PMAP_CREATE_64BIT : 0;
1554 pmap_t pmap = pmap_create_options(ledger, 0, pmap_flags);
1555 vm_map_t new_map;
1556
1557 if (pmap == NULL) {
1558 counter_free(&new_task->faults);
1559 ledger_dereference(ledger);
1560 task_ref_count_fini(new_task);
1561 return KERN_RESOURCE_SHORTAGE;
1562 }
1563 new_map = vm_map_create_options(pmap,
1564 (vm_map_offset_t)(VM_MIN_ADDRESS),
1565 (vm_map_offset_t)(VM_MAX_ADDRESS),
1566 VM_MAP_CREATE_PAGEABLE);
1567 if (parent_task) {
1568 vm_map_inherit_limits(new_map, parent_task->map);
1569 }
1570 new_task->map = new_map;
1571 }
1572
1573 if (new_task->map == NULL) {
1574 counter_free(&new_task->faults);
1575 ledger_dereference(ledger);
1576 task_ref_count_fini(new_task);
1577 return KERN_RESOURCE_SHORTAGE;
1578 }
1579
1580 lck_mtx_init(&new_task->lock, &task_lck_grp, &task_lck_attr);
1581 queue_init(&new_task->threads);
1582 new_task->suspend_count = 0;
1583 new_task->thread_count = 0;
1584 new_task->active_thread_count = 0;
1585 new_task->user_stop_count = 0;
1586 new_task->legacy_stop_count = 0;
1587 new_task->active = TRUE;
1588 new_task->halting = FALSE;
1589 new_task->priv_flags = 0;
1590 new_task->t_flags = t_flags;
1591 task_ro_data.t_flags_ro = t_flags_ro;
1592 new_task->t_procflags = t_procflags;
1593 new_task->t_returnwaitflags = t_returnwaitflags;
1594 new_task->returnwait_inheritor = current_thread();
1595 new_task->importance = 0;
1596 new_task->crashed_thread_id = 0;
1597 new_task->watchports = NULL;
1598 new_task->t_rr_ranges = NULL;
1599
1600 new_task->bank_context = NULL;
1601
1602 if (parent_task) {
1603 parent_t_flags_ro = task_ro_flags_get(parent_task);
1604 }
1605
1606 if (parent_task && inherit_memory) {
1607 #if __has_feature(ptrauth_calls)
1608 /* Inherit the pac exception flags from parent if in fork */
1609 task_ro_data.t_flags_ro |= (parent_t_flags_ro & (TFRO_PAC_ENFORCE_USER_STATE |
1610 TFRO_PAC_EXC_FATAL));
1611 #endif /* __has_feature(ptrauth_calls) */
1612 /* Inherit the hardened binary flags from parent if in fork */
1613 task_ro_data.t_flags_ro |= parent_t_flags_ro & (TFRO_HARDENED | TFRO_PLATFORM | TFRO_JIT_EXC_FATAL);
1614 #if XNU_TARGET_OS_OSX
1615 task_ro_data.t_flags_ro |= parent_t_flags_ro & TFRO_MACH_HARDENING_OPT_OUT;
1616 #endif /* XNU_TARGET_OS_OSX */
1617 }
1618
1619 #ifdef MACH_BSD
1620 new_task->corpse_info = NULL;
1621 #endif /* MACH_BSD */
1622
1623 /* The kernel task, which is not created by this function, has unique id 0; ids assigned here start at 1. */
1624 task_set_uniqueid(new_task);
1625
1626 #if CONFIG_MACF
1627 set_task_crash_label(new_task, NULL);
1628
1629 task_ro_data.task_filters.mach_trap_filter_mask = NULL;
1630 task_ro_data.task_filters.mach_kobj_filter_mask = NULL;
1631 #endif
1632
1633 #if CONFIG_MEMORYSTATUS
1634 if (max_task_footprint != 0) {
1635 ledger_set_limit(ledger, task_ledgers.phys_footprint, max_task_footprint, PHYS_FOOTPRINT_WARNING_LEVEL);
1636 }
1637 #endif /* CONFIG_MEMORYSTATUS */
1638
1639 if (task_wakeups_monitor_rate != 0) {
1640 uint32_t flags = WAKEMON_ENABLE | WAKEMON_SET_DEFAULTS;
1641 int32_t rate; // Ignored because of WAKEMON_SET_DEFAULTS
1642 task_wakeups_monitor_ctl(new_task, &flags, &rate);
1643 }
1644
1645 #if CONFIG_IO_ACCOUNTING
1646 uint32_t flags = IOMON_ENABLE;
1647 task_io_monitor_ctl(new_task, &flags);
1648 #endif /* CONFIG_IO_ACCOUNTING */
1649
1650 machine_task_init(new_task, parent_task, inherit_memory);
1651
1652 new_task->task_debug = NULL;
1653
1654 #if DEVELOPMENT || DEBUG
1655 new_task->task_unnested = FALSE;
1656 new_task->task_disconnected_count = 0;
1657 #endif
1658 queue_init(&new_task->semaphore_list);
1659 new_task->semaphores_owned = 0;
1660
1661 new_task->vtimers = 0;
1662
1663 new_task->shared_region = NULL;
1664
1665 new_task->affinity_space = NULL;
1666
1667 #if CONFIG_CPU_COUNTERS
1668 new_task->t_kpc = 0;
1669 #endif /* CONFIG_CPU_COUNTERS */
1670
1671 new_task->pidsuspended = FALSE;
1672 new_task->frozen = FALSE;
1673 new_task->changing_freeze_state = FALSE;
1674 new_task->rusage_cpu_flags = 0;
1675 new_task->rusage_cpu_percentage = 0;
1676 new_task->rusage_cpu_interval = 0;
1677 new_task->rusage_cpu_deadline = 0;
1678 new_task->rusage_cpu_callt = NULL;
1679 #if MACH_ASSERT
1680 new_task->suspends_outstanding = 0;
1681 #endif
1682 recount_task_init(&new_task->tk_recount);
1683
1684 #if HYPERVISOR
1685 new_task->hv_task_target = NULL;
1686 #endif /* HYPERVISOR */
1687
1688 #if CONFIG_TASKWATCH
1689 queue_init(&new_task->task_watchers);
1690 new_task->num_taskwatchers = 0;
1691 new_task->watchapplying = 0;
1692 #endif /* CONFIG_TASKWATCH */
1693
1694 new_task->mem_notify_reserved = 0;
1695 new_task->memlimit_attrs_reserved = 0;
1696
1697 new_task->requested_policy = default_task_requested_policy;
1698 new_task->effective_policy = default_task_effective_policy;
1699
1700 new_task->task_shared_region_slide = -1;
1701
1702 if (parent_task != NULL) {
1703 task_ro_data.task_tokens.sec_token = *task_get_sec_token(parent_task);
1704 task_ro_data.task_tokens.audit_token = *task_get_audit_token(parent_task);
1705
1706 /* only inherit the option bits, no effect until task_set_immovable_pinned() */
1707 task_ro_data.task_control_port_options = task_get_control_port_options(parent_task);
1708
1709 task_ro_data.t_flags_ro |= parent_t_flags_ro & TFRO_FILTER_MSG;
1710 #if CONFIG_MACF
1711 if (!(t_flags & TF_CORPSE_FORK)) {
1712 task_ro_data.task_filters.mach_trap_filter_mask = task_get_mach_trap_filter_mask(parent_task);
1713 task_ro_data.task_filters.mach_kobj_filter_mask = task_get_mach_kobj_filter_mask(parent_task);
1714 }
1715 #endif
1716 } else {
1717 task_ro_data.task_tokens.sec_token = KERNEL_SECURITY_TOKEN;
1718 task_ro_data.task_tokens.audit_token = KERNEL_AUDIT_TOKEN;
1719
1720 task_ro_data.task_control_port_options = TASK_CONTROL_PORT_OPTIONS_NONE;
1721 }
1722
1723 /* must set before task_importance_init_from_parent: */
1724 if (proc_ro != NULL) {
1725 new_task->bsd_info_ro = proc_ro_ref_task(proc_ro, new_task, &task_ro_data);
1726 } else {
1727 new_task->bsd_info_ro = proc_ro_alloc(NULL, NULL, new_task, &task_ro_data);
1728 }
1729
1730 ipc_task_init(new_task, parent_task);
1731
1732 task_importance_init_from_parent(new_task, parent_task);
1733
1734 new_task->corpse_vmobject_list = NULL;
1735
1736 if (parent_task != TASK_NULL) {
1737 /* inherit the parent's shared region */
1738 shared_region = vm_shared_region_get(parent_task);
1739 if (shared_region != NULL) {
1740 vm_shared_region_set(new_task, shared_region);
1741 }
1742
1743 #if __has_feature(ptrauth_calls)
1744 /* use parent's shared_region_id */
1745 char *shared_region_id = task_get_vm_shared_region_id_and_jop_pid(parent_task, NULL);
1746 if (shared_region_id != NULL) {
1747 shared_region_key_alloc(shared_region_id, FALSE, 0); /* get a reference */
1748 }
1749 task_set_shared_region_id(new_task, shared_region_id);
1750 #endif /* __has_feature(ptrauth_calls) */
1751
1752 if (task_has_64Bit_addr(parent_task)) {
1753 task_set_64Bit_addr(new_task);
1754 }
1755
1756 if (task_has_64Bit_data(parent_task)) {
1757 task_set_64Bit_data(new_task);
1758 }
1759
1760 if (inherit_memory) {
1761 new_task->all_image_info_addr = parent_task->all_image_info_addr;
1762 new_task->all_image_info_size = parent_task->all_image_info_size;
1763 if (parent_task->t_flags & TF_DYLD_ALL_IMAGE_FINAL) {
1764 new_task->t_flags |= TF_DYLD_ALL_IMAGE_FINAL;
1765 }
1766 }
1767 new_task->mach_header_vm_address = 0;
1768
1769 if (inherit_memory && parent_task->affinity_space) {
1770 task_affinity_create(parent_task, new_task);
1771 }
1772
1773 new_task->pset_hint = parent_task->pset_hint = task_choose_pset(parent_task);
1774
1775 new_task->task_exc_guard = parent_task->task_exc_guard;
1776 if (parent_task->t_flags & TF_NO_SMT) {
1777 new_task->t_flags |= TF_NO_SMT;
1778 }
1779
1780 if (parent_task->t_flags & TF_USE_PSET_HINT_CLUSTER_TYPE) {
1781 new_task->t_flags |= TF_USE_PSET_HINT_CLUSTER_TYPE;
1782 }
1783
1784 if (parent_task->t_flags & TF_TECS) {
1785 new_task->t_flags |= TF_TECS;
1786 }
1787
1788 #if defined(__x86_64__)
1789 if (parent_task->t_flags & TF_INSN_COPY_OPTOUT) {
1790 new_task->t_flags |= TF_INSN_COPY_OPTOUT;
1791 }
1792 #endif
1793
1794
1795 new_task->priority = BASEPRI_DEFAULT;
1796 new_task->max_priority = MAXPRI_USER;
1797 } else {
1798 #ifdef __LP64__
1799 if (is_64bit) {
1800 task_set_64Bit_addr(new_task);
1801 }
1802 #endif
1803
1804 if (is_64bit_data) {
1805 task_set_64Bit_data(new_task);
1806 }
1807
1808 new_task->all_image_info_addr = (mach_vm_address_t)0;
1809 new_task->all_image_info_size = (mach_vm_size_t)0;
1810
1811 new_task->pset_hint = PROCESSOR_SET_NULL;
1812
1813 new_task->task_exc_guard = TASK_EXC_GUARD_NONE;
1814
1815 if (new_task == kernel_task) {
1816 new_task->priority = BASEPRI_KERNEL;
1817 new_task->max_priority = MAXPRI_KERNEL;
1818 } else {
1819 new_task->priority = BASEPRI_DEFAULT;
1820 new_task->max_priority = MAXPRI_USER;
1821 }
1822 }
1823
1824 bzero(new_task->coalition, sizeof(new_task->coalition));
1825 for (int i = 0; i < COALITION_NUM_TYPES; i++) {
1826 queue_chain_init(new_task->task_coalition[i]);
1827 }
1828
1829 /* Allocate I/O Statistics */
1830 new_task->task_io_stats = kalloc_data(sizeof(struct io_stat_info),
1831 Z_WAITOK | Z_ZERO | Z_NOFAIL);
1832
1833 bzero(&(new_task->cpu_time_eqos_stats), sizeof(new_task->cpu_time_eqos_stats));
1834 bzero(&(new_task->cpu_time_rqos_stats), sizeof(new_task->cpu_time_rqos_stats));
1835
1836 bzero(&new_task->extmod_statistics, sizeof(new_task->extmod_statistics));
1837
1838 counter_alloc(&(new_task->pageins));
1839 counter_alloc(&(new_task->cow_faults));
1840 counter_alloc(&(new_task->messages_sent));
1841 counter_alloc(&(new_task->messages_received));
1842 counter_alloc(&(new_task->pages_grabbed));
1843 counter_alloc(&(new_task->pages_grabbed_kern));
1844 counter_alloc(&(new_task->pages_grabbed_iopl));
1845 counter_alloc(&(new_task->pages_grabbed_upl));
1846
1847 /* Copy resource accounting info from the parent for a corpse-forked task. */
1848 if (parent_task != NULL && (t_flags & TF_CORPSE_FORK)) {
1849 task_rollup_accounting_info(new_task, parent_task);
1850 task_store_owned_vmobject_info(new_task, parent_task);
1851 } else {
1852 /* Initialize to zero for standard fork/spawn case */
1853 new_task->total_runnable_time = 0;
1854 new_task->syscalls_mach = 0;
1855 new_task->syscalls_unix = 0;
1856 new_task->c_switch = 0;
1857 new_task->p_switch = 0;
1858 new_task->ps_switch = 0;
1859 new_task->decompressions = 0;
1860 new_task->low_mem_notified_warn = 0;
1861 new_task->low_mem_notified_critical = 0;
1862 new_task->purged_memory_warn = 0;
1863 new_task->purged_memory_critical = 0;
1864 new_task->low_mem_privileged_listener = 0;
1865 new_task->memlimit_is_active = 0;
1866 new_task->memlimit_is_fatal = 0;
1867 new_task->memlimit_active_exc_resource = 0;
1868 new_task->memlimit_inactive_exc_resource = 0;
1869 new_task->task_timer_wakeups_bin_1 = 0;
1870 new_task->task_timer_wakeups_bin_2 = 0;
1871 new_task->task_gpu_ns = 0;
1872 new_task->task_writes_counters_internal.task_immediate_writes = 0;
1873 new_task->task_writes_counters_internal.task_deferred_writes = 0;
1874 new_task->task_writes_counters_internal.task_invalidated_writes = 0;
1875 new_task->task_writes_counters_internal.task_metadata_writes = 0;
1876 new_task->task_writes_counters_external.task_immediate_writes = 0;
1877 new_task->task_writes_counters_external.task_deferred_writes = 0;
1878 new_task->task_writes_counters_external.task_invalidated_writes = 0;
1879 new_task->task_writes_counters_external.task_metadata_writes = 0;
1880 #if CONFIG_PHYS_WRITE_ACCT
1881 new_task->task_fs_metadata_writes = 0;
1882 #endif /* CONFIG_PHYS_WRITE_ACCT */
1883 }
1884
1885
1886 new_task->donates_own_pages = FALSE;
1887 #if CONFIG_COALITIONS
1888 if (!(t_flags & TF_CORPSE_FORK)) {
1889 /* TODO: there is no graceful failure path here... */
1890 if (parent_coalitions && parent_coalitions[COALITION_TYPE_RESOURCE]) {
1891 coalitions_adopt_task(parent_coalitions, new_task);
1892 if (parent_coalitions[COALITION_TYPE_JETSAM]) {
1893 new_task->donates_own_pages = coalition_is_swappable(parent_coalitions[COALITION_TYPE_JETSAM]);
1894 }
1895 } else if (parent_task && parent_task->coalition[COALITION_TYPE_RESOURCE]) {
1896 /*
1897 * all tasks at least have a resource coalition, so
1898 * if the parent has one then inherit all coalitions
1899 * the parent is a part of
1900 */
1901 coalitions_adopt_task(parent_task->coalition, new_task);
1902 if (parent_task->coalition[COALITION_TYPE_JETSAM]) {
1903 new_task->donates_own_pages = coalition_is_swappable(parent_task->coalition[COALITION_TYPE_JETSAM]);
1904 }
1905 } else {
1906 /* TODO: assert that new_task will be PID 1 (launchd) */
1907 coalitions_adopt_init_task(new_task);
1908 }
1909 /*
1910 * on exec, we need to transfer the coalition roles from the
1911 * parent task to the exec copy task.
1912 */
1913 if (parent_task && (t_procflags & TPF_EXEC_COPY)) {
1914 int coal_roles[COALITION_NUM_TYPES];
1915 task_coalition_roles(parent_task, coal_roles);
1916 (void)coalitions_set_roles(new_task->coalition, new_task, coal_roles);
1917 }
1918 } else {
1919 coalitions_adopt_corpse_task(new_task);
1920 }
1921
1922 if (new_task->coalition[COALITION_TYPE_RESOURCE] == COALITION_NULL) {
1923 panic("created task is not a member of a resource coalition");
1924 }
1925 task_set_coalition_member(new_task);
1926 #endif /* CONFIG_COALITIONS */
1927
1928 if (parent_task != TASK_NULL) {
1929 /* task_policy_create queries the adopted coalition */
1930 task_policy_create(new_task, parent_task);
1931 }
1932
1933 new_task->dispatchqueue_offset = 0;
1934 if (parent_task != NULL) {
1935 new_task->dispatchqueue_offset = parent_task->dispatchqueue_offset;
1936 }
1937
1938 new_task->task_can_transfer_memory_ownership = FALSE;
1939 new_task->task_volatile_objects = 0;
1940 new_task->task_nonvolatile_objects = 0;
1941 new_task->task_objects_disowning = FALSE;
1942 new_task->task_objects_disowned = FALSE;
1943 new_task->task_owned_objects = 0;
1944 queue_init(&new_task->task_objq);
1945
1946 #if CONFIG_FREEZE
1947 queue_init(&new_task->task_frozen_cseg_q);
1948 #endif /* CONFIG_FREEZE */
1949
1950 task_objq_lock_init(new_task);
1951
1952 #if __arm64__
1953 new_task->task_legacy_footprint = FALSE;
1954 new_task->task_extra_footprint_limit = FALSE;
1955 new_task->task_ios13extended_footprint_limit = FALSE;
1956 #endif /* __arm64__ */
1957 new_task->task_region_footprint = FALSE;
1958 new_task->task_has_crossed_thread_limit = FALSE;
1959 new_task->task_thread_limit = 0;
1960 #if CONFIG_SECLUDED_MEMORY
1961 new_task->task_can_use_secluded_mem = FALSE;
1962 new_task->task_could_use_secluded_mem = FALSE;
1963 new_task->task_could_also_use_secluded_mem = FALSE;
1964 new_task->task_suppressed_secluded = FALSE;
1965 #endif /* CONFIG_SECLUDED_MEMORY */
1966
1967
1968 /*
1969 * t_flags is set up above. But since we don't
1970 * support darkwake mode being set that way
1971 * currently, we clear it out here explicitly.
1972 */
1973 new_task->t_flags &= ~(TF_DARKWAKE_MODE);
1974
1975 queue_init(&new_task->io_user_clients);
1976 new_task->loadTag = 0;
1977
1978 lck_mtx_lock(&tasks_threads_lock);
1979 queue_enter(&tasks, new_task, task_t, tasks);
1980 tasks_count++;
1981 if (tasks_suspend_state) {
1982 task_suspend_internal(new_task);
1983 }
1984 lck_mtx_unlock(&tasks_threads_lock);
1985 task_ref_hold_proc_task_struct(new_task);
1986
1987 return KERN_SUCCESS;
1988 }
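
/*
 * Sketch (not compiled): the three failure paths in task_create_internal()
 * above unwind in strict reverse order of acquisition, which is why each
 * later exit repeats the earlier releases. Assumed shape, shown only to
 * make the ordering explicit:
 */
#if 0
	counter_free(&new_task->faults);   /* acquired 3rd, released 1st */
	ledger_dereference(ledger);        /* acquired 2nd, released 2nd */
	task_ref_count_fini(new_task);     /* acquired 1st, released last */
	return KERN_RESOURCE_SHORTAGE;
#endif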
1989
1990 /*
1991 * task_rollup_accounting_info
1992 *
1993 * Roll up accounting stats. Used to rollup stats
1994 * for exec copy task and corpse fork.
1995 */
1996 void
1997 task_rollup_accounting_info(task_t to_task, task_t from_task)
1998 {
1999 assert(from_task != to_task);
2000
2001 recount_task_copy(&to_task->tk_recount, &from_task->tk_recount);
2002 to_task->total_runnable_time = from_task->total_runnable_time;
2003 counter_add(&to_task->faults, counter_load(&from_task->faults));
2004 counter_add(&to_task->pageins, counter_load(&from_task->pageins));
2005 counter_add(&to_task->cow_faults, counter_load(&from_task->cow_faults));
2006 counter_add(&to_task->messages_sent, counter_load(&from_task->messages_sent));
2007 counter_add(&to_task->messages_received, counter_load(&from_task->messages_received));
2008 to_task->decompressions = from_task->decompressions;
2009 to_task->syscalls_mach = from_task->syscalls_mach;
2010 to_task->syscalls_unix = from_task->syscalls_unix;
2011 to_task->c_switch = from_task->c_switch;
2012 to_task->p_switch = from_task->p_switch;
2013 to_task->ps_switch = from_task->ps_switch;
2014 to_task->extmod_statistics = from_task->extmod_statistics;
2015 to_task->low_mem_notified_warn = from_task->low_mem_notified_warn;
2016 to_task->low_mem_notified_critical = from_task->low_mem_notified_critical;
2017 to_task->purged_memory_warn = from_task->purged_memory_warn;
2018 to_task->purged_memory_critical = from_task->purged_memory_critical;
2019 to_task->low_mem_privileged_listener = from_task->low_mem_privileged_listener;
2020 *to_task->task_io_stats = *from_task->task_io_stats;
2021 to_task->cpu_time_eqos_stats = from_task->cpu_time_eqos_stats;
2022 to_task->cpu_time_rqos_stats = from_task->cpu_time_rqos_stats;
2023 to_task->task_timer_wakeups_bin_1 = from_task->task_timer_wakeups_bin_1;
2024 to_task->task_timer_wakeups_bin_2 = from_task->task_timer_wakeups_bin_2;
2025 to_task->task_gpu_ns = from_task->task_gpu_ns;
2026 to_task->task_writes_counters_internal.task_immediate_writes = from_task->task_writes_counters_internal.task_immediate_writes;
2027 to_task->task_writes_counters_internal.task_deferred_writes = from_task->task_writes_counters_internal.task_deferred_writes;
2028 to_task->task_writes_counters_internal.task_invalidated_writes = from_task->task_writes_counters_internal.task_invalidated_writes;
2029 to_task->task_writes_counters_internal.task_metadata_writes = from_task->task_writes_counters_internal.task_metadata_writes;
2030 to_task->task_writes_counters_external.task_immediate_writes = from_task->task_writes_counters_external.task_immediate_writes;
2031 to_task->task_writes_counters_external.task_deferred_writes = from_task->task_writes_counters_external.task_deferred_writes;
2032 to_task->task_writes_counters_external.task_invalidated_writes = from_task->task_writes_counters_external.task_invalidated_writes;
2033 to_task->task_writes_counters_external.task_metadata_writes = from_task->task_writes_counters_external.task_metadata_writes;
2034 #if CONFIG_PHYS_WRITE_ACCT
2035 to_task->task_fs_metadata_writes = from_task->task_fs_metadata_writes;
2036 #endif /* CONFIG_PHYS_WRITE_ACCT */
2037
2038 #if CONFIG_MEMORYSTATUS
2039 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.memorystatus_dirty_time);
2040 #endif /* CONFIG_MEMORYSTATUS */
2041
2042 /* Skip ledger roll up for memory accounting entries */
2043 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time);
2044 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.platform_idle_wakeups);
2045 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.interrupt_wakeups);
2046 #if CONFIG_SCHED_SFI
2047 for (sfi_class_id_t class_id = SFI_CLASS_UNSPECIFIED; class_id < MAX_SFI_CLASS_ID; class_id++) {
2048 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.sfi_wait_times[class_id]);
2049 }
2050 #endif
2051 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time_billed_to_me);
2052 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time_billed_to_others);
2053 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.physical_writes);
2054 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.logical_writes);
2055 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.energy_billed_to_me);
2056 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.energy_billed_to_others);
2057 }
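
/*
 * Sketch (not compiled; assumed ledger semantics): a roll-up folds one
 * entry's credit and debit from the source ledger into the destination, so
 * the exec copy or corpse fork reports cumulative lifetime totals:
 */
#if 0
	ledger_amount_t credit, debit;

	ledger_rollup_entry(to_task->ledger, from_task->ledger,
	    task_ledgers.cpu_time);
	/* the balance read back now also carries from_task's cpu_time */
	ledger_get_entries(to_task->ledger, task_ledgers.cpu_time,
	    &credit, &debit);
#endif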
2058
2059 /*
2060 * task_deallocate_internal:
2061 *
2062 * Drop a reference on a task.
2063 * Don't call this directly.
2064 */
2065 extern void task_deallocate_internal(task_t task, os_ref_count_t refs);
2066 void
2067 task_deallocate_internal(
2068 task_t task,
2069 os_ref_count_t refs)
2070 {
2071 ledger_amount_t credit, debit, interrupt_wakeups, platform_idle_wakeups;
2072
2073 if (task == TASK_NULL) {
2074 return;
2075 }
2076
2077 #if IMPORTANCE_INHERITANCE
2078 if (refs == 1) {
2079 /*
2080 * If last ref potentially comes from the task's importance,
2081 * disconnect it. But more task refs may be added before
2082 * that completes, so wait for the reference to go to zero
2083 * naturally (it may happen on a recursive task_deallocate()
2084 * from the ipc_importance_disconnect_task() call).
2085 */
2086 if (IIT_NULL != task->task_imp_base) {
2087 ipc_importance_disconnect_task(task);
2088 }
2089 return;
2090 }
2091 #endif /* IMPORTANCE_INHERITANCE */
2092
2093 if (refs > 0) {
2094 return;
2095 }
2096
2097 /*
2098 * The task should be dead at this point. Ensure other resources,
2099 * like threads, are gone before we trash the world.
2100 */
2101 assert(queue_empty(&task->threads));
2102 assert(get_bsdtask_info(task) == NULL);
2103 assert(!is_active(task->itk_space));
2104 assert(!task->active);
2105 assert(task->active_thread_count == 0);
2106 assert(!task_get_game_mode(task));
2107 assert(!task_get_carplay_mode(task));
2108
2109 lck_mtx_lock(&tasks_threads_lock);
2110 assert(terminated_tasks_count > 0);
2111 queue_remove(&terminated_tasks, task, task_t, tasks);
2112 terminated_tasks_count--;
2113 lck_mtx_unlock(&tasks_threads_lock);
2114
2115 /*
2116 * remove the reference on bank context
2117 */
2118 task_bank_reset(task);
2119
2120 kfree_data(task->task_io_stats, sizeof(struct io_stat_info));
2121
2122 /*
2123 * Give the machine dependent code a chance
2124 * to perform cleanup before ripping apart
2125 * the task.
2126 */
2127 machine_task_terminate(task);
2128
2129 ipc_task_terminate(task);
2130
2131 /* let iokit know: termination phase 2 */
2132 iokit_task_terminate(task, 2);
2133
2134 /* Unregister task from userspace coredumps on panic */
2135 kern_unregister_userspace_coredump(task);
2136
2137 if (task->affinity_space) {
2138 task_affinity_deallocate(task);
2139 }
2140
2141 #if MACH_ASSERT
2142 if (task->ledger != NULL &&
2143 task->map != NULL &&
2144 task->map->pmap != NULL &&
2145 task->map->pmap->ledger != NULL) {
2146 assert(task->ledger == task->map->pmap->ledger);
2147 }
2148 #endif /* MACH_ASSERT */
2149
2150 vm_owned_objects_disown(task);
2151 assert(task->task_objects_disowned);
2152 if (task->task_owned_objects != 0) {
2153 panic("task_deallocate(%p): "
2154 "volatile_objects=%d nonvolatile_objects=%d owned=%d\n",
2155 task,
2156 task->task_volatile_objects,
2157 task->task_nonvolatile_objects,
2158 task->task_owned_objects);
2159 }
2160
2161 #if CONFIG_DEFERRED_RECLAIM
2162 /*
2163 * Remove this task's reclaim buffer from the global queues.
2164 */
2165 if (task->deferred_reclamation_metadata != NULL) {
2166 vm_deferred_reclamation_buffer_deallocate(task->deferred_reclamation_metadata);
2167 task->deferred_reclamation_metadata = NULL;
2168 }
2169 #endif /* CONFIG_DEFERRED_RECLAIM */
2170
2171 vm_map_deallocate(task->map);
2172 if (task->is_large_corpse) {
2173 assert(large_corpse_count > 0);
2174 OSDecrementAtomic(&large_corpse_count);
2175 task->is_large_corpse = false;
2176 }
2177 is_release(task->itk_space);
2178
2179 if (task->t_rr_ranges) {
2180 restartable_ranges_release(task->t_rr_ranges);
2181 }
2182
2183 ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
2184 &interrupt_wakeups, &debit);
2185 ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
2186 &platform_idle_wakeups, &debit);
2187
2188 struct recount_times_mach sum = { 0 };
2189 struct recount_times_mach p_only = { 0 };
2190 recount_task_times_perf_only(task, &sum, &p_only);
2191 #if CONFIG_PERVASIVE_ENERGY
2192 uint64_t energy = recount_task_energy_nj(task);
2193 #endif /* CONFIG_PERVASIVE_ENERGY */
2194 recount_task_deinit(&task->tk_recount);
2195
2196 /* Accumulate statistics for dead tasks */
2197 lck_spin_lock(&dead_task_statistics_lock);
2198 dead_task_statistics.total_user_time += sum.rtm_user;
2199 dead_task_statistics.total_system_time += sum.rtm_system;
2200
2201 dead_task_statistics.task_interrupt_wakeups += interrupt_wakeups;
2202 dead_task_statistics.task_platform_idle_wakeups += platform_idle_wakeups;
2203
2204 dead_task_statistics.task_timer_wakeups_bin_1 += task->task_timer_wakeups_bin_1;
2205 dead_task_statistics.task_timer_wakeups_bin_2 += task->task_timer_wakeups_bin_2;
2206 dead_task_statistics.total_ptime += p_only.rtm_user + p_only.rtm_system;
2207 dead_task_statistics.total_pset_switches += task->ps_switch;
2208 dead_task_statistics.task_gpu_ns += task->task_gpu_ns;
2209 #if CONFIG_PERVASIVE_ENERGY
2210 dead_task_statistics.task_energy += energy;
2211 #endif /* CONFIG_PERVASIVE_ENERGY */
2212
2213 lck_spin_unlock(&dead_task_statistics_lock);
2214 lck_mtx_destroy(&task->lock, &task_lck_grp);
2215
2216 if (!ledger_get_entries(task->ledger, task_ledgers.tkm_private, &credit,
2217 &debit)) {
2218 OSAddAtomic64(credit, (int64_t *)&tasks_tkm_private.alloc);
2219 OSAddAtomic64(debit, (int64_t *)&tasks_tkm_private.free);
2220 }
2221 if (!ledger_get_entries(task->ledger, task_ledgers.tkm_shared, &credit,
2222 &debit)) {
2223 OSAddAtomic64(credit, (int64_t *)&tasks_tkm_shared.alloc);
2224 OSAddAtomic64(debit, (int64_t *)&tasks_tkm_shared.free);
2225 }
2226 ledger_dereference(task->ledger);
2227
2228 counter_free(&task->faults);
2229 counter_free(&task->pageins);
2230 counter_free(&task->cow_faults);
2231 counter_free(&task->messages_sent);
2232 counter_free(&task->messages_received);
2233 counter_free(&task->pages_grabbed);
2234 counter_free(&task->pages_grabbed_kern);
2235 counter_free(&task->pages_grabbed_iopl);
2236 counter_free(&task->pages_grabbed_upl);
2237
2238 #if CONFIG_COALITIONS
2239 task_release_coalitions(task);
2240 #endif /* CONFIG_COALITIONS */
2241
2242 bzero(task->coalition, sizeof(task->coalition));
2243
2244 #if MACH_BSD
2245 /* clean up collected information since last reference to task is gone */
2246 if (task->corpse_info) {
2247 void *corpse_info_kernel = kcdata_memory_get_begin_addr(task->corpse_info);
2248 task_crashinfo_destroy(task->corpse_info);
2249 task->corpse_info = NULL;
2250 kfree_data(corpse_info_kernel, CORPSEINFO_ALLOCATION_SIZE);
2251 }
2252 #endif
2253
2254 #if CONFIG_MACF
2255 if (get_task_crash_label(task)) {
2256 mac_exc_free_label(get_task_crash_label(task));
2257 set_task_crash_label(task, NULL);
2258 }
2259 #endif
2260
2261 assert(queue_empty(&task->task_objq));
2262 task_objq_lock_destroy(task);
2263
2264 if (task->corpse_vmobject_list) {
2265 kfree_data(task->corpse_vmobject_list,
2266 (vm_size_t)task->corpse_vmobject_list_size);
2267 }
2268
2269 task_ref_count_fini(task);
2270 proc_ro_erase_task(task->bsd_info_ro);
2271 task_release_proc_task_struct(task, task->bsd_info_ro);
2272 }
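
/*
 * Sketch (not compiled): how the task_deallocate*() wrappers reach the
 * function above. task_ref_release() is a hypothetical name for the
 * os_ref-style release that reports the remaining count; the real wrapper
 * lives elsewhere.
 */
#if 0
void
example_task_deallocate(task_t task)
{
	os_ref_count_t refs = task_ref_release(task);   /* hypothetical */
	/* no-op until refs hits 1 (importance disconnect) and then 0 */
	task_deallocate_internal(task, refs);
}
#endif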
2273
2274 /*
2275 * task_name_deallocate_mig:
2276 *
2277 * Drop a reference on a task name.
2278 */
2279 void
2280 task_name_deallocate_mig(
2281 task_name_t task_name)
2282 {
2283 return task_deallocate_grp((task_t)task_name, TASK_GRP_MIG);
2284 }
2285
2286 /*
2287 * task_policy_set_deallocate_mig:
2288 *
2289 * Drop a reference on a task policy set port.
2290 */
2291 void
2292 task_policy_set_deallocate_mig(task_policy_set_t task_policy_set)
2293 {
2294 return task_deallocate_grp((task_t)task_policy_set, TASK_GRP_MIG);
2295 }
2296
2297 /*
2298 * task_policy_get_deallocate_mig:
2299 *
2300 * Drop a reference on a task policy get port.
2301 */
2302 void
2303 task_policy_get_deallocate_mig(task_policy_get_t task_policy_get)
2304 {
2305 return task_deallocate_grp((task_t)task_policy_get, TASK_GRP_MIG);
2306 }
2307
2308 /*
2309 * task_inspect_deallocate_mig:
2310 *
2311 * Drop a task inspection reference.
2312 */
2313 void
2314 task_inspect_deallocate_mig(
2315 task_inspect_t task_inspect)
2316 {
2317 return task_deallocate_grp((task_t)task_inspect, TASK_GRP_MIG);
2318 }
2319
2320 /*
2321 * task_read_deallocate_mig:
2322 *
2323 * Drop a reference on task read port.
2324 */
2325 void
2326 task_read_deallocate_mig(
2327 task_read_t task_read)
2328 {
2329 return task_deallocate_grp((task_t)task_read, TASK_GRP_MIG);
2330 }
2331
2332 /*
2333 * task_suspension_token_deallocate:
2334 *
2335 * Drop a reference on a task suspension token.
2336 */
2337 void
2338 task_suspension_token_deallocate(
2339 task_suspension_token_t token)
2340 {
2341 return task_deallocate((task_t)token);
2342 }
2343
2344 void
2345 task_suspension_token_deallocate_grp(
2346 task_suspension_token_t token,
2347 task_grp_t grp)
2348 {
2349 return task_deallocate_grp((task_t)token, grp);
2350 }
2351
2352 /*
2353 * task_collect_crash_info:
2354 *
2355 * Collect crash info from BSD- and Mach-based data.
2356 */
2357 kern_return_t
2358 task_collect_crash_info(
2359 task_t task,
2360 #ifdef CONFIG_MACF
2361 struct label *crash_label,
2362 #endif
2363 int is_corpse_fork)
2364 {
2365 kern_return_t kr = KERN_SUCCESS;
2366
2367 kcdata_descriptor_t crash_data = NULL;
2368 kcdata_descriptor_t crash_data_release = NULL;
2369 mach_msg_type_number_t size = CORPSEINFO_ALLOCATION_SIZE;
2370 mach_vm_offset_t crash_data_ptr = 0;
2371 void *crash_data_kernel = NULL;
2372 void *crash_data_kernel_release = NULL;
2373 #if CONFIG_MACF
2374 struct label *label, *free_label;
2375 #endif
2376
2377 if (!corpses_enabled()) {
2378 return KERN_NOT_SUPPORTED;
2379 }
2380
2381 #if CONFIG_MACF
2382 free_label = label = mac_exc_create_label(NULL);
2383 #endif
2384
2385 task_lock(task);
2386
2387 assert(is_corpse_fork || get_bsdtask_info(task) != NULL);
2388 if (task->corpse_info == NULL && (is_corpse_fork || get_bsdtask_info(task) != NULL)) {
2389 #if CONFIG_MACF
2390 /* Set the crash label, used by the exception delivery mac hook */
2391 free_label = get_task_crash_label(task); // Most likely NULL.
2392 set_task_crash_label(task, label);
2393 mac_exc_update_task_crash_label(task, crash_label);
2394 #endif
2395 task_unlock(task);
2396
2397 crash_data_kernel = kalloc_data(CORPSEINFO_ALLOCATION_SIZE,
2398 Z_WAITOK | Z_ZERO);
2399 if (crash_data_kernel == NULL) {
2400 kr = KERN_RESOURCE_SHORTAGE;
2401 goto out_no_lock;
2402 }
2403 crash_data_ptr = (mach_vm_offset_t) crash_data_kernel;
2404
2405 /* Do not get a corpse ref for corpse fork */
2406 crash_data = task_crashinfo_alloc_init((mach_vm_address_t)crash_data_ptr, size,
2407 is_corpse_fork ? 0 : CORPSE_CRASHINFO_HAS_REF,
2408 KCFLAG_USE_MEMCOPY);
2409 if (crash_data) {
2410 task_lock(task);
2411 crash_data_release = task->corpse_info;
2412 crash_data_kernel_release = kcdata_memory_get_begin_addr(crash_data_release);
2413 task->corpse_info = crash_data;
2414
2415 task_unlock(task);
2416 kr = KERN_SUCCESS;
2417 } else {
2418 kfree_data(crash_data_kernel,
2419 CORPSEINFO_ALLOCATION_SIZE);
2420 kr = KERN_FAILURE;
2421 }
2422
2423 if (crash_data_release != NULL) {
2424 task_crashinfo_destroy(crash_data_release);
2425 }
2426 kfree_data(crash_data_kernel_release, CORPSEINFO_ALLOCATION_SIZE);
2427 } else {
2428 task_unlock(task);
2429 }
2430
2431 out_no_lock:
2432 #if CONFIG_MACF
2433 if (free_label != NULL) {
2434 mac_exc_free_label(free_label);
2435 }
2436 #endif
2437 return kr;
2438 }
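
/*
 * Note on the swap above: any pre-existing corpse_info is detached while
 * the task lock is held and destroyed only after the lock is dropped, so a
 * concurrent reader never observes a freed descriptor. Sketch of the
 * ordering (not compiled):
 */
#if 0
	kcdata_descriptor_t fresh = crash_data;   /* freshly built descriptor */
	kcdata_descriptor_t old;

	task_lock(task);
	old = task->corpse_info;      /* detach under the lock */
	task->corpse_info = fresh;    /* publish the replacement */
	task_unlock(task);
	task_crashinfo_destroy(old);  /* tear down outside the lock */
#endif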
2439
2440 /*
2441 * task_deliver_crash_notification:
2442 *
2443 * Makes outcall to registered host port for a corpse.
2444 */
2445 kern_return_t
2446 task_deliver_crash_notification(
2447 task_t corpse, /* corpse or corpse fork */
2448 thread_t thread,
2449 exception_type_t etype,
2450 mach_exception_subcode_t subcode)
2451 {
2452 kcdata_descriptor_t crash_info = corpse->corpse_info;
2453 thread_t th_iter = NULL;
2454 kern_return_t kr = KERN_SUCCESS;
2455 wait_interrupt_t wsave;
2456 mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
2457 ipc_port_t corpse_port;
2458
2459 if (crash_info == NULL) {
2460 return KERN_FAILURE;
2461 }
2462
2463 assert(task_is_a_corpse(corpse));
2464
2465 task_lock(corpse);
2466
2467 /*
2468 * Always populate code[0] as the effective exception type for EXC_CORPSE_NOTIFY.
2469 * Crash reporters should derive whether it's fatal from corpse blob.
2470 */
2471 code[0] = etype;
2472 code[1] = subcode;
2473
2474 queue_iterate(&corpse->threads, th_iter, thread_t, task_threads)
2475 {
2476 if (th_iter->corpse_dup == FALSE) {
2477 ipc_thread_reset(th_iter);
2478 }
2479 }
2480 task_unlock(corpse);
2481
2482 /* Arm the no-sender notification for taskport */
2483 task_reference(corpse);
2484 corpse_port = convert_corpse_to_port_and_nsrequest(corpse);
2485
2486 wsave = thread_interrupt_level(THREAD_UNINT);
2487 kr = exception_triage_thread(EXC_CORPSE_NOTIFY, code, EXCEPTION_CODE_MAX, thread);
2488 if (kr != KERN_SUCCESS) {
2489 printf("Failed to send exception EXC_CORPSE_NOTIFY. error code: %d for pid %d\n", kr, task_pid(corpse));
2490 }
2491
2492 (void)thread_interrupt_level(wsave);
2493
2494 /*
2495 * Drop the send right on corpse port, will fire the
2496 * no-sender notification if exception deliver failed.
2497 */
2498 ipc_port_release_send(corpse_port);
2499 return kr;
2500 }
2501
2502 /*
2503 * task_terminate:
2504 *
2505 * Terminate the specified task. See comments on thread_terminate
2506 * (kern/thread.c) about problems with terminating the "current task."
2507 */
2508
2509 kern_return_t
2510 task_terminate(
2511 task_t task)
2512 {
2513 if (task == TASK_NULL) {
2514 return KERN_INVALID_ARGUMENT;
2515 }
2516
2517 if (get_bsdtask_info(task)) {
2518 return KERN_FAILURE;
2519 }
2520
2521 return task_terminate_internal(task);
2522 }
2523
2524 #if MACH_ASSERT
2525 extern int proc_pid(struct proc *);
2526 extern void proc_name_kdp(struct proc *p, char *buf, int size);
2527 #endif /* MACH_ASSERT */
2528
2529 static void
2530 __unused task_partial_reap(task_t task, __unused int pid)
2531 {
2532 unsigned int reclaimed_resident = 0;
2533 unsigned int reclaimed_compressed = 0;
2534 uint64_t task_page_count;
2535
2536 task_page_count = (get_task_phys_footprint(task) / PAGE_SIZE_64);
2537
2538 KDBG(VMDBG_CODE(DBG_VM_MAP_PARTIAL_REAP) | DBG_FUNC_START,
2539 pid, task_page_count);
2540
2541 vm_map_partial_reap(task->map, &reclaimed_resident, &reclaimed_compressed);
2542
2543 KDBG(VMDBG_CODE(DBG_VM_MAP_PARTIAL_REAP) | DBG_FUNC_END,
2544 pid, reclaimed_resident, reclaimed_compressed);
2545 }
2546
2547 /*
2548 * task_mark_corpse:
2549 *
2550 * Mark the task as a corpse. Called by crashing thread.
2551 */
2552 kern_return_t
2553 task_mark_corpse(task_t task)
2554 {
2555 kern_return_t kr = KERN_SUCCESS;
2556 thread_t self_thread;
2557 (void) self_thread;
2558 wait_interrupt_t wsave;
2559 #if CONFIG_MACF
2560 struct label *crash_label = NULL;
2561 #endif
2562
2563 assert(task != kernel_task);
2564 assert(task == current_task());
2565 assert(!task_is_a_corpse(task));
2566
2567 #if CONFIG_MACF
2568 crash_label = mac_exc_create_label_for_proc((struct proc*)get_bsdtask_info(task));
2569 #endif
2570
2571 kr = task_collect_crash_info(task,
2572 #if CONFIG_MACF
2573 crash_label,
2574 #endif
2575 FALSE);
2576 if (kr != KERN_SUCCESS) {
2577 goto out;
2578 }
2579
2580 /* Store owned vmobjects so we can access them after being marked as corpse */
2581 task_store_owned_vmobject_info(task, task);
2582
2583 self_thread = current_thread();
2584
2585 wsave = thread_interrupt_level(THREAD_UNINT);
2586 task_lock(task);
2587
2588 /*
2589 * Check if any other thread called task_terminate_internal
2590 * and made the task inactive before we could mark it for
2591 * corpse pending report. Bail out if the task is inactive.
2592 */
2593 if (!task->active) {
2594 kcdata_descriptor_t crash_data_release = task->corpse_info;
2595 void *crash_data_kernel_release = kcdata_memory_get_begin_addr(crash_data_release);
2596
2597 task->corpse_info = NULL;
2598 task_unlock(task);
2599
2600 if (crash_data_release != NULL) {
2601 task_crashinfo_destroy(crash_data_release);
2602 }
2603 kfree_data(crash_data_kernel_release, CORPSEINFO_ALLOCATION_SIZE);
2604 return KERN_TERMINATED;
2605 }
2606
2607 task_set_corpse_pending_report(task);
2608 task_set_corpse(task);
2609 task->crashed_thread_id = thread_tid(self_thread);
2610
2611 kr = task_start_halt_locked(task, TRUE);
2612 assert(kr == KERN_SUCCESS);
2613
2614 task_set_uniqueid(task);
2615
2616 task_unlock(task);
2617
2618 /*
2619 * ipc_task_reset() moved to last thread_terminate_self(): rdar://75737960.
2620 * disable old ports here instead.
2621 *
2622 * The vm_map and ipc_space must exist until this function returns,
2623 * convert_port_to_{map,space}_with_flavor relies on this behavior.
2624 */
2625 ipc_task_disable(task);
2626
2627 /* let iokit know: termination phase 1 */
2628 iokit_task_terminate(task, 1);
2629
2630 /* terminate the ipc space */
2631 ipc_space_terminate(task->itk_space);
2632
2633 /* Add it to global corpse task list */
2634 task_add_to_corpse_task_list(task);
2635
2636 thread_terminate_internal(self_thread);
2637
2638 (void) thread_interrupt_level(wsave);
2639 assert(task->halting == TRUE);
2640
2641 out:
2642 #if CONFIG_MACF
2643 mac_exc_free_label(crash_label);
2644 #endif
2645 return kr;
2646 }
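
/*
 * Summary of the corpse hand-off above (descriptive only): collect crash
 * info; mark corpse-pending and corpse under the task lock; start the halt;
 * disable the old ports and terminate the ipc space; enqueue on the global
 * corpse list; finally terminate the calling thread, whose teardown later
 * drives the EXC_CORPSE_NOTIFY delivery.
 */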
2647
2648 /*
2649 * task_set_uniqueid
2650 *
2651 * Set task uniqueid to systemwide unique 64 bit value
2652 */
2653 void
2654 task_set_uniqueid(task_t task)
2655 {
2656 task->task_uniqueid = OSIncrementAtomic64(&next_taskuniqueid);
2657 }
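
/*
 * Note: OSIncrementAtomic64() returns the pre-increment value, so assuming
 * next_taskuniqueid is initialized to 1 elsewhere, created tasks receive
 * ids 1, 2, 3, ... while the kernel task keeps the implicit id 0.
 */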
2658
2659 /*
2660 * task_clear_corpse
2661 *
2662 * Clears the corpse pending bit on task.
2663 * Removes inspection bit on the threads.
2664 */
2665 void
2666 task_clear_corpse(task_t task)
2667 {
2668 thread_t th_iter = NULL;
2669
2670 task_lock(task);
2671 queue_iterate(&task->threads, th_iter, thread_t, task_threads)
2672 {
2673 thread_mtx_lock(th_iter);
2674 th_iter->inspection = FALSE;
2675 ipc_thread_disable(th_iter);
2676 thread_mtx_unlock(th_iter);
2677 }
2678
2679 thread_terminate_crashed_threads();
2680 /* remove the pending corpse report flag */
2681 task_clear_corpse_pending_report(task);
2682
2683 task_unlock(task);
2684 }
2685
2686 /*
2687 * task_port_no_senders
2688 *
2689 * Called whenever the Mach port system detects no-senders on
2690 * the task port of a corpse.
2691 * Each notification that comes in should terminate the task (corpse).
2692 */
2693 static void
2694 task_port_no_senders(ipc_port_t port, __unused mach_port_mscount_t mscount)
2695 {
2696 task_t task = ipc_kobject_get_locked(port, IKOT_TASK_CONTROL);
2697
2698 assert(task != TASK_NULL);
2699 assert(task_is_a_corpse(task));
2700
2701 /* Remove the task from global corpse task list */
2702 task_remove_from_corpse_task_list(task);
2703
2704 task_clear_corpse(task);
2705 vm_map_unset_corpse_source(task->map);
2706 task_terminate_internal(task);
2707 }
2708
2709 /*
2710 * task_port_with_flavor_no_senders
2711 *
2712 * Called whenever the Mach port system detects no-senders on
2713 * the task inspect or read port. These ports are allocated lazily and
2714 * should be deallocated here when there are no senders remaining.
2715 */
2716 static void
2717 task_port_with_flavor_no_senders(
2718 ipc_port_t port,
2719 mach_port_mscount_t mscount __unused)
2720 {
2721 task_t task;
2722 mach_task_flavor_t flavor;
2723 ipc_kobject_type_t kotype;
2724
2725 ip_mq_lock(port);
2726 if (port->ip_srights > 0) {
2727 ip_mq_unlock(port);
2728 return;
2729 }
2730 kotype = ip_kotype(port);
2731 assert((IKOT_TASK_READ == kotype) || (IKOT_TASK_INSPECT == kotype));
2732 task = ipc_kobject_get_locked(port, kotype);
2733 if (task != TASK_NULL) {
2734 task_reference(task);
2735 }
2736 ip_mq_unlock(port);
2737
2738 if (task == TASK_NULL) {
2739 /* The task is exiting or disabled; it will eventually deallocate the port */
2740 return;
2741 }
2742
2743 if (kotype == IKOT_TASK_READ) {
2744 flavor = TASK_FLAVOR_READ;
2745 } else {
2746 flavor = TASK_FLAVOR_INSPECT;
2747 }
2748
2749 itk_lock(task);
2750 ip_mq_lock(port);
2751
2752 /*
2753 * If the port is no longer active, then ipc_task_terminate() ran
2754 * and destroyed the kobject already. Just deallocate the task
2755 * ref we took and go away.
2756 *
2757 * It is also possible that several nsrequests are in flight,
2758 * only one shall NULL-out the port entry, and this is the one
2759 * that gets to dealloc the port.
2760 *
2761 * Check for a stale no-senders notification. A call to any function
2762 * that vends out send rights to this port could resurrect it between
2763 * this notification being generated and actually being handled here.
2764 */
2765 if (!ip_active(port) ||
2766 task->itk_task_ports[flavor] != port ||
2767 port->ip_srights > 0) {
2768 ip_mq_unlock(port);
2769 itk_unlock(task);
2770 task_deallocate(task);
2771 return;
2772 }
2773
2774 assert(task->itk_task_ports[flavor] == port);
2775 task->itk_task_ports[flavor] = IP_NULL;
2776 itk_unlock(task);
2777
2778 ipc_kobject_dealloc_port_and_unlock(port, 0, kotype);
2779
2780 task_deallocate(task);
2781 }
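
/*
 * The race guarded against above, illustrated (descriptive only):
 *
 *   T1: ip_srights drops 1 -> 0, a no-senders notification is queued
 *   T2: some call vends a fresh send right for the same flavor port
 *   T1: this handler runs, re-checks ip_srights under both locks,
 *       sees it is nonzero, and bails out -- the port survives
 */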
2782
2783 /*
2784 * task_wait_till_threads_terminate_locked
2785 *
2786 * Wait till all the threads in the task are terminated.
2787 * Might release the task lock and re-acquire it.
2788 */
2789 void
2790 task_wait_till_threads_terminate_locked(task_t task)
2791 {
2792 /* wait for all the threads in the task to terminate */
2793 while (task->active_thread_count != 0) {
2794 assert_wait((event_t)&task->active_thread_count, THREAD_UNINT);
2795 task_unlock(task);
2796 thread_block(THREAD_CONTINUE_NULL);
2797
2798 task_lock(task);
2799 }
2800 }
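
/*
 * Sketch (not compiled): the wakeup side of the wait above, in the usual
 * Mach assert_wait/thread_wakeup idiom. The decrement site lives in thread
 * teardown; this is an assumed shape, not the actual code:
 */
#if 0
	if (--task->active_thread_count == 0) {
		thread_wakeup((event_t)&task->active_thread_count);
	}
#endif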
2801
2802 /*
2803 * task_duplicate_map_and_threads
2804 *
2805 * Copy the vmmap of the source task.
2806 * Copy active threads from the source task to the destination task.
2807 * The source task is suspended during the copy.
2808 */
2809 kern_return_t
2810 task_duplicate_map_and_threads(
2811 task_t task,
2812 void *p,
2813 task_t new_task,
2814 thread_t *thread_ret,
2815 uint64_t **udata_buffer,
2816 int *size,
2817 int *num_udata,
2818 bool for_exception)
2819 {
2820 kern_return_t kr = KERN_SUCCESS;
2821 int active;
2822 thread_t thread, self, thread_return = THREAD_NULL;
2823 thread_t new_thread = THREAD_NULL, first_thread = THREAD_NULL;
2824 thread_t *thread_array;
2825 uint32_t active_thread_count = 0, array_count = 0, i;
2826 vm_map_t oldmap;
2827 uint64_t *buffer = NULL;
2828 int buf_size = 0;
2829 int est_knotes = 0, num_knotes = 0;
2830
2831 self = current_thread();
2832
2833 /*
2834 * Suspend the task to copy thread state, use the internal
2835 * variant so that no user-space process can resume
2836 * the task from under us
2837 */
2838 kr = task_suspend_internal(task);
2839 if (kr != KERN_SUCCESS) {
2840 return kr;
2841 }
2842
2843 if (task->map->disable_vmentry_reuse == TRUE) {
2844 /*
2845 * Quite likely GuardMalloc (or some debugging tool)
2846 * is being used on this task. And it has gone through
2847 * its limit. Making a corpse will likely encounter
2848 * a lot of VM entries that will need COW.
2849 *
2850 * Skip it.
2851 */
2852 #if DEVELOPMENT || DEBUG
2853 memorystatus_abort_vm_map_fork(task);
2854 #endif
2855 ktriage_record(thread_tid(self), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_CORPSE, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_CORPSE_FAIL_LIBGMALLOC), 0 /* arg */);
2856 task_resume_internal(task);
2857 return KERN_FAILURE;
2858 }
2859
2860 /* Check with VM if vm_map_fork is allowed for this task */
2861 bool is_large = false;
2862 if (memorystatus_allowed_vm_map_fork(task, &is_large)) {
2863 /* Set up the new task's vmmap: switch from the parent task's map to its COW map */
2864 oldmap = new_task->map;
2865 new_task->map = vm_map_fork(new_task->ledger,
2866 task->map,
2867 (VM_MAP_FORK_SHARE_IF_INHERIT_NONE |
2868 VM_MAP_FORK_PRESERVE_PURGEABLE |
2869 VM_MAP_FORK_CORPSE_FOOTPRINT |
2870 VM_MAP_FORK_SHARE_IF_OWNED));
2871 if (new_task->map) {
2872 new_task->is_large_corpse = is_large;
2873 vm_map_deallocate(oldmap);
2874
2875 /* copy ledgers that impact the memory footprint */
2876 vm_map_copy_footprint_ledgers(task, new_task);
2877
2878 /* Get all the udata pointers from kqueue */
2879 est_knotes = kevent_proc_copy_uptrs(p, NULL, 0);
2880 if (est_knotes > 0) {
2881 buf_size = (est_knotes + 32) * sizeof(uint64_t);
2882 buffer = kalloc_data(buf_size, Z_WAITOK);
2883 num_knotes = kevent_proc_copy_uptrs(p, buffer, buf_size);
2884 if (num_knotes > est_knotes + 32) {
2885 num_knotes = est_knotes + 32;
2886 }
2887 }
2888 } else {
2889 if (is_large) {
2890 assert(large_corpse_count > 0);
2891 OSDecrementAtomic(&large_corpse_count);
2892 }
2893 new_task->map = oldmap;
2894 #if DEVELOPMENT || DEBUG
2895 memorystatus_abort_vm_map_fork(task);
2896 #endif
2897 task_resume_internal(task);
2898 return KERN_NO_SPACE;
2899 }
2900 } else if (!for_exception) {
2901 #if DEVELOPMENT || DEBUG
2902 memorystatus_abort_vm_map_fork(task);
2903 #endif
2904 task_resume_internal(task);
2905 return KERN_NO_SPACE;
2906 }
2907
2908 active_thread_count = task->active_thread_count;
2909 if (active_thread_count == 0) {
2910 kfree_data(buffer, buf_size);
2911 task_resume_internal(task);
2912 return KERN_FAILURE;
2913 }
2914
2915 thread_array = kalloc_type(thread_t, active_thread_count, Z_WAITOK);
2916
2917 /* Iterate all the threads and drop the task lock before calling thread_create_with_continuation */
2918 task_lock(task);
2919 queue_iterate(&task->threads, thread, thread_t, task_threads) {
2920 /* Skip inactive threads */
2921 active = thread->active;
2922 if (!active) {
2923 continue;
2924 }
2925
2926 if (array_count >= active_thread_count) {
2927 break;
2928 }
2929
2930 thread_array[array_count++] = thread;
2931 thread_reference(thread);
2932 }
2933 task_unlock(task);
2934
2935 for (i = 0; i < array_count; i++) {
2936 kr = thread_create_with_continuation(new_task, &new_thread, (thread_continue_t)thread_corpse_continue);
2937 if (kr != KERN_SUCCESS) {
2938 break;
2939 }
2940
2941 /* Equivalent of current thread in corpse */
2942 if (thread_array[i] == self) {
2943 thread_return = new_thread;
2944 new_task->crashed_thread_id = thread_tid(new_thread);
2945 } else if (first_thread == NULL) {
2946 first_thread = new_thread;
2947 } else {
2948 /* drop the extra ref returned by thread_create_with_continuation */
2949 thread_deallocate(new_thread);
2950 }
2951
2952 kr = thread_dup2(thread_array[i], new_thread);
2953 if (kr != KERN_SUCCESS) {
2954 thread_mtx_lock(new_thread);
2955 new_thread->corpse_dup = TRUE;
2956 thread_mtx_unlock(new_thread);
2957 continue;
2958 }
2959
2960 /* Copy thread name */
2961 bsd_copythreadname(get_bsdthread_info(new_thread),
2962 get_bsdthread_info(thread_array[i]));
2963 new_thread->thread_tag = thread_array[i]->thread_tag &
2964 ~THREAD_TAG_USER_JOIN;
2965 thread_copy_resource_info(new_thread, thread_array[i]);
2966 }
2967
2968 /* return the first thread if we couldn't find the equivalent of current */
2969 if (thread_return == THREAD_NULL) {
2970 thread_return = first_thread;
2971 } else if (first_thread != THREAD_NULL) {
2972 /* drop the extra ref returned by thread_create_with_continuation */
2973 thread_deallocate(first_thread);
2974 }
2975
2976 task_resume_internal(task);
2977
2978 for (i = 0; i < array_count; i++) {
2979 thread_deallocate(thread_array[i]);
2980 }
2981 kfree_type(thread_t, active_thread_count, thread_array);
2982
2983 if (kr == KERN_SUCCESS) {
2984 *thread_ret = thread_return;
2985 *udata_buffer = buffer;
2986 *size = buf_size;
2987 *num_udata = num_knotes;
2988 } else {
2989 if (thread_return != THREAD_NULL) {
2990 thread_deallocate(thread_return);
2991 }
2992 kfree_data(buffer, buf_size);
2993 }
2994
2995 return kr;
2996 }
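
/*
 * Note on the snapshot pattern above (descriptive only): the thread list is
 * captured into thread_array under the task lock with one reference taken
 * per entry, so thread_dup2() can run unlocked against threads that cannot
 * be reaped underneath it; those references are dropped in the final loop
 * once duplication completes.
 */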
2997
2998 #if CONFIG_SECLUDED_MEMORY
2999 extern void task_set_can_use_secluded_mem_locked(
3000 task_t task,
3001 boolean_t can_use_secluded_mem);
3002 #endif /* CONFIG_SECLUDED_MEMORY */
3003
3004 #if MACH_ASSERT
3005 int debug4k_panic_on_terminate = 0;
3006 #endif /* MACH_ASSERT */
3007 kern_return_t
3008 task_terminate_internal(
3009 task_t task)
3010 {
3011 thread_t thread, self;
3012 task_t self_task;
3013 boolean_t interrupt_save;
3014 int pid = 0;
3015
3016 assert(task != kernel_task);
3017
3018 self = current_thread();
3019 self_task = current_task();
3020
3021 /*
3022 * Get the task locked and make sure that we are not racing
3023 * with someone else trying to terminate us.
3024 */
3025 if (task == self_task) {
3026 task_lock(task);
3027 } else if (task < self_task) {
3028 task_lock(task);
3029 task_lock(self_task);
3030 } else {
3031 task_lock(self_task);
3032 task_lock(task);
3033 }
3034
3035 #if CONFIG_SECLUDED_MEMORY
3036 if (task->task_can_use_secluded_mem) {
3037 task_set_can_use_secluded_mem_locked(task, FALSE);
3038 }
3039 task->task_could_use_secluded_mem = FALSE;
3040 task->task_could_also_use_secluded_mem = FALSE;
3041
3042 if (task->task_suppressed_secluded) {
3043 stop_secluded_suppression(task);
3044 }
3045 #endif /* CONFIG_SECLUDED_MEMORY */
3046
3047 if (!task->active) {
3048 /*
3049 * Task is already being terminated.
3050 * Just return an error. If we are dying, this will
3051 * just get us to our AST special handler and that
3052 * will get us to finalize the termination of ourselves.
3053 */
3054 task_unlock(task);
3055 if (self_task != task) {
3056 task_unlock(self_task);
3057 }
3058
3059 return KERN_FAILURE;
3060 }
3061
3062 if (task_corpse_pending_report(task)) {
3063 /*
3064 * Task is marked for reporting as corpse.
3065 * Just return an error. This will
3066 * just get us to our AST special handler and that
3067 * will get us to finish the path to death
3068 */
3069 task_unlock(task);
3070 if (self_task != task) {
3071 task_unlock(self_task);
3072 }
3073
3074 return KERN_FAILURE;
3075 }
3076
3077 if (self_task != task) {
3078 task_unlock(self_task);
3079 }
3080
3081 /*
3082 * Make sure the current thread does not get aborted out of
3083 * the waits inside these operations.
3084 */
3085 interrupt_save = thread_interrupt_level(THREAD_UNINT);
3086
3087 /*
3088 * Indicate that we want all the threads to stop executing
3089 * at user space by holding the task (we would have held
3090 * each thread independently in thread_terminate_internal -
3091 * but this way we may be more likely to already find it
3092 * held there). Mark the task inactive, and prevent
3093 * further task operations via the task port.
3094 *
3095 * The vm_map and ipc_space must exist until this function returns,
3096 * convert_port_to_{map,space}_with_flavor relies on this behavior.
3097 */
3098 bool first_suspension __unused = task_hold_locked(task);
3099 task->active = FALSE;
3100 ipc_task_disable(task);
3101
3102 #if CONFIG_EXCLAVES
3103 //rdar://139307390, first suspension might not have done conclave suspend.
3104 first_suspension = true;
3105 if (first_suspension) {
3106 task_unlock(task);
3107 task_suspend_conclave(task);
3108 task_lock(task);
3109 }
3110 #endif /* CONFIG_EXCLAVES */
3111
3112
3113 /*
3114 * Terminate each thread in the task.
3115 */
3116 queue_iterate(&task->threads, thread, thread_t, task_threads) {
3117 thread_terminate_internal(thread);
3118 }
3119
3120 #ifdef MACH_BSD
3121 void *bsd_info = get_bsdtask_info(task);
3122 if (bsd_info != NULL) {
3123 pid = proc_pid(bsd_info);
3124 }
3125 #endif /* MACH_BSD */
3126
3127 task_unlock(task);
3128
3129 #if CONFIG_EXCLAVES
3130 task_stop_conclave(task, false);
3131 #endif /* CONFIG_EXCLAVES */
3132
3133 proc_set_task_policy(task, TASK_POLICY_ATTRIBUTE,
3134 TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
3135
3136 /* Early object reap phase */
3137
3138 // PR-17045188: Revisit implementation
3139 // task_partial_reap(task, pid);
3140
3141 #if CONFIG_TASKWATCH
3142 /*
3143 * remove all task watchers
3144 */
3145 task_removewatchers(task);
3146
3147 #endif /* CONFIG_TASKWATCH */
3148
3149 /*
3150 * Destroy all synchronizers owned by the task.
3151 */
3152 task_synchronizer_destroy_all(task);
3153
3154 /*
3155 * Clear the watchport boost on the task.
3156 */
3157 task_remove_turnstile_watchports(task);
3158
3159 /* let IOKit know: task termination, phase 1 */
3160 iokit_task_terminate(task, 1);
3161
3162 /*
3163 * Destroy the IPC space, leaving just a reference for it.
3164 */
3165 ipc_space_terminate(task->itk_space);
3166
3167 #if 00
3168 /* if some ledgers go negative on tear-down again... */
3169 ledger_disable_panic_on_negative(task->map->pmap->ledger,
3170 task_ledgers.phys_footprint);
3171 ledger_disable_panic_on_negative(task->map->pmap->ledger,
3172 task_ledgers.internal);
3173 ledger_disable_panic_on_negative(task->map->pmap->ledger,
3174 task_ledgers.iokit_mapped);
3175 ledger_disable_panic_on_negative(task->map->pmap->ledger,
3176 task_ledgers.alternate_accounting);
3177 ledger_disable_panic_on_negative(task->map->pmap->ledger,
3178 task_ledgers.alternate_accounting_compressed);
3179 #endif
3180
3181 /*
3182 * If the current thread is a member of the task
3183 * being terminated, then the last reference to
3184 * the task will not be dropped until the thread
3185 * is finally reaped. To avoid incurring the
3186 * expense of removing the address space regions
3187 * at reap time, we do it explicitly here.
3188 */
3189
3190 #if MACH_ASSERT
3191 /*
3192 * Identify the pmap's process, in case the pmap ledgers drift
3193 * and we have to report it.
3194 */
3195 char procname[17];
3196 void *proc = get_bsdtask_info(task);
3197 if (proc) {
3198 pid = proc_pid(proc);
3199 proc_name_kdp(proc, procname, sizeof(procname));
3200 } else {
3201 pid = 0;
3202 strlcpy(procname, "<unknown>", sizeof(procname));
3203 }
3204 pmap_set_process(task->map->pmap, pid, procname);
3205 if (vm_map_page_shift(task->map) < (int)PAGE_SHIFT) {
3206 DEBUG4K_LIFE("map %p procname: %s\n", task->map, procname);
3207 if (debug4k_panic_on_terminate) {
3208 panic("DEBUG4K: %s:%d %d[%s] map %p", __FUNCTION__, __LINE__, pid, procname, task->map);
3209 }
3210 }
3211 #endif /* MACH_ASSERT */
3212
3213 vm_map_terminate(task->map);
3214
3215 /* release our shared region */
3216 vm_shared_region_set(task, NULL);
3217
3218 #if __has_feature(ptrauth_calls)
3219 task_set_shared_region_id(task, NULL);
3220 #endif /* __has_feature(ptrauth_calls) */
3221
3222 lck_mtx_lock(&tasks_threads_lock);
3223 queue_remove(&tasks, task, task_t, tasks);
3224 queue_enter(&terminated_tasks, task, task_t, tasks);
3225 tasks_count--;
3226 terminated_tasks_count++;
3227 lck_mtx_unlock(&tasks_threads_lock);
3228
3229 /*
3230 * We no longer need to guard against being aborted, so restore
3231 * the previous interruptible state.
3232 */
3233 thread_interrupt_level(interrupt_save);
3234
3235 #if CONFIG_CPU_COUNTERS
3236 /* force the task to release all ctrs */
3237 if (task->t_kpc & TASK_KPC_FORCED_ALL_CTRS) {
3238 kpc_force_all_ctrs(task, 0);
3239 }
3240 #endif /* CONFIG_CPU_COUNTERS */
3241
3242 #if CONFIG_COALITIONS
3243 /*
3244 * Leave the coalition for a corpse task or a task that
3245 * never had any active threads (e.g. fork, exec failure).
3246 * For a task with active threads, the task will be removed
3247 * from the coalition by the last terminating thread.
3248 */
3249 if (task->active_thread_count == 0) {
3250 coalitions_remove_task(task);
3251 }
3252 #endif
3253
3254 #if CONFIG_FREEZE
3255 extern int vm_compressor_available;
3256 if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE && vm_compressor_available) {
3257 task_disown_frozen_csegs(task);
3258 assert(queue_empty(&task->task_frozen_cseg_q));
3259 }
3260 #endif /* CONFIG_FREEZE */
3261
3262
3263 /*
3264 * Get rid of the task active reference on itself.
3265 */
3266 task_deallocate_grp(task, TASK_GRP_INTERNAL);
3267
3268 return KERN_SUCCESS;
3269 }
3270
3271 void
3272 tasks_system_suspend(boolean_t suspend)
3273 {
3274 task_t task;
3275
3276 KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SUSPEND_USERSPACE) |
3277 (suspend ? DBG_FUNC_START : DBG_FUNC_END));
3278
3279 lck_mtx_lock(&tasks_threads_lock);
3280 assert(tasks_suspend_state != suspend);
3281 tasks_suspend_state = suspend;
3282 queue_iterate(&tasks, task, task_t, tasks) {
3283 if (task == kernel_task) {
3284 continue;
3285 }
3286 suspend ? task_suspend_internal(task) : task_resume_internal(task);
3287 }
3288 lck_mtx_unlock(&tasks_threads_lock);
3289 }
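/*
 * Worked example (added, illustrative): tasks_system_suspend(TRUE) at
 * system sleep places one normal hold on every user task via
 * task_suspend_internal(); the matching tasks_system_suspend(FALSE) at
 * wake releases exactly that hold, and the tasks_suspend_state assert
 * above catches unbalanced callers.
 */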
3290
3291 /*
3292 * task_start_halt:
3293 *
3294 * Shut the current task down (except for the current thread) in
3295 * preparation for dramatic changes to the task (probably exec).
3296 * We hold the task and mark all other threads in the task for
3297 * termination.
3298 */
3299 kern_return_t
3300 task_start_halt(task_t task)
3301 {
3302 kern_return_t kr = KERN_SUCCESS;
3303 task_lock(task);
3304 kr = task_start_halt_locked(task, FALSE);
3305 task_unlock(task);
3306 return kr;
3307 }
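/*
 * Illustrative sketch (assumption, not part of the build): how an
 * exec-like path pairs task_start_halt() with task_complete_halt().
 * The function name exec_halt_example and its error handling are
 * hypothetical.
 */
#if 0
static kern_return_t
exec_halt_example(task_t task)
{
	kern_return_t kr = task_start_halt(task);
	if (kr != KERN_SUCCESS) {
		return kr;	/* task or thread already terminating */
	}
	/* ... tear down the old image ... */
	task_complete_halt(task);	/* waits for the other threads */
	return KERN_SUCCESS;
}
#endif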
3308
3309 static kern_return_t
3310 task_start_halt_locked(task_t task, boolean_t should_mark_corpse)
3311 {
3312 thread_t thread, self;
3313 uint64_t dispatchqueue_offset;
3314
3315 assert(task != kernel_task);
3316
3317 self = current_thread();
3318
3319 if (task != get_threadtask(self) && !task_is_a_corpse_fork(task)) {
3320 return KERN_INVALID_ARGUMENT;
3321 }
3322
3323 if (!should_mark_corpse &&
3324 (task->halting || !task->active || !self->active)) {
3325 /*
3326 * Task or current thread is already being terminated.
3327 * Hurry up and return out of the current kernel context
3328 * so that we run our AST special handler to terminate
3329 * ourselves. If should_mark_corpse is set, corpse
3330 * creation might have raced with exec; let the corpse
3331 * creation continue. Once the current thread reaches AST,
3332 * the thread in exec will be woken up from task_complete_halt.
3333 * Exec will fail because the proc was marked for exit.
3334 * Once the thread in exec reaches AST, it will call proc_exit
3335 * and deliver the EXC_CORPSE_NOTIFY.
3336 */
3337 return KERN_FAILURE;
3338 }
3339
3340 /* Thread creation will fail after this point of no return. */
3341 task->halting = TRUE;
3342
3343 /*
3344 * Mark all the threads to keep them from starting any more
3345 * user-level execution. The thread_terminate_internal code
3346 * would do this on a thread by thread basis anyway, but this
3347 * gives us a better chance of not having to wait there.
3348 */
3349 bool first_suspension __unused = task_hold_locked(task);
3350
3351 #if CONFIG_EXCLAVES
3352 if (should_mark_corpse) {
3353 void *crash_info_ptr = task_get_corpseinfo(task);
3354 queue_iterate(&task->threads, thread, thread_t, task_threads) {
3355 if (crash_info_ptr != NULL && thread->th_exclaves_ipc_ctx.ipcb != NULL) {
3356 struct thread_crash_exclaves_info info = { 0 };
3357
3358 info.tcei_flags = kExclaveRPCActive;
3359 info.tcei_scid = thread->th_exclaves_ipc_ctx.scid;
3360 info.tcei_thread_id = thread->thread_id;
3361
3362 kcdata_push_data(crash_info_ptr,
3363 STACKSHOT_KCTYPE_KERN_EXCLAVES_CRASH_THREADINFO,
3364 sizeof(struct thread_crash_exclaves_info), &info);
3365 }
3366 }
3367 }
3368 //rdar://139307390, first suspension might not have done conclave suspend.
3369 first_suspension = true;
3370 if (first_suspension || should_mark_corpse) {
3371 task_unlock(task);
3372 if (first_suspension) {
3373 task_suspend_conclave(task);
3374 }
3375
3376 if (should_mark_corpse) {
3377 task_stop_conclave(task, true);
3378 }
3379 task_lock(task);
3380 }
3381 #endif /* CONFIG_EXCLAVES */
3382
3383 dispatchqueue_offset = get_dispatchqueue_offset_from_proc(get_bsdtask_info(task));
3384 /*
3385 * Terminate all the other threads in the task.
3386 */
3387 queue_iterate(&task->threads, thread, thread_t, task_threads)
3388 {
3389 /*
3390 * Remove priority throttles so that threads terminate in a timely
3391 * manner. This has to be done after task_hold_locked() traps all
3392 * threads to AST, but before threads are marked inactive in
3393 * thread_terminate_internal(). Takes the thread mutex lock.
3394 *
3395 * We need the task_is_a_corpse() check so that we don't accidentally
3396 * update policy for tasks that are doing posix_spawn().
3397 *
3398 * See: thread_policy_update_tasklocked().
3399 */
3400 if (task_is_a_corpse(task)) {
3401 proc_set_thread_policy(thread, TASK_POLICY_ATTRIBUTE,
3402 TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
3403 }
3404
3405 if (should_mark_corpse) {
3406 thread_mtx_lock(thread);
3407 thread->inspection = TRUE;
3408 thread_mtx_unlock(thread);
3409 }
3410 if (thread != self) {
3411 thread_terminate_internal(thread);
3412 }
3413 }
3414 task->dispatchqueue_offset = dispatchqueue_offset;
3415
3416 task_release_locked(task);
3417
3418 return KERN_SUCCESS;
3419 }
3420
3421
3422 /*
3423 * task_complete_halt:
3424 *
3425 * Complete task halt by waiting for threads to terminate, then clean
3426 * up task resources (VM, port namespace, etc...) and then let the
3427 * current thread go in the (practically empty) task context.
3428 *
3429 * Note: task->halting flag is not cleared in order to avoid creation
3430 * of new thread in old exec'ed task.
3431 */
3432 void
3433 task_complete_halt(task_t task)
3434 {
3435 task_lock(task);
3436 assert(task->halting);
3437 assert(task == current_task());
3438
3439 /*
3440 * Wait for the other threads to get shut down.
3441 * When the last other thread is reaped, we'll be
3442 * woken up.
3443 */
3444 if (task->thread_count > 1) {
3445 assert_wait((event_t)&task->halting, THREAD_UNINT);
3446 task_unlock(task);
3447 thread_block(THREAD_CONTINUE_NULL);
3448 } else {
3449 task_unlock(task);
3450 }
3451
3452 #if CONFIG_DEFERRED_RECLAIM
3453 if (task->deferred_reclamation_metadata) {
3454 vm_deferred_reclamation_buffer_deallocate(
3455 task->deferred_reclamation_metadata);
3456 task->deferred_reclamation_metadata = NULL;
3457 }
3458 #endif /* CONFIG_DEFERRED_RECLAIM */
3459
3460 /*
3461 * Give the machine dependent code a chance
3462 * to perform cleanup of task-level resources
3463 * associated with the current thread before
3464 * ripping apart the task.
3465 */
3466 machine_task_terminate(task);
3467
3468 /*
3469 * Destroy all synchronizers owned by the task.
3470 */
3471 task_synchronizer_destroy_all(task);
3472
3473 /* let IOKit know: task termination, phase 1 */
3474 iokit_task_terminate(task, 1);
3475
3476 /*
3477 * Terminate the IPC space. A long time ago,
3478 * this used to be ipc_space_clean() which would
3479 * keep the space active but hollow it.
3480 *
3481 * We really do not need this semantics given
3482 * tasks die with exec now.
3483 */
3484 ipc_space_terminate(task->itk_space);
3485
3486 /*
3487 * Clean out the address space, as we are going to be
3488 * getting a new one.
3489 */
3490 vm_map_terminate(task->map);
3491
3492 /*
3493 * Kick out any IOKitUser handles to the task. At best they're stale,
3494 * at worst someone is racing a SUID exec.
3495 */
3496 /* let IOKit know: task termination, phase 2 */
3497 iokit_task_terminate(task, 2);
3498 }
3499
3500 #ifdef CONFIG_TASK_SUSPEND_STATS
3501
3502 static void
3503 _task_mark_suspend_source(task_t task)
3504 {
3505 int idx;
3506 task_suspend_stats_t stats;
3507 task_suspend_source_t source;
3508 task_lock_assert_owned(task);
3509 stats = &task->t_suspend_stats;
3510
3511 idx = stats->tss_count % TASK_SUSPEND_SOURCES_MAX;
3512 source = &task->t_suspend_sources[idx];
3513 bzero(source, sizeof(*source));
3514
3515 source->tss_time = mach_absolute_time();
3516 source->tss_tid = current_thread()->thread_id;
3517 source->tss_pid = task_pid(current_task());
3518 strlcpy(source->tss_procname, task_best_name(current_task()),
3519 sizeof(source->tss_procname));
3520
3521 stats->tss_count++;
3522 }
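/*
 * Worked example (added, illustrative): suspension sources live in a
 * fixed-size ring indexed by tss_count % TASK_SUSPEND_SOURCES_MAX, so
 * only the most recent TASK_SUSPEND_SOURCES_MAX suspenders are kept.
 * If the ring size were 4, the fifth suspension would overwrite slot 0
 * while tss_count still reports 5 suspensions in total.
 */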
3523
3524 static inline void
3525 _task_mark_suspend_start(task_t task)
3526 {
3527 task_lock_assert_owned(task);
3528 task->t_suspend_stats.tss_last_start = mach_absolute_time();
3529 }
3530
3531 static inline void
3532 _task_mark_suspend_end(task_t task)
3533 {
3534 task_lock_assert_owned(task);
3535 task->t_suspend_stats.tss_last_end = mach_absolute_time();
3536 task->t_suspend_stats.tss_duration += (task->t_suspend_stats.tss_last_end -
3537 task->t_suspend_stats.tss_last_start);
3538 }
3539
3540 static kern_return_t
3541 _task_get_suspend_stats_locked(task_t task, task_suspend_stats_t stats)
3542 {
3543 if (task == TASK_NULL || stats == NULL) {
3544 return KERN_INVALID_ARGUMENT;
3545 }
3546 task_lock_assert_owned(task);
3547 memcpy(stats, &task->t_suspend_stats, sizeof(task->t_suspend_stats));
3548 return KERN_SUCCESS;
3549 }
3550
3551 static kern_return_t
3552 _task_get_suspend_sources_locked(task_t task, task_suspend_source_t sources)
3553 {
3554 if (task == TASK_NULL || sources == NULL) {
3555 return KERN_INVALID_ARGUMENT;
3556 }
3557 task_lock_assert_owned(task);
3558 memcpy(sources, task->t_suspend_sources,
3559 sizeof(struct task_suspend_source_s) * TASK_SUSPEND_SOURCES_MAX);
3560 return KERN_SUCCESS;
3561 }
3562
3563 #endif /* CONFIG_TASK_SUSPEND_STATS */
3564
3565 kern_return_t
3566 task_get_suspend_stats(task_t task, task_suspend_stats_t stats)
3567 {
3568 #ifdef CONFIG_TASK_SUSPEND_STATS
3569 kern_return_t kr;
3570 if (task == TASK_NULL || stats == NULL) {
3571 return KERN_INVALID_ARGUMENT;
3572 }
3573 task_lock(task);
3574 kr = _task_get_suspend_stats_locked(task, stats);
3575 task_unlock(task);
3576 return kr;
3577 #else /* CONFIG_TASK_SUSPEND_STATS */
3578 (void)task;
3579 (void)stats;
3580 return KERN_NOT_SUPPORTED;
3581 #endif
3582 }
3583
3584 kern_return_t
3585 task_get_suspend_stats_kdp(task_t task, task_suspend_stats_t stats)
3586 {
3587 #ifdef CONFIG_TASK_SUSPEND_STATS
3588 if (task == TASK_NULL || stats == NULL) {
3589 return KERN_INVALID_ARGUMENT;
3590 }
3591 memcpy(stats, &task->t_suspend_stats, sizeof(task->t_suspend_stats));
3592 return KERN_SUCCESS;
3593 #else /* CONFIG_TASK_SUSPEND_STATS */
3594 #pragma unused(task, stats)
3595 return KERN_NOT_SUPPORTED;
3596 #endif /* CONFIG_TASK_SUSPEND_STATS */
3597 }
3598
3599 kern_return_t
3600 task_get_suspend_sources(task_t task, task_suspend_source_array_t sources)
3601 {
3602 #ifdef CONFIG_TASK_SUSPEND_STATS
3603 kern_return_t kr;
3604 if (task == TASK_NULL || sources == NULL) {
3605 return KERN_INVALID_ARGUMENT;
3606 }
3607 task_lock(task);
3608 kr = _task_get_suspend_sources_locked(task, sources);
3609 task_unlock(task);
3610 return kr;
3611 #else /* CONFIG_TASK_SUSPEND_STATS */
3612 (void)task;
3613 (void)sources;
3614 return KERN_NOT_SUPPORTED;
3615 #endif
3616 }
3617
3618 kern_return_t
3619 task_get_suspend_sources_kdp(task_t task, task_suspend_source_array_t sources)
3620 {
3621 #ifdef CONFIG_TASK_SUSPEND_STATS
3622 if (task == TASK_NULL || sources == NULL) {
3623 return KERN_INVALID_ARGUMENT;
3624 }
3625 memcpy(sources, task->t_suspend_sources,
3626 sizeof(struct task_suspend_source_s) * TASK_SUSPEND_SOURCES_MAX);
3627 return KERN_SUCCESS;
3628 #else /* CONFIG_TASK_SUSPEND_STATS */
3629 #pragma unused(task, sources)
3630 return KERN_NOT_SUPPORTED;
3631 #endif
3632 }
3633
3634 kern_return_t
3635 task_set_cs_auxiliary_info(task_t task, uint64_t info)
3636 {
3637 if (task == TASK_NULL) {
3638 return KERN_INVALID_ARGUMENT;
3639 }
3640
3641 task->task_cs_auxiliary_info = info;
3642 return KERN_SUCCESS;
3643 }
3644
3645 uint64_t
3646 task_get_cs_auxiliary_info_kdp(task_t task)
3647 {
3648 if (task == TASK_NULL) {
3649 return 0;
3650 }
3651 return task->task_cs_auxiliary_info;
3652 }
3653
3654 /*
3655 * task_hold_locked:
3656 *
3657 * Suspend execution of the specified task.
3658 * This is a recursive-style suspension of the task, a count of
3659 * suspends is maintained.
3660 *
3661 * CONDITIONS: the task is locked and active.
3662 * Returns true if this was first suspension
3663 */
3664 bool
3665 task_hold_locked(
3666 task_t task)
3667 {
3668 thread_t thread;
3669 void *bsd_info = get_bsdtask_info(task);
3670
3671 assert(task->active);
3672
3673 if (task->suspend_count++ > 0) {
3674 return false;
3675 }
3676
3677 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_SUSPENSION, MACH_TASK_SUSPEND),
3678 task_pid(task), task->user_stop_count, task->pidsuspended);
3679
3680 if (bsd_info) {
3681 workq_proc_suspended(bsd_info);
3682 }
3683
3684 /*
3685 * Iterate through all the threads and hold them.
3686 */
3687 queue_iterate(&task->threads, thread, thread_t, task_threads) {
3688 thread_mtx_lock(thread);
3689 thread_hold(thread);
3690 thread_mtx_unlock(thread);
3691 }
3692
3693 #ifdef CONFIG_TASK_SUSPEND_STATS
3694 _task_mark_suspend_start(task);
3695 #endif
3696 return true;
3697 }
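/*
 * Worked example (added, illustrative): two task_hold_locked() calls
 * leave suspend_count == 2 but hold the threads only once (on the
 * first call); two matching task_release_locked() calls are required
 * before the threads may run again.
 */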
3698
3699 /*
3700 * task_hold_and_wait
3701 *
3702 * Same as the internal routine above, except that it must lock
3703 * and verify that the task is active. This differs from task_suspend
3704 * in that it places a kernel hold on the task rather than just a
3705 * user-level hold. This keeps users from over-resuming and setting
3706 * it running out from under the kernel.
3707 *
3708 * CONDITIONS: the caller holds a reference on the task
3709 */
3710 kern_return_t
3711 task_hold_and_wait(
3712 task_t task,
3713 bool suspend_conclave __unused)
3714 {
3715 if (task == TASK_NULL) {
3716 return KERN_INVALID_ARGUMENT;
3717 }
3718
3719 task_lock(task);
3720 if (!task->active) {
3721 task_unlock(task);
3722 return KERN_FAILURE;
3723 }
3724
3725 #ifdef CONFIG_TASK_SUSPEND_STATS
3726 _task_mark_suspend_source(task);
3727 #endif /* CONFIG_TASK_SUSPEND_STATS */
3728
3729 bool first_suspension __unused = task_hold_locked(task);
3730
3731 #if CONFIG_EXCLAVES
3732 //rdar://139307390, first suspension might not have done conclave suspend.
3733 first_suspension = true;
3734 if (suspend_conclave && first_suspension) {
3735 task_unlock(task);
3736 task_suspend_conclave(task);
3737 task_lock(task);
3738 /*
3739 * If task terminated/resumed before we could wait on threads, then
3740 * it is a race we lost and we could treat that as termination/resume
3741 * happened after the wait and return SUCCESS.
3742 */
3743 if (!task->active || task->suspend_count <= 0) {
3744 task_unlock(task);
3745 return KERN_SUCCESS;
3746 }
3747 }
3748 #endif /* CONFIG_EXCLAVES */
3749
3750 task_wait_locked(task, FALSE);
3751 task_unlock(task);
3752
3753 return KERN_SUCCESS;
3754 }
3755
3756 /*
3757 * task_wait_locked:
3758 *
3759 * Wait for all threads in task to stop.
3760 *
3761 * Conditions:
3762 * Called with task locked, active, and held.
3763 */
3764 void
3765 task_wait_locked(
3766 task_t task,
3767 boolean_t until_not_runnable)
3768 {
3769 thread_t thread, self;
3770
3771 assert(task->active);
3772 assert(task->suspend_count > 0);
3773
3774 self = current_thread();
3775
3776 /*
3777 * Iterate through all the threads and wait for them to
3778 * stop. Do not wait for the current thread if it is within
3779 * the task.
3780 */
3781 queue_iterate(&task->threads, thread, thread_t, task_threads) {
3782 if (thread != self) {
3783 thread_wait(thread, until_not_runnable);
3784 }
3785 }
3786 }
3787
3788 boolean_t
3789 task_is_app_suspended(task_t task)
3790 {
3791 return task->pidsuspended;
3792 }
3793
3794 /*
3795 * task_release_locked:
3796 *
3797 * Release a kernel hold on a task.
3798 *
3799 * CONDITIONS: the task is locked and active
3800 */
3801 void
3802 task_release_locked(
3803 task_t task)
3804 {
3805 thread_t thread;
3806 void *bsd_info = get_bsdtask_info(task);
3807
3808 assert(task->active);
3809 assert(task->suspend_count > 0);
3810
3811 if (--task->suspend_count > 0) {
3812 return;
3813 }
3814
3815 if (bsd_info) {
3816 workq_proc_resumed(bsd_info);
3817 }
3818
3819 queue_iterate(&task->threads, thread, thread_t, task_threads) {
3820 thread_mtx_lock(thread);
3821 thread_release(thread);
3822 thread_mtx_unlock(thread);
3823 }
3824
3825 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_SUSPENSION, MACH_TASK_RESUME) | DBG_FUNC_NONE, task_pid(task));
3826
3827 #if CONFIG_TASK_SUSPEND_STATS
3828 _task_mark_suspend_end(task);
3829 #endif
3830
3831 //rdar://139307390.
3832 #if 0
3833 #if CONFIG_EXCLAVES
3834 task_unlock(task);
3835 task_resume_conclave(task);
3836 task_lock(task);
3837 #endif /* CONFIG_EXCLAVES */
3838 #endif
3839 }
3840
3841 /*
3842 * task_release:
3843 *
3844 * Same as the internal routine above, except that it must lock
3845 * and verify that the task is active.
3846 *
3847 * CONDITIONS: The caller holds a reference to the task
3848 */
3849 kern_return_t
3850 task_release(
3851 task_t task)
3852 {
3853 if (task == TASK_NULL) {
3854 return KERN_INVALID_ARGUMENT;
3855 }
3856
3857 task_lock(task);
3858
3859 if (!task->active) {
3860 task_unlock(task);
3861
3862 return KERN_FAILURE;
3863 }
3864
3865 task_release_locked(task);
3866 task_unlock(task);
3867
3868 return KERN_SUCCESS;
3869 }
3870
3871 static kern_return_t
3872 task_threads_internal(
3873 task_t task,
3874 thread_act_array_t *threads_out,
3875 mach_msg_type_number_t *countp,
3876 mach_thread_flavor_t flavor)
3877 {
3878 mach_msg_type_number_t actual, count, count_needed;
3879 thread_act_array_t thread_list;
3880 thread_t thread;
3881 unsigned int i;
3882
3883 count = 0;
3884 thread_list = NULL;
3885
3886 if (task == TASK_NULL) {
3887 return KERN_INVALID_ARGUMENT;
3888 }
3889
3890 assert(flavor <= THREAD_FLAVOR_INSPECT);
3891
3892 for (;;) {
3893 task_lock(task);
3894 if (!task->active) {
3895 task_unlock(task);
3896
3897 mach_port_array_free(thread_list, count);
3898 return KERN_FAILURE;
3899 }
3900
3901 count_needed = actual = task->thread_count;
3902 if (count_needed <= count) {
3903 break;
3904 }
3905
3906 /* unlock the task and allocate more memory */
3907 task_unlock(task);
3908
3909 mach_port_array_free(thread_list, count);
3910 count = count_needed;
3911 thread_list = mach_port_array_alloc(count, Z_WAITOK);
3912
3913 if (thread_list == NULL) {
3914 return KERN_RESOURCE_SHORTAGE;
3915 }
3916 }
3917
3918 i = 0;
3919 queue_iterate(&task->threads, thread, thread_t, task_threads) {
3920 assert(i < actual);
3921 thread_reference(thread);
3922 ((thread_t *)thread_list)[i++] = thread;
3923 }
3924
3925 count_needed = actual;
3926
3927 /* can unlock task now that we've got the thread refs */
3928 task_unlock(task);
3929
3930 if (actual == 0) {
3931 /* no threads, so return null pointer and deallocate memory */
3932
3933 mach_port_array_free(thread_list, count);
3934
3935 *threads_out = NULL;
3936 *countp = 0;
3937 } else {
3938 /* if we allocated too much, must copy */
3939 if (count_needed < count) {
3940 mach_port_array_t newaddr;
3941
3942 newaddr = mach_port_array_alloc(count_needed, Z_WAITOK);
3943 if (newaddr == NULL) {
3944 for (i = 0; i < actual; ++i) {
3945 thread_deallocate(((thread_t *)thread_list)[i]);
3946 }
3947 mach_port_array_free(thread_list, count);
3948 return KERN_RESOURCE_SHORTAGE;
3949 }
3950
3951 bcopy(thread_list, newaddr, count_needed * sizeof(thread_t));
3952 mach_port_array_free(thread_list, count);
3953 thread_list = newaddr;
3954 }
3955
3956 /* do the conversion that Mig should handle */
3957 convert_thread_array_to_ports(thread_list, actual, flavor);
3958
3959 *threads_out = thread_list;
3960 *countp = actual;
3961 }
3962
3963 return KERN_SUCCESS;
3964 }
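/*
 * Illustrative user-space sketch (assumption, not part of this file):
 * a caller of the task_threads() MIG routine must release both the
 * returned thread ports and the out-of-line array backing them.
 *
 *	thread_act_array_t threads;
 *	mach_msg_type_number_t count, i;
 *
 *	if (task_threads(task, &threads, &count) == KERN_SUCCESS) {
 *		for (i = 0; i < count; i++) {
 *			mach_port_deallocate(mach_task_self(), threads[i]);
 *		}
 *		vm_deallocate(mach_task_self(), (vm_address_t)threads,
 *		    count * sizeof(threads[0]));
 *	}
 */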
3965
3966
3967 kern_return_t
3968 task_threads_from_user(
3969 mach_port_t port,
3970 thread_act_array_t *threads_out,
3971 mach_msg_type_number_t *count)
3972 {
3973 ipc_kobject_type_t kotype;
3974 kern_return_t kr;
3975
3976 task_t task = convert_port_to_task_inspect_no_eval(port);
3977
3978 if (task == TASK_NULL) {
3979 return KERN_INVALID_ARGUMENT;
3980 }
3981
3982 kotype = ip_kotype(port);
3983
3984 switch (kotype) {
3985 case IKOT_TASK_CONTROL:
3986 kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_CONTROL);
3987 break;
3988 case IKOT_TASK_READ:
3989 kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_READ);
3990 break;
3991 case IKOT_TASK_INSPECT:
3992 kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_INSPECT);
3993 break;
3994 default:
3995 panic("strange kobject type");
3996 break;
3997 }
3998
3999 task_deallocate(task);
4000 return kr;
4001 }
4002
4003 #define TASK_HOLD_NORMAL 0
4004 #define TASK_HOLD_PIDSUSPEND 1
4005 #define TASK_HOLD_LEGACY 2
4006 #define TASK_HOLD_LEGACY_ALL 3
4007
4008 static kern_return_t
4009 place_task_hold(
4010 task_t task,
4011 int mode)
4012 {
4013 if (!task->active && !task_is_a_corpse(task)) {
4014 return KERN_FAILURE;
4015 }
4016
4017 /* Return success for corpse task */
4018 if (task_is_a_corpse(task)) {
4019 return KERN_SUCCESS;
4020 }
4021
4022 #if MACH_ASSERT
4023 current_task()->suspends_outstanding++;
4024 #endif
4025
4026 if (mode == TASK_HOLD_LEGACY) {
4027 task->legacy_stop_count++;
4028 }
4029
4030 #ifdef CONFIG_TASK_SUSPEND_STATS
4031 _task_mark_suspend_source(task);
4032 #endif /* CONFIG_TASK_SUSPEND_STATS */
4033
4034 if (task->user_stop_count++ > 0) {
4035 /*
4036 * If the stop count was positive, the task is
4037 * already stopped and we can exit.
4038 */
4039 return KERN_SUCCESS;
4040 }
4041
4042 /*
4043 * Put a kernel-level hold on the threads in the task (all
4044 * user-level task suspensions added together represent a
4045 * single kernel-level hold). We then wait for the threads
4046 * to stop executing user code.
4047 */
4048 bool first_suspension __unused = task_hold_locked(task);
4049
4050 //rdar://139307390, do not suspend conclave on task suspend.
4051 #if 0
4052 #if CONFIG_EXCLAVES
4053 if (first_suspension) {
4054 task_unlock(task);
4055 task_suspend_conclave(task);
4056
4057 /*
4058 * If task terminated/resumed before we could wait on threads, then
4059 * it is a race we lost and we could treat that as termination/resume
4060 * happened after the wait and return SUCCESS.
4061 */
4062 task_lock(task);
4063 if (!task->active || task->suspend_count <= 0) {
4064 return KERN_SUCCESS;
4065 }
4066 }
4067 #endif /* CONFIG_EXCLAVES */
4068 #endif
4069
4070 task_wait_locked(task, FALSE);
4071
4072 return KERN_SUCCESS;
4073 }
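/*
 * Worked example (added, illustrative): three user-level suspensions
 * raise user_stop_count to 3 but take only one kernel-level hold (on
 * the first call); release_task_hold() drops that kernel hold only
 * when user_stop_count returns to zero (or to 1 while pidsuspended).
 */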
4074
4075 static kern_return_t
4076 release_task_hold(
4077 task_t task,
4078 int mode)
4079 {
4080 boolean_t release = FALSE;
4081
4082 if (!task->active && !task_is_a_corpse(task)) {
4083 return KERN_FAILURE;
4084 }
4085
4086 /* Return success for corpse task */
4087 if (task_is_a_corpse(task)) {
4088 return KERN_SUCCESS;
4089 }
4090
4091 if (mode == TASK_HOLD_PIDSUSPEND) {
4092 if (task->pidsuspended == FALSE) {
4093 return KERN_FAILURE;
4094 }
4095 task->pidsuspended = FALSE;
4096 }
4097
4098 if (task->user_stop_count > (task->pidsuspended ? 1 : 0)) {
4099 #if MACH_ASSERT
4100 /*
4101 * This is obviously not robust; if we suspend one task and then resume a different one,
4102 * we'll fly under the radar. This is only meant to catch the common case of a crashed
4103 * or buggy suspender.
4104 */
4105 current_task()->suspends_outstanding--;
4106 #endif
4107
4108 if (mode == TASK_HOLD_LEGACY_ALL) {
4109 if (task->legacy_stop_count >= task->user_stop_count) {
4110 task->user_stop_count = 0;
4111 release = TRUE;
4112 } else {
4113 task->user_stop_count -= task->legacy_stop_count;
4114 }
4115 task->legacy_stop_count = 0;
4116 } else {
4117 if (mode == TASK_HOLD_LEGACY && task->legacy_stop_count > 0) {
4118 task->legacy_stop_count--;
4119 }
4120 if (--task->user_stop_count == 0) {
4121 release = TRUE;
4122 }
4123 }
4124 } else {
4125 return KERN_FAILURE;
4126 }
4127
4128 /*
4129 * Release the task if necessary.
4130 */
4131 if (release) {
4132 task_release_locked(task);
4133 }
4134
4135 return KERN_SUCCESS;
4136 }
4137
4138 boolean_t
4139 get_task_suspended(task_t task)
4140 {
4141 return 0 != task->user_stop_count;
4142 }
4143
4144 /*
4145 * task_suspend:
4146 *
4147 * Implement an (old-fashioned) user-level suspension on a task.
4148 *
4149 * Because the user isn't expecting to have to manage a suspension
4150 * token, we'll track it for him in the kernel in the form of a naked
4151 * send right to the task's resume port. All such send rights
4152 * account for a single suspension against the task (unlike task_suspend2()
4153 * where each caller gets a unique suspension count represented by a
4154 * unique send-once right).
4155 *
4156 * Conditions:
4157 * The caller holds a reference to the task
4158 */
4159 kern_return_t
4160 task_suspend(
4161 task_t task)
4162 {
4163 kern_return_t kr;
4164 mach_port_t port;
4165 mach_port_name_t name;
4166
4167 if (task == TASK_NULL || task == kernel_task) {
4168 return KERN_INVALID_ARGUMENT;
4169 }
4170
4171 /*
4172 * place a legacy hold on the task.
4173 */
4174 task_lock(task);
4175 kr = place_task_hold(task, TASK_HOLD_LEGACY);
4176 task_unlock(task);
4177
4178 if (kr != KERN_SUCCESS) {
4179 return kr;
4180 }
4181
4182 /*
4183 * Claim a send right on the task resume port, and request a no-senders
4184 * notification on that port (if none outstanding).
4185 */
4186 itk_lock(task);
4187 port = task->itk_resume;
4188 if (port == IP_NULL) {
4189 port = ipc_kobject_alloc_port(task, IKOT_TASK_RESUME,
4190 IPC_KOBJECT_ALLOC_NSREQUEST | IPC_KOBJECT_ALLOC_MAKE_SEND);
4191 task->itk_resume = port;
4192 } else {
4193 (void)ipc_kobject_make_send_nsrequest(port, task, IKOT_TASK_RESUME);
4194 }
4195 itk_unlock(task);
4196
4197 /*
4198 * Copyout the send right into the calling task's IPC space. The caller won't know it is there,
4199 * but we'll look it up when handling a traditional resume. Any IPC operations that
4200 * deallocate the send right will auto-release the suspension.
4201 */
4202 if (IP_VALID(port)) {
4203 kr = ipc_object_copyout(current_space(), port,
4204 MACH_MSG_TYPE_MOVE_SEND, IPC_OBJECT_COPYOUT_FLAGS_NONE,
4205 NULL, &name);
4206 } else {
4207 kr = KERN_SUCCESS;
4208 }
4209 if (kr != KERN_SUCCESS) {
4210 printf("warning: %s(%d) failed to copyout suspension "
4211 "token for pid %d with error: %d\n",
4212 proc_name_address(get_bsdtask_info(current_task())),
4213 proc_pid(get_bsdtask_info(current_task())),
4214 task_pid(task), kr);
4215 }
4216
4217 return kr;
4218 }
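/*
 * Illustrative user-space sketch (assumption, not part of this file):
 * the legacy pair hides the suspension token as a naked send right in
 * the caller's IPC space, so suspend and resume calls simply pair up.
 *
 *	if (task_suspend(child_task) == KERN_SUCCESS) {
 *		... inspect the stopped task ...
 *		(void)task_resume(child_task);	releases one legacy hold
 *	}
 */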
4219
4220 /*
4221 * task_resume:
4222 * Release a user hold on a task.
4223 *
4224 * Conditions:
4225 * The caller holds a reference to the task
4226 */
4227 kern_return_t
4228 task_resume(
4229 task_t task)
4230 {
4231 kern_return_t kr;
4232 mach_port_name_t resume_port_name;
4233 ipc_entry_t resume_port_entry;
4234 ipc_space_t space = current_task()->itk_space;
4235
4236 if (task == TASK_NULL || task == kernel_task) {
4237 return KERN_INVALID_ARGUMENT;
4238 }
4239
4240 /* release a legacy task hold */
4241 task_lock(task);
4242 kr = release_task_hold(task, TASK_HOLD_LEGACY);
4243 task_unlock(task);
4244
4245 itk_lock(task); /* for itk_resume */
4246 is_write_lock(space); /* spin lock */
4247 if (is_active(space) && IP_VALID(task->itk_resume) &&
4248 ipc_hash_lookup(space, ip_to_object(task->itk_resume), &resume_port_name, &resume_port_entry) == TRUE) {
4249 /*
4250 * We found a suspension token in the caller's IPC space. Release a send right to indicate that
4251 * we are holding one less legacy hold on the task from this caller. If the release failed,
4252 * go ahead and drop all the rights, as someone either already released our holds or the task
4253 * is gone.
4254 */
4255 itk_unlock(task);
4256 if (kr == KERN_SUCCESS) {
4257 ipc_right_dealloc(space, resume_port_name, resume_port_entry);
4258 } else {
4259 ipc_right_destroy(space, resume_port_name, resume_port_entry, FALSE, 0);
4260 }
4261 /* space unlocked */
4262 } else {
4263 itk_unlock(task);
4264 is_write_unlock(space);
4265 if (kr == KERN_SUCCESS) {
4266 printf("warning: %s(%d) performed out-of-band resume on pid %d\n",
4267 proc_name_address(get_bsdtask_info(current_task())), proc_pid(get_bsdtask_info(current_task())),
4268 task_pid(task));
4269 }
4270 }
4271
4272 return kr;
4273 }
4274
4275 /*
4276 * Suspend the target task.
4277 * Making/holding a token/reference/port is the caller's responsibility.
4278 */
4279 kern_return_t
4280 task_suspend_internal(task_t task)
4281 {
4282 kern_return_t kr;
4283
4284 if (task == TASK_NULL || task == kernel_task) {
4285 return KERN_INVALID_ARGUMENT;
4286 }
4287
4288 task_lock(task);
4289 kr = place_task_hold(task, TASK_HOLD_NORMAL);
4290 task_unlock(task);
4291 return kr;
4292 }
4293
4294 /*
4295 * Suspend the target task, and return a suspension token. The token
4296 * represents a reference on the suspended task.
4297 */
4298 static kern_return_t
4299 task_suspend2_grp(
4300 task_t task,
4301 task_suspension_token_t *suspend_token,
4302 task_grp_t grp)
4303 {
4304 kern_return_t kr;
4305
4306 kr = task_suspend_internal(task);
4307 if (kr != KERN_SUCCESS) {
4308 *suspend_token = TASK_NULL;
4309 return kr;
4310 }
4311
4312 /*
4313 * Take a reference on the target task and return that to the caller
4314 * as a "suspension token," which can be converted into an SO right to
4315 * the now-suspended task's resume port.
4316 */
4317 task_reference_grp(task, grp);
4318 *suspend_token = task;
4319
4320 return KERN_SUCCESS;
4321 }
4322
4323 kern_return_t
4324 task_suspend2_mig(
4325 task_t task,
4326 task_suspension_token_t *suspend_token)
4327 {
4328 return task_suspend2_grp(task, suspend_token, TASK_GRP_MIG);
4329 }
4330
4331 kern_return_t
4332 task_suspend2_external(
4333 task_t task,
4334 task_suspension_token_t *suspend_token)
4335 {
4336 return task_suspend2_grp(task, suspend_token, TASK_GRP_EXTERNAL);
4337 }
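/*
 * Illustrative user-space sketch (assumption, not part of this file):
 * task_suspend2() hands back a per-caller suspension token; resuming
 * through task_resume2() consumes it, so another caller's over-resume
 * cannot release this hold.
 *
 *	task_suspension_token_t token = TASK_NULL;
 *
 *	if (task_suspend2(task, &token) == KERN_SUCCESS) {
 *		... the task stays suspended while the token is held ...
 *		(void)task_resume2(token);	consumes the token reference
 *	}
 */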
4338
4339 /*
4340 * Resume the task
4341 * (reference/token/port management is caller's responsibility).
4342 */
4343 kern_return_t
4344 task_resume_internal(
4345 task_suspension_token_t task)
4346 {
4347 kern_return_t kr;
4348
4349 if (task == TASK_NULL || task == kernel_task) {
4350 return KERN_INVALID_ARGUMENT;
4351 }
4352
4353 task_lock(task);
4354 kr = release_task_hold(task, TASK_HOLD_NORMAL);
4355 task_unlock(task);
4356 return kr;
4357 }
4358
4359 /*
4360 * Resume the task using a suspension token. Consumes the token's ref.
4361 */
4362 static kern_return_t
4363 task_resume2_grp(
4364 task_suspension_token_t task,
4365 task_grp_t grp)
4366 {
4367 kern_return_t kr;
4368
4369 kr = task_resume_internal(task);
4370 task_suspension_token_deallocate_grp(task, grp);
4371
4372 return kr;
4373 }
4374
4375 kern_return_t
4376 task_resume2_mig(
4377 task_suspension_token_t task)
4378 {
4379 return task_resume2_grp(task, TASK_GRP_MIG);
4380 }
4381
4382 kern_return_t
4383 task_resume2_external(
4384 task_suspension_token_t task)
4385 {
4386 return task_resume2_grp(task, TASK_GRP_EXTERNAL);
4387 }
4388
4389 static void
4390 task_suspension_no_senders(ipc_port_t port, mach_port_mscount_t mscount)
4391 {
4392 task_t task = convert_port_to_task_suspension_token(port);
4393 kern_return_t kr;
4394
4395 if (task == TASK_NULL) {
4396 return;
4397 }
4398
4399 if (task == kernel_task) {
4400 task_suspension_token_deallocate(task);
4401 return;
4402 }
4403
4404 task_lock(task);
4405
4406 kr = ipc_kobject_nsrequest(port, mscount, NULL);
4407 if (kr == KERN_FAILURE) {
4408 /* release all the [remaining] outstanding legacy holds */
4409 release_task_hold(task, TASK_HOLD_LEGACY_ALL);
4410 }
4411
4412 task_unlock(task);
4413
4414 task_suspension_token_deallocate(task); /* drop token reference */
4415 }
4416
4417 /*
4418 * Fires when a send-once right made
4419 * by convert_task_suspension_token_to_port() dies.
4420 */
4421 void
4422 task_suspension_send_once(ipc_port_t port)
4423 {
4424 task_t task = convert_port_to_task_suspension_token(port);
4425
4426 if (task == TASK_NULL || task == kernel_task) {
4427 return; /* nothing to do */
4428 }
4429
4430 /* release the hold held by this specific send-once right */
4431 task_lock(task);
4432 release_task_hold(task, TASK_HOLD_NORMAL);
4433 task_unlock(task);
4434
4435 task_suspension_token_deallocate(task); /* drop token reference */
4436 }
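/*
 * Note (added, illustrative): destroying a task_suspend2() token's
 * send-once right without ever calling task_resume2() lands here, so
 * a crashed or careless suspender cannot leave the task suspended
 * forever.
 */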
4437
4438 static kern_return_t
4439 task_pidsuspend_locked(task_t task)
4440 {
4441 kern_return_t kr;
4442
4443 if (task->pidsuspended) {
4444 kr = KERN_FAILURE;
4445 goto out;
4446 }
4447
4448 task->pidsuspended = TRUE;
4449
4450 kr = place_task_hold(task, TASK_HOLD_PIDSUSPEND);
4451 if (kr != KERN_SUCCESS) {
4452 task->pidsuspended = FALSE;
4453 }
4454 out:
4455 return kr;
4456 }
4457
4458
4459 /*
4460 * task_pidsuspend:
4461 *
4462 * Suspends a task by placing a hold on its threads.
4463 *
4464 * Conditions:
4465 * The caller holds a reference to the task
4466 */
4467 kern_return_t
4468 task_pidsuspend(
4469 task_t task)
4470 {
4471 kern_return_t kr;
4472
4473 if (task == TASK_NULL || task == kernel_task) {
4474 return KERN_INVALID_ARGUMENT;
4475 }
4476
4477 task_lock(task);
4478
4479 kr = task_pidsuspend_locked(task);
4480
4481 task_unlock(task);
4482
4483 if ((KERN_SUCCESS == kr) && task->message_app_suspended) {
4484 iokit_task_app_suspended_changed(task);
4485 vm_deferred_reclamation_task_suspend(task);
4486 }
4487
4488 return kr;
4489 }
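/*
 * Note (added, illustrative): unlike task_suspend(), the pidsuspend
 * hold does not nest - a second task_pidsuspend() fails with
 * KERN_FAILURE until task_pidresume() clears the pidsuspended flag.
 */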
4490
4491 /*
4492 * task_pidresume:
4493 * Resumes a previously suspended task.
4494 *
4495 * Conditions:
4496 * The caller holds a reference to the task
4497 */
4498 kern_return_t
4499 task_pidresume(
4500 task_t task)
4501 {
4502 kern_return_t kr;
4503
4504 if (task == TASK_NULL || task == kernel_task) {
4505 return KERN_INVALID_ARGUMENT;
4506 }
4507
4508 task_lock(task);
4509
4510 #if CONFIG_FREEZE
4511
4512 while (task->changing_freeze_state) {
4513 assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
4514 task_unlock(task);
4515 thread_block(THREAD_CONTINUE_NULL);
4516
4517 task_lock(task);
4518 }
4519 task->changing_freeze_state = TRUE;
4520 #endif
4521
4522 kr = release_task_hold(task, TASK_HOLD_PIDSUSPEND);
4523
4524 task_unlock(task);
4525
4526 if ((KERN_SUCCESS == kr) && task->message_app_suspended) {
4527 iokit_task_app_suspended_changed(task);
4528 }
4529
4530 #if CONFIG_FREEZE
4531
4532 task_lock(task);
4533
4534 if (kr == KERN_SUCCESS) {
4535 task->frozen = FALSE;
4536 }
4537 task->changing_freeze_state = FALSE;
4538 thread_wakeup(&task->changing_freeze_state);
4539
4540 task_unlock(task);
4541 #endif
4542
4543 return kr;
4544 }
4545
4546 os_refgrp_decl(static, task_watchports_refgrp, "task_watchports", NULL);
4547
4548 /*
4549 * task_add_turnstile_watchports:
4550 * Set up watchports to boost the main thread of the task.
4551 *
4552 * Arguments:
4553 * task: task being spawned
4554 * thread: main thread of task
4555 * portwatch_ports: array of watchports
4556 * portwatch_count: number of watchports
4557 *
4558 * Conditions:
4559 * Nothing locked.
4560 */
4561 void
4562 task_add_turnstile_watchports(
4563 task_t task,
4564 thread_t thread,
4565 ipc_port_t *portwatch_ports,
4566 uint32_t portwatch_count)
4567 {
4568 struct task_watchports *watchports = NULL;
4569 struct task_watchport_elem *previous_elem_array[TASK_MAX_WATCHPORT_COUNT] = {};
4570 os_ref_count_t refs;
4571
4572 /* Check if the task has terminated */
4573 if (!task->active) {
4574 return;
4575 }
4576
4577 assert(portwatch_count <= TASK_MAX_WATCHPORT_COUNT);
4578
4579 watchports = task_watchports_alloc_init(task, thread, portwatch_count);
4580
4581 /* Lock the ipc space */
4582 is_write_lock(task->itk_space);
4583
4584 /* Set up watchports to boost the main thread */
4585 refs = task_add_turnstile_watchports_locked(task,
4586 watchports, previous_elem_array, portwatch_ports,
4587 portwatch_count);
4588
4589 /* Drop the space lock */
4590 is_write_unlock(task->itk_space);
4591
4592 if (refs == 0) {
4593 task_watchports_deallocate(watchports);
4594 }
4595
4596 /* Drop the ref on previous_elem_array */
4597 for (uint32_t i = 0; i < portwatch_count && previous_elem_array[i] != NULL; i++) {
4598 task_watchport_elem_deallocate(previous_elem_array[i]);
4599 }
4600 }
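/*
 * Note (added, illustrative): this is driven from the spawn path when
 * port-watch attributes are supplied, so the new task's main thread
 * inherits turnstile boosts from the watched ports before it first
 * runs.
 */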
4601
4602 /*
4603 * task_remove_turnstile_watchports:
4604 * Clear all turnstile boosts on the task from watchports.
4605 *
4606 * Arguments:
4607 * task: task being terminated
4608 *
4609 * Conditions:
4610 * Nothing locked.
4611 */
4612 void
4613 task_remove_turnstile_watchports(
4614 task_t task)
4615 {
4616 os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4617 struct task_watchports *watchports = NULL;
4618 ipc_port_t port_freelist[TASK_MAX_WATCHPORT_COUNT] = {};
4619 uint32_t portwatch_count;
4620
4621 /* Lock the ipc space */
4622 is_write_lock(task->itk_space);
4623
4624 /* Check if a watchport boost exists */
4625 if (task->watchports == NULL) {
4626 is_write_unlock(task->itk_space);
4627 return;
4628 }
4629 watchports = task->watchports;
4630 portwatch_count = watchports->tw_elem_array_count;
4631
4632 refs = task_remove_turnstile_watchports_locked(task, watchports,
4633 port_freelist);
4634
4635 is_write_unlock(task->itk_space);
4636
4637 /* Drop all the port references */
4638 for (uint32_t i = 0; i < portwatch_count && port_freelist[i] != NULL; i++) {
4639 ip_release(port_freelist[i]);
4640 }
4641
4642 /* Clear the task and thread references for task_watchport */
4643 if (refs == 0) {
4644 task_watchports_deallocate(watchports);
4645 }
4646 }
4647
4648 /*
4649 * task_transfer_turnstile_watchports:
4650 * Transfer all watchport turnstile boost from old task to new task.
4651 *
4652 * Arguments:
4653 * old_task: task calling exec
4654 * new_task: new exec'ed task
4655 * thread: main thread of new task
4656 *
4657 * Conditions:
4658 * Nothing locked.
4659 */
4660 void
4661 task_transfer_turnstile_watchports(
4662 task_t old_task,
4663 task_t new_task,
4664 thread_t new_thread)
4665 {
4666 struct task_watchports *old_watchports = NULL;
4667 struct task_watchports *new_watchports = NULL;
4668 os_ref_count_t old_refs = TASK_MAX_WATCHPORT_COUNT;
4669 os_ref_count_t new_refs = TASK_MAX_WATCHPORT_COUNT;
4670 uint32_t portwatch_count;
4671
4672 if (old_task->watchports == NULL || !new_task->active) {
4673 return;
4674 }
4675
4676 /* Get the watch port count from the old task */
4677 is_write_lock(old_task->itk_space);
4678 if (old_task->watchports == NULL) {
4679 is_write_unlock(old_task->itk_space);
4680 return;
4681 }
4682
4683 portwatch_count = old_task->watchports->tw_elem_array_count;
4684 is_write_unlock(old_task->itk_space);
4685
4686 new_watchports = task_watchports_alloc_init(new_task, new_thread, portwatch_count);
4687
4688 /* Lock the ipc space for old task */
4689 is_write_lock(old_task->itk_space);
4690
4691 /* Lock the ipc space for new task */
4692 is_write_lock(new_task->itk_space);
4693
4694 /* Check if a watchport boost exists */
4695 if (old_task->watchports == NULL || !new_task->active) {
4696 is_write_unlock(new_task->itk_space);
4697 is_write_unlock(old_task->itk_space);
4698 (void)task_watchports_release(new_watchports);
4699 task_watchports_deallocate(new_watchports);
4700 return;
4701 }
4702
4703 old_watchports = old_task->watchports;
4704 assert(portwatch_count == old_task->watchports->tw_elem_array_count);
4705
4706 /* Set up the new task's watchports */
4707 new_task->watchports = new_watchports;
4708
4709 for (uint32_t i = 0; i < portwatch_count; i++) {
4710 ipc_port_t port = old_watchports->tw_elem[i].twe_port;
4711
4712 if (port == NULL) {
4713 task_watchport_elem_clear(&new_watchports->tw_elem[i]);
4714 continue;
4715 }
4716
4717 /* Lock the port and check if it has the entry */
4718 ip_mq_lock(port);
4719
4720 task_watchport_elem_init(&new_watchports->tw_elem[i], new_task, port);
4721
4722 if (ipc_port_replace_watchport_elem_conditional_locked(port,
4723 &old_watchports->tw_elem[i], &new_watchports->tw_elem[i]) == KERN_SUCCESS) {
4724 task_watchport_elem_clear(&old_watchports->tw_elem[i]);
4725
4726 task_watchports_retain(new_watchports);
4727 old_refs = task_watchports_release(old_watchports);
4728
4729 /* Check if all ports are cleaned */
4730 if (old_refs == 0) {
4731 old_task->watchports = NULL;
4732 }
4733 } else {
4734 task_watchport_elem_clear(&new_watchports->tw_elem[i]);
4735 }
4736 /* port unlocked by ipc_port_replace_watchport_elem_conditional_locked */
4737 }
4738
4739 /* Drop the reference on new task_watchports struct returned by task_watchports_alloc_init */
4740 new_refs = task_watchports_release(new_watchports);
4741 if (new_refs == 0) {
4742 new_task->watchports = NULL;
4743 }
4744
4745 is_write_unlock(new_task->itk_space);
4746 is_write_unlock(old_task->itk_space);
4747
4748 /* Clear the task and thread references for old_watchport */
4749 if (old_refs == 0) {
4750 task_watchports_deallocate(old_watchports);
4751 }
4752
4753 /* Clear the task and thread references for new_watchport */
4754 if (new_refs == 0) {
4755 task_watchports_deallocate(new_watchports);
4756 }
4757 }
4758
4759 /*
4760 * task_add_turnstile_watchports_locked:
4761 * Set up watchports to boost the main thread of the task.
4762 *
4763 * Arguments:
4764 * task: task to boost
4765 * watchports: watchport structure to be attached to the task
4766 * previous_elem_array: an array of old watchport_elem to be returned to caller
4767 * portwatch_ports: array of watchports
4768 * portwatch_count: number of watchports
4769 *
4770 * Conditions:
4771 * ipc space of the task locked.
4772 * returns array of old watchport_elem in previous_elem_array
4773 */
4774 static os_ref_count_t
4775 task_add_turnstile_watchports_locked(
4776 task_t task,
4777 struct task_watchports *watchports,
4778 struct task_watchport_elem **previous_elem_array,
4779 ipc_port_t *portwatch_ports,
4780 uint32_t portwatch_count)
4781 {
4782 os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4783
4784 /* Check if the task is still active */
4785 if (!task->active) {
4786 refs = task_watchports_release(watchports);
4787 return refs;
4788 }
4789
4790 assert(task->watchports == NULL);
4791 task->watchports = watchports;
4792
4793 for (uint32_t i = 0, j = 0; i < portwatch_count; i++) {
4794 ipc_port_t port = portwatch_ports[i];
4795
4796 task_watchport_elem_init(&watchports->tw_elem[i], task, port);
4797 if (port == NULL) {
4798 task_watchport_elem_clear(&watchports->tw_elem[i]);
4799 continue;
4800 }
4801
4802 ip_mq_lock(port);
4803
4804 /* Check if the port is in a valid state to be set up as a watchport */
4805 if (ipc_port_add_watchport_elem_locked(port, &watchports->tw_elem[i],
4806 &previous_elem_array[j]) != KERN_SUCCESS) {
4807 task_watchport_elem_clear(&watchports->tw_elem[i]);
4808 continue;
4809 }
4810 /* port unlocked on return */
4811
4812 ip_reference(port);
4813 task_watchports_retain(watchports);
4814 if (previous_elem_array[j] != NULL) {
4815 j++;
4816 }
4817 }
4818
4819 /* Drop the reference on task_watchport struct returned by os_ref_init */
4820 refs = task_watchports_release(watchports);
4821 if (refs == 0) {
4822 task->watchports = NULL;
4823 }
4824
4825 return refs;
4826 }
4827
4828 /*
4829 * task_remove_turnstile_watchports_locked:
4830 * Clear all turnstile boosts on the task from watchports.
4831 *
4832 * Arguments:
4833 * task: task to remove watchports from
4834 * watchports: watchports structure for the task
4835 * port_freelist: array of ports returned with ref to caller
4836 *
4837 *
4838 * Conditions:
4839 * ipc space of the task locked.
4840 * array of ports with refs are returned in port_freelist
4841 */
4842 static os_ref_count_t
4843 task_remove_turnstile_watchports_locked(
4844 task_t task,
4845 struct task_watchports *watchports,
4846 ipc_port_t *port_freelist)
4847 {
4848 os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4849
4850 for (uint32_t i = 0, j = 0; i < watchports->tw_elem_array_count; i++) {
4851 ipc_port_t port = watchports->tw_elem[i].twe_port;
4852 if (port == NULL) {
4853 continue;
4854 }
4855
4856 /* Lock the port and check if it has the entry */
4857 ip_mq_lock(port);
4858 if (ipc_port_clear_watchport_elem_internal_conditional_locked(port,
4859 &watchports->tw_elem[i]) == KERN_SUCCESS) {
4860 task_watchport_elem_clear(&watchports->tw_elem[i]);
4861 port_freelist[j++] = port;
4862 refs = task_watchports_release(watchports);
4863
4864 /* Check if all ports are cleaned */
4865 if (refs == 0) {
4866 task->watchports = NULL;
4867 break;
4868 }
4869 }
4870 /* mqueue and port unlocked by ipc_port_clear_watchport_elem_internal_conditional_locked */
4871 }
4872 return refs;
4873 }
4874
4875 /*
4876 * task_watchports_alloc_init:
4877 * Allocate and initialize task watchport struct.
4878 *
4879 * Conditions:
4880 * Nothing locked.
4881 */
4882 static struct task_watchports *
4883 task_watchports_alloc_init(
4884 task_t task,
4885 thread_t thread,
4886 uint32_t count)
4887 {
4888 struct task_watchports *watchports = kalloc_type(struct task_watchports,
4889 struct task_watchport_elem, count, Z_WAITOK | Z_ZERO | Z_NOFAIL);
4890
4891 task_reference(task);
4892 thread_reference(thread);
4893 watchports->tw_task = task;
4894 watchports->tw_thread = thread;
4895 watchports->tw_elem_array_count = count;
4896 os_ref_init(&watchports->tw_refcount, &task_watchports_refgrp);
4897
4898 return watchports;
4899 }
4900
4901 /*
4902 * task_watchports_deallocate:
4903 * Deallocate task watchport struct.
4904 *
4905 * Conditions:
4906 * Nothing locked.
4907 */
4908 static void
4909 task_watchports_deallocate(
4910 struct task_watchports *watchports)
4911 {
4912 uint32_t portwatch_count = watchports->tw_elem_array_count;
4913
4914 task_deallocate(watchports->tw_task);
4915 thread_deallocate(watchports->tw_thread);
4916 kfree_type(struct task_watchports, struct task_watchport_elem,
4917 portwatch_count, watchports);
4918 }
4919
4920 /*
4921 * task_watchport_elem_deallocate:
4922 * Deallocate task watchport element and release its ref on task_watchport.
4923 *
4924 * Conditions:
4925 * Nothing locked.
4926 */
4927 void
4928 task_watchport_elem_deallocate(
4929 struct task_watchport_elem *watchport_elem)
4930 {
4931 os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4932 task_t task = watchport_elem->twe_task;
4933 struct task_watchports *watchports = NULL;
4934 ipc_port_t port = NULL;
4935
4936 assert(task != NULL);
4937
4938 /* Take the space lock to modify the element */
4939 is_write_lock(task->itk_space);
4940
4941 watchports = task->watchports;
4942 assert(watchports != NULL);
4943
4944 port = watchport_elem->twe_port;
4945 assert(port != NULL);
4946
4947 task_watchport_elem_clear(watchport_elem);
4948 refs = task_watchports_release(watchports);
4949
4950 if (refs == 0) {
4951 task->watchports = NULL;
4952 }
4953
4954 is_write_unlock(task->itk_space);
4955
4956 ip_release(port);
4957 if (refs == 0) {
4958 task_watchports_deallocate(watchports);
4959 }
4960 }
4961
4962 /*
4963 * task_has_watchports:
4964 * Return TRUE if task has watchport boosts.
4965 *
4966 * Conditions:
4967 * Nothing locked.
4968 */
4969 boolean_t
4970 task_has_watchports(task_t task)
4971 {
4972 return task->watchports != NULL;
4973 }
4974
4975 #if DEVELOPMENT || DEBUG
4976
4977 extern void IOSleep(int);
4978
4979 kern_return_t
4980 task_disconnect_page_mappings(task_t task)
4981 {
4982 int n;
4983
4984 if (task == TASK_NULL || task == kernel_task) {
4985 return KERN_INVALID_ARGUMENT;
4986 }
4987
4988 /*
4989 * This function strips all of the mappings from the pmap for
4990 * the specified task, forcing the task to re-fault all of the
4991 * pages it is actively using. This allows us to approximate
4992 * the true working set of the task. We only engage if at
4993 * least one of the threads in the task is runnable, but we
4994 * want to continuously sweep (at least for a while - the
4995 * limit is arbitrarily set at 100 sweeps, to be revisited as
4996 * we gain experience) to get a better view into which areas
4997 * within a page are being visited (as opposed to only seeing
4998 * the first fault of a page after the task becomes runnable).
4999 * In the future we may try to block until awakened by a
5000 * thread in this task being made runnable, but for now we
5001 * periodically poll from the user-level debug tool driving
5002 * the sysctl.
5003 */
5004 for (n = 0; n < 100; n++) {
5005 thread_t thread;
5006 boolean_t runnable;
5007 boolean_t do_unnest;
5008 int page_count;
5009
5010 runnable = FALSE;
5011 do_unnest = FALSE;
5012
5013 task_lock(task);
5014
5015 queue_iterate(&task->threads, thread, thread_t, task_threads) {
5016 if (thread->state & TH_RUN) {
5017 runnable = TRUE;
5018 break;
5019 }
5020 }
5021 if (n == 0) {
5022 task->task_disconnected_count++;
5023 }
5024
5025 if (task->task_unnested == FALSE) {
5026 if (runnable == TRUE) {
5027 task->task_unnested = TRUE;
5028 do_unnest = TRUE;
5029 }
5030 }
5031 task_unlock(task);
5032
5033 if (runnable == FALSE) {
5034 break;
5035 }
5036
5037 KDBG_RELEASE((MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_START,
5038 task, do_unnest, task->task_disconnected_count);
5039
5040 page_count = vm_map_disconnect_page_mappings(task->map, do_unnest);
5041
5042 KDBG_RELEASE((MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_END,
5043 task, page_count);
5044
5045 if ((n % 5) == 4) {
5046 IOSleep(1);
5047 }
5048 }
5049 return KERN_SUCCESS;
5050 }
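/*
 * Worked example (added, illustrative): the loop above sleeps for 1ms
 * after every fifth sweep ((n % 5) == 4), so a full 100-sweep pass
 * yields the CPU roughly 20 times while the task remains runnable.
 */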
5051
5052 #endif
5053
5054
5055 #if CONFIG_FREEZE
5056
5057 /*
5058 * task_freeze:
5059 *
5060 * Freeze a task.
5061 *
5062 * Conditions:
5063 * The caller holds a reference to the task
5064 */
5065 extern struct freezer_context freezer_context_global;
5066
5067 kern_return_t
5068 task_freeze(
5069 task_t task,
5070 uint32_t *purgeable_count,
5071 uint32_t *wired_count,
5072 uint32_t *clean_count,
5073 uint32_t *dirty_count,
5074 uint32_t dirty_budget,
5075 uint32_t *shared_count,
5076 int *freezer_error_code,
5077 boolean_t eval_only)
5078 {
5079 kern_return_t kr = KERN_SUCCESS;
5080
5081 if (task == TASK_NULL || task == kernel_task) {
5082 return KERN_INVALID_ARGUMENT;
5083 }
5084
5085 task_lock(task);
5086
5087 while (task->changing_freeze_state) {
5088 assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
5089 task_unlock(task);
5090 thread_block(THREAD_CONTINUE_NULL);
5091
5092 task_lock(task);
5093 }
5094 if (task->frozen) {
5095 task_unlock(task);
5096 return KERN_FAILURE;
5097 }
5098 task->changing_freeze_state = TRUE;
5099
5100 freezer_context_global.freezer_ctx_task = task;
5101
5102 task_unlock(task);
5103
5104 #if CONFIG_DEFERRED_RECLAIM
5105 if (vm_deferred_reclamation_task_has_ring(task)) {
5106 kr = vm_deferred_reclamation_task_drain(task, RECLAIM_OPTIONS_NONE);
5107 if (kr != KERN_SUCCESS) {
5108 os_log_error(OS_LOG_DEFAULT, "Failed to drain reclamation ring prior to freezing (%d)\n", kr);
5109 }
5110 }
5111 #endif /* CONFIG_DEFERRED_RECLAIM */
5112
5113 kr = vm_map_freeze(task,
5114 purgeable_count,
5115 wired_count,
5116 clean_count,
5117 dirty_count,
5118 dirty_budget,
5119 shared_count,
5120 freezer_error_code,
5121 eval_only);
5122
5123 task_lock(task);
5124
5125 if ((kr == KERN_SUCCESS) && (eval_only == FALSE)) {
5126 task->frozen = TRUE;
5127
5128 freezer_context_global.freezer_ctx_task = NULL;
5129 freezer_context_global.freezer_ctx_uncompressed_pages = 0;
5130
5131 if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
5132 /*
5133 * reset the counter tracking the # of swapped compressed pages
5134 * because we are now done with this freeze session and task.
5135 */
5136
5137 *dirty_count = (uint32_t) (freezer_context_global.freezer_ctx_swapped_bytes / PAGE_SIZE_64); /* used to track pageouts */
5138 }
5139
5140 freezer_context_global.freezer_ctx_swapped_bytes = 0;
5141 }
5142
5143 task->changing_freeze_state = FALSE;
5144 thread_wakeup(&task->changing_freeze_state);
5145
5146 task_unlock(task);
5147
5148 if (VM_CONFIG_COMPRESSOR_IS_PRESENT &&
5149 (kr == KERN_SUCCESS) &&
5150 (eval_only == FALSE)) {
5151 vm_wake_compactor_swapper();
5152 /*
5153 * We do an explicit wakeup of the swapout thread here
5154 * because the compact_and_swap routines don't have
5155 * knowledge about these kinds of "per-task packed c_segs"
5156 * and so will not be evaluating whether we need to do
5157 * a wakeup there.
5158 */
5159 thread_wakeup((event_t)&vm_swapout_thread);
5160 }
5161
5162 return kr;
5163 }
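
/*
 * A minimal sketch of how a caller might drive task_freeze() using the
 * two-phase pattern the eval_only parameter enables: an evaluation pass
 * to estimate the work, then the real freeze. The helper and its
 * "worthwhile" check are illustrative assumptions, not the actual
 * freezer (memorystatus) logic.
 */
#if 0 /* illustrative example, not compiled */
static kern_return_t
freeze_if_worthwhile(task_t task, uint32_t dirty_budget)
{
	uint32_t purgeable, wired, clean, dirty, shared;
	int freezer_error_code = 0;
	kern_return_t kr;

	/* Pass 1: evaluate only; nothing is actually frozen. */
	kr = task_freeze(task, &purgeable, &wired, &clean, &dirty,
	    dirty_budget, &shared, &freezer_error_code, TRUE);
	if (kr != KERN_SUCCESS || dirty == 0) {
		return kr; /* nothing worth freezing */
	}

	/* Pass 2: perform the freeze for real. */
	return task_freeze(task, &purgeable, &wired, &clean, &dirty,
	    dirty_budget, &shared, &freezer_error_code, FALSE);
}
#endif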
5164
5165 /*
5166 * task_thaw:
5167 *
5168 * Thaw a currently frozen task.
5169 *
5170 * Conditions:
5171 * The caller holds a reference to the task
5172 */
5173 kern_return_t
5174 task_thaw(
5175 task_t task)
5176 {
5177 if (task == TASK_NULL || task == kernel_task) {
5178 return KERN_INVALID_ARGUMENT;
5179 }
5180
5181 task_lock(task);
5182
5183 while (task->changing_freeze_state) {
5184 assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
5185 task_unlock(task);
5186 thread_block(THREAD_CONTINUE_NULL);
5187
5188 task_lock(task);
5189 }
5190 if (!task->frozen) {
5191 task_unlock(task);
5192 return KERN_FAILURE;
5193 }
5194 task->frozen = FALSE;
5195
5196 task_unlock(task);
5197
5198 return KERN_SUCCESS;
5199 }
5200
5201 void
5202 task_update_frozen_to_swap_acct(task_t task, int64_t amount, freezer_acct_op_t op)
5203 {
5204 /*
5205 * We don't assert that the task lock is held because this routine is
5206 * called from the decompression path, where the task lock is not
5207 * held. That is safe because we are executing in the context of the
5208 * task itself.
5209 * The task_freeze path happens to call this with the task lock held,
5210 * but the lock is not required there either, since we hold a
5211 * reference on the proc being frozen.
5212 */
5213
5214 assert(task);
5215 if (amount == 0) {
5216 return;
5217 }
5218
5219 if (op == CREDIT_TO_SWAP) {
5220 ledger_credit_nocheck(task->ledger, task_ledgers.frozen_to_swap, amount);
5221 } else if (op == DEBIT_FROM_SWAP) {
5222 ledger_debit_nocheck(task->ledger, task_ledgers.frozen_to_swap, amount);
5223 } else {
5224 panic("task_update_frozen_to_swap_acct: Invalid ledger op");
5225 }
5226 }
5227 #endif /* CONFIG_FREEZE */
5228
5229 kern_return_t
5230 task_set_security_tokens(
5231 task_t task,
5232 security_token_t sec_token,
5233 audit_token_t audit_token,
5234 host_priv_t host_priv)
5235 {
5236 ipc_port_t host_port = IP_NULL;
5237 kern_return_t kr;
5238
5239 if (task == TASK_NULL) {
5240 return KERN_INVALID_ARGUMENT;
5241 }
5242
5243 task_lock(task);
5244 task_set_tokens(task, &sec_token, &audit_token);
5245 task_unlock(task);
5246
5247 if (host_priv != HOST_PRIV_NULL) {
5248 kr = host_get_host_priv_port(host_priv, &host_port);
5249 } else {
5250 kr = host_get_host_port(host_priv_self(), &host_port);
5251 }
5252 assert(kr == KERN_SUCCESS);
5253
5254 kr = task_set_special_port_internal(task, TASK_HOST_PORT, host_port);
5255 return kr;
5256 }
5257
5258 kern_return_t
5259 task_send_trace_memory(
5260 __unused task_t target_task,
5261 __unused uint32_t pid,
5262 __unused uint64_t uniqueid)
5263 {
5264 return KERN_INVALID_ARGUMENT;
5265 }
5266
5267 /*
5268 * This routine was added, pretty much exclusively, for registering the
5269 * RPC glue vector for in-kernel short circuited tasks. Rather than
5270 * removing it completely, I have only disabled that feature (which was
5271 * the only feature at the time). It just appears that we are going to
5272 * want to add some user data to tasks in the future (i.e. bsd info,
5273 * task names, etc...), so I left it in the formal task interface.
5274 */
5275 kern_return_t
5276 task_set_info(
5277 task_t task,
5278 task_flavor_t flavor,
5279 __unused task_info_t task_info_in, /* pointer to IN array */
5280 __unused mach_msg_type_number_t task_info_count)
5281 {
5282 if (task == TASK_NULL) {
5283 return KERN_INVALID_ARGUMENT;
5284 }
5285 switch (flavor) {
5286 #if CONFIG_ATM
5287 case TASK_TRACE_MEMORY_INFO:
5288 return KERN_NOT_SUPPORTED;
5289 #endif // CONFIG_ATM
5290 default:
5291 return KERN_INVALID_ARGUMENT;
5292 }
5293 }
5294
5295 static void
5296 _task_fill_times(task_t task, time_value_t *user_time, time_value_t *sys_time)
5297 {
5298 clock_sec_t sec;
5299 clock_usec_t usec;
5300
5301 struct recount_times_mach times = recount_task_terminated_times(task);
5302 absolutetime_to_microtime(times.rtm_user, &sec, &usec);
5303 user_time->seconds = (typeof(user_time->seconds))sec;
5304 user_time->microseconds = usec;
5305 absolutetime_to_microtime(times.rtm_system, &sec, &usec);
5306 sys_time->seconds = (typeof(sys_time->seconds))sec;
5307 sys_time->microseconds = usec;
5308 }
5309
5310 int radar_20146450 = 1;
5311 kern_return_t
5312 task_info(
5313 task_t task,
5314 task_flavor_t flavor,
5315 task_info_t task_info_out,
5316 mach_msg_type_number_t *task_info_count)
5317 {
5318 kern_return_t error = KERN_SUCCESS;
5319 mach_msg_type_number_t original_task_info_count;
5320 bool is_kernel_task = (task == kernel_task);
5321
5322 if (task == TASK_NULL) {
5323 return KERN_INVALID_ARGUMENT;
5324 }
5325
5326 original_task_info_count = *task_info_count;
5327 task_lock(task);
5328
5329 if (task != current_task() && !task->active) {
5330 task_unlock(task);
5331 return KERN_INVALID_ARGUMENT;
5332 }
5333
5334
5335 switch (flavor) {
5336 case TASK_BASIC_INFO_32:
5337 case TASK_BASIC2_INFO_32:
5338 #if defined(__arm64__)
5339 case TASK_BASIC_INFO_64:
5340 #endif
5341 {
5342 task_basic_info_32_t basic_info;
5343 ledger_amount_t tmp;
5344
5345 if (*task_info_count < TASK_BASIC_INFO_32_COUNT) {
5346 error = KERN_INVALID_ARGUMENT;
5347 break;
5348 }
5349
5350 basic_info = (task_basic_info_32_t)task_info_out;
5351
5352 basic_info->virtual_size = (typeof(basic_info->virtual_size))
5353 vm_map_adjusted_size(is_kernel_task ? kernel_map : task->map);
5354 if (flavor == TASK_BASIC2_INFO_32) {
5355 /*
5356 * The "BASIC2" flavor gets the maximum resident
5357 * size instead of the current resident size...
5358 */
5359 ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, &tmp);
5360 } else {
5361 ledger_get_balance(task->ledger, task_ledgers.phys_mem, &tmp);
5362 }
5363 basic_info->resident_size = (natural_t) MIN((ledger_amount_t) UINT32_MAX, tmp);
5364
5365 _task_fill_times(task, &basic_info->user_time,
5366 &basic_info->system_time);
5367
5368 basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5369 basic_info->suspend_count = task->user_stop_count;
5370
5371 *task_info_count = TASK_BASIC_INFO_32_COUNT;
5372 break;
5373 }
5374
5375 #if defined(__arm64__)
5376 case TASK_BASIC_INFO_64_2:
5377 {
5378 task_basic_info_64_2_t basic_info;
5379
5380 if (*task_info_count < TASK_BASIC_INFO_64_2_COUNT) {
5381 error = KERN_INVALID_ARGUMENT;
5382 break;
5383 }
5384
5385 basic_info = (task_basic_info_64_2_t)task_info_out;
5386
5387 basic_info->virtual_size = vm_map_adjusted_size(is_kernel_task ?
5388 kernel_map : task->map);
5389 ledger_get_balance(task->ledger, task_ledgers.phys_mem,
5390 (ledger_amount_t *)&basic_info->resident_size);
5391 basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5392 basic_info->suspend_count = task->user_stop_count;
5393 _task_fill_times(task, &basic_info->user_time,
5394 &basic_info->system_time);
5395
5396 *task_info_count = TASK_BASIC_INFO_64_2_COUNT;
5397 break;
5398 }
5399
5400 #else /* defined(__arm64__) */
5401 case TASK_BASIC_INFO_64:
5402 {
5403 task_basic_info_64_t basic_info;
5404
5405 if (*task_info_count < TASK_BASIC_INFO_64_COUNT) {
5406 error = KERN_INVALID_ARGUMENT;
5407 break;
5408 }
5409
5410 basic_info = (task_basic_info_64_t)task_info_out;
5411
5412 basic_info->virtual_size = vm_map_adjusted_size(is_kernel_task ?
5413 kernel_map : task->map);
5414 ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *)&basic_info->resident_size);
5415 basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5416 basic_info->suspend_count = task->user_stop_count;
5417 _task_fill_times(task, &basic_info->user_time,
5418 &basic_info->system_time);
5419
5420 *task_info_count = TASK_BASIC_INFO_64_COUNT;
5421 break;
5422 }
5423 #endif /* defined(__arm64__) */
5424
5425 case MACH_TASK_BASIC_INFO:
5426 {
5427 mach_task_basic_info_t basic_info;
5428
5429 if (*task_info_count < MACH_TASK_BASIC_INFO_COUNT) {
5430 error = KERN_INVALID_ARGUMENT;
5431 break;
5432 }
5433
5434 basic_info = (mach_task_basic_info_t)task_info_out;
5435
5436 basic_info->virtual_size = vm_map_adjusted_size(is_kernel_task ?
5437 kernel_map : task->map);
5438 ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &basic_info->resident_size);
5439 ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &basic_info->resident_size_max);
5440 basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5441 basic_info->suspend_count = task->user_stop_count;
5442 _task_fill_times(task, &basic_info->user_time,
5443 &basic_info->system_time);
5444
5445 *task_info_count = MACH_TASK_BASIC_INFO_COUNT;
5446 break;
5447 }
5448
5449 case TASK_THREAD_TIMES_INFO:
5450 {
5451 task_thread_times_info_t times_info;
5452 thread_t thread;
5453
5454 if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
5455 error = KERN_INVALID_ARGUMENT;
5456 break;
5457 }
5458
5459 times_info = (task_thread_times_info_t)task_info_out;
5460 times_info->user_time = (time_value_t){ 0 };
5461 times_info->system_time = (time_value_t){ 0 };
5462
5463 queue_iterate(&task->threads, thread, thread_t, task_threads) {
5464 if ((thread->options & TH_OPT_IDLE_THREAD) == 0) {
5465 time_value_t user_time, system_time;
5466
5467 thread_read_times(thread, &user_time, &system_time, NULL);
5468 time_value_add(&times_info->user_time, &user_time);
5469 time_value_add(&times_info->system_time, &system_time);
5470 }
5471 }
5472
5473 *task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
5474 break;
5475 }
5476
5477 case TASK_ABSOLUTETIME_INFO:
5478 {
5479 task_absolutetime_info_t info;
5480
5481 if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT) {
5482 error = KERN_INVALID_ARGUMENT;
5483 break;
5484 }
5485
5486 info = (task_absolutetime_info_t)task_info_out;
5487
5488 struct recount_times_mach term_times =
5489 recount_task_terminated_times(task);
5490 struct recount_times_mach total_times = recount_task_times(task);
5491
5492 info->total_user = total_times.rtm_user;
5493 info->total_system = total_times.rtm_system;
5494 info->threads_user = total_times.rtm_user - term_times.rtm_user;
5495 info->threads_system = total_times.rtm_system - term_times.rtm_system;
5496
5497 *task_info_count = TASK_ABSOLUTETIME_INFO_COUNT;
5498 break;
5499 }
5500
5501 case TASK_DYLD_INFO:
5502 {
5503 task_dyld_info_t info;
5504
5505 /*
5506 * We added the format field to TASK_DYLD_INFO output. For
5507 * temporary backward compatibility, accept the fact that
5508 * clients may ask for the old version - distinguished by the
5509 * size of the expected result structure.
5510 */
5511 #define TASK_LEGACY_DYLD_INFO_COUNT \
5512 offsetof(struct task_dyld_info, all_image_info_format)/sizeof(natural_t)
5513
5514 if (*task_info_count < TASK_LEGACY_DYLD_INFO_COUNT) {
5515 error = KERN_INVALID_ARGUMENT;
5516 break;
5517 }
5518
5519 info = (task_dyld_info_t)task_info_out;
5520 info->all_image_info_addr = task->all_image_info_addr;
5521 info->all_image_info_size = task->all_image_info_size;
5522
5523 /* only set format on output for those expecting it */
5524 if (*task_info_count >= TASK_DYLD_INFO_COUNT) {
5525 info->all_image_info_format = task_has_64Bit_addr(task) ?
5526 TASK_DYLD_ALL_IMAGE_INFO_64 :
5527 TASK_DYLD_ALL_IMAGE_INFO_32;
5528 *task_info_count = TASK_DYLD_INFO_COUNT;
5529 } else {
5530 *task_info_count = TASK_LEGACY_DYLD_INFO_COUNT;
5531 }
5532 break;
5533 }
5534
5535 case TASK_EXTMOD_INFO:
5536 {
5537 task_extmod_info_t info;
5538 void *p;
5539
5540 if (*task_info_count < TASK_EXTMOD_INFO_COUNT) {
5541 error = KERN_INVALID_ARGUMENT;
5542 break;
5543 }
5544
5545 info = (task_extmod_info_t)task_info_out;
5546
5547 p = get_bsdtask_info(task);
5548 if (p) {
5549 proc_getexecutableuuid(p, info->task_uuid, sizeof(info->task_uuid));
5550 } else {
5551 bzero(info->task_uuid, sizeof(info->task_uuid));
5552 }
5553 info->extmod_statistics = task->extmod_statistics;
5554 *task_info_count = TASK_EXTMOD_INFO_COUNT;
5555
5556 break;
5557 }
5558
5559 case TASK_KERNELMEMORY_INFO:
5560 {
5561 task_kernelmemory_info_t tkm_info;
5562 ledger_amount_t credit, debit;
5563
5564 if (*task_info_count < TASK_KERNELMEMORY_INFO_COUNT) {
5565 error = KERN_INVALID_ARGUMENT;
5566 break;
5567 }
5568
5569 tkm_info = (task_kernelmemory_info_t) task_info_out;
5570 tkm_info->total_palloc = 0;
5571 tkm_info->total_pfree = 0;
5572 tkm_info->total_salloc = 0;
5573 tkm_info->total_sfree = 0;
5574
5575 if (task == kernel_task) {
5576 /*
5577 * All shared allocs/frees from other tasks count against
5578 * the kernel private memory usage. If we are looking up
5579 * info for the kernel task, gather from everywhere.
5580 */
5581 task_unlock(task);
5582
5583 /* start by accounting for all the terminated tasks against the kernel */
5584 tkm_info->total_palloc = tasks_tkm_private.alloc + tasks_tkm_shared.alloc;
5585 tkm_info->total_pfree = tasks_tkm_private.free + tasks_tkm_shared.free;
5586
5587 /* count all other task/thread shared alloc/free against the kernel */
5588 lck_mtx_lock(&tasks_threads_lock);
5589
5590 /* XXX this really shouldn't be using the function parameter 'task' as a local var! */
5591 queue_iterate(&tasks, task, task_t, tasks) {
5592 if (task == kernel_task) {
5593 if (ledger_get_entries(task->ledger,
5594 task_ledgers.tkm_private, &credit,
5595 &debit) == KERN_SUCCESS) {
5596 tkm_info->total_palloc += credit;
5597 tkm_info->total_pfree += debit;
5598 }
5599 }
5600 if (!ledger_get_entries(task->ledger,
5601 task_ledgers.tkm_shared, &credit, &debit)) {
5602 tkm_info->total_palloc += credit;
5603 tkm_info->total_pfree += debit;
5604 }
5605 }
5606 lck_mtx_unlock(&tasks_threads_lock);
5607 } else {
5608 if (!ledger_get_entries(task->ledger,
5609 task_ledgers.tkm_private, &credit, &debit)) {
5610 tkm_info->total_palloc = credit;
5611 tkm_info->total_pfree = debit;
5612 }
5613 if (!ledger_get_entries(task->ledger,
5614 task_ledgers.tkm_shared, &credit, &debit)) {
5615 tkm_info->total_salloc = credit;
5616 tkm_info->total_sfree = debit;
5617 }
5618 task_unlock(task);
5619 }
5620
5621 *task_info_count = TASK_KERNELMEMORY_INFO_COUNT;
5622 return KERN_SUCCESS;
5623 }
5624
5625 /* OBSOLETE */
5626 case TASK_SCHED_FIFO_INFO:
5627 {
5628 if (*task_info_count < POLICY_FIFO_BASE_COUNT) {
5629 error = KERN_INVALID_ARGUMENT;
5630 break;
5631 }
5632
5633 error = KERN_INVALID_POLICY;
5634 break;
5635 }
5636
5637 /* OBSOLETE */
5638 case TASK_SCHED_RR_INFO:
5639 {
5640 policy_rr_base_t rr_base;
5641 uint32_t quantum_time;
5642 uint64_t quantum_ns;
5643
5644 if (*task_info_count < POLICY_RR_BASE_COUNT) {
5645 error = KERN_INVALID_ARGUMENT;
5646 break;
5647 }
5648
5649 rr_base = (policy_rr_base_t) task_info_out;
5650
5651 if (task != kernel_task) {
5652 error = KERN_INVALID_POLICY;
5653 break;
5654 }
5655
5656 rr_base->base_priority = task->priority;
5657
5658 quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
5659 absolutetime_to_nanoseconds(quantum_time, &quantum_ns);
5660
5661 rr_base->quantum = (uint32_t)(quantum_ns / 1000 / 1000);
5662
5663 *task_info_count = POLICY_RR_BASE_COUNT;
5664 break;
5665 }
5666
5667 /* OBSOLETE */
5668 case TASK_SCHED_TIMESHARE_INFO:
5669 {
5670 policy_timeshare_base_t ts_base;
5671
5672 if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT) {
5673 error = KERN_INVALID_ARGUMENT;
5674 break;
5675 }
5676
5677 ts_base = (policy_timeshare_base_t) task_info_out;
5678
5679 if (task == kernel_task) {
5680 error = KERN_INVALID_POLICY;
5681 break;
5682 }
5683
5684 ts_base->base_priority = task->priority;
5685
5686 *task_info_count = POLICY_TIMESHARE_BASE_COUNT;
5687 break;
5688 }
5689
5690 case TASK_SECURITY_TOKEN:
5691 {
5692 security_token_t *sec_token_p;
5693
5694 if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
5695 error = KERN_INVALID_ARGUMENT;
5696 break;
5697 }
5698
5699 sec_token_p = (security_token_t *) task_info_out;
5700
5701 *sec_token_p = *task_get_sec_token(task);
5702
5703 *task_info_count = TASK_SECURITY_TOKEN_COUNT;
5704 break;
5705 }
5706
5707 case TASK_AUDIT_TOKEN:
5708 {
5709 audit_token_t *audit_token_p;
5710
5711 if (*task_info_count < TASK_AUDIT_TOKEN_COUNT) {
5712 error = KERN_INVALID_ARGUMENT;
5713 break;
5714 }
5715
5716 audit_token_p = (audit_token_t *) task_info_out;
5717
5718 *audit_token_p = *task_get_audit_token(task);
5719
5720 *task_info_count = TASK_AUDIT_TOKEN_COUNT;
5721 break;
5722 }
5723
5724 case TASK_SCHED_INFO:
5725 error = KERN_INVALID_ARGUMENT;
5726 break;
5727
5728 case TASK_EVENTS_INFO:
5729 {
5730 task_events_info_t events_info;
5731 thread_t thread;
5732 uint64_t n_syscalls_mach, n_syscalls_unix, n_csw;
5733
5734 if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
5735 error = KERN_INVALID_ARGUMENT;
5736 break;
5737 }
5738
5739 events_info = (task_events_info_t) task_info_out;
5740
5741
5742 events_info->faults = (int32_t) MIN(counter_load(&task->faults), INT32_MAX);
5743 events_info->pageins = (int32_t) MIN(counter_load(&task->pageins), INT32_MAX);
5744 events_info->cow_faults = (int32_t) MIN(counter_load(&task->cow_faults), INT32_MAX);
5745 events_info->messages_sent = (int32_t) MIN(counter_load(&task->messages_sent), INT32_MAX);
5746 events_info->messages_received = (int32_t) MIN(counter_load(&task->messages_received), INT32_MAX);
5747
5748 n_syscalls_mach = task->syscalls_mach;
5749 n_syscalls_unix = task->syscalls_unix;
5750 n_csw = task->c_switch;
5751
5752 queue_iterate(&task->threads, thread, thread_t, task_threads) {
5753 n_csw += thread->c_switch;
5754 n_syscalls_mach += thread->syscalls_mach;
5755 n_syscalls_unix += thread->syscalls_unix;
5756 }
5757
5758 events_info->syscalls_mach = (int32_t) MIN(n_syscalls_mach, INT32_MAX);
5759 events_info->syscalls_unix = (int32_t) MIN(n_syscalls_unix, INT32_MAX);
5760 events_info->csw = (int32_t) MIN(n_csw, INT32_MAX);
5761
5762 *task_info_count = TASK_EVENTS_INFO_COUNT;
5763 break;
5764 }
5765 case TASK_AFFINITY_TAG_INFO:
5766 {
5767 if (*task_info_count < TASK_AFFINITY_TAG_INFO_COUNT) {
5768 error = KERN_INVALID_ARGUMENT;
5769 break;
5770 }
5771
5772 error = task_affinity_info(task, task_info_out, task_info_count);
5773 break;
5774 }
5775 case TASK_POWER_INFO:
5776 {
5777 if (*task_info_count < TASK_POWER_INFO_COUNT) {
5778 error = KERN_INVALID_ARGUMENT;
5779 break;
5780 }
5781
5782 task_power_info_locked(task, (task_power_info_t)task_info_out, NULL, NULL, NULL);
5783 break;
5784 }
5785
5786 case TASK_POWER_INFO_V2:
5787 {
5788 if (*task_info_count < TASK_POWER_INFO_V2_COUNT_OLD) {
5789 error = KERN_INVALID_ARGUMENT;
5790 break;
5791 }
5792 task_power_info_v2_t tpiv2 = (task_power_info_v2_t) task_info_out;
5793 task_power_info_locked(task, &tpiv2->cpu_energy, &tpiv2->gpu_energy, tpiv2, NULL);
5794 break;
5795 }
5796
5797 case TASK_VM_INFO:
5798 case TASK_VM_INFO_PURGEABLE:
5799 {
5800 task_vm_info_t vm_info;
5801 vm_map_t map;
5802 ledger_amount_t tmp_amount;
5803
5804 struct proc *p;
5805 uint32_t platform, sdk;
5806 p = current_proc();
5807 platform = proc_platform(p);
5808 sdk = proc_sdk(p);
5809 if (original_task_info_count > TASK_VM_INFO_COUNT) {
5810 /*
5811 * Some iOS apps pass an incorrect value for
5812 * task_info_count, expressed in number of bytes
5813 * instead of number of "natural_t" elements, which
5814 * can lead to binary compatibility issues (including
5815 * stack corruption) when the data structure is
5816 * expanded in the future.
5817 * Let's make this potential issue visible by
5818 * logging about it...
5819 */
5820 if (!proc_is_simulated(p)) {
5821 os_log(OS_LOG_DEFAULT, "%s[%d] task_info: possibly invalid "
5822 "task_info_count %d > TASK_VM_INFO_COUNT=%d on platform %d sdk "
5823 "%d.%d.%d - please use TASK_VM_INFO_COUNT",
5824 proc_name_address(p), proc_pid(p),
5825 original_task_info_count, TASK_VM_INFO_COUNT,
5826 platform, (sdk >> 16), ((sdk >> 8) & 0xff), (sdk & 0xff));
5827 }
5828 DTRACE_VM4(suspicious_task_vm_info_count,
5829 mach_msg_type_number_t, original_task_info_count,
5830 mach_msg_type_number_t, TASK_VM_INFO_COUNT,
5831 uint32_t, platform,
5832 uint32_t, sdk);
5833 }
5834 #if __arm64__
5835 if (original_task_info_count > TASK_VM_INFO_REV2_COUNT &&
5836 platform == PLATFORM_IOS &&
5837 sdk != 0 &&
5838 (sdk >> 16) <= 12) {
5839 /*
5840 * Some iOS apps pass an incorrect value for
5841 * task_info_count, expressed in number of bytes
5842 * instead of number of "natural_t" elements.
5843 * For the sake of backwards binary compatibility
5844 * for apps built with an iOS12 or older SDK and using
5845 * the "rev2" data structure, let's fix task_info_count
5846 * for them, to avoid stomping past the actual end
5847 * of their buffer.
5848 */
5849 #if DEVELOPMENT || DEBUG
5850 printf("%s:%d %d[%s] rdar://49484582 task_info_count %d -> %d "
5851 "platform %d sdk %d.%d.%d\n", __FUNCTION__, __LINE__, proc_pid(p),
5852 proc_name_address(p), original_task_info_count,
5853 TASK_VM_INFO_REV2_COUNT, platform, (sdk >> 16),
5854 ((sdk >> 8) & 0xff), (sdk & 0xff));
5855 #endif /* DEVELOPMENT || DEBUG */
5856 DTRACE_VM4(workaround_task_vm_info_count,
5857 mach_msg_type_number_t, original_task_info_count,
5858 mach_msg_type_number_t, TASK_VM_INFO_REV2_COUNT,
5859 uint32_t, platform,
5860 uint32_t, sdk);
5861 original_task_info_count = TASK_VM_INFO_REV2_COUNT;
5862 *task_info_count = original_task_info_count;
5863 }
5864 if (original_task_info_count > TASK_VM_INFO_REV5_COUNT &&
5865 platform == PLATFORM_IOS &&
5866 sdk != 0 &&
5867 (sdk >> 16) <= 15) {
5868 /*
5869 * Some iOS apps pass an incorrect value for
5870 * task_info_count, expressed in number of bytes
5871 * instead of number of "natural_t" elements.
5872 */
5873 printf("%s:%d %d[%s] task_info_count=%d > TASK_VM_INFO_COUNT=%d "
5874 "platform %d sdk %d.%d.%d\n", __FUNCTION__, __LINE__, proc_pid(p),
5875 proc_name_address(p), original_task_info_count,
5876 TASK_VM_INFO_REV5_COUNT, platform, (sdk >> 16),
5877 ((sdk >> 8) & 0xff), (sdk & 0xff));
5878 DTRACE_VM4(workaround_task_vm_info_count,
5879 mach_msg_type_number_t, original_task_info_count,
5880 mach_msg_type_number_t, TASK_VM_INFO_REV5_COUNT,
5881 uint32_t, platform,
5882 uint32_t, sdk);
5883 #if DEVELOPMENT || DEBUG
5884 /*
5885 * For the sake of internal builds livability,
5886 * work around this user-space bug by capping the
5887 * buffer's size to what it was with the iOS15 SDK.
5888 */
5889 original_task_info_count = TASK_VM_INFO_REV5_COUNT;
5890 *task_info_count = original_task_info_count;
5891 #endif /* DEVELOPMENT || DEBUG */
5892 }
5893
5894 if (original_task_info_count > TASK_VM_INFO_REV7_COUNT &&
5895 platform == PLATFORM_IOS &&
5896 sdk != 0 &&
5897 (sdk >> 16) == 17) {
5898 /*
5899 * Some iOS apps still pass an incorrect value for
5900 * task_info_count, expressed in number of bytes
5901 * instead of number of "natural_t" elements.
5902 */
5903 printf("%s:%d %d[%s] task_info_count=%d > TASK_VM_INFO_COUNT=%d "
5904 "platform %d sdk %d.%d.%d\n", __FUNCTION__, __LINE__, proc_pid(p),
5905 proc_name_address(p), original_task_info_count,
5906 TASK_VM_INFO_REV7_COUNT, platform, (sdk >> 16),
5907 ((sdk >> 8) & 0xff), (sdk & 0xff));
5908 DTRACE_VM4(workaround_task_vm_info_count,
5909 mach_msg_type_number_t, original_task_info_count,
5910 mach_msg_type_number_t, TASK_VM_INFO_REV6_COUNT,
5911 uint32_t, platform,
5912 uint32_t, sdk);
5913 #if DEVELOPMENT || DEBUG
5914 /*
5915 * For the sake of internal builds livability,
5916 * work around this user-space bug by capping the
5917 * buffer's size to what it was with the iOS15 and iOS16 SDKs.
5918 */
5919 original_task_info_count = TASK_VM_INFO_REV6_COUNT;
5920 *task_info_count = original_task_info_count;
5921 #endif /* DEVELOPMENT || DEBUG */
5922 }
5923 #endif /* __arm64__ */
5924
5925 if (*task_info_count < TASK_VM_INFO_REV0_COUNT) {
5926 error = KERN_INVALID_ARGUMENT;
5927 break;
5928 }
5929
5930 vm_info = (task_vm_info_t)task_info_out;
5931
5932 /*
5933 * Do not hold both the task and map locks,
5934 * so convert the task lock into a map reference,
5935 * drop the task lock, then lock the map.
5936 */
5937 if (is_kernel_task) {
5938 map = kernel_map;
5939 task_unlock(task);
5940 /* no lock, no reference */
5941 } else {
5942 map = task->map;
5943 vm_map_reference(map);
5944 task_unlock(task);
5945 vm_map_lock_read(map);
5946 }
5947
5948 vm_info->virtual_size = (typeof(vm_info->virtual_size))vm_map_adjusted_size(map);
5949 vm_info->region_count = map->hdr.nentries;
5950 vm_info->page_size = vm_map_page_size(map);
5951
5952 ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &vm_info->resident_size);
5953 ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &vm_info->resident_size_peak);
5954
5955 vm_info->device = 0;
5956 vm_info->device_peak = 0;
5957 ledger_get_balance(task->ledger, task_ledgers.external, (ledger_amount_t *) &vm_info->external);
5958 ledger_get_lifetime_max(task->ledger, task_ledgers.external, (ledger_amount_t *) &vm_info->external_peak);
5959 ledger_get_balance(task->ledger, task_ledgers.internal, (ledger_amount_t *) &vm_info->internal);
5960 ledger_get_lifetime_max(task->ledger, task_ledgers.internal, (ledger_amount_t *) &vm_info->internal_peak);
5961 ledger_get_balance(task->ledger, task_ledgers.reusable, (ledger_amount_t *) &vm_info->reusable);
5962 ledger_get_lifetime_max(task->ledger, task_ledgers.reusable, (ledger_amount_t *) &vm_info->reusable_peak);
5963 ledger_get_balance(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t*) &vm_info->compressed);
5964 ledger_get_lifetime_max(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t*) &vm_info->compressed_peak);
5965 ledger_get_entries(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t*) &vm_info->compressed_lifetime, &tmp_amount);
5966 ledger_get_balance(task->ledger, task_ledgers.neural_nofootprint_total, (ledger_amount_t *) &vm_info->ledger_tag_neural_nofootprint_total);
5967 ledger_get_lifetime_max(task->ledger, task_ledgers.neural_nofootprint_total, (ledger_amount_t *) &vm_info->ledger_tag_neural_nofootprint_peak);
5968
5969 vm_info->purgeable_volatile_pmap = 0;
5970 vm_info->purgeable_volatile_resident = 0;
5971 vm_info->purgeable_volatile_virtual = 0;
5972 if (is_kernel_task) {
5973 /*
5974 * We do not maintain the detailed stats for the
5975 * kernel_pmap, so just count everything as
5976 * "internal"...
5977 */
5978 vm_info->internal = vm_info->resident_size;
5979 /*
5980 * ... but since the memory held by the VM compressor
5981 * in the kernel address space ought to be attributed
5982 * to user-space tasks, we subtract it from "internal"
5983 * to give memory reporting tools a more accurate idea
5984 * of what the kernel itself is actually using, instead
5985 * of making it look like the kernel is leaking memory
5986 * when the system is under memory pressure.
5987 */
5988 vm_info->internal -= (VM_PAGE_COMPRESSOR_COUNT *
5989 PAGE_SIZE);
5990 } else {
5991 mach_vm_size_t volatile_virtual_size;
5992 mach_vm_size_t volatile_resident_size;
5993 mach_vm_size_t volatile_compressed_size;
5994 mach_vm_size_t volatile_pmap_size;
5995 mach_vm_size_t volatile_compressed_pmap_size;
5996 kern_return_t kr;
5997
5998 if (flavor == TASK_VM_INFO_PURGEABLE) {
5999 kr = vm_map_query_volatile(
6000 map,
6001 &volatile_virtual_size,
6002 &volatile_resident_size,
6003 &volatile_compressed_size,
6004 &volatile_pmap_size,
6005 &volatile_compressed_pmap_size);
6006 if (kr == KERN_SUCCESS) {
6007 vm_info->purgeable_volatile_pmap =
6008 volatile_pmap_size;
6009 if (radar_20146450) {
6010 vm_info->compressed -=
6011 volatile_compressed_pmap_size;
6012 }
6013 vm_info->purgeable_volatile_resident =
6014 volatile_resident_size;
6015 vm_info->purgeable_volatile_virtual =
6016 volatile_virtual_size;
6017 }
6018 }
6019 }
6020 *task_info_count = TASK_VM_INFO_REV0_COUNT;
6021
6022 if (original_task_info_count >= TASK_VM_INFO_REV2_COUNT) {
6023 /* must be captured while we still have the map lock */
6024 vm_info->min_address = map->min_offset;
6025 vm_info->max_address = map->max_offset;
6026 }
6027
6028 /*
6029 * Done with vm map things, can drop the map lock and reference,
6030 * and take the task lock back.
6031 *
6032 * Re-validate that the task didn't die on us.
6033 */
6034 if (!is_kernel_task) {
6035 vm_map_unlock_read(map);
6036 vm_map_deallocate(map);
6037 }
6038 map = VM_MAP_NULL;
6039
6040 task_lock(task);
6041
6042 if ((task != current_task()) && (!task->active)) {
6043 error = KERN_INVALID_ARGUMENT;
6044 break;
6045 }
6046
6047 if (original_task_info_count >= TASK_VM_INFO_REV1_COUNT) {
6048 vm_info->phys_footprint =
6049 (mach_vm_size_t) get_task_phys_footprint(task);
6050 *task_info_count = TASK_VM_INFO_REV1_COUNT;
6051 }
6052 if (original_task_info_count >= TASK_VM_INFO_REV2_COUNT) {
6053 /* data was captured above */
6054 *task_info_count = TASK_VM_INFO_REV2_COUNT;
6055 }
6056
6057 if (original_task_info_count >= TASK_VM_INFO_REV3_COUNT) {
6058 ledger_get_lifetime_max(task->ledger,
6059 task_ledgers.phys_footprint,
6060 &vm_info->ledger_phys_footprint_peak);
6061 ledger_get_balance(task->ledger,
6062 task_ledgers.purgeable_nonvolatile,
6063 &vm_info->ledger_purgeable_nonvolatile);
6064 ledger_get_balance(task->ledger,
6065 task_ledgers.purgeable_nonvolatile_compressed,
6066 &vm_info->ledger_purgeable_novolatile_compressed);
6067 ledger_get_balance(task->ledger,
6068 task_ledgers.purgeable_volatile,
6069 &vm_info->ledger_purgeable_volatile);
6070 ledger_get_balance(task->ledger,
6071 task_ledgers.purgeable_volatile_compressed,
6072 &vm_info->ledger_purgeable_volatile_compressed);
6073 ledger_get_balance(task->ledger,
6074 task_ledgers.network_nonvolatile,
6075 &vm_info->ledger_tag_network_nonvolatile);
6076 ledger_get_balance(task->ledger,
6077 task_ledgers.network_nonvolatile_compressed,
6078 &vm_info->ledger_tag_network_nonvolatile_compressed);
6079 ledger_get_balance(task->ledger,
6080 task_ledgers.network_volatile,
6081 &vm_info->ledger_tag_network_volatile);
6082 ledger_get_balance(task->ledger,
6083 task_ledgers.network_volatile_compressed,
6084 &vm_info->ledger_tag_network_volatile_compressed);
6085 ledger_get_balance(task->ledger,
6086 task_ledgers.media_footprint,
6087 &vm_info->ledger_tag_media_footprint);
6088 ledger_get_balance(task->ledger,
6089 task_ledgers.media_footprint_compressed,
6090 &vm_info->ledger_tag_media_footprint_compressed);
6091 ledger_get_balance(task->ledger,
6092 task_ledgers.media_nofootprint,
6093 &vm_info->ledger_tag_media_nofootprint);
6094 ledger_get_balance(task->ledger,
6095 task_ledgers.media_nofootprint_compressed,
6096 &vm_info->ledger_tag_media_nofootprint_compressed);
6097 ledger_get_balance(task->ledger,
6098 task_ledgers.graphics_footprint,
6099 &vm_info->ledger_tag_graphics_footprint);
6100 ledger_get_balance(task->ledger,
6101 task_ledgers.graphics_footprint_compressed,
6102 &vm_info->ledger_tag_graphics_footprint_compressed);
6103 ledger_get_balance(task->ledger,
6104 task_ledgers.graphics_nofootprint,
6105 &vm_info->ledger_tag_graphics_nofootprint);
6106 ledger_get_balance(task->ledger,
6107 task_ledgers.graphics_nofootprint_compressed,
6108 &vm_info->ledger_tag_graphics_nofootprint_compressed);
6109 ledger_get_balance(task->ledger,
6110 task_ledgers.neural_footprint,
6111 &vm_info->ledger_tag_neural_footprint);
6112 ledger_get_balance(task->ledger,
6113 task_ledgers.neural_footprint_compressed,
6114 &vm_info->ledger_tag_neural_footprint_compressed);
6115 ledger_get_balance(task->ledger,
6116 task_ledgers.neural_nofootprint,
6117 &vm_info->ledger_tag_neural_nofootprint);
6118 ledger_get_balance(task->ledger,
6119 task_ledgers.neural_nofootprint_compressed,
6120 &vm_info->ledger_tag_neural_nofootprint_compressed);
6121 *task_info_count = TASK_VM_INFO_REV3_COUNT;
6122 }
6123 if (original_task_info_count >= TASK_VM_INFO_REV4_COUNT) {
6124 if (get_bsdtask_info(task)) {
6125 vm_info->limit_bytes_remaining =
6126 memorystatus_available_memory_internal(get_bsdtask_info(task));
6127 } else {
6128 vm_info->limit_bytes_remaining = 0;
6129 }
6130 *task_info_count = TASK_VM_INFO_REV4_COUNT;
6131 }
6132 if (original_task_info_count >= TASK_VM_INFO_REV5_COUNT) {
6133 thread_t thread;
6134 uint64_t total = task->decompressions;
6135 queue_iterate(&task->threads, thread, thread_t, task_threads) {
6136 total += thread->decompressions;
6137 }
6138 vm_info->decompressions = (int32_t) MIN(total, INT32_MAX);
6139 *task_info_count = TASK_VM_INFO_REV5_COUNT;
6140 }
6141 if (original_task_info_count >= TASK_VM_INFO_REV6_COUNT) {
6142 ledger_get_balance(task->ledger, task_ledgers.swapins,
6143 &vm_info->ledger_swapins);
6144 *task_info_count = TASK_VM_INFO_REV6_COUNT;
6145 }
6146 if (original_task_info_count >= TASK_VM_INFO_REV7_COUNT) {
6147 ledger_get_balance(task->ledger,
6148 task_ledgers.neural_nofootprint_total,
6149 &vm_info->ledger_tag_neural_nofootprint_total);
6150 ledger_get_lifetime_max(task->ledger,
6151 task_ledgers.neural_nofootprint_total,
6152 &vm_info->ledger_tag_neural_nofootprint_peak);
6153 *task_info_count = TASK_VM_INFO_REV7_COUNT;
6154 }
6155
6156 break;
6157 }
6158
6159 case TASK_WAIT_STATE_INFO:
6160 {
6161 /*
6162 * Deprecated flavor. Currently allowing some results until all users
6163 * stop calling it. The results may not be accurate.
6164 */
6165 task_wait_state_info_t wait_state_info;
6166 uint64_t total_sfi_ledger_val = 0;
6167
6168 if (*task_info_count < TASK_WAIT_STATE_INFO_COUNT) {
6169 error = KERN_INVALID_ARGUMENT;
6170 break;
6171 }
6172
6173 wait_state_info = (task_wait_state_info_t) task_info_out;
6174
6175 wait_state_info->total_wait_state_time = 0;
6176 bzero(wait_state_info->_reserved, sizeof(wait_state_info->_reserved));
6177
6178 #if CONFIG_SCHED_SFI
6179 int i, prev_lentry = -1;
6180 int64_t val_credit, val_debit;
6181
6182 for (i = 0; i < MAX_SFI_CLASS_ID; i++) {
6183 val_credit = 0;
6184 /*
6185 * Checking prev_lentry != entry ensures that adjacent classes
6186 * which share the same ledger do not add their wait times twice.
6187 * Note: use the ledger call to get data for each individual SFI class.
6188 */
6189 if (prev_lentry != task_ledgers.sfi_wait_times[i] &&
6190 KERN_SUCCESS == ledger_get_entries(task->ledger,
6191 task_ledgers.sfi_wait_times[i], &val_credit, &val_debit)) {
6192 total_sfi_ledger_val += val_credit;
6193 }
6194 prev_lentry = task_ledgers.sfi_wait_times[i];
6195 }
6196
6197 #endif /* CONFIG_SCHED_SFI */
6198 wait_state_info->total_wait_sfi_state_time = total_sfi_ledger_val;
6199 *task_info_count = TASK_WAIT_STATE_INFO_COUNT;
6200
6201 break;
6202 }
6203 case TASK_VM_INFO_PURGEABLE_ACCOUNT:
6204 {
6205 #if DEVELOPMENT || DEBUG
6206 pvm_account_info_t acnt_info;
6207
6208 if (*task_info_count < PVM_ACCOUNT_INFO_COUNT) {
6209 error = KERN_INVALID_ARGUMENT;
6210 break;
6211 }
6212
6213 if (task_info_out == NULL) {
6214 error = KERN_INVALID_ARGUMENT;
6215 break;
6216 }
6217
6218 acnt_info = (pvm_account_info_t) task_info_out;
6219
6220 error = vm_purgeable_account(task, acnt_info);
6221
6222 *task_info_count = PVM_ACCOUNT_INFO_COUNT;
6223
6224 break;
6225 #else /* DEVELOPMENT || DEBUG */
6226 error = KERN_NOT_SUPPORTED;
6227 break;
6228 #endif /* DEVELOPMENT || DEBUG */
6229 }
6230 case TASK_FLAGS_INFO:
6231 {
6232 task_flags_info_t flags_info;
6233
6234 if (*task_info_count < TASK_FLAGS_INFO_COUNT) {
6235 error = KERN_INVALID_ARGUMENT;
6236 break;
6237 }
6238
6239 flags_info = (task_flags_info_t)task_info_out;
6240
6241 /* only publish the 64-bit flag of the task */
6242 flags_info->flags = task->t_flags & (TF_64B_ADDR | TF_64B_DATA);
6243
6244 *task_info_count = TASK_FLAGS_INFO_COUNT;
6245 break;
6246 }
6247
6248 case TASK_DEBUG_INFO_INTERNAL:
6249 {
6250 #if DEVELOPMENT || DEBUG
6251 task_debug_info_internal_t dbg_info;
6252 ipc_space_t space = task->itk_space;
6253 if (*task_info_count < TASK_DEBUG_INFO_INTERNAL_COUNT) {
6254 error = KERN_NOT_SUPPORTED;
6255 break;
6256 }
6257
6258 if (task_info_out == NULL) {
6259 error = KERN_INVALID_ARGUMENT;
6260 break;
6261 }
6262 dbg_info = (task_debug_info_internal_t) task_info_out;
6263 dbg_info->ipc_space_size = 0;
6264
6265 if (space) {
6266 smr_ipc_enter();
6267 ipc_entry_table_t table = smr_entered_load(&space->is_table);
6268 if (table) {
6269 dbg_info->ipc_space_size =
6270 ipc_entry_table_count(table);
6271 }
6272 smr_ipc_leave();
6273 }
6274
6275 dbg_info->suspend_count = task->suspend_count;
6276
6277 error = KERN_SUCCESS;
6278 *task_info_count = TASK_DEBUG_INFO_INTERNAL_COUNT;
6279 break;
6280 #else /* DEVELOPMENT || DEBUG */
6281 error = KERN_NOT_SUPPORTED;
6282 break;
6283 #endif /* DEVELOPMENT || DEBUG */
6284 }
6285 case TASK_SUSPEND_STATS_INFO:
6286 {
6287 #if CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG)
6288 if (*task_info_count < TASK_SUSPEND_STATS_INFO_COUNT || task_info_out == NULL) {
6289 error = KERN_INVALID_ARGUMENT;
6290 break;
6291 }
6292 error = _task_get_suspend_stats_locked(task, (task_suspend_stats_t)task_info_out);
6293 *task_info_count = TASK_SUSPEND_STATS_INFO_COUNT;
6294 break;
6295 #else /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6296 error = KERN_NOT_SUPPORTED;
6297 break;
6298 #endif /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6299 }
6300 case TASK_SUSPEND_SOURCES_INFO:
6301 {
6302 #if CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG)
6303 if (*task_info_count < TASK_SUSPEND_SOURCES_INFO_COUNT || task_info_out == NULL) {
6304 error = KERN_INVALID_ARGUMENT;
6305 break;
6306 }
6307 error = _task_get_suspend_sources_locked(task, (task_suspend_source_t)task_info_out);
6308 *task_info_count = TASK_SUSPEND_SOURCES_INFO_COUNT;
6309 break;
6310 #else /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6311 error = KERN_NOT_SUPPORTED;
6312 break;
6313 #endif /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6314 }
6315 case TASK_SECURITY_CONFIG_INFO:
6316 {
6317 task_security_config_info_t security_config;
6318
6319 if (*task_info_count < TASK_SECURITY_CONFIG_INFO_COUNT) {
6320 error = KERN_INVALID_ARGUMENT;
6321 break;
6322 }
6323
6324 security_config = (task_security_config_info_t)task_info_out;
6325 security_config->config = (uint32_t)task->security_config.value;
6326
6327 *task_info_count = TASK_SECURITY_CONFIG_INFO_COUNT;
6328 break;
6329 }
6330 default:
6331 error = KERN_INVALID_ARGUMENT;
6332 }
6333
6334 task_unlock(task);
6335 return error;
6336 }
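
/*
 * A minimal user-space sketch of calling task_info() with the
 * MACH_TASK_BASIC_INFO flavor handled above. Note that the count
 * argument is in/out and is expressed in natural_t units, not bytes
 * (see the TASK_VM_INFO workarounds above for what happens when
 * callers get this wrong).
 */
#if 0 /* illustrative example, not compiled */
#include <mach/mach.h>
#include <stdio.h>

static void
print_self_basic_info(void)
{
	mach_task_basic_info_data_t info;
	mach_msg_type_number_t count = MACH_TASK_BASIC_INFO_COUNT;
	kern_return_t kr;

	kr = task_info(mach_task_self(), MACH_TASK_BASIC_INFO,
	    (task_info_t)&info, &count);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "task_info: %s\n", mach_error_string(kr));
		return;
	}
	printf("resident %llu bytes, virtual %llu bytes, suspend count %d\n",
	    (unsigned long long)info.resident_size,
	    (unsigned long long)info.virtual_size,
	    info.suspend_count);
}
#endif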
6337
6338 /*
6339 * task_info_from_user
6340 *
6341 * When task_info is called from user space,
6342 * this function is executed on the MIG server side
6343 * instead of calling directly into task_info.
6344 * This makes it possible to perform additional security
6345 * checks on task_port.
6346 *
6347 * In the case of TASK_DYLD_INFO, we require the more
6348 * privileged task_read_port not the less-privileged task_name_port.
6349 *
6350 */
6351 kern_return_t
6352 task_info_from_user(
6353 mach_port_t task_port,
6354 task_flavor_t flavor,
6355 task_info_t task_info_out,
6356 mach_msg_type_number_t *task_info_count)
6357 {
6358 task_t task;
6359 kern_return_t ret;
6360
6361 if (flavor == TASK_DYLD_INFO) {
6362 task = convert_port_to_task_read(task_port);
6363 } else {
6364 task = convert_port_to_task_name(task_port);
6365 }
6366
6367 ret = task_info(task, flavor, task_info_out, task_info_count);
6368
6369 task_deallocate(task);
6370
6371 return ret;
6372 }
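
/*
 * A user-space sketch of the TASK_DYLD_INFO flavor discussed above.
 * Because of the port conversion in task_info_from_user(), the port
 * passed in must carry at least task-read privilege; a mere task-name
 * port is converted to TASK_NULL and the call fails. A legacy caller
 * may pass the smaller pre-format count and simply will not receive
 * all_image_info_format.
 */
#if 0 /* illustrative example, not compiled */
#include <mach/mach.h>

static kern_return_t
get_dyld_info(task_t task, struct task_dyld_info *out)
{
	mach_msg_type_number_t count = TASK_DYLD_INFO_COUNT;

	/* with the full count, all_image_info_format is filled in */
	return task_info(task, TASK_DYLD_INFO, (task_info_t)out, &count);
}
#endif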
6373
6374 /*
6375 * Routine: task_dyld_process_info_update_helper
6376 *
6377 * Release send rights in release_ports.
6378 *
6379 * If no active ports are found in the task's dyld notifier array, unset the
6380 * magic value in user space to indicate so.
6381 *
6382 * Condition:
6383 * task's itk_lock is locked, and is unlocked upon return.
6384 * Global g_dyldinfo_mtx is locked, and is unlocked upon return.
6385 */
6386 void
6387 task_dyld_process_info_update_helper(
6388 task_t task,
6389 size_t active_count,
6390 vm_map_address_t magic_addr, /* a userspace address */
6391 ipc_port_t *release_ports,
6392 size_t release_count)
6393 {
6394 void *notifiers_ptr = NULL;
6395
6396 assert(release_count <= DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT);
6397
6398 if (active_count == 0) {
6399 assert(task->itk_dyld_notify != NULL);
6400 notifiers_ptr = task->itk_dyld_notify;
6401 task->itk_dyld_notify = NULL;
6402 itk_unlock(task);
6403
6404 kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
6405 (void)copyoutmap_atomic32(task->map, MACH_PORT_NULL, magic_addr); /* unset magic */
6406 } else {
6407 itk_unlock(task);
6408 (void)copyoutmap_atomic32(task->map, (mach_port_name_t)DYLD_PROCESS_INFO_NOTIFY_MAGIC,
6409 magic_addr); /* reset magic */
6410 }
6411
6412 lck_mtx_unlock(&g_dyldinfo_mtx);
6413
6414 for (size_t i = 0; i < release_count; i++) {
6415 ipc_port_release_send(release_ports[i]);
6416 }
6417 }
6418
6419 /*
6420 * Routine: task_dyld_process_info_notify_register
6421 *
6422 * Insert a send right to target task's itk_dyld_notify array. Allocate kernel
6423 * memory for the array if it's the first port to be registered. Also clean up
6424 * any dead rights found in the array.
6425 *
6426 * Consumes sright if this call returns KERN_SUCCESS; otherwise MIG will destroy it.
6427 *
6428 * Args:
6429 * task: Target task for the registration.
6430 * sright: A send right.
6431 *
6432 * Returns:
6433 * KERN_SUCCESS: Registration succeeded.
6434 * KERN_INVALID_TASK: task is invalid.
6435 * KERN_INVALID_RIGHT: sright is invalid.
6436 * KERN_DENIED: Security policy denied this call.
6437 * KERN_RESOURCE_SHORTAGE: Kernel memory allocation failed.
6438 * KERN_NO_SPACE: No available notifier port slot left for this task.
6439 * KERN_RIGHT_EXISTS: The notifier port is already registered and active.
6440 *
6441 * Other error code see task_info().
6442 *
6443 * See Also:
6444 * task_dyld_process_info_notify_get_trap() in mach_kernelrpc.c
6445 */
6446 kern_return_t
6447 task_dyld_process_info_notify_register(
6448 task_t task,
6449 ipc_port_t sright)
6450 {
6451 struct task_dyld_info dyld_info;
6452 mach_msg_type_number_t info_count = TASK_DYLD_INFO_COUNT;
6453 ipc_port_t release_ports[DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT];
6454 uint32_t release_count = 0, active_count = 0;
6455 mach_vm_address_t ports_addr; /* a user space address */
6456 kern_return_t kr;
6457 boolean_t right_exists = false;
6458 ipc_port_t *notifiers_ptr = NULL;
6459 ipc_port_t *portp;
6460
6461 if (task == TASK_NULL || task == kernel_task) {
6462 return KERN_INVALID_TASK;
6463 }
6464
6465 if (!IP_VALID(sright)) {
6466 return KERN_INVALID_RIGHT;
6467 }
6468
6469 #if CONFIG_MACF
6470 if (mac_task_check_dyld_process_info_notify_register()) {
6471 return KERN_DENIED;
6472 }
6473 #endif
6474
6475 kr = task_info(task, TASK_DYLD_INFO, (task_info_t)&dyld_info, &info_count);
6476 if (kr) {
6477 return kr;
6478 }
6479
6480 if (dyld_info.all_image_info_format == TASK_DYLD_ALL_IMAGE_INFO_32) {
6481 ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6482 offsetof(struct user32_dyld_all_image_infos, notifyMachPorts));
6483 } else {
6484 ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6485 offsetof(struct user64_dyld_all_image_infos, notifyMachPorts));
6486 }
6487
6488 retry:
6489 if (task->itk_dyld_notify == NULL) {
6490 notifiers_ptr = kalloc_type(ipc_port_t,
6491 DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT,
6492 Z_WAITOK | Z_ZERO | Z_NOFAIL);
6493 }
6494
6495 lck_mtx_lock(&g_dyldinfo_mtx);
6496 itk_lock(task);
6497
6498 if (task->itk_dyld_notify == NULL) {
6499 if (notifiers_ptr == NULL) {
6500 itk_unlock(task);
6501 lck_mtx_unlock(&g_dyldinfo_mtx);
6502 goto retry;
6503 }
6504 task->itk_dyld_notify = notifiers_ptr;
6505 notifiers_ptr = NULL;
6506 }
6507
6508 assert(task->itk_dyld_notify != NULL);
6509 /* First pass: clear dead names and check for duplicate registration */
6510 for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
6511 portp = &task->itk_dyld_notify[slot];
6512 if (*portp != IPC_PORT_NULL && !ip_active(*portp)) {
6513 release_ports[release_count++] = *portp;
6514 *portp = IPC_PORT_NULL;
6515 } else if (*portp == sright) {
6516 /* the port is already registered and is active */
6517 right_exists = true;
6518 }
6519
6520 if (*portp != IPC_PORT_NULL) {
6521 active_count++;
6522 }
6523 }
6524
6525 if (right_exists) {
6526 /* skip second pass */
6527 kr = KERN_RIGHT_EXISTS;
6528 goto out;
6529 }
6530
6531 /* Second pass: register the port */
6532 kr = KERN_NO_SPACE;
6533 for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
6534 portp = &task->itk_dyld_notify[slot];
6535 if (*portp == IPC_PORT_NULL) {
6536 *portp = sright;
6537 active_count++;
6538 kr = KERN_SUCCESS;
6539 break;
6540 }
6541 }
6542
6543 out:
6544 assert(active_count > 0);
6545
6546 task_dyld_process_info_update_helper(task, active_count,
6547 (vm_map_address_t)ports_addr, release_ports, release_count);
6548 /* itk_lock, g_dyldinfo_mtx are unlocked upon return */
6549
6550 kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
6551
6552 return kr;
6553 }
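
/*
 * The registration path above uses a classic pattern: allocate while
 * unlocked (the allocation may sleep), then re-check under the locks
 * and retry if another thread raced us. A distilled, generic sketch of
 * that pattern follows; it is an illustration, not the actual
 * itk_dyld_notify code.
 */
#if 0 /* illustrative example, not compiled */
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
static int *g_table; /* lazily allocated, like itk_dyld_notify */

static int *
get_table(size_t nelem)
{
	int *fresh = NULL;
retry:
	if (g_table == NULL) {
		fresh = calloc(nelem, sizeof(int)); /* unlocked; may sleep */
	}
	pthread_mutex_lock(&g_lock);
	if (g_table == NULL) {
		if (fresh == NULL) {
			/* the table vanished after we checked; try again */
			pthread_mutex_unlock(&g_lock);
			goto retry;
		}
		g_table = fresh;
		fresh = NULL;
	}
	pthread_mutex_unlock(&g_lock);
	free(fresh); /* non-NULL only if we lost the install race */
	return g_table;
}
#endif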
6554
6555 /*
6556 * Routine: task_dyld_process_info_notify_deregister
6557 *
6558 * Remove the send right in the target task's itk_dyld_notify array that matches the
6559 * receive right name passed in. Deallocate kernel memory for the array if it's the last
6560 * port to be deregistered, or if all ports have died. Also clean up any dead rights found in the array.
6561 *
6562 * Does not consume any reference.
6563 *
6564 * Args:
6565 * task: Target task for the deregistration.
6566 * rcv_name: The name denoting the receive right in caller's space.
6567 *
6568 * Returns:
6569 * KERN_SUCCESS: A matching entry was found and deregistration succeeded.
6570 * KERN_INVALID_TASK: task is invalid.
6571 * KERN_INVALID_NAME: name is invalid.
6572 * KERN_DENIED: Security policy denied this call.
6573 * KERN_FAILURE: A matching entry is not found.
6574 * KERN_INVALID_RIGHT: The name passed in does not represent a valid rcv right.
6575 *
6576 * Other error code see task_info().
6577 *
6578 * See Also:
6579 * task_dyld_process_info_notify_get_trap() in mach_kernelrpc.c
6580 */
6581 kern_return_t
6582 task_dyld_process_info_notify_deregister(
6583 task_t task,
6584 mach_port_name_t rcv_name)
6585 {
6586 struct task_dyld_info dyld_info;
6587 mach_msg_type_number_t info_count = TASK_DYLD_INFO_COUNT;
6588 ipc_port_t release_ports[DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT];
6589 uint32_t release_count = 0, active_count = 0;
6590 boolean_t port_found = false;
6591 mach_vm_address_t ports_addr; /* a user space address */
6592 ipc_port_t sright;
6593 kern_return_t kr;
6594 ipc_port_t *portp;
6595
6596 if (task == TASK_NULL || task == kernel_task) {
6597 return KERN_INVALID_TASK;
6598 }
6599
6600 if (!MACH_PORT_VALID(rcv_name)) {
6601 return KERN_INVALID_NAME;
6602 }
6603
6604 #if CONFIG_MACF
6605 if (mac_task_check_dyld_process_info_notify_register()) {
6606 return KERN_DENIED;
6607 }
6608 #endif
6609
6610 kr = task_info(task, TASK_DYLD_INFO, (task_info_t)&dyld_info, &info_count);
6611 if (kr) {
6612 return kr;
6613 }
6614
6615 if (dyld_info.all_image_info_format == TASK_DYLD_ALL_IMAGE_INFO_32) {
6616 ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6617 offsetof(struct user32_dyld_all_image_infos, notifyMachPorts));
6618 } else {
6619 ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6620 offsetof(struct user64_dyld_all_image_infos, notifyMachPorts));
6621 }
6622
6623 kr = ipc_port_translate_receive(current_space(), rcv_name, &sright); /* does not produce port ref */
6624 if (kr) {
6625 return KERN_INVALID_RIGHT;
6626 }
6627
6628 ip_reference(sright);
6629 ip_mq_unlock(sright);
6630
6631 assert(sright != IPC_PORT_NULL);
6632
6633 lck_mtx_lock(&g_dyldinfo_mtx);
6634 itk_lock(task);
6635
6636 if (task->itk_dyld_notify == NULL) {
6637 itk_unlock(task);
6638 lck_mtx_unlock(&g_dyldinfo_mtx);
6639 ip_release(sright);
6640 return KERN_FAILURE;
6641 }
6642
6643 for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
6644 portp = &task->itk_dyld_notify[slot];
6645 if (*portp == sright) {
6646 release_ports[release_count++] = *portp;
6647 *portp = IPC_PORT_NULL;
6648 port_found = true;
6649 } else if ((*portp != IPC_PORT_NULL && !ip_active(*portp))) {
6650 release_ports[release_count++] = *portp;
6651 *portp = IPC_PORT_NULL;
6652 }
6653
6654 if (*portp != IPC_PORT_NULL) {
6655 active_count++;
6656 }
6657 }
6658
6659 task_dyld_process_info_update_helper(task, active_count,
6660 (vm_map_address_t)ports_addr, release_ports, release_count);
6661 /* itk_lock, g_dyldinfo_mtx are unlocked upon return */
6662
6663 ip_release(sright);
6664
6665 return port_found ? KERN_SUCCESS : KERN_FAILURE;
6666 }
6667
6668 /*
6669 * task_power_info
6670 *
6671 * Returns power stats for the task.
6672 * Note: Called with task locked.
6673 */
6674 void
6675 task_power_info_locked(
6676 task_t task,
6677 task_power_info_t info,
6678 gpu_energy_data_t ginfo,
6679 task_power_info_v2_t infov2,
6680 struct task_power_info_extra *extra_info)
6681 {
6682 thread_t thread;
6683 ledger_amount_t tmp;
6684
6685 uint64_t runnable_time_sum = 0;
6686
6687 task_lock_assert_owned(task);
6688
6689 ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
6690 (ledger_amount_t *)&info->task_interrupt_wakeups, &tmp);
6691 ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
6692 (ledger_amount_t *)&info->task_platform_idle_wakeups, &tmp);
6693
6694 info->task_timer_wakeups_bin_1 = task->task_timer_wakeups_bin_1;
6695 info->task_timer_wakeups_bin_2 = task->task_timer_wakeups_bin_2;
6696
6697 struct recount_usage usage = { 0 };
6698 struct recount_usage usage_perf = { 0 };
6699 recount_task_usage_perf_only(task, &usage, &usage_perf);
6700
6701 info->total_user = usage.ru_metrics[RCT_LVL_USER].rm_time_mach;
6702 info->total_system = recount_usage_system_time_mach(&usage);
6703 runnable_time_sum = task->total_runnable_time;
6704
6705 if (ginfo) {
6706 ginfo->task_gpu_utilisation = task->task_gpu_ns;
6707 }
6708
6709 if (infov2) {
6710 infov2->task_ptime = recount_usage_time_mach(&usage_perf);
6711 infov2->task_pset_switches = task->ps_switch;
6712 #if CONFIG_PERVASIVE_ENERGY
6713 infov2->task_energy = usage.ru_energy_nj;
6714 #endif /* CONFIG_PERVASIVE_ENERGY */
6715 }
6716
6717 queue_iterate(&task->threads, thread, thread_t, task_threads) {
6718 spl_t x;
6719
6720 if (thread->options & TH_OPT_IDLE_THREAD) {
6721 continue;
6722 }
6723
6724 x = splsched();
6725 thread_lock(thread);
6726
6727 info->task_timer_wakeups_bin_1 += thread->thread_timer_wakeups_bin_1;
6728 info->task_timer_wakeups_bin_2 += thread->thread_timer_wakeups_bin_2;
6729
6730 if (infov2) {
6731 infov2->task_pset_switches += thread->ps_switch;
6732 }
6733
6734 runnable_time_sum += timer_grab(&thread->runnable_timer);
6735
6736 if (ginfo) {
6737 ginfo->task_gpu_utilisation += ml_gpu_stat(thread);
6738 }
6739 thread_unlock(thread);
6740 splx(x);
6741 }
6742
6743 if (extra_info) {
6744 extra_info->runnable_time = runnable_time_sum;
6745 #if CONFIG_PERVASIVE_CPI
6746 extra_info->cycles = recount_usage_cycles(&usage);
6747 extra_info->instructions = recount_usage_instructions(&usage);
6748 extra_info->pcycles = recount_usage_cycles(&usage_perf);
6749 extra_info->pinstructions = recount_usage_instructions(&usage_perf);
6750 extra_info->user_ptime = usage_perf.ru_metrics[RCT_LVL_USER].rm_time_mach;
6751 extra_info->system_ptime = recount_usage_system_time_mach(&usage_perf);
6752 #endif // CONFIG_PERVASIVE_CPI
6753 #if CONFIG_PERVASIVE_ENERGY
6754 extra_info->energy = usage.ru_energy_nj;
6755 extra_info->penergy = usage_perf.ru_energy_nj;
6756 #endif // CONFIG_PERVASIVE_ENERGY
6757 #if RECOUNT_SECURE_METRICS
6758 if (PE_i_can_has_debugger(NULL)) {
6759 extra_info->secure_time = usage.ru_metrics[RCT_LVL_SECURE].rm_time_mach;
6760 extra_info->secure_ptime = usage_perf.ru_metrics[RCT_LVL_SECURE].rm_time_mach;
6761 }
6762 #endif // RECOUNT_SECURE_METRICS
6763 }
6764 }
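
/*
 * task_power_info_locked() must be called with the task lock held, as
 * asserted above. A minimal in-kernel calling sketch (the surrounding
 * context is assumed):
 */
#if 0 /* illustrative example, not compiled */
	task_power_info_data_t pinfo;

	task_lock(task);
	task_power_info_locked(task, &pinfo, NULL, NULL, NULL);
	task_unlock(task);
#endif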
6765
6766 /*
6767 * task_gpu_utilisation
6768 *
6769 * Returns the total GPU time used by all the threads of the task
6770 * (both dead and alive).
6771 */
6772 uint64_t
6773 task_gpu_utilisation(
6774 task_t task)
6775 {
6776 uint64_t gpu_time = 0;
6777 #if defined(__x86_64__)
6778 thread_t thread;
6779
6780 task_lock(task);
6781 gpu_time += task->task_gpu_ns;
6782
6783 queue_iterate(&task->threads, thread, thread_t, task_threads) {
6784 spl_t x;
6785 x = splsched();
6786 thread_lock(thread);
6787 gpu_time += ml_gpu_stat(thread);
6788 thread_unlock(thread);
6789 splx(x);
6790 }
6791
6792 task_unlock(task);
6793 #else /* defined(__x86_64__) */
6794 /* silence compiler warning */
6795 (void)task;
6796 #endif /* defined(__x86_64__) */
6797 return gpu_time;
6798 }
6799
6800 /* This function updates the cpu time in the arrays for each
6801 * effective and requested QoS class
6802 */
6803 void
6804 task_update_cpu_time_qos_stats(
6805 task_t task,
6806 uint64_t *eqos_stats,
6807 uint64_t *rqos_stats)
6808 {
6809 if (!eqos_stats && !rqos_stats) {
6810 return;
6811 }
6812
6813 task_lock(task);
6814 thread_t thread;
6815 queue_iterate(&task->threads, thread, thread_t, task_threads) {
6816 if (thread->options & TH_OPT_IDLE_THREAD) {
6817 continue;
6818 }
6819
6820 thread_update_qos_cpu_time(thread);
6821 }
6822
6823 if (eqos_stats) {
6824 eqos_stats[THREAD_QOS_DEFAULT] += task->cpu_time_eqos_stats.cpu_time_qos_default;
6825 eqos_stats[THREAD_QOS_MAINTENANCE] += task->cpu_time_eqos_stats.cpu_time_qos_maintenance;
6826 eqos_stats[THREAD_QOS_BACKGROUND] += task->cpu_time_eqos_stats.cpu_time_qos_background;
6827 eqos_stats[THREAD_QOS_UTILITY] += task->cpu_time_eqos_stats.cpu_time_qos_utility;
6828 eqos_stats[THREAD_QOS_LEGACY] += task->cpu_time_eqos_stats.cpu_time_qos_legacy;
6829 eqos_stats[THREAD_QOS_USER_INITIATED] += task->cpu_time_eqos_stats.cpu_time_qos_user_initiated;
6830 eqos_stats[THREAD_QOS_USER_INTERACTIVE] += task->cpu_time_eqos_stats.cpu_time_qos_user_interactive;
6831 }
6832
6833 if (rqos_stats) {
6834 rqos_stats[THREAD_QOS_DEFAULT] += task->cpu_time_rqos_stats.cpu_time_qos_default;
6835 rqos_stats[THREAD_QOS_MAINTENANCE] += task->cpu_time_rqos_stats.cpu_time_qos_maintenance;
6836 rqos_stats[THREAD_QOS_BACKGROUND] += task->cpu_time_rqos_stats.cpu_time_qos_background;
6837 rqos_stats[THREAD_QOS_UTILITY] += task->cpu_time_rqos_stats.cpu_time_qos_utility;
6838 rqos_stats[THREAD_QOS_LEGACY] += task->cpu_time_rqos_stats.cpu_time_qos_legacy;
6839 rqos_stats[THREAD_QOS_USER_INITIATED] += task->cpu_time_rqos_stats.cpu_time_qos_user_initiated;
6840 rqos_stats[THREAD_QOS_USER_INTERACTIVE] += task->cpu_time_rqos_stats.cpu_time_qos_user_interactive;
6841 }
6842
6843 task_unlock(task);
6844 }
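/*
 * Usage sketch (hypothetical caller, for illustration only; assumes the
 * THREAD_QOS_* indices above are dense and bounded by THREAD_QOS_LAST):
 *
 *     uint64_t eqos[THREAD_QOS_LAST] = { 0 };
 *     uint64_t rqos[THREAD_QOS_LAST] = { 0 };
 *
 *     task_update_cpu_time_qos_stats(task, eqos, rqos);
 *     // eqos[THREAD_QOS_UTILITY] now holds the CPU time accumulated by
 *     // threads whose effective QoS class was UTILITY.
 */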
6845
6846 kern_return_t
6847 task_purgable_info(
6848 task_t task,
6849 task_purgable_info_t *stats)
6850 {
6851 if (task == TASK_NULL || stats == NULL) {
6852 return KERN_INVALID_ARGUMENT;
6853 }
6854 /* Take task reference */
6855 task_reference(task);
6856 vm_purgeable_stats((vm_purgeable_info_t)stats, task);
6857 /* Drop task reference */
6858 task_deallocate(task);
6859 return KERN_SUCCESS;
6860 }
6861
6862 void
6863 task_vtimer_set(
6864 task_t task,
6865 integer_t which)
6866 {
6867 thread_t thread;
6868 spl_t x;
6869
6870 task_lock(task);
6871
6872 task->vtimers |= which;
6873
6874 switch (which) {
6875 case TASK_VTIMER_USER:
6876 queue_iterate(&task->threads, thread, thread_t, task_threads) {
6877 x = splsched();
6878 thread_lock(thread);
6879 struct recount_times_mach times = recount_thread_times(thread);
6880 thread->vtimer_user_save = times.rtm_user;
6881 thread_unlock(thread);
6882 splx(x);
6883 }
6884 break;
6885
6886 case TASK_VTIMER_PROF:
6887 queue_iterate(&task->threads, thread, thread_t, task_threads) {
6888 x = splsched();
6889 thread_lock(thread);
6890 thread->vtimer_prof_save = recount_thread_time_mach(thread);
6891 thread_unlock(thread);
6892 splx(x);
6893 }
6894 break;
6895
6896 case TASK_VTIMER_RLIM:
6897 queue_iterate(&task->threads, thread, thread_t, task_threads) {
6898 x = splsched();
6899 thread_lock(thread);
6900 thread->vtimer_rlim_save = recount_thread_time_mach(thread);
6901 thread_unlock(thread);
6902 splx(x);
6903 }
6904 break;
6905 }
6906
6907 task_unlock(task);
6908 }
6909
6910 void
6911 task_vtimer_clear(
6912 task_t task,
6913 integer_t which)
6914 {
6915 task_lock(task);
6916
6917 task->vtimers &= ~which;
6918
6919 task_unlock(task);
6920 }
6921
6922 void
6923 task_vtimer_update(
6924 __unused
6925 task_t task,
6926 integer_t which,
6927 uint32_t *microsecs)
6928 {
6929 thread_t thread = current_thread();
6930 uint32_t tdelt = 0;
6931 clock_sec_t secs = 0;
6932 uint64_t tsum;
6933
6934 assert(task == current_task());
6935
6936 spl_t s = splsched();
6937 thread_lock(thread);
6938
6939 if ((task->vtimers & which) != (uint32_t)which) {
6940 thread_unlock(thread);
6941 splx(s);
6942 return;
6943 }
6944
6945 switch (which) {
6946 case TASK_VTIMER_USER:;
6947 struct recount_times_mach times = recount_thread_times(thread);
6948 tsum = times.rtm_user;
6949 tdelt = (uint32_t)(tsum - thread->vtimer_user_save);
6950 thread->vtimer_user_save = tsum;
6951 absolutetime_to_microtime(tdelt, &secs, microsecs);
6952 break;
6953
6954 case TASK_VTIMER_PROF:
6955 tsum = recount_current_thread_time_mach();
6956 tdelt = (uint32_t)(tsum - thread->vtimer_prof_save);
6957 absolutetime_to_microtime(tdelt, &secs, microsecs);
6958 /* if the time delta is smaller than a usec, ignore */
6959 if (*microsecs != 0) {
6960 thread->vtimer_prof_save = tsum;
6961 }
6962 break;
6963
6964 case TASK_VTIMER_RLIM:
6965 tsum = recount_current_thread_time_mach();
6966 tdelt = (uint32_t)(tsum - thread->vtimer_rlim_save);
6967 thread->vtimer_rlim_save = tsum;
6968 absolutetime_to_microtime(tdelt, &secs, microsecs);
6969 break;
6970 }
6971
6972 thread_unlock(thread);
6973 splx(s);
6974 }
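/*
 * Protocol sketch (for illustration): task_vtimer_set() snapshots a
 * per-thread baseline for the chosen timer; task_vtimer_update(), called
 * later on the current thread, reports the time elapsed since that
 * baseline in microseconds and re-arms the baseline.
 *
 *     task_vtimer_set(task, TASK_VTIMER_USER);
 *     ...
 *     uint32_t usecs;
 *     task_vtimer_update(task, TASK_VTIMER_USER, &usecs); // delta since set
 */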
6975
6976 uint64_t
6977 get_task_dispatchqueue_offset(
6978 task_t task)
6979 {
6980 return task->dispatchqueue_offset;
6981 }
6982
6983 void
6984 task_synchronizer_destroy_all(task_t task)
6985 {
6986 /*
6987 * Destroy owned semaphores
6988 */
6989 semaphore_destroy_all(task);
6990 }
6991
6992 /*
6993 * Install default (machine-dependent) initial thread state
6994 * on the task. Subsequent thread creation will have this initial
6995 * state set on the thread by machine_thread_inherit_taskwide().
6996 * Flavors and structures are exactly the same as those to thread_set_state()
6997 */
6998 kern_return_t
6999 task_set_state(
7000 task_t task,
7001 int flavor,
7002 thread_state_t state,
7003 mach_msg_type_number_t state_count)
7004 {
7005 kern_return_t ret;
7006
7007 if (task == TASK_NULL) {
7008 return KERN_INVALID_ARGUMENT;
7009 }
7010
7011 task_lock(task);
7012
7013 if (!task->active) {
7014 task_unlock(task);
7015 return KERN_FAILURE;
7016 }
7017
7018 ret = machine_task_set_state(task, flavor, state, state_count);
7019
7020 task_unlock(task);
7021 return ret;
7022 }
7023
7024 /*
7025 * Examine the default (machine-dependent) initial thread state
7026 * on the task, as set by task_set_state(). Flavors and structures
7027 * are exactly the same as those passed to thread_get_state().
7028 */
7029 kern_return_t
7030 task_get_state(
7031 task_t task,
7032 int flavor,
7033 thread_state_t state,
7034 mach_msg_type_number_t *state_count)
7035 {
7036 kern_return_t ret;
7037
7038 if (task == TASK_NULL) {
7039 return KERN_INVALID_ARGUMENT;
7040 }
7041
7042 task_lock(task);
7043
7044 if (!task->active) {
7045 task_unlock(task);
7046 return KERN_FAILURE;
7047 }
7048
7049 ret = machine_task_get_state(task, flavor, state, state_count);
7050
7051 task_unlock(task);
7052 return ret;
7053 }
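/*
 * Round-trip sketch (hypothetical, for illustration only; the concrete
 * flavor and state structure are machine-dependent, e.g. ARM_THREAD_STATE64
 * and arm_thread_state64_t on arm64):
 *
 *     arm_thread_state64_t ts;
 *     mach_msg_type_number_t count = ARM_THREAD_STATE64_COUNT;
 *
 *     kern_return_t kr = task_get_state(task, ARM_THREAD_STATE64,
 *         (thread_state_t)&ts, &count);
 *     if (kr == KERN_SUCCESS) {
 *         // adjust ts, then install it as the default for new threads
 *         kr = task_set_state(task, ARM_THREAD_STATE64,
 *             (thread_state_t)&ts, count);
 *     }
 */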
7054
7055
7056 static kern_return_t __attribute__((noinline, not_tail_called))
7057 PROC_VIOLATED_GUARD__SEND_EXC_GUARD(
7058 mach_exception_code_t code,
7059 mach_exception_subcode_t subcode,
7060 void *reason,
7061 boolean_t backtrace_only)
7062 {
7063 #ifdef MACH_BSD
7064 if (1 == proc_selfpid()) {
7065 return KERN_NOT_SUPPORTED; // initproc is immune
7066 }
7067 #endif
7068 mach_exception_data_type_t codes[EXCEPTION_CODE_MAX] = {
7069 [0] = code,
7070 [1] = subcode,
7071 };
7072 task_t task = current_task();
7073 kern_return_t kr;
7074 void *bsd_info = get_bsdtask_info(task);
7075
7076 /* (See jetsam-related comments below) */
7077
7078 proc_memstat_skip(bsd_info, TRUE);
7079 kr = task_enqueue_exception_with_corpse(task, EXC_GUARD, codes, 2, reason, backtrace_only);
7080 proc_memstat_skip(bsd_info, FALSE);
7081 return kr;
7082 }
7083
7084 kern_return_t
7085 task_violated_guard(
7086 mach_exception_code_t code,
7087 mach_exception_subcode_t subcode,
7088 void *reason,
7089 bool backtrace_only)
7090 {
7091 return PROC_VIOLATED_GUARD__SEND_EXC_GUARD(code, subcode, reason, backtrace_only);
7092 }
7093
7094
7095 #if CONFIG_MEMORYSTATUS
7096
7097 boolean_t
7098 task_get_memlimit_is_active(task_t task)
7099 {
7100 assert(task != NULL);
7101
7102 if (task->memlimit_is_active == 1) {
7103 return TRUE;
7104 } else {
7105 return FALSE;
7106 }
7107 }
7108
7109 void
7110 task_set_memlimit_is_active(task_t task, boolean_t memlimit_is_active)
7111 {
7112 assert(task != NULL);
7113
7114 if (memlimit_is_active) {
7115 task->memlimit_is_active = 1;
7116 } else {
7117 task->memlimit_is_active = 0;
7118 }
7119 }
7120
7121 boolean_t
7122 task_get_memlimit_is_fatal(task_t task)
7123 {
7124 assert(task != NULL);
7125
7126 if (task->memlimit_is_fatal == 1) {
7127 return TRUE;
7128 } else {
7129 return FALSE;
7130 }
7131 }
7132
7133 void
7134 task_set_memlimit_is_fatal(task_t task, boolean_t memlimit_is_fatal)
7135 {
7136 assert(task != NULL);
7137
7138 if (memlimit_is_fatal) {
7139 task->memlimit_is_fatal = 1;
7140 } else {
7141 task->memlimit_is_fatal = 0;
7142 }
7143 }
7144
7145 uint64_t
7146 task_get_dirty_start(task_t task)
7147 {
7148 return task->memstat_dirty_start;
7149 }
7150
7151 void
7152 task_set_dirty_start(task_t task, uint64_t start)
7153 {
7154 task_lock(task);
7155 task->memstat_dirty_start = start;
7156 task_unlock(task);
7157 }
7158
7159 boolean_t
7160 task_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active)
7161 {
7162 boolean_t triggered = FALSE;
7163
7164 assert(task == current_task());
7165
7166 /*
7167 * Returns TRUE if the task has already triggered an EXC_RESOURCE exception.
7168 */
7169
7170 if (memlimit_is_active) {
7171 triggered = (task->memlimit_active_exc_resource ? TRUE : FALSE);
7172 } else {
7173 triggered = (task->memlimit_inactive_exc_resource ? TRUE : FALSE);
7174 }
7175
7176 return triggered;
7177 }
7178
7179 void
7180 task_mark_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active)
7181 {
7182 assert(task == current_task());
7183
7184 /*
7185 * We allow one exc_resource per process per active/inactive limit.
7186 * The limit's fatal attribute does not come into play.
7187 */
7188
7189 if (memlimit_is_active) {
7190 task->memlimit_active_exc_resource = 1;
7191 } else {
7192 task->memlimit_inactive_exc_resource = 1;
7193 }
7194 }
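/*
 * The check-then-mark pattern used by callers (see
 * task_process_crossed_limit_no_diag() below), shown here for illustration:
 *
 *     if (!task_has_triggered_exc_resource(task, active)) {
 *         ...send the EXC_RESOURCE exception...
 *         task_mark_has_triggered_exc_resource(task, active);
 *     }
 */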
7195
7196 #define HWM_USERCORE_MINSPACE 250 // free space (in MB) required *after* core file creation
7197
7198 void __attribute__((noinline))
7199 PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, send_exec_resource_options_t exception_options)
7200 {
7201 task_t task = current_task();
7202 int pid = 0;
7203 const char *procname = "unknown";
7204 mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
7205 boolean_t send_sync_exc_resource = FALSE;
7206 void *cur_bsd_info = get_bsdtask_info(current_task());
7207
7208 #ifdef MACH_BSD
7209 pid = proc_selfpid();
7210
7211 if (pid == 1) {
7212 /*
7213 * Cannot have ReportCrash analyzing
7214 * a suspended initproc.
7215 */
7216 return;
7217 }
7218
7219 if (cur_bsd_info != NULL) {
7220 procname = proc_name_address(cur_bsd_info);
7221 send_sync_exc_resource = proc_send_synchronous_EXC_RESOURCE(cur_bsd_info);
7222 }
7223 #endif
7224 #if CONFIG_COREDUMP
7225 if (hwm_user_cores) {
7226 int error;
7227 uint64_t starttime, end;
7228 clock_sec_t secs = 0;
7229 uint32_t microsecs = 0;
7230
7231 starttime = mach_absolute_time();
7232 /*
7233 * Trigger a coredump of this process. Don't proceed unless we know we won't
7234 * be filling up the disk; and ignore the core size resource limit for this
7235 * core file.
7236 */
7237 if ((error = coredump(cur_bsd_info, HWM_USERCORE_MINSPACE, COREDUMP_IGNORE_ULIMIT)) != 0) {
7238 printf("couldn't take coredump of %s[%d]: %d\n", procname, pid, error);
7239 }
7240 /*
7241 * coredump() leaves the task suspended.
7242 */
7243 task_resume_internal(current_task());
7244
7245 end = mach_absolute_time();
7246 absolutetime_to_microtime(end - starttime, &secs, &microsecs);
7247 printf("coredump of %s[%d] taken in %d secs %d microsecs\n",
7248 proc_name_address(cur_bsd_info), pid, (int)secs, microsecs);
7249 }
7250 #endif /* CONFIG_COREDUMP */
7251
7252 if (disable_exc_resource) {
7253 printf("process %s[%d] crossed memory high watermark (%d MB); EXC_RESOURCE "
7254 "suppressed by a boot-arg.\n", procname, pid, max_footprint_mb);
7255 return;
7256 }
7257 printf("process %s [%d] crossed memory %s (%d MB); EXC_RESOURCE "
7258 "\n", procname, pid, (!(exception_options & EXEC_RESOURCE_DIAGNOSTIC) ? "high watermark" : "diagnostics limit"), max_footprint_mb);
7259
7260 /*
7261 * A task that has triggered an EXC_RESOURCE, should not be
7262 * jetsammed when the device is under memory pressure. Here
7263 * we set the P_MEMSTAT_SKIP flag so that the process
7264 * will be skipped if the memorystatus_thread wakes up.
7265 *
7266 * This is a debugging aid to ensure we can get a corpse before
7267 * the jetsam thread kills the process.
7268 * Note that proc_memstat_skip is a no-op on release kernels.
7269 */
7270 proc_memstat_skip(cur_bsd_info, TRUE);
7271
7272 code[0] = code[1] = 0;
7273 EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_MEMORY);
7274 /*
7275 * Regardless of whether there was a diag memlimit violation, fatal exceptions shall always be
7276 * reported as high watermarks. In other words, if both a diag limit and a watermark are set
7277 * and the violation is for the watermark limit, a watermark shall be reported.
7278 */
7279 if (!(exception_options & EXEC_RESOURCE_FATAL)) {
7280 EXC_RESOURCE_ENCODE_FLAVOR(code[0], !(exception_options & EXEC_RESOURCE_DIAGNOSTIC) ? FLAVOR_HIGH_WATERMARK : FLAVOR_DIAG_MEMLIMIT);
7281 } else {
7282 EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_HIGH_WATERMARK);
7283 }
7284 EXC_RESOURCE_HWM_ENCODE_LIMIT(code[0], max_footprint_mb);
7285 /*
7286 * Do not generate a corpse fork if the violation is a fatal one
7287 * or the process wants synchronous EXC_RESOURCE exceptions.
7288 */
7289 if ((exception_options & EXEC_RESOURCE_FATAL) || send_sync_exc_resource || !exc_via_corpse_forking) {
7290 if (exception_options & EXEC_RESOURCE_FATAL) {
7291 vm_map_set_corpse_source(task->map);
7292 }
7293
7294 /* Do not send a EXC_RESOURCE if corpse_for_fatal_memkill is set */
7295 if (send_sync_exc_resource || !corpse_for_fatal_memkill) {
7296 /*
7297 * Use the _internal_ variant so that no user-space
7298 * process can resume our task from under us.
7299 */
7300 task_suspend_internal(task);
7301 exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
7302 task_resume_internal(task);
7303 }
7304 } else {
7305 if (disable_exc_resource_during_audio && audio_active) {
7306 printf("process %s[%d] crossed memory high watermark (%d MB); EXC_RESOURCE "
7307 "suppressed due to audio playback.\n", procname, pid, max_footprint_mb);
7308 } else {
7309 task_enqueue_exception_with_corpse(task, EXC_RESOURCE,
7310 code, EXCEPTION_CODE_MAX, NULL, FALSE);
7311 }
7312 }
7313
7314 /*
7315 * After the EXC_RESOURCE has been handled, we must clear the
7316 * P_MEMSTAT_SKIP flag so that the process can again be
7317 * considered for jetsam if the memorystatus_thread wakes up.
7318 */
7319 proc_memstat_skip(cur_bsd_info, FALSE); /* clear the flag */
7320 }
7321 /*
7322 * Callback invoked when a task exceeds its physical footprint limit.
7323 */
7324 void
7325 task_footprint_exceeded(int warning, __unused const void *param0, __unused const void *param1)
7326 {
7327 ledger_amount_t max_footprint = 0;
7328 ledger_amount_t max_footprint_mb = 0;
7329 #if DEBUG || DEVELOPMENT
7330 ledger_amount_t diag_threshold_limit_mb = 0;
7331 ledger_amount_t diag_threshold_limit = 0;
7332 #endif
7333 #if CONFIG_DEFERRED_RECLAIM
7334 ledger_amount_t current_footprint;
7335 #endif /* CONFIG_DEFERRED_RECLAIM */
7336 task_t task;
7337 send_exec_resource_is_warning is_warning = IS_NOT_WARNING;
7338 boolean_t memlimit_is_active;
7339 send_exec_resource_is_fatal memlimit_is_fatal;
7340 send_exec_resource_is_diagnostics is_diag_mem_threshold = IS_NOT_DIAGNOSTICS;
7341 if (warning == LEDGER_WARNING_DIAG_MEM_THRESHOLD) {
7342 is_diag_mem_threshold = IS_DIAGNOSTICS;
7343 is_warning = IS_WARNING;
7344 } else if (warning == LEDGER_WARNING_DIPPED_BELOW) {
7345 /*
7346 * Task memory limits only provide a warning on the way up.
7347 */
7348 return;
7349 } else if (warning == LEDGER_WARNING_ROSE_ABOVE) {
7350 /*
7351 * This task is in danger of violating a memory limit,
7352 * It has exceeded a percentage level of the limit.
7353 */
7354 is_warning = IS_WARNING;
7355 } else {
7356 /*
7357 * The task has exceeded the physical footprint limit.
7358 * This is not a warning but a true limit violation.
7359 */
7360 is_warning = IS_NOT_WARNING;
7361 }
7362
7363 task = current_task();
7364
7365 ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &max_footprint);
7366 #if DEBUG || DEVELOPMENT
7367 ledger_get_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint, &diag_threshold_limit);
7368 #endif
7369 #if CONFIG_DEFERRED_RECLAIM
7370 if (vm_deferred_reclamation_task_has_ring(task)) {
7371 /*
7372 * Task is enrolled in deferred reclamation.
7373 * Do a reclaim to ensure it's really over its limit.
7374 */
7375 vm_deferred_reclamation_task_drain(task, RECLAIM_OPTIONS_NONE);
7376 ledger_get_balance(task->ledger, task_ledgers.phys_footprint, &current_footprint);
7377 if (current_footprint < max_footprint) {
7378 return;
7379 }
7380 }
7381 #endif /* CONFIG_DEFERRED_RECLAIM */
7382 max_footprint_mb = max_footprint >> 20;
7383 #if DEBUG || DEVELOPMENT
7384 diag_threshold_limit_mb = diag_threshold_limit >> 20;
7385 #endif
7386 memlimit_is_active = task_get_memlimit_is_active(task);
7387 memlimit_is_fatal = task_get_memlimit_is_fatal(task) == FALSE ? IS_NOT_FATAL : IS_FATAL;
7388 #if DEBUG || DEVELOPMENT
7389 if (is_diag_mem_threshold == IS_NOT_DIAGNOSTICS) {
7390 task_process_crossed_limit_no_diag(task, max_footprint_mb, memlimit_is_fatal, memlimit_is_active, is_warning);
7391 } else {
7392 task_process_crossed_limit_diag(diag_threshold_limit_mb);
7393 }
7394 #else
7395 task_process_crossed_limit_no_diag(task, max_footprint_mb, memlimit_is_fatal, memlimit_is_active, is_warning);
7396 #endif
7397 }
7398
7399 /*
7400 * Actions to perform when a process has crossed a watermark or hit a fatal memory limit. */
7401 static inline void
7402 task_process_crossed_limit_no_diag(task_t task, ledger_amount_t ledger_limit_size, bool memlimit_is_fatal, bool memlimit_is_active, send_exec_resource_is_warning is_warning)
7403 {
7404 send_exec_resource_options_t exception_options = 0;
7405 if (memlimit_is_fatal) {
7406 exception_options |= EXEC_RESOURCE_FATAL;
7407 }
7408 /*
7409 * If this is an actual violation (not a warning), then generate EXC_RESOURCE exception.
7410 * We only generate the exception once per process per memlimit (active/inactive limit).
7411 * To enforce this, we monitor state based on the memlimit's active/inactive attribute
7412 * and we disable it by marking that memlimit as exception triggered.
7413 */
7414 if (is_warning == IS_NOT_WARNING && !task_has_triggered_exc_resource(task, memlimit_is_active)) {
7415 PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND((int)ledger_limit_size, exception_options);
7416 // If it was not a diag threshold (i.e. it was a memory limit), then we do not want more signalling;
7417 // however, if it was a diag limit, the user may load a different limit and signal the violation again.
7418 memorystatus_log_exception((int)ledger_limit_size, memlimit_is_active, memlimit_is_fatal);
7419 task_mark_has_triggered_exc_resource(task, memlimit_is_active);
7420 }
7421 memorystatus_on_ledger_footprint_exceeded(is_warning == IS_NOT_WARNING ? FALSE : TRUE, memlimit_is_active, memlimit_is_fatal);
7422 }
7423
7424 #if DEBUG || DEVELOPMENT
7425 /**
7426 * Actions to take when a process has crossed the diagnostics limit
7427 */
7428 static inline void
7429 task_process_crossed_limit_diag(ledger_amount_t ledger_limit_size)
7430 {
7431 /*
7432 * If this is an actual violation (not a warning), then generate EXC_RESOURCE exception.
7433 * In the case of the diagnostics thresholds, the exception will be signaled only once, but the
7434 * inhibit / rearm mechanism is performed at the ledger level.
7435 */
7436 send_exec_resource_options_t exception_options = EXEC_RESOURCE_DIAGNOSTIC;
7437 PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND((int)ledger_limit_size, exception_options);
7438 memorystatus_log_diag_threshold_exception((int)ledger_limit_size);
7439 }
7440 #endif
7441
7442 extern int proc_check_footprint_priv(void);
7443
7444 kern_return_t
7445 task_set_phys_footprint_limit(
7446 task_t task,
7447 int new_limit_mb,
7448 int *old_limit_mb)
7449 {
7450 kern_return_t error;
7451
7452 boolean_t memlimit_is_active;
7453 boolean_t memlimit_is_fatal;
7454
7455 if ((error = proc_check_footprint_priv())) {
7456 return KERN_NO_ACCESS;
7457 }
7458
7459 /*
7460 * This call should probably be obsoleted.
7461 * But for now, we default to current state.
7462 */
7463 memlimit_is_active = task_get_memlimit_is_active(task);
7464 memlimit_is_fatal = task_get_memlimit_is_fatal(task);
7465
7466 return task_set_phys_footprint_limit_internal(task, new_limit_mb, old_limit_mb, memlimit_is_active, memlimit_is_fatal);
7467 }
7468
7469 /*
7470 * Set the limit of diagnostics memory consumption for a concrete task
7471 */
7472 #if CONFIG_MEMORYSTATUS
7473 #if DEVELOPMENT || DEBUG
7474 kern_return_t
7475 task_set_diag_footprint_limit(
7476 task_t task,
7477 uint64_t new_limit_mb,
7478 uint64_t *old_limit_mb)
7479 {
7480 kern_return_t error;
7481
7482 if ((error = proc_check_footprint_priv())) {
7483 return KERN_NO_ACCESS;
7484 }
7485
7486 return task_set_diag_footprint_limit_internal(task, new_limit_mb, old_limit_mb);
7487 }
7488
7489 #endif // DEVELOPMENT || DEBUG
7490 #endif // CONFIG_MEMORYSTATUS
7491
7492 kern_return_t
7493 task_convert_phys_footprint_limit(
7494 int limit_mb,
7495 int *converted_limit_mb)
7496 {
7497 if (limit_mb == -1) {
7498 /*
7499 * No limit
7500 */
7501 if (max_task_footprint != 0) {
7502 *converted_limit_mb = (int)(max_task_footprint / 1024 / 1024); /* bytes to MB */
7503 } else {
7504 *converted_limit_mb = (int)(LEDGER_LIMIT_INFINITY >> 20);
7505 }
7506 } else {
7507 /* nothing to convert */
7508 *converted_limit_mb = limit_mb;
7509 }
7510 return KERN_SUCCESS;
7511 }
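/*
 * Example (illustrative): task_convert_phys_footprint_limit(-1, &mb)
 * reports the effective "no limit" value: max_task_footprint expressed
 * in MB when a global footprint cap is configured, otherwise
 * LEDGER_LIMIT_INFINITY >> 20.
 */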
7512
7513 kern_return_t
7514 task_set_phys_footprint_limit_internal(
7515 task_t task,
7516 int new_limit_mb,
7517 int *old_limit_mb,
7518 boolean_t memlimit_is_active,
7519 boolean_t memlimit_is_fatal)
7520 {
7521 ledger_amount_t old;
7522 kern_return_t ret;
7523 #if DEVELOPMENT || DEBUG
7524 diagthreshold_check_return diag_threshold_validity;
7525 #endif
7526 ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &old);
7527
7528 if (ret != KERN_SUCCESS) {
7529 return ret;
7530 }
7531 /**
7532 * We may need to re-enable the diag threshold, so fetch its value
7533 * and current status.
7534 */
7535 #if DEVELOPMENT || DEBUG
7536 diag_threshold_validity = task_check_memorythreshold_is_valid( task, new_limit_mb, false);
7537 /**
7538 * If the footprint limit and the diagnostics threshold are going to be the same, disable the threshold.
7539 */
7540 if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7541 ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7542 } else if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7543 ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7544 }
7545 #endif
7546
7547 /*
7548 * Check that limit >> 20 will not give an "unexpected" 32-bit
7549 * result. There are, however, implicit assumptions that -1 mb limit
7550 * equates to LEDGER_LIMIT_INFINITY.
7551 */
7552 assert(((old & 0xFFF0000000000000LL) == 0) || (old == LEDGER_LIMIT_INFINITY));
7553
7554 if (old_limit_mb) {
7555 *old_limit_mb = (int)(old >> 20);
7556 }
7557
7558 if (new_limit_mb == -1) {
7559 /*
7560 * Caller wishes to remove the limit.
7561 */
7562 ledger_set_limit(task->ledger, task_ledgers.phys_footprint,
7563 max_task_footprint ? max_task_footprint : LEDGER_LIMIT_INFINITY,
7564 max_task_footprint ? (uint8_t)max_task_footprint_warning_level : 0);
7565
7566 task_lock(task);
7567 task_set_memlimit_is_active(task, memlimit_is_active);
7568 task_set_memlimit_is_fatal(task, memlimit_is_fatal);
7569 task_unlock(task);
7570 /**
7571 * If the diagnostics threshold was disabled and we now have a new limit, we have to re-enable it.
7572 */
7573 #if DEVELOPMENT || DEBUG
7574 if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7575 ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7576 } else if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7577 ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7578 }
7579 #endif
7580 return KERN_SUCCESS;
7581 }
7582
7583 #ifdef CONFIG_NOMONITORS
7584 return KERN_SUCCESS;
7585 #endif /* CONFIG_NOMONITORS */
7586
7587 task_lock(task);
7588
7589 if ((memlimit_is_active == task_get_memlimit_is_active(task)) &&
7590 (memlimit_is_fatal == task_get_memlimit_is_fatal(task)) &&
7591 (((ledger_amount_t)new_limit_mb << 20) == old)) {
7592 /*
7593 * memlimit state is not changing
7594 */
7595 task_unlock(task);
7596 return KERN_SUCCESS;
7597 }
7598
7599 task_set_memlimit_is_active(task, memlimit_is_active);
7600 task_set_memlimit_is_fatal(task, memlimit_is_fatal);
7601
7602 ledger_set_limit(task->ledger, task_ledgers.phys_footprint,
7603 (ledger_amount_t)new_limit_mb << 20, PHYS_FOOTPRINT_WARNING_LEVEL);
7604
7605 if (task == current_task()) {
7606 ledger_check_new_balance(current_thread(), task->ledger,
7607 task_ledgers.phys_footprint);
7608 }
7609
7610 task_unlock(task);
7611 #if DEVELOPMENT || DEBUG
7612 if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7613 ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7614 }
7615 #endif
7616
7617 return KERN_SUCCESS;
7618 }
7619
7620 #if RESETTABLE_DIAG_FOOTPRINT_LIMITS
7621 kern_return_t
7622 task_set_diag_footprint_limit_internal(
7623 task_t task,
7624 uint64_t new_limit_bytes,
7625 uint64_t *old_limit_bytes)
7626 {
7627 ledger_amount_t old = 0;
7628 kern_return_t ret = KERN_SUCCESS;
7629 diagthreshold_check_return diag_threshold_validity;
7630 ret = ledger_get_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint, &old);
7631
7632 if (ret != KERN_SUCCESS) {
7633 return ret;
7634 }
7635 /**
7636 * We may need to re-enable the diag threshold, so fetch its value
7637 * and current status.
7638 */
7639 diag_threshold_validity = task_check_memorythreshold_is_valid( task, new_limit_bytes >> 20, true);
7640 /**
7641 * If the footprint limit and the diagnostics threshold are going to be the same, disable the threshold.
7642 */
7643 if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7644 ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7645 }
7646
7647 /*
7648 * Check that limit >> 20 will not give an "unexpected" 32-bit
7649 * result. There are, however, implicit assumptions that -1 mb limit
7650 * equates to LEDGER_LIMIT_INFINITY.
7651 */
7652 if (old_limit_bytes) {
7653 *old_limit_bytes = old;
7654 }
7655
7656 if (new_limit_bytes == -1) {
7657 /*
7658 * Caller wishes to remove the limit.
7659 */
7660 ledger_set_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint,
7661 LEDGER_LIMIT_INFINITY);
7662 /*
7663 * If the memory diagnostics flag was disabled, enable it again.
7664 */
7665 ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7666 return KERN_SUCCESS;
7667 }
7668
7669 #ifdef CONFIG_NOMONITORS
7670 return KERN_SUCCESS;
7671 #else
7672
7673 task_lock(task);
7674 ledger_set_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint,
7675 (ledger_amount_t)new_limit_bytes );
7676 if (task == current_task()) {
7677 ledger_check_new_balance(current_thread(), task->ledger,
7678 task_ledgers.phys_footprint);
7679 }
7680
7681 task_unlock(task);
7682 if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7683 ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7684 } else if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7685 ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7686 }
7687
7688 return KERN_SUCCESS;
7689 #endif /* CONFIG_NOMONITORS */
7690 }
7691
7692 kern_return_t
7693 task_get_diag_footprint_limit_internal(
7694 task_t task,
7695 uint64_t *new_limit_bytes,
7696 bool *threshold_disabled)
7697 {
7698 ledger_amount_t ledger_limit;
7699 kern_return_t ret = KERN_SUCCESS;
7700 if (new_limit_bytes == NULL || threshold_disabled == NULL) {
7701 return KERN_INVALID_ARGUMENT;
7702 }
7703 ret = ledger_get_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint, &ledger_limit);
7704 if (ledger_limit == LEDGER_LIMIT_INFINITY) {
7705 ledger_limit = -1;
7706 }
7707 if (ret == KERN_SUCCESS) {
7708 *new_limit_bytes = ledger_limit;
7709 ret = ledger_is_diag_threshold_enabled(task->ledger, task_ledgers.phys_footprint, threshold_disabled);
7710 }
7711 return ret;
7712 }
7713 #endif /* RESETTABLE_DIAG_FOOTPRINT_LIMITS */
7714
7715
7716 kern_return_t
7717 task_get_phys_footprint_limit(
7718 task_t task,
7719 int *limit_mb)
7720 {
7721 ledger_amount_t limit;
7722 kern_return_t ret;
7723
7724 ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &limit);
7725 if (ret != KERN_SUCCESS) {
7726 return ret;
7727 }
7728
7729 /*
7730 * Check that limit >> 20 will not give an "unexpected" signed, 32-bit
7731 * result. There are, however, implicit assumptions that -1 mb limit
7732 * equates to LEDGER_LIMIT_INFINITY.
7733 */
7734 assert(((limit & 0xFFF0000000000000LL) == 0) || (limit == LEDGER_LIMIT_INFINITY));
7735 *limit_mb = (int)(limit >> 20);
7736
7737 return KERN_SUCCESS;
7738 }
7739 #else /* CONFIG_MEMORYSTATUS */
7740 kern_return_t
7741 task_set_phys_footprint_limit(
7742 __unused task_t task,
7743 __unused int new_limit_mb,
7744 __unused int *old_limit_mb)
7745 {
7746 return KERN_FAILURE;
7747 }
7748
7749 kern_return_t
7750 task_get_phys_footprint_limit(
7751 __unused task_t task,
7752 __unused int *limit_mb)
7753 {
7754 return KERN_FAILURE;
7755 }
7756 #endif /* CONFIG_MEMORYSTATUS */
7757
7758 security_token_t *
7759 task_get_sec_token(task_t task)
7760 {
7761 return &task_get_ro(task)->task_tokens.sec_token;
7762 }
7763
7764 void
7765 task_set_sec_token(task_t task, security_token_t *token)
7766 {
7767 zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7768 task_tokens.sec_token, token);
7769 }
7770
7771 audit_token_t *
7772 task_get_audit_token(task_t task)
7773 {
7774 return &task_get_ro(task)->task_tokens.audit_token;
7775 }
7776
7777 void
7778 task_set_audit_token(task_t task, audit_token_t *token)
7779 {
7780 zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7781 task_tokens.audit_token, token);
7782 }
7783
7784 void
7785 task_set_tokens(task_t task, security_token_t *sec_token, audit_token_t *audit_token)
7786 {
7787 struct task_token_ro_data tokens;
7788
7789 tokens = task_get_ro(task)->task_tokens;
7790 tokens.sec_token = *sec_token;
7791 tokens.audit_token = *audit_token;
7792
7793 zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task), task_tokens,
7794 &tokens);
7795 }
7796
7797 boolean_t
7798 task_is_privileged(task_t task)
7799 {
7800 return task_get_sec_token(task)->val[0] == 0;
7801 }
7802
7803 #ifdef CONFIG_MACF
7804 uint8_t *
7805 task_get_mach_trap_filter_mask(task_t task)
7806 {
7807 return task_get_ro(task)->task_filters.mach_trap_filter_mask;
7808 }
7809
7810 void
7811 task_set_mach_trap_filter_mask(task_t task, uint8_t *mask)
7812 {
7813 zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7814 task_filters.mach_trap_filter_mask, &mask);
7815 }
7816
7817 uint8_t *
7818 task_get_mach_kobj_filter_mask(task_t task)
7819 {
7820 return task_get_ro(task)->task_filters.mach_kobj_filter_mask;
7821 }
7822
7823 mach_vm_address_t
7824 task_get_all_image_info_addr(task_t task)
7825 {
7826 return task->all_image_info_addr;
7827 }
7828
7829 void
7830 task_set_mach_kobj_filter_mask(task_t task, uint8_t *mask)
7831 {
7832 zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7833 task_filters.mach_kobj_filter_mask, &mask);
7834 }
7835
7836 #endif /* CONFIG_MACF */
7837
7838 void
7839 task_set_thread_limit(task_t task, uint16_t thread_limit)
7840 {
7841 assert(task != kernel_task);
7842 if (thread_limit <= TASK_MAX_THREAD_LIMIT) {
7843 task_lock(task);
7844 task->task_thread_limit = thread_limit;
7845 task_unlock(task);
7846 }
7847 }
7848
7849 #if CONFIG_PROC_RESOURCE_LIMITS
7850 kern_return_t
7851 task_set_port_space_limits(task_t task, uint32_t soft_limit, uint32_t hard_limit)
7852 {
7853 return ipc_space_set_table_size_limits(task->itk_space, soft_limit, hard_limit);
7854 }
7855 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
7856
7857 #if XNU_TARGET_OS_OSX
7858 boolean_t
7859 task_has_system_version_compat_enabled(task_t task)
7860 {
7861 boolean_t enabled = FALSE;
7862
7863 task_lock(task);
7864 enabled = (task->t_flags & TF_SYS_VERSION_COMPAT);
7865 task_unlock(task);
7866
7867 return enabled;
7868 }
7869
7870 void
7871 task_set_system_version_compat_enabled(task_t task, boolean_t enable_system_version_compat)
7872 {
7873 assert(task == current_task());
7874 assert(task != kernel_task);
7875
7876 task_lock(task);
7877 if (enable_system_version_compat) {
7878 task->t_flags |= TF_SYS_VERSION_COMPAT;
7879 } else {
7880 task->t_flags &= ~TF_SYS_VERSION_COMPAT;
7881 }
7882 task_unlock(task);
7883 }
7884 #endif /* XNU_TARGET_OS_OSX */
7885
7886 /*
7887 * We need to export some functions to other components that
7888 * are currently implemented in macros within the osfmk
7889 * component. Just export them as functions of the same name.
7890 */
7891 boolean_t
7892 is_kerneltask(task_t t)
7893 {
7894 if (t == kernel_task) {
7895 return TRUE;
7896 }
7897
7898 return FALSE;
7899 }
7900
7901 boolean_t
7902 is_corpsefork(task_t t)
7903 {
7904 return task_is_a_corpse_fork(t);
7905 }
7906
7907 task_t
7908 current_task_early(void)
7909 {
7910 if (__improbable(startup_phase < STARTUP_SUB_EARLY_BOOT)) {
7911 if (current_thread()->t_tro == NULL) {
7912 return TASK_NULL;
7913 }
7914 }
7915 return get_threadtask(current_thread());
7916 }
7917
7918 task_t
7919 current_task(void)
7920 {
7921 return get_threadtask(current_thread());
7922 }
7923
7924 /* defined in bsd/kern/kern_prot.c */
7925 extern int get_audit_token_pid(audit_token_t *audit_token);
7926
7927 int
7928 task_pid(task_t task)
7929 {
7930 if (task) {
7931 return get_audit_token_pid(task_get_audit_token(task));
7932 }
7933 return -1;
7934 }
7935
7936 #if __has_feature(ptrauth_calls)
7937 /*
7938 * Get the shared region id and jop signing key for the task.
7939 * The function will allocate a kalloc buffer and return
7940 * it to the caller; the caller must free it. This is used
7941 * for getting the information via task port.
7942 */
7943 char *
7944 task_get_vm_shared_region_id_and_jop_pid(task_t task, uint64_t *jop_pid)
7945 {
7946 size_t len;
7947 char *shared_region_id = NULL;
7948
7949 task_lock(task);
7950 if (task->shared_region_id == NULL) {
7951 task_unlock(task);
7952 return NULL;
7953 }
7954 len = strlen(task->shared_region_id) + 1;
7955
7956 /* don't hold task lock while allocating */
7957 task_unlock(task);
7958 shared_region_id = kalloc_data(len, Z_WAITOK);
7959 task_lock(task);
7960
7961 if (task->shared_region_id == NULL) {
7962 task_unlock(task);
7963 kfree_data(shared_region_id, len);
7964 return NULL;
7965 }
7966 assert(len == strlen(task->shared_region_id) + 1); /* should never change */
7967 strlcpy(shared_region_id, task->shared_region_id, len);
7968 task_unlock(task);
7969
7970 /* find key from its auth pager */
7971 if (jop_pid != NULL) {
7972 *jop_pid = shared_region_find_key(shared_region_id);
7973 }
7974
7975 return shared_region_id;
7976 }
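/*
 * Caller sketch (hypothetical, for illustration only): the returned
 * buffer is owned by the caller and must be freed with kfree_data().
 *
 *     uint64_t jop_pid;
 *     char *id = task_get_vm_shared_region_id_and_jop_pid(task, &jop_pid);
 *     if (id != NULL) {
 *         ...use id and jop_pid...
 *         kfree_data(id, strlen(id) + 1);
 *     }
 */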
7977
7978 /*
7979 * set the shared region id for a task
7980 */
7981 void
7982 task_set_shared_region_id(task_t task, char *id)
7983 {
7984 char *old_id;
7985
7986 task_lock(task);
7987 old_id = task->shared_region_id;
7988 task->shared_region_id = id;
7989 task->shared_region_auth_remapped = FALSE;
7990 task_unlock(task);
7991
7992 /* free any pre-existing shared region id */
7993 if (old_id != NULL) {
7994 shared_region_key_dealloc(old_id);
7995 kfree_data(old_id, strlen(old_id) + 1);
7996 }
7997 }
7998 #endif /* __has_feature(ptrauth_calls) */
7999
8000 /*
8001 * This routine finds a thread in a task by its unique id
8002 * Returns a referenced thread or THREAD_NULL if the thread was not found
8003 *
8004 * TODO: This is super inefficient - it's an O(threads in task) list walk!
8005 * We should make a tid hash, or transition all tid clients to thread ports
8006 *
8007 * Precondition: No locks held (will take task lock)
8008 */
8009 thread_t
8010 task_findtid(task_t task, uint64_t tid)
8011 {
8012 thread_t self = current_thread();
8013 thread_t found_thread = THREAD_NULL;
8014 thread_t iter_thread = THREAD_NULL;
8015
8016 /* Short-circuit the lookup if we're looking up ourselves */
8017 if (tid == self->thread_id || tid == TID_NULL) {
8018 assert(get_threadtask(self) == task);
8019
8020 thread_reference(self);
8021
8022 return self;
8023 }
8024
8025 task_lock(task);
8026
8027 queue_iterate(&task->threads, iter_thread, thread_t, task_threads) {
8028 if (iter_thread->thread_id == tid) {
8029 found_thread = iter_thread;
8030 thread_reference(found_thread);
8031 break;
8032 }
8033 }
8034
8035 task_unlock(task);
8036
8037 return found_thread;
8038 }
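/*
 * Usage sketch (hypothetical, for illustration only): the returned
 * thread carries a reference that the caller must drop.
 *
 *     thread_t t = task_findtid(task, tid);
 *     if (t != THREAD_NULL) {
 *         ...inspect t...
 *         thread_deallocate(t);
 *     }
 */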
8039
8040 int
8041 pid_from_task(task_t task)
8042 {
8043 int pid = -1;
8044 void *bsd_info = get_bsdtask_info(task);
8045
8046 if (bsd_info) {
8047 pid = proc_pid(bsd_info);
8048 } else {
8049 pid = task_pid(task);
8050 }
8051
8052 return pid;
8053 }
8054
8055 /*
8056 * Control the CPU usage monitor for a task.
8057 */
8058 kern_return_t
8059 task_cpu_usage_monitor_ctl(task_t task, uint32_t *flags)
8060 {
8061 int error = KERN_SUCCESS;
8062
8063 if (*flags & CPUMON_MAKE_FATAL) {
8064 task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_FATAL_CPUMON;
8065 } else {
8066 error = KERN_INVALID_ARGUMENT;
8067 }
8068
8069 return error;
8070 }
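/*
 * Usage sketch (hypothetical, for illustration only): the only supported
 * operation is making CPU-monitor violations fatal for the task.
 *
 *     uint32_t flags = CPUMON_MAKE_FATAL;
 *     task_cpu_usage_monitor_ctl(task, &flags);
 */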
8071
8072 /*
8073 * Control the wakeups monitor for a task.
8074 */
8075 kern_return_t
8076 task_wakeups_monitor_ctl(task_t task, uint32_t *flags, int32_t *rate_hz)
8077 {
8078 ledger_t ledger = task->ledger;
8079
8080 task_lock(task);
8081 if (*flags & WAKEMON_GET_PARAMS) {
8082 ledger_amount_t limit;
8083 uint64_t period;
8084
8085 ledger_get_limit(ledger, task_ledgers.interrupt_wakeups, &limit);
8086 ledger_get_period(ledger, task_ledgers.interrupt_wakeups, &period);
8087
8088 if (limit != LEDGER_LIMIT_INFINITY) {
8089 /*
8090 * An active limit means the wakeups monitor is enabled.
8091 */
8092 *rate_hz = (int32_t)(limit / (int64_t)(period / NSEC_PER_SEC));
8093 *flags = WAKEMON_ENABLE;
8094 if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON) {
8095 *flags |= WAKEMON_MAKE_FATAL;
8096 }
8097 } else {
8098 *flags = WAKEMON_DISABLE;
8099 *rate_hz = -1;
8100 }
8101
8102 /*
8103 * If WAKEMON_GET_PARAMS is present in flags, all other flags are ignored.
8104 */
8105 task_unlock(task);
8106 return KERN_SUCCESS;
8107 }
8108
8109 if (*flags & WAKEMON_ENABLE) {
8110 if (*flags & WAKEMON_SET_DEFAULTS) {
8111 *rate_hz = task_wakeups_monitor_rate;
8112 }
8113
8114 #ifndef CONFIG_NOMONITORS
8115 if (*flags & WAKEMON_MAKE_FATAL) {
8116 task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON;
8117 }
8118 #endif /* CONFIG_NOMONITORS */
8119
8120 if (*rate_hz <= 0) {
8121 task_unlock(task);
8122 return KERN_INVALID_ARGUMENT;
8123 }
8124
8125 #ifndef CONFIG_NOMONITORS
8126 ledger_set_limit(ledger, task_ledgers.interrupt_wakeups, *rate_hz * task_wakeups_monitor_interval,
8127 (uint8_t)task_wakeups_monitor_ustackshots_trigger_pct);
8128 ledger_set_period(ledger, task_ledgers.interrupt_wakeups, task_wakeups_monitor_interval * NSEC_PER_SEC);
8129 ledger_enable_callback(ledger, task_ledgers.interrupt_wakeups);
8130 #endif /* CONFIG_NOMONITORS */
8131 } else if (*flags & WAKEMON_DISABLE) {
8132 /*
8133 * Caller wishes to disable wakeups monitor on the task.
8134 *
8135 * Remove the limit & callback on the wakeups ledger entry.
8136 */
8137 ledger_disable_refill(ledger, task_ledgers.interrupt_wakeups);
8138 ledger_disable_callback(ledger, task_ledgers.interrupt_wakeups);
8139 }
8140
8141 task_unlock(task);
8142 return KERN_SUCCESS;
8143 }
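/*
 * Usage sketch (hypothetical, for illustration only): query the current
 * parameters, then enable the monitor at the default rate. When
 * WAKEMON_GET_PARAMS is set, all other flags are ignored and the call
 * only reports state.
 *
 *     uint32_t flags = WAKEMON_GET_PARAMS;
 *     int32_t rate_hz = 0;
 *     task_wakeups_monitor_ctl(task, &flags, &rate_hz);
 *
 *     flags = WAKEMON_ENABLE | WAKEMON_SET_DEFAULTS;
 *     task_wakeups_monitor_ctl(task, &flags, &rate_hz); // rate_hz gets the default
 */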
8144
8145 void
8146 task_wakeups_rate_exceeded(int warning, __unused const void *param0, __unused const void *param1)
8147 {
8148 if (warning == 0) {
8149 SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS();
8150 }
8151 }
8152
8153 TUNABLE(bool, enable_wakeup_reports, "enable_wakeup_reports", false); /* Enable wakeup reports. */
8154
8155 void __attribute__((noinline))
8156 SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void)
8157 {
8158 task_t task = current_task();
8159 int pid = 0;
8160 const char *procname = "unknown";
8161 boolean_t fatal;
8162 kern_return_t kr;
8163 #ifdef EXC_RESOURCE_MONITORS
8164 mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
8165 #endif /* EXC_RESOURCE_MONITORS */
8166 struct ledger_entry_info lei;
8167
8168 #ifdef MACH_BSD
8169 pid = proc_selfpid();
8170 if (get_bsdtask_info(task) != NULL) {
8171 procname = proc_name_address(get_bsdtask_info(current_task()));
8172 }
8173 #endif
8174
8175 ledger_get_entry_info(task->ledger, task_ledgers.interrupt_wakeups, &lei);
8176
8177 /*
8178 * Disable the exception notification so we don't overwhelm
8179 * the listener with an endless stream of redundant exceptions.
8180 * TODO: detect whether another thread is already reporting the violation.
8181 */
8182 uint32_t flags = WAKEMON_DISABLE;
8183 task_wakeups_monitor_ctl(task, &flags, NULL);
8184
8185 fatal = task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON;
8186 trace_resource_violation(RMON_CPUWAKES_VIOLATED, &lei);
8187 os_log(OS_LOG_DEFAULT, "process %s[%d] caught waking the CPU %llu times "
8188 "over ~%llu seconds, averaging %llu wakes / second and "
8189 "violating a %slimit of %llu wakes over %llu seconds.\n",
8190 procname, pid,
8191 lei.lei_balance, lei.lei_last_refill / NSEC_PER_SEC,
8192 lei.lei_last_refill == 0 ? 0 :
8193 (NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill),
8194 fatal ? "FATAL " : "",
8195 lei.lei_limit, lei.lei_refill_period / NSEC_PER_SEC);
8196
8197 if (enable_wakeup_reports) {
8198 kr = send_resource_violation(send_cpu_wakes_violation, task, &lei,
8199 fatal ? kRNFatalLimitFlag : 0);
8200 if (kr) {
8201 printf("send_resource_violation(CPU wakes, ...): error %#x\n", kr);
8202 }
8203 }
8204
8205 #ifdef EXC_RESOURCE_MONITORS
8206 if (disable_exc_resource) {
8207 printf("process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
8208 "suppressed by a boot-arg\n", procname, pid);
8209 return;
8210 }
8211 if (disable_exc_resource_during_audio && audio_active) {
8212 os_log(OS_LOG_DEFAULT, "process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
8213 "suppressed due to audio playback\n", procname, pid);
8214 return;
8215 }
8216 if (lei.lei_last_refill == 0) {
8217 os_log(OS_LOG_DEFAULT, "process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
8218 "suppressed due to lei.lei_last_refill = 0 \n", procname, pid);
8219 }
8220
8221 code[0] = code[1] = 0;
8222 EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_WAKEUPS);
8223 EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_WAKEUPS_MONITOR);
8224 EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_PERMITTED(code[0],
8225 NSEC_PER_SEC * lei.lei_limit / lei.lei_refill_period);
8226 EXC_RESOURCE_CPUMONITOR_ENCODE_OBSERVATION_INTERVAL(code[0],
8227 lei.lei_last_refill);
8228 EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_OBSERVED(code[1],
8229 NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill);
8230 exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
8231 #endif /* EXC_RESOURCE_MONITORS */
8232
8233 if (fatal) {
8234 task_terminate_internal(task);
8235 }
8236 }
8237
8238 static boolean_t
8239 global_update_logical_writes(int64_t io_delta, int64_t *global_write_count)
8240 {
8241 int64_t old_count, new_count;
8242 boolean_t needs_telemetry;
8243
8244 do {
8245 new_count = old_count = *global_write_count;
8246 new_count += io_delta;
8247 if (new_count >= io_telemetry_limit) {
8248 new_count = 0;
8249 needs_telemetry = TRUE;
8250 } else {
8251 needs_telemetry = FALSE;
8252 }
8253 } while (!OSCompareAndSwap64(old_count, new_count, global_write_count));
8254 return needs_telemetry;
8255 }
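/*
 * Worked example (illustrative numbers): with io_telemetry_limit at
 * 100MB, a global count of 90MB and an incoming io_delta of 20MB,
 * new_count reaches 110MB >= limit, so the counter resets to 0 and the
 * caller is told to emit telemetry. Racing updaters retry the
 * compare-and-swap loop until their delta is applied exactly once.
 */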
8256
8257 void
8258 task_update_physical_writes(__unused task_t task, __unused task_physical_write_flavor_t flavor, __unused uint64_t io_size, __unused task_balance_flags_t flags)
8259 {
8260 #if CONFIG_PHYS_WRITE_ACCT
8261 if (!io_size) {
8262 return;
8263 }
8264
8265 /*
8266 * task == NULL means that we have to update kernel_task ledgers
8267 */
8268 if (!task) {
8269 task = kernel_task;
8270 }
8271
8272 KDBG((VMDBG_CODE(DBG_VM_PHYS_WRITE_ACCT)) | DBG_FUNC_NONE,
8273 task_pid(task), flavor, io_size, flags);
8274 DTRACE_IO4(physical_writes, struct task *, task, task_physical_write_flavor_t, flavor, uint64_t, io_size, task_balance_flags_t, flags);
8275
8276 if (flags & TASK_BALANCE_CREDIT) {
8277 if (flavor == TASK_PHYSICAL_WRITE_METADATA) {
8278 OSAddAtomic64(io_size, (SInt64 *)&(task->task_fs_metadata_writes));
8279 ledger_credit_nocheck(task->ledger, task_ledgers.fs_metadata_writes, io_size);
8280 }
8281 } else if (flags & TASK_BALANCE_DEBIT) {
8282 if (flavor == TASK_PHYSICAL_WRITE_METADATA) {
8283 OSAddAtomic64(-1 * io_size, (SInt64 *)&(task->task_fs_metadata_writes));
8284 ledger_debit_nocheck(task->ledger, task_ledgers.fs_metadata_writes, io_size);
8285 }
8286 }
8287 #endif /* CONFIG_PHYS_WRITE_ACCT */
8288 }
8289
8290 void
8291 task_update_logical_writes(task_t task, uint32_t io_size, int flags, void *vp)
8292 {
8293 int64_t io_delta = 0;
8294 int64_t * global_counter_to_update;
8295 boolean_t needs_telemetry = FALSE;
8296 boolean_t is_external_device = FALSE;
8297 int ledger_to_update = 0;
8298 struct task_writes_counters * writes_counters_to_update;
8299
8300 if ((!task) || (!io_size) || (!vp)) {
8301 return;
8302 }
8303
8304 KDBG((VMDBG_CODE(DBG_VM_DATA_WRITE)) | DBG_FUNC_NONE,
8305 task_pid(task), io_size, flags, (uintptr_t)VM_KERNEL_ADDRPERM(vp));
8306 DTRACE_IO4(logical_writes, struct task *, task, uint32_t, io_size, int, flags, vnode *, vp);
8307
8308 // Is the drive backing this vnode internal or external to the system?
8309 if (vnode_isonexternalstorage(vp) == false) {
8310 global_counter_to_update = &global_logical_writes_count;
8311 ledger_to_update = task_ledgers.logical_writes;
8312 writes_counters_to_update = &task->task_writes_counters_internal;
8313 is_external_device = FALSE;
8314 } else {
8315 global_counter_to_update = &global_logical_writes_to_external_count;
8316 ledger_to_update = task_ledgers.logical_writes_to_external;
8317 writes_counters_to_update = &task->task_writes_counters_external;
8318 is_external_device = TRUE;
8319 }
8320
8321 switch (flags) {
8322 case TASK_WRITE_IMMEDIATE:
8323 OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_immediate_writes));
8324 ledger_credit(task->ledger, ledger_to_update, io_size);
8325 if (!is_external_device) {
8326 coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
8327 }
8328 break;
8329 case TASK_WRITE_DEFERRED:
8330 OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_deferred_writes));
8331 ledger_credit(task->ledger, ledger_to_update, io_size);
8332 if (!is_external_device) {
8333 coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
8334 }
8335 break;
8336 case TASK_WRITE_INVALIDATED:
8337 OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_invalidated_writes));
8338 ledger_debit(task->ledger, ledger_to_update, io_size);
8339 if (!is_external_device) {
8340 coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, FALSE, io_size);
8341 }
8342 break;
8343 case TASK_WRITE_METADATA:
8344 OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_metadata_writes));
8345 ledger_credit(task->ledger, ledger_to_update, io_size);
8346 if (!is_external_device) {
8347 coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
8348 }
8349 break;
8350 }
8351
8352 io_delta = (flags == TASK_WRITE_INVALIDATED) ? ((int64_t)io_size * -1ll) : ((int64_t)io_size);
8353 if (io_telemetry_limit != 0) {
8354 /* If io_telemetry_limit is 0, disable global updates and I/O telemetry */
8355 needs_telemetry = global_update_logical_writes(io_delta, global_counter_to_update);
8356 if (needs_telemetry && !is_external_device) {
8357 act_set_io_telemetry_ast(current_thread());
8358 }
8359 }
8360 }
8361
8362 /*
8363 * Control the I/O monitor for a task.
8364 */
8365 kern_return_t
8366 task_io_monitor_ctl(task_t task, uint32_t *flags)
8367 {
8368 ledger_t ledger = task->ledger;
8369
8370 task_lock(task);
8371 if (*flags & IOMON_ENABLE) {
8372 /* Configure the physical I/O ledger */
8373 ledger_set_limit(ledger, task_ledgers.physical_writes, (task_iomon_limit_mb * 1024 * 1024), 0);
8374 ledger_set_period(ledger, task_ledgers.physical_writes, (task_iomon_interval_secs * NSEC_PER_SEC));
8375 } else if (*flags & IOMON_DISABLE) {
8376 /*
8377 * Caller wishes to disable I/O monitor on the task.
8378 */
8379 ledger_disable_refill(ledger, task_ledgers.physical_writes);
8380 ledger_disable_callback(ledger, task_ledgers.physical_writes);
8381 }
8382
8383 task_unlock(task);
8384 return KERN_SUCCESS;
8385 }
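/*
 * Usage sketch (hypothetical, for illustration only):
 *
 *     uint32_t flags = IOMON_ENABLE;
 *     task_io_monitor_ctl(task, &flags);  // arm the physical-writes ledger
 *     ...
 *     flags = IOMON_DISABLE;
 *     task_io_monitor_ctl(task, &flags);  // remove the limit and callback
 */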
8386
8387 void
8388 task_io_rate_exceeded(int warning, const void *param0, __unused const void *param1)
8389 {
8390 if (warning == 0) {
8391 SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO((int)param0);
8392 }
8393 }
8394
8395 void __attribute__((noinline))
8396 SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor)
8397 {
8398 int pid = 0;
8399 task_t task = current_task();
8400 #ifdef EXC_RESOURCE_MONITORS
8401 mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
8402 #endif /* EXC_RESOURCE_MONITORS */
8403 struct ledger_entry_info lei = {};
8404 kern_return_t kr;
8405
8406 #ifdef MACH_BSD
8407 pid = proc_selfpid();
8408 #endif
8409 /*
8410 * Get the ledger entry info. We need to do this before disabling the exception
8411 * to get correct values for all fields.
8412 */
8413 switch (flavor) {
8414 case FLAVOR_IO_PHYSICAL_WRITES:
8415 ledger_get_entry_info(task->ledger, task_ledgers.physical_writes, &lei);
8416 break;
8417 }
8418
8419
8420 /*
8421 * Disable the exception notification so we don't overwhelm
8422 * the listener with an endless stream of redundant exceptions.
8423 * TODO: detect whether another thread is already reporting the violation.
8424 */
8425 uint32_t flags = IOMON_DISABLE;
8426 task_io_monitor_ctl(task, &flags);
8427
8428 if (flavor == FLAVOR_IO_LOGICAL_WRITES) {
8429 trace_resource_violation(RMON_LOGWRITES_VIOLATED, &lei);
8430 }
8431 os_log(OS_LOG_DEFAULT, "process [%d] caught causing excessive I/O (flavor: %d). Task I/O: %lld MB. [Limit : %lld MB per %lld secs]\n",
8432 pid, flavor, (lei.lei_balance / (1024 * 1024)), (lei.lei_limit / (1024 * 1024)), (lei.lei_refill_period / NSEC_PER_SEC));
8433
8434 kr = send_resource_violation(send_disk_writes_violation, task, &lei, kRNFlagsNone);
8435 if (kr) {
8436 printf("send_resource_violation(disk_writes, ...): error %#x\n", kr);
8437 }
8438
8439 #ifdef EXC_RESOURCE_MONITORS
8440 code[0] = code[1] = 0;
8441 EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_IO);
8442 EXC_RESOURCE_ENCODE_FLAVOR(code[0], flavor);
8443 EXC_RESOURCE_IO_ENCODE_INTERVAL(code[0], (lei.lei_refill_period / NSEC_PER_SEC));
8444 EXC_RESOURCE_IO_ENCODE_LIMIT(code[0], (lei.lei_limit / (1024 * 1024)));
8445 EXC_RESOURCE_IO_ENCODE_OBSERVED(code[1], (lei.lei_balance / (1024 * 1024)));
8446 exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
8447 #endif /* EXC_RESOURCE_MONITORS */
8448 }
8449
8450 void
8451 task_port_space_ast(__unused task_t task)
8452 {
8453 uint32_t current_size, soft_limit, hard_limit;
8454 assert(task == current_task());
8455 bool should_notify = ipc_space_check_table_size_limit(task->itk_space,
8456 &current_size, &soft_limit, &hard_limit);
8457 if (should_notify) {
8458 SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_MACH_PORTS(task, current_size, soft_limit, hard_limit);
8459 }
8460 }
8461
8462 #if CONFIG_PROC_RESOURCE_LIMITS
8463 static mach_port_t
8464 task_allocate_fatal_port(void)
8465 {
8466 mach_port_t task_fatal_port = MACH_PORT_NULL;
8467 task_id_token_t token;
8468
8469 kern_return_t kr = task_create_identity_token(current_task(), &token); /* Takes a reference on the token */
8470 if (kr) {
8471 return MACH_PORT_NULL;
8472 }
8473 task_fatal_port = ipc_kobject_alloc_port((ipc_kobject_t)token, IKOT_TASK_FATAL,
8474 IPC_KOBJECT_ALLOC_NSREQUEST | IPC_KOBJECT_ALLOC_MAKE_SEND);
8475
8476 task_id_token_set_port(token, task_fatal_port);
8477
8478 return task_fatal_port;
8479 }
8480
8481 static void
8482 task_fatal_port_no_senders(ipc_port_t port, __unused mach_port_mscount_t mscount)
8483 {
8484 task_t task = TASK_NULL;
8485 kern_return_t kr;
8486
8487 task_id_token_t token = ipc_kobject_get_stable(port, IKOT_TASK_FATAL);
8488
8489 assert(token != NULL);
8490 if (token) {
8491 kr = task_identity_token_get_task_grp(token, &task, TASK_GRP_KERNEL); /* takes a reference on task */
8492 if (task) {
8493 task_bsdtask_kill(task);
8494 task_deallocate(task);
8495 }
8496 task_id_token_release(token); /* consumes ref given by notification */
8497 }
8498 }
8499 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
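/*
 * Lifecycle sketch for the fatal port, as implemented above: the port wraps
 * an identity token for the violating task and is created with a send right
 * plus a no-senders request. The send right travels inside the resource
 * violation notification; once the last recipient drops it, the no-senders
 * notification resolves the token back to the task and kills it.
 *
 *   task_allocate_fatal_port()
 *       -> send_resource_violation_with_fatal_port(..., task_fatal_port, ...)
 *       -> last send right released by the notification's recipients
 *       -> task_fatal_port_no_senders() -> task_bsdtask_kill(task)
 */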
8500
8501 void __attribute__((noinline))
8502 SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_MACH_PORTS(task_t task, uint32_t current_size, uint32_t soft_limit, uint32_t hard_limit)
8503 {
8504 int pid = 0;
8505 char *procname = (char *) "unknown";
8506 __unused kern_return_t kr;
8507 __unused resource_notify_flags_t flags = kRNFlagsNone;
8508 __unused uint32_t limit;
8509 __unused mach_port_t task_fatal_port = MACH_PORT_NULL;
8510 mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
8511
8512 pid = proc_selfpid();
8513 if (get_bsdtask_info(task) != NULL) {
8514 procname = proc_name_address(get_bsdtask_info(task));
8515 }
8516
8517 /*
8518 * Only kernel_task and launchd may be allowed to
8519 * have a really large IPC space.
8520 */
8521 if (pid == 0 || pid == 1) {
8522 return;
8523 }
8524
8525 os_log(OS_LOG_DEFAULT, "process %s[%d] caught allocating too many mach ports. \
8526 Num of ports allocated %u; \n", procname, pid, current_size);
8527
8528 /* Abort the process if it has hit the system-wide limit for ipc port table size */
8529 if (!hard_limit && !soft_limit) {
8530 code[0] = code[1] = 0;
8531 EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_PORTS);
8532 EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_PORT_SPACE_FULL);
8533 EXC_RESOURCE_PORTS_ENCODE_PORTS(code[0], current_size);
8534
8535 exception_info_t info = {
8536 .os_reason = OS_REASON_PORT_SPACE,
8537 .exception_type = EXC_RESOURCE,
8538 .mx_code = code[0],
8539 .mx_subcode = code[1]
8540 };
8541
8542 exit_with_mach_exception(current_proc(), info, PX_DEBUG_NO_HONOR);
8543 return;
8544 }
8545
8546 #if CONFIG_PROC_RESOURCE_LIMITS
8547 if (hard_limit > 0) {
8548 flags |= kRNHardLimitFlag;
8549 limit = hard_limit;
8550 task_fatal_port = task_allocate_fatal_port();
8551 if (!task_fatal_port) {
8552 os_log(OS_LOG_DEFAULT, "process %s[%d] Unable to create task token ident object", procname, pid);
8553 task_bsdtask_kill(task);
8554 }
8555 } else {
8556 flags |= kRNSoftLimitFlag;
8557 limit = soft_limit;
8558 }
8559
8560 kr = send_resource_violation_with_fatal_port(send_port_space_violation, task, (int64_t)current_size, (int64_t)limit, task_fatal_port, flags);
8561 if (kr) {
8562 os_log(OS_LOG_DEFAULT, "send_resource_violation(ports, ...): error %#x\n", kr);
8563 }
8564 if (task_fatal_port) {
8565 ipc_port_release_send(task_fatal_port);
8566 }
8567 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
8568 }
8569
8570 #if CONFIG_PROC_RESOURCE_LIMITS
8571 void
8572 task_kqworkloop_ast(task_t task, int current_size, int soft_limit, int hard_limit)
8573 {
8574 assert(task == current_task());
8575 return SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_KQWORKLOOPS(task, current_size, soft_limit, hard_limit);
8576 }
8577
8578 void __attribute__((noinline))
8579 SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_KQWORKLOOPS(task_t task, int current_size, int soft_limit, int hard_limit)
8580 {
8581 int pid = 0;
8582 char *procname = (char *) "unknown";
8583 #ifdef MACH_BSD
8584 pid = proc_selfpid();
8585 if (get_bsdtask_info(task) != NULL) {
8586 procname = proc_name_address(get_bsdtask_info(task));
8587 }
8588 #endif
8589 if (pid == 0 || pid == 1) {
8590 return;
8591 }
8592
8593 os_log(OS_LOG_DEFAULT, "process %s[%d] caught allocating too many kqworkloops. \
8594 Num of kqworkloops allocated %d; \n", procname, pid, current_size);
8595
8596 int limit = 0;
8597 resource_notify_flags_t flags = kRNFlagsNone;
8598 mach_port_t task_fatal_port = MACH_PORT_NULL;
8599 if (hard_limit) {
8600 flags |= kRNHardLimitFlag;
8601 limit = hard_limit;
8602
8603 task_fatal_port = task_allocate_fatal_port();
8604 if (task_fatal_port == MACH_PORT_NULL) {
8605 os_log(OS_LOG_DEFAULT, "process %s[%d] Unable to create task token ident object", procname, pid);
8606 task_bsdtask_kill(task);
8607 }
8608 } else {
8609 flags |= kRNSoftLimitFlag;
8610 limit = soft_limit;
8611 }
8612
8613 kern_return_t kr;
8614 kr = send_resource_violation_with_fatal_port(send_kqworkloops_violation, task, (int64_t)current_size, (int64_t)limit, task_fatal_port, flags);
8615 if (kr) {
8616 os_log(OS_LOG_DEFAULT, "send_resource_violation_with_fatal_port(kqworkloops, ...): error %#x\n", kr);
8617 }
8618 if (task_fatal_port) {
8619 ipc_port_release_send(task_fatal_port);
8620 }
8621 }
8622
8623
8624 void
8625 task_filedesc_ast(__unused task_t task, __unused int current_size, __unused int soft_limit, __unused int hard_limit)
8626 {
8627 assert(task == current_task());
8628 SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task, current_size, soft_limit, hard_limit);
8629 }
8630
8631 void __attribute__((noinline))
8632 SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task_t task, int current_size, int soft_limit, int hard_limit)
8633 {
8634 int pid = 0;
8635 char *procname = (char *) "unknown";
8636 kern_return_t kr;
8637 resource_notify_flags_t flags = kRNFlagsNone;
8638 int limit;
8639 mach_port_t task_fatal_port = MACH_PORT_NULL;
8640
8641 #ifdef MACH_BSD
8642 pid = proc_selfpid();
8643 if (get_bsdtask_info(task) != NULL) {
8644 procname = proc_name_address(get_bsdtask_info(task));
8645 }
8646 #endif
8647 /*
8648 * Only kernel_task and launchd may be allowed to
8649 * have really large file descriptor tables.
8650 */
8651 if (pid == 0 || pid == 1) {
8652 return;
8653 }
8654
8655 os_log(OS_LOG_DEFAULT, "process %s[%d] caught allocating too many file descriptors. \
8656 Num of fds allocated %d; \n", procname, pid, current_size);
8657
8658 if (hard_limit > 0) {
8659 flags |= kRNHardLimitFlag;
8660 limit = hard_limit;
8661 task_fatal_port = task_allocate_fatal_port();
8662 if (!task_fatal_port) {
8663 os_log(OS_LOG_DEFAULT, "process %s[%d] Unable to create task token ident object", procname, pid);
8664 task_bsdtask_kill(task);
8665 }
8666 } else {
8667 flags |= kRNSoftLimitFlag;
8668 limit = soft_limit;
8669 }
8670
8671 kr = send_resource_violation_with_fatal_port(send_file_descriptors_violation, task, (int64_t)current_size, (int64_t)limit, task_fatal_port, flags);
8672 if (kr) {
8673 os_log(OS_LOG_DEFAULT, "send_resource_violation_with_fatal_port(filedesc, ...): error %#x\n", kr);
8674 }
8675 if (task_fatal_port) {
8676 ipc_port_release_send(task_fatal_port);
8677 }
8678 }
8679 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
8680
8681 /* Placeholders for the task set/get voucher interfaces */
8682 kern_return_t
8683 task_get_mach_voucher(
8684 task_t task,
8685 mach_voucher_selector_t __unused which,
8686 ipc_voucher_t *voucher)
8687 {
8688 if (TASK_NULL == task) {
8689 return KERN_INVALID_TASK;
8690 }
8691
8692 *voucher = NULL;
8693 return KERN_SUCCESS;
8694 }
8695
8696 kern_return_t
8697 task_set_mach_voucher(
8698 task_t task,
8699 ipc_voucher_t __unused voucher)
8700 {
8701 if (TASK_NULL == task) {
8702 return KERN_INVALID_TASK;
8703 }
8704
8705 return KERN_SUCCESS;
8706 }
8707
8708 kern_return_t
8709 task_swap_mach_voucher(
8710 __unused task_t task,
8711 __unused ipc_voucher_t new_voucher,
8712 ipc_voucher_t *in_out_old_voucher)
8713 {
8714 /*
8715 * Currently this function is only called from a MIG generated
8716 * routine which doesn't release the reference on the voucher
8717 * addressed by in_out_old_voucher. To avoid leaking this reference,
8718 * a call to release it has been added here.
8719 */
8720 ipc_voucher_release(*in_out_old_voucher);
8721 OS_ANALYZER_SUPPRESS("81787115") return KERN_NOT_SUPPORTED;
8722 }
8723
8724 void
8725 task_set_gpu_denied(task_t task, boolean_t denied)
8726 {
8727 task_lock(task);
8728
8729 if (denied) {
8730 task->t_flags |= TF_GPU_DENIED;
8731 } else {
8732 task->t_flags &= ~TF_GPU_DENIED;
8733 }
8734
8735 task_unlock(task);
8736 }
8737
8738 boolean_t
8739 task_is_gpu_denied(task_t task)
8740 {
8741 /* We don't need the lock to read this flag */
8742 return (task->t_flags & TF_GPU_DENIED) ? TRUE : FALSE;
8743 }
8744
8745 /*
8746 * Task policy termination uses this path to clear the bit the final time
8747 * during the termination flow, and the TASK_POLICY_TERMINATED bit guarantees
8748 * that it won't be changed again on a terminated task.
8749 */
8750 bool
8751 task_set_game_mode_locked(task_t task, bool enabled)
8752 {
8753 task_lock_assert_owned(task);
8754
8755 if (enabled) {
8756 assert(proc_get_effective_task_policy(task, TASK_POLICY_TERMINATED) == 0);
8757 }
8758
8759 bool previously_enabled = task_get_game_mode(task);
8760 bool needs_update = false;
8761 uint32_t new_count = 0;
8762
8763 if (enabled) {
8764 task->t_flags |= TF_GAME_MODE;
8765 } else {
8766 task->t_flags &= ~TF_GAME_MODE;
8767 }
8768
8769 if (enabled && !previously_enabled) {
8770 if (task_coalition_adjust_game_mode_count(task, 1, &new_count) && (new_count == 1)) {
8771 needs_update = true;
8772 }
8773 } else if (!enabled && previously_enabled) {
8774 if (task_coalition_adjust_game_mode_count(task, -1, &new_count) && (new_count == 0)) {
8775 needs_update = true;
8776 }
8777 }
8778
8779 return needs_update;
8780 }
8781
8782 void
8783 task_set_game_mode(task_t task, bool enabled)
8784 {
8785 bool needs_update = false;
8786
8787 task_lock(task);
8788
8789 /* After termination, further updates are no longer effective */
8790 if (proc_get_effective_task_policy(task, TASK_POLICY_TERMINATED) == 0) {
8791 needs_update = task_set_game_mode_locked(task, enabled);
8792 }
8793
8794 task_unlock(task);
8795
8796 #if CONFIG_THREAD_GROUPS
8797 if (needs_update) {
8798 task_coalition_thread_group_game_mode_update(task);
8799 }
8800 #endif /* CONFIG_THREAD_GROUPS */
8801 }
8802
8803 bool
8804 task_get_game_mode(task_t task)
8805 {
8806 /* We don't need the lock to read this flag */
8807 return task->t_flags & TF_GAME_MODE;
8808 }
8809
8810 bool
8811 task_set_carplay_mode_locked(task_t task, bool enabled)
8812 {
8813 task_lock_assert_owned(task);
8814
8815 if (enabled) {
8816 assert(proc_get_effective_task_policy(task, TASK_POLICY_TERMINATED) == 0);
8817 }
8818
8819 bool previously_enabled = task_get_carplay_mode(task);
8820 bool needs_update = false;
8821 uint32_t new_count = 0;
8822
8823 if (enabled) {
8824 task->t_flags |= TF_CARPLAY_MODE;
8825 } else {
8826 task->t_flags &= ~TF_CARPLAY_MODE;
8827 }
8828
8829 if (enabled && !previously_enabled) {
8830 if (task_coalition_adjust_carplay_mode_count(task, 1, &new_count) && (new_count == 1)) {
8831 needs_update = true;
8832 }
8833 } else if (!enabled && previously_enabled) {
8834 if (task_coalition_adjust_carplay_mode_count(task, -1, &new_count) && (new_count == 0)) {
8835 needs_update = true;
8836 }
8837 }
8838 return needs_update;
8839 }
8840
8841 void
8842 task_set_carplay_mode(task_t task, bool enabled)
8843 {
8844 bool needs_update = false;
8845
8846 task_lock(task);
8847
8848 /* After termination, further updates are no longer effective */
8849 if (proc_get_effective_task_policy(task, TASK_POLICY_TERMINATED) == 0) {
8850 needs_update = task_set_carplay_mode_locked(task, enabled);
8851 }
8852
8853 task_unlock(task);
8854
8855 #if CONFIG_THREAD_GROUPS
8856 if (needs_update) {
8857 task_coalition_thread_group_carplay_mode_update(task);
8858 }
8859 #endif /* CONFIG_THREAD_GROUPS */
8860 }
8861
8862 bool
8863 task_get_carplay_mode(task_t task)
8864 {
8865 /* We don't need the lock to read this flag */
8866 return task->t_flags & TF_CARPLAY_MODE;
8867 }
8868
8869 uint64_t
8870 get_task_memory_region_count(task_t task)
8871 {
8872 vm_map_t map;
8873 map = (task == kernel_task) ? kernel_map: task->map;
8874 return (uint64_t)get_map_nentries(map);
8875 }
8876
8877 static void
8878 kdebug_trace_dyld_internal(uint32_t base_code,
8879 struct dyld_kernel_image_info *info)
8880 {
8881 static_assert(sizeof(info->uuid) >= 16);
8882
8883 #if defined(__LP64__)
8884 uint64_t *uuid = (uint64_t *)&(info->uuid);
8885
8886 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8887 KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code), uuid[0],
8888 uuid[1], info->load_addr,
8889 (uint64_t)info->fsid.val[0] | ((uint64_t)info->fsid.val[1] << 32),
8890 0);
8891 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8892 KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 1),
8893 (uint64_t)info->fsobjid.fid_objno |
8894 ((uint64_t)info->fsobjid.fid_generation << 32),
8895 0, 0, 0, 0);
8896 #else /* defined(__LP64__) */
8897 uint32_t *uuid = (uint32_t *)&(info->uuid);
8898
8899 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8900 KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 2), uuid[0],
8901 uuid[1], uuid[2], uuid[3], 0);
8902 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8903 KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 3),
8904 (uint32_t)info->load_addr, info->fsid.val[0], info->fsid.val[1],
8905 info->fsobjid.fid_objno, 0);
8906 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8907 KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 4),
8908 info->fsobjid.fid_generation, 0, 0, 0, 0);
8909 #endif /* !defined(__LP64__) */
8910 }
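/*
 * Decoding sketch for the LP64 events above, assuming a kdebug consumer
 * reading the trace arguments back out: fsid and fsobjid are each packed
 * into a single 64-bit argument, so a decoder simply splits them again.
 */
#if 0 /* illustrative sketch, not compiled */
static void
example_decode_dyld_uuid_event(uint64_t arg_fsid, uint64_t arg_fsobjid)
{
	int32_t  fsid_val0      = (int32_t)(arg_fsid & 0xffffffffull);	/* info->fsid.val[0] */
	int32_t  fsid_val1      = (int32_t)(arg_fsid >> 32);		/* info->fsid.val[1] */
	uint64_t fid_objno      = arg_fsobjid & 0xffffffffull;		/* info->fsobjid.fid_objno */
	uint64_t fid_generation = arg_fsobjid >> 32;			/* info->fsobjid.fid_generation */

	(void)fsid_val0; (void)fsid_val1; (void)fid_objno; (void)fid_generation;
}
#endif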
8911
8912 static kern_return_t
8913 kdebug_trace_dyld(task_t task, uint32_t base_code,
8914 vm_map_copy_t infos_copy, mach_msg_type_number_t infos_len)
8915 {
8916 kern_return_t kr;
8917 dyld_kernel_image_info_array_t infos;
8918 vm_map_offset_t map_data;
8919 vm_offset_t data;
8920
8921 if (!infos_copy) {
8922 return KERN_INVALID_ADDRESS;
8923 }
8924
8925 if (!kdebug_enable ||
8926 !kdebug_debugid_enabled(KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, 0))) {
8927 vm_map_copy_discard(infos_copy);
8928 return KERN_SUCCESS;
8929 }
8930
8931 if (task == NULL || task != current_task()) {
8932 return KERN_INVALID_TASK;
8933 }
8934
8935 kr = vm_map_copyout(ipc_kernel_map, &map_data, (vm_map_copy_t)infos_copy);
8936 if (kr != KERN_SUCCESS) {
8937 return kr;
8938 }
8939
8940 infos = CAST_DOWN(dyld_kernel_image_info_array_t, map_data);
8941
8942 for (mach_msg_type_number_t i = 0; i < infos_len; i++) {
8943 kdebug_trace_dyld_internal(base_code, &(infos[i]));
8944 }
8945
8946 data = CAST_DOWN(vm_offset_t, map_data);
8947 mach_vm_deallocate(ipc_kernel_map, data, infos_len * sizeof(infos[0]));
8948 return KERN_SUCCESS;
8949 }
8950
8951 kern_return_t
8952 task_register_dyld_image_infos(task_t task,
8953 dyld_kernel_image_info_array_t infos_copy,
8954 mach_msg_type_number_t infos_len)
8955 {
8956 return kdebug_trace_dyld(task, DBG_DYLD_UUID_MAP_A,
8957 (vm_map_copy_t)infos_copy, infos_len);
8958 }
8959
8960 kern_return_t
8961 task_unregister_dyld_image_infos(task_t task,
8962 dyld_kernel_image_info_array_t infos_copy,
8963 mach_msg_type_number_t infos_len)
8964 {
8965 return kdebug_trace_dyld(task, DBG_DYLD_UUID_UNMAP_A,
8966 (vm_map_copy_t)infos_copy, infos_len);
8967 }
8968
8969 kern_return_t
8970 task_get_dyld_image_infos(__unused task_t task,
8971 __unused dyld_kernel_image_info_array_t * dyld_images,
8972 __unused mach_msg_type_number_t * dyld_imagesCnt)
8973 {
8974 return KERN_NOT_SUPPORTED;
8975 }
8976
8977 kern_return_t
8978 task_register_dyld_shared_cache_image_info(task_t task,
8979 dyld_kernel_image_info_t cache_img,
8980 __unused boolean_t no_cache,
8981 __unused boolean_t private_cache)
8982 {
8983 if (task == NULL || task != current_task()) {
8984 return KERN_INVALID_TASK;
8985 }
8986
8987 kdebug_trace_dyld_internal(DBG_DYLD_UUID_SHARED_CACHE_A, &cache_img);
8988 return KERN_SUCCESS;
8989 }
8990
8991 kern_return_t
8992 task_register_dyld_set_dyld_state(__unused task_t task,
8993 __unused uint8_t dyld_state)
8994 {
8995 return KERN_NOT_SUPPORTED;
8996 }
8997
8998 kern_return_t
8999 task_register_dyld_get_process_state(__unused task_t task,
9000 __unused dyld_kernel_process_info_t * dyld_process_state)
9001 {
9002 return KERN_NOT_SUPPORTED;
9003 }
9004
9005 kern_return_t
9006 task_inspect(task_inspect_t task_insp, task_inspect_flavor_t flavor,
9007 task_inspect_info_t info_out, mach_msg_type_number_t *size_in_out)
9008 {
9009 #if CONFIG_PERVASIVE_CPI
9010 task_t task = (task_t)task_insp;
9011 kern_return_t kr = KERN_SUCCESS;
9012 mach_msg_type_number_t size;
9013
9014 if (task == TASK_NULL) {
9015 return KERN_INVALID_ARGUMENT;
9016 }
9017
9018 size = *size_in_out;
9019
9020 switch (flavor) {
9021 case TASK_INSPECT_BASIC_COUNTS: {
9022 struct task_inspect_basic_counts *bc =
9023 (struct task_inspect_basic_counts *)info_out;
9024 struct recount_usage stats = { 0 };
9025 if (size < TASK_INSPECT_BASIC_COUNTS_COUNT) {
9026 kr = KERN_INVALID_ARGUMENT;
9027 break;
9028 }
9029
9030 recount_sum(&recount_task_plan, task->tk_recount.rtk_lifetime, &stats);
9031 bc->instructions = recount_usage_instructions(&stats);
9032 bc->cycles = recount_usage_cycles(&stats);
9033 size = TASK_INSPECT_BASIC_COUNTS_COUNT;
9034 break;
9035 }
9036 default:
9037 kr = KERN_INVALID_ARGUMENT;
9038 break;
9039 }
9040
9041 if (kr == KERN_SUCCESS) {
9042 *size_in_out = size;
9043 }
9044 return kr;
9045 #else /* CONFIG_PERVASIVE_CPI */
9046 #pragma unused(task_insp, flavor, info_out, size_in_out)
9047 return KERN_NOT_SUPPORTED;
9048 #endif /* !CONFIG_PERVASIVE_CPI */
9049 }
9050
9051 #if CONFIG_SECLUDED_MEMORY
9052 int num_tasks_can_use_secluded_mem = 0;
9053
9054 void
9055 task_set_can_use_secluded_mem(
9056 task_t task,
9057 boolean_t can_use_secluded_mem)
9058 {
9059 if (!task->task_could_use_secluded_mem) {
9060 return;
9061 }
9062 task_lock(task);
9063 task_set_can_use_secluded_mem_locked(task, can_use_secluded_mem);
9064 task_unlock(task);
9065 }
9066
9067 void
9068 task_set_can_use_secluded_mem_locked(
9069 task_t task,
9070 boolean_t can_use_secluded_mem)
9071 {
9072 assert(task->task_could_use_secluded_mem);
9073 if (can_use_secluded_mem &&
9074 secluded_for_apps && /* global boot-arg */
9075 !task->task_can_use_secluded_mem) {
9076 assert(num_tasks_can_use_secluded_mem >= 0);
9077 OSAddAtomic(+1,
9078 (volatile SInt32 *)&num_tasks_can_use_secluded_mem);
9079 task->task_can_use_secluded_mem = TRUE;
9080 } else if (!can_use_secluded_mem &&
9081 task->task_can_use_secluded_mem) {
9082 assert(num_tasks_can_use_secluded_mem > 0);
9083 OSAddAtomic(-1,
9084 (volatile SInt32 *)&num_tasks_can_use_secluded_mem);
9085 task->task_can_use_secluded_mem = FALSE;
9086 }
9087 }
9088
9089 void
9090 task_set_could_use_secluded_mem(
9091 task_t task,
9092 boolean_t could_use_secluded_mem)
9093 {
9094 task->task_could_use_secluded_mem = !!could_use_secluded_mem;
9095 }
9096
9097 void
9098 task_set_could_also_use_secluded_mem(
9099 task_t task,
9100 boolean_t could_also_use_secluded_mem)
9101 {
9102 task->task_could_also_use_secluded_mem = !!could_also_use_secluded_mem;
9103 }
9104
9105 boolean_t
9106 task_can_use_secluded_mem(
9107 task_t task,
9108 boolean_t is_alloc)
9109 {
9110 if (task->task_can_use_secluded_mem) {
9111 assert(task->task_could_use_secluded_mem);
9112 assert(num_tasks_can_use_secluded_mem > 0);
9113 return TRUE;
9114 }
9115 if (task->task_could_also_use_secluded_mem &&
9116 num_tasks_can_use_secluded_mem > 0) {
9117 assert(num_tasks_can_use_secluded_mem > 0);
9118 return TRUE;
9119 }
9120
9121 /*
9122 * If a single task is using more than some large amount of
9123 * memory (i.e. secluded_shutoff_trigger) and is approaching
9124 * its task limit, allow it to dip into secluded and begin
9125 * suppression of rebuilding secluded memory until that task exits.
9126 */
9127 if (is_alloc && secluded_shutoff_trigger != 0) {
9128 uint64_t phys_used = get_task_phys_footprint(task);
9129 uint64_t limit = get_task_phys_footprint_limit(task);
9130 if (phys_used > secluded_shutoff_trigger &&
9131 limit > secluded_shutoff_trigger &&
9132 phys_used > limit - secluded_shutoff_headroom) {
9133 start_secluded_suppression(task);
9134 return TRUE;
9135 }
9136 }
9137
9138 return FALSE;
9139 }
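/*
 * Worked example for the dip-into-secluded case above, with hypothetical
 * values: if secluded_shutoff_trigger is 2 GB, secluded_shutoff_headroom is
 * 256 MB, and a task with a 3 GB footprint limit currently has a 2.9 GB
 * footprint, then phys_used (2.9 GB) exceeds the trigger, the limit (3 GB)
 * exceeds the trigger, and phys_used is within headroom of the limit
 * (2.9 GB > 3 GB - 256 MB = 2.75 GB), so the allocation may come from
 * secluded memory and secluded rebuilding is suppressed until the task
 * exits.
 */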
9140
9141 boolean_t
9142 task_could_use_secluded_mem(
9143 task_t task)
9144 {
9145 return task->task_could_use_secluded_mem;
9146 }
9147
9148 boolean_t
9149 task_could_also_use_secluded_mem(
9150 task_t task)
9151 {
9152 return task->task_could_also_use_secluded_mem;
9153 }
9154 #endif /* CONFIG_SECLUDED_MEMORY */
9155
9156 queue_head_t *
9157 task_io_user_clients(task_t task)
9158 {
9159 return &task->io_user_clients;
9160 }
9161
9162 void
9163 task_set_message_app_suspended(task_t task, boolean_t enable)
9164 {
9165 task->message_app_suspended = enable;
9166 }
9167
9168 void
9169 task_copy_fields_for_exec(task_t dst_task, task_t src_task)
9170 {
9171 dst_task->vtimers = src_task->vtimers;
9172 }
9173
9174 #if DEVELOPMENT || DEBUG
9175 int vm_region_footprint = 0;
9176 #endif /* DEVELOPMENT || DEBUG */
9177
9178 boolean_t
9179 task_self_region_footprint(void)
9180 {
9181 #if DEVELOPMENT || DEBUG
9182 if (vm_region_footprint) {
9183 /* system-wide override */
9184 return TRUE;
9185 }
9186 #endif /* DEVELOPMENT || DEBUG */
9187 return current_task()->task_region_footprint;
9188 }
9189
9190 void
9191 task_self_region_footprint_set(
9192 boolean_t newval)
9193 {
9194 task_t curtask;
9195
9196 curtask = current_task();
9197 task_lock(curtask);
9198 if (newval) {
9199 curtask->task_region_footprint = TRUE;
9200 } else {
9201 curtask->task_region_footprint = FALSE;
9202 }
9203 task_unlock(curtask);
9204 }
9205
9206 int
9207 task_self_region_info_flags(void)
9208 {
9209 return current_task()->task_region_info_flags;
9210 }
9211
9212 kern_return_t
9213 task_self_region_info_flags_set(
9214 int newval)
9215 {
9216 task_t curtask;
9217 kern_return_t err = KERN_SUCCESS;
9218
9219 curtask = current_task();
9220 task_lock(curtask);
9221 curtask->task_region_info_flags = newval;
9222 /* check for overflow (flag added without increasing bitfield size?) */
9223 if (curtask->task_region_info_flags != newval) {
9224 err = KERN_INVALID_ARGUMENT;
9225 }
9226 task_unlock(curtask);
9227
9228 return err;
9229 }
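/*
 * The read-back comparison above catches silent truncation: assigning a
 * value with bits beyond the width of the task_region_info_flags bitfield
 * stores only the low bits. A sketch of the failure mode, assuming a
 * hypothetical 2-bit field:
 */
#if 0 /* illustrative sketch, not compiled */
static void
example_bitfield_truncation(void)
{
	struct { unsigned int flags : 2; } s;

	s.flags = 0x5;			/* 0b101: bit 2 does not fit */
	assert(s.flags == 0x1);		/* read-back differs, so the setter returns KERN_INVALID_ARGUMENT */
}
#endif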
9230
9231 void
9232 task_set_darkwake_mode(task_t task, boolean_t set_mode)
9233 {
9234 assert(task);
9235
9236 task_lock(task);
9237
9238 if (set_mode) {
9239 task->t_flags |= TF_DARKWAKE_MODE;
9240 } else {
9241 task->t_flags &= ~(TF_DARKWAKE_MODE);
9242 }
9243
9244 task_unlock(task);
9245 }
9246
9247 boolean_t
9248 task_get_darkwake_mode(task_t task)
9249 {
9250 assert(task);
9251 return (task->t_flags & TF_DARKWAKE_MODE) != 0;
9252 }
9253
9254 /*
9255 * Set default behavior for task's control port and EXC_GUARD variants that have
9256 * settable behavior.
9257 *
9258 * Platform binaries typically have one behavior, third parties another -
9259 * but there are special exceptions we may need to account for.
9260 */
9261 void
9262 task_set_exc_guard_ctrl_port_default(
9263 task_t task,
9264 thread_t main_thread,
9265 const char *name,
9266 unsigned int namelen,
9267 boolean_t is_simulated,
9268 uint32_t platform,
9269 uint32_t sdk)
9270 {
9271 task_control_port_options_t opts = TASK_CONTROL_PORT_OPTIONS_NONE;
9272
9273 if (task_is_hardened_binary(task)) {
9274 /* set exc guard default behavior for hardened binaries */
9275 task->task_exc_guard = (task_exc_guard_default & TASK_EXC_GUARD_ALL);
9276
9277 if (1 == task_pid(task)) {
9278 /* special flags for inittask - deliver every instance as a corpse */
9279 task->task_exc_guard = _TASK_EXC_GUARD_ALL_CORPSE;
9280 } else if (task_exc_guard_default & TASK_EXC_GUARD_HONOR_NAMED_DEFAULTS) {
9281 /* honor by-name default setting overrides */
9282
9283 int count = sizeof(task_exc_guard_named_defaults) / sizeof(struct task_exc_guard_named_default);
9284
9285 for (int i = 0; i < count; i++) {
9286 const struct task_exc_guard_named_default *named_default =
9287 &task_exc_guard_named_defaults[i];
9288 if (strncmp(named_default->name, name, namelen) == 0 &&
9289 strlen(named_default->name) == namelen) {
9290 task->task_exc_guard = named_default->behavior;
9291 break;
9292 }
9293 }
9294 }
9295
9296 /* set control port options for 1p code, inherited from parent task by default */
9297 opts = ipc_control_port_options & ICP_OPTIONS_1P_MASK;
9298 } else {
9299 /* set exc guard default behavior for third-party code */
9300 task->task_exc_guard = ((task_exc_guard_default >> TASK_EXC_GUARD_THIRD_PARTY_DEFAULT_SHIFT) & TASK_EXC_GUARD_ALL);
9301 /* set control port options for 3p code, inherited from parent task by default */
9302 opts = (ipc_control_port_options & ICP_OPTIONS_3P_MASK) >> ICP_OPTIONS_3P_SHIFT;
9303 }
9304
9305 if (is_simulated) {
9306 /* If simulated and built against pre-iOS 15 SDK, disable all EXC_GUARD */
9307 if ((platform == PLATFORM_IOSSIMULATOR && sdk < 0xf0000) ||
9308 (platform == PLATFORM_TVOSSIMULATOR && sdk < 0xf0000) ||
9309 (platform == PLATFORM_WATCHOSSIMULATOR && sdk < 0x80000)) {
9310 task->task_exc_guard = TASK_EXC_GUARD_NONE;
9311 }
9312 /* Disable protection for control ports for simulated binaries */
9313 opts = TASK_CONTROL_PORT_OPTIONS_NONE;
9314 }
9315
9316
9317 task_set_control_port_options(task, opts);
9318
9319 task_set_immovable_pinned(task);
9320 main_thread_set_immovable_pinned(main_thread);
9321 }
9322
9323 kern_return_t
9324 task_get_exc_guard_behavior(
9325 task_t task,
9326 task_exc_guard_behavior_t *behaviorp)
9327 {
9328 if (task == TASK_NULL) {
9329 return KERN_INVALID_TASK;
9330 }
9331 *behaviorp = task->task_exc_guard;
9332 return KERN_SUCCESS;
9333 }
9334
9335 kern_return_t
9336 task_set_exc_guard_behavior(
9337 task_t task,
9338 task_exc_guard_behavior_t new_behavior)
9339 {
9340 if (task == TASK_NULL) {
9341 return KERN_INVALID_TASK;
9342 }
9343 if (new_behavior & ~TASK_EXC_GUARD_ALL) {
9344 return KERN_INVALID_VALUE;
9345 }
9346
9347 /* limit setting to that allowed for this config */
9348 new_behavior = new_behavior & task_exc_guard_config_mask;
9349
9350 #if !defined (DEBUG) && !defined (DEVELOPMENT)
9351 /* On release kernels, only allow _upgrading_ exc guard behavior */
9352 task_exc_guard_behavior_t cur_behavior;
9353
9354 os_atomic_rmw_loop(&task->task_exc_guard, cur_behavior, new_behavior, relaxed, {
9355 if ((cur_behavior & task_exc_guard_no_unset_mask) & ~(new_behavior & task_exc_guard_no_unset_mask)) {
9356 os_atomic_rmw_loop_give_up(return KERN_DENIED);
9357 }
9358
9359 if ((new_behavior & task_exc_guard_no_set_mask) & ~(cur_behavior & task_exc_guard_no_set_mask)) {
9360 os_atomic_rmw_loop_give_up(return KERN_DENIED);
9361 }
9362
9363 /* no restrictions on CORPSE bit */
9364 });
9365 #else
9366 task->task_exc_guard = new_behavior;
9367 #endif
9368 return KERN_SUCCESS;
9369 }
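/*
 * Upgrade-only semantics on RELEASE kernels, sketched with the masks used
 * above and assuming a no-unset bit is currently set on the task: bits in
 * task_exc_guard_no_unset_mask may be set but never cleared, bits in
 * task_exc_guard_no_set_mask may be cleared but never set, and the CORPSE
 * bit moves freely in either direction.
 */
#if 0 /* illustrative sketch, not compiled */
static void
example_exc_guard_upgrade_only(task_t task)
{
	/* raising protections succeeds */
	kern_return_t kr1 = task_set_exc_guard_behavior(task, TASK_EXC_GUARD_ALL);
	/* dropping a no-unset bit is refused with KERN_DENIED */
	kern_return_t kr2 = task_set_exc_guard_behavior(task, TASK_EXC_GUARD_NONE);

	(void)kr1; (void)kr2;
}
#endif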
9370
9371 kern_return_t
9372 task_set_corpse_forking_behavior(task_t task, task_corpse_forking_behavior_t behavior)
9373 {
9374 #if DEVELOPMENT || DEBUG
9375 if (task == TASK_NULL) {
9376 return KERN_INVALID_TASK;
9377 }
9378
9379 task_lock(task);
9380 if (behavior & TASK_CORPSE_FORKING_DISABLED_MEM_DIAG) {
9381 task->t_flags |= TF_NO_CORPSE_FORKING;
9382 } else {
9383 task->t_flags &= ~TF_NO_CORPSE_FORKING;
9384 }
9385 task_unlock(task);
9386
9387 return KERN_SUCCESS;
9388 #else
9389 (void)task;
9390 (void)behavior;
9391 return KERN_NOT_SUPPORTED;
9392 #endif
9393 }
9394
9395 boolean_t
9396 task_corpse_forking_disabled(task_t task)
9397 {
9398 boolean_t disabled = FALSE;
9399
9400 task_lock(task);
9401 disabled = (task->t_flags & TF_NO_CORPSE_FORKING);
9402 task_unlock(task);
9403
9404 return disabled;
9405 }
9406
9407 #if __arm64__
9408 extern int legacy_footprint_entitlement_mode;
9409 extern void memorystatus_act_on_legacy_footprint_entitlement(struct proc *, boolean_t);
9410 extern void memorystatus_act_on_ios13extended_footprint_entitlement(struct proc *);
9411
9412
9413 void
9414 task_set_legacy_footprint(
9415 task_t task)
9416 {
9417 task_lock(task);
9418 task->task_legacy_footprint = TRUE;
9419 task_unlock(task);
9420 }
9421
9422 void
9423 task_set_extra_footprint_limit(
9424 task_t task)
9425 {
9426 if (task->task_extra_footprint_limit) {
9427 return;
9428 }
9429 task_lock(task);
9430 if (task->task_extra_footprint_limit) {
9431 task_unlock(task);
9432 return;
9433 }
9434 task->task_extra_footprint_limit = TRUE;
9435 task_unlock(task);
9436 memorystatus_act_on_legacy_footprint_entitlement(get_bsdtask_info(task), TRUE);
9437 }
9438
9439 void
9440 task_set_ios13extended_footprint_limit(
9441 task_t task)
9442 {
9443 if (task->task_ios13extended_footprint_limit) {
9444 return;
9445 }
9446 task_lock(task);
9447 if (task->task_ios13extended_footprint_limit) {
9448 task_unlock(task);
9449 return;
9450 }
9451 task->task_ios13extended_footprint_limit = TRUE;
9452 task_unlock(task);
9453 memorystatus_act_on_ios13extended_footprint_entitlement(get_bsdtask_info(task));
9454 }
9455 #endif /* __arm64__ */
9456
9457 static inline ledger_amount_t
9458 task_ledger_get_balance(
9459 ledger_t ledger,
9460 int ledger_idx)
9461 {
9462 ledger_amount_t amount;
9463 amount = 0;
9464 ledger_get_balance(ledger, ledger_idx, &amount);
9465 return amount;
9466 }
9467
9468 /*
9469 * Gather the amount of memory counted in a task's footprint due to
9470 * being in a specific set of ledgers.
9471 */
9472 void
9473 task_ledgers_footprint(
9474 ledger_t ledger,
9475 ledger_amount_t *ledger_resident,
9476 ledger_amount_t *ledger_compressed)
9477 {
9478 *ledger_resident = 0;
9479 *ledger_compressed = 0;
9480
9481 /* purgeable non-volatile memory */
9482 *ledger_resident += task_ledger_get_balance(ledger, task_ledgers.purgeable_nonvolatile);
9483 *ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.purgeable_nonvolatile_compressed);
9484
9485 /* "default" tagged memory */
9486 *ledger_resident += task_ledger_get_balance(ledger, task_ledgers.tagged_footprint);
9487 *ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.tagged_footprint_compressed);
9488
9489 /* "network" currently never counts in the footprint... */
9490
9491 /* "media" tagged memory */
9492 *ledger_resident += task_ledger_get_balance(ledger, task_ledgers.media_footprint);
9493 *ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.media_footprint_compressed);
9494
9495 /* "graphics" tagged memory */
9496 *ledger_resident += task_ledger_get_balance(ledger, task_ledgers.graphics_footprint);
9497 *ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.graphics_footprint_compressed);
9498
9499 /* "neural" tagged memory */
9500 *ledger_resident += task_ledger_get_balance(ledger, task_ledgers.neural_footprint);
9501 *ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.neural_footprint_compressed);
9502 }
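/*
 * A minimal usage sketch: the footprint attributable to this set of
 * ledgers is simply the sum of the two out-parameters.
 */
#if 0 /* illustrative sketch, not compiled */
static ledger_amount_t
example_tagged_footprint_bytes(task_t task)
{
	ledger_amount_t resident = 0;
	ledger_amount_t compressed = 0;

	task_ledgers_footprint(task->ledger, &resident, &compressed);
	return resident + compressed;
}
#endif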
9503
9504 #if CONFIG_MEMORYSTATUS
9505 void
9506 task_ledger_settle_dirty_time(task_t t)
9507 {
9508 task_lock(t);
9509 task_ledger_settle_dirty_time_locked(t);
9510 task_unlock(t);
9511 }
9512
9513 /*
9514 * Credit any outstanding task dirty time to the ledger.
9515 * memstat_dirty_start is pushed forward to prevent any possibility of double
9516 * counting, making it safe to call this as often as necessary to ensure that
9517 * anyone reading the ledger gets up-to-date information.
9518 */
9519 void
9520 task_ledger_settle_dirty_time_locked(task_t t)
9521 {
9522 task_lock_assert_owned(t);
9523
9524 uint64_t start = t->memstat_dirty_start;
9525 if (start) {
9526 uint64_t now = mach_absolute_time();
9527
9528 uint64_t duration;
9529 absolutetime_to_nanoseconds(now - start, &duration);
9530
9531 ledger_t ledger = get_task_ledger(t);
9532 ledger_credit(ledger, task_ledgers.memorystatus_dirty_time, duration);
9533
9534 t->memstat_dirty_start = now;
9535 }
9536 }
9537 #endif /* CONFIG_MEMORYSTATUS */
9538
9539 static void
9540 task_ledger_settle_counter(ledger_t ledger, int entry, counter_t *counter)
9541 {
9542 ledger_amount_t ledger_val;
9543 kern_return_t kr;
9544 uint64_t counter_val;
9545
9546 kr = ledger_get_balance(ledger, entry, &ledger_val);
9547 if (kr != KERN_SUCCESS) {
9548 return;
9549 }
9550
9551 counter_val = counter_load(counter);
9552 if (counter_val <= ledger_val) {
9553 return; /* These counters should only move forward, but just in case. */
9554 }
9555
9556 ledger_credit(ledger, entry, counter_val - ledger_val);
9557 }
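/*
 * The settle step above reconciles a monotonic per-task counter with its
 * ledger shadow by crediting only the positive delta: for example, if the
 * counter reads 1500 pages and the ledger balance is 1200, the ledger is
 * credited 300; a counter that appears to have run backwards is left
 * untouched.
 */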
9558
9559 void
9560 task_ledger_settle(task_t t)
9561 {
9562 ledger_t ledger;
9563
9564 task_lock(t);
9565
9566 /* Settle pages grabbed */
9567 ledger = get_task_ledger(t);
9568 task_ledger_settle_counter(ledger, task_ledgers.pages_grabbed, &t->pages_grabbed);
9569 task_ledger_settle_counter(ledger, task_ledgers.pages_grabbed_kern, &t->pages_grabbed_kern);
9570 task_ledger_settle_counter(ledger, task_ledgers.pages_grabbed_iopl, &t->pages_grabbed_iopl);
9571 task_ledger_settle_counter(ledger, task_ledgers.pages_grabbed_upl, &t->pages_grabbed_upl);
9572
9573 #if CONFIG_MEMORYSTATUS
9574 /* Settle memorystatus dirty time */
9575 task_ledger_settle_dirty_time_locked(t);
9576 #endif
9577
9578 task_unlock(t);
9579 }
9580
9581 void
9582 task_set_memory_ownership_transfer(
9583 task_t task,
9584 boolean_t value)
9585 {
9586 task_lock(task);
9587 task->task_can_transfer_memory_ownership = !!value;
9588 task_unlock(task);
9589 }
9590
9591 #if DEVELOPMENT || DEBUG
9592
9593 void
9594 task_set_no_footprint_for_debug(task_t task, boolean_t value)
9595 {
9596 task_lock(task);
9597 task->task_no_footprint_for_debug = !!value;
9598 task_unlock(task);
9599 }
9600
9601 int
9602 task_get_no_footprint_for_debug(task_t task)
9603 {
9604 return task->task_no_footprint_for_debug;
9605 }
9606
9607 #endif /* DEVELOPMENT || DEBUG */
9608
9609 void
9610 task_copy_vmobjects(task_t task, vm_object_query_t query, size_t len, size_t *num)
9611 {
9612 vm_object_t find_vmo;
9613 size_t size = 0;
9614
9615 /*
9616 * Allocate a save area for FP state before taking task_objq lock,
9617 * if necessary, to ensure that VM_KERNEL_ADDRHASH() doesn't cause
9618 * an FP state allocation while holding VM locks.
9619 */
9620 ml_fp_save_area_prealloc();
9621
9622 task_objq_lock(task);
9623 if (query != NULL) {
9624 queue_iterate(&task->task_objq, find_vmo, vm_object_t, task_objq)
9625 {
9626 vm_object_query_t p = &query[size++];
9627
9628 /* make sure to not overrun */
9629 if (size * sizeof(vm_object_query_data_t) > len) {
9630 --size;
9631 break;
9632 }
9633
9634 bzero(p, sizeof(*p));
9635 p->object_id = (vm_object_id_t) VM_KERNEL_ADDRHASH(find_vmo);
9636 p->virtual_size = find_vmo->internal ? find_vmo->vo_size : 0;
9637 p->resident_size = find_vmo->resident_page_count * PAGE_SIZE;
9638 p->wired_size = find_vmo->wired_page_count * PAGE_SIZE;
9639 p->reusable_size = find_vmo->reusable_page_count * PAGE_SIZE;
9640 p->vo_no_footprint = find_vmo->vo_no_footprint;
9641 p->vo_ledger_tag = find_vmo->vo_ledger_tag;
9642 p->purgable = find_vmo->purgable;
9643
9644 if (find_vmo->internal && find_vmo->pager_created && find_vmo->pager != NULL) {
9645 p->compressed_size = vm_compressor_pager_get_count(find_vmo->pager) * PAGE_SIZE;
9646 } else {
9647 p->compressed_size = 0;
9648 }
9649 }
9650 } else {
9651 size = (size_t)task->task_owned_objects;
9652 }
9653 task_objq_unlock(task);
9654
9655 *num = size;
9656 }
9657
9658 void
9659 task_get_owned_vmobjects(task_t task, size_t buffer_size, vmobject_list_output_t buffer, size_t* output_size, size_t* entries)
9660 {
9661 assert(output_size);
9662 assert(entries);
9663
9664 /* copy the vmobjects and vmobject data out of the task */
9665 if (buffer_size == 0) {
9666 task_copy_vmobjects(task, NULL, 0, entries);
9667 *output_size = (*entries > 0) ? *entries * sizeof(vm_object_query_data_t) + sizeof(*buffer) : 0;
9668 } else {
9669 assert(buffer);
9670 task_copy_vmobjects(task, &buffer->data[0], buffer_size - sizeof(*buffer), entries);
9671 buffer->entries = (uint64_t)*entries;
9672 *output_size = *entries * sizeof(vm_object_query_data_t) + sizeof(*buffer);
9673 }
9674 }
9675
9676 static void
9677 task_store_owned_vmobject_info(task_t to_task, task_t from_task)
9678 {
9679 size_t buffer_size;
9680 vmobject_list_output_t buffer;
9681 size_t output_size;
9682 size_t entries;
9683
9684 /* get the size, allocate a buffer, and populate */
9685 entries = 0;
9686 output_size = 0;
9687 task_get_owned_vmobjects(from_task, 0, NULL, &output_size, &entries);
9688
9689 if (output_size) {
9690 buffer_size = output_size;
9691 buffer = kalloc_data(buffer_size, Z_WAITOK);
9692
9693 if (buffer) {
9694 entries = 0;
9695 output_size = 0;
9696
9697 task_get_owned_vmobjects(from_task, buffer_size, buffer, &output_size, &entries);
9698
9699 task_lock(to_task);
9700
9701 if (!entries || (to_task->corpse_vmobject_list != NULL)) {
9702 kfree_data(buffer, buffer_size);
9703 task_unlock(to_task);
9704 return;
9705 }
9706
9707 to_task->corpse_vmobject_list = buffer;
9708 to_task->corpse_vmobject_list_size = buffer_size;
9709
9710 task_unlock(to_task);
9711 }
9712 }
9713 }
9714
9715 void
9716 task_set_filter_msg_flag(
9717 task_t task,
9718 boolean_t flag)
9719 {
9720 assert(task != TASK_NULL);
9721
9722 if (flag) {
9723 task_ro_flags_set(task, TFRO_FILTER_MSG);
9724 } else {
9725 task_ro_flags_clear(task, TFRO_FILTER_MSG);
9726 }
9727 }
9728
9729 boolean_t
9730 task_get_filter_msg_flag(
9731 task_t task)
9732 {
9733 if (!task) {
9734 return false;
9735 }
9736
9737 return (task_ro_flags_get(task) & TFRO_FILTER_MSG) ? TRUE : FALSE;
9738 }
9739 bool
9740 task_is_exotic(
9741 task_t task)
9742 {
9743 if (task == TASK_NULL) {
9744 return false;
9745 }
9746 return vm_map_is_exotic(get_task_map(task));
9747 }
9748
9749 bool
9750 task_is_alien(
9751 task_t task)
9752 {
9753 if (task == TASK_NULL) {
9754 return false;
9755 }
9756 return vm_map_is_alien(get_task_map(task));
9757 }
9758
9759
9760
9761 #if CONFIG_MACF
9762 uint8_t *
9763 mac_task_get_mach_filter_mask(task_t task)
9764 {
9765 assert(task);
9766 return task_get_mach_trap_filter_mask(task);
9767 }
9768
9769 uint8_t *
9770 mac_task_get_kobj_filter_mask(task_t task)
9771 {
9772 assert(task);
9773 return task_get_mach_kobj_filter_mask(task);
9774 }
9775
9776 /* Set the filter mask for Mach traps. */
9777 void
9778 mac_task_set_mach_filter_mask(task_t task, uint8_t *maskptr)
9779 {
9780 assert(task);
9781
9782 task_set_mach_trap_filter_mask(task, maskptr);
9783 }
9784
9785 /* Set the filter mask for kobject msgs. */
9786 void
9787 mac_task_set_kobj_filter_mask(task_t task, uint8_t *maskptr)
9788 {
9789 assert(task);
9790
9791 task_set_mach_kobj_filter_mask(task, maskptr);
9792 }
9793
9794 /* Hook for mach trap/sc filter evaluation policy. */
9795 SECURITY_READ_ONLY_LATE(mac_task_mach_filter_cbfunc_t) mac_task_mach_trap_evaluate = NULL;
9796
9797 /* Hook for kobj message filter evaluation policy. */
9798 SECURITY_READ_ONLY_LATE(mac_task_kobj_filter_cbfunc_t) mac_task_kobj_msg_evaluate = NULL;
9799
9800 /* Set the callback hooks for the filtering policy. */
9801 int
9802 mac_task_register_filter_callbacks(
9803 const mac_task_mach_filter_cbfunc_t mach_cbfunc,
9804 const mac_task_kobj_filter_cbfunc_t kobj_cbfunc)
9805 {
9806 if (mach_cbfunc != NULL) {
9807 if (mac_task_mach_trap_evaluate != NULL) {
9808 return KERN_FAILURE;
9809 }
9810 mac_task_mach_trap_evaluate = mach_cbfunc;
9811 }
9812 if (kobj_cbfunc != NULL) {
9813 if (mac_task_kobj_msg_evaluate != NULL) {
9814 return KERN_FAILURE;
9815 }
9816 mac_task_kobj_msg_evaluate = kobj_cbfunc;
9817 }
9818
9819 return KERN_SUCCESS;
9820 }
9821 #endif /* CONFIG_MACF */
9822
9823 #if CONFIG_ROSETTA
9824 bool
9825 task_is_translated(task_t task)
9826 {
9827 extern boolean_t proc_is_translated(struct proc* p);
9828 return task && proc_is_translated(get_bsdtask_info(task));
9829 }
9830 #endif
9831
9832 /* Task runtime security mitigations configuration. */
9833 #define TASK_SECURITY_CONFIG_HELPER_DEFINE(suffix) \
9834 bool task_has_##suffix(task_t task) \
9835 { \
9836 assert(task); \
9837 return (task->security_config. suffix); \
9838 } \
9839 \
9840 void task_set_##suffix(task_t task) \
9841 { \
9842 assert(task);\
9843 task->security_config. suffix = true; \
9844 } \
9845 \
9846 void task_clear_##suffix(task_t task) \
9847 { \
9848 assert(task);\
9849 task->security_config. suffix = false; \
9850 }
9851
9852 uint32_t
9853 task_get_security_config(task_t task)
9854 {
9855 assert(task);
9856 return (uint32_t)(task->security_config.value);
9857 }
9858
9859 TASK_SECURITY_CONFIG_HELPER_DEFINE(hardened_heap)
9860 TASK_SECURITY_CONFIG_HELPER_DEFINE(tpro)
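/*
 * For reference, each TASK_SECURITY_CONFIG_HELPER_DEFINE(suffix) invocation
 * above expands to a trio of accessors; for hardened_heap:
 *
 *   bool task_has_hardened_heap(task_t task);    read the bit
 *   void task_set_hardened_heap(task_t task);    set it to true
 *   void task_clear_hardened_heap(task_t task);  set it to false
 *
 * all backed by the task->security_config.hardened_heap bitfield.
 */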
9861
9862
9863
9864 #if __has_feature(ptrauth_calls)
9865 /* On FPAC, we want to deliver all PAC violations as fatal exceptions, regardless
9866 * of the enable_pac_exception boot-arg value or any other entitlements.
9867 * The only case where we allow non-fatal PAC exceptions on FPAC is for debugging,
9868 * which requires Developer Mode enabled.
9869 *
9870 * On non-FPAC hardware, we gate the decision behind entitlements and the
9871 * enable_pac_exception boot-arg.
9872 */
9873 extern int gARM_FEAT_FPAC;
9874 /*
9875 * Having the PAC_EXCEPTION_ENTITLEMENT entitlement means we always enforce all
9876 * of the PAC exception hardening: fatal exceptions and signed user state.
9877 */
9878 #define PAC_EXCEPTION_ENTITLEMENT "com.apple.private.pac.exception"
9879 /*
9880 * On non-FPAC hardware, when enable_pac_exception boot-arg is set to true,
9881 * processes can choose to get non-fatal PAC exception delivery by setting
9882 * the SKIP_PAC_EXCEPTION_ENTITLEMENT entitlement.
9883 */
9884 #define SKIP_PAC_EXCEPTION_ENTITLEMENT "com.apple.private.skip.pac.exception"
9885
9886 void
9887 task_set_pac_exception_fatal_flag(
9888 task_t task)
9889 {
9890 assert(task != TASK_NULL);
9891 bool pac_hardened_task = false;
9892 uint32_t set_flags = 0;
9893
9894 /*
9895 * We must not apply this security policy to tasks which have opted out of mach hardening, to
9896 * avoid regressions in third-party plugins and third-party apps when using AMFI boot-args.
9897 */
9898 bool platform_binary = task_get_platform_binary(task);
9899 #if XNU_TARGET_OS_OSX
9900 platform_binary &= !task_opted_out_mach_hardening(task);
9901 #endif /* XNU_TARGET_OS_OSX */
9902
9903 /*
9904 * On non-FPAC hardware, we allow gating PAC exceptions behind
9905 * SKIP_PAC_EXCEPTION_ENTITLEMENT and the boot-arg.
9906 */
9907 if (!gARM_FEAT_FPAC && enable_pac_exception &&
9908 IOTaskHasEntitlement(task, SKIP_PAC_EXCEPTION_ENTITLEMENT)) {
9909 return;
9910 }
9911
9912 if (IOTaskHasEntitlement(task, PAC_EXCEPTION_ENTITLEMENT) || task_get_hardened_runtime(task)) {
9913 pac_hardened_task = true;
9914 set_flags |= TFRO_PAC_ENFORCE_USER_STATE;
9915 }
9916
9917 /* On non-FPAC hardware, gate the fatal property behind entitlements and boot-arg. */
9918 if (pac_hardened_task ||
9919 ((enable_pac_exception || gARM_FEAT_FPAC) && platform_binary)) {
9920 set_flags |= TFRO_PAC_EXC_FATAL;
9921 }
9922
9923 if (set_flags != 0) {
9924 task_ro_flags_set(task, set_flags);
9925 }
9926 }
9927
9928 bool
9929 task_is_pac_exception_fatal(
9930 task_t task)
9931 {
9932 assert(task != TASK_NULL);
9933 return !!(task_ro_flags_get(task) & TFRO_PAC_EXC_FATAL);
9934 }
9935 #endif /* __has_feature(ptrauth_calls) */
9936
9937 /*
9938 * FATAL_EXCEPTION_ENTITLEMENT, if present, will contain a list of
9939 * conditions for which access violations should deliver SIGKILL rather than
9940 * SIGSEGV. This is a hardening measure intended for use by applications
9941 * that are able to handle the stricter error handling behavior. Currently
9942 * this supports FATAL_EXCEPTION_ENTITLEMENT_JIT, which is documented in
9943 * user_fault_in_self_restrict_mode().
9944 */
9945 #define FATAL_EXCEPTION_ENTITLEMENT "com.apple.security.fatal-exceptions"
9946 #define FATAL_EXCEPTION_ENTITLEMENT_JIT "jit"
9947
9948
9949 void
9950 task_set_jit_flags(
9951 task_t task)
9952 {
9953 assert(task != TASK_NULL);
9954 if (IOTaskHasStringEntitlement(task, FATAL_EXCEPTION_ENTITLEMENT, FATAL_EXCEPTION_ENTITLEMENT_JIT)) {
9955 task_ro_flags_set(task, TFRO_JIT_EXC_FATAL);
9956 }
9957
9958 }
9959
9960 bool
9961 task_is_jit_exception_fatal(
9962 __unused task_t task)
9963 {
9964 #if !defined(XNU_PLATFORM_MacOSX)
9965 return true;
9966 #else
9967 assert(task != TASK_NULL);
9968 return !!(task_ro_flags_get(task) & TFRO_JIT_EXC_FATAL);
9969 #endif
9970 }
9971
9972 bool
9973 task_needs_user_signed_thread_state(
9974 task_t task)
9975 {
9976 assert(task != TASK_NULL);
9977 return !!(task_ro_flags_get(task) & TFRO_PAC_ENFORCE_USER_STATE);
9978 }
9979
9980 void
9981 task_set_tecs(task_t task)
9982 {
9983 if (task == TASK_NULL) {
9984 task = current_task();
9985 }
9986
9987 if (!machine_csv(CPUVN_CI)) {
9988 return;
9989 }
9990
9991 LCK_MTX_ASSERT(&task->lock, LCK_MTX_ASSERT_NOTOWNED);
9992
9993 task_lock(task);
9994
9995 task->t_flags |= TF_TECS;
9996
9997 thread_t thread;
9998 queue_iterate(&task->threads, thread, thread_t, task_threads) {
9999 machine_tecs(thread);
10000 }
10001 task_unlock(task);
10002 }
10003
10004 kern_return_t
10005 task_test_sync_upcall(
10006 task_t task,
10007 ipc_port_t send_port)
10008 {
10009 #if DEVELOPMENT || DEBUG
10010 if (task != current_task() || !IPC_PORT_VALID(send_port)) {
10011 return KERN_INVALID_ARGUMENT;
10012 }
10013
10014 /* Block on sync kernel upcall on the given send port */
10015 mach_test_sync_upcall(send_port);
10016
10017 ipc_port_release_send(send_port);
10018 return KERN_SUCCESS;
10019 #else
10020 (void)task;
10021 (void)send_port;
10022 return KERN_NOT_SUPPORTED;
10023 #endif
10024 }
10025
10026 kern_return_t
10027 task_test_async_upcall_propagation(
10028 task_t task,
10029 ipc_port_t send_port,
10030 int qos,
10031 int iotier)
10032 {
10033 #if DEVELOPMENT || DEBUG
10034 kern_return_t kr;
10035
10036 if (task != current_task() || !IPC_PORT_VALID(send_port)) {
10037 return KERN_INVALID_ARGUMENT;
10038 }
10039
10040 if (qos < THREAD_QOS_DEFAULT || qos > THREAD_QOS_USER_INTERACTIVE ||
10041 iotier < THROTTLE_LEVEL_START || iotier > THROTTLE_LEVEL_END) {
10042 return KERN_INVALID_ARGUMENT;
10043 }
10044
10045 struct thread_attr_for_ipc_propagation attr = {
10046 .tafip_iotier = iotier,
10047 .tafip_qos = qos
10048 };
10049
10050 /* Apply propagate attr to port */
10051 kr = ipc_port_propagate_thread_attr(send_port, attr);
10052 if (kr != KERN_SUCCESS) {
10053 return kr;
10054 }
10055
10056 thread_enable_send_importance(current_thread(), TRUE);
10057
10058 /* Perform an async kernel upcall on the given send port */
10059 mach_test_async_upcall(send_port);
10060 thread_enable_send_importance(current_thread(), FALSE);
10061
10062 ipc_port_release_send(send_port);
10063 return KERN_SUCCESS;
10064 #else
10065 (void)task;
10066 (void)send_port;
10067 (void)qos;
10068 (void)iotier;
10069 return KERN_NOT_SUPPORTED;
10070 #endif
10071 }
10072
10073 #if CONFIG_PROC_RESOURCE_LIMITS
10074 mach_port_name_t
10075 current_task_get_fatal_port_name(void)
10076 {
10077 mach_port_t task_fatal_port = MACH_PORT_NULL;
10078 mach_port_name_t port_name = 0;
10079
10080 task_fatal_port = task_allocate_fatal_port();
10081
10082 if (task_fatal_port) {
10083 ipc_object_copyout(current_space(), task_fatal_port,
10084 MACH_MSG_TYPE_PORT_SEND, IPC_OBJECT_COPYOUT_FLAGS_NONE,
10085 NULL, &port_name);
10086 }
10087
10088 return port_name;
10089 }
10090 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
10091
10092 #if defined(__x86_64__)
10093 bool
10094 curtask_get_insn_copy_optout(void)
10095 {
10096 bool optout;
10097 task_t cur_task = current_task();
10098
10099 task_lock(cur_task);
10100 optout = (cur_task->t_flags & TF_INSN_COPY_OPTOUT) ? true : false;
10101 task_unlock(cur_task);
10102
10103 return optout;
10104 }
10105
10106 void
10107 curtask_set_insn_copy_optout(void)
10108 {
10109 task_t cur_task = current_task();
10110
10111 task_lock(cur_task);
10112
10113 cur_task->t_flags |= TF_INSN_COPY_OPTOUT;
10114
10115 thread_t thread;
10116 queue_iterate(&cur_task->threads, thread, thread_t, task_threads) {
10117 machine_thread_set_insn_copy_optout(thread);
10118 }
10119 task_unlock(cur_task);
10120 }
10121 #endif /* defined(__x86_64__) */
10122
10123 void
10124 task_get_corpse_vmobject_list(task_t task, vmobject_list_output_t* list, size_t* list_size)
10125 {
10126 assert(task);
10127 assert(list_size);
10128
10129 *list = task->corpse_vmobject_list;
10130 *list_size = (size_t)task->corpse_vmobject_list_size;
10131 }
10132
10133 __abortlike
10134 static void
10135 panic_proc_ro_task_backref_mismatch(task_t t, proc_ro_t ro)
10136 {
10137 panic("proc_ro->task backref mismatch: t=%p, ro=%p, "
10138 "proc_ro_task(ro)=%p", t, ro, proc_ro_task(ro));
10139 }
10140
10141 proc_ro_t
10142 task_get_ro(task_t t)
10143 {
10144 proc_ro_t ro = (proc_ro_t)t->bsd_info_ro;
10145
10146 zone_require_ro(ZONE_ID_PROC_RO, sizeof(struct proc_ro), ro);
10147 if (__improbable(proc_ro_task(ro) != t)) {
10148 panic_proc_ro_task_backref_mismatch(t, ro);
10149 }
10150
10151 return ro;
10152 }
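/*
 * task_get_ro() is the choke point for reaching a task's read-only data:
 * zone_require_ro() proves the pointer really lives in the read-only
 * proc_ro zone, and the back-reference check proves it belongs to this
 * task, so a forged or stale bsd_info_ro pointer panics instead of being
 * dereferenced.
 */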
10153
10154 uint32_t
10155 task_ro_flags_get(task_t task)
10156 {
10157 return task_get_ro(task)->t_flags_ro;
10158 }
10159
10160 void
10161 task_ro_flags_set(task_t task, uint32_t flags)
10162 {
10163 zalloc_ro_update_field_atomic(ZONE_ID_PROC_RO, task_get_ro(task),
10164 t_flags_ro, ZRO_ATOMIC_OR_32, flags);
10165 }
10166
10167 void
10168 task_ro_flags_clear(task_t task, uint32_t flags)
10169 {
10170 zalloc_ro_update_field_atomic(ZONE_ID_PROC_RO, task_get_ro(task),
10171 t_flags_ro, ZRO_ATOMIC_AND_32, ~flags);
10172 }
10173
10174 task_control_port_options_t
10175 task_get_control_port_options(task_t task)
10176 {
10177 return task_get_ro(task)->task_control_port_options;
10178 }
10179
10180 void
10181 task_set_control_port_options(task_t task, task_control_port_options_t opts)
10182 {
10183 zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
10184 task_control_port_options, &opts);
10185 }
10186
10187 /*!
10188 * @function kdp_task_is_locked
10189 *
10190 * @abstract
10191 * Checks if task is locked.
10192 *
10193 * @discussion
10194 * NOT SAFE: To be used only by kernel debugger.
10195 *
10196 * @param task task to check
10197 *
10198 * @returns TRUE if the task is locked.
10199 */
10200 boolean_t
10201 kdp_task_is_locked(task_t task)
10202 {
10203 return kdp_lck_mtx_lock_spin_is_acquired(&task->lock);
10204 }
10205
10206 #if DEBUG || DEVELOPMENT
10207 /**
10208 *
10209 * Check if a threshold limit is valid based on the actual phys memory
10210 * limit. If they are the same, race conditions may arise, so we have to
10211 * prevent that from happening.
10212 */
10213 static diagthreshold_check_return
task_check_memorythreshold_is_valid(task_t task,uint64_t new_limit,bool is_diagnostics_value)10214 task_check_memorythreshold_is_valid(task_t task, uint64_t new_limit, bool is_diagnostics_value)
10215 {
10216 int phys_limit_mb;
10217 kern_return_t ret_value;
10218 bool threshold_enabled;
10219 bool dummy;
10220 ret_value = ledger_is_diag_threshold_enabled(task->ledger, task_ledgers.phys_footprint, &threshold_enabled);
10221 if (ret_value != KERN_SUCCESS) {
10222 return ret_value;
10223 }
10224 if (is_diagnostics_value == true) {
10225 ret_value = task_get_phys_footprint_limit(task, &phys_limit_mb);
10226 } else {
10227 uint64_t diag_limit;
10228 ret_value = task_get_diag_footprint_limit_internal(task, &diag_limit, &dummy);
10229 phys_limit_mb = (int)(diag_limit >> 20);
10230 }
10231 if (ret_value != KERN_SUCCESS) {
10232 return ret_value;
10233 }
10234 if (phys_limit_mb == (int) new_limit) {
10235 if (threshold_enabled == false) {
10236 return THRESHOLD_IS_SAME_AS_LIMIT_FLAG_DISABLED;
10237 } else {
10238 return THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED;
10239 }
10240 }
10241 if (threshold_enabled == false) {
10242 return THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED;
10243 } else {
10244 return THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_ENABLED;
10245 }
10246 }
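
/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * caller would treat only the "same as the limit while the threshold flag
 * is enabled" case as a hard rejection, since that is the combination
 * that can race. Error returns from the ledger are ignored for brevity.
 */
static __unused bool
example_new_diag_threshold_is_acceptable(task_t task, uint64_t new_limit_mb)
{
	diagthreshold_check_return check =
	    task_check_memorythreshold_is_valid(task, new_limit_mb, true);

	return check != THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED;
}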
#endif /* DEBUG || DEVELOPMENT */

#if CONFIG_EXCLAVES
kern_return_t
task_add_conclave(task_t task, void *vnode, int64_t off, const char *task_conclave_id)
{
	/*
	 * Only launchd or properly entitled tasks can attach tasks to
	 * conclaves.
	 */
	if (!exclaves_has_priv(current_task(), EXCLAVES_PRIV_CONCLAVE_SPAWN)) {
		return KERN_DENIED;
	}

	/*
	 * Only entitled tasks can have conclaves attached.
	 * Allow tasks which have the SPAWN privilege to also host conclaves.
	 * This allows xpcproxy to add a conclave before execing a daemon.
	 */
	if (!exclaves_has_priv_vnode(vnode, off, EXCLAVES_PRIV_CONCLAVE_HOST) &&
	    !exclaves_has_priv_vnode(vnode, off, EXCLAVES_PRIV_CONCLAVE_SPAWN)) {
		return KERN_DENIED;
	}

	return exclaves_conclave_attach(task_conclave_id, task);
}
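
/*
 * Illustrative sketch (not part of the original source): the attach
 * policy above decomposes into "may the caller spawn tasks into
 * conclaves" and "may the target binary host (or spawn into) one".
 * This hypothetical predicate restates the two checks in one place.
 */
static __unused bool
example_may_attach_conclave(void *vnode, int64_t off)
{
	return exclaves_has_priv(current_task(), EXCLAVES_PRIV_CONCLAVE_SPAWN) &&
	    (exclaves_has_priv_vnode(vnode, off, EXCLAVES_PRIV_CONCLAVE_HOST) ||
	    exclaves_has_priv_vnode(vnode, off, EXCLAVES_PRIV_CONCLAVE_SPAWN));
}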

kern_return_t
task_launch_conclave(mach_port_name_t port __unused)
{
	kern_return_t kr = KERN_FAILURE;
	assert3u(port, ==, MACH_PORT_NULL);
	exclaves_resource_t *conclave = task_get_conclave(current_task());
	if (conclave == NULL) {
		return kr;
	}

	kr = exclaves_conclave_launch(conclave);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	task_set_conclave_taint(current_task());

	return KERN_SUCCESS;
}

kern_return_t
task_inherit_conclave(task_t old_task, task_t new_task, void *vnode, int64_t off)
{
	if (old_task->conclave == NULL ||
	    !exclaves_conclave_is_attached(old_task->conclave)) {
		return KERN_SUCCESS;
	}

	/*
	 * Only launchd or properly entitled tasks can attach tasks to
	 * conclaves.
	 */
	if (!exclaves_has_priv(current_task(), EXCLAVES_PRIV_CONCLAVE_SPAWN)) {
		return KERN_DENIED;
	}

	/*
	 * Only entitled tasks can have conclaves attached.
	 */
	if (!exclaves_has_priv_vnode(vnode, off, EXCLAVES_PRIV_CONCLAVE_HOST)) {
		return KERN_DENIED;
	}

	return exclaves_conclave_inherit(old_task->conclave, old_task, new_task);
}

void
task_clear_conclave(task_t task)
{
	if (task->exclave_crash_info) {
		kfree_data(task->exclave_crash_info, CONCLAVE_CRASH_BUFFER_PAGECOUNT * PAGE_SIZE);
		task->exclave_crash_info = NULL;
	}

	if (task->conclave == NULL) {
		return;
	}

	/*
	 * XXX
	 * This should only fail if either the conclave is in an unexpected
	 * state (i.e. not ATTACHED) or if the wrong port is supplied.
	 * We should revisit this and make sure we guarantee the above
	 * constraints.
	 */
	__assert_only kern_return_t ret =
	    exclaves_conclave_detach(task->conclave, task);
	assert3u(ret, ==, KERN_SUCCESS);
}

void
task_stop_conclave(task_t task, bool gather_crash_bt)
{
	thread_t thread = current_thread();

	if (task->conclave == NULL) {
		return;
	}

	if (task_should_panic_on_exit_due_to_conclave_taint(task)) {
		panic("Conclave tainted task %p terminated\n", task);
	}

	/* Stash the task on current thread for conclave teardown */
	thread->conclave_stop_task = task;

	__assert_only kern_return_t ret =
	    exclaves_conclave_stop(task->conclave, gather_crash_bt);

	thread->conclave_stop_task = TASK_NULL;

	assert3u(ret, ==, KERN_SUCCESS);
}

void
task_suspend_conclave(task_t task)
{
	thread_t thread = current_thread();

	if (task->conclave == NULL) {
		return;
	}

	/* Stash the task on current thread for conclave teardown */
	thread->conclave_stop_task = task;

	__assert_only kern_return_t ret =
	    exclaves_conclave_suspend(task->conclave);

	thread->conclave_stop_task = TASK_NULL;

	assert3u(ret, ==, KERN_SUCCESS);
}

void
task_resume_conclave(task_t task)
{
	thread_t thread = current_thread();

	if (task->conclave == NULL) {
		return;
	}

	/* Stash the task on current thread for conclave teardown */
	thread->conclave_stop_task = task;

	__assert_only kern_return_t ret =
	    exclaves_conclave_resume(task->conclave);

	thread->conclave_stop_task = TASK_NULL;

	assert3u(ret, ==, KERN_SUCCESS);
}
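
/*
 * Illustrative sketch (not part of the original source): the stop,
 * suspend and resume paths above share one shape, so a hypothetical
 * wrapper could centralize the conclave_stop_task stash protocol that
 * lets conclave teardown upcalls find the affected task.
 */
static __unused kern_return_t
example_with_conclave_stop_task(task_t task,
    kern_return_t (^conclave_op)(exclaves_resource_t *))
{
	thread_t thread = current_thread();
	kern_return_t kr;

	if (task->conclave == NULL) {
		return KERN_SUCCESS;
	}

	/* Stash the task so teardown upcalls can find it, as above. */
	thread->conclave_stop_task = task;
	kr = conclave_op(task->conclave);
	thread->conclave_stop_task = TASK_NULL;

	return kr;
}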

kern_return_t
task_stop_conclave_upcall(void)
{
	task_t task = current_task();

	if (task->conclave == NULL) {
		return KERN_INVALID_TASK;
	}

	return exclaves_conclave_stop_upcall(task->conclave);
}

kern_return_t
task_stop_conclave_upcall_complete(void)
{
	task_t task = current_task();
	thread_t thread = current_thread();

	if (!(thread->th_exclaves_state & TH_EXCLAVES_STOP_UPCALL_PENDING)) {
		return KERN_SUCCESS;
	}

	assert3p(task->conclave, !=, NULL);

	return exclaves_conclave_stop_upcall_complete(task->conclave, task);
}

kern_return_t
task_suspend_conclave_upcall(uint64_t *scid_list, size_t scid_list_count)
{
	task_t task = current_task();
	thread_t thread;
	size_t scid_count = 0;
	kern_return_t kr;

	if (task->conclave == NULL) {
		return KERN_INVALID_TASK;
	}

	kr = task_hold_and_wait(task, false);

	task_lock(task);
	queue_iterate(&task->threads, thread, thread_t, task_threads)
	{
		if (thread->th_exclaves_state & TH_EXCLAVES_RPC) {
			/* Bounds-check before storing so a full (or zero-length) list is safe. */
			if (scid_count >= scid_list_count) {
				break;
			}
			scid_list[scid_count++] = thread->th_exclaves_ipc_ctx.scid;
		}
	}
	task_unlock(task);

	return kr;
}
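
/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * caller passes a fixed-size array and receives at most that many
 * secure-context IDs, one per thread currently in exclaves RPC.
 */
#define EXAMPLE_MAX_SCIDS 16

static __unused kern_return_t
example_collect_rpc_scids(void)
{
	uint64_t scids[EXAMPLE_MAX_SCIDS] = { 0 };

	return task_suspend_conclave_upcall(scids, EXAMPLE_MAX_SCIDS);
}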

kern_return_t
task_crash_info_conclave_upcall(task_t task, const struct conclave_sharedbuffer_t *shared_buf,
    uint32_t length)
{
	if (task->conclave == NULL) {
		return KERN_INVALID_TASK;
	}

	/* Allocate a kernel copy of the shared buffer and memcpy into it */
	int task_crash_info_buffer_size = 0;
	uint8_t *task_crash_info_buffer;

	if (!length) {
		printf("Conclave upcall: task_crash_info_conclave_upcall did not return any page addresses\n");
		return KERN_INVALID_ARGUMENT;
	}

	task_crash_info_buffer_size = CONCLAVE_CRASH_BUFFER_PAGECOUNT * PAGE_SIZE;
	assert3u(task_crash_info_buffer_size, >=, length);

	task_crash_info_buffer = kalloc_data(task_crash_info_buffer_size, Z_WAITOK);
	if (!task_crash_info_buffer) {
		panic("task_crash_info_conclave_upcall: cannot allocate buffer for task_info shared memory");
		return KERN_INVALID_ARGUMENT;
	}

	uint8_t *dst = task_crash_info_buffer;
	uint32_t remaining = length;
	for (size_t i = 0; i < CONCLAVE_CRASH_BUFFER_PAGECOUNT; i++) {
		if (remaining) {
			memcpy(dst, (uint8_t *)phystokv((pmap_paddr_t)shared_buf->physaddr[i]), PAGE_SIZE);
			remaining = (remaining >= PAGE_SIZE) ? remaining - PAGE_SIZE : 0;
			dst += PAGE_SIZE;
		}
	}

	/*
	 * Hand the buffer to the task if it is still active and has no crash
	 * info yet; otherwise free the copy we just made.
	 */
	task_lock(task);
	if (task->exclave_crash_info == NULL && task->active) {
		task->exclave_crash_info = task_crash_info_buffer;
		task->exclave_crash_info_length = length;
		task_crash_info_buffer = NULL;
	}
	task_unlock(task);

	if (task_crash_info_buffer) {
		kfree_data(task_crash_info_buffer, task_crash_info_buffer_size);
	}

	return KERN_SUCCESS;
}

exclaves_resource_t *
task_get_conclave(task_t task)
{
	return task->conclave;
}

extern boolean_t IOPMRootDomainGetWillShutdown(void);

/*
 * Do not taint processes when they talk to a conclave, so the system does
 * not panic when they exit.
 */
TUNABLE(bool, disable_conclave_taint, "disable_conclave_taint", true);

static bool
task_should_panic_on_exit_due_to_conclave_taint(task_t task)
{
	/* Check if the boot-arg to disable conclave taint is set */
	if (disable_conclave_taint) {
		return false;
	}

	/* Check if the system is shutting down */
	if (IOPMRootDomainGetWillShutdown()) {
		return false;
	}

	return task_is_conclave_tainted(task);
}

static bool
task_is_conclave_tainted(task_t task)
{
	return (task->t_exclave_state & TES_CONCLAVE_TAINTED) != 0 &&
	    !(task->t_exclave_state & TES_CONCLAVE_UNTAINTABLE);
}

static void
task_set_conclave_taint(task_t task)
{
	os_atomic_or(&task->t_exclave_state, TES_CONCLAVE_TAINTED, relaxed);
}

void
task_set_conclave_untaintable(task_t task)
{
	os_atomic_or(&task->t_exclave_state, TES_CONCLAVE_UNTAINTABLE, relaxed);
}
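
/*
 * Illustrative sketch (not part of the original source): taint is a
 * one-way OR into t_exclave_state, and TES_CONCLAVE_UNTAINTABLE acts as
 * a standing veto, so a task marked untaintable never reads back as
 * tainted regardless of ordering. The helper name is hypothetical.
 */
static __unused void
example_exempt_task_from_conclave_taint_panic(task_t task)
{
	task_set_conclave_untaintable(task);
	/* Even a previously tainted task is no longer considered tainted. */
	assert(!task_is_conclave_tainted(task));
}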

void
task_add_conclave_crash_info(task_t task, void *crash_info_ptr)
{
	__block kern_return_t error = KERN_SUCCESS;
	tb_error_t tberr = TB_ERROR_SUCCESS;
	void *crash_info;
	uint32_t crash_info_length = 0;

	if (task->conclave == NULL) {
		return;
	}

	if (task->exclave_crash_info_length == 0) {
		return;
	}

	error = kcdata_add_container_marker(crash_info_ptr, KCDATA_TYPE_CONTAINER_BEGIN,
	    STACKSHOT_KCCONTAINER_EXCLAVES, 0);
	if (error != KERN_SUCCESS) {
		return;
	}

	crash_info = task->exclave_crash_info;
	crash_info_length = task->exclave_crash_info_length;

	tberr = stackshot_stackshotresult__unmarshal(crash_info,
	    (uint64_t)crash_info_length, ^(stackshot_stackshotresult_s result){
		error = stackshot_exclaves_process_stackshot(&result, crash_info_ptr, false);
		if (error != KERN_SUCCESS) {
			printf("task_add_conclave_crash_info: error processing stackshot result %d\n", error);
		}
	});
	if (tberr != TB_ERROR_SUCCESS) {
		printf("task_conclave_crash: task_add_conclave_crash_info could not unmarshal stackshot data 0x%x\n", tberr);
		error = KERN_FAILURE;
	}

	kcdata_add_container_marker(crash_info_ptr, KCDATA_TYPE_CONTAINER_END,
	    STACKSHOT_KCCONTAINER_EXCLAVES, 0);
}

#endif /* CONFIG_EXCLAVES */

/* defined in bsd/kern/kern_proc.c */
extern void proc_name(int pid, char *buf, int size);
extern const char *proc_best_name(struct proc *p);

void
task_procname(task_t task, char *buf, int size)
{
	proc_name(task_pid(task), buf, size);
}

const char *
task_best_name(task_t task)
{
	return proc_best_name(task_get_proc_raw(task));
}

/*
 * Set AST_MACH_EXCEPTION on all threads owned by this task.
 * Called with the task locked.
 */
void
task_set_ast_mach_exception(task_t task)
{
	spl_t s = splsched();

	/* Set an AST on each of the task's threads, sending IPIs if needed */
	thread_t thread;
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		if (thread == current_thread()) {
			thread_ast_set(thread, AST_MACH_EXCEPTION);
			ast_propagate(thread);
		} else {
			processor_t processor;

			thread_lock(thread);
			thread_ast_set(thread, AST_MACH_EXCEPTION);
			processor = thread->last_processor;
			if (processor != PROCESSOR_NULL &&
			    processor->state == PROCESSOR_RUNNING &&
			    processor->active_thread == thread) {
				cause_ast_check(processor);
			}
			thread_unlock(thread);
		}
	}

	splx(s);
}
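
/*
 * Illustrative sketch (not part of the original source): per the locking
 * contract above, a caller takes the task lock around
 * task_set_ast_mach_exception() so the thread list cannot change while
 * the ASTs are posted. The helper name is hypothetical.
 */
static __unused void
example_post_mach_exception_asts(task_t task)
{
	task_lock(task);
	task_set_ast_mach_exception(task);
	task_unlock(task);
}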