/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * File:	kern/task.c
 * Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
 *		David Black
 *
 * Task management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL). All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 * Copyright (c) 2005 SPARTA, Inc.
 */

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_priv.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <mach/mach_vm.h>
#include <mach/semaphore.h>
#include <mach/task_info.h>
#include <mach/task_inspect.h>
#include <mach/task_special_ports.h>
#include <mach/sdt.h>
#include <mach/mach_test_upcall.h>

#include <ipc/ipc_importance.h>
#include <ipc/ipc_types.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_hash.h>
#include <ipc/ipc_policy.h>

#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/coalition.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/kern_cdata.h>
#include <kern/processor.h>
#include <kern/recount.h>
#include <kern/sched_prim.h>    /* for thread_wakeup */
#include <kern/ipc_tt.h>
#include <kern/host.h>
#include <kern/clock.h>
#include <kern/timer.h>
#include <kern/assert.h>
#include <kern/affinity.h>
#include <kern/exc_resource.h>
#include <kern/machine.h>
#include <kern/policy_internal.h>
#include <kern/restartable.h>
#include <kern/ipc_kobject.h>

#include <corpses/task_corpse.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif

#if CONFIG_PERVASIVE_CPI
#include <kern/monotonic.h>
#include <machine/monotonic.h>
#endif /* CONFIG_PERVASIVE_CPI */

#if CONFIG_EXCLAVES
#include "exclaves_boot.h"
#include "exclaves_resource.h"
#include "exclaves_inspection.h"
#include "exclaves_conclave.h"
#endif /* CONFIG_EXCLAVES */

#include <os/log.h>

#include <vm/pmap.h>
#include <vm/vm_map_xnu.h>
#include <vm/vm_kern_xnu.h>     /* for kernel_map, ipc_kernel_map */
#include <vm/vm_pageout_xnu.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_xnu.h>
#include <vm/vm_compressor_pager_xnu.h>
#include <vm/vm_reclaim_xnu.h>
#include <vm/vm_compressor_xnu.h>

#include <sys/kdebug.h>
#include <sys/proc_ro.h>
#include <sys/resource.h>
#include <sys/signalvar.h> /* for coredump */
#include <sys/bsdtask_info.h>
#include <sys/kdebug_triage.h>
#include <sys/code_signing.h> /* for is_address_space_debugged */
#include <sys/reason.h>

/*
 * Exported interfaces
 */

#include <mach/task_server.h>
#include <mach/mach_host_server.h>
#include <mach/mach_port_server.h>

#include <vm/vm_shared_region_xnu.h>

#include <libkern/OSDebug.h>
#include <libkern/OSAtomic.h>
#include <libkern/section_keywords.h>

#include <mach-o/loader.h>
#include <kdp/kdp_dyld.h>

#include <kern/sfi.h>           /* picks up ledger.h */

#if CONFIG_MACF
#include <security/mac_mach_internal.h>
#endif

#include <IOKit/IOBSD.h>
#include <kdp/processor_core.h>

#if defined (__arm64__)
#include <pexpert/arm64/board_config.h>
#endif

#include <string.h>

#if KPERF
extern int kpc_force_all_ctrs(task_t, int);
#endif

SECURITY_READ_ONLY_LATE(task_t) kernel_task;

int64_t next_taskuniqueid = 0;
const size_t task_alignment = _Alignof(struct task);
extern const size_t proc_alignment;
extern size_t proc_struct_size;
extern size_t proc_and_task_size;
size_t task_struct_size;

extern int large_corpse_count;

extern boolean_t proc_send_synchronous_EXC_RESOURCE(void *p);
extern boolean_t proc_is_simulated(const proc_t);

static void task_port_no_senders(ipc_port_t, mach_msg_type_number_t);
static void task_port_with_flavor_no_senders(ipc_port_t, mach_msg_type_number_t);
static void task_suspension_no_senders(ipc_port_t, mach_msg_type_number_t);
static inline void task_zone_init(void);

static void task_store_owned_vmobject_info(task_t to_task, task_t from_task);
static void task_set_control_port_options(task_t task, task_control_port_options_t opts);

#if CONFIG_EXCLAVES
static bool task_should_panic_on_exit_due_to_conclave_taint(task_t task);
static bool task_is_conclave_tainted(task_t task);
static void task_set_conclave_taint(task_t task);
kern_return_t task_crash_info_conclave_upcall(task_t task,
    const struct conclave_sharedbuffer_t *shared_buf, uint32_t length);
#endif /* CONFIG_EXCLAVES */

IPC_KOBJECT_DEFINE(IKOT_TASK_NAME,
    .iko_op_movable_send = true);
IPC_KOBJECT_DEFINE(IKOT_TASK_CONTROL,
    .iko_op_no_senders = task_port_no_senders,
    .iko_op_movable_send = true, /* see ipc_should_mark_immovable_send */
    .iko_op_label_free = ipc_kobject_label_free);
IPC_KOBJECT_DEFINE(IKOT_TASK_READ,
    .iko_op_no_senders = task_port_with_flavor_no_senders,
    .iko_op_label_free = ipc_kobject_label_free);
IPC_KOBJECT_DEFINE(IKOT_TASK_INSPECT,
    .iko_op_no_senders = task_port_with_flavor_no_senders);
IPC_KOBJECT_DEFINE(IKOT_TASK_RESUME,
    .iko_op_movable_send = true,
    .iko_op_no_senders = task_suspension_no_senders);

#if CONFIG_PROC_RESOURCE_LIMITS
static void task_fatal_port_no_senders(ipc_port_t, mach_msg_type_number_t);
static mach_port_t task_allocate_fatal_port(void);

IPC_KOBJECT_DEFINE(IKOT_TASK_FATAL,
    .iko_op_movable_send = true,
    .iko_op_stable = true,
    .iko_op_no_senders = task_fatal_port_no_senders);

extern void task_id_token_set_port(task_id_token_t token, ipc_port_t port);
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

/* Flag set by core audio when audio is playing. Used to stifle EXC_RESOURCE generation when active. */
int audio_active = 0;

/*
 * structure for tracking zone usage
 * Used either one per task/thread for all zones or <per-task,per-zone>.
 */
typedef struct zinfo_usage_store_t {
    /* These fields may be updated atomically, and so must be 8 byte aligned */
    uint64_t alloc __attribute__((aligned(8)));  /* allocation counter */
    uint64_t free __attribute__((aligned(8)));   /* free counter */
} zinfo_usage_store_t;
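
/*
 * Illustrative sketch (not from the original source): because both counters
 * are 8-byte aligned, accounting paths can bump them with plain 64-bit
 * atomic adds rather than taking a lock. A hypothetical helper might look
 * like:
 *
 *	static inline void
 *	zinfo_usage_record_alloc(zinfo_usage_store_t *usage, uint64_t size)
 *	{
 *		os_atomic_add(&usage->alloc, size, relaxed);
 *	}
 *
 * zinfo_usage_record_alloc is a made-up name; os_atomic_add is xnu's
 * atomic-add helper.
 */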

/**
 * Return codes related to diag threshold and memory limit
 */
__options_decl(diagthreshold_check_return, int, {
    THRESHOLD_IS_SAME_AS_LIMIT_FLAG_DISABLED = 0,
    THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED = 1,
    THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED = 2,
    THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_ENABLED = 3,
});

/**
 * Return codes related to diag threshold and memory limit
 */
__options_decl(current_, int, {
    THRESHOLD_IS_SAME_AS_LIMIT = 0,
    THRESHOLD_IS_NOT_SAME_AS_LIMIT = 1
});

zinfo_usage_store_t tasks_tkm_private;
zinfo_usage_store_t tasks_tkm_shared;

/* A container to accumulate statistics for expired tasks */
expired_task_statistics_t dead_task_statistics;
LCK_SPIN_DECLARE_ATTR(dead_task_statistics_lock, &task_lck_grp, &task_lck_attr);

ledger_template_t task_ledger_template = NULL;

/* global lock for task_dyld_process_info_notify_{register, deregister, get_trap} */
LCK_GRP_DECLARE(g_dyldinfo_mtx_grp, "g_dyldinfo");
LCK_MTX_DECLARE(g_dyldinfo_mtx, &g_dyldinfo_mtx_grp);

SECURITY_READ_ONLY_LATE(struct _task_ledger_indices) task_ledgers __attribute__((used)) =
{.cpu_time = -1,
 .tkm_private = -1,
 .tkm_shared = -1,
 .phys_mem = -1,
 .wired_mem = -1,
 .internal = -1,
 .iokit_mapped = -1,
 .external = -1,
 .reusable = -1,
 .alternate_accounting = -1,
 .alternate_accounting_compressed = -1,
 .page_table = -1,
 .phys_footprint = -1,
 .internal_compressed = -1,
 .purgeable_volatile = -1,
 .purgeable_nonvolatile = -1,
 .purgeable_volatile_compressed = -1,
 .purgeable_nonvolatile_compressed = -1,
 .tagged_nofootprint = -1,
 .tagged_footprint = -1,
 .tagged_nofootprint_compressed = -1,
 .tagged_footprint_compressed = -1,
 .network_volatile = -1,
 .network_nonvolatile = -1,
 .network_volatile_compressed = -1,
 .network_nonvolatile_compressed = -1,
 .media_nofootprint = -1,
 .media_footprint = -1,
 .media_nofootprint_compressed = -1,
 .media_footprint_compressed = -1,
 .graphics_nofootprint = -1,
 .graphics_footprint = -1,
 .graphics_nofootprint_compressed = -1,
 .graphics_footprint_compressed = -1,
 .neural_nofootprint = -1,
 .neural_footprint = -1,
 .neural_nofootprint_compressed = -1,
 .neural_footprint_compressed = -1,
 .neural_nofootprint_total = -1,
 .platform_idle_wakeups = -1,
 .interrupt_wakeups = -1,
#if CONFIG_SCHED_SFI
 .sfi_wait_times = { 0 /* initialized at runtime */},
#endif /* CONFIG_SCHED_SFI */
 .cpu_time_billed_to_me = -1,
 .cpu_time_billed_to_others = -1,
 .physical_writes = -1,
 .logical_writes = -1,
 .logical_writes_to_external = -1,
 .pages_grabbed = -1,
 .pages_grabbed_kern = -1,
 .pages_grabbed_iopl = -1,
 .pages_grabbed_upl = -1,
#if CONFIG_FREEZE
 .frozen_to_swap = -1,
#endif /* CONFIG_FREEZE */
 .energy_billed_to_me = -1,
 .energy_billed_to_others = -1,
#if CONFIG_PHYS_WRITE_ACCT
 .fs_metadata_writes = -1,
#endif /* CONFIG_PHYS_WRITE_ACCT */
#if CONFIG_MEMORYSTATUS
 .memorystatus_dirty_time = -1,
#endif /* CONFIG_MEMORYSTATUS */
 .swapins = -1,
 .conclave_mem = -1, };

/* System sleep state */
boolean_t tasks_suspend_state;

__options_decl(send_exec_resource_is_fatal, bool, {
    IS_NOT_FATAL = false,
    IS_FATAL = true
});

__options_decl(send_exec_resource_is_diagnostics, bool, {
    IS_NOT_DIAGNOSTICS = false,
    IS_DIAGNOSTICS = true
});

__options_decl(send_exec_resource_is_warning, bool, {
    IS_NOT_WARNING = false,
    IS_WARNING = true
});

__options_decl(send_exec_resource_options_t, uint8_t, {
    EXEC_RESOURCE_FATAL = 0x01,
    EXEC_RESOURCE_DIAGNOSTIC = 0x02,
    EXEC_RESOURCE_WARNING = 0x04,
    EXEC_RESOURCE_CONCLAVE = 0x08 // A side memory limit independent of the main footprint.
});
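
/*
 * Illustrative note (not from the original source): these option bits
 * compose with bitwise OR, so a fatal diagnostics-threshold notification
 * could be described as (EXEC_RESOURCE_FATAL | EXEC_RESOURCE_DIAGNOSTIC).
 */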

/**
 * Actions to take when a process has reached the memory limit or the diagnostics threshold limits
 */
static inline void task_process_crossed_limit_no_diag(task_t task, ledger_amount_t ledger_limit_size, bool memlimit_is_fatal, bool memlimit_is_active, send_exec_resource_is_warning is_warning);
#if DEBUG || DEVELOPMENT
static inline void task_process_crossed_limit_diag(ledger_amount_t ledger_limit_size);
#endif
void init_task_ledgers(void);
void task_footprint_exceeded(int warning, __unused const void *param0, __unused const void *param1);
void task_wakeups_rate_exceeded(int warning, __unused const void *param0, __unused const void *param1);
void task_io_rate_exceeded(int warning, const void *param0, __unused const void *param1);
void task_conclave_mem_limit_exceeded(int warning, __unused const void *param0, __unused const void *param1);
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void);
void __attribute__((noinline)) PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, send_exec_resource_options_t exception_options);
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor);
#if CONFIG_PROC_RESOURCE_LIMITS
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task_t task, int current_size, int soft_limit, int hard_limit);
mach_port_name_t current_task_get_fatal_port_name(void);
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_KQWORKLOOPS(task_t task, int current_size, int soft_limit, int hard_limit);
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

kern_return_t task_suspend_internal(task_t);
kern_return_t task_resume_internal(task_t);
static kern_return_t task_start_halt_locked(task_t task, boolean_t should_mark_corpse);

extern kern_return_t iokit_task_terminate(task_t task, int phase);
extern void iokit_task_app_suspended_changed(task_t task);

extern kern_return_t exception_deliver(thread_t, exception_type_t, mach_exception_data_t, mach_msg_type_number_t, struct exception_action *, lck_mtx_t *);
extern void bsd_copythreadname(void *dst_uth, void *src_uth);
extern kern_return_t thread_resume(thread_t thread);

// Condition to include diag footprints
#define RESETTABLE_DIAG_FOOTPRINT_LIMITS ((DEBUG || DEVELOPMENT) && CONFIG_MEMORYSTATUS)

// Warn tasks when they hit 80% of their memory limit.
#define PHYS_FOOTPRINT_WARNING_LEVEL 80

#define TASK_WAKEUPS_MONITOR_DEFAULT_LIMIT      150 /* wakeups per second */
#define TASK_WAKEUPS_MONITOR_DEFAULT_INTERVAL   300 /* in seconds. */

/*
 * Level (in terms of percentage of the limit) at which the wakeups monitor triggers telemetry.
 *
 * (i.e., when the task's wakeups rate exceeds 70% of the limit, start taking user
 * stacktraces, a.k.a. micro-stackshots)
 */
#define TASK_WAKEUPS_MONITOR_DEFAULT_USTACKSHOTS_TRIGGER 70
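
/*
 * Worked example (illustrative, using the defaults above): with a monitor
 * rate of 150 wakeups/second and a 70% trigger, micro-stackshot telemetry
 * starts once a task sustains more than 150 * 70 / 100 = 105 wakeups per
 * second over the monitored interval.
 */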

int task_wakeups_monitor_interval; /* In seconds. Time period over which wakeups rate is observed */
int task_wakeups_monitor_rate;     /* In hz. Maximum allowable wakeups per task before EXC_RESOURCE is sent */

unsigned int task_wakeups_monitor_ustackshots_trigger_pct; /* Percentage. Level at which we start gathering telemetry. */

TUNABLE(bool, disable_exc_resource, "disable_exc_resource", false); /* Global override to suppress EXC_RESOURCE for resource monitor violations. */
TUNABLE(bool, disable_exc_resource_during_audio, "disable_exc_resource_during_audio", true); /* Global override to suppress EXC_RESOURCE while audio is active */

ledger_amount_t max_task_footprint = 0;             /* Per-task limit on physical memory consumption in bytes */
unsigned int max_task_footprint_warning_level = 0;  /* Per-task limit warning percentage */

/*
 * Configure per-task memory limit.
 * The boot-arg is interpreted as Megabytes,
 * and takes precedence over the device tree.
 * Setting the boot-arg to 0 disables task limits.
 */
TUNABLE_DT_WRITEABLE(int, max_task_footprint_mb, "/defaults", "kern.max_task_pmem", "max_task_pmem", 0, TUNABLE_DT_NONE);
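
/*
 * Example (illustrative): booting with max_task_pmem=2048 caps each task's
 * physical footprint at 2048 MB, overriding any kern.max_task_pmem value
 * from the device tree's /defaults node; max_task_pmem=0 disables the
 * per-task limit.
 */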

/* I/O Monitor Limits */
#define IOMON_DEFAULT_LIMIT     (20480ull)  /* MB of logical/physical I/O */
#define IOMON_DEFAULT_INTERVAL  (86400ull)  /* in seconds */

uint64_t task_iomon_limit_mb;       /* Per-task I/O monitor limit in MBs */
uint64_t task_iomon_interval_secs;  /* Per-task I/O monitor interval in secs */

#define IO_TELEMETRY_DEFAULT_LIMIT  (10ll * 1024ll * 1024ll)
int64_t io_telemetry_limit; /* Threshold to take a microstackshot (0 indicates I/O telemetry is turned off) */
int64_t global_logical_writes_count = 0;              /* Global count for logical writes */
int64_t global_logical_writes_to_external_count = 0;  /* Global count for logical writes to external storage */
static boolean_t global_update_logical_writes(int64_t, int64_t*);

#if DEBUG || DEVELOPMENT
static diagthreshold_check_return task_check_memorythreshold_is_valid(task_t task, uint64_t new_limit, bool is_diagnostics_value);
#endif
#define TASK_MAX_THREAD_LIMIT 256

#if MACH_ASSERT
int pmap_ledgers_panic = 1;
int pmap_ledgers_panic_leeway = 3;
#endif /* MACH_ASSERT */

int task_max = CONFIG_TASK_MAX; /* Max number of tasks */

#if CONFIG_COREDUMP
int hwm_user_cores = 0; /* high watermark violations generate user core files */
#endif

#ifdef MACH_BSD
extern uint32_t proc_platform(const struct proc *);
extern uint32_t proc_sdk(struct proc *);
extern void proc_getexecutableuuid(void *, unsigned char *, unsigned long);
extern int proc_pid(struct proc *p);
extern int proc_selfpid(void);
extern struct proc *current_proc(void);
extern char *proc_name_address(struct proc *p);
extern uint64_t get_dispatchqueue_offset_from_proc(void *);
extern int kevent_proc_copy_uptrs(void *proc, uint64_t *buf, uint32_t bufsize);
extern void workq_proc_suspended(struct proc *p);
extern void workq_proc_resumed(struct proc *p);
extern struct proc *kernproc;
extern void * XNU_PTRAUTH_SIGNED_PTR("initproc") initproc;

#if CONFIG_MEMORYSTATUS
extern void proc_memstat_skip(struct proc* p, boolean_t set);
extern void memorystatus_on_ledger_footprint_exceeded(int warning, bool memlimit_is_active, bool memlimit_is_fatal);
extern void memorystatus_log_exception(const int max_footprint_mb, bool memlimit_is_active, bool memlimit_is_fatal);
extern void memorystatus_log_diag_threshold_exception(const int diag_threshold_value);
extern void memorystatus_on_conclave_limit_exceeded(const int max_footprint_mb);
extern boolean_t memorystatus_allowed_vm_map_fork(task_t task, bool *is_large);
extern uint64_t memorystatus_available_memory_internal(struct proc *p);

#if DEVELOPMENT || DEBUG
extern void memorystatus_abort_vm_map_fork(task_t);
#endif

#endif /* CONFIG_MEMORYSTATUS */

#endif /* MACH_BSD */

/* Boot-arg that turns on fatal pac exception delivery for all first-party apps */
static TUNABLE(bool, enable_pac_exception, "enable_pac_exception", false);

/*
 * Defaults for controllable EXC_GUARD behaviors
 *
 * Internal builds are fatal by default (except BRIDGE).
 * Create an alternate set of defaults for special processes by name.
 */
struct task_exc_guard_named_default {
    char *name;
    uint32_t behavior;
};
#define _TASK_EXC_GUARD_MP_CORPSE  (TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_MP_CORPSE)
#define _TASK_EXC_GUARD_MP_ONCE    (_TASK_EXC_GUARD_MP_CORPSE | TASK_EXC_GUARD_MP_ONCE)
#define _TASK_EXC_GUARD_MP_FATAL   (TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_MP_FATAL)

#define _TASK_EXC_GUARD_VM_CORPSE  (TASK_EXC_GUARD_VM_DELIVER | TASK_EXC_GUARD_VM_CORPSE)
#define _TASK_EXC_GUARD_VM_ONCE    (_TASK_EXC_GUARD_VM_CORPSE | TASK_EXC_GUARD_VM_ONCE)
#define _TASK_EXC_GUARD_VM_FATAL   (TASK_EXC_GUARD_VM_DELIVER | TASK_EXC_GUARD_VM_FATAL)

#define _TASK_EXC_GUARD_ALL_CORPSE (_TASK_EXC_GUARD_MP_CORPSE | _TASK_EXC_GUARD_VM_CORPSE)
#define _TASK_EXC_GUARD_ALL_ONCE   (_TASK_EXC_GUARD_MP_ONCE | _TASK_EXC_GUARD_VM_ONCE)
#define _TASK_EXC_GUARD_ALL_FATAL  (_TASK_EXC_GUARD_MP_FATAL | _TASK_EXC_GUARD_VM_FATAL)

/* cannot turn off FATAL and DELIVER bit if set */
uint32_t task_exc_guard_no_unset_mask = TASK_EXC_GUARD_MP_FATAL | TASK_EXC_GUARD_VM_FATAL |
    TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_VM_DELIVER;
/* cannot turn on ONCE bit if unset */
uint32_t task_exc_guard_no_set_mask = TASK_EXC_GUARD_MP_ONCE | TASK_EXC_GUARD_VM_ONCE;

#if !defined(XNU_TARGET_OS_BRIDGE)

uint32_t task_exc_guard_default = _TASK_EXC_GUARD_ALL_FATAL;
uint32_t task_exc_guard_config_mask = TASK_EXC_GUARD_MP_ALL | TASK_EXC_GUARD_VM_ALL;
/*
 * These "by-process-name" default overrides are intended to be a short-term fix to
 * quickly get over races between changes introducing new EXC_GUARD raising behaviors
 * in some process and a change in default behavior for same. We should ship with
 * these lists empty (by fixing the bugs, or explicitly changing the task's EXC_GUARD
 * exception behavior via task_set_exc_guard_behavior()).
 *
 * XXX Remember to add/remove TASK_EXC_GUARD_HONOR_NAMED_DEFAULTS back to
 * task_exc_guard_default when transitioning this list between empty and
 * non-empty.
 */
static struct task_exc_guard_named_default task_exc_guard_named_defaults[] = {};
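
/*
 * Illustrative sketch (hypothetical entry, not shipped): a named override
 * pairs a process name with one of the behavior sets defined above, e.g.
 *
 *	static struct task_exc_guard_named_default task_exc_guard_named_defaults[] = {
 *		{ .name = "SomeRacyDaemon", .behavior = _TASK_EXC_GUARD_ALL_CORPSE },
 *	};
 *
 * "SomeRacyDaemon" is a made-up name; per the note above,
 * TASK_EXC_GUARD_HONOR_NAMED_DEFAULTS must be added to
 * task_exc_guard_default while the list is non-empty.
 */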

#else /* !defined(XNU_TARGET_OS_BRIDGE) */

uint32_t task_exc_guard_default = _TASK_EXC_GUARD_ALL_ONCE;
uint32_t task_exc_guard_config_mask = TASK_EXC_GUARD_MP_ALL | TASK_EXC_GUARD_VM_ALL;
static struct task_exc_guard_named_default task_exc_guard_named_defaults[] = {};

#endif /* !defined(XNU_TARGET_OS_BRIDGE) */

/* Forwards */

static bool task_hold_locked(task_t task);
static void task_wait_locked(task_t task, boolean_t until_not_runnable);
static void task_release_locked(task_t task);
extern task_t proc_get_task_raw(void *proc);
extern void task_ref_hold_proc_task_struct(task_t task);
extern void task_release_proc_task_struct(task_t task, proc_ro_t proc_ro);

static void task_synchronizer_destroy_all(task_t task);
static os_ref_count_t
task_add_turnstile_watchports_locked(
    task_t task,
    struct task_watchports *watchports,
    struct task_watchport_elem **previous_elem_array,
    ipc_port_t *portwatch_ports,
    uint32_t portwatch_count);

static os_ref_count_t
task_remove_turnstile_watchports_locked(
    task_t task,
    struct task_watchports *watchports,
    ipc_port_t *port_freelist);

static struct task_watchports *
task_watchports_alloc_init(
    task_t task,
    thread_t thread,
    uint32_t count);

static void
task_watchports_deallocate(
    struct task_watchports *watchports);

void
task_set_64bit(
    task_t task,
    boolean_t is_64bit,
    boolean_t is_64bit_data)
{
#if defined(__x86_64__) || defined(__arm64__)
    thread_t thread;
#endif /* defined(__x86_64__) || defined(__arm64__) */

    task_lock(task);

    /*
     * Switching to/from 64-bit address spaces
     */
    if (is_64bit) {
        if (!task_has_64Bit_addr(task)) {
            task_set_64Bit_addr(task);
        }
    } else {
        if (task_has_64Bit_addr(task)) {
            task_clear_64Bit_addr(task);
        }
    }

    /*
     * Switching to/from 64-bit register state.
     */
    if (is_64bit_data) {
        if (task_has_64Bit_data(task)) {
            goto out;
        }

        task_set_64Bit_data(task);
    } else {
        if (!task_has_64Bit_data(task)) {
            goto out;
        }

        task_clear_64Bit_data(task);
    }

    /* FIXME: On x86, the thread save state flavor can diverge from the
     * task's 64-bit feature flag due to the 32-bit/64-bit register save
     * state dichotomy. Since we can be pre-empted in this interval,
     * certain routines may observe the thread as being in an inconsistent
     * state with respect to its task's 64-bitness.
     */

#if defined(__x86_64__) || defined(__arm64__)
    queue_iterate(&task->threads, thread, thread_t, task_threads) {
        thread_mtx_lock(thread);
        machine_thread_switch_addrmode(thread);
        thread_mtx_unlock(thread);
    }
#endif /* defined(__x86_64__) || defined(__arm64__) */

out:
    task_unlock(task);
}

bool
task_get_64bit_addr(task_t task)
{
    return task_has_64Bit_addr(task);
}

bool
task_get_64bit_data(task_t task)
{
    return task_has_64Bit_data(task);
}

void
task_set_platform_binary(
    task_t task,
    boolean_t is_platform)
{
    if (is_platform) {
        task_ro_flags_set(task, TFRO_PLATFORM);
    } else {
        task_ro_flags_clear(task, TFRO_PLATFORM);
    }
    assert(task->map);
    if (task->map) {
        vm_map_lock(task->map);
        vm_map_set_platform_binary(task->map, (bool)is_platform);
        vm_map_unlock(task->map);
    }
}

#if XNU_TARGET_OS_OSX
#if DEVELOPMENT || DEBUG
SECURITY_READ_ONLY_LATE(bool) AMFI_bootarg_disable_mach_hardening = false;
#endif /* DEVELOPMENT || DEBUG */

void
task_disable_mach_hardening(task_t task)
{
    task_ro_flags_set(task, TFRO_MACH_HARDENING_OPT_OUT);
}

bool
task_opted_out_mach_hardening(task_t task)
{
    return task_ro_flags_get(task) & TFRO_MACH_HARDENING_OPT_OUT;
}
#endif /* XNU_TARGET_OS_OSX */

boolean_t
task_get_platform_binary(task_t task)
{
    return (task_ro_flags_get(task) & TFRO_PLATFORM) != 0;
}

boolean_t
task_is_a_corpse(task_t task)
{
    return (task_ro_flags_get(task) & TFRO_CORPSE) != 0;
}

boolean_t
task_is_ipc_active(task_t task)
{
    return task->ipc_active;
}

void
task_set_corpse(task_t task)
{
    return task_ro_flags_set(task, TFRO_CORPSE);
}

void
task_copyout_control_port(task_t task)
{
    ipc_task_copyout_control_port(task);
}

/*
 * Set or clear per-task TF_CA_CLIENT_WI flag according to specified argument.
 * Returns "false" if flag is already set, and "true" in other cases.
 */
bool
task_set_ca_client_wi(
    task_t task,
    boolean_t set_or_clear)
{
    bool ret = true;
    task_lock(task);
    if (set_or_clear) {
        /* Tasks can have only one CA_CLIENT work interval */
        if (task->t_flags & TF_CA_CLIENT_WI) {
            ret = false;
        } else {
            task->t_flags |= TF_CA_CLIENT_WI;
        }
    } else {
        task->t_flags &= ~TF_CA_CLIENT_WI;
    }
    task_unlock(task);
    return ret;
}

/*
 * task_set_dyld_info() is called at most three times:
 * 1) at task struct creation, to set addr/size to zero.
 * 2) in mach_loader.c, to set the location of the __all_image_info section in the loaded dyld.
 * 3) from dyld itself, to update the location of all_image_info.
 * For security, any calls after that are ignored. The TF_DYLD_ALL_IMAGE_FINAL bit is used to determine state.
 */
kern_return_t
task_set_dyld_info(
    task_t task,
    mach_vm_address_t addr,
    mach_vm_size_t size,
    bool finalize_value)
{
    mach_vm_address_t end;
    if (os_add_overflow(addr, size, &end)) {
        return KERN_FAILURE;
    }

    task_lock(task);
    /* don't accept updates if all_image_info_addr is final */
    if ((task->t_flags & TF_DYLD_ALL_IMAGE_FINAL) == 0) {
        bool inputNonZero = ((addr != 0) || (size != 0));
        bool currentNonZero = ((task->all_image_info_addr != 0) || (task->all_image_info_size != 0));
        task->all_image_info_addr = addr;
        task->all_image_info_size = size;
        /* can only change from a non-zero value to another non-zero once */
        if ((inputNonZero && currentNonZero) || finalize_value) {
            task->t_flags |= TF_DYLD_ALL_IMAGE_FINAL;
        }
        task_unlock(task);
        return KERN_SUCCESS;
    } else {
        task_unlock(task);
        return KERN_FAILURE;
    }
}

bool
task_donates_own_pages(
    task_t task)
{
    return task->donates_own_pages;
}

void
task_set_mach_header_address(
    task_t task,
    mach_vm_address_t addr)
{
    task_lock(task);
    task->mach_header_vm_address = addr;
    task_unlock(task);
}

void
task_bank_reset(__unused task_t task)
{
    if (task->bank_context != NULL) {
        bank_task_destroy(task);
    }
}

/*
 * NOTE: This should only be called when the P_LINTRANSIT
 * flag is set (the proc_trans lock is held) on the
 * proc associated with the task.
 */
void
task_bank_init(__unused task_t task)
{
    if (task->bank_context != NULL) {
        panic("Task bank init called with non null bank context for task: %p and bank_context: %p", task, task->bank_context);
    }
    bank_task_initialize(task);
}

void
task_set_did_exec_flag(task_t task)
{
    task->t_procflags |= TPF_DID_EXEC;
}

void
task_clear_exec_copy_flag(task_t task)
{
    task->t_procflags &= ~TPF_EXEC_COPY;
}

event_t
task_get_return_wait_event(task_t task)
{
    return (event_t)&task->returnwait_inheritor;
}

void
task_clear_return_wait(task_t task, uint32_t flags)
{
    if (flags & TCRW_CLEAR_INITIAL_WAIT) {
        thread_wakeup(task_get_return_wait_event(task));
    }

    if (flags & TCRW_CLEAR_FINAL_WAIT) {
        is_write_lock(task->itk_space);

        task->t_returnwaitflags &= ~TRW_LRETURNWAIT;
        task->returnwait_inheritor = NULL;

        if (flags & TCRW_CLEAR_EXEC_COMPLETE) {
            task->t_returnwaitflags &= ~TRW_LEXEC_COMPLETE;
        }

        if (task->t_returnwaitflags & TRW_LRETURNWAITER) {
            struct turnstile *turnstile = turnstile_prepare_hash((uintptr_t) task_get_return_wait_event(task),
                TURNSTILE_ULOCK);

            waitq_wakeup64_all(&turnstile->ts_waitq,
                CAST_EVENT64_T(task_get_return_wait_event(task)),
                THREAD_AWAKENED, WAITQ_UPDATE_INHERITOR);

            turnstile_update_inheritor_complete(turnstile, TURNSTILE_INTERLOCK_HELD);

            turnstile_complete_hash((uintptr_t) task_get_return_wait_event(task), TURNSTILE_ULOCK);
            turnstile_cleanup();
            task->t_returnwaitflags &= ~TRW_LRETURNWAITER;
        }
        is_write_unlock(task->itk_space);
    }
}

/*
 * Set default behavior for a task's control ports
 */
static void
task_set_ctrl_port_default(
    task_t task,
    thread_t thread)
{
    ipc_space_policy_t pol = ipc_policy_for_task(task);
    bool movable_allowed = mac_task_check_get_movable_control_port() == 0;
    bool is_simulated = pol & IPC_SPACE_POLICY_SIMULATED;
    bool is_translated = false;

    task_control_port_options_t opts = TASK_CONTROL_PORT_OPTIONS_NONE;
    if (movable_allowed || is_simulated || is_translated) {
        /* Disable control port hardening for entitled||simulated binaries */
        opts = TASK_CONTROL_PORT_OPTIONS_NONE;
    } else if (ipc_should_apply_policy(pol, IPC_POLICY_ENHANCED_V1)) {
        /* set control port options for 1p code, inherited from parent task by default */
        if (ipc_control_port_options & ICP_OPTIONS_IMMOVABLE_1P_HARD) {
            opts |= TASK_CONTROL_PORT_IMMOVABLE_HARD;
        } else if (ipc_control_port_options & ICP_OPTIONS_IMMOVABLE_1P_SOFT) {
            opts |= TASK_CONTROL_PORT_IMMOVABLE_SOFT;
        }
    } else {
        /* set control port options for 3p code, inherited from parent task by default */
        if (ipc_control_port_options & ICP_OPTIONS_IMMOVABLE_3P_HARD) {
            opts |= TASK_CONTROL_PORT_IMMOVABLE_HARD;
        } else if (ipc_control_port_options & ICP_OPTIONS_IMMOVABLE_3P_SOFT) {
            opts |= TASK_CONTROL_PORT_IMMOVABLE_SOFT;
        }
    }

    /* see `copyout_should_mark_immovable_send`, which consumes these flags */
    task_set_control_port_options(task, opts);

    /*
     * now that we have marked the task as immovable, copyout the task/thread ports
     * again so that they get marked as immovable on copyout
     */
    ipc_task_copyout_control_port(task);
    /* consumed by ipc_thread_set_immovable_pinned */
    thread_reference(thread);
    ipc_thread_set_immovable_pinned(thread);
}

void __attribute__((noreturn))
task_wait_to_return(void)
{
    task_t task = current_task();
    thread_t thread = current_thread();
    uint8_t returnwaitflags;

    is_write_lock(task->itk_space);

    if (task->t_returnwaitflags & TRW_LRETURNWAIT) {
        struct turnstile *turnstile = turnstile_prepare_hash((uintptr_t) task_get_return_wait_event(task),
            TURNSTILE_ULOCK);

        do {
            task->t_returnwaitflags |= TRW_LRETURNWAITER;
            turnstile_update_inheritor(turnstile, task->returnwait_inheritor,
                (TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_THREAD));

            waitq_assert_wait64(&turnstile->ts_waitq,
                CAST_EVENT64_T(task_get_return_wait_event(task)),
                THREAD_UNINT, TIMEOUT_WAIT_FOREVER);

            is_write_unlock(task->itk_space);

            turnstile_update_inheritor_complete(turnstile, TURNSTILE_INTERLOCK_NOT_HELD);

            thread_block(THREAD_CONTINUE_NULL);

            is_write_lock(task->itk_space);
        } while (task->t_returnwaitflags & TRW_LRETURNWAIT);

        turnstile_complete_hash((uintptr_t) task_get_return_wait_event(task), TURNSTILE_ULOCK);
    }

    returnwaitflags = task->t_returnwaitflags;
    is_write_unlock(task->itk_space);
    turnstile_cleanup();

    /**
     * In posix_spawn() path, process_signature() is guaranteed to complete
     * when the "second wait" is cleared. Call out to execute whatever depends
     * on the result of that before we return to EL0.
     */
    task_post_signature_processing_hook(task);
#if CONFIG_MACF
    /*
     * Before jumping to userspace and allowing this process
     * to execute any code, make sure its credentials are cached,
     * and notify any interested parties.
     */
    extern void current_cached_proc_cred_update(void);

    current_cached_proc_cred_update();
    if (returnwaitflags & TRW_LEXEC_COMPLETE) {
        mac_proc_notify_exec_complete(current_proc());
    }
#endif

    /*
     * Set task/thread control port movability now that we can call AMFI
     */
    task_set_ctrl_port_default(task, thread);

    thread_bootstrap_return();
}

/**
 * A callout by task_wait_to_return on the main thread of a newly spawned task
 * after process_signature() is completed by the parent task.
 *
 * @param task The newly spawned task
 */
void
task_post_signature_processing_hook(task_t task)
{
    ml_task_post_signature_processing_hook(task);
}

bool
task_is_initproc(task_t task)
{
    return get_bsdtask_info(task) == initproc;
}

boolean_t
task_is_exec_copy(task_t task)
{
    return task_is_exec_copy_internal(task);
}

boolean_t
task_did_exec(task_t task)
{
    return task_did_exec_internal(task);
}

boolean_t
task_is_active(task_t task)
{
    return task->active;
}

boolean_t
task_is_halting(task_t task)
{
    return task->halting;
}

void
task_init(void)
{
    if (max_task_footprint_mb != 0) {
#if CONFIG_MEMORYSTATUS
        if (max_task_footprint_mb < 50) {
            printf("Warning: max_task_pmem %d below minimum.\n",
                max_task_footprint_mb);
            max_task_footprint_mb = 50;
        }
        printf("Limiting task physical memory footprint to %d MB\n",
            max_task_footprint_mb);

        max_task_footprint = (ledger_amount_t)max_task_footprint_mb * 1024 * 1024; // Convert MB to bytes

        /*
         * Configure the per-task memory limit warning level.
         * This is computed as a percentage.
         */
        max_task_footprint_warning_level = 0;

        if (max_mem < 0x40000000) {
            /*
             * On devices with < 1GB of memory:
             * -- set warnings to 50MB below the per-task limit.
             */
            if (max_task_footprint_mb > 50) {
                max_task_footprint_warning_level = ((max_task_footprint_mb - 50) * 100) / max_task_footprint_mb;
            }
        } else {
            /*
             * On devices with >= 1GB of memory:
             * -- set warnings to 100MB below the per-task limit.
             */
            if (max_task_footprint_mb > 100) {
                max_task_footprint_warning_level = ((max_task_footprint_mb - 100) * 100) / max_task_footprint_mb;
            }
        }
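
        /*
         * Worked example (illustrative): with max_task_pmem=2048 on a
         * >= 1GB device, the warning level computes to
         * ((2048 - 100) * 100) / 2048 = 95, i.e. the warning fires at
         * 95% of the 2048 MB limit, roughly 100 MB below it.
         */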

        /*
         * Never allow warning level to land below the default.
         */
        if (max_task_footprint_warning_level < PHYS_FOOTPRINT_WARNING_LEVEL) {
            max_task_footprint_warning_level = PHYS_FOOTPRINT_WARNING_LEVEL;
        }

        printf("Limiting task physical memory warning to %d%%\n", max_task_footprint_warning_level);

#else
        printf("Warning: max_task_pmem specified, but jetsam not configured; ignoring.\n");
#endif /* CONFIG_MEMORYSTATUS */
    }

#if DEVELOPMENT || DEBUG
    PE_parse_boot_argn("task_exc_guard_default",
        &task_exc_guard_default,
        sizeof(task_exc_guard_default));
#endif /* DEVELOPMENT || DEBUG */

#if CONFIG_COREDUMP
    if (!PE_parse_boot_argn("hwm_user_cores", &hwm_user_cores,
        sizeof(hwm_user_cores))) {
        hwm_user_cores = 0;
    }
#endif

    proc_init_cpumon_params();

    if (!PE_parse_boot_argn("task_wakeups_monitor_rate", &task_wakeups_monitor_rate, sizeof(task_wakeups_monitor_rate))) {
        task_wakeups_monitor_rate = TASK_WAKEUPS_MONITOR_DEFAULT_LIMIT;
    }

    if (!PE_parse_boot_argn("task_wakeups_monitor_interval", &task_wakeups_monitor_interval, sizeof(task_wakeups_monitor_interval))) {
        task_wakeups_monitor_interval = TASK_WAKEUPS_MONITOR_DEFAULT_INTERVAL;
    }

    if (!PE_parse_boot_argn("task_wakeups_monitor_ustackshots_trigger_pct", &task_wakeups_monitor_ustackshots_trigger_pct,
        sizeof(task_wakeups_monitor_ustackshots_trigger_pct))) {
        task_wakeups_monitor_ustackshots_trigger_pct = TASK_WAKEUPS_MONITOR_DEFAULT_USTACKSHOTS_TRIGGER;
    }

    if (!PE_parse_boot_argn("task_iomon_limit_mb", &task_iomon_limit_mb, sizeof(task_iomon_limit_mb))) {
        task_iomon_limit_mb = IOMON_DEFAULT_LIMIT;
    }

    if (!PE_parse_boot_argn("task_iomon_interval_secs", &task_iomon_interval_secs, sizeof(task_iomon_interval_secs))) {
        task_iomon_interval_secs = IOMON_DEFAULT_INTERVAL;
    }

    if (!PE_parse_boot_argn("io_telemetry_limit", &io_telemetry_limit, sizeof(io_telemetry_limit))) {
        io_telemetry_limit = IO_TELEMETRY_DEFAULT_LIMIT;
    }

    /*
     * If we have coalitions, coalition_init() will call init_task_ledgers() as it
     * sets up the ledgers for the default coalition. If we don't have coalitions,
     * then we have to call it now.
     */
#if CONFIG_COALITIONS
    assert(task_ledger_template);
#else /* CONFIG_COALITIONS */
    init_task_ledgers();
#endif /* CONFIG_COALITIONS */

    task_ref_init();
    task_zone_init();

#ifdef __LP64__
    boolean_t is_64bit = TRUE;
#else
    boolean_t is_64bit = FALSE;
#endif

    kernproc = (struct proc *)zalloc_flags(proc_task_zone, Z_WAITOK | Z_ZERO);
    kernel_task = proc_get_task_raw(kernproc);

    /*
     * Create the kernel task as the first task.
     */
    if (task_create_internal(TASK_NULL, NULL, NULL, FALSE, is_64bit,
        is_64bit, TF_NONE, TF_NONE, TPF_NONE, TWF_NONE, kernel_task) != KERN_SUCCESS) {
        panic("task_init");
    }



    vm_map_setup(get_task_map(kernel_task), kernel_task);

    ipc_task_enable(kernel_task);

#if defined(HAS_APPLE_PAC)
    kernel_task->rop_pid = ml_default_rop_pid();
    kernel_task->jop_pid = ml_default_jop_pid();
    // kernel_task never runs at EL0, but machine_thread_state_convert_from/to_user() relies on
    // disable_user_jop to be false for kernel threads (e.g. in exception delivery on thread_exception_daemon)
    ml_task_set_disable_user_jop(kernel_task, FALSE);
#endif

    vm_map_deallocate(kernel_task->map);
    kernel_task->map = kernel_map;
}

static inline void
task_zone_init(void)
{
    proc_struct_size = roundup(proc_struct_size, task_alignment);
    task_struct_size = roundup(sizeof(struct task), proc_alignment);
    proc_and_task_size = proc_struct_size + task_struct_size;

    proc_task_zone = zone_create_ext("proc_task", proc_and_task_size,
        ZC_ZFREE_CLEARMEM | ZC_SEQUESTER, ZONE_ID_PROC_TASK, NULL); /* sequester is needed for proc_rele() */
}
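
/*
 * Illustrative sketch (assumption, not from the original source): proc and
 * task are co-allocated in one proc_task_zone element, proc struct first,
 * each rounded up to the other's alignment so both land on their natural
 * boundaries. proc_get_task_raw() can then be pictured as pointer
 * arithmetic over that layout:
 *
 *	task_t
 *	proc_get_task_raw(void *proc)
 *	{
 *		return (task_t)((uintptr_t)proc + proc_struct_size);
 *	}
 *
 * The body shown here is a guess at the shape of the helper, not its
 * actual definition (which lives in the BSD side of the kernel).
 */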

/*
 * Task ledgers
 * ------------
 *
 * phys_footprint
 *   Physical footprint: This is the sum of:
 *   + (internal - alternate_accounting)
 *   + (internal_compressed - alternate_accounting_compressed)
 *   + iokit_mapped
 *   + purgeable_nonvolatile
 *   + purgeable_nonvolatile_compressed
 *   + page_table
 *
 * internal
 *   The task's anonymous memory, which on iOS is always resident.
 *
 * internal_compressed
 *   Amount of this task's internal memory which is held by the compressor.
 *   Such memory is no longer actually resident for the task [i.e., resident in its pmap],
 *   and could be either decompressed back into memory, or paged out to storage, depending
 *   on our implementation.
 *
 * iokit_mapped
 *   IOKit mappings: The total size of all IOKit mappings in this task [regardless of
 *   clean/dirty or internal/external state].
 *
 * alternate_accounting
 *   The number of internal dirty pages which are part of IOKit mappings. By definition, these pages
 *   are counted in both internal *and* iokit_mapped, so we must subtract them from the total to avoid
 *   double counting.
 *
 * pages_grabbed
 *   pages_grabbed counts all page grabs in a task. It is also broken out into three subtypes
 *   which track UPL, IOPL and Kernel page grabs.
 */
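
/*
 * Worked example (illustrative numbers): a task with internal = 300 MB,
 * internal_compressed = 100 MB, alternate_accounting = 20 MB,
 * alternate_accounting_compressed = 0, iokit_mapped = 50 MB,
 * purgeable_nonvolatile = 10 MB, purgeable_nonvolatile_compressed = 5 MB,
 * and page_table = 4 MB has
 *
 *	phys_footprint = (300 - 20) + (100 - 0) + 50 + 10 + 5 + 4 = 449 MB.
 */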
void
init_task_ledgers(void)
{
    ledger_template_t t;

    assert(task_ledger_template == NULL);
    assert(kernel_task == TASK_NULL);

#if MACH_ASSERT
    PE_parse_boot_argn("pmap_ledgers_panic",
        &pmap_ledgers_panic,
        sizeof(pmap_ledgers_panic));
    PE_parse_boot_argn("pmap_ledgers_panic_leeway",
        &pmap_ledgers_panic_leeway,
        sizeof(pmap_ledgers_panic_leeway));
#endif /* MACH_ASSERT */

    if ((t = ledger_template_create("Per-task ledger")) == NULL) {
        panic("couldn't create task ledger template");
    }

    task_ledgers.cpu_time = ledger_entry_add(t, "cpu_time", "sched", "ns");
    task_ledgers.tkm_private = ledger_entry_add(t, "tkm_private",
        "physmem", "bytes");
    task_ledgers.tkm_shared = ledger_entry_add(t, "tkm_shared", "physmem",
        "bytes");
    task_ledgers.phys_mem = ledger_entry_add(t, "phys_mem", "physmem",
        "bytes");
    task_ledgers.wired_mem = ledger_entry_add(t, "wired_mem", "physmem",
        "bytes");
    task_ledgers.conclave_mem = ledger_entry_add_with_flags(t, "conclave_mem", "physmem", "bytes",
        LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE | LEDGER_ENTRY_ALLOW_DEBIT);
    task_ledgers.internal = ledger_entry_add(t, "internal", "physmem",
        "bytes");
    task_ledgers.iokit_mapped = ledger_entry_add_with_flags(t, "iokit_mapped", "mappings",
        "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.alternate_accounting = ledger_entry_add_with_flags(t, "alternate_accounting", "physmem",
        "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.alternate_accounting_compressed = ledger_entry_add_with_flags(t, "alternate_accounting_compressed", "physmem",
        "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.page_table = ledger_entry_add_with_flags(t, "page_table", "physmem",
        "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.phys_footprint = ledger_entry_add(t, "phys_footprint", "physmem",
        "bytes");
    task_ledgers.internal_compressed = ledger_entry_add(t, "internal_compressed", "physmem",
        "bytes");
    task_ledgers.reusable = ledger_entry_add(t, "reusable", "physmem", "bytes");
    task_ledgers.external = ledger_entry_add(t, "external", "physmem", "bytes");
    task_ledgers.purgeable_volatile = ledger_entry_add_with_flags(t, "purgeable_volatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.purgeable_nonvolatile = ledger_entry_add_with_flags(t, "purgeable_nonvolatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.purgeable_volatile_compressed = ledger_entry_add_with_flags(t, "purgeable_volatile_compress", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.purgeable_nonvolatile_compressed = ledger_entry_add_with_flags(t, "purgeable_nonvolatile_compress", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.pages_grabbed = ledger_entry_add_with_flags(t, "pages_grabbed", "physmem", "count", LEDGER_ENTRY_USE_COUNTER);
    task_ledgers.pages_grabbed_kern = ledger_entry_add_with_flags(t, "pages_grabbed_kern", "physmem", "count", LEDGER_ENTRY_USE_COUNTER);
    task_ledgers.pages_grabbed_iopl = ledger_entry_add_with_flags(t, "pages_grabbed_iopl", "physmem", "count", LEDGER_ENTRY_USE_COUNTER);
    task_ledgers.pages_grabbed_upl = ledger_entry_add_with_flags(t, "pages_grabbed_upl", "physmem", "count", LEDGER_ENTRY_USE_COUNTER);
    task_ledgers.tagged_nofootprint = ledger_entry_add_with_flags(t, "tagged_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.tagged_footprint = ledger_entry_add_with_flags(t, "tagged_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.tagged_nofootprint_compressed = ledger_entry_add_with_flags(t, "tagged_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.tagged_footprint_compressed = ledger_entry_add_with_flags(t, "tagged_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.network_volatile = ledger_entry_add_with_flags(t, "network_volatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.network_nonvolatile = ledger_entry_add_with_flags(t, "network_nonvolatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.network_volatile_compressed = ledger_entry_add_with_flags(t, "network_volatile_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.network_nonvolatile_compressed = ledger_entry_add_with_flags(t, "network_nonvolatile_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.media_nofootprint = ledger_entry_add_with_flags(t, "media_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.media_footprint = ledger_entry_add_with_flags(t, "media_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.media_nofootprint_compressed = ledger_entry_add_with_flags(t, "media_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.media_footprint_compressed = ledger_entry_add_with_flags(t, "media_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.graphics_nofootprint = ledger_entry_add_with_flags(t, "graphics_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.graphics_footprint = ledger_entry_add_with_flags(t, "graphics_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.graphics_nofootprint_compressed = ledger_entry_add_with_flags(t, "graphics_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.graphics_footprint_compressed = ledger_entry_add_with_flags(t, "graphics_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.neural_nofootprint = ledger_entry_add_with_flags(t, "neural_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.neural_footprint = ledger_entry_add_with_flags(t, "neural_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.neural_nofootprint_compressed = ledger_entry_add_with_flags(t, "neural_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.neural_footprint_compressed = ledger_entry_add_with_flags(t, "neural_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.neural_nofootprint_total = ledger_entry_add(t, "neural_nofootprint_total", "physmem", "bytes");

#if CONFIG_DEFERRED_RECLAIM
    task_ledgers.est_reclaimable = ledger_entry_add_with_flags(t, "est_reclaimable", "virtmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
#endif /* CONFIG_DEFERRED_RECLAIM */

#if CONFIG_FREEZE
    task_ledgers.frozen_to_swap = ledger_entry_add(t, "frozen_to_swap", "physmem", "bytes");
#endif /* CONFIG_FREEZE */

    task_ledgers.platform_idle_wakeups = ledger_entry_add(t, "platform_idle_wakeups", "power",
        "count");
    task_ledgers.interrupt_wakeups = ledger_entry_add(t, "interrupt_wakeups", "power",
        "count");

#if CONFIG_SCHED_SFI
    sfi_class_id_t class_id, ledger_alias;
    for (class_id = SFI_CLASS_UNSPECIFIED; class_id < MAX_SFI_CLASS_ID; class_id++) {
        task_ledgers.sfi_wait_times[class_id] = -1;
    }

    /* don't account for UNSPECIFIED */
    for (class_id = SFI_CLASS_UNSPECIFIED + 1; class_id < MAX_SFI_CLASS_ID; class_id++) {
        ledger_alias = sfi_get_ledger_alias_for_class(class_id);
        if (ledger_alias != SFI_CLASS_UNSPECIFIED) {
            /* Check to see if alias has been registered yet */
            if (task_ledgers.sfi_wait_times[ledger_alias] != -1) {
                task_ledgers.sfi_wait_times[class_id] = task_ledgers.sfi_wait_times[ledger_alias];
            } else {
                /* Otherwise, initialize it first */
                task_ledgers.sfi_wait_times[class_id] = task_ledgers.sfi_wait_times[ledger_alias] = sfi_ledger_entry_add(t, ledger_alias);
            }
        } else {
            task_ledgers.sfi_wait_times[class_id] = sfi_ledger_entry_add(t, class_id);
        }

        if (task_ledgers.sfi_wait_times[class_id] < 0) {
            panic("couldn't create entries for task ledger template for SFI class 0x%x", class_id);
        }
    }

    assert(task_ledgers.sfi_wait_times[MAX_SFI_CLASS_ID - 1] != -1);
#endif /* CONFIG_SCHED_SFI */

    task_ledgers.cpu_time_billed_to_me = ledger_entry_add(t, "cpu_time_billed_to_me", "sched", "ns");
    task_ledgers.cpu_time_billed_to_others = ledger_entry_add(t, "cpu_time_billed_to_others", "sched", "ns");
    task_ledgers.physical_writes = ledger_entry_add(t, "physical_writes", "res", "bytes");
    task_ledgers.logical_writes = ledger_entry_add(t, "logical_writes", "res", "bytes");
    task_ledgers.logical_writes_to_external = ledger_entry_add(t, "logical_writes_to_external", "res", "bytes");
#if CONFIG_PHYS_WRITE_ACCT
    task_ledgers.fs_metadata_writes = ledger_entry_add(t, "fs_metadata_writes", "res", "bytes");
#endif /* CONFIG_PHYS_WRITE_ACCT */
    task_ledgers.energy_billed_to_me = ledger_entry_add(t, "energy_billed_to_me", "power", "nj");
    task_ledgers.energy_billed_to_others = ledger_entry_add(t, "energy_billed_to_others", "power", "nj");

#if CONFIG_MEMORYSTATUS
    task_ledgers.memorystatus_dirty_time = ledger_entry_add(t, "memorystatus_dirty_time", "physmem", "ns");
#endif /* CONFIG_MEMORYSTATUS */

    task_ledgers.swapins = ledger_entry_add_with_flags(t, "swapins", "physmem", "bytes",
        LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);

    if ((task_ledgers.cpu_time < 0) ||
        (task_ledgers.tkm_private < 0) ||
        (task_ledgers.tkm_shared < 0) ||
        (task_ledgers.phys_mem < 0) ||
        (task_ledgers.wired_mem < 0) ||
        (task_ledgers.conclave_mem < 0) ||
        (task_ledgers.internal < 0) ||
        (task_ledgers.external < 0) ||
        (task_ledgers.reusable < 0) ||
        (task_ledgers.iokit_mapped < 0) ||
        (task_ledgers.alternate_accounting < 0) ||
        (task_ledgers.alternate_accounting_compressed < 0) ||
        (task_ledgers.page_table < 0) ||
        (task_ledgers.phys_footprint < 0) ||
        (task_ledgers.internal_compressed < 0) ||
        (task_ledgers.purgeable_volatile < 0) ||
        (task_ledgers.purgeable_nonvolatile < 0) ||
        (task_ledgers.purgeable_volatile_compressed < 0) ||
        (task_ledgers.purgeable_nonvolatile_compressed < 0) ||
        (task_ledgers.tagged_nofootprint < 0) ||
        (task_ledgers.tagged_footprint < 0) ||
        (task_ledgers.tagged_nofootprint_compressed < 0) ||
        (task_ledgers.tagged_footprint_compressed < 0) ||
#if CONFIG_FREEZE
        (task_ledgers.frozen_to_swap < 0) ||
#endif /* CONFIG_FREEZE */
        (task_ledgers.network_volatile < 0) ||
        (task_ledgers.network_nonvolatile < 0) ||
        (task_ledgers.network_volatile_compressed < 0) ||
        (task_ledgers.network_nonvolatile_compressed < 0) ||
        (task_ledgers.media_nofootprint < 0) ||
        (task_ledgers.media_footprint < 0) ||
        (task_ledgers.media_nofootprint_compressed < 0) ||
        (task_ledgers.media_footprint_compressed < 0) ||
        (task_ledgers.graphics_nofootprint < 0) ||
        (task_ledgers.graphics_footprint < 0) ||
        (task_ledgers.graphics_nofootprint_compressed < 0) ||
        (task_ledgers.graphics_footprint_compressed < 0) ||
        (task_ledgers.neural_nofootprint < 0) ||
        (task_ledgers.neural_footprint < 0) ||
        (task_ledgers.neural_nofootprint_compressed < 0) ||
        (task_ledgers.neural_footprint_compressed < 0) ||
        (task_ledgers.neural_nofootprint_total < 0) ||
        (task_ledgers.platform_idle_wakeups < 0) ||
        (task_ledgers.interrupt_wakeups < 0) ||
        (task_ledgers.cpu_time_billed_to_me < 0) || (task_ledgers.cpu_time_billed_to_others < 0) ||
        (task_ledgers.physical_writes < 0) ||
        (task_ledgers.logical_writes < 0) ||
        (task_ledgers.logical_writes_to_external < 0) ||
#if CONFIG_PHYS_WRITE_ACCT
        (task_ledgers.fs_metadata_writes < 0) ||
#endif /* CONFIG_PHYS_WRITE_ACCT */
#if CONFIG_MEMORYSTATUS
        (task_ledgers.memorystatus_dirty_time < 0) ||
#endif /* CONFIG_MEMORYSTATUS */
        (task_ledgers.energy_billed_to_me < 0) ||
        (task_ledgers.energy_billed_to_others < 0) ||
        (task_ledgers.swapins < 0)
        ) {
        panic("couldn't create entries for task ledger template");
    }

    ledger_track_credit_only(t, task_ledgers.phys_footprint);
    ledger_track_credit_only(t, task_ledgers.internal);
    ledger_track_credit_only(t, task_ledgers.external);
    ledger_track_credit_only(t, task_ledgers.reusable);

    ledger_track_maximum(t, task_ledgers.phys_footprint, 60);
    ledger_track_maximum(t, task_ledgers.phys_mem, 60);
    ledger_track_maximum(t, task_ledgers.internal, 60);
    ledger_track_maximum(t, task_ledgers.internal_compressed, 60);
    ledger_track_maximum(t, task_ledgers.reusable, 60);
    ledger_track_maximum(t, task_ledgers.external, 60);
    ledger_track_maximum(t, task_ledgers.neural_nofootprint_total, 60);
#if MACH_ASSERT
    if (pmap_ledgers_panic) {
        ledger_panic_on_negative(t, task_ledgers.phys_footprint);
        ledger_panic_on_negative(t, task_ledgers.conclave_mem);
        ledger_panic_on_negative(t, task_ledgers.page_table);
        ledger_panic_on_negative(t, task_ledgers.internal);
        ledger_panic_on_negative(t, task_ledgers.iokit_mapped);
        ledger_panic_on_negative(t, task_ledgers.alternate_accounting);
        ledger_panic_on_negative(t, task_ledgers.alternate_accounting_compressed);
        ledger_panic_on_negative(t, task_ledgers.purgeable_volatile);
        ledger_panic_on_negative(t, task_ledgers.purgeable_nonvolatile);
        ledger_panic_on_negative(t, task_ledgers.purgeable_volatile_compressed);
        ledger_panic_on_negative(t, task_ledgers.purgeable_nonvolatile_compressed);
#if CONFIG_PHYS_WRITE_ACCT
        ledger_panic_on_negative(t, task_ledgers.fs_metadata_writes);
#endif /* CONFIG_PHYS_WRITE_ACCT */

        ledger_panic_on_negative(t, task_ledgers.tagged_nofootprint);
        ledger_panic_on_negative(t, task_ledgers.tagged_footprint);
        ledger_panic_on_negative(t, task_ledgers.tagged_nofootprint_compressed);
        ledger_panic_on_negative(t, task_ledgers.tagged_footprint_compressed);
        ledger_panic_on_negative(t, task_ledgers.network_volatile);
        ledger_panic_on_negative(t, task_ledgers.network_nonvolatile);
        ledger_panic_on_negative(t, task_ledgers.network_volatile_compressed);
        ledger_panic_on_negative(t, task_ledgers.network_nonvolatile_compressed);
        ledger_panic_on_negative(t, task_ledgers.media_nofootprint);
        ledger_panic_on_negative(t, task_ledgers.media_footprint);
        ledger_panic_on_negative(t, task_ledgers.media_nofootprint_compressed);
        ledger_panic_on_negative(t, task_ledgers.media_footprint_compressed);
1498 ledger_panic_on_negative(t, task_ledgers.graphics_nofootprint);
1499 ledger_panic_on_negative(t, task_ledgers.graphics_footprint);
1500 ledger_panic_on_negative(t, task_ledgers.graphics_nofootprint_compressed);
1501 ledger_panic_on_negative(t, task_ledgers.graphics_footprint_compressed);
1502 ledger_panic_on_negative(t, task_ledgers.neural_nofootprint);
1503 ledger_panic_on_negative(t, task_ledgers.neural_footprint);
1504 ledger_panic_on_negative(t, task_ledgers.neural_nofootprint_compressed);
1505 ledger_panic_on_negative(t, task_ledgers.neural_footprint_compressed);
1506 }
1507 #endif /* MACH_ASSERT */
1508
1509 #if CONFIG_MEMORYSTATUS
1510 ledger_set_callback(t, task_ledgers.phys_footprint, task_footprint_exceeded, NULL, NULL);
1511 ledger_set_callback(t, task_ledgers.conclave_mem, task_conclave_mem_limit_exceeded, NULL, NULL);
1512 #endif /* CONFIG_MEMORYSTATUS */
1513
1514 ledger_set_callback(t, task_ledgers.interrupt_wakeups,
1515 task_wakeups_rate_exceeded, NULL, NULL);
1516 ledger_set_callback(t, task_ledgers.physical_writes, task_io_rate_exceeded, (void *)FLAVOR_IO_PHYSICAL_WRITES, NULL);
1517
1518 #if CONFIG_SPTM || !XNU_MONITOR
1519 ledger_template_complete(t);
1520 #else /* CONFIG_SPTM || !XNU_MONITOR */
1521 ledger_template_complete_secure_alloc(t);
1522 #endif /* CONFIG_SPTM || !XNU_MONITOR */
1523 task_ledger_template = t;
1524 }
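/*
 * Illustrative sketch (not part of the build): once the template is
 * complete, per-task ledgers are instantiated from it, and individual
 * entries are addressed through the indices cached in task_ledgers,
 * e.g.:
 *
 *   ledger_t l = ledger_instantiate(task_ledger_template,
 *       LEDGER_CREATE_ACTIVE_ENTRIES);
 *   ledger_amount_t credit, debit;
 *   ledger_get_entries(l, task_ledgers.phys_footprint, &credit, &debit);
 *
 * Both calls appear later in this file; the pairing above is only an
 * example.
 */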
1525
1526 /* Create a task, but leave the task ports disabled */
1527 kern_return_t
1528 task_create_internal(
1529 task_t parent_task, /* Null-able */
1530 proc_ro_t proc_ro,
1531 coalition_t *parent_coalitions __unused,
1532 boolean_t inherit_memory,
1533 boolean_t is_64bit,
1534 boolean_t is_64bit_data,
1535 uint32_t t_flags,
1536 uint32_t t_flags_ro,
1537 uint32_t t_procflags,
1538 uint8_t t_returnwaitflags,
1539 task_t child_task)
1540 {
1541 task_t new_task;
1542 vm_shared_region_t shared_region;
1543 ledger_t ledger = NULL;
1544 struct task_ro_data task_ro_data = {};
1545 uint32_t parent_t_flags_ro = 0;
1546
1547 new_task = child_task;
1548
1549 if (task_ref_count_init(new_task) != KERN_SUCCESS) {
1550 return KERN_RESOURCE_SHORTAGE;
1551 }
1552
1553 /* allocate with active entries */
1554 assert(task_ledger_template != NULL);
1555 ledger = ledger_instantiate(task_ledger_template, LEDGER_CREATE_ACTIVE_ENTRIES);
1556 if (ledger == NULL) {
1557 task_ref_count_fini(new_task);
1558 return KERN_RESOURCE_SHORTAGE;
1559 }
1560
1561 counter_alloc(&(new_task->faults));
1562
1563 #if defined(HAS_APPLE_PAC)
1564 const uint8_t disable_user_jop = inherit_memory ? parent_task->disable_user_jop : FALSE;
1565 ml_task_set_rop_pid(new_task, parent_task, inherit_memory);
1566 ml_task_set_jop_pid(new_task, parent_task, inherit_memory, disable_user_jop);
1567 ml_task_set_disable_user_jop(new_task, disable_user_jop);
1568 #endif
1569
1570
1571 new_task->ledger = ledger;
1572
1573 /* if inherit_memory is true, parent_task MUST not be NULL */
1574 if (!(t_flags & TF_CORPSE_FORK) && inherit_memory) {
1575 #if CONFIG_DEFERRED_RECLAIM
1576 if (parent_task->deferred_reclamation_metadata) {
1577 /*
1578 * Prevent concurrent reclaims while we're forking the parent_task's map,
1579 * so that the child's map is in sync with the forked reclamation
1580 * metadata.
1581 */
1582 vm_deferred_reclamation_ring_own(
1583 parent_task->deferred_reclamation_metadata);
1584 }
1585 #endif /* CONFIG_DEFERRED_RECLAIM */
1586 new_task->map = vm_map_fork(ledger, parent_task->map, 0);
1587 #if CONFIG_DEFERRED_RECLAIM
1588 if (new_task->map != NULL &&
1589 parent_task->deferred_reclamation_metadata) {
1590 new_task->deferred_reclamation_metadata =
1591 vm_deferred_reclamation_task_fork(new_task,
1592 parent_task->deferred_reclamation_metadata);
1593 }
1594 if (parent_task->deferred_reclamation_metadata) {
1595 vm_deferred_reclamation_ring_disown(
1596 parent_task->deferred_reclamation_metadata);
1597 }
1598 #endif /* CONFIG_DEFERRED_RECLAIM */
1599 } else {
1600 unsigned int pmap_flags = is_64bit ? PMAP_CREATE_64BIT : 0;
1601 pmap_t pmap = pmap_create_options(ledger, 0, pmap_flags);
1602 vm_map_t new_map;
1603
1604 if (pmap == NULL) {
1605 counter_free(&new_task->faults);
1606 ledger_dereference(ledger);
1607 task_ref_count_fini(new_task);
1608 return KERN_RESOURCE_SHORTAGE;
1609 }
1610 new_map = vm_map_create_options(pmap,
1611 (vm_map_offset_t)(VM_MIN_ADDRESS),
1612 (vm_map_offset_t)(VM_MAX_ADDRESS),
1613 VM_MAP_CREATE_PAGEABLE);
1614 if (parent_task) {
1615 vm_map_inherit_limits(new_map, parent_task->map);
1616 }
1617 new_task->map = new_map;
1618 }
1619
1620 if (new_task->map == NULL) {
1621 counter_free(&new_task->faults);
1622 ledger_dereference(ledger);
1623 task_ref_count_fini(new_task);
1624 return KERN_RESOURCE_SHORTAGE;
1625 }
1626
1627 lck_mtx_init(&new_task->lock, &task_lck_grp, &task_lck_attr);
1628 queue_init(&new_task->threads);
1629 new_task->suspend_count = 0;
1630 new_task->thread_count = 0;
1631 new_task->active_thread_count = 0;
1632 new_task->user_stop_count = 0;
1633 new_task->legacy_stop_count = 0;
1634 new_task->active = TRUE;
1635 new_task->halting = FALSE;
1636 new_task->priv_flags = 0;
1637 new_task->t_flags = t_flags;
1638 task_ro_data.t_flags_ro = t_flags_ro;
1639 new_task->t_procflags = t_procflags;
1640 new_task->t_returnwaitflags = t_returnwaitflags;
1641 new_task->returnwait_inheritor = current_thread();
1642 new_task->importance = 0;
1643 new_task->crashed_thread_id = 0;
1644 new_task->watchports = NULL;
1645 new_task->t_rr_ranges = NULL;
1646
1647 new_task->bank_context = NULL;
1648
1649 if (parent_task) {
1650 parent_t_flags_ro = task_ro_flags_get(parent_task);
1651 }
1652
1653 if (parent_task && inherit_memory) {
1654 #if __has_feature(ptrauth_calls)
1655 /* Inherit the pac exception flags from parent if in fork */
1656 task_ro_data.t_flags_ro |= (parent_t_flags_ro & (TFRO_PAC_ENFORCE_USER_STATE |
1657 TFRO_PAC_EXC_FATAL));
1658 #endif /* __has_feature(ptrauth_calls) */
1659 /* Inherit the platform restrictions flags from parent if in fork */
1660 task_ro_data.t_flags_ro |= parent_t_flags_ro & (TFRO_PLATFORM | TFRO_JIT_EXC_FATAL);
1661 #if XNU_TARGET_OS_OSX
1662 task_ro_data.t_flags_ro |= parent_t_flags_ro & TFRO_MACH_HARDENING_OPT_OUT;
1663 #endif /* XNU_TARGET_OS_OSX */
1664
1665 /* task_security_config options are always inherited on fork */
1666 new_task->security_config = parent_task->security_config;
1667 }
1668
1669 #ifdef MACH_BSD
1670 new_task->corpse_info = NULL;
1671 #endif /* MACH_BSD */
1672
1673 /* The kern_task (not created by this function) has unique id 0; ids assigned here start at 1. */
1674 task_set_uniqueid(new_task);
1675
1676 #if CONFIG_MACF
1677 set_task_crash_label(new_task, NULL);
1678
1679 task_ro_data.task_filters.mach_trap_filter_mask = NULL;
1680 task_ro_data.task_filters.mach_kobj_filter_mask = NULL;
1681 #endif
1682
1683 #if CONFIG_MEMORYSTATUS
1684 if (max_task_footprint != 0) {
1685 ledger_set_limit(ledger, task_ledgers.phys_footprint, max_task_footprint, PHYS_FOOTPRINT_WARNING_LEVEL);
1686 }
1687 #endif /* CONFIG_MEMORYSTATUS */
1688
1689 if (task_wakeups_monitor_rate != 0) {
1690 uint32_t flags = WAKEMON_ENABLE | WAKEMON_SET_DEFAULTS;
1691 int32_t rate; // Ignored because of WAKEMON_SET_DEFAULTS
1692 task_wakeups_monitor_ctl(new_task, &flags, &rate);
1693 }
1694
1695 #if CONFIG_IO_ACCOUNTING
1696 uint32_t flags = IOMON_ENABLE;
1697 task_io_monitor_ctl(new_task, &flags);
1698 #endif /* CONFIG_IO_ACCOUNTING */
1699
1700 machine_task_init(new_task, parent_task, inherit_memory);
1701
1702 new_task->task_debug = NULL;
1703
1704 #if DEVELOPMENT || DEBUG
1705 new_task->task_unnested = FALSE;
1706 new_task->task_disconnected_count = 0;
1707 #endif
1708 queue_init(&new_task->semaphore_list);
1709 new_task->semaphores_owned = 0;
1710
1711 new_task->vtimers = 0;
1712
1713 new_task->shared_region = NULL;
1714
1715 new_task->affinity_space = NULL;
1716
1717 #if CONFIG_CPU_COUNTERS
1718 new_task->t_kpc = 0;
1719 #endif /* CONFIG_CPU_COUNTERS */
1720
1721 new_task->pidsuspended = FALSE;
1722 new_task->frozen = FALSE;
1723 new_task->changing_freeze_state = FALSE;
1724 new_task->rusage_cpu_flags = 0;
1725 new_task->rusage_cpu_percentage = 0;
1726 new_task->rusage_cpu_interval = 0;
1727 new_task->rusage_cpu_deadline = 0;
1728 new_task->rusage_cpu_callt = NULL;
1729 #if MACH_ASSERT
1730 new_task->suspends_outstanding = 0;
1731 #endif
1732 recount_task_init(&new_task->tk_recount);
1733
1734 #if HYPERVISOR
1735 new_task->hv_task_target = NULL;
1736 #endif /* HYPERVISOR */
1737
1738 #if CONFIG_TASKWATCH
1739 queue_init(&new_task->task_watchers);
1740 new_task->num_taskwatchers = 0;
1741 new_task->watchapplying = 0;
1742 #endif /* CONFIG_TASKWATCH */
1743
1744 new_task->mem_notify_reserved = 0;
1745
1746 new_task->requested_policy = default_task_requested_policy;
1747 new_task->effective_policy = default_task_effective_policy;
1748
1749 new_task->task_shared_region_slide = -1;
1750
1751 if (parent_task != NULL) {
1752 task_ro_data.task_tokens.sec_token = *task_get_sec_token(parent_task);
1753 task_ro_data.task_tokens.audit_token = *task_get_audit_token(parent_task);
1754
1755 task_ro_data.t_flags_ro |= parent_t_flags_ro & TFRO_FILTER_MSG;
1756 #if CONFIG_MACF
1757 if (!(t_flags & TF_CORPSE_FORK)) {
1758 task_ro_data.task_filters.mach_trap_filter_mask = task_get_mach_trap_filter_mask(parent_task);
1759 task_ro_data.task_filters.mach_kobj_filter_mask = task_get_mach_kobj_filter_mask(parent_task);
1760 }
1761 #endif
1762 } else {
1763 task_ro_data.task_tokens.sec_token = KERNEL_SECURITY_TOKEN;
1764 task_ro_data.task_tokens.audit_token = KERNEL_AUDIT_TOKEN;
1765 }
1766 /*
1767 * Intentionally initialized to zero; it will be set before returning
1768 * to userspace in task_set_ctrl_port_default().
1769 */
1770 task_ro_data.task_control_port_options = TASK_CONTROL_PORT_OPTIONS_NONE;
1771
1772 /* must set before task_importance_init_from_parent: */
1773 if (proc_ro != NULL) {
1774 new_task->bsd_info_ro = proc_ro_ref_task(proc_ro, new_task, &task_ro_data);
1775 } else {
1776 new_task->bsd_info_ro = proc_ro_alloc(NULL, NULL, new_task, &task_ro_data);
1777 }
1778
1779 ipc_task_init(new_task, parent_task);
1780
1781 task_importance_init_from_parent(new_task, parent_task);
1782
1783 new_task->corpse_vmobject_list = NULL;
1784
1785 if (parent_task != TASK_NULL) {
1786 /* inherit the parent's shared region */
1787 shared_region = vm_shared_region_get(parent_task);
1788 if (shared_region != NULL) {
1789 vm_shared_region_set(new_task, shared_region);
1790 }
1791
1792 #if __has_feature(ptrauth_calls)
1793 /* use parent's shared_region_id */
1794 char *shared_region_id = task_get_vm_shared_region_id_and_jop_pid(parent_task, NULL);
1795 if (shared_region_id != NULL) {
1796 shared_region_key_alloc(shared_region_id, FALSE, 0); /* get a reference */
1797 }
1798 task_set_shared_region_id(new_task, shared_region_id);
1799 #endif /* __has_feature(ptrauth_calls) */
1800
1801 if (task_has_64Bit_addr(parent_task)) {
1802 task_set_64Bit_addr(new_task);
1803 }
1804
1805 if (task_has_64Bit_data(parent_task)) {
1806 task_set_64Bit_data(new_task);
1807 }
1808
1809 if (inherit_memory) {
1810 new_task->all_image_info_addr = parent_task->all_image_info_addr;
1811 new_task->all_image_info_size = parent_task->all_image_info_size;
1812 if (parent_task->t_flags & TF_DYLD_ALL_IMAGE_FINAL) {
1813 new_task->t_flags |= TF_DYLD_ALL_IMAGE_FINAL;
1814 }
1815 }
1816 new_task->mach_header_vm_address = 0;
1817
1818 if (inherit_memory && parent_task->affinity_space) {
1819 task_affinity_create(parent_task, new_task);
1820 }
1821
1822 new_task->pset_hint = parent_task->pset_hint = task_choose_pset(parent_task);
1823
1824 new_task->task_exc_guard = parent_task->task_exc_guard;
1825 if (parent_task->t_flags & TF_NO_SMT) {
1826 new_task->t_flags |= TF_NO_SMT;
1827 }
1828
1829 if (parent_task->t_flags & TF_USE_PSET_HINT_CLUSTER_TYPE) {
1830 new_task->t_flags |= TF_USE_PSET_HINT_CLUSTER_TYPE;
1831 }
1832
1833 if (parent_task->t_flags & TF_TECS) {
1834 new_task->t_flags |= TF_TECS;
1835 }
1836
1837 #if defined(__x86_64__)
1838 if (parent_task->t_flags & TF_INSN_COPY_OPTOUT) {
1839 new_task->t_flags |= TF_INSN_COPY_OPTOUT;
1840 }
1841 #endif
1842
1843
1844 new_task->priority = BASEPRI_DEFAULT;
1845 new_task->max_priority = MAXPRI_USER;
1846 } else {
1847 #ifdef __LP64__
1848 if (is_64bit) {
1849 task_set_64Bit_addr(new_task);
1850 }
1851 #endif
1852
1853 if (is_64bit_data) {
1854 task_set_64Bit_data(new_task);
1855 }
1856
1857 new_task->all_image_info_addr = (mach_vm_address_t)0;
1858 new_task->all_image_info_size = (mach_vm_size_t)0;
1859
1860 new_task->pset_hint = PROCESSOR_SET_NULL;
1861
1862 new_task->task_exc_guard = TASK_EXC_GUARD_NONE;
1863
1864 if (new_task == kernel_task) {
1865 new_task->priority = BASEPRI_KERNEL;
1866 new_task->max_priority = MAXPRI_KERNEL;
1867 } else {
1868 new_task->priority = BASEPRI_DEFAULT;
1869 new_task->max_priority = MAXPRI_USER;
1870 }
1871 }
1872
1873 bzero(new_task->coalition, sizeof(new_task->coalition));
1874 for (int i = 0; i < COALITION_NUM_TYPES; i++) {
1875 queue_chain_init(new_task->task_coalition[i]);
1876 }
1877
1878 /* Allocate I/O Statistics */
1879 new_task->task_io_stats = kalloc_data(sizeof(struct io_stat_info),
1880 Z_WAITOK | Z_ZERO | Z_NOFAIL);
1881
1882 bzero(&(new_task->cpu_time_eqos_stats), sizeof(new_task->cpu_time_eqos_stats));
1883 bzero(&(new_task->cpu_time_rqos_stats), sizeof(new_task->cpu_time_rqos_stats));
1884
1885 bzero(&new_task->extmod_statistics, sizeof(new_task->extmod_statistics));
1886
1887 counter_alloc(&(new_task->pageins));
1888 counter_alloc(&(new_task->cow_faults));
1889 counter_alloc(&(new_task->messages_sent));
1890 counter_alloc(&(new_task->messages_received));
1891
1892 /* Copy resource accounting info from the parent for a corpse-forked task. */
1893 if (parent_task != NULL && (t_flags & TF_CORPSE_FORK)) {
1894 task_rollup_accounting_info(new_task, parent_task);
1895 task_store_owned_vmobject_info(new_task, parent_task);
1896 } else {
1897 /* Initialize to zero for standard fork/spawn case */
1898 new_task->total_runnable_time = 0;
1899 new_task->syscalls_mach = 0;
1900 new_task->syscalls_unix = 0;
1901 new_task->c_switch = 0;
1902 new_task->p_switch = 0;
1903 new_task->ps_switch = 0;
1904 new_task->decompressions = 0;
1905 new_task->low_mem_notified_warn = 0;
1906 new_task->low_mem_notified_critical = 0;
1907 new_task->purged_memory_warn = 0;
1908 new_task->purged_memory_critical = 0;
1909 new_task->low_mem_privileged_listener = 0;
1910 os_atomic_store(&new_task->memlimit_flags, 0, relaxed);
1911 new_task->task_timer_wakeups_bin_1 = 0;
1912 new_task->task_timer_wakeups_bin_2 = 0;
1913 new_task->task_gpu_ns = 0;
1914 new_task->task_writes_counters_internal.task_immediate_writes = 0;
1915 new_task->task_writes_counters_internal.task_deferred_writes = 0;
1916 new_task->task_writes_counters_internal.task_invalidated_writes = 0;
1917 new_task->task_writes_counters_internal.task_metadata_writes = 0;
1918 new_task->task_writes_counters_external.task_immediate_writes = 0;
1919 new_task->task_writes_counters_external.task_deferred_writes = 0;
1920 new_task->task_writes_counters_external.task_invalidated_writes = 0;
1921 new_task->task_writes_counters_external.task_metadata_writes = 0;
1922 #if CONFIG_PHYS_WRITE_ACCT
1923 new_task->task_fs_metadata_writes = 0;
1924 #endif /* CONFIG_PHYS_WRITE_ACCT */
1925 }
1926
1927
1928 new_task->donates_own_pages = FALSE;
1929 #if CONFIG_COALITIONS
1930 if (!(t_flags & TF_CORPSE_FORK)) {
1931 /* TODO: there is no graceful failure path here... */
1932 if (parent_coalitions && parent_coalitions[COALITION_TYPE_RESOURCE]) {
1933 coalitions_adopt_task(parent_coalitions, new_task);
1934 if (parent_coalitions[COALITION_TYPE_JETSAM]) {
1935 new_task->donates_own_pages = coalition_is_swappable(parent_coalitions[COALITION_TYPE_JETSAM]);
1936 }
1937 } else if (parent_task && parent_task->coalition[COALITION_TYPE_RESOURCE]) {
1938 /*
1939 * all tasks at least have a resource coalition, so
1940 * if the parent has one then inherit all coalitions
1941 * the parent is a part of
1942 */
1943 coalitions_adopt_task(parent_task->coalition, new_task);
1944 if (parent_task->coalition[COALITION_TYPE_JETSAM]) {
1945 new_task->donates_own_pages = coalition_is_swappable(parent_task->coalition[COALITION_TYPE_JETSAM]);
1946 }
1947 } else {
1948 /* TODO: assert that new_task will be PID 1 (launchd) */
1949 coalitions_adopt_init_task(new_task);
1950 }
1951 /*
1952 * on exec, we need to transfer the coalition roles from the
1953 * parent task to the exec copy task.
1954 */
1955 if (parent_task && (t_procflags & TPF_EXEC_COPY)) {
1956 int coal_roles[COALITION_NUM_TYPES];
1957 task_coalition_roles(parent_task, coal_roles);
1958 (void)coalitions_set_roles(new_task->coalition, new_task, coal_roles);
1959 }
1960 } else {
1961 coalitions_adopt_corpse_task(new_task);
1962 }
1963
1964 if (new_task->coalition[COALITION_TYPE_RESOURCE] == COALITION_NULL) {
1965 panic("created task is not a member of a resource coalition");
1966 }
1967 task_set_coalition_member(new_task);
1968 #endif /* CONFIG_COALITIONS */
1969
1970 if (parent_task != TASK_NULL) {
1971 /* task_policy_create queries the adopted coalition */
1972 task_policy_create(new_task, parent_task);
1973 }
1974
1975 new_task->dispatchqueue_offset = 0;
1976 if (parent_task != NULL) {
1977 new_task->dispatchqueue_offset = parent_task->dispatchqueue_offset;
1978 }
1979
1980 new_task->task_can_transfer_memory_ownership = FALSE;
1981 new_task->task_volatile_objects = 0;
1982 new_task->task_nonvolatile_objects = 0;
1983 new_task->task_objects_disowning = FALSE;
1984 new_task->task_objects_disowned = FALSE;
1985 new_task->task_owned_objects = 0;
1986 queue_init(&new_task->task_objq);
1987
1988 #if CONFIG_FREEZE
1989 queue_init(&new_task->task_frozen_cseg_q);
1990 #endif /* CONFIG_FREEZE */
1991
1992 task_objq_lock_init(new_task);
1993
1994 #if __arm64__
1995 new_task->task_legacy_footprint = FALSE;
1996 new_task->task_extra_footprint_limit = FALSE;
1997 new_task->task_ios13extended_footprint_limit = FALSE;
1998 #endif /* __arm64__ */
1999 new_task->task_region_footprint = FALSE;
2000 new_task->task_has_crossed_thread_limit = FALSE;
2001 new_task->task_thread_limit = 0;
2002 #if CONFIG_SECLUDED_MEMORY
2003 new_task->task_can_use_secluded_mem = FALSE;
2004 new_task->task_could_use_secluded_mem = FALSE;
2005 new_task->task_could_also_use_secluded_mem = FALSE;
2006 new_task->task_suppressed_secluded = FALSE;
2007 #endif /* CONFIG_SECLUDED_MEMORY */
2008
2009
2010 /*
2011 * t_flags is set up above. But since we don't
2012 * support darkwake mode being set that way
2013 * currently, we clear it out here explicitly.
2014 */
2015 new_task->t_flags &= ~(TF_DARKWAKE_MODE);
2016
2017 queue_init(&new_task->io_user_clients);
2018 new_task->loadTag = 0;
2019
2020 lck_mtx_lock(&tasks_threads_lock);
2021 queue_enter(&tasks, new_task, task_t, tasks);
2022 tasks_count++;
2023 if (tasks_suspend_state) {
2024 task_suspend_internal(new_task);
2025 }
2026 lck_mtx_unlock(&tasks_threads_lock);
2027 task_ref_hold_proc_task_struct(new_task);
2028
2029 return KERN_SUCCESS;
2030 }
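/*
 * Note on the calling convention (inferred from the body above, not
 * stated explicitly in this excerpt): the caller allocates the task
 * structure and passes it in as child_task; this function initializes
 * it in place rather than allocating it, which is why every failure
 * path unwinds the partial initialization (ref count, ledger,
 * counters, map) before returning KERN_RESOURCE_SHORTAGE.
 */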
2031
2032 /*
2033 * task_rollup_accounting_info
2034 *
2035 * Roll up accounting stats. Used to roll up stats
2036 * for the exec-copy task and for corpse fork.
2037 */
2038 void
2039 task_rollup_accounting_info(task_t to_task, task_t from_task)
2040 {
2041 assert(from_task != to_task);
2042
2043 recount_task_copy(&to_task->tk_recount, &from_task->tk_recount);
2044 to_task->total_runnable_time = from_task->total_runnable_time;
2045 counter_add(&to_task->faults, counter_load(&from_task->faults));
2046 counter_add(&to_task->pageins, counter_load(&from_task->pageins));
2047 counter_add(&to_task->cow_faults, counter_load(&from_task->cow_faults));
2048 counter_add(&to_task->messages_sent, counter_load(&from_task->messages_sent));
2049 counter_add(&to_task->messages_received, counter_load(&from_task->messages_received));
2050 to_task->decompressions = from_task->decompressions;
2051 to_task->syscalls_mach = from_task->syscalls_mach;
2052 to_task->syscalls_unix = from_task->syscalls_unix;
2053 to_task->c_switch = from_task->c_switch;
2054 to_task->p_switch = from_task->p_switch;
2055 to_task->ps_switch = from_task->ps_switch;
2056 to_task->extmod_statistics = from_task->extmod_statistics;
2057 to_task->low_mem_notified_warn = from_task->low_mem_notified_warn;
2058 to_task->low_mem_notified_critical = from_task->low_mem_notified_critical;
2059 to_task->purged_memory_warn = from_task->purged_memory_warn;
2060 to_task->purged_memory_critical = from_task->purged_memory_critical;
2061 to_task->low_mem_privileged_listener = from_task->low_mem_privileged_listener;
2062 *to_task->task_io_stats = *from_task->task_io_stats;
2063 to_task->cpu_time_eqos_stats = from_task->cpu_time_eqos_stats;
2064 to_task->cpu_time_rqos_stats = from_task->cpu_time_rqos_stats;
2065 to_task->task_timer_wakeups_bin_1 = from_task->task_timer_wakeups_bin_1;
2066 to_task->task_timer_wakeups_bin_2 = from_task->task_timer_wakeups_bin_2;
2067 to_task->task_gpu_ns = from_task->task_gpu_ns;
2068 to_task->task_writes_counters_internal.task_immediate_writes = from_task->task_writes_counters_internal.task_immediate_writes;
2069 to_task->task_writes_counters_internal.task_deferred_writes = from_task->task_writes_counters_internal.task_deferred_writes;
2070 to_task->task_writes_counters_internal.task_invalidated_writes = from_task->task_writes_counters_internal.task_invalidated_writes;
2071 to_task->task_writes_counters_internal.task_metadata_writes = from_task->task_writes_counters_internal.task_metadata_writes;
2072 to_task->task_writes_counters_external.task_immediate_writes = from_task->task_writes_counters_external.task_immediate_writes;
2073 to_task->task_writes_counters_external.task_deferred_writes = from_task->task_writes_counters_external.task_deferred_writes;
2074 to_task->task_writes_counters_external.task_invalidated_writes = from_task->task_writes_counters_external.task_invalidated_writes;
2075 to_task->task_writes_counters_external.task_metadata_writes = from_task->task_writes_counters_external.task_metadata_writes;
2076 #if CONFIG_PHYS_WRITE_ACCT
2077 to_task->task_fs_metadata_writes = from_task->task_fs_metadata_writes;
2078 #endif /* CONFIG_PHYS_WRITE_ACCT */
2079
2080 #if CONFIG_MEMORYSTATUS
2081 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.memorystatus_dirty_time);
2082 #endif /* CONFIG_MEMORYSTATUS */
2083
2084 /* Roll up only the non-memory ledger entries; memory accounting entries are deliberately skipped. */
2085 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time);
2086 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.platform_idle_wakeups);
2087 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.interrupt_wakeups);
2088 #if CONFIG_SCHED_SFI
2089 for (sfi_class_id_t class_id = SFI_CLASS_UNSPECIFIED; class_id < MAX_SFI_CLASS_ID; class_id++) {
2090 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.sfi_wait_times[class_id]);
2091 }
2092 #endif
2093 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time_billed_to_me);
2094 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time_billed_to_others);
2095 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.physical_writes);
2096 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.logical_writes);
2097 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.energy_billed_to_me);
2098 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.energy_billed_to_others);
2099 }
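/*
 * Usage sketch: task_create_internal() above invokes this rollup for a
 * corpse fork, pairing it with the owned-vmobject snapshot:
 *
 *   if (parent_task != NULL && (t_flags & TF_CORPSE_FORK)) {
 *       task_rollup_accounting_info(new_task, parent_task);
 *       task_store_owned_vmobject_info(new_task, parent_task);
 *   }
 */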
2100
2101 /*
2102 * task_deallocate_internal:
2103 *
2104 * Drop a reference on a task.
2105 * Don't call this directly.
2106 */
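/*
 * Presumed entry path (sketch; the real wrapper lives in the refcount
 * machinery, and the helper name below is hypothetical): the public
 * task_deallocate() drops the os_ref count and hands the remaining
 * count to this function, roughly:
 *
 *   os_ref_count_t refs = task_ref_release(task);  // hypothetical name
 *   task_deallocate_internal(task, refs);
 */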
2107 extern void task_deallocate_internal(task_t task, os_ref_count_t refs);
2108 void
2109 task_deallocate_internal(
2110 task_t task,
2111 os_ref_count_t refs)
2112 {
2113 ledger_amount_t credit, debit, interrupt_wakeups, platform_idle_wakeups;
2114
2115 if (task == TASK_NULL) {
2116 return;
2117 }
2118
2119 #if IMPORTANCE_INHERITANCE
2120 if (refs == 1) {
2121 /*
2122 * If the last ref potentially comes from the task's importance,
2123 * disconnect it. But more task refs may be added before
2124 * that completes, so wait for the reference to go to zero
2125 * naturally (it may happen on a recursive task_deallocate()
2126 * from the ipc_importance_disconnect_task() call).
2127 */
2128 if (IIT_NULL != task->task_imp_base) {
2129 ipc_importance_disconnect_task(task);
2130 }
2131 return;
2132 }
2133 #endif /* IMPORTANCE_INHERITANCE */
2134
2135 if (refs > 0) {
2136 return;
2137 }
2138
2139 /*
2140 * The task should be dead at this point. Ensure other resources,
2141 * like threads, are gone before we trash the world.
2142 */
2143 assert(queue_empty(&task->threads));
2144 assert(get_bsdtask_info(task) == NULL);
2145 assert(!is_active(task->itk_space));
2146 assert(!task->active);
2147 assert(task->active_thread_count == 0);
2148 assert(!task_get_game_mode(task));
2149 assert(!task_get_carplay_mode(task));
2150
2151 lck_mtx_lock(&tasks_threads_lock);
2152 assert(terminated_tasks_count > 0);
2153 queue_remove(&terminated_tasks, task, task_t, tasks);
2154 terminated_tasks_count--;
2155 lck_mtx_unlock(&tasks_threads_lock);
2156
2157 /*
2158 * Remove the reference on the bank context.
2159 */
2160 task_bank_reset(task);
2161
2162 kfree_data(task->task_io_stats, sizeof(struct io_stat_info));
2163
2164 /*
2165 * Give the machine-dependent code a chance
2166 * to perform cleanup before ripping apart
2167 * the task.
2168 */
2169 machine_task_terminate(task);
2170
2171 ipc_task_terminate(task);
2172
2173 /* let iokit know: termination phase 2 */
2174 iokit_task_terminate(task, 2);
2175
2176 /* Unregister task from userspace coredumps on panic */
2177 kern_unregister_userspace_coredump(task);
2178
2179 if (task->affinity_space) {
2180 task_affinity_deallocate(task);
2181 }
2182
2183 #if MACH_ASSERT
2184 if (task->ledger != NULL &&
2185 task->map != NULL &&
2186 task->map->pmap != NULL &&
2187 task->map->pmap->ledger != NULL) {
2188 assert(task->ledger == task->map->pmap->ledger);
2189 }
2190 #endif /* MACH_ASSERT */
2191
2192 vm_owned_objects_disown(task);
2193 assert(task->task_objects_disowned);
2194 if (task->task_owned_objects != 0) {
2195 panic("task_deallocate(%p): "
2196 "volatile_objects=%d nonvolatile_objects=%d owned=%d\n",
2197 task,
2198 task->task_volatile_objects,
2199 task->task_nonvolatile_objects,
2200 task->task_owned_objects);
2201 }
2202
2203 #if CONFIG_DEFERRED_RECLAIM
2204 /*
2205 * Remove this task's reclaim buffer from the global queues.
2206 */
2207 if (task->deferred_reclamation_metadata != NULL) {
2208 vm_deferred_reclamation_buffer_deallocate(task->deferred_reclamation_metadata);
2209 task->deferred_reclamation_metadata = NULL;
2210 }
2211 #endif /* CONFIG_DEFERRED_RECLAIM */
2212
2213 vm_map_deallocate(task->map);
2214 if (task->is_large_corpse) {
2215 assert(large_corpse_count > 0);
2216 OSDecrementAtomic(&large_corpse_count);
2217 task->is_large_corpse = false;
2218 }
2219 is_release(task->itk_space);
2220
2221 if (task->t_rr_ranges) {
2222 restartable_ranges_release(task->t_rr_ranges);
2223 }
2224
2225 ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
2226 &interrupt_wakeups, &debit);
2227 ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
2228 &platform_idle_wakeups, &debit);
2229
2230 struct recount_times_mach sum = { 0 };
2231 struct recount_times_mach p_only = { 0 };
2232 recount_task_times_perf_only(task, &sum, &p_only);
2233 #if CONFIG_PERVASIVE_ENERGY
2234 uint64_t energy = recount_task_energy_nj(task);
2235 #endif /* CONFIG_PERVASIVE_ENERGY */
2236 recount_task_deinit(&task->tk_recount);
2237
2238 /* Accumulate statistics for dead tasks */
2239 lck_spin_lock(&dead_task_statistics_lock);
2240 dead_task_statistics.total_user_time += sum.rtm_user;
2241 dead_task_statistics.total_system_time += sum.rtm_system;
2242
2243 dead_task_statistics.task_interrupt_wakeups += interrupt_wakeups;
2244 dead_task_statistics.task_platform_idle_wakeups += platform_idle_wakeups;
2245
2246 dead_task_statistics.task_timer_wakeups_bin_1 += task->task_timer_wakeups_bin_1;
2247 dead_task_statistics.task_timer_wakeups_bin_2 += task->task_timer_wakeups_bin_2;
2248 dead_task_statistics.total_ptime += p_only.rtm_user + p_only.rtm_system;
2249 dead_task_statistics.total_pset_switches += task->ps_switch;
2250 dead_task_statistics.task_gpu_ns += task->task_gpu_ns;
2251 #if CONFIG_PERVASIVE_ENERGY
2252 dead_task_statistics.task_energy += energy;
2253 #endif /* CONFIG_PERVASIVE_ENERGY */
2254
2255 lck_spin_unlock(&dead_task_statistics_lock);
2256 lck_mtx_destroy(&task->lock, &task_lck_grp);
2257
2258 if (!ledger_get_entries(task->ledger, task_ledgers.tkm_private, &credit,
2259 &debit)) {
2260 OSAddAtomic64(credit, (int64_t *)&tasks_tkm_private.alloc);
2261 OSAddAtomic64(debit, (int64_t *)&tasks_tkm_private.free);
2262 }
2263 if (!ledger_get_entries(task->ledger, task_ledgers.tkm_shared, &credit,
2264 &debit)) {
2265 OSAddAtomic64(credit, (int64_t *)&tasks_tkm_shared.alloc);
2266 OSAddAtomic64(debit, (int64_t *)&tasks_tkm_shared.free);
2267 }
2268 ledger_dereference(task->ledger);
2269
2270 counter_free(&task->faults);
2271 counter_free(&task->pageins);
2272 counter_free(&task->cow_faults);
2273 counter_free(&task->messages_sent);
2274 counter_free(&task->messages_received);
2275
2276 #if CONFIG_COALITIONS
2277 task_release_coalitions(task);
2278 #endif /* CONFIG_COALITIONS */
2279
2280 bzero(task->coalition, sizeof(task->coalition));
2281
2282 #if MACH_BSD
2283 /* clean up collected information since last reference to task is gone */
2284 if (task->corpse_info) {
2285 void *corpse_info_kernel = kcdata_memory_get_begin_addr(task->corpse_info);
2286 task_crashinfo_destroy(task->corpse_info);
2287 task->corpse_info = NULL;
2288 kfree_data(corpse_info_kernel, CORPSEINFO_ALLOCATION_SIZE);
2289 }
2290 #endif
2291
2292 #if CONFIG_MACF
2293 if (get_task_crash_label(task)) {
2294 mac_exc_free_label(get_task_crash_label(task));
2295 set_task_crash_label(task, NULL);
2296 }
2297 #endif
2298
2299 assert(queue_empty(&task->task_objq));
2300 task_objq_lock_destroy(task);
2301
2302 if (task->corpse_vmobject_list) {
2303 kfree_data(task->corpse_vmobject_list,
2304 (vm_size_t)task->corpse_vmobject_list_size);
2305 }
2306
2307 task_ref_count_fini(task);
2308 proc_ro_erase_task(task->bsd_info_ro);
2309 task_release_proc_task_struct(task, task->bsd_info_ro);
2310 }
2311
2312 /*
2313 * task_name_deallocate_mig:
2314 *
2315 * Drop a reference on a task name.
2316 */
2317 void
2318 task_name_deallocate_mig(
2319 task_name_t task_name)
2320 {
2321 return task_deallocate_grp((task_t)task_name, TASK_GRP_MIG);
2322 }
2323
2324 /*
2325 * task_policy_set_deallocate_mig:
2326 *
2327 * Drop a reference on a task type.
2328 */
2329 void
2330 task_policy_set_deallocate_mig(task_policy_set_t task_policy_set)
2331 {
2332 return task_deallocate_grp((task_t)task_policy_set, TASK_GRP_MIG);
2333 }
2334
2335 /*
2336 * task_policy_get_deallocate_mig:
2337 *
2338 * Drop a reference on a task type.
2339 */
2340 void
2341 task_policy_get_deallocate_mig(task_policy_get_t task_policy_get)
2342 {
2343 return task_deallocate_grp((task_t)task_policy_get, TASK_GRP_MIG);
2344 }
2345
2346 /*
2347 * task_inspect_deallocate_mig:
2348 *
2349 * Drop a task inspection reference.
2350 */
2351 void
2352 task_inspect_deallocate_mig(
2353 task_inspect_t task_inspect)
2354 {
2355 return task_deallocate_grp((task_t)task_inspect, TASK_GRP_MIG);
2356 }
2357
2358 /*
2359 * task_read_deallocate_mig:
2360 *
2361 * Drop a reference on task read port.
2362 */
2363 void
2364 task_read_deallocate_mig(
2365 task_read_t task_read)
2366 {
2367 return task_deallocate_grp((task_t)task_read, TASK_GRP_MIG);
2368 }
2369
2370 /*
2371 * task_suspension_token_deallocate:
2372 *
2373 * Drop a reference on a task suspension token.
2374 */
2375 void
2376 task_suspension_token_deallocate(
2377 task_suspension_token_t token)
2378 {
2379 return task_deallocate((task_t)token);
2380 }
2381
2382 void
2383 task_suspension_token_deallocate_grp(
2384 task_suspension_token_t token,
2385 task_grp_t grp)
2386 {
2387 return task_deallocate_grp((task_t)token, grp);
2388 }
2389
2390 /*
2391 * task_collect_crash_info:
2392 *
2393 * Collect crash info from BSD- and Mach-based data.
2394 */
2395 kern_return_t
2396 task_collect_crash_info(
2397 task_t task,
2398 #ifdef CONFIG_MACF
2399 struct label *crash_label,
2400 #endif
2401 int is_corpse_fork)
2402 {
2403 kern_return_t kr = KERN_SUCCESS;
2404
2405 kcdata_descriptor_t crash_data = NULL;
2406 kcdata_descriptor_t crash_data_release = NULL;
2407 mach_msg_type_number_t size = CORPSEINFO_ALLOCATION_SIZE;
2408 mach_vm_offset_t crash_data_ptr = 0;
2409 void *crash_data_kernel = NULL;
2410 void *crash_data_kernel_release = NULL;
2411 #if CONFIG_MACF
2412 struct label *label, *free_label;
2413 #endif
2414
2415 if (!corpses_enabled()) {
2416 return KERN_NOT_SUPPORTED;
2417 }
2418
2419 #if CONFIG_MACF
2420 free_label = label = mac_exc_create_label(NULL);
2421 #endif
2422
2423 task_lock(task);
2424
2425 assert(is_corpse_fork || get_bsdtask_info(task) != NULL);
2426 if (task->corpse_info == NULL && (is_corpse_fork || get_bsdtask_info(task) != NULL)) {
2427 #if CONFIG_MACF
2428 /* Set the crash label, used by the exception delivery mac hook */
2429 free_label = get_task_crash_label(task); // Most likely NULL.
2430 set_task_crash_label(task, label);
2431 mac_exc_update_task_crash_label(task, crash_label);
2432 #endif
2433 task_unlock(task);
2434
2435 crash_data_kernel = kalloc_data(CORPSEINFO_ALLOCATION_SIZE,
2436 Z_WAITOK | Z_ZERO);
2437 if (crash_data_kernel == NULL) {
2438 kr = KERN_RESOURCE_SHORTAGE;
2439 goto out_no_lock;
2440 }
2441 crash_data_ptr = (mach_vm_offset_t) crash_data_kernel;
2442
2443 /* Do not get a corpse ref for corpse fork */
2444 crash_data = task_crashinfo_alloc_init((mach_vm_address_t)crash_data_ptr, size,
2445 is_corpse_fork ? 0 : CORPSE_CRASHINFO_HAS_REF,
2446 KCFLAG_USE_MEMCOPY);
2447 if (crash_data) {
2448 task_lock(task);
2449 crash_data_release = task->corpse_info;
2450 crash_data_kernel_release = kcdata_memory_get_begin_addr(crash_data_release);
2451 task->corpse_info = crash_data;
2452
2453 task_unlock(task);
2454 kr = KERN_SUCCESS;
2455 } else {
2456 kfree_data(crash_data_kernel,
2457 CORPSEINFO_ALLOCATION_SIZE);
2458 kr = KERN_FAILURE;
2459 }
2460
2461 if (crash_data_release != NULL) {
2462 task_crashinfo_destroy(crash_data_release);
2463 }
2464 kfree_data(crash_data_kernel_release, CORPSEINFO_ALLOCATION_SIZE);
2465 } else {
2466 task_unlock(task);
2467 }
2468
2469 out_no_lock:
2470 #if CONFIG_MACF
2471 if (free_label != NULL) {
2472 mac_exc_free_label(free_label);
2473 }
2474 #endif
2475 return kr;
2476 }
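/*
 * The canonical caller in this file is task_mark_corpse() below: it
 * passes the crashing task, a freshly created MAC label (when
 * CONFIG_MACF is set), and is_corpse_fork == FALSE.
 */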
2477
2478 /*
2479 * task_deliver_crash_notification:
2480 *
2481 * Makes an outcall to the registered host port for a corpse.
2482 */
2483 kern_return_t
2484 task_deliver_crash_notification(
2485 task_t corpse, /* corpse or corpse fork */
2486 thread_t thread,
2487 exception_type_t etype,
2488 mach_exception_subcode_t subcode)
2489 {
2490 kcdata_descriptor_t crash_info = corpse->corpse_info;
2491 thread_t th_iter = NULL;
2492 kern_return_t kr = KERN_SUCCESS;
2493 wait_interrupt_t wsave;
2494 mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
2495 ipc_port_t corpse_port;
2496
2497 if (crash_info == NULL) {
2498 return KERN_FAILURE;
2499 }
2500
2501 assert(task_is_a_corpse(corpse));
2502
2503 task_lock(corpse);
2504
2505 /*
2506 * Always populate code[0] as the effective exception type for EXC_CORPSE_NOTIFY.
2507 * Crash reporters should derive whether it was fatal from the corpse blob.
2508 */
2509 code[0] = etype;
2510 code[1] = subcode;
2511
2512 queue_iterate(&corpse->threads, th_iter, thread_t, task_threads)
2513 {
2514 if (th_iter->corpse_dup == FALSE) {
2515 ipc_thread_reset(th_iter);
2516 }
2517 }
2518 task_unlock(corpse);
2519
2520 /* Arm the no-sender notification for taskport */
2521 task_reference(corpse);
2522 corpse_port = convert_corpse_to_port_and_nsrequest(corpse);
2523
2524 wsave = thread_interrupt_level(THREAD_UNINT);
2525 kr = exception_triage_thread(EXC_CORPSE_NOTIFY, code, EXCEPTION_CODE_MAX, thread);
2526 if (kr != KERN_SUCCESS) {
2527 printf("Failed to send exception EXC_CORPSE_NOTIFY. error code: %d for pid %d\n", kr, task_pid(corpse));
2528 }
2529
2530 (void)thread_interrupt_level(wsave);
2531
2532 /*
2533 * Drop the send right on corpse port, will fire the
2534 * no-sender notification if exception deliver failed.
2535 */
2536 ipc_port_release_send(corpse_port);
2537 return kr;
2538 }
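/*
 * Sketch of the receiver's view (illustrative): the EXC_CORPSE_NOTIFY
 * message carries the pair populated above, so a crash reporter
 * handling the exception can recover it as:
 *
 *   exception_type_t etype = (exception_type_t)code[0];
 *   mach_exception_subcode_t subcode = code[1];
 */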
2539
2540 /*
2541 * task_terminate:
2542 *
2543 * Terminate the specified task. See comments on thread_terminate
2544 * (kern/thread.c) about problems with terminating the "current task."
2545 */
2546
2547 kern_return_t
2548 task_terminate(
2549 task_t task)
2550 {
2551 if (task == TASK_NULL) {
2552 return KERN_INVALID_ARGUMENT;
2553 }
2554
2555 if (get_bsdtask_info(task)) {
2556 return KERN_FAILURE;
2557 }
2558
2559 return task_terminate_internal(task);
2560 }
2561
2562 #if MACH_ASSERT
2563 extern int proc_pid(struct proc *);
2564 extern void proc_name_kdp(struct proc *p, char *buf, int size);
2565 #endif /* MACH_ASSERT */
2566
2567 static void
2568 __unused task_partial_reap(task_t task, __unused int pid)
2569 {
2570 unsigned int reclaimed_resident = 0;
2571 unsigned int reclaimed_compressed = 0;
2572 uint64_t task_page_count;
2573
2574 task_page_count = (get_task_phys_footprint(task) / PAGE_SIZE_64);
2575
2576 KDBG(VMDBG_CODE(DBG_VM_MAP_PARTIAL_REAP) | DBG_FUNC_START,
2577 pid, task_page_count);
2578
2579 vm_map_partial_reap(task->map, &reclaimed_resident, &reclaimed_compressed);
2580
2581 KDBG(VMDBG_CODE(DBG_VM_MAP_PARTIAL_REAP) | DBG_FUNC_END,
2582 pid, reclaimed_resident, reclaimed_compressed);
2583 }
2584
2585 /*
2586 * task_mark_corpse:
2587 *
2588 * Mark the task as a corpse. Called by crashing thread.
2589 */
2590 kern_return_t
2591 task_mark_corpse(task_t task)
2592 {
2593 kern_return_t kr = KERN_SUCCESS;
2594 thread_t self_thread;
2595 (void) self_thread;
2596 wait_interrupt_t wsave;
2597 #if CONFIG_MACF
2598 struct label *crash_label = NULL;
2599 #endif
2600
2601 assert(task != kernel_task);
2602 assert(task == current_task());
2603 assert(!task_is_a_corpse(task));
2604
2605 #if CONFIG_MACF
2606 crash_label = mac_exc_create_label_for_proc((struct proc*)get_bsdtask_info(task));
2607 #endif
2608
2609 kr = task_collect_crash_info(task,
2610 #if CONFIG_MACF
2611 crash_label,
2612 #endif
2613 FALSE);
2614 if (kr != KERN_SUCCESS) {
2615 goto out;
2616 }
2617
2618 /* Store owned vmobjects so we can access them after being marked as corpse */
2619 task_store_owned_vmobject_info(task, task);
2620
2621 self_thread = current_thread();
2622
2623 wsave = thread_interrupt_level(THREAD_UNINT);
2624 task_lock(task);
2625
2626 /*
2627 * Check if any other thread called task_terminate_internal
2628 * and made the task inactive before we could mark it for
2629 * corpse pending report. Bail out if the task is inactive.
2630 */
2631 if (!task->active) {
2632 kcdata_descriptor_t crash_data_release = task->corpse_info;
2633 void *crash_data_kernel_release = kcdata_memory_get_begin_addr(crash_data_release);
2634
2635 task->corpse_info = NULL;
2636 task_unlock(task);
2637
2638 if (crash_data_release != NULL) {
2639 task_crashinfo_destroy(crash_data_release);
2640 }
2641 kfree_data(crash_data_kernel_release, CORPSEINFO_ALLOCATION_SIZE);
2642 return KERN_TERMINATED;
2643 }
2644
2645 /*
2646 * ipc_task_reset() moved to last thread_terminate_self(): rdar://75737960.
2647 * disable old ports here instead.
2648 *
2649 * The vm_map and ipc_space must exist until this function returns,
2650 * convert_port_to_{map,space}_with_flavor relies on this behavior.
2651 *
2652 * Note this must be done before we mark the port as a corpse,
2653 * so that task_port_no_senders() can determine if the no-senders
2654 * is for a real corpse or not.
2655 */
2656 ipc_task_disable(task);
2657
2658 task_set_corpse_pending_report(task);
2659 task_set_corpse(task);
2660 task->crashed_thread_id = thread_tid(self_thread);
2661
2662 kr = task_start_halt_locked(task, TRUE);
2663 assert(kr == KERN_SUCCESS);
2664
2665 task_set_uniqueid(task);
2666
2667 task_unlock(task);
2668
2669 /* let iokit know: termination phase 1 */
2670 iokit_task_terminate(task, 1);
2671
2672 /* terminate the ipc space */
2673 ipc_space_terminate(task->itk_space);
2674
2675 /* Add it to global corpse task list */
2676 task_add_to_corpse_task_list(task);
2677
2678 thread_terminate_internal(self_thread);
2679
2680 (void) thread_interrupt_level(wsave);
2681 assert(task->halting == TRUE);
2682
2683 out:
2684 #if CONFIG_MACF
2685 mac_exc_free_label(crash_label);
2686 #endif
2687 return kr;
2688 }
2689
2690 /*
2691 * task_set_uniqueid
2692 *
2693 * Set the task's uniqueid to a systemwide-unique 64-bit value.
2694 */
2695 void
2696 task_set_uniqueid(task_t task)
2697 {
2698 task->task_uniqueid = OSIncrementAtomic64(&next_taskuniqueid);
2699 }
2700
2701 /*
2702 * task_clear_corpse
2703 *
2704 * Clears the corpse-pending bit on the task.
2705 * Removes the inspection bit from the threads.
2706 */
2707 void
2708 task_clear_corpse(task_t task)
2709 {
2710 thread_t th_iter = NULL;
2711
2712 task_lock(task);
2713 queue_iterate(&task->threads, th_iter, thread_t, task_threads)
2714 {
2715 thread_mtx_lock(th_iter);
2716 th_iter->inspection = FALSE;
2717 ipc_thread_disable(th_iter);
2718 thread_mtx_unlock(th_iter);
2719 }
2720
2721 thread_terminate_crashed_threads();
2722 /* remove the pending corpse report flag */
2723 task_clear_corpse_pending_report(task);
2724
2725 task_unlock(task);
2726 }
2727
2728 /*
2729 * task_port_no_senders
2730 *
2731 * Called whenever the Mach port system detects no-senders on
2732 * a control task port.
2733 *
2734 * Only task ports for corpses need to take action on it,
2735 * and each notification that comes in should terminate
2736 * the task (corpse).
2737 */
2738 static void
2739 task_port_no_senders(ipc_port_t port, __unused mach_port_mscount_t mscount)
2740 {
2741 bool is_corpse = false;
2742 task_t task;
2743
2744 ip_mq_lock(port);
2745 task = ipc_kobject_get_locked(port, IKOT_TASK_CONTROL);
2746 if (task == TASK_NULL || !task_is_a_corpse(task)) {
2747 task = TASK_NULL;
2748 } else {
2749 task_reference_mig(task);
2750 }
2751 ip_mq_unlock(port);
2752
2753 /*
2754 * Task might be a corpse; we must inspect this under
2755 * the itk_lock to resolve the race with task_mark_corpse():
2756 *
2757 * If the task associated with the port is NULL under the itk_lock(),
2758 * then the port was a former IKOT_TASK_CONTROL port and we should
2759 * leave it alone.
2760 *
2761 * TODO: we should really make corpses use their own IKOT_TASK_CORPSE
2762 * port type instead of these hacks.
2763 */
2764 if (task) {
2765 itk_lock(task);
2766 ip_mq_lock(port);
2767 assert(task_is_a_corpse(task));
2768 is_corpse = (ipc_kobject_get_locked(port, IKOT_TASK_CONTROL) !=
2769 TASK_NULL);
2770 ip_mq_unlock(port);
2771 itk_unlock(task);
2772 task_deallocate_mig(task);
2773 }
2774
2775 if (is_corpse) {
2776 /* Remove the task from global corpse task list */
2777 task_remove_from_corpse_task_list(task);
2778
2779 task_clear_corpse(task);
2780 vm_map_unset_corpse_source(task->map);
2781 task_terminate_internal(task);
2782 }
2783 }
2784
2785 /*
2786 * task_port_with_flavor_no_senders
2787 *
2788 * Called whenever the Mach port system detects no-senders on
2789 * the task inspect or read port. These ports are allocated lazily and
2790 * should be deallocated here when there are no senders remaining.
2791 */
2792 static void
2793 task_port_with_flavor_no_senders(ipc_port_t port, mach_port_mscount_t mscount)
2794 {
2795 task_t task;
2796 mach_task_flavor_t flavor;
2797 ipc_kobject_type_t kotype;
2798
2799 ip_mq_lock(port);
2800 if (!ipc_kobject_is_mscount_current_locked(port, mscount)) {
2801 ip_mq_unlock(port);
2802 return;
2803 }
2804
2805 kotype = ip_type(port);
2806 assert((IKOT_TASK_READ == kotype) || (IKOT_TASK_INSPECT == kotype));
2807 task = ipc_kobject_get_locked(port, kotype);
2808 if (task != TASK_NULL) {
2809 task_reference(task);
2810 }
2811 ip_mq_unlock(port);
2812
2813 if (task == TASK_NULL) {
2814 /* The task is exiting or disabled; it will eventually deallocate the port */
2815 return;
2816 }
2817
2818 if (kotype == IKOT_TASK_READ) {
2819 flavor = TASK_FLAVOR_READ;
2820 } else {
2821 flavor = TASK_FLAVOR_INSPECT;
2822 }
2823
2824 itk_lock(task);
2825 ip_mq_lock(port);
2826
2827 /*
2828 * If the port is no longer active, then ipc_task_terminate() ran
2829 * and destroyed the kobject already. Just deallocate the task
2830 * ref we took and go away.
2831 *
2832 * It is also possible that several nsrequests are in flight;
2833 * only one shall NULL-out the port entry, and that one
2834 * gets to dealloc the port.
2835 *
2836 * Check for a stale no-senders notification. A call to any function
2837 * that vends out send rights to this port could resurrect it between
2838 * this notification being generated and actually being handled here.
2839 */
2840 if (task->itk_task_ports[flavor] != port ||
2841 !ipc_kobject_is_mscount_current_locked(port, mscount)) {
2842 ip_mq_unlock(port);
2843 itk_unlock(task);
2844 task_deallocate(task);
2845 return;
2846 }
2847
2848 task->itk_task_ports[flavor] = IP_NULL;
2849 itk_unlock(task);
2850
2851 ipc_kobject_dealloc_port_and_unlock(port, mscount, kotype);
2852
2853 task_deallocate(task);
2854 }
2855
2856 /*
2857 * task_wait_till_threads_terminate_locked
2858 *
2859 * Wait till all the threads in the task are terminated.
2860 * Might release the task lock and re-acquire it.
2861 */
2862 void
2863 task_wait_till_threads_terminate_locked(task_t task)
2864 {
2865 /* wait for all the threads in the task to terminate */
2866 while (task->active_thread_count != 0) {
2867 assert_wait((event_t)&task->active_thread_count, THREAD_UNINT);
2868 task_unlock(task);
2869 thread_block(THREAD_CONTINUE_NULL);
2870
2871 task_lock(task);
2872 }
2873 }
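/*
 * The wait above is paired with a wakeup on the same event from the
 * thread-termination path, presumably along the lines of
 * thread_wakeup((event_t)&task->active_thread_count); that code lives
 * outside this excerpt, so the exact call site is an assumption.
 */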
2874
2875 /*
2876 * task_duplicate_map_and_threads
2877 *
2878 * Copy the vm_map of the source task.
2879 * Copy active threads from the source task to the destination task.
2880 * The source task is suspended for the duration of the copy.
2881 */
2882 kern_return_t
2883 task_duplicate_map_and_threads(
2884 task_t task,
2885 void *p,
2886 task_t new_task,
2887 thread_t *thread_ret,
2888 uint64_t **udata_buffer,
2889 int *size,
2890 int *num_udata,
2891 bool for_exception)
2892 {
2893 kern_return_t kr = KERN_SUCCESS;
2894 int active;
2895 thread_t thread, self, thread_return = THREAD_NULL;
2896 thread_t new_thread = THREAD_NULL, first_thread = THREAD_NULL;
2897 thread_t *thread_array;
2898 uint32_t active_thread_count = 0, array_count = 0, i;
2899 vm_map_t oldmap;
2900 uint64_t *buffer = NULL;
2901 int buf_size = 0;
2902 int est_knotes = 0, num_knotes = 0;
2903
2904 self = current_thread();
2905
2906 /*
2907 * Suspend the task to copy thread state; use the internal
2908 * variant so that no user-space process can resume
2909 * the task out from under us.
2910 */
2911 kr = task_suspend_internal(task);
2912 if (kr != KERN_SUCCESS) {
2913 return kr;
2914 }
2915
2916 if (task->map->disable_vmentry_reuse == TRUE) {
2917 /*
2918 * Quite likely GuardMalloc (or some debugging tool)
2919 * is being used on this task, and it has gone through
2920 * its limit. Making a corpse would likely encounter
2921 * a lot of VM entries that need COW.
2922 *
2923 * Skip it.
2924 */
2925 #if DEVELOPMENT || DEBUG
2926 memorystatus_abort_vm_map_fork(task);
2927 #endif
2928 ktriage_record(thread_tid(self), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_CORPSE, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_CORPSE_FAIL_LIBGMALLOC), 0 /* arg */);
2929 task_resume_internal(task);
2930 return KERN_FAILURE;
2931 }
2932
2933 /* Check with VM if vm_map_fork is allowed for this task */
2934 bool is_large = false;
2935 if (memorystatus_allowed_vm_map_fork(task, &is_large)) {
2936 /* Set up the new task's vm_map: switch from the parent task's map to its COW copy */
2937 oldmap = new_task->map;
2938 new_task->map = vm_map_fork(new_task->ledger,
2939 task->map,
2940 (VM_MAP_FORK_SHARE_IF_INHERIT_NONE |
2941 VM_MAP_FORK_PRESERVE_PURGEABLE |
2942 VM_MAP_FORK_CORPSE_FOOTPRINT |
2943 VM_MAP_FORK_SHARE_IF_OWNED));
2944 if (new_task->map) {
2945 new_task->is_large_corpse = is_large;
2946 vm_map_deallocate(oldmap);
2947
2948 /* copy ledgers that impact the memory footprint */
2949 vm_map_copy_footprint_ledgers(task, new_task);
2950
2951 /* Get all the udata pointers from kqueue */
2952 est_knotes = kevent_proc_copy_uptrs(p, NULL, 0);
2953 if (est_knotes > 0) {
2954 buf_size = (est_knotes + 32) * sizeof(uint64_t);
2955 buffer = kalloc_data(buf_size, Z_WAITOK);
2956 num_knotes = kevent_proc_copy_uptrs(p, buffer, buf_size);
2957 if (num_knotes > est_knotes + 32) {
2958 num_knotes = est_knotes + 32;
2959 }
2960 }
2961 } else {
2962 if (is_large) {
2963 assert(large_corpse_count > 0);
2964 OSDecrementAtomic(&large_corpse_count);
2965 }
2966 new_task->map = oldmap;
2967 #if DEVELOPMENT || DEBUG
2968 memorystatus_abort_vm_map_fork(task);
2969 #endif
2970 task_resume_internal(task);
2971 return KERN_NO_SPACE;
2972 }
2973 } else if (!for_exception) {
2974 #if DEVELOPMENT || DEBUG
2975 memorystatus_abort_vm_map_fork(task);
2976 #endif
2977 task_resume_internal(task);
2978 return KERN_NO_SPACE;
2979 }
2980
2981 active_thread_count = task->active_thread_count;
2982 if (active_thread_count == 0) {
2983 kfree_data(buffer, buf_size);
2984 task_resume_internal(task);
2985 return KERN_FAILURE;
2986 }
2987
2988 thread_array = kalloc_type(thread_t, active_thread_count, Z_WAITOK);
2989
2990 /* Iterate all the threads and drop the task lock before calling thread_create_with_continuation */
2991 task_lock(task);
2992 queue_iterate(&task->threads, thread, thread_t, task_threads) {
2993 /* Skip inactive threads */
2994 active = thread->active;
2995 if (!active) {
2996 continue;
2997 }
2998
2999 if (array_count >= active_thread_count) {
3000 break;
3001 }
3002
3003 thread_array[array_count++] = thread;
3004 thread_reference(thread);
3005 }
3006 task_unlock(task);
3007
3008 for (i = 0; i < array_count; i++) {
3009 kr = thread_create_with_continuation(new_task, &new_thread, (thread_continue_t)thread_corpse_continue);
3010 if (kr != KERN_SUCCESS) {
3011 break;
3012 }
3013
3014 /* Equivalent of current thread in corpse */
3015 if (thread_array[i] == self) {
3016 thread_return = new_thread;
3017 new_task->crashed_thread_id = thread_tid(new_thread);
3018 } else if (first_thread == NULL) {
3019 first_thread = new_thread;
3020 } else {
3021 /* drop the extra ref returned by thread_create_with_continuation */
3022 thread_deallocate(new_thread);
3023 }
3024
3025 kr = thread_dup2(thread_array[i], new_thread);
3026 if (kr != KERN_SUCCESS) {
3027 thread_mtx_lock(new_thread);
3028 new_thread->corpse_dup = TRUE;
3029 thread_mtx_unlock(new_thread);
3030 continue;
3031 }
3032
3033 /* Copy thread name */
3034 bsd_copythreadname(get_bsdthread_info(new_thread),
3035 get_bsdthread_info(thread_array[i]));
3036 new_thread->thread_tag = thread_array[i]->thread_tag &
3037 ~THREAD_TAG_USER_JOIN;
3038 thread_copy_resource_info(new_thread, thread_array[i]);
3039 }
3040
3041 /* return the first thread if we couldn't find the equivalent of current */
3042 if (thread_return == THREAD_NULL) {
3043 thread_return = first_thread;
3044 } else if (first_thread != THREAD_NULL) {
3045 /* drop the extra ref returned by thread_create_with_continuation */
3046 thread_deallocate(first_thread);
3047 }
3048
3049 task_resume_internal(task);
3050
3051 for (i = 0; i < array_count; i++) {
3052 thread_deallocate(thread_array[i]);
3053 }
3054 kfree_type(thread_t, active_thread_count, thread_array);
3055
3056 if (kr == KERN_SUCCESS) {
3057 *thread_ret = thread_return;
3058 *udata_buffer = buffer;
3059 *size = buf_size;
3060 *num_udata = num_knotes;
3061 } else {
3062 if (thread_return != THREAD_NULL) {
3063 thread_deallocate(thread_return);
3064 }
3065 kfree_data(buffer, buf_size);
3066 }
3067
3068 return kr;
3069 }
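
/*
 * A note on the udata copy above: kevent_proc_copy_uptrs() is called twice,
 * first to estimate the entry count and then to fill the buffer; the buffer
 * is padded with 32 slots of slack for knotes registered between the two
 * calls, and the second result is clamped to that slack. A minimal sketch
 * of the same estimate/pad/clamp pattern, using hypothetical count_items()
 * and copy_items() helpers in place of kevent_proc_copy_uptrs():
 */
#if 0 /* illustrative sketch, not part of XNU */
	int64_t est, actual = 0;
	uint64_t *buf = NULL;
	vm_size_t bufsz = 0;

	est = count_items(p);                      /* pass 1: estimate only */
	if (est > 0) {
		bufsz = (est + SLACK) * sizeof(uint64_t);
		buf = kalloc_data(bufsz, Z_WAITOK);
		actual = copy_items(p, buf, bufsz);    /* pass 2: fill buffer */
		if (actual > est + SLACK) {
			actual = est + SLACK;          /* clamp to allocation */
		}
	}
#endif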
3070
3071 #if CONFIG_SECLUDED_MEMORY
3072 extern void task_set_can_use_secluded_mem_locked(
3073 task_t task,
3074 boolean_t can_use_secluded_mem);
3075 #endif /* CONFIG_SECLUDED_MEMORY */
3076
3077 #if MACH_ASSERT
3078 int debug4k_panic_on_terminate = 0;
3079 #endif /* MACH_ASSERT */
3080 kern_return_t
3081 task_terminate_internal(
3082 task_t task)
3083 {
3084 thread_t thread, self;
3085 task_t self_task;
3086 boolean_t interrupt_save;
3087 int pid = 0;
3088
3089 assert(task != kernel_task);
3090
3091 self = current_thread();
3092 self_task = current_task();
3093
3094 /*
3095 * Get the task locked and make sure that we are not racing
3096 * with someone else trying to terminate us.
3097 */
3098 if (task == self_task) {
3099 task_lock(task);
3100 } else if (task < self_task) {
3101 task_lock(task);
3102 task_lock(self_task);
3103 } else {
3104 task_lock(self_task);
3105 task_lock(task);
3106 }
3107
3108 #if CONFIG_SECLUDED_MEMORY
3109 if (task->task_can_use_secluded_mem) {
3110 task_set_can_use_secluded_mem_locked(task, FALSE);
3111 }
3112 task->task_could_use_secluded_mem = FALSE;
3113 task->task_could_also_use_secluded_mem = FALSE;
3114
3115 if (task->task_suppressed_secluded) {
3116 stop_secluded_suppression(task);
3117 }
3118 #endif /* CONFIG_SECLUDED_MEMORY */
3119
3120 if (!task->active) {
3121 /*
3122 * Task is already being terminated.
3123 * Just return an error. If we are dying, this will
3124 * just get us to our AST special handler and that
3125 * will get us to finalize the termination of ourselves.
3126 */
3127 task_unlock(task);
3128 if (self_task != task) {
3129 task_unlock(self_task);
3130 }
3131
3132 return KERN_FAILURE;
3133 }
3134
3135 if (task_corpse_pending_report(task)) {
3136 /*
3137 		 * Task is marked for reporting as a corpse.
3138 		 * Just return an error. This will
3139 		 * just get us to our AST special handler and that
3140 		 * will get us to finish the path to death.
3141 */
3142 task_unlock(task);
3143 if (self_task != task) {
3144 task_unlock(self_task);
3145 }
3146
3147 return KERN_FAILURE;
3148 }
3149
3150 if (self_task != task) {
3151 task_unlock(self_task);
3152 }
3153
3154 /*
3155 * Make sure the current thread does not get aborted out of
3156 * the waits inside these operations.
3157 */
3158 interrupt_save = thread_interrupt_level(THREAD_UNINT);
3159
3160 /*
3161 * Indicate that we want all the threads to stop executing
3162 * at user space by holding the task (we would have held
3163 * each thread independently in thread_terminate_internal -
3164 * but this way we may be more likely to already find it
3165 * held there). Mark the task inactive, and prevent
3166 * further task operations via the task port.
3167 *
3168 	 * The vm_map and ipc_space must exist until this function returns;
3169 	 * convert_port_to_{map,space}_with_flavor relies on this behavior.
3170 */
3171 bool first_suspension __unused = task_hold_locked(task);
3172 task->active = FALSE;
3173 ipc_task_disable(task);
3174
3175 #if CONFIG_EXCLAVES
3176 /* before conclave can be suspended */
3177 exclaves_conclave_prepare_teardown(task);
3178
3179 //rdar://139307390, first suspension might not have done conclave suspend.
3180 first_suspension = true;
3181 if (first_suspension) {
3182 task_unlock(task);
3183 task_suspend_conclave(task);
3184 task_lock(task);
3185 }
3186 #endif /* CONFIG_EXCLAVES */
3187
3188
3189 /*
3190 * Terminate each thread in the task.
3191 */
3192 queue_iterate(&task->threads, thread, thread_t, task_threads) {
3193 thread_terminate_internal(thread);
3194 }
3195
3196 #ifdef MACH_BSD
3197 void *bsd_info = get_bsdtask_info(task);
3198 if (bsd_info != NULL) {
3199 pid = proc_pid(bsd_info);
3200 }
3201 #endif /* MACH_BSD */
3202
3203 task_unlock(task);
3204
3205 #if CONFIG_EXCLAVES
3206 task_stop_conclave(task, false);
3207 #endif /* CONFIG_EXCLAVES */
3208
3209 proc_set_task_policy(task, TASK_POLICY_ATTRIBUTE,
3210 TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
3211
3212 /* Early object reap phase */
3213
3214 // PR-17045188: Revisit implementation
3215 // task_partial_reap(task, pid);
3216
3217 #if CONFIG_TASKWATCH
3218 /*
3219 * remove all task watchers
3220 */
3221 task_removewatchers(task);
3222
3223 #endif /* CONFIG_TASKWATCH */
3224
3225 /*
3226 * Destroy all synchronizers owned by the task.
3227 */
3228 task_synchronizer_destroy_all(task);
3229
3230 /*
3231 * Clear the watchport boost on the task.
3232 */
3233 task_remove_turnstile_watchports(task);
3234
3235 	/* let IOKit know: termination phase 1 */
3236 iokit_task_terminate(task, 1);
3237
3238 /*
3239 * Destroy the IPC space, leaving just a reference for it.
3240 */
3241 ipc_space_terminate(task->itk_space);
3242
3243 #if 00
3244 /* if some ledgers go negative on tear-down again... */
3245 ledger_disable_panic_on_negative(task->map->pmap->ledger,
3246 task_ledgers.phys_footprint);
3247 ledger_disable_panic_on_negative(task->map->pmap->ledger,
3248 task_ledgers.internal);
3249 ledger_disable_panic_on_negative(task->map->pmap->ledger,
3250 task_ledgers.iokit_mapped);
3251 ledger_disable_panic_on_negative(task->map->pmap->ledger,
3252 task_ledgers.alternate_accounting);
3253 ledger_disable_panic_on_negative(task->map->pmap->ledger,
3254 task_ledgers.alternate_accounting_compressed);
3255 #endif
3256
3257 /*
3258 * If the current thread is a member of the task
3259 * being terminated, then the last reference to
3260 * the task will not be dropped until the thread
3261 * is finally reaped. To avoid incurring the
3262 * expense of removing the address space regions
3263 	 * at reap time, we do it explicitly here.
3264 */
3265
3266 #if MACH_ASSERT
3267 /*
3268 * Identify the pmap's process, in case the pmap ledgers drift
3269 * and we have to report it.
3270 */
3271 char procname[17];
3272 void *proc = get_bsdtask_info(task);
3273 if (proc) {
3274 pid = proc_pid(proc);
3275 proc_name_kdp(proc, procname, sizeof(procname));
3276 } else {
3277 pid = 0;
3278 strlcpy(procname, "<unknown>", sizeof(procname));
3279 }
3280 pmap_set_process(task->map->pmap, pid, procname);
3281 if (vm_map_page_shift(task->map) < (int)PAGE_SHIFT) {
3282 DEBUG4K_LIFE("map %p procname: %s\n", task->map, procname);
3283 if (debug4k_panic_on_terminate) {
3284 panic("DEBUG4K: %s:%d %d[%s] map %p", __FUNCTION__, __LINE__, pid, procname, task->map);
3285 }
3286 }
3287 #endif /* MACH_ASSERT */
3288
3289 vm_map_terminate(task->map);
3290
3291 /* release our shared region */
3292 vm_shared_region_set(task, NULL);
3293
3294 #if __has_feature(ptrauth_calls)
3295 task_set_shared_region_id(task, NULL);
3296 #endif /* __has_feature(ptrauth_calls) */
3297
3298 lck_mtx_lock(&tasks_threads_lock);
3299 queue_remove(&tasks, task, task_t, tasks);
3300 queue_enter(&terminated_tasks, task, task_t, tasks);
3301 tasks_count--;
3302 terminated_tasks_count++;
3303 lck_mtx_unlock(&tasks_threads_lock);
3304
3305 /*
3306 * We no longer need to guard against being aborted, so restore
3307 * the previous interruptible state.
3308 */
3309 thread_interrupt_level(interrupt_save);
3310
3311 #if CONFIG_CPU_COUNTERS
3312 /* force the task to release all ctrs */
3313 if (task->t_kpc & TASK_KPC_FORCED_ALL_CTRS) {
3314 kpc_force_all_ctrs(task, 0);
3315 }
3316 #endif /* CONFIG_CPU_COUNTERS */
3317
3318 #if CONFIG_COALITIONS
3319 /*
3320 	 * Leave the coalition for a corpse task or a task that
3321 	 * never had any active threads (e.g. fork or exec failure).
3322 	 * A task with active threads will be removed from its
3323 	 * coalition by the last terminating thread.
3324 */
3325 if (task->active_thread_count == 0) {
3326 coalitions_remove_task(task);
3327 }
3328 #endif
3329
3330 #if CONFIG_FREEZE
3331 extern int vm_compressor_available;
3332 if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE && vm_compressor_available) {
3333 task_disown_frozen_csegs(task);
3334 assert(queue_empty(&task->task_frozen_cseg_q));
3335 }
3336 #endif /* CONFIG_FREEZE */
3337
3338
3339 /*
3340 * Get rid of the task active reference on itself.
3341 */
3342 task_deallocate_grp(task, TASK_GRP_INTERNAL);
3343
3344 return KERN_SUCCESS;
3345 }
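
/*
 * The lock ordering at the top of task_terminate_internal() is the standard
 * address-ordered acquisition idiom: when two locks of the same class must
 * be held at once, always take the lower-addressed one first, so that two
 * threads terminating each other's tasks cannot deadlock. A minimal sketch
 * of the idiom in isolation (hypothetical helper, not part of XNU):
 */
#if 0 /* illustrative sketch, not part of XNU */
static void
task_lock_pair(task_t a, task_t b)
{
	if (a == b) {
		task_lock(a);        /* same task: take the lock once */
	} else if (a < b) {
		task_lock(a);        /* lower address first... */
		task_lock(b);
	} else {
		task_lock(b);        /* ...regardless of which argument it is */
		task_lock(a);
	}
}
#endif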
3346
3347 void
3348 tasks_system_suspend(boolean_t suspend)
3349 {
3350 task_t task;
3351
3352 KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SUSPEND_USERSPACE) |
3353 (suspend ? DBG_FUNC_START : DBG_FUNC_END));
3354
3355 lck_mtx_lock(&tasks_threads_lock);
3356 assert(tasks_suspend_state != suspend);
3357 tasks_suspend_state = suspend;
3358 queue_iterate(&tasks, task, task_t, tasks) {
3359 if (task == kernel_task) {
3360 continue;
3361 }
3362 if (task_is_driver(task)) {
3363 continue;
3364 }
3365 suspend ? task_suspend_internal(task) : task_resume_internal(task);
3366 }
3367 lck_mtx_unlock(&tasks_threads_lock);
3368 }
3369
3370 /*
3371 * task_start_halt:
3372 *
3373 * Shut the current task down (except for the current thread) in
3374 * preparation for dramatic changes to the task (probably exec).
3375 * We hold the task and mark all other threads in the task for
3376 * termination.
3377 */
3378 kern_return_t
3379 task_start_halt(task_t task)
3380 {
3381 kern_return_t kr = KERN_SUCCESS;
3382 task_lock(task);
3383 kr = task_start_halt_locked(task, FALSE);
3384 task_unlock(task);
3385 return kr;
3386 }
3387
3388 static kern_return_t
3389 task_start_halt_locked(task_t task, boolean_t should_mark_corpse)
3390 {
3391 thread_t thread, self;
3392 uint64_t dispatchqueue_offset;
3393
3394 assert(task != kernel_task);
3395
3396 self = current_thread();
3397
3398 if (task != get_threadtask(self) && !task_is_a_corpse_fork(task)) {
3399 return KERN_INVALID_ARGUMENT;
3400 }
3401
3402 if (!should_mark_corpse &&
3403 (task->halting || !task->active || !self->active)) {
3404 		 * Task or current thread is already being terminated.
3405 		 * Hurry up and return out of the current kernel context
3406 		 * so that we run our AST special handler to terminate
3407 		 * ourselves. If should_mark_corpse is set, the corpse
3408 		 * creation might have raced with exec; let the corpse
3409 		 * creation continue. Once the current thread reaches its
3410 		 * AST, the thread in exec will be woken up from
3411 		 * task_complete_halt. Exec will fail because the proc
3412 		 * was marked for exit. Once the thread in exec reaches
3413 		 * its AST, it will call proc_exit and deliver the
3414 		 * EXC_CORPSE_NOTIFY.
3415 */
3416 return KERN_FAILURE;
3417 }
3418
3419 /* Thread creation will fail after this point of no return. */
3420 task->halting = TRUE;
3421
3422 /*
3423 * Mark all the threads to keep them from starting any more
3424 * user-level execution. The thread_terminate_internal code
3425 	 * would do this on a thread-by-thread basis anyway, but this
3426 * gives us a better chance of not having to wait there.
3427 */
3428 bool first_suspension __unused = task_hold_locked(task);
3429
3430 #if CONFIG_EXCLAVES
3431 if (should_mark_corpse) {
3432 void *crash_info_ptr = task_get_corpseinfo(task);
3433 queue_iterate(&task->threads, thread, thread_t, task_threads) {
3434 if (crash_info_ptr != NULL && thread->th_exclaves_ipc_ctx.ipcb != NULL) {
3435 struct thread_crash_exclaves_info info = { 0 };
3436
3437 info.tcei_flags = kExclaveRPCActive;
3438 info.tcei_scid = thread->th_exclaves_ipc_ctx.scid;
3439 info.tcei_thread_id = thread->thread_id;
3440
3441 kcdata_push_data(crash_info_ptr,
3442 STACKSHOT_KCTYPE_KERN_EXCLAVES_CRASH_THREADINFO,
3443 sizeof(struct thread_crash_exclaves_info), &info);
3444 }
3445 }
3446 }
3447 //rdar://139307390, first suspension might not have done conclave suspend.
3448 first_suspension = true;
3449 if (first_suspension || should_mark_corpse) {
3450 task_unlock(task);
3451
3452 		/* before we can tear down the conclave */
3453 exclaves_conclave_prepare_teardown(task);
3454
3455 if (first_suspension) {
3456 task_suspend_conclave(task);
3457 }
3458
3459 if (should_mark_corpse) {
3460 task_stop_conclave(task, true);
3461 }
3462 task_lock(task);
3463 }
3464 #endif /* CONFIG_EXCLAVES */
3465
3466 dispatchqueue_offset = get_dispatchqueue_offset_from_proc(get_bsdtask_info(task));
3467 /*
3468 * Terminate all the other threads in the task.
3469 */
3470 queue_iterate(&task->threads, thread, thread_t, task_threads)
3471 {
3472 /*
3473 		 * Remove priority throttles so that threads terminate in a timely
3474 		 * manner. This has to be done after task_hold_locked() traps all
3475 		 * threads to AST, but before threads are marked inactive in
3476 		 * thread_terminate_internal(). Takes the thread mutex lock.
3477 		 *
3478 		 * We need the task_is_a_corpse() check so that we don't accidentally
3479 		 * update policy for tasks that are doing posix_spawn().
3480 *
3481 * See: thread_policy_update_tasklocked().
3482 */
3483 if (task_is_a_corpse(task)) {
3484 proc_set_thread_policy(thread, TASK_POLICY_ATTRIBUTE,
3485 TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
3486 }
3487
3488 if (should_mark_corpse) {
3489 thread_mtx_lock(thread);
3490 thread->inspection = TRUE;
3491 thread_mtx_unlock(thread);
3492 }
3493 if (thread != self) {
3494 thread_terminate_internal(thread);
3495 }
3496 }
3497 task->dispatchqueue_offset = dispatchqueue_offset;
3498
3499 task_release_locked(task);
3500
3501 return KERN_SUCCESS;
3502 }
3503
3504
3505 /*
3506 * task_complete_halt:
3507 *
3508 * Complete task halt by waiting for threads to terminate, then clean
3509 * up task resources (VM, port namespace, etc...) and then let the
3510 * current thread go in the (practically empty) task context.
3511 *
3512 	 * Note: the task->halting flag is not cleared in order to avoid
3513 	 * creation of a new thread in the old exec'ed task.
3514 */
3515 void
3516 task_complete_halt(task_t task)
3517 {
3518 task_lock(task);
3519 assert(task->halting);
3520 assert(task == current_task());
3521
3522 /*
3523 * Wait for the other threads to get shut down.
3524 * When the last other thread is reaped, we'll be
3525 * woken up.
3526 */
3527 if (task->thread_count > 1) {
3528 assert_wait((event_t)&task->halting, THREAD_UNINT);
3529 task_unlock(task);
3530 thread_block(THREAD_CONTINUE_NULL);
3531 } else {
3532 task_unlock(task);
3533 }
3534
3535 #if CONFIG_DEFERRED_RECLAIM
3536 if (task->deferred_reclamation_metadata) {
3537 vm_deferred_reclamation_buffer_deallocate(
3538 task->deferred_reclamation_metadata);
3539 task->deferred_reclamation_metadata = NULL;
3540 }
3541 #endif /* CONFIG_DEFERRED_RECLAIM */
3542
3543 /*
3544 * Give the machine dependent code a chance
3545 * to perform cleanup of task-level resources
3546 * associated with the current thread before
3547 * ripping apart the task.
3548 */
3549 machine_task_terminate(task);
3550
3551 /*
3552 * Destroy all synchronizers owned by the task.
3553 */
3554 task_synchronizer_destroy_all(task);
3555
3556 	/* let IOKit know: termination phase 1 */
3557 iokit_task_terminate(task, 1);
3558
3559 /*
3560 * Terminate the IPC space. A long time ago,
3561 * this used to be ipc_space_clean() which would
3562 * keep the space active but hollow it.
3563 *
3564 	 * We really do not need those semantics given
3565 	 * that tasks die with exec now.
3566 */
3567 ipc_space_terminate(task->itk_space);
3568
3569 /*
3570 * Clean out the address space, as we are going to be
3571 * getting a new one.
3572 */
3573 vm_map_terminate(task->map);
3574
3575 /*
3576 * Kick out any IOKitUser handles to the task. At best they're stale,
3577 * at worst someone is racing a SUID exec.
3578 */
3579 	/* let IOKit know: termination phase 2 */
3580 iokit_task_terminate(task, 2);
3581 }
3582
3583 #ifdef CONFIG_TASK_SUSPEND_STATS
3584
3585 static void
3586 _task_mark_suspend_source(task_t task)
3587 {
3588 int idx;
3589 task_suspend_stats_t stats;
3590 task_suspend_source_t source;
3591 task_lock_assert_owned(task);
3592 stats = &task->t_suspend_stats;
3593
3594 idx = stats->tss_count % TASK_SUSPEND_SOURCES_MAX;
3595 source = &task->t_suspend_sources[idx];
3596 bzero(source, sizeof(*source));
3597
3598 source->tss_time = mach_absolute_time();
3599 source->tss_tid = current_thread()->thread_id;
3600 source->tss_pid = task_pid(current_task());
3601 strlcpy(source->tss_procname, task_best_name(current_task()),
3602 sizeof(source->tss_procname));
3603
3604 stats->tss_count++;
3605 }
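
/*
 * t_suspend_sources is a fixed-size ring: tss_count only grows, and
 * tss_count % TASK_SUSPEND_SOURCES_MAX selects the slot to overwrite, so
 * the array always holds the most recent TASK_SUSPEND_SOURCES_MAX
 * suspenders. A minimal sketch of walking such a ring oldest-to-newest
 * (process_source() is a hypothetical consumer):
 */
#if 0 /* illustrative sketch, not part of XNU */
	uint64_t total = stats->tss_count;
	uint64_t n = MIN(total, TASK_SUSPEND_SOURCES_MAX);

	for (uint64_t k = 0; k < n; k++) {
		/* oldest surviving entry first */
		uint64_t idx = (total - n + k) % TASK_SUSPEND_SOURCES_MAX;
		process_source(&task->t_suspend_sources[idx]);
	}
#endif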
3606
3607 static inline void
3608 _task_mark_suspend_start(task_t task)
3609 {
3610 task_lock_assert_owned(task);
3611 task->t_suspend_stats.tss_last_start = mach_absolute_time();
3612 }
3613
3614 static inline void
3615 _task_mark_suspend_end(task_t task)
3616 {
3617 task_lock_assert_owned(task);
3618 task->t_suspend_stats.tss_last_end = mach_absolute_time();
3619 task->t_suspend_stats.tss_duration += (task->t_suspend_stats.tss_last_end -
3620 task->t_suspend_stats.tss_last_start);
3621 }
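
/*
 * tss_duration accumulates mach_absolute_time() deltas, which are not
 * nanoseconds on all hardware. A consumer of task_get_suspend_stats() has
 * to scale by the timebase; a minimal user-space sketch of the conversion:
 */
#if 0 /* illustrative sketch, not part of XNU */
#include <mach/mach_time.h>

static uint64_t
suspend_duration_ns(uint64_t abs_duration)
{
	static mach_timebase_info_data_t tb;

	if (tb.denom == 0) {
		(void)mach_timebase_info(&tb);  /* cache numer/denom scale */
	}
	return abs_duration * tb.numer / tb.denom;
}
#endif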
3622
3623 static kern_return_t
3624 _task_get_suspend_stats_locked(task_t task, task_suspend_stats_t stats)
3625 {
3626 if (task == TASK_NULL || stats == NULL) {
3627 return KERN_INVALID_ARGUMENT;
3628 }
3629 task_lock_assert_owned(task);
3630 memcpy(stats, &task->t_suspend_stats, sizeof(task->t_suspend_stats));
3631 return KERN_SUCCESS;
3632 }
3633
3634 static kern_return_t
3635 _task_get_suspend_sources_locked(task_t task, task_suspend_source_t sources)
3636 {
3637 if (task == TASK_NULL || sources == NULL) {
3638 return KERN_INVALID_ARGUMENT;
3639 }
3640 task_lock_assert_owned(task);
3641 memcpy(sources, task->t_suspend_sources,
3642 sizeof(struct task_suspend_source_s) * TASK_SUSPEND_SOURCES_MAX);
3643 return KERN_SUCCESS;
3644 }
3645
3646 #endif /* CONFIG_TASK_SUSPEND_STATS */
3647
3648 kern_return_t
3649 task_get_suspend_stats(task_t task, task_suspend_stats_t stats)
3650 {
3651 #ifdef CONFIG_TASK_SUSPEND_STATS
3652 kern_return_t kr;
3653 if (task == TASK_NULL || stats == NULL) {
3654 return KERN_INVALID_ARGUMENT;
3655 }
3656 task_lock(task);
3657 kr = _task_get_suspend_stats_locked(task, stats);
3658 task_unlock(task);
3659 return kr;
3660 #else /* CONFIG_TASK_SUSPEND_STATS */
3661 (void)task;
3662 (void)stats;
3663 return KERN_NOT_SUPPORTED;
3664 #endif
3665 }
3666
3667 kern_return_t
3668 task_get_suspend_stats_kdp(task_t task, task_suspend_stats_t stats)
3669 {
3670 #ifdef CONFIG_TASK_SUSPEND_STATS
3671 if (task == TASK_NULL || stats == NULL) {
3672 return KERN_INVALID_ARGUMENT;
3673 }
3674 memcpy(stats, &task->t_suspend_stats, sizeof(task->t_suspend_stats));
3675 return KERN_SUCCESS;
3676 #else /* CONFIG_TASK_SUSPEND_STATS */
3677 #pragma unused(task, stats)
3678 return KERN_NOT_SUPPORTED;
3679 #endif /* CONFIG_TASK_SUSPEND_STATS */
3680 }
3681
3682 kern_return_t
3683 task_get_suspend_sources(task_t task, task_suspend_source_array_t sources)
3684 {
3685 #ifdef CONFIG_TASK_SUSPEND_STATS
3686 kern_return_t kr;
3687 if (task == TASK_NULL || sources == NULL) {
3688 return KERN_INVALID_ARGUMENT;
3689 }
3690 task_lock(task);
3691 kr = _task_get_suspend_sources_locked(task, sources);
3692 task_unlock(task);
3693 return kr;
3694 #else /* CONFIG_TASK_SUSPEND_STATS */
3695 (void)task;
3696 (void)sources;
3697 return KERN_NOT_SUPPORTED;
3698 #endif
3699 }
3700
3701 kern_return_t
3702 task_get_suspend_sources_kdp(task_t task, task_suspend_source_array_t sources)
3703 {
3704 #ifdef CONFIG_TASK_SUSPEND_STATS
3705 if (task == TASK_NULL || sources == NULL) {
3706 return KERN_INVALID_ARGUMENT;
3707 }
3708 memcpy(sources, task->t_suspend_sources,
3709 sizeof(struct task_suspend_source_s) * TASK_SUSPEND_SOURCES_MAX);
3710 return KERN_SUCCESS;
3711 #else /* CONFIG_TASK_SUSPEND_STATS */
3712 #pragma unused(task, sources)
3713 return KERN_NOT_SUPPORTED;
3714 #endif
3715 }
3716
3717 kern_return_t
3718 task_set_cs_auxiliary_info(task_t task, uint64_t info)
3719 {
3720 if (task == TASK_NULL) {
3721 return KERN_INVALID_ARGUMENT;
3722 }
3723
3724 task->task_cs_auxiliary_info = info;
3725 return KERN_SUCCESS;
3726 }
3727
3728 uint64_t
3729 task_get_cs_auxiliary_info_kdp(task_t task)
3730 {
3731 if (task == TASK_NULL) {
3732 return 0;
3733 }
3734 return task->task_cs_auxiliary_info;
3735 }
3736
3737 /*
3738 * task_hold_locked:
3739 *
3740 * Suspend execution of the specified task.
3741 	 * This is a recursive-style suspension of the task; a count of
3742 	 * suspends is maintained.
3743 	 *
3744 	 * CONDITIONS: the task is locked and active.
3745 	 * Returns true if this was the first suspension.
3746 */
3747 bool
3748 task_hold_locked(
3749 task_t task)
3750 {
3751 thread_t thread;
3752 void *bsd_info = get_bsdtask_info(task);
3753
3754 assert(task->active);
3755
3756 if (task->suspend_count++ > 0) {
3757 return false;
3758 }
3759
3760 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_SUSPENSION, MACH_TASK_SUSPEND),
3761 task_pid(task), task->user_stop_count, task->pidsuspended);
3762
3763 if (bsd_info) {
3764 workq_proc_suspended(bsd_info);
3765 }
3766
3767 /*
3768 * Iterate through all the threads and hold them.
3769 */
3770 queue_iterate(&task->threads, thread, thread_t, task_threads) {
3771 thread_mtx_lock(thread);
3772 thread_hold(thread);
3773 thread_mtx_unlock(thread);
3774 }
3775
3776 #ifdef CONFIG_TASK_SUSPEND_STATS
3777 _task_mark_suspend_start(task);
3778 #endif
3779 return true;
3780 }
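
/*
 * suspend_count makes kernel holds recursive: only the 0 -> 1 transition
 * actually holds the threads (and returns true), and only the matching
 * 1 -> 0 transition in task_release_locked() lets them run again. A sketch
 * of the pairing, assuming the task stays locked and active throughout:
 */
#if 0 /* illustrative sketch, not part of XNU */
	bool first = task_hold_locked(task);  /* 0 -> 1: threads held, true */
	(void)task_hold_locked(task);         /* 1 -> 2: count only */
	task_release_locked(task);            /* 2 -> 1: threads still held */
	task_release_locked(task);            /* 1 -> 0: threads released */
	(void)first;
#endif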
3781
3782 /*
3783 * task_hold_and_wait
3784 *
3785 	 * Same as the internal routine above, except that it must lock
3786 	 * and verify that the task is active. This differs from task_suspend
3787 	 * in that it places a kernel hold on the task rather than just a
3788 	 * user-level hold. This keeps users from over-resuming and setting
3789 	 * it running out from under the kernel.
3790 *
3791 * CONDITIONS: the caller holds a reference on the task
3792 */
3793 kern_return_t
3794 task_hold_and_wait(
3795 task_t task,
3796 bool suspend_conclave __unused)
3797 {
3798 if (task == TASK_NULL) {
3799 return KERN_INVALID_ARGUMENT;
3800 }
3801
3802 task_lock(task);
3803 if (!task->active) {
3804 task_unlock(task);
3805 return KERN_FAILURE;
3806 }
3807
3808 #ifdef CONFIG_TASK_SUSPEND_STATS
3809 _task_mark_suspend_source(task);
3810 #endif /* CONFIG_TASK_SUSPEND_STATS */
3811
3812 bool first_suspension __unused = task_hold_locked(task);
3813
3814 #if CONFIG_EXCLAVES
3815 //rdar://139307390, first suspension might not have done conclave suspend.
3816 first_suspension = true;
3817 if (suspend_conclave && first_suspension) {
3818 task_unlock(task);
3819 task_suspend_conclave(task);
3820 task_lock(task);
3821 /*
3822 * If task terminated/resumed before we could wait on threads, then
3823 * it is a race we lost and we could treat that as termination/resume
3824 * happened after the wait and return SUCCESS.
3825 */
3826 if (!task->active || task->suspend_count <= 0) {
3827 task_unlock(task);
3828 return KERN_SUCCESS;
3829 }
3830 }
3831 #endif /* CONFIG_EXCLAVES */
3832
3833 task_wait_locked(task, FALSE);
3834 task_unlock(task);
3835
3836 return KERN_SUCCESS;
3837 }
3838
3839 /*
3840 * task_wait_locked:
3841 *
3842 * Wait for all threads in task to stop.
3843 *
3844 * Conditions:
3845 * Called with task locked, active, and held.
3846 */
3847 void
3848 task_wait_locked(
3849 task_t task,
3850 boolean_t until_not_runnable)
3851 {
3852 thread_t thread, self;
3853
3854 assert(task->active);
3855 assert(task->suspend_count > 0);
3856
3857 self = current_thread();
3858
3859 /*
3860 * Iterate through all the threads and wait for them to
3861 * stop. Do not wait for the current thread if it is within
3862 * the task.
3863 */
3864 queue_iterate(&task->threads, thread, thread_t, task_threads) {
3865 if (thread != self) {
3866 thread_wait(thread, until_not_runnable);
3867 }
3868 }
3869 }
3870
3871 boolean_t
3872 task_is_app_suspended(task_t task)
3873 {
3874 return task->pidsuspended;
3875 }
3876
3877 /*
3878 * task_release_locked:
3879 *
3880 * Release a kernel hold on a task.
3881 *
3882 * CONDITIONS: the task is locked and active
3883 */
3884 void
3885 task_release_locked(
3886 task_t task)
3887 {
3888 thread_t thread;
3889 void *bsd_info = get_bsdtask_info(task);
3890
3891 assert(task->active);
3892 assert(task->suspend_count > 0);
3893
3894 if (--task->suspend_count > 0) {
3895 return;
3896 }
3897
3898 if (bsd_info) {
3899 workq_proc_resumed(bsd_info);
3900 }
3901
3902 queue_iterate(&task->threads, thread, thread_t, task_threads) {
3903 thread_mtx_lock(thread);
3904 thread_release(thread);
3905 thread_mtx_unlock(thread);
3906 }
3907
3908 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_SUSPENSION, MACH_TASK_RESUME) | DBG_FUNC_NONE, task_pid(task));
3909
3910 #if CONFIG_TASK_SUSPEND_STATS
3911 _task_mark_suspend_end(task);
3912 #endif
3913
3914 //rdar://139307390.
3915 #if 0
3916 #if CONFIG_EXCLAVES
3917 task_unlock(task);
3918 task_resume_conclave(task);
3919 task_lock(task);
3920 #endif /* CONFIG_EXCLAVES */
3921 #endif
3922 }
3923
3924 /*
3925 * task_release:
3926 *
3927 * Same as the internal routine above, except that it must lock
3928 * and verify that the task is active.
3929 *
3930 * CONDITIONS: The caller holds a reference to the task
3931 */
3932 kern_return_t
3933 task_release(
3934 task_t task)
3935 {
3936 if (task == TASK_NULL) {
3937 return KERN_INVALID_ARGUMENT;
3938 }
3939
3940 task_lock(task);
3941
3942 if (!task->active) {
3943 task_unlock(task);
3944
3945 return KERN_FAILURE;
3946 }
3947
3948 task_release_locked(task);
3949 task_unlock(task);
3950
3951 return KERN_SUCCESS;
3952 }
3953
3954 static kern_return_t
3955 task_threads_internal(
3956 task_t task,
3957 thread_act_array_t *threads_out,
3958 mach_msg_type_number_t *countp,
3959 mach_thread_flavor_t flavor)
3960 {
3961 mach_msg_type_number_t actual, count, count_needed;
3962 thread_act_array_t thread_list;
3963 thread_t thread;
3964 unsigned int i;
3965
3966 count = 0;
3967 thread_list = NULL;
3968
3969 if (task == TASK_NULL) {
3970 return KERN_INVALID_ARGUMENT;
3971 }
3972
3973 assert(flavor <= THREAD_FLAVOR_INSPECT);
3974
3975 for (;;) {
3976 task_lock(task);
3977 if (!task->active) {
3978 task_unlock(task);
3979
3980 mach_port_array_free(thread_list, count);
3981 return KERN_FAILURE;
3982 }
3983
3984 count_needed = actual = task->thread_count;
3985 if (count_needed <= count) {
3986 break;
3987 }
3988
3989 /* unlock the task and allocate more memory */
3990 task_unlock(task);
3991
3992 mach_port_array_free(thread_list, count);
3993 count = count_needed;
3994 thread_list = mach_port_array_alloc(count, Z_WAITOK);
3995
3996 if (thread_list == NULL) {
3997 return KERN_RESOURCE_SHORTAGE;
3998 }
3999 }
4000
4001 i = 0;
4002 queue_iterate(&task->threads, thread, thread_t, task_threads) {
4003 assert(i < actual);
4004 thread_reference(thread);
4005 ((thread_t *)thread_list)[i++] = thread;
4006 }
4007
4008 count_needed = actual;
4009
4010 /* can unlock task now that we've got the thread refs */
4011 task_unlock(task);
4012
4013 if (actual == 0) {
4014 /* no threads, so return null pointer and deallocate memory */
4015
4016 mach_port_array_free(thread_list, count);
4017
4018 *threads_out = NULL;
4019 *countp = 0;
4020 } else {
4021 /* if we allocated too much, must copy */
4022 if (count_needed < count) {
4023 mach_port_array_t newaddr;
4024
4025 newaddr = mach_port_array_alloc(count_needed, Z_WAITOK);
4026 if (newaddr == NULL) {
4027 for (i = 0; i < actual; ++i) {
4028 thread_deallocate(((thread_t *)thread_list)[i]);
4029 }
4030 mach_port_array_free(thread_list, count);
4031 return KERN_RESOURCE_SHORTAGE;
4032 }
4033
4034 bcopy(thread_list, newaddr, count_needed * sizeof(thread_t));
4035 mach_port_array_free(thread_list, count);
4036 thread_list = newaddr;
4037 }
4038
4039 /* do the conversion that Mig should handle */
4040 convert_thread_array_to_ports(thread_list, actual, flavor);
4041
4042 *threads_out = thread_list;
4043 *countp = actual;
4044 }
4045
4046 return KERN_SUCCESS;
4047 }
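
/*
 * task_threads_internal() uses the usual "allocate outside the lock, retry
 * if the population grew" loop: thread_count can change whenever the task
 * is unlocked, so the buffer is only trusted once it is at least as large
 * as the count observed under the lock. The skeleton of that loop, with
 * hypothetical lock/alloc helpers:
 */
#if 0 /* illustrative sketch, not part of XNU */
	count = 0;
	buffer = NULL;
	for (;;) {
		lock(obj);
		needed = obj->population;
		if (needed <= count) {
			break;                   /* big enough: keep the lock */
		}
		unlock(obj);                 /* too small: grow and retry */
		buffer_free(buffer, count);
		count = needed;
		buffer = buffer_alloc(count);
	}
	/* ... copy out under the lock, then unlock(obj) ... */
#endif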
4048
4049
4050 kern_return_t
4051 task_threads_from_user(
4052 mach_port_t port,
4053 thread_act_array_t *threads_out,
4054 mach_msg_type_number_t *count)
4055 {
4056 ipc_kobject_type_t kotype;
4057 kern_return_t kr;
4058
4059 task_t task = convert_port_to_task_inspect_no_eval(port);
4060
4061 if (task == TASK_NULL) {
4062 return KERN_INVALID_ARGUMENT;
4063 }
4064
4065 kotype = ip_type(port);
4066
4067 switch (kotype) {
4068 case IKOT_TASK_CONTROL:
4069 kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_CONTROL);
4070 break;
4071 case IKOT_TASK_READ:
4072 kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_READ);
4073 break;
4074 case IKOT_TASK_INSPECT:
4075 kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_INSPECT);
4076 break;
4077 default:
4078 panic("strange kobject type");
4079 break;
4080 }
4081
4082 task_deallocate(task);
4083 return kr;
4084 }
4085
4086 #define TASK_HOLD_NORMAL 0
4087 #define TASK_HOLD_PIDSUSPEND 1
4088 #define TASK_HOLD_LEGACY 2
4089 #define TASK_HOLD_LEGACY_ALL 3
4090
4091 static kern_return_t
4092 place_task_hold(
4093 task_t task,
4094 int mode)
4095 {
4096 if (!task->active && !task_is_a_corpse(task)) {
4097 return KERN_FAILURE;
4098 }
4099
4100 /* Return success for corpse task */
4101 if (task_is_a_corpse(task)) {
4102 return KERN_SUCCESS;
4103 }
4104
4105 #if MACH_ASSERT
4106 current_task()->suspends_outstanding++;
4107 #endif
4108
4109 if (mode == TASK_HOLD_LEGACY) {
4110 task->legacy_stop_count++;
4111 }
4112
4113 #ifdef CONFIG_TASK_SUSPEND_STATS
4114 _task_mark_suspend_source(task);
4115 #endif /* CONFIG_TASK_SUSPEND_STATS */
4116
4117 if (task->user_stop_count++ > 0) {
4118 /*
4119 * If the stop count was positive, the task is
4120 * already stopped and we can exit.
4121 */
4122 return KERN_SUCCESS;
4123 }
4124
4125 /*
4126 * Put a kernel-level hold on the threads in the task (all
4127 * user-level task suspensions added together represent a
4128 * single kernel-level hold). We then wait for the threads
4129 * to stop executing user code.
4130 */
4131 bool first_suspension __unused = task_hold_locked(task);
4132
4133 //rdar://139307390, do not suspend conclave on task suspend.
4134 #if 0
4135 #if CONFIG_EXCLAVES
4136 if (first_suspension) {
4137 task_unlock(task);
4138 task_suspend_conclave(task);
4139
4140 /*
4141 * If task terminated/resumed before we could wait on threads, then
4142 * it is a race we lost and we could treat that as termination/resume
4143 * happened after the wait and return SUCCESS.
4144 */
4145 task_lock(task);
4146 if (!task->active || task->suspend_count <= 0) {
4147 return KERN_SUCCESS;
4148 }
4149 }
4150 #endif /* CONFIG_EXCLAVES */
4151 #endif
4152
4153 task_wait_locked(task, FALSE);
4154
4155 return KERN_SUCCESS;
4156 }
4157
4158 static kern_return_t
4159 release_task_hold(
4160 task_t task,
4161 int mode)
4162 {
4163 boolean_t release = FALSE;
4164
4165 if (!task->active && !task_is_a_corpse(task)) {
4166 return KERN_FAILURE;
4167 }
4168
4169 /* Return success for corpse task */
4170 if (task_is_a_corpse(task)) {
4171 return KERN_SUCCESS;
4172 }
4173
4174 if (mode == TASK_HOLD_PIDSUSPEND) {
4175 if (task->pidsuspended == FALSE) {
4176 return KERN_FAILURE;
4177 }
4178 task->pidsuspended = FALSE;
4179 }
4180
4181 if (task->user_stop_count > (task->pidsuspended ? 1 : 0)) {
4182 #if MACH_ASSERT
4183 /*
4184 * This is obviously not robust; if we suspend one task and then resume a different one,
4185 * we'll fly under the radar. This is only meant to catch the common case of a crashed
4186 * or buggy suspender.
4187 */
4188 current_task()->suspends_outstanding--;
4189 #endif
4190
4191 if (mode == TASK_HOLD_LEGACY_ALL) {
4192 if (task->legacy_stop_count >= task->user_stop_count) {
4193 task->user_stop_count = 0;
4194 release = TRUE;
4195 } else {
4196 task->user_stop_count -= task->legacy_stop_count;
4197 }
4198 task->legacy_stop_count = 0;
4199 } else {
4200 if (mode == TASK_HOLD_LEGACY && task->legacy_stop_count > 0) {
4201 task->legacy_stop_count--;
4202 }
4203 if (--task->user_stop_count == 0) {
4204 release = TRUE;
4205 }
4206 }
4207 } else {
4208 return KERN_FAILURE;
4209 }
4210
4211 /*
4212 * Release the task if necessary.
4213 */
4214 if (release) {
4215 task_release_locked(task);
4216 }
4217
4218 return KERN_SUCCESS;
4219 }
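
/*
 * A worked example of the bookkeeping above: take a task with
 * user_stop_count == 3, of which legacy_stop_count == 2, not pidsuspended.
 *
 *   release_task_hold(task, TASK_HOLD_NORMAL)     -> user 2, legacy 2
 *   release_task_hold(task, TASK_HOLD_LEGACY)     -> user 1, legacy 1
 *   release_task_hold(task, TASK_HOLD_LEGACY_ALL) -> user 0, legacy 0,
 *                                                    task released
 *
 * TASK_HOLD_LEGACY_ALL clears every remaining legacy hold at once (the
 * no-senders path below relies on this), while the other modes drop exactly
 * one hold and only call task_release_locked() when user_stop_count reaches
 * zero.
 */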
4220
4221 boolean_t
4222 get_task_suspended(task_t task)
4223 {
4224 return 0 != task->user_stop_count;
4225 }
4226
4227 /*
4228 * task_suspend:
4229 *
4230 * Implement an (old-fashioned) user-level suspension on a task.
4231 *
4232 * Because the user isn't expecting to have to manage a suspension
4233 	 * token, we'll track it for them in the kernel in the form of a naked
4234 * send right to the task's resume port. All such send rights
4235 * account for a single suspension against the task (unlike task_suspend2()
4236 * where each caller gets a unique suspension count represented by a
4237 * unique send-once right).
4238 *
4239 * Conditions:
4240 * The caller holds a reference to the task
4241 */
4242 kern_return_t
4243 task_suspend(
4244 task_t task)
4245 {
4246 kern_return_t kr;
4247 mach_port_t port;
4248 mach_port_name_t name;
4249
4250 if (task == TASK_NULL || task == kernel_task) {
4251 return KERN_INVALID_ARGUMENT;
4252 }
4253
4254 /*
4255 * place a legacy hold on the task.
4256 */
4257 task_lock(task);
4258 kr = place_task_hold(task, TASK_HOLD_LEGACY);
4259 task_unlock(task);
4260
4261 if (kr != KERN_SUCCESS) {
4262 return kr;
4263 }
4264
4265 /*
4266 * Claim a send right on the task resume port, and request a no-senders
4267 * notification on that port (if none outstanding).
4268 */
4269 itk_lock(task);
4270 port = task->itk_resume;
4271 if (port == IP_NULL) {
4272 port = ipc_kobject_alloc_port(task, IKOT_TASK_RESUME,
4273 IPC_KOBJECT_ALLOC_MAKE_SEND);
4274 task->itk_resume = port;
4275 } else {
4276 (void)ipc_kobject_make_send(port, task, IKOT_TASK_RESUME);
4277 }
4278 itk_unlock(task);
4279
4280 /*
4281 * Copyout the send right into the calling task's IPC space. It won't know it is there,
4282 * but we'll look it up when calling a traditional resume. Any IPC operations that
4283 * deallocate the send right will auto-release the suspension.
4284 */
4285 if (IP_VALID(port)) {
4286 kr = ipc_object_copyout(current_space(), port,
4287 MACH_MSG_TYPE_MOVE_SEND, IPC_OBJECT_COPYOUT_FLAGS_NONE,
4288 NULL, &name);
4289 } else {
4290 kr = KERN_SUCCESS;
4291 }
4292 if (kr != KERN_SUCCESS) {
4293 printf("warning: %s(%d) failed to copyout suspension "
4294 "token for pid %d with error: %d\n",
4295 proc_name_address(get_bsdtask_info(current_task())),
4296 proc_pid(get_bsdtask_info(current_task())),
4297 task_pid(task), kr);
4298 }
4299
4300 return kr;
4301 }
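
/*
 * From user space, the legacy pair looks like this (it needs a send right
 * to the target's control port, e.g. from the entitlement-gated
 * task_for_pid()): each task_suspend() leaves one hidden send right to the
 * resume port in the caller's space, and each task_resume() releases one.
 * A minimal sketch:
 */
#if 0 /* illustrative sketch, not part of XNU */
#include <mach/mach.h>

static kern_return_t
pause_and_inspect(task_t target)
{
	kern_return_t kr = task_suspend(target);

	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* ... target's threads are stopped at the user-space boundary ... */
	return task_resume(target);     /* drops the legacy hold */
}
#endif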
4302
4303 /*
4304 * task_resume:
4305 * Release a user hold on a task.
4306 *
4307 * Conditions:
4308 * The caller holds a reference to the task
4309 */
4310 kern_return_t
4311 task_resume(
4312 task_t task)
4313 {
4314 kern_return_t kr;
4315 mach_port_name_t resume_port_name;
4316 ipc_entry_t resume_port_entry;
4317 ipc_space_t space = current_task()->itk_space;
4318
4319 if (task == TASK_NULL || task == kernel_task) {
4320 return KERN_INVALID_ARGUMENT;
4321 }
4322
4323 /* release a legacy task hold */
4324 task_lock(task);
4325 kr = release_task_hold(task, TASK_HOLD_LEGACY);
4326 task_unlock(task);
4327
4328 itk_lock(task); /* for itk_resume */
4329 is_write_lock(space); /* spin lock */
4330 if (is_active(space) && IP_VALID(task->itk_resume) &&
4331 ipc_hash_lookup(space, ip_to_object(task->itk_resume), &resume_port_name, &resume_port_entry) == TRUE) {
4332 /*
4333 * We found a suspension token in the caller's IPC space. Release a send right to indicate that
4334 * we are holding one less legacy hold on the task from this caller. If the release failed,
4335 * go ahead and drop all the rights, as someone either already released our holds or the task
4336 * is gone.
4337 */
4338 itk_unlock(task);
4339 if (kr == KERN_SUCCESS) {
4340 ipc_right_dealloc(space, resume_port_name, resume_port_entry);
4341 } else {
4342 ipc_right_destroy(space, resume_port_name, resume_port_entry);
4343 }
4344 /* space unlocked */
4345 } else {
4346 itk_unlock(task);
4347 is_write_unlock(space);
4348 if (kr == KERN_SUCCESS) {
4349 printf("warning: %s(%d) performed out-of-band resume on pid %d\n",
4350 proc_name_address(get_bsdtask_info(current_task())), proc_pid(get_bsdtask_info(current_task())),
4351 task_pid(task));
4352 }
4353 }
4354
4355 return kr;
4356 }
4357
4358 /*
4359 * Suspend the target task.
4360 	 * Making/holding a token/reference/port is the caller's responsibility.
4361 */
4362 kern_return_t
4363 task_suspend_internal(task_t task)
4364 {
4365 kern_return_t kr;
4366
4367 if (task == TASK_NULL || task == kernel_task) {
4368 return KERN_INVALID_ARGUMENT;
4369 }
4370
4371 task_lock(task);
4372 kr = place_task_hold(task, TASK_HOLD_NORMAL);
4373 task_unlock(task);
4374 return kr;
4375 }
4376
4377 /*
4378 * Suspend the target task, and return a suspension token. The token
4379 * represents a reference on the suspended task.
4380 */
4381 static kern_return_t
4382 task_suspend2_grp(
4383 task_t task,
4384 task_suspension_token_t *suspend_token,
4385 task_grp_t grp)
4386 {
4387 kern_return_t kr;
4388
4389 kr = task_suspend_internal(task);
4390 if (kr != KERN_SUCCESS) {
4391 *suspend_token = TASK_NULL;
4392 return kr;
4393 }
4394
4395 /*
4396 * Take a reference on the target task and return that to the caller
4397 * as a "suspension token," which can be converted into an SO right to
4398 * the now-suspended task's resume port.
4399 */
4400 task_reference_grp(task, grp);
4401 *suspend_token = task;
4402
4403 return KERN_SUCCESS;
4404 }
4405
4406 kern_return_t
4407 task_suspend2_mig(
4408 task_t task,
4409 task_suspension_token_t *suspend_token)
4410 {
4411 return task_suspend2_grp(task, suspend_token, TASK_GRP_MIG);
4412 }
4413
4414 kern_return_t
4415 task_suspend2_external(
4416 task_t task,
4417 task_suspension_token_t *suspend_token)
4418 {
4419 return task_suspend2_grp(task, suspend_token, TASK_GRP_EXTERNAL);
4420 }
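
/*
 * The token flavor gives each suspender its own resume capability, so
 * independent suspenders cannot stomp on each other the way legacy
 * task_suspend() callers can. A minimal user-space sketch, assuming the
 * MIG-exported task_suspend2()/task_resume2() that correspond to the
 * *_mig entry points in this file:
 */
#if 0 /* illustrative sketch, not part of XNU */
#include <mach/mach.h>
#include <mach/task.h>

static kern_return_t
suspend_with_token(task_t target)
{
	task_suspension_token_t token = TASK_NULL;
	kern_return_t kr = task_suspend2(target, &token);

	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* ... target stays suspended while we hold the token ... */
	return task_resume2(token);     /* consumes the token reference */
}
#endif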
4421
4422 /*
4423 * Resume the task
4424 * (reference/token/port management is caller's responsibility).
4425 */
4426 kern_return_t
4427 task_resume_internal(
4428 task_suspension_token_t task)
4429 {
4430 kern_return_t kr;
4431
4432 if (task == TASK_NULL || task == kernel_task) {
4433 return KERN_INVALID_ARGUMENT;
4434 }
4435
4436 task_lock(task);
4437 kr = release_task_hold(task, TASK_HOLD_NORMAL);
4438 task_unlock(task);
4439 return kr;
4440 }
4441
4442 /*
4443 * Resume the task using a suspension token. Consumes the token's ref.
4444 */
4445 static kern_return_t
4446 task_resume2_grp(
4447 task_suspension_token_t task,
4448 task_grp_t grp)
4449 {
4450 kern_return_t kr;
4451
4452 kr = task_resume_internal(task);
4453 task_suspension_token_deallocate_grp(task, grp);
4454
4455 return kr;
4456 }
4457
4458 kern_return_t
4459 task_resume2_mig(
4460 task_suspension_token_t task)
4461 {
4462 return task_resume2_grp(task, TASK_GRP_MIG);
4463 }
4464
4465 kern_return_t
4466 task_resume2_external(
4467 task_suspension_token_t task)
4468 {
4469 return task_resume2_grp(task, TASK_GRP_EXTERNAL);
4470 }
4471
4472 static void
4473 task_suspension_no_senders(ipc_port_t port, mach_port_mscount_t mscount)
4474 {
4475 task_t task = convert_port_to_task_suspension_token(port);
4476
4477 if (task == TASK_NULL) {
4478 return;
4479 }
4480
4481 if (task == kernel_task) {
4482 task_suspension_token_deallocate(task);
4483 return;
4484 }
4485
4486 task_lock(task);
4487
4488 if (ipc_kobject_is_mscount_current(port, mscount)) {
4489 /* release all the [remaining] outstanding legacy holds */
4490 release_task_hold(task, TASK_HOLD_LEGACY_ALL);
4491 }
4492
4493 task_unlock(task);
4494
4495 task_suspension_token_deallocate(task); /* drop token reference */
4496 }
4497
4498 /*
4499 	 * Fires when a send-once right made
4500 	 * by convert_task_suspension_token_to_port() dies.
4501 */
4502 void
4503 task_suspension_send_once(ipc_port_t port)
4504 {
4505 task_t task = convert_port_to_task_suspension_token(port);
4506
4507 if (task == TASK_NULL || task == kernel_task) {
4508 return; /* nothing to do */
4509 }
4510
4511 /* release the hold held by this specific send-once right */
4512 task_lock(task);
4513 release_task_hold(task, TASK_HOLD_NORMAL);
4514 task_unlock(task);
4515
4516 task_suspension_token_deallocate(task); /* drop token reference */
4517 }
4518
4519 static kern_return_t
4520 task_pidsuspend_locked(task_t task)
4521 {
4522 kern_return_t kr;
4523
4524 if (task->pidsuspended) {
4525 kr = KERN_FAILURE;
4526 goto out;
4527 }
4528
4529 task->pidsuspended = TRUE;
4530
4531 kr = place_task_hold(task, TASK_HOLD_PIDSUSPEND);
4532 if (kr != KERN_SUCCESS) {
4533 task->pidsuspended = FALSE;
4534 }
4535 out:
4536 return kr;
4537 }
4538
4539
4540 /*
4541 * task_pidsuspend:
4542 *
4543 * Suspends a task by placing a hold on its threads.
4544 *
4545 * Conditions:
4546 * The caller holds a reference to the task
4547 */
4548 kern_return_t
4549 task_pidsuspend(
4550 task_t task)
4551 {
4552 kern_return_t kr;
4553
4554 if (task == TASK_NULL || task == kernel_task) {
4555 return KERN_INVALID_ARGUMENT;
4556 }
4557
4558 task_lock(task);
4559
4560 kr = task_pidsuspend_locked(task);
4561
4562 task_unlock(task);
4563
4564 if ((KERN_SUCCESS == kr) && task->message_app_suspended) {
4565 iokit_task_app_suspended_changed(task);
4566 vm_deferred_reclamation_task_suspend(task);
4567 }
4568
4569 return kr;
4570 }
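
/*
 * task_pidsuspend()/task_pidresume() back the private pid_suspend()/
 * pid_resume() SPI used to freeze whole apps; unlike the Mach holds above,
 * at most one pidsuspend can be outstanding per task. A sketch of the
 * user-space shape, with the private prototypes assumed (they are not in
 * the public SDK):
 */
#if 0 /* illustrative sketch, not part of XNU; private SPI prototypes assumed */
extern int pid_suspend(int pid);
extern int pid_resume(int pid);

static int
freeze_and_thaw(int pid)
{
	if (pid_suspend(pid) != 0) {    /* fails if already pidsuspended */
		return -1;
	}
	/* ... task_is_app_suspended() is now true for the target ... */
	return pid_resume(pid);
}
#endif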
4571
4572 /*
4573 * task_pidresume:
4574 * Resumes a previously suspended task.
4575 *
4576 * Conditions:
4577 * The caller holds a reference to the task
4578 */
4579 kern_return_t
4580 task_pidresume(
4581 task_t task)
4582 {
4583 kern_return_t kr;
4584
4585 if (task == TASK_NULL || task == kernel_task) {
4586 return KERN_INVALID_ARGUMENT;
4587 }
4588
4589 task_lock(task);
4590
4591 #if CONFIG_FREEZE
4592
4593 while (task->changing_freeze_state) {
4594 assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
4595 task_unlock(task);
4596 thread_block(THREAD_CONTINUE_NULL);
4597
4598 task_lock(task);
4599 }
4600 task->changing_freeze_state = TRUE;
4601 #endif
4602
4603 kr = release_task_hold(task, TASK_HOLD_PIDSUSPEND);
4604
4605 task_unlock(task);
4606
4607 if ((KERN_SUCCESS == kr) && task->message_app_suspended) {
4608 iokit_task_app_suspended_changed(task);
4609 }
4610
4611 #if CONFIG_FREEZE
4612
4613 task_lock(task);
4614
4615 if (kr == KERN_SUCCESS) {
4616 task->frozen = FALSE;
4617 }
4618 task->changing_freeze_state = FALSE;
4619 thread_wakeup(&task->changing_freeze_state);
4620
4621 task_unlock(task);
4622 #endif
4623
4624 return kr;
4625 }
4626
4627 os_refgrp_decl(static, task_watchports_refgrp, "task_watchports", NULL);
4628
4629 /*
4630 * task_add_turnstile_watchports:
4631 * Setup watchports to boost the main thread of the task.
4632 *
4633 * Arguments:
4634 * task: task being spawned
4635 * thread: main thread of task
4636 * portwatch_ports: array of watchports
4637 * portwatch_count: number of watchports
4638 *
4639 * Conditions:
4640 * Nothing locked.
4641 */
4642 void
4643 task_add_turnstile_watchports(
4644 task_t task,
4645 thread_t thread,
4646 ipc_port_t *portwatch_ports,
4647 uint32_t portwatch_count)
4648 {
4649 struct task_watchports *watchports = NULL;
4650 struct task_watchport_elem *previous_elem_array[TASK_MAX_WATCHPORT_COUNT] = {};
4651 os_ref_count_t refs;
4652
4653 /* Check if the task has terminated */
4654 if (!task->active) {
4655 return;
4656 }
4657
4658 assert(portwatch_count <= TASK_MAX_WATCHPORT_COUNT);
4659
4660 watchports = task_watchports_alloc_init(task, thread, portwatch_count);
4661
4662 /* Lock the ipc space */
4663 is_write_lock(task->itk_space);
4664
4665 /* Setup watchports to boost the main thread */
4666 refs = task_add_turnstile_watchports_locked(task,
4667 watchports, previous_elem_array, portwatch_ports,
4668 portwatch_count);
4669
4670 /* Drop the space lock */
4671 is_write_unlock(task->itk_space);
4672
4673 if (refs == 0) {
4674 task_watchports_deallocate(watchports);
4675 }
4676
4677 /* Drop the ref on previous_elem_array */
4678 for (uint32_t i = 0; i < portwatch_count && previous_elem_array[i] != NULL; i++) {
4679 task_watchport_elem_deallocate(previous_elem_array[i]);
4680 }
4681 }
4682
4683 /*
4684 * task_remove_turnstile_watchports:
4685 * Clear all turnstile boost on the task from watchports.
4686 *
4687 * Arguments:
4688 * task: task being terminated
4689 *
4690 * Conditions:
4691 * Nothing locked.
4692 */
4693 void
4694 task_remove_turnstile_watchports(
4695 task_t task)
4696 {
4697 os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4698 struct task_watchports *watchports = NULL;
4699 ipc_port_t port_freelist[TASK_MAX_WATCHPORT_COUNT] = {};
4700 uint32_t portwatch_count;
4701
4702 /* Lock the ipc space */
4703 is_write_lock(task->itk_space);
4704
4705 	/* Check if a watchport boost exists */
4706 if (task->watchports == NULL) {
4707 is_write_unlock(task->itk_space);
4708 return;
4709 }
4710 watchports = task->watchports;
4711 portwatch_count = watchports->tw_elem_array_count;
4712
4713 refs = task_remove_turnstile_watchports_locked(task, watchports,
4714 port_freelist);
4715
4716 is_write_unlock(task->itk_space);
4717
4718 /* Drop all the port references */
4719 for (uint32_t i = 0; i < portwatch_count && port_freelist[i] != NULL; i++) {
4720 ip_release(port_freelist[i]);
4721 }
4722
4723 /* Clear the task and thread references for task_watchport */
4724 if (refs == 0) {
4725 task_watchports_deallocate(watchports);
4726 }
4727 }
4728
4729 /*
4730 * task_transfer_turnstile_watchports:
4731 * Transfer all watchport turnstile boost from old task to new task.
4732 *
4733 * Arguments:
4734 * old_task: task calling exec
4735 * new_task: new exec'ed task
4736 * thread: main thread of new task
4737 *
4738 * Conditions:
4739 * Nothing locked.
4740 */
4741 void
4742 task_transfer_turnstile_watchports(
4743 task_t old_task,
4744 task_t new_task,
4745 thread_t new_thread)
4746 {
4747 struct task_watchports *old_watchports = NULL;
4748 struct task_watchports *new_watchports = NULL;
4749 os_ref_count_t old_refs = TASK_MAX_WATCHPORT_COUNT;
4750 os_ref_count_t new_refs = TASK_MAX_WATCHPORT_COUNT;
4751 uint32_t portwatch_count;
4752
4753 if (old_task->watchports == NULL || !new_task->active) {
4754 return;
4755 }
4756
4757 /* Get the watch port count from the old task */
4758 is_write_lock(old_task->itk_space);
4759 if (old_task->watchports == NULL) {
4760 is_write_unlock(old_task->itk_space);
4761 return;
4762 }
4763
4764 portwatch_count = old_task->watchports->tw_elem_array_count;
4765 is_write_unlock(old_task->itk_space);
4766
4767 new_watchports = task_watchports_alloc_init(new_task, new_thread, portwatch_count);
4768
4769 /* Lock the ipc space for old task */
4770 is_write_lock(old_task->itk_space);
4771
4772 /* Lock the ipc space for new task */
4773 is_write_lock(new_task->itk_space);
4774
4775 	/* Check if a watchport boost exists */
4776 if (old_task->watchports == NULL || !new_task->active) {
4777 is_write_unlock(new_task->itk_space);
4778 is_write_unlock(old_task->itk_space);
4779 (void)task_watchports_release(new_watchports);
4780 task_watchports_deallocate(new_watchports);
4781 return;
4782 }
4783
4784 old_watchports = old_task->watchports;
4785 assert(portwatch_count == old_task->watchports->tw_elem_array_count);
4786
4787 /* Setup new task watchports */
4788 new_task->watchports = new_watchports;
4789
4790 for (uint32_t i = 0; i < portwatch_count; i++) {
4791 ipc_port_t port = old_watchports->tw_elem[i].twe_port;
4792
4793 if (port == NULL) {
4794 task_watchport_elem_clear(&new_watchports->tw_elem[i]);
4795 continue;
4796 }
4797
4798 /* Lock the port and check if it has the entry */
4799 ip_mq_lock(port);
4800
4801 task_watchport_elem_init(&new_watchports->tw_elem[i], new_task, port);
4802
4803 if (ipc_port_replace_watchport_elem_conditional_locked(port,
4804 &old_watchports->tw_elem[i], &new_watchports->tw_elem[i]) == KERN_SUCCESS) {
4805 task_watchport_elem_clear(&old_watchports->tw_elem[i]);
4806
4807 task_watchports_retain(new_watchports);
4808 old_refs = task_watchports_release(old_watchports);
4809
4810 /* Check if all ports are cleaned */
4811 if (old_refs == 0) {
4812 old_task->watchports = NULL;
4813 }
4814 } else {
4815 task_watchport_elem_clear(&new_watchports->tw_elem[i]);
4816 }
4817 /* port unlocked by ipc_port_replace_watchport_elem_conditional_locked */
4818 }
4819
4820 /* Drop the reference on new task_watchports struct returned by task_watchports_alloc_init */
4821 new_refs = task_watchports_release(new_watchports);
4822 if (new_refs == 0) {
4823 new_task->watchports = NULL;
4824 }
4825
4826 is_write_unlock(new_task->itk_space);
4827 is_write_unlock(old_task->itk_space);
4828
4829 /* Clear the task and thread references for old_watchport */
4830 if (old_refs == 0) {
4831 task_watchports_deallocate(old_watchports);
4832 }
4833
4834 /* Clear the task and thread references for new_watchport */
4835 if (new_refs == 0) {
4836 task_watchports_deallocate(new_watchports);
4837 }
4838 }
4839
4840 /*
4841 * task_add_turnstile_watchports_locked:
4842 * Setup watchports to boost the main thread of the task.
4843 *
4844 * Arguments:
4845 * task: task to boost
4846 * watchports: watchport structure to be attached to the task
4847 * previous_elem_array: an array of old watchport_elem to be returned to caller
4848 * portwatch_ports: array of watchports
4849 * portwatch_count: number of watchports
4850 *
4851 * Conditions:
4852 * ipc space of the task locked.
4853 * returns array of old watchport_elem in previous_elem_array
4854 */
4855 static os_ref_count_t
4856 task_add_turnstile_watchports_locked(
4857 task_t task,
4858 struct task_watchports *watchports,
4859 struct task_watchport_elem **previous_elem_array,
4860 ipc_port_t *portwatch_ports,
4861 uint32_t portwatch_count)
4862 {
4863 os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4864
4865 /* Check if the task is still active */
4866 if (!task->active) {
4867 refs = task_watchports_release(watchports);
4868 return refs;
4869 }
4870
4871 assert(task->watchports == NULL);
4872 task->watchports = watchports;
4873
4874 for (uint32_t i = 0, j = 0; i < portwatch_count; i++) {
4875 ipc_port_t port = portwatch_ports[i];
4876
4877 task_watchport_elem_init(&watchports->tw_elem[i], task, port);
4878 if (port == NULL) {
4879 task_watchport_elem_clear(&watchports->tw_elem[i]);
4880 continue;
4881 }
4882
4883 ip_mq_lock(port);
4884
4885 		/* Check if the port is in a valid state to be set up as a watchport */
4886 if (ipc_port_add_watchport_elem_locked(port, &watchports->tw_elem[i],
4887 &previous_elem_array[j]) != KERN_SUCCESS) {
4888 task_watchport_elem_clear(&watchports->tw_elem[i]);
4889 continue;
4890 }
4891 /* port unlocked on return */
4892
4893 ip_reference(port);
4894 task_watchports_retain(watchports);
4895 if (previous_elem_array[j] != NULL) {
4896 j++;
4897 }
4898 }
4899
4900 /* Drop the reference on task_watchport struct returned by os_ref_init */
4901 refs = task_watchports_release(watchports);
4902 if (refs == 0) {
4903 task->watchports = NULL;
4904 }
4905
4906 return refs;
4907 }
4908
4909 /*
4910 * task_remove_turnstile_watchports_locked:
4911 * Clear all turnstile boost on the task from watchports.
4912 *
4913 * Arguments:
4914 * task: task to remove watchports from
4915 * watchports: watchports structure for the task
4916 * port_freelist: array of ports returned with ref to caller
4917 *
4918 *
4919 * Conditions:
4920 * ipc space of the task locked.
4921 * array of ports with refs are returned in port_freelist
4922 */
4923 static os_ref_count_t
4924 task_remove_turnstile_watchports_locked(
4925 task_t task,
4926 struct task_watchports *watchports,
4927 ipc_port_t *port_freelist)
4928 {
4929 os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4930
4931 for (uint32_t i = 0, j = 0; i < watchports->tw_elem_array_count; i++) {
4932 ipc_port_t port = watchports->tw_elem[i].twe_port;
4933 if (port == NULL) {
4934 continue;
4935 }
4936
4937 /* Lock the port and check if it has the entry */
4938 ip_mq_lock(port);
4939 if (ipc_port_clear_watchport_elem_internal_conditional_locked(port,
4940 &watchports->tw_elem[i]) == KERN_SUCCESS) {
4941 task_watchport_elem_clear(&watchports->tw_elem[i]);
4942 port_freelist[j++] = port;
4943 refs = task_watchports_release(watchports);
4944
4945 /* Check if all ports are cleaned */
4946 if (refs == 0) {
4947 task->watchports = NULL;
4948 break;
4949 }
4950 }
4951 /* mqueue and port unlocked by ipc_port_clear_watchport_elem_internal_conditional_locked */
4952 }
4953 return refs;
4954 }
4955
4956 /*
4957 * task_watchports_alloc_init:
4958 * Allocate and initialize task watchport struct.
4959 *
4960 * Conditions:
4961 * Nothing locked.
4962 */
4963 static struct task_watchports *
4964 task_watchports_alloc_init(
4965 task_t task,
4966 thread_t thread,
4967 uint32_t count)
4968 {
4969 struct task_watchports *watchports = kalloc_type(struct task_watchports,
4970 struct task_watchport_elem, count, Z_WAITOK | Z_ZERO | Z_NOFAIL);
4971
4972 task_reference(task);
4973 thread_reference(thread);
4974 watchports->tw_task = task;
4975 watchports->tw_thread = thread;
4976 watchports->tw_elem_array_count = count;
4977 os_ref_init(&watchports->tw_refcount, &task_watchports_refgrp);
4978
4979 return watchports;
4980 }
4981
4982 /*
4983 * task_watchports_deallocate:
4984 * Deallocate task watchport struct.
4985 *
4986 * Conditions:
4987 * Nothing locked.
4988 */
4989 static void
4990 task_watchports_deallocate(
4991 struct task_watchports *watchports)
4992 {
4993 uint32_t portwatch_count = watchports->tw_elem_array_count;
4994
4995 task_deallocate(watchports->tw_task);
4996 thread_deallocate(watchports->tw_thread);
4997 kfree_type(struct task_watchports, struct task_watchport_elem,
4998 portwatch_count, watchports);
4999 }
5000
5001 /*
5002 * task_watchport_elem_deallocate:
5003 * Deallocate task watchport element and release its ref on task_watchport.
5004 *
5005 * Conditions:
5006 * Nothing locked.
5007 */
5008 void
5009 task_watchport_elem_deallocate(
5010 struct task_watchport_elem *watchport_elem)
5011 {
5012 os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
5013 task_t task = watchport_elem->twe_task;
5014 struct task_watchports *watchports = NULL;
5015 ipc_port_t port = NULL;
5016
5017 assert(task != NULL);
5018
5019 /* Take the space lock to modify the element */
5020 is_write_lock(task->itk_space);
5021
5022 watchports = task->watchports;
5023 assert(watchports != NULL);
5024
5025 port = watchport_elem->twe_port;
5026 assert(port != NULL);
5027
5028 task_watchport_elem_clear(watchport_elem);
5029 refs = task_watchports_release(watchports);
5030
5031 if (refs == 0) {
5032 task->watchports = NULL;
5033 }
5034
5035 is_write_unlock(task->itk_space);
5036
5037 ip_release(port);
5038 if (refs == 0) {
5039 task_watchports_deallocate(watchports);
5040 }
5041 }
5042
5043 /*
5044 * task_has_watchports:
5045 * Return TRUE if task has watchport boosts.
5046 *
5047 * Conditions:
5048 * Nothing locked.
5049 */
5050 boolean_t
5051 task_has_watchports(task_t task)
5052 {
5053 return task->watchports != NULL;
5054 }
5055
5056 #if DEVELOPMENT || DEBUG
5057
5058 extern void IOSleep(int);
5059
5060 kern_return_t
5061 task_disconnect_page_mappings(task_t task)
5062 {
5063 int n;
5064
5065 if (task == TASK_NULL || task == kernel_task) {
5066 return KERN_INVALID_ARGUMENT;
5067 }
5068
5069 /*
5070 * this function is used to strip all of the mappings from
5071 * the pmap for the specified task to force the task to
5072 * re-fault all of the pages it is actively using... this
5073 * allows us to approximate the true working set of the
5074 * specified task. We only engage if at least one of the
5075 * threads in the task is runnable, but we want to continuously
5076 * sweep (at least for a while - I've arbitrarily set the limit at
5077 * 100 sweeps, to be revisited as we gain experience) to get a better
5078 * view into which areas within a page are being visited (as opposed to only
5079 * seeing the first fault of a page after the task becomes
5080 * runnable)... in the future I may
5081 * try to block until awakened by a thread in this task
5082 * being made runnable, but for now we'll periodically poll from the
5083 * user-level debug tool driving the sysctl
5084 */
5085 for (n = 0; n < 100; n++) {
5086 thread_t thread;
5087 boolean_t runnable;
5088 boolean_t do_unnest;
5089 int page_count;
5090
5091 runnable = FALSE;
5092 do_unnest = FALSE;
5093
5094 task_lock(task);
5095
5096 queue_iterate(&task->threads, thread, thread_t, task_threads) {
5097 if (thread->state & TH_RUN) {
5098 runnable = TRUE;
5099 break;
5100 }
5101 }
5102 if (n == 0) {
5103 task->task_disconnected_count++;
5104 }
5105
5106 if (task->task_unnested == FALSE) {
5107 if (runnable == TRUE) {
5108 task->task_unnested = TRUE;
5109 do_unnest = TRUE;
5110 }
5111 }
5112 task_unlock(task);
5113
5114 if (runnable == FALSE) {
5115 break;
5116 }
5117
5118 KDBG_RELEASE((MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_START,
5119 task, do_unnest, task->task_disconnected_count);
5120
5121 page_count = vm_map_disconnect_page_mappings(task->map, do_unnest);
5122
5123 KDBG_RELEASE((MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_END,
5124 task, page_count);
5125
5126 if ((n % 5) == 4) {
5127 IOSleep(1);
5128 }
5129 }
5130 return KERN_SUCCESS;
5131 }
5132
5133 #endif
5134
5135
5136 #if CONFIG_FREEZE
5137
5138 /*
5139 * task_freeze:
5140 *
5141 * Freeze a task.
5142 *
5143 * Conditions:
5144 * The caller holds a reference to the task
5145 */
5146 extern struct freezer_context freezer_context_global;
5147
5148 kern_return_t
5149 task_freeze(
5150 task_t task,
5151 uint32_t *purgeable_count,
5152 uint32_t *wired_count,
5153 uint32_t *clean_count,
5154 uint32_t *dirty_count,
5155 uint32_t dirty_budget,
5156 uint32_t *shared_count,
5157 int *freezer_error_code,
5158 boolean_t eval_only)
5159 {
5160 kern_return_t kr = KERN_SUCCESS;
5161
5162 if (task == TASK_NULL || task == kernel_task) {
5163 return KERN_INVALID_ARGUMENT;
5164 }
5165
5166 task_lock(task);
5167
5168 while (task->changing_freeze_state) {
5169 assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
5170 task_unlock(task);
5171 thread_block(THREAD_CONTINUE_NULL);
5172
5173 task_lock(task);
5174 }
5175 if (task->frozen) {
5176 task_unlock(task);
5177 return KERN_FAILURE;
5178 }
5179 task->changing_freeze_state = TRUE;
5180
5181 freezer_context_global.freezer_ctx_task = task;
5182
5183 task_unlock(task);
5184
5185 #if CONFIG_DEFERRED_RECLAIM
5186 if (vm_deferred_reclamation_task_has_ring(task)) {
5187 kr = vm_deferred_reclamation_task_drain(task, RECLAIM_OPTIONS_NONE);
5188 if (kr != KERN_SUCCESS) {
5189 os_log_error(OS_LOG_DEFAULT, "Failed to drain reclamation ring prior to freezing (%d)\n", kr);
5190 }
5191 }
5192 #endif /* CONFIG_DEFERRED_RECLAIM */
5193
5194 kr = vm_map_freeze(task,
5195 purgeable_count,
5196 wired_count,
5197 clean_count,
5198 dirty_count,
5199 dirty_budget,
5200 shared_count,
5201 freezer_error_code,
5202 eval_only);
5203
5204 task_lock(task);
5205
5206 if ((kr == KERN_SUCCESS) && (eval_only == FALSE)) {
5207 task->frozen = TRUE;
5208
5209 freezer_context_global.freezer_ctx_task = NULL;
5210 freezer_context_global.freezer_ctx_uncompressed_pages = 0;
5211
5212 if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
5213 /*
5214 * reset the counter tracking the # of swapped compressed pages
5215 * because we are now done with this freeze session and task.
5216 */
5217
5218 *dirty_count = (uint32_t) (freezer_context_global.freezer_ctx_swapped_bytes / PAGE_SIZE_64); /*used to track pageouts*/
5219 }
5220
5221 freezer_context_global.freezer_ctx_swapped_bytes = 0;
5222 }
5223
5224 task->changing_freeze_state = FALSE;
5225 thread_wakeup(&task->changing_freeze_state);
5226
5227 task_unlock(task);
5228
5229 if (VM_CONFIG_COMPRESSOR_IS_PRESENT &&
5230 (kr == KERN_SUCCESS) &&
5231 (eval_only == FALSE)) {
5232 vm_wake_compactor_swapper();
5233 /*
5234 * We do an explicit wakeup of the swapout thread here
5235 * because the compact_and_swap routines don't have
5236 * knowledge about this kind of "per-task packed c_segs"
5237 * and so will not be evaluating whether we need to do
5238 * a wakeup there.
5239 */
5240 thread_wakeup((event_t)&vm_swapout_thread);
5241 }
5242
5243 return kr;
5244 }
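/*
 * Illustrative call sequence (a sketch, not part of this file): the freezer
 * typically makes an evaluation-only pass before committing, so a
 * hypothetical caller already holding a task reference might do:
 *
 *	uint32_t purgeable, wired, clean, dirty, shared;
 *	int freezer_err = 0;
 *	kr = task_freeze(task, &purgeable, &wired, &clean, &dirty,
 *	    dirty_budget, &shared, &freezer_err, TRUE);      <- eval_only pass
 *	if (kr == KERN_SUCCESS) {
 *		kr = task_freeze(task, &purgeable, &wired, &clean, &dirty,
 *		    dirty_budget, &shared, &freezer_err, FALSE);  <- commit
 *	}
 */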
5245
5246 /*
5247 * task_thaw:
5248 *
5249 * Thaw a currently frozen task.
5250 *
5251 * Conditions:
5252 * The caller holds a reference to the task
5253 */
5254 kern_return_t
5255 task_thaw(
5256 task_t task)
5257 {
5258 if (task == TASK_NULL || task == kernel_task) {
5259 return KERN_INVALID_ARGUMENT;
5260 }
5261
5262 task_lock(task);
5263
5264 while (task->changing_freeze_state) {
5265 assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
5266 task_unlock(task);
5267 thread_block(THREAD_CONTINUE_NULL);
5268
5269 task_lock(task);
5270 }
5271 if (!task->frozen) {
5272 task_unlock(task);
5273 return KERN_FAILURE;
5274 }
5275 task->frozen = FALSE;
5276
5277 task_unlock(task);
5278
5279 return KERN_SUCCESS;
5280 }
5281
5282 void
5283 task_update_frozen_to_swap_acct(task_t task, int64_t amount, freezer_acct_op_t op)
5284 {
5285 /*
5286 * We don't assert that the task lock is held because we call this
5287 * routine from the decompression path and we won't be holding the
5288 * task lock. However, since we are in the context of the task we are
5289 * safe.
5290 * In the case of the task_freeze path, we call it from behind the task
5291 * lock but we don't need to because we have a reference on the proc
5292 * being frozen.
5293 */
5294
5295 assert(task);
5296 if (amount == 0) {
5297 return;
5298 }
5299
5300 if (op == CREDIT_TO_SWAP) {
5301 ledger_credit_nocheck(task->ledger, task_ledgers.frozen_to_swap, amount);
5302 } else if (op == DEBIT_FROM_SWAP) {
5303 ledger_debit_nocheck(task->ledger, task_ledgers.frozen_to_swap, amount);
5304 } else {
5305 panic("task_update_frozen_to_swap_acct: Invalid ledger op");
5306 }
5307 }
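/*
 * Illustrative sketch (hypothetical call sites): the compressor credits this
 * ledger as a frozen task's compressed bytes go out to swap, and debits it
 * as they come back in, e.g.:
 *
 *	task_update_frozen_to_swap_acct(task, bytes, CREDIT_TO_SWAP);
 *	...
 *	task_update_frozen_to_swap_acct(task, bytes, DEBIT_FROM_SWAP);
 */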
5308 #endif /* CONFIG_FREEZE */
5309
5310 kern_return_t
5311 task_set_security_tokens(
5312 task_t task,
5313 security_token_t sec_token,
5314 audit_token_t audit_token,
5315 host_priv_t host_priv)
5316 {
5317 ipc_port_t host_port = IP_NULL;
5318 kern_return_t kr;
5319
5320 if (task == TASK_NULL) {
5321 return KERN_INVALID_ARGUMENT;
5322 }
5323
5324 task_lock(task);
5325 task_set_tokens(task, &sec_token, &audit_token);
5326 task_unlock(task);
5327
5328 if (host_priv != HOST_PRIV_NULL) {
5329 kr = host_get_host_priv_port(host_priv, &host_port);
5330 } else {
5331 kr = host_get_host_port(host_priv_self(), &host_port);
5332 }
5333 assert(kr == KERN_SUCCESS);
5334
5335 kr = task_set_special_port_internal(task, TASK_HOST_PORT, host_port);
5336 return kr;
5337 }
5338
5339 kern_return_t
5340 task_send_trace_memory(
5341 __unused task_t target_task,
5342 __unused uint32_t pid,
5343 __unused uint64_t uniqueid)
5344 {
5345 return KERN_INVALID_ARGUMENT;
5346 }
5347
5348 /*
5349 * This routine was added, pretty much exclusively, for registering the
5350 * RPC glue vector for in-kernel short circuited tasks. Rather than
5351 * removing it completely, I have only disabled that feature (which was
5352 * the only feature at the time). It just appears that we are going to
5353 * want to add some user data to tasks in the future (e.g. bsd info,
5354 * task names, etc...), so I left it in the formal task interface.
5355 */
5356 kern_return_t
5357 task_set_info(
5358 task_t task,
5359 task_flavor_t flavor,
5360 __unused task_info_t task_info_in, /* pointer to IN array */
5361 __unused mach_msg_type_number_t task_info_count)
5362 {
5363 if (task == TASK_NULL) {
5364 return KERN_INVALID_ARGUMENT;
5365 }
5366 switch (flavor) {
5367 #if CONFIG_ATM
5368 case TASK_TRACE_MEMORY_INFO:
5369 return KERN_NOT_SUPPORTED;
5370 #endif // CONFIG_ATM
5371 default:
5372 return KERN_INVALID_ARGUMENT;
5373 }
5374 }
5375
5376 static void
5377 _task_fill_times(task_t task, time_value_t *user_time, time_value_t *sys_time)
5378 {
5379 clock_sec_t sec;
5380 clock_usec_t usec;
5381
5382 struct recount_times_mach times = recount_task_terminated_times(task);
5383 absolutetime_to_microtime(times.rtm_user, &sec, &usec);
5384 user_time->seconds = (typeof(user_time->seconds))sec;
5385 user_time->microseconds = usec;
5386 absolutetime_to_microtime(times.rtm_system, &sec, &usec);
5387 sys_time->seconds = (typeof(sys_time->seconds))sec;
5388 sys_time->microseconds = usec;
5389 }
5390
5391 int radar_20146450 = 1;
5392 kern_return_t
5393 task_info(
5394 task_t task,
5395 task_flavor_t flavor,
5396 task_info_t task_info_out,
5397 mach_msg_type_number_t *task_info_count)
5398 {
5399 kern_return_t error = KERN_SUCCESS;
5400 mach_msg_type_number_t original_task_info_count;
5401 bool is_kernel_task = (task == kernel_task);
5402
5403 if (task == TASK_NULL) {
5404 return KERN_INVALID_ARGUMENT;
5405 }
5406
5407 original_task_info_count = *task_info_count;
5408 task_lock(task);
5409
5410 if (task != current_task() && !task->active) {
5411 task_unlock(task);
5412 return KERN_INVALID_ARGUMENT;
5413 }
5414
5415
5416 switch (flavor) {
5417 case TASK_BASIC_INFO_32:
5418 case TASK_BASIC2_INFO_32:
5419 #if defined(__arm64__)
5420 case TASK_BASIC_INFO_64:
5421 #endif
5422 {
5423 task_basic_info_32_t basic_info;
5424 ledger_amount_t tmp;
5425
5426 if (*task_info_count < TASK_BASIC_INFO_32_COUNT) {
5427 error = KERN_INVALID_ARGUMENT;
5428 break;
5429 }
5430
5431 basic_info = (task_basic_info_32_t)task_info_out;
5432
5433 basic_info->virtual_size = (typeof(basic_info->virtual_size))
5434 vm_map_adjusted_size(is_kernel_task ? kernel_map : task->map);
5435 if (flavor == TASK_BASIC2_INFO_32) {
5436 /*
5437 * The "BASIC2" flavor gets the maximum resident
5438 * size instead of the current resident size...
5439 */
5440 ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, &tmp);
5441 } else {
5442 ledger_get_balance(task->ledger, task_ledgers.phys_mem, &tmp);
5443 }
5444 basic_info->resident_size = (natural_t) MIN((ledger_amount_t) UINT32_MAX, tmp);
5445
5446 _task_fill_times(task, &basic_info->user_time,
5447 &basic_info->system_time);
5448
5449 basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5450 basic_info->suspend_count = task->user_stop_count;
5451
5452 *task_info_count = TASK_BASIC_INFO_32_COUNT;
5453 break;
5454 }
5455
5456 #if defined(__arm64__)
5457 case TASK_BASIC_INFO_64_2:
5458 {
5459 task_basic_info_64_2_t basic_info;
5460
5461 if (*task_info_count < TASK_BASIC_INFO_64_2_COUNT) {
5462 error = KERN_INVALID_ARGUMENT;
5463 break;
5464 }
5465
5466 basic_info = (task_basic_info_64_2_t)task_info_out;
5467
5468 basic_info->virtual_size = vm_map_adjusted_size(is_kernel_task ?
5469 kernel_map : task->map);
5470 ledger_get_balance(task->ledger, task_ledgers.phys_mem,
5471 (ledger_amount_t *)&basic_info->resident_size);
5472 basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5473 basic_info->suspend_count = task->user_stop_count;
5474 _task_fill_times(task, &basic_info->user_time,
5475 &basic_info->system_time);
5476
5477 *task_info_count = TASK_BASIC_INFO_64_2_COUNT;
5478 break;
5479 }
5480
5481 #else /* defined(__arm64__) */
5482 case TASK_BASIC_INFO_64:
5483 {
5484 task_basic_info_64_t basic_info;
5485
5486 if (*task_info_count < TASK_BASIC_INFO_64_COUNT) {
5487 error = KERN_INVALID_ARGUMENT;
5488 break;
5489 }
5490
5491 basic_info = (task_basic_info_64_t)task_info_out;
5492
5493 basic_info->virtual_size = vm_map_adjusted_size(is_kernel_task ?
5494 kernel_map : task->map);
5495 ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *)&basic_info->resident_size);
5496 basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5497 basic_info->suspend_count = task->user_stop_count;
5498 _task_fill_times(task, &basic_info->user_time,
5499 &basic_info->system_time);
5500
5501 *task_info_count = TASK_BASIC_INFO_64_COUNT;
5502 break;
5503 }
5504 #endif /* defined(__arm64__) */
5505
5506 case MACH_TASK_BASIC_INFO:
5507 {
5508 mach_task_basic_info_t basic_info;
5509
5510 if (*task_info_count < MACH_TASK_BASIC_INFO_COUNT) {
5511 error = KERN_INVALID_ARGUMENT;
5512 break;
5513 }
5514
5515 basic_info = (mach_task_basic_info_t)task_info_out;
5516
5517 basic_info->virtual_size = vm_map_adjusted_size(is_kernel_task ?
5518 kernel_map : task->map);
5519 ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &basic_info->resident_size);
5520 ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &basic_info->resident_size_max);
5521 basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5522 basic_info->suspend_count = task->user_stop_count;
5523 _task_fill_times(task, &basic_info->user_time,
5524 &basic_info->system_time);
5525
5526 *task_info_count = MACH_TASK_BASIC_INFO_COUNT;
5527 break;
5528 }
5529
5530 case TASK_THREAD_TIMES_INFO:
5531 {
5532 task_thread_times_info_t times_info;
5533 thread_t thread;
5534
5535 if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
5536 error = KERN_INVALID_ARGUMENT;
5537 break;
5538 }
5539
5540 times_info = (task_thread_times_info_t)task_info_out;
5541 times_info->user_time = (time_value_t){ 0 };
5542 times_info->system_time = (time_value_t){ 0 };
5543
5544 queue_iterate(&task->threads, thread, thread_t, task_threads) {
5545 if ((thread->options & TH_OPT_IDLE_THREAD) == 0) {
5546 time_value_t user_time, system_time;
5547
5548 thread_read_times(thread, &user_time, &system_time, NULL);
5549 time_value_add(&times_info->user_time, &user_time);
5550 time_value_add(&times_info->system_time, &system_time);
5551 }
5552 }
5553
5554 *task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
5555 break;
5556 }
5557
5558 case TASK_ABSOLUTETIME_INFO:
5559 {
5560 task_absolutetime_info_t info;
5561
5562 if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT) {
5563 error = KERN_INVALID_ARGUMENT;
5564 break;
5565 }
5566
5567 info = (task_absolutetime_info_t)task_info_out;
5568
5569 struct recount_times_mach term_times =
5570 recount_task_terminated_times(task);
5571 struct recount_times_mach total_times = recount_task_times(task);
5572
5573 info->total_user = total_times.rtm_user;
5574 info->total_system = total_times.rtm_system;
5575 info->threads_user = total_times.rtm_user - term_times.rtm_user;
5576 info->threads_system = total_times.rtm_system - term_times.rtm_system;
5577
5578 *task_info_count = TASK_ABSOLUTETIME_INFO_COUNT;
5579 break;
5580 }
5581
5582 case TASK_DYLD_INFO:
5583 {
5584 task_dyld_info_t info;
5585
5586 /*
5587 * We added the format field to TASK_DYLD_INFO output. For
5588 * temporary backward compatibility, accept the fact that
5589 * clients may ask for the old version - distinguished by the
5590 * size of the expected result structure.
5591 */
5592 #define TASK_LEGACY_DYLD_INFO_COUNT \
5593 offsetof(struct task_dyld_info, all_image_info_format)/sizeof(natural_t)
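/*
 * That is, the number of natural_t words preceding the
 * all_image_info_format field, which is the count passed by clients
 * built before the format field was added.
 */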
5594
5595 if (*task_info_count < TASK_LEGACY_DYLD_INFO_COUNT) {
5596 error = KERN_INVALID_ARGUMENT;
5597 break;
5598 }
5599
5600 info = (task_dyld_info_t)task_info_out;
5601 info->all_image_info_addr = task->all_image_info_addr;
5602 info->all_image_info_size = task->all_image_info_size;
5603
5604 /* only set format on output for those expecting it */
5605 if (*task_info_count >= TASK_DYLD_INFO_COUNT) {
5606 info->all_image_info_format = task_has_64Bit_addr(task) ?
5607 TASK_DYLD_ALL_IMAGE_INFO_64 :
5608 TASK_DYLD_ALL_IMAGE_INFO_32;
5609 *task_info_count = TASK_DYLD_INFO_COUNT;
5610 } else {
5611 *task_info_count = TASK_LEGACY_DYLD_INFO_COUNT;
5612 }
5613 break;
5614 }
5615
5616 case TASK_EXTMOD_INFO:
5617 {
5618 task_extmod_info_t info;
5619 void *p;
5620
5621 if (*task_info_count < TASK_EXTMOD_INFO_COUNT) {
5622 error = KERN_INVALID_ARGUMENT;
5623 break;
5624 }
5625
5626 info = (task_extmod_info_t)task_info_out;
5627
5628 p = get_bsdtask_info(task);
5629 if (p) {
5630 proc_getexecutableuuid(p, info->task_uuid, sizeof(info->task_uuid));
5631 } else {
5632 bzero(info->task_uuid, sizeof(info->task_uuid));
5633 }
5634 info->extmod_statistics = task->extmod_statistics;
5635 *task_info_count = TASK_EXTMOD_INFO_COUNT;
5636
5637 break;
5638 }
5639
5640 case TASK_KERNELMEMORY_INFO:
5641 {
5642 task_kernelmemory_info_t tkm_info;
5643 ledger_amount_t credit, debit;
5644
5645 if (*task_info_count < TASK_KERNELMEMORY_INFO_COUNT) {
5646 error = KERN_INVALID_ARGUMENT;
5647 break;
5648 }
5649
5650 tkm_info = (task_kernelmemory_info_t) task_info_out;
5651 tkm_info->total_palloc = 0;
5652 tkm_info->total_pfree = 0;
5653 tkm_info->total_salloc = 0;
5654 tkm_info->total_sfree = 0;
5655
5656 if (task == kernel_task) {
5657 /*
5658 * All shared allocs/frees from other tasks count against
5659 * the kernel private memory usage. If we are looking up
5660 * info for the kernel task, gather from everywhere.
5661 */
5662 task_unlock(task);
5663
5664 /* start by accounting for all the terminated tasks against the kernel */
5665 tkm_info->total_palloc = tasks_tkm_private.alloc + tasks_tkm_shared.alloc;
5666 tkm_info->total_pfree = tasks_tkm_private.free + tasks_tkm_shared.free;
5667
5668 /* count all other task/thread shared alloc/free against the kernel */
5669 lck_mtx_lock(&tasks_threads_lock);
5670
5671 /* XXX this really shouldn't be using the function parameter 'task' as a local var! */
5672 queue_iterate(&tasks, task, task_t, tasks) {
5673 if (task == kernel_task) {
5674 if (ledger_get_entries(task->ledger,
5675 task_ledgers.tkm_private, &credit,
5676 &debit) == KERN_SUCCESS) {
5677 tkm_info->total_palloc += credit;
5678 tkm_info->total_pfree += debit;
5679 }
5680 }
5681 if (!ledger_get_entries(task->ledger,
5682 task_ledgers.tkm_shared, &credit, &debit)) {
5683 tkm_info->total_palloc += credit;
5684 tkm_info->total_pfree += debit;
5685 }
5686 }
5687 lck_mtx_unlock(&tasks_threads_lock);
5688 } else {
5689 if (!ledger_get_entries(task->ledger,
5690 task_ledgers.tkm_private, &credit, &debit)) {
5691 tkm_info->total_palloc = credit;
5692 tkm_info->total_pfree = debit;
5693 }
5694 if (!ledger_get_entries(task->ledger,
5695 task_ledgers.tkm_shared, &credit, &debit)) {
5696 tkm_info->total_salloc = credit;
5697 tkm_info->total_sfree = debit;
5698 }
5699 task_unlock(task);
5700 }
5701
5702 *task_info_count = TASK_KERNELMEMORY_INFO_COUNT;
5703 return KERN_SUCCESS;
5704 }
5705
5706 /* OBSOLETE */
5707 case TASK_SCHED_FIFO_INFO:
5708 {
5709 if (*task_info_count < POLICY_FIFO_BASE_COUNT) {
5710 error = KERN_INVALID_ARGUMENT;
5711 break;
5712 }
5713
5714 error = KERN_INVALID_POLICY;
5715 break;
5716 }
5717
5718 /* OBSOLETE */
5719 case TASK_SCHED_RR_INFO:
5720 {
5721 policy_rr_base_t rr_base;
5722 uint32_t quantum_time;
5723 uint64_t quantum_ns;
5724
5725 if (*task_info_count < POLICY_RR_BASE_COUNT) {
5726 error = KERN_INVALID_ARGUMENT;
5727 break;
5728 }
5729
5730 rr_base = (policy_rr_base_t) task_info_out;
5731
5732 if (task != kernel_task) {
5733 error = KERN_INVALID_POLICY;
5734 break;
5735 }
5736
5737 rr_base->base_priority = task->priority;
5738
5739 quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
5740 absolutetime_to_nanoseconds(quantum_time, &quantum_ns);
5741
5742 rr_base->quantum = (uint32_t)(quantum_ns / 1000 / 1000);
5743
5744 *task_info_count = POLICY_RR_BASE_COUNT;
5745 break;
5746 }
5747
5748 /* OBSOLETE */
5749 case TASK_SCHED_TIMESHARE_INFO:
5750 {
5751 policy_timeshare_base_t ts_base;
5752
5753 if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT) {
5754 error = KERN_INVALID_ARGUMENT;
5755 break;
5756 }
5757
5758 ts_base = (policy_timeshare_base_t) task_info_out;
5759
5760 if (task == kernel_task) {
5761 error = KERN_INVALID_POLICY;
5762 break;
5763 }
5764
5765 ts_base->base_priority = task->priority;
5766
5767 *task_info_count = POLICY_TIMESHARE_BASE_COUNT;
5768 break;
5769 }
5770
5771 case TASK_SECURITY_TOKEN:
5772 {
5773 security_token_t *sec_token_p;
5774
5775 if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
5776 error = KERN_INVALID_ARGUMENT;
5777 break;
5778 }
5779
5780 sec_token_p = (security_token_t *) task_info_out;
5781
5782 *sec_token_p = *task_get_sec_token(task);
5783
5784 *task_info_count = TASK_SECURITY_TOKEN_COUNT;
5785 break;
5786 }
5787
5788 case TASK_AUDIT_TOKEN:
5789 {
5790 audit_token_t *audit_token_p;
5791
5792 if (*task_info_count < TASK_AUDIT_TOKEN_COUNT) {
5793 error = KERN_INVALID_ARGUMENT;
5794 break;
5795 }
5796
5797 audit_token_p = (audit_token_t *) task_info_out;
5798
5799 *audit_token_p = *task_get_audit_token(task);
5800
5801 *task_info_count = TASK_AUDIT_TOKEN_COUNT;
5802 break;
5803 }
5804
5805 case TASK_SCHED_INFO:
5806 error = KERN_INVALID_ARGUMENT;
5807 break;
5808
5809 case TASK_EVENTS_INFO:
5810 {
5811 task_events_info_t events_info;
5812 thread_t thread;
5813 uint64_t n_syscalls_mach, n_syscalls_unix, n_csw;
5814
5815 if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
5816 error = KERN_INVALID_ARGUMENT;
5817 break;
5818 }
5819
5820 events_info = (task_events_info_t) task_info_out;
5821
5822
5823 events_info->faults = (int32_t) MIN(counter_load(&task->faults), INT32_MAX);
5824 events_info->pageins = (int32_t) MIN(counter_load(&task->pageins), INT32_MAX);
5825 events_info->cow_faults = (int32_t) MIN(counter_load(&task->cow_faults), INT32_MAX);
5826 events_info->messages_sent = (int32_t) MIN(counter_load(&task->messages_sent), INT32_MAX);
5827 events_info->messages_received = (int32_t) MIN(counter_load(&task->messages_received), INT32_MAX);
5828
5829 n_syscalls_mach = task->syscalls_mach;
5830 n_syscalls_unix = task->syscalls_unix;
5831 n_csw = task->c_switch;
5832
5833 queue_iterate(&task->threads, thread, thread_t, task_threads) {
5834 n_csw += thread->c_switch;
5835 n_syscalls_mach += thread->syscalls_mach;
5836 n_syscalls_unix += thread->syscalls_unix;
5837 }
5838
5839 events_info->syscalls_mach = (int32_t) MIN(n_syscalls_mach, INT32_MAX);
5840 events_info->syscalls_unix = (int32_t) MIN(n_syscalls_unix, INT32_MAX);
5841 events_info->csw = (int32_t) MIN(n_csw, INT32_MAX);
5842
5843 *task_info_count = TASK_EVENTS_INFO_COUNT;
5844 break;
5845 }
5846 case TASK_AFFINITY_TAG_INFO:
5847 {
5848 if (*task_info_count < TASK_AFFINITY_TAG_INFO_COUNT) {
5849 error = KERN_INVALID_ARGUMENT;
5850 break;
5851 }
5852
5853 error = task_affinity_info(task, task_info_out, task_info_count);
5854 break;
5855 }
5856 case TASK_POWER_INFO:
5857 {
5858 if (*task_info_count < TASK_POWER_INFO_COUNT) {
5859 error = KERN_INVALID_ARGUMENT;
5860 break;
5861 }
5862
5863 task_power_info_locked(task, (task_power_info_t)task_info_out, NULL, NULL, NULL);
5864 break;
5865 }
5866
5867 case TASK_POWER_INFO_V2:
5868 {
5869 if (*task_info_count < TASK_POWER_INFO_V2_COUNT_OLD) {
5870 error = KERN_INVALID_ARGUMENT;
5871 break;
5872 }
5873 task_power_info_v2_t tpiv2 = (task_power_info_v2_t) task_info_out;
5874 task_power_info_locked(task, &tpiv2->cpu_energy, &tpiv2->gpu_energy, tpiv2, NULL);
5875 break;
5876 }
5877
5878 case TASK_VM_INFO:
5879 case TASK_VM_INFO_PURGEABLE:
5880 {
5881 task_vm_info_t vm_info;
5882 vm_map_t map;
5883 ledger_amount_t tmp_amount;
5884
5885 struct proc *p;
5886 uint32_t platform, sdk;
5887
5888 vmlp_api_start(TASK_INFO); /* this is the only case that is relevant to the lock profiler */
5889
5890 p = current_proc();
5891 platform = proc_platform(p);
5892 sdk = proc_sdk(p);
5893 if (original_task_info_count > TASK_VM_INFO_COUNT) {
5894 /*
5895 * Some iOS apps pass an incorrect value for
5896 * task_info_count, expressed in number of bytes
5897 * instead of number of "natural_t" elements, which
5898 * can lead to binary compatibility issues (including
5899 * stack corruption) when the data structure is
5900 * expanded in the future.
5901 * Let's make this potential issue visible by
5902 * logging about it...
5903 */
5904 if (!proc_is_simulated(p)) {
5905 os_log(OS_LOG_DEFAULT, "%s[%d] task_info: possibly invalid "
5906 "task_info_count %d > TASK_VM_INFO_COUNT=%d on platform %d sdk "
5907 "%d.%d.%d - please use TASK_VM_INFO_COUNT",
5908 proc_name_address(p), proc_pid(p),
5909 original_task_info_count, TASK_VM_INFO_COUNT,
5910 platform, (sdk >> 16), ((sdk >> 8) & 0xff), (sdk & 0xff));
5911 }
5912 DTRACE_VM4(suspicious_task_vm_info_count,
5913 mach_msg_type_number_t, original_task_info_count,
5914 mach_msg_type_number_t, TASK_VM_INFO_COUNT,
5915 uint32_t, platform,
5916 uint32_t, sdk);
5917 }
5918 #if __arm64__
5919 if (original_task_info_count > TASK_VM_INFO_REV2_COUNT &&
5920 platform == PLATFORM_IOS &&
5921 sdk != 0 &&
5922 (sdk >> 16) <= 12) {
5923 /*
5924 * Some iOS apps pass an incorrect value for
5925 * task_info_count, expressed in number of bytes
5926 * instead of number of "natural_t" elements.
5927 * For the sake of backwards binary compatibility
5928 * for apps built with an iOS12 or older SDK and using
5929 * the "rev2" data structure, let's fix task_info_count
5930 * for them, to avoid stomping past the actual end
5931 * of their buffer.
5932 */
5933 #if DEVELOPMENT || DEBUG
5934 printf("%s:%d %d[%s] rdar://49484582 task_info_count %d -> %d "
5935 "platform %d sdk %d.%d.%d\n", __FUNCTION__, __LINE__, proc_pid(p),
5936 proc_name_address(p), original_task_info_count,
5937 TASK_VM_INFO_REV2_COUNT, platform, (sdk >> 16),
5938 ((sdk >> 8) & 0xff), (sdk & 0xff));
5939 #endif /* DEVELOPMENT || DEBUG */
5940 DTRACE_VM4(workaround_task_vm_info_count,
5941 mach_msg_type_number_t, original_task_info_count,
5942 mach_msg_type_number_t, TASK_VM_INFO_REV2_COUNT,
5943 uint32_t, platform,
5944 uint32_t, sdk);
5945 original_task_info_count = TASK_VM_INFO_REV2_COUNT;
5946 *task_info_count = original_task_info_count;
5947 }
5948 if (original_task_info_count > TASK_VM_INFO_REV5_COUNT &&
5949 platform == PLATFORM_IOS &&
5950 sdk != 0 &&
5951 (sdk >> 16) <= 15) {
5952 /*
5953 * Some iOS apps pass an incorrect value for
5954 * task_info_count, expressed in number of bytes
5955 * instead of number of "natural_t" elements.
5956 */
5957 printf("%s:%d %d[%s] task_info_count=%d > TASK_VM_INFO_COUNT=%d "
5958 "platform %d sdk %d.%d.%d\n", __FUNCTION__, __LINE__, proc_pid(p),
5959 proc_name_address(p), original_task_info_count,
5960 TASK_VM_INFO_REV5_COUNT, platform, (sdk >> 16),
5961 ((sdk >> 8) & 0xff), (sdk & 0xff));
5962 DTRACE_VM4(workaround_task_vm_info_count,
5963 mach_msg_type_number_t, original_task_info_count,
5964 mach_msg_type_number_t, TASK_VM_INFO_REV5_COUNT,
5965 uint32_t, platform,
5966 uint32_t, sdk);
5967 #if DEVELOPMENT || DEBUG
5968 /*
5969 * For the sake of internal builds livability,
5970 * work around this user-space bug by capping the
5971 * buffer's size to what it was with the iOS15 SDK.
5972 */
5973 original_task_info_count = TASK_VM_INFO_REV5_COUNT;
5974 *task_info_count = original_task_info_count;
5975 #endif /* DEVELOPMENT || DEBUG */
5976 }
5977
5978 if (original_task_info_count > TASK_VM_INFO_REV7_COUNT &&
5979 platform == PLATFORM_IOS &&
5980 sdk != 0 &&
5981 (sdk >> 16) == 17) {
5982 /*
5983 * Some iOS apps still pass an incorrect value for
5984 * task_info_count, expressed in number of bytes
5985 * instead of number of "natural_t" elements.
5986 */
5987 printf("%s:%d %d[%s] task_info_count=%d > TASK_VM_INFO_COUNT=%d "
5988 "platform %d sdk %d.%d.%d\n", __FUNCTION__, __LINE__, proc_pid(p),
5989 proc_name_address(p), original_task_info_count,
5990 TASK_VM_INFO_REV7_COUNT, platform, (sdk >> 16),
5991 ((sdk >> 8) & 0xff), (sdk & 0xff));
5992 DTRACE_VM4(workaround_task_vm_info_count,
5993 mach_msg_type_number_t, original_task_info_count,
5994 mach_msg_type_number_t, TASK_VM_INFO_REV6_COUNT,
5995 uint32_t, platform,
5996 uint32_t, sdk);
5997 #if DEVELOPMENT || DEBUG
5998 /*
5999 * For the sake of internal builds livability,
6000 * work around this user-space bug by capping the
6001 * buffer's size to what it was with the iOS15 and iOS16 SDKs.
6002 */
6003 original_task_info_count = TASK_VM_INFO_REV6_COUNT;
6004 *task_info_count = original_task_info_count;
6005 #endif /* DEVELOPMENT || DEBUG */
6006 }
6007 #endif /* __arm64__ */
6008
6009 if (*task_info_count < TASK_VM_INFO_REV0_COUNT) {
6010 error = KERN_INVALID_ARGUMENT;
6011 vmlp_api_end(TASK_INFO, error);
6012 break;
6013 }
6014
6015 vm_info = (task_vm_info_t)task_info_out;
6016
6017 /*
6018 * Do not hold both the task and map locks,
6019 * so convert the task lock into a map reference,
6020 * drop the task lock, then lock the map.
6021 */
6022 if (is_kernel_task) {
6023 map = kernel_map;
6024 task_unlock(task);
6025 /* no lock, no reference */
6026 } else {
6027 map = task->map;
6028 vm_map_reference(map);
6029 task_unlock(task);
6030 vm_map_lock_read(map);
6031 }
6032
6033 vmlp_range_event_all(map);
6034
6035 vm_info->virtual_size = (typeof(vm_info->virtual_size))vm_map_adjusted_size(map);
6036 vm_info->region_count = map->hdr.nentries;
6037 vm_info->page_size = vm_map_page_size(map);
6038
6039 ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &vm_info->resident_size);
6040 ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &vm_info->resident_size_peak);
6041
6042 vm_info->device = 0;
6043 vm_info->device_peak = 0;
6044 ledger_get_balance(task->ledger, task_ledgers.external, (ledger_amount_t *) &vm_info->external);
6045 ledger_get_lifetime_max(task->ledger, task_ledgers.external, (ledger_amount_t *) &vm_info->external_peak);
6046 ledger_get_balance(task->ledger, task_ledgers.internal, (ledger_amount_t *) &vm_info->internal);
6047 ledger_get_lifetime_max(task->ledger, task_ledgers.internal, (ledger_amount_t *) &vm_info->internal_peak);
6048 ledger_get_balance(task->ledger, task_ledgers.reusable, (ledger_amount_t *) &vm_info->reusable);
6049 ledger_get_lifetime_max(task->ledger, task_ledgers.reusable, (ledger_amount_t *) &vm_info->reusable_peak);
6050 ledger_get_balance(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t*) &vm_info->compressed);
6051 ledger_get_lifetime_max(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t*) &vm_info->compressed_peak);
6052 ledger_get_entries(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t*) &vm_info->compressed_lifetime, &tmp_amount);
6053 ledger_get_balance(task->ledger, task_ledgers.neural_nofootprint_total, (ledger_amount_t *) &vm_info->ledger_tag_neural_nofootprint_total);
6054 ledger_get_lifetime_max(task->ledger, task_ledgers.neural_nofootprint_total, (ledger_amount_t *) &vm_info->ledger_tag_neural_nofootprint_peak);
6055
6056 vm_info->purgeable_volatile_pmap = 0;
6057 vm_info->purgeable_volatile_resident = 0;
6058 vm_info->purgeable_volatile_virtual = 0;
6059 if (is_kernel_task) {
6060 /*
6061 * We do not maintain the detailed stats for the
6062 * kernel_pmap, so just count everything as
6063 * "internal"...
6064 */
6065 vm_info->internal = vm_info->resident_size;
6066 /*
6067 * ... but since the memory held by the VM compressor
6068 * in the kernel address space ought to be attributed
6069 * to user-space tasks, we subtract it from "internal"
6070 * to give memory reporting tools a more accurate idea
6071 * of what the kernel itself is actually using, instead
6072 * of making it look like the kernel is leaking memory
6073 * when the system is under memory pressure.
6074 */
6075 vm_info->internal -= (VM_PAGE_COMPRESSOR_COUNT *
6076 PAGE_SIZE);
6077 } else {
6078 mach_vm_size_t volatile_virtual_size;
6079 mach_vm_size_t volatile_resident_size;
6080 mach_vm_size_t volatile_compressed_size;
6081 mach_vm_size_t volatile_pmap_size;
6082 mach_vm_size_t volatile_compressed_pmap_size;
6083 kern_return_t kr;
6084
6085 if (flavor == TASK_VM_INFO_PURGEABLE) {
6086 kr = vm_map_query_volatile(
6087 map,
6088 &volatile_virtual_size,
6089 &volatile_resident_size,
6090 &volatile_compressed_size,
6091 &volatile_pmap_size,
6092 &volatile_compressed_pmap_size);
6093 if (kr == KERN_SUCCESS) {
6094 vm_info->purgeable_volatile_pmap =
6095 volatile_pmap_size;
6096 if (radar_20146450) {
6097 vm_info->compressed -=
6098 volatile_compressed_pmap_size;
6099 }
6100 vm_info->purgeable_volatile_resident =
6101 volatile_resident_size;
6102 vm_info->purgeable_volatile_virtual =
6103 volatile_virtual_size;
6104 }
6105 }
6106 }
6107 *task_info_count = TASK_VM_INFO_REV0_COUNT;
6108
6109 if (original_task_info_count >= TASK_VM_INFO_REV2_COUNT) {
6110 /* must be captured while we still have the map lock */
6111 vm_info->min_address = map->min_offset;
6112 vm_info->max_address = map->max_offset;
6113 }
6114
6115 /*
6116 * Done with vm map things, can drop the map lock and reference,
6117 * and take the task lock back.
6118 *
6119 * Re-validate that the task didn't die on us.
6120 */
6121 if (!is_kernel_task) {
6122 vm_map_unlock_read(map);
6123 vm_map_deallocate(map);
6124 }
6125 map = VM_MAP_NULL;
6126
6127 task_lock(task);
6128
6129 if ((task != current_task()) && (!task->active)) {
6130 error = KERN_INVALID_ARGUMENT;
6131 vmlp_api_end(TASK_INFO, error);
6132 break;
6133 }
6134
6135 if (original_task_info_count >= TASK_VM_INFO_REV1_COUNT) {
6136 vm_info->phys_footprint =
6137 (mach_vm_size_t) get_task_phys_footprint(task);
6138 *task_info_count = TASK_VM_INFO_REV1_COUNT;
6139 }
6140 if (original_task_info_count >= TASK_VM_INFO_REV2_COUNT) {
6141 /* data was captured above */
6142 *task_info_count = TASK_VM_INFO_REV2_COUNT;
6143 }
6144
6145 if (original_task_info_count >= TASK_VM_INFO_REV3_COUNT) {
6146 ledger_get_lifetime_max(task->ledger,
6147 task_ledgers.phys_footprint,
6148 &vm_info->ledger_phys_footprint_peak);
6149 ledger_get_balance(task->ledger,
6150 task_ledgers.purgeable_nonvolatile,
6151 &vm_info->ledger_purgeable_nonvolatile);
6152 ledger_get_balance(task->ledger,
6153 task_ledgers.purgeable_nonvolatile_compressed,
6154 &vm_info->ledger_purgeable_novolatile_compressed);
6155 ledger_get_balance(task->ledger,
6156 task_ledgers.purgeable_volatile,
6157 &vm_info->ledger_purgeable_volatile);
6158 ledger_get_balance(task->ledger,
6159 task_ledgers.purgeable_volatile_compressed,
6160 &vm_info->ledger_purgeable_volatile_compressed);
6161 ledger_get_balance(task->ledger,
6162 task_ledgers.network_nonvolatile,
6163 &vm_info->ledger_tag_network_nonvolatile);
6164 ledger_get_balance(task->ledger,
6165 task_ledgers.network_nonvolatile_compressed,
6166 &vm_info->ledger_tag_network_nonvolatile_compressed);
6167 ledger_get_balance(task->ledger,
6168 task_ledgers.network_volatile,
6169 &vm_info->ledger_tag_network_volatile);
6170 ledger_get_balance(task->ledger,
6171 task_ledgers.network_volatile_compressed,
6172 &vm_info->ledger_tag_network_volatile_compressed);
6173 ledger_get_balance(task->ledger,
6174 task_ledgers.media_footprint,
6175 &vm_info->ledger_tag_media_footprint);
6176 ledger_get_balance(task->ledger,
6177 task_ledgers.media_footprint_compressed,
6178 &vm_info->ledger_tag_media_footprint_compressed);
6179 ledger_get_balance(task->ledger,
6180 task_ledgers.media_nofootprint,
6181 &vm_info->ledger_tag_media_nofootprint);
6182 ledger_get_balance(task->ledger,
6183 task_ledgers.media_nofootprint_compressed,
6184 &vm_info->ledger_tag_media_nofootprint_compressed);
6185 ledger_get_balance(task->ledger,
6186 task_ledgers.graphics_footprint,
6187 &vm_info->ledger_tag_graphics_footprint);
6188 ledger_get_balance(task->ledger,
6189 task_ledgers.graphics_footprint_compressed,
6190 &vm_info->ledger_tag_graphics_footprint_compressed);
6191 ledger_get_balance(task->ledger,
6192 task_ledgers.graphics_nofootprint,
6193 &vm_info->ledger_tag_graphics_nofootprint);
6194 ledger_get_balance(task->ledger,
6195 task_ledgers.graphics_nofootprint_compressed,
6196 &vm_info->ledger_tag_graphics_nofootprint_compressed);
6197 ledger_get_balance(task->ledger,
6198 task_ledgers.neural_footprint,
6199 &vm_info->ledger_tag_neural_footprint);
6200 ledger_get_balance(task->ledger,
6201 task_ledgers.neural_footprint_compressed,
6202 &vm_info->ledger_tag_neural_footprint_compressed);
6203 ledger_get_balance(task->ledger,
6204 task_ledgers.neural_nofootprint,
6205 &vm_info->ledger_tag_neural_nofootprint);
6206 ledger_get_balance(task->ledger,
6207 task_ledgers.neural_nofootprint_compressed,
6208 &vm_info->ledger_tag_neural_nofootprint_compressed);
6209 *task_info_count = TASK_VM_INFO_REV3_COUNT;
6210 }
6211 if (original_task_info_count >= TASK_VM_INFO_REV4_COUNT) {
6212 if (get_bsdtask_info(task)) {
6213 vm_info->limit_bytes_remaining =
6214 memorystatus_available_memory_internal(get_bsdtask_info(task));
6215 } else {
6216 vm_info->limit_bytes_remaining = 0;
6217 }
6218 *task_info_count = TASK_VM_INFO_REV4_COUNT;
6219 }
6220 if (original_task_info_count >= TASK_VM_INFO_REV5_COUNT) {
6221 thread_t thread;
6222 uint64_t total = task->decompressions;
6223 queue_iterate(&task->threads, thread, thread_t, task_threads) {
6224 total += thread->decompressions;
6225 }
6226 vm_info->decompressions = (int32_t) MIN(total, INT32_MAX);
6227 *task_info_count = TASK_VM_INFO_REV5_COUNT;
6228 }
6229 if (original_task_info_count >= TASK_VM_INFO_REV6_COUNT) {
6230 ledger_get_balance(task->ledger, task_ledgers.swapins,
6231 &vm_info->ledger_swapins);
6232 *task_info_count = TASK_VM_INFO_REV6_COUNT;
6233 }
6234 if (original_task_info_count >= TASK_VM_INFO_REV7_COUNT) {
6235 ledger_get_balance(task->ledger,
6236 task_ledgers.neural_nofootprint_total,
6237 &vm_info->ledger_tag_neural_nofootprint_total);
6238 ledger_get_lifetime_max(task->ledger,
6239 task_ledgers.neural_nofootprint_total,
6240 &vm_info->ledger_tag_neural_nofootprint_peak);
6241 *task_info_count = TASK_VM_INFO_REV7_COUNT;
6242 }
6243
6244 vmlp_api_end(TASK_INFO, error);
6245 break;
6246 }
6247
6248 case TASK_WAIT_STATE_INFO:
6249 {
6250 /*
6251 * Deprecated flavor. Currently allowing some results until all users
6252 * stop calling it. The results may not be accurate.
6253 */
6254 task_wait_state_info_t wait_state_info;
6255 uint64_t total_sfi_ledger_val = 0;
6256
6257 if (*task_info_count < TASK_WAIT_STATE_INFO_COUNT) {
6258 error = KERN_INVALID_ARGUMENT;
6259 break;
6260 }
6261
6262 wait_state_info = (task_wait_state_info_t) task_info_out;
6263
6264 wait_state_info->total_wait_state_time = 0;
6265 bzero(wait_state_info->_reserved, sizeof(wait_state_info->_reserved));
6266
6267 #if CONFIG_SCHED_SFI
6268 int i, prev_lentry = -1;
6269 int64_t val_credit, val_debit;
6270
6271 for (i = 0; i < MAX_SFI_CLASS_ID; i++) {
6272 val_credit = 0;
6273 /*
6274 * checking with prev_lentry != entry ensures adjacent classes
6275 * which share the same ledger do not add wait times twice.
6276 * Note: Use ledger() call to get data for each individual sfi class.
6277 */
6278 if (prev_lentry != task_ledgers.sfi_wait_times[i] &&
6279 KERN_SUCCESS == ledger_get_entries(task->ledger,
6280 task_ledgers.sfi_wait_times[i], &val_credit, &val_debit)) {
6281 total_sfi_ledger_val += val_credit;
6282 }
6283 prev_lentry = task_ledgers.sfi_wait_times[i];
6284 }
6285
6286 #endif /* CONFIG_SCHED_SFI */
6287 wait_state_info->total_wait_sfi_state_time = total_sfi_ledger_val;
6288 *task_info_count = TASK_WAIT_STATE_INFO_COUNT;
6289
6290 break;
6291 }
6292 case TASK_VM_INFO_PURGEABLE_ACCOUNT:
6293 {
6294 #if DEVELOPMENT || DEBUG
6295 pvm_account_info_t acnt_info;
6296
6297 if (*task_info_count < PVM_ACCOUNT_INFO_COUNT) {
6298 error = KERN_INVALID_ARGUMENT;
6299 break;
6300 }
6301
6302 if (task_info_out == NULL) {
6303 error = KERN_INVALID_ARGUMENT;
6304 break;
6305 }
6306
6307 acnt_info = (pvm_account_info_t) task_info_out;
6308
6309 error = vm_purgeable_account(task, acnt_info);
6310
6311 *task_info_count = PVM_ACCOUNT_INFO_COUNT;
6312
6313 break;
6314 #else /* DEVELOPMENT || DEBUG */
6315 error = KERN_NOT_SUPPORTED;
6316 break;
6317 #endif /* DEVELOPMENT || DEBUG */
6318 }
6319 case TASK_FLAGS_INFO:
6320 {
6321 task_flags_info_t flags_info;
6322
6323 if (*task_info_count < TASK_FLAGS_INFO_COUNT) {
6324 error = KERN_INVALID_ARGUMENT;
6325 break;
6326 }
6327
6328 flags_info = (task_flags_info_t)task_info_out;
6329
6330 /* only publish the 64-bit flag of the task */
6331 flags_info->flags = task->t_flags & (TF_64B_ADDR | TF_64B_DATA);
6332
6333 *task_info_count = TASK_FLAGS_INFO_COUNT;
6334 break;
6335 }
6336
6337 case TASK_DEBUG_INFO_INTERNAL:
6338 {
6339 #if DEVELOPMENT || DEBUG
6340 task_debug_info_internal_t dbg_info;
6341 ipc_space_t space = task->itk_space;
6342 if (*task_info_count < TASK_DEBUG_INFO_INTERNAL_COUNT) {
6343 error = KERN_NOT_SUPPORTED;
6344 break;
6345 }
6346
6347 if (task_info_out == NULL) {
6348 error = KERN_INVALID_ARGUMENT;
6349 break;
6350 }
6351 dbg_info = (task_debug_info_internal_t) task_info_out;
6352 dbg_info->ipc_space_size = 0;
6353
6354 if (space) {
6355 smr_ipc_enter();
6356 ipc_entry_table_t table = smr_entered_load(&space->is_table);
6357 if (table) {
6358 dbg_info->ipc_space_size =
6359 ipc_entry_table_count(table);
6360 }
6361 smr_ipc_leave();
6362 }
6363
6364 dbg_info->suspend_count = task->suspend_count;
6365
6366 error = KERN_SUCCESS;
6367 *task_info_count = TASK_DEBUG_INFO_INTERNAL_COUNT;
6368 break;
6369 #else /* DEVELOPMENT || DEBUG */
6370 error = KERN_NOT_SUPPORTED;
6371 break;
6372 #endif /* DEVELOPMENT || DEBUG */
6373 }
6374 case TASK_SUSPEND_STATS_INFO:
6375 {
6376 #if CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG)
6377 if (*task_info_count < TASK_SUSPEND_STATS_INFO_COUNT || task_info_out == NULL) {
6378 error = KERN_INVALID_ARGUMENT;
6379 break;
6380 }
6381 error = _task_get_suspend_stats_locked(task, (task_suspend_stats_t)task_info_out);
6382 *task_info_count = TASK_SUSPEND_STATS_INFO_COUNT;
6383 break;
6384 #else /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6385 error = KERN_NOT_SUPPORTED;
6386 break;
6387 #endif /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6388 }
6389 case TASK_SUSPEND_SOURCES_INFO:
6390 {
6391 #if CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG)
6392 if (*task_info_count < TASK_SUSPEND_SOURCES_INFO_COUNT || task_info_out == NULL) {
6393 error = KERN_INVALID_ARGUMENT;
6394 break;
6395 }
6396 error = _task_get_suspend_sources_locked(task, (task_suspend_source_t)task_info_out);
6397 *task_info_count = TASK_SUSPEND_SOURCES_INFO_COUNT;
6398 break;
6399 #else /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6400 error = KERN_NOT_SUPPORTED;
6401 break;
6402 #endif /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6403 }
6404 case TASK_SECURITY_CONFIG_INFO:
6405 {
6406 task_security_config_info_t security_config;
6407
6408 if (*task_info_count < TASK_SECURITY_CONFIG_INFO_COUNT) {
6409 error = KERN_INVALID_ARGUMENT;
6410 break;
6411 }
6412
6413 security_config = (task_security_config_info_t)task_info_out;
6414 security_config->config = (uint32_t)task->security_config.value;
6415
6416 *task_info_count = TASK_SECURITY_CONFIG_INFO_COUNT;
6417 break;
6418 }
6419 case TASK_IPC_SPACE_POLICY_INFO:
6420 {
6421 task_ipc_space_policy_info_t ipc_space_config;
6422
6423 if (*task_info_count < TASK_IPC_SPACE_POLICY_INFO_COUNT) {
6424 error = KERN_INVALID_ARGUMENT;
6425 break;
6426 }
6427
6428 ipc_space_config = (task_ipc_space_policy_info_t)task_info_out;
6429 struct ipc_space *space = task->itk_space;
6430 if (space) {
6431 ipc_space_config->space_policy = (uint32_t)space->is_policy;
6432 *task_info_count = TASK_IPC_SPACE_POLICY_INFO_COUNT;
6433 }
6434 break;
6435 }
6436 default:
6437 error = KERN_INVALID_ARGUMENT;
6438 }
6439
6440 task_unlock(task);
6441 return error;
6442 }
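/*
 * Illustrative usage sketch (user-space style, not part of this file):
 *
 *	mach_task_basic_info_data_t info;
 *	mach_msg_type_number_t count = MACH_TASK_BASIC_INFO_COUNT;
 *	kern_return_t kr = task_info(mach_task_self(), MACH_TASK_BASIC_INFO,
 *	    (task_info_t)&info, &count);
 *
 * Callers initialize the count to the flavor's *_COUNT; on return it holds
 * the number of natural_t words actually filled in.
 */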
6443
6444 /*
6445 * task_info_from_user
6446 *
6447 * When task_info is called from user space,
6448 * this function is executed as the MIG server side
6449 * instead of calling directly into task_info.
6450 * This makes it possible to perform additional security
6451 * checks on task_port.
6452 *
6453 * In the case of TASK_DYLD_INFO, we require the more
6454 * privileged task_read_port, not the less-privileged task_name_port.
6455 *
6456 */
6457 kern_return_t
6458 task_info_from_user(
6459 mach_port_t task_port,
6460 task_flavor_t flavor,
6461 task_info_t task_info_out,
6462 mach_msg_type_number_t *task_info_count)
6463 {
6464 task_t task;
6465 kern_return_t ret;
6466
6467 if (flavor == TASK_DYLD_INFO) {
6468 task = convert_port_to_task_read(task_port);
6469 } else {
6470 task = convert_port_to_task_name(task_port);
6471 }
6472
6473 ret = task_info(task, flavor, task_info_out, task_info_count);
6474
6475 task_deallocate(task);
6476
6477 return ret;
6478 }
6479
6480 /*
6481 * Routine: task_dyld_process_info_update_helper
6482 *
6483 * Release send rights in release_ports.
6484 *
6485 * If no active ports are found in the task's dyld notifier array, unset the magic value
6486 * in user space to indicate so.
6487 *
6488 * Condition:
6489 * task's itk_lock is locked, and is unlocked upon return.
6490 * Global g_dyldinfo_mtx is locked, and is unlocked upon return.
6491 */
6492 void
6493 task_dyld_process_info_update_helper(
6494 task_t task,
6495 size_t active_count,
6496 vm_map_address_t magic_addr, /* a userspace address */
6497 ipc_port_t *release_ports,
6498 size_t release_count)
6499 {
6500 void *notifiers_ptr = NULL;
6501
6502 assert(release_count <= DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT);
6503
6504 if (active_count == 0) {
6505 assert(task->itk_dyld_notify != NULL);
6506 notifiers_ptr = task->itk_dyld_notify;
6507 task->itk_dyld_notify = NULL;
6508 itk_unlock(task);
6509
6510 kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
6511 (void)copyoutmap_atomic32(task->map, MACH_PORT_NULL, magic_addr); /* unset magic */
6512 } else {
6513 itk_unlock(task);
6514 (void)copyoutmap_atomic32(task->map, (mach_port_name_t)DYLD_PROCESS_INFO_NOTIFY_MAGIC,
6515 magic_addr); /* reset magic */
6516 }
6517
6518 lck_mtx_unlock(&g_dyldinfo_mtx);
6519
6520 for (size_t i = 0; i < release_count; i++) {
6521 ipc_port_release_send(release_ports[i]);
6522 }
6523 }
6524
6525 /*
6526 * Routine: task_dyld_process_info_notify_register
6527 *
6528 * Insert a send right into the target task's itk_dyld_notify array. Allocate kernel
6529 * memory for the array if it's the first port to be registered. Also cleanup
6530 * any dead rights found in the array.
6531 *
6532 * Consumes sright if returns KERN_SUCCESS, otherwise MIG will destroy it.
6533 *
6534 * Args:
6535 * task: Target task for the registration.
6536 * sright: A send right.
6537 *
6538 * Returns:
6539 * KERN_SUCCESS: Registration succeeded.
6540 * KERN_INVALID_TASK: task is invalid.
6541 * KERN_INVALID_RIGHT: sright is invalid.
6542 * KERN_DENIED: Security policy denied this call.
6543 * KERN_RESOURCE_SHORTAGE: Kernel memory allocation failed.
6544 * KERN_NO_SPACE: No available notifier port slot left for this task.
6545 * KERN_RIGHT_EXISTS: The notifier port is already registered and active.
6546 *
6547 * Other error code see task_info().
6548 *
6549 * See Also:
6550 * task_dyld_process_info_notify_get_trap() in mach_kernelrpc.c
6551 */
6552 kern_return_t
6553 task_dyld_process_info_notify_register(
6554 task_t task,
6555 ipc_port_t sright)
6556 {
6557 struct task_dyld_info dyld_info;
6558 mach_msg_type_number_t info_count = TASK_DYLD_INFO_COUNT;
6559 ipc_port_t release_ports[DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT];
6560 uint32_t release_count = 0, active_count = 0;
6561 mach_vm_address_t ports_addr; /* a user space address */
6562 kern_return_t kr;
6563 boolean_t right_exists = false;
6564 ipc_port_t *notifiers_ptr = NULL;
6565 ipc_port_t *portp;
6566
6567 if (task == TASK_NULL || task == kernel_task) {
6568 return KERN_INVALID_TASK;
6569 }
6570
6571 if (!ipc_can_stash_naked_send(sright)) {
6572 return KERN_INVALID_RIGHT;
6573 }
6574
6575 if (!IP_VALID(sright)) {
6576 return KERN_INVALID_RIGHT;
6577 }
6578
6579 #if CONFIG_MACF
6580 if (mac_task_check_dyld_process_info_notify_register()) {
6581 return KERN_DENIED;
6582 }
6583 #endif
6584
6585 kr = task_info(task, TASK_DYLD_INFO, (task_info_t)&dyld_info, &info_count);
6586 if (kr) {
6587 return kr;
6588 }
6589
6590 if (dyld_info.all_image_info_format == TASK_DYLD_ALL_IMAGE_INFO_32) {
6591 ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6592 offsetof(struct user32_dyld_all_image_infos, notifyMachPorts));
6593 } else {
6594 ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6595 offsetof(struct user64_dyld_all_image_infos, notifyMachPorts));
6596 }
6597
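/*
 * Allocate the fixed-size notifier array with no locks held, since kalloc
 * may block; re-check under the locks below and retry the allocation if
 * the array was torn down in the meantime.
 */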
6598 retry:
6599 if (task->itk_dyld_notify == NULL) {
6600 notifiers_ptr = kalloc_type(ipc_port_t,
6601 DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT,
6602 Z_WAITOK | Z_ZERO | Z_NOFAIL);
6603 }
6604
6605 lck_mtx_lock(&g_dyldinfo_mtx);
6606 itk_lock(task);
6607
6608 if (task->itk_dyld_notify == NULL) {
6609 if (notifiers_ptr == NULL) {
6610 itk_unlock(task);
6611 lck_mtx_unlock(&g_dyldinfo_mtx);
6612 goto retry;
6613 }
6614 task->itk_dyld_notify = notifiers_ptr;
6615 notifiers_ptr = NULL;
6616 }
6617
6618 assert(task->itk_dyld_notify != NULL);
6619 /* First pass: clear dead names and check for duplicate registration */
6620 for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
6621 portp = &task->itk_dyld_notify[slot];
6622 if (*portp != IPC_PORT_NULL && !ip_active(*portp)) {
6623 release_ports[release_count++] = *portp;
6624 *portp = IPC_PORT_NULL;
6625 } else if (*portp == sright) {
6626 /* the port is already registered and is active */
6627 right_exists = true;
6628 }
6629
6630 if (*portp != IPC_PORT_NULL) {
6631 active_count++;
6632 }
6633 }
6634
6635 if (right_exists) {
6636 /* skip second pass */
6637 kr = KERN_RIGHT_EXISTS;
6638 goto out;
6639 }
6640
6641 /* Second pass: register the port */
6642 kr = KERN_NO_SPACE;
6643 for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
6644 portp = &task->itk_dyld_notify[slot];
6645 if (*portp == IPC_PORT_NULL) {
6646 *portp = sright;
6647 active_count++;
6648 kr = KERN_SUCCESS;
6649 break;
6650 }
6651 }
6652
6653 out:
6654 assert(active_count > 0);
6655
6656 task_dyld_process_info_update_helper(task, active_count,
6657 (vm_map_address_t)ports_addr, release_ports, release_count);
6658 /* itk_lock, g_dyldinfo_mtx are unlocked upon return */
6659
6660 kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
6661
6662 return kr;
6663 }
6664
6665 /*
6666 * Routine: task_dyld_process_info_notify_deregister
6667 *
6668 * Remove a send right in target task's itk_dyld_notify array matching the receive
6669 * right name passed in. Deallocate kernel memory for the array if it's the last port to
6670 * be deregistered, or all ports have died. Also cleanup any dead rights found in the array.
6671 *
6672 * Does not consume any reference.
6673 *
6674 * Args:
6675 * task: Target task for the deregistration.
6676 * rcv_name: The name denoting the receive right in caller's space.
6677 *
6678 * Returns:
6679 * KERN_SUCCESS: A matching entry was found and deregistration succeeded.
6680 * KERN_INVALID_TASK: task is invalid.
6681 * KERN_INVALID_NAME: name is invalid.
6682 * KERN_DENIED: Security policy denied this call.
6683 * KERN_FAILURE: A matching entry is not found.
6684 * KERN_INVALID_RIGHT: The name passed in does not represent a valid rcv right.
6685 *
6686 * Other error code see task_info().
6687 *
6688 * See Also:
6689 * task_dyld_process_info_notify_get_trap() in mach_kernelrpc.c
6690 */
6691 kern_return_t
6692 task_dyld_process_info_notify_deregister(
6693 task_t task,
6694 mach_port_name_t rcv_name)
6695 {
6696 struct task_dyld_info dyld_info;
6697 mach_msg_type_number_t info_count = TASK_DYLD_INFO_COUNT;
6698 ipc_port_t release_ports[DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT];
6699 uint32_t release_count = 0, active_count = 0;
6700 boolean_t port_found = false;
6701 mach_vm_address_t ports_addr; /* a user space address */
6702 ipc_port_t sright;
6703 kern_return_t kr;
6704 ipc_port_t *portp;
6705
6706 if (task == TASK_NULL || task == kernel_task) {
6707 return KERN_INVALID_TASK;
6708 }
6709
6710 if (!MACH_PORT_VALID(rcv_name)) {
6711 return KERN_INVALID_NAME;
6712 }
6713
6714 #if CONFIG_MACF
6715 if (mac_task_check_dyld_process_info_notify_register()) {
6716 return KERN_DENIED;
6717 }
6718 #endif
6719
6720 kr = task_info(task, TASK_DYLD_INFO, (task_info_t)&dyld_info, &info_count);
6721 if (kr) {
6722 return kr;
6723 }
6724
6725 if (dyld_info.all_image_info_format == TASK_DYLD_ALL_IMAGE_INFO_32) {
6726 ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6727 offsetof(struct user32_dyld_all_image_infos, notifyMachPorts));
6728 } else {
6729 ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6730 offsetof(struct user64_dyld_all_image_infos, notifyMachPorts));
6731 }
6732
6733 kr = ipc_port_translate_receive(current_space(), rcv_name, &sright); /* does not produce port ref */
6734 if (kr) {
6735 return KERN_INVALID_RIGHT;
6736 }
6737
6738 ip_reference(sright);
6739 ip_mq_unlock(sright);
6740
6741 assert(sright != IPC_PORT_NULL);
6742
6743 lck_mtx_lock(&g_dyldinfo_mtx);
6744 itk_lock(task);
6745
6746 if (task->itk_dyld_notify == NULL) {
6747 itk_unlock(task);
6748 lck_mtx_unlock(&g_dyldinfo_mtx);
6749 ip_release(sright);
6750 return KERN_FAILURE;
6751 }
6752
6753 for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
6754 portp = &task->itk_dyld_notify[slot];
6755 if (*portp == sright) {
6756 release_ports[release_count++] = *portp;
6757 *portp = IPC_PORT_NULL;
6758 port_found = true;
6759 } else if ((*portp != IPC_PORT_NULL && !ip_active(*portp))) {
6760 release_ports[release_count++] = *portp;
6761 *portp = IPC_PORT_NULL;
6762 }
6763
6764 if (*portp != IPC_PORT_NULL) {
6765 active_count++;
6766 }
6767 }
6768
6769 task_dyld_process_info_update_helper(task, active_count,
6770 (vm_map_address_t)ports_addr, release_ports, release_count);
6771 /* itk_lock, g_dyldinfo_mtx are unlocked upon return */
6772
6773 ip_release(sright);
6774
6775 return port_found ? KERN_SUCCESS : KERN_FAILURE;
6776 }
6777
6778 /*
6779 * task_power_info
6780 *
6781 * Returns power stats for the task.
6782 * Note: Called with task locked.
6783 */
6784 void
6785 task_power_info_locked(
6786 task_t task,
6787 task_power_info_t info,
6788 gpu_energy_data_t ginfo,
6789 task_power_info_v2_t infov2,
6790 struct task_power_info_extra *extra_info)
6791 {
6792 thread_t thread;
6793 ledger_amount_t tmp;
6794
6795 uint64_t runnable_time_sum = 0;
6796
6797 task_lock_assert_owned(task);
6798
6799 ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
6800 (ledger_amount_t *)&info->task_interrupt_wakeups, &tmp);
6801 ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
6802 (ledger_amount_t *)&info->task_platform_idle_wakeups, &tmp);
6803
6804 info->task_timer_wakeups_bin_1 = task->task_timer_wakeups_bin_1;
6805 info->task_timer_wakeups_bin_2 = task->task_timer_wakeups_bin_2;
6806
6807 struct recount_usage usage = { 0 };
6808 struct recount_usage usage_perf = { 0 };
6809 recount_task_usage_perf_only(task, &usage, &usage_perf);
6810
6811 info->total_user = usage.ru_metrics[RCT_LVL_USER].rm_time_mach;
6812 info->total_system = recount_usage_system_time_mach(&usage);
6813 runnable_time_sum = task->total_runnable_time;
6814
6815 if (ginfo) {
6816 ginfo->task_gpu_utilisation = task->task_gpu_ns;
6817 }
6818
6819 if (infov2) {
6820 infov2->task_ptime = recount_usage_time_mach(&usage_perf);
6821 infov2->task_pset_switches = task->ps_switch;
6822 #if CONFIG_PERVASIVE_ENERGY
6823 infov2->task_energy = usage.ru_energy_nj;
6824 #endif /* CONFIG_PERVASIVE_ENERGY */
6825 }
6826
6827 queue_iterate(&task->threads, thread, thread_t, task_threads) {
6828 spl_t x;
6829
6830 if (thread->options & TH_OPT_IDLE_THREAD) {
6831 continue;
6832 }
6833
6834 x = splsched();
6835 thread_lock(thread);
6836
6837 info->task_timer_wakeups_bin_1 += thread->thread_timer_wakeups_bin_1;
6838 info->task_timer_wakeups_bin_2 += thread->thread_timer_wakeups_bin_2;
6839
6840 if (infov2) {
6841 infov2->task_pset_switches += thread->ps_switch;
6842 }
6843
6844 runnable_time_sum += timer_grab(&thread->runnable_timer);
6845
6846 if (ginfo) {
6847 ginfo->task_gpu_utilisation += ml_gpu_stat(thread);
6848 }
6849 thread_unlock(thread);
6850 splx(x);
6851 }
6852
6853 if (extra_info) {
6854 extra_info->runnable_time = runnable_time_sum;
6855 #if CONFIG_PERVASIVE_CPI
6856 extra_info->cycles = recount_usage_cycles(&usage);
6857 extra_info->instructions = recount_usage_instructions(&usage);
6858 extra_info->pcycles = recount_usage_cycles(&usage_perf);
6859 extra_info->pinstructions = recount_usage_instructions(&usage_perf);
6860 extra_info->user_ptime = usage_perf.ru_metrics[RCT_LVL_USER].rm_time_mach;
6861 extra_info->system_ptime = recount_usage_system_time_mach(&usage_perf);
6862 #endif // CONFIG_PERVASIVE_CPI
6863 #if CONFIG_PERVASIVE_ENERGY
6864 extra_info->energy = usage.ru_energy_nj;
6865 extra_info->penergy = usage_perf.ru_energy_nj;
6866 #endif // CONFIG_PERVASIVE_ENERGY
6867 #if RECOUNT_SECURE_METRICS
6868 if (PE_i_can_has_debugger(NULL)) {
6869 extra_info->secure_time = usage.ru_metrics[RCT_LVL_SECURE].rm_time_mach;
6870 extra_info->secure_ptime = usage_perf.ru_metrics[RCT_LVL_SECURE].rm_time_mach;
6871 }
6872 #endif // RECOUNT_SECURE_METRICS
6873 }
6874 }
6875
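/*
 * A sketch of the expected calling convention (illustrative; the caller
 * must hold the task lock, and the optional out-parameters may be NULL):
 *
 *	struct task_power_info info = { 0 };
 *	task_lock(task);
 *	task_power_info_locked(task, &info, NULL, NULL, NULL);
 *	task_unlock(task);
 */
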
6876 /*
6877 * task_gpu_utilisation
6878 *
6879 * Returns the total GPU time used by all the threads of the task
6880 * (both dead and alive)
6881 */
6882 uint64_t
6883 task_gpu_utilisation(
6884 task_t task)
6885 {
6886 uint64_t gpu_time = 0;
6887 #if defined(__x86_64__)
6888 thread_t thread;
6889
6890 task_lock(task);
6891 gpu_time += task->task_gpu_ns;
6892
6893 queue_iterate(&task->threads, thread, thread_t, task_threads) {
6894 spl_t x;
6895 x = splsched();
6896 thread_lock(thread);
6897 gpu_time += ml_gpu_stat(thread);
6898 thread_unlock(thread);
6899 splx(x);
6900 }
6901
6902 task_unlock(task);
6903 #else /* defined(__x86_64__) */
6904 /* silence compiler warning */
6905 (void)task;
6906 #endif /* defined(__x86_64__) */
6907 return gpu_time;
6908 }
6909
6910 /* This function updates the cpu time in the arrays for each
6911 * effective and requested QoS class
6912 */
6913 void
6914 task_update_cpu_time_qos_stats(
6915 task_t task,
6916 uint64_t *eqos_stats,
6917 uint64_t *rqos_stats)
6918 {
6919 if (!eqos_stats && !rqos_stats) {
6920 return;
6921 }
6922
6923 task_lock(task);
6924 thread_t thread;
6925 queue_iterate(&task->threads, thread, thread_t, task_threads) {
6926 if (thread->options & TH_OPT_IDLE_THREAD) {
6927 continue;
6928 }
6929
6930 thread_update_qos_cpu_time(thread);
6931 }
6932
6933 if (eqos_stats) {
6934 eqos_stats[THREAD_QOS_DEFAULT] += task->cpu_time_eqos_stats.cpu_time_qos_default;
6935 eqos_stats[THREAD_QOS_MAINTENANCE] += task->cpu_time_eqos_stats.cpu_time_qos_maintenance;
6936 eqos_stats[THREAD_QOS_BACKGROUND] += task->cpu_time_eqos_stats.cpu_time_qos_background;
6937 eqos_stats[THREAD_QOS_UTILITY] += task->cpu_time_eqos_stats.cpu_time_qos_utility;
6938 eqos_stats[THREAD_QOS_LEGACY] += task->cpu_time_eqos_stats.cpu_time_qos_legacy;
6939 eqos_stats[THREAD_QOS_USER_INITIATED] += task->cpu_time_eqos_stats.cpu_time_qos_user_initiated;
6940 eqos_stats[THREAD_QOS_USER_INTERACTIVE] += task->cpu_time_eqos_stats.cpu_time_qos_user_interactive;
6941 }
6942
6943 if (rqos_stats) {
6944 rqos_stats[THREAD_QOS_DEFAULT] += task->cpu_time_rqos_stats.cpu_time_qos_default;
6945 rqos_stats[THREAD_QOS_MAINTENANCE] += task->cpu_time_rqos_stats.cpu_time_qos_maintenance;
6946 rqos_stats[THREAD_QOS_BACKGROUND] += task->cpu_time_rqos_stats.cpu_time_qos_background;
6947 rqos_stats[THREAD_QOS_UTILITY] += task->cpu_time_rqos_stats.cpu_time_qos_utility;
6948 rqos_stats[THREAD_QOS_LEGACY] += task->cpu_time_rqos_stats.cpu_time_qos_legacy;
6949 rqos_stats[THREAD_QOS_USER_INITIATED] += task->cpu_time_rqos_stats.cpu_time_qos_user_initiated;
6950 rqos_stats[THREAD_QOS_USER_INTERACTIVE] += task->cpu_time_rqos_stats.cpu_time_qos_user_interactive;
6951 }
6952
6953 task_unlock(task);
6954 }
6955
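/*
 * Callers pass accumulation arrays indexed by QoS class; a minimal sketch
 * (illustrative; THREAD_QOS_LAST bounds the indices written above):
 *
 *	uint64_t eqos[THREAD_QOS_LAST] = { 0 };
 *	uint64_t rqos[THREAD_QOS_LAST] = { 0 };
 *	task_update_cpu_time_qos_stats(task, eqos, rqos);
 */
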
6956 kern_return_t
6957 task_purgable_info(
6958 task_t task,
6959 task_purgable_info_t *stats)
6960 {
6961 if (task == TASK_NULL || stats == NULL) {
6962 return KERN_INVALID_ARGUMENT;
6963 }
6964 /* Take task reference */
6965 task_reference(task);
6966 vm_purgeable_stats((vm_purgeable_info_t)stats, task);
6967 /* Drop task reference */
6968 task_deallocate(task);
6969 return KERN_SUCCESS;
6970 }
6971
6972 void
6973 task_vtimer_set(
6974 task_t task,
6975 integer_t which)
6976 {
6977 thread_t thread;
6978 spl_t x;
6979
6980 task_lock(task);
6981
6982 task->vtimers |= which;
6983
6984 switch (which) {
6985 case TASK_VTIMER_USER:
6986 queue_iterate(&task->threads, thread, thread_t, task_threads) {
6987 x = splsched();
6988 thread_lock(thread);
6989 struct recount_times_mach times = recount_thread_times(thread);
6990 thread->vtimer_user_save = times.rtm_user;
6991 thread_unlock(thread);
6992 splx(x);
6993 }
6994 break;
6995
6996 case TASK_VTIMER_PROF:
6997 queue_iterate(&task->threads, thread, thread_t, task_threads) {
6998 x = splsched();
6999 thread_lock(thread);
7000 thread->vtimer_prof_save = recount_thread_time_mach(thread);
7001 thread_unlock(thread);
7002 splx(x);
7003 }
7004 break;
7005
7006 case TASK_VTIMER_RLIM:
7007 queue_iterate(&task->threads, thread, thread_t, task_threads) {
7008 x = splsched();
7009 thread_lock(thread);
7010 thread->vtimer_rlim_save = recount_thread_time_mach(thread);
7011 thread_unlock(thread);
7012 splx(x);
7013 }
7014 break;
7015 }
7016
7017 task_unlock(task);
7018 }
7019
7020 void
7021 task_vtimer_clear(
7022 task_t task,
7023 integer_t which)
7024 {
7025 task_lock(task);
7026
7027 task->vtimers &= ~which;
7028
7029 task_unlock(task);
7030 }
7031
7032 void
7033 task_vtimer_update(
7034 __unused
7035 task_t task,
7036 integer_t which,
7037 uint32_t *microsecs)
7038 {
7039 thread_t thread = current_thread();
7040 uint32_t tdelt = 0;
7041 clock_sec_t secs = 0;
7042 uint64_t tsum;
7043
7044 assert(task == current_task());
7045
7046 spl_t s = splsched();
7047 thread_lock(thread);
7048
7049 if ((task->vtimers & which) != (uint32_t)which) {
7050 thread_unlock(thread);
7051 splx(s);
7052 return;
7053 }
7054
7055 switch (which) {
7056 case TASK_VTIMER_USER:;
7057 struct recount_times_mach times = recount_thread_times(thread);
7058 tsum = times.rtm_user;
7059 tdelt = (uint32_t)(tsum - thread->vtimer_user_save);
7060 thread->vtimer_user_save = tsum;
7061 absolutetime_to_microtime(tdelt, &secs, microsecs);
7062 break;
7063
7064 case TASK_VTIMER_PROF:
7065 tsum = recount_current_thread_time_mach();
7066 tdelt = (uint32_t)(tsum - thread->vtimer_prof_save);
7067 absolutetime_to_microtime(tdelt, &secs, microsecs);
7068 /* if the time delta is smaller than a usec, ignore */
7069 if (*microsecs != 0) {
7070 thread->vtimer_prof_save = tsum;
7071 }
7072 break;
7073
7074 case TASK_VTIMER_RLIM:
7075 tsum = recount_current_thread_time_mach();
7076 tdelt = (uint32_t)(tsum - thread->vtimer_rlim_save);
7077 thread->vtimer_rlim_save = tsum;
7078 absolutetime_to_microtime(tdelt, &secs, microsecs);
7079 break;
7080 }
7081
7082 thread_unlock(thread);
7083 splx(s);
7084 }
7085
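/*
 * The vtimer calls are used together; a sketch of a setitimer()-style flow
 * (illustrative; task_vtimer_update() must run on a thread of the task,
 * i.e. task == current_task()):
 *
 *	uint32_t usecs = 0;
 *	task_vtimer_set(task, TASK_VTIMER_USER);   // snapshot per-thread user time
 *	...
 *	task_vtimer_update(task, TASK_VTIMER_USER, &usecs); // microseconds since snapshot
 *	task_vtimer_clear(task, TASK_VTIMER_USER);
 */
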
7086 uint64_t
7087 get_task_dispatchqueue_offset(
7088 task_t task)
7089 {
7090 return task->dispatchqueue_offset;
7091 }
7092
7093 void
7094 task_synchronizer_destroy_all(task_t task)
7095 {
7096 /*
7097 * Destroy owned semaphores
7098 */
7099 semaphore_destroy_all(task);
7100 }
7101
7102 /*
7103 * Install default (machine-dependent) initial thread state
7104 * on the task. Subsequent thread creation will have this initial
7105 * state set on the thread by machine_thread_inherit_taskwide().
7106 * Flavors and structures are exactly the same as those to thread_set_state()
7107 */
7108 kern_return_t
7109 task_set_state(
7110 task_t task,
7111 int flavor,
7112 thread_state_t state,
7113 mach_msg_type_number_t state_count)
7114 {
7115 kern_return_t ret;
7116
7117 if (task == TASK_NULL) {
7118 return KERN_INVALID_ARGUMENT;
7119 }
7120
7121 task_lock(task);
7122
7123 if (!task->active) {
7124 task_unlock(task);
7125 return KERN_FAILURE;
7126 }
7127
7128 ret = machine_task_set_state(task, flavor, state, state_count);
7129
7130 task_unlock(task);
7131 return ret;
7132 }
7133
7134 /*
7135 * Examine the default (machine-dependent) initial thread state
7136 * on the task, as set by task_set_state(). Flavors and structures
7137 * are exactly the same as those passed to thread_get_state().
7138 */
7139 kern_return_t
7140 task_get_state(
7141 task_t task,
7142 int flavor,
7143 thread_state_t state,
7144 mach_msg_type_number_t *state_count)
7145 {
7146 kern_return_t ret;
7147
7148 if (task == TASK_NULL) {
7149 return KERN_INVALID_ARGUMENT;
7150 }
7151
7152 task_lock(task);
7153
7154 if (!task->active) {
7155 task_unlock(task);
7156 return KERN_FAILURE;
7157 }
7158
7159 ret = machine_task_get_state(task, flavor, state, state_count);
7160
7161 task_unlock(task);
7162 return ret;
7163 }
7164
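/*
 * A round-trip sketch (illustrative; flavors and state structures are
 * machine-dependent, shown here for arm64):
 *
 *	arm_thread_state64_t ts;
 *	mach_msg_type_number_t count = ARM_THREAD_STATE64_COUNT;
 *	kern_return_t kr = task_get_state(task, ARM_THREAD_STATE64,
 *	    (thread_state_t)&ts, &count);
 *	if (kr == KERN_SUCCESS) {
 *		kr = task_set_state(task, ARM_THREAD_STATE64,
 *		    (thread_state_t)&ts, count);
 *	}
 */
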
7165
7166 static kern_return_t __attribute__((noinline, not_tail_called))
7167 PROC_VIOLATED_GUARD__SEND_EXC_GUARD(
7168 mach_exception_code_t code,
7169 mach_exception_subcode_t subcode,
7170 void *reason,
7171 boolean_t backtrace_only)
7172 {
7173 #ifdef MACH_BSD
7174 if (1 == proc_selfpid()) {
7175 return KERN_NOT_SUPPORTED; // initproc is immune
7176 }
7177 #endif
7178 mach_exception_data_type_t codes[EXCEPTION_CODE_MAX] = {
7179 [0] = code,
7180 [1] = subcode,
7181 };
7182 task_t task = current_task();
7183 kern_return_t kr;
7184 void *bsd_info = get_bsdtask_info(task);
7185
7186 /* (See jetsam-related comments below) */
7187
7188 proc_memstat_skip(bsd_info, TRUE);
7189 kr = task_enqueue_exception_with_corpse(task, EXC_GUARD, codes, 2, reason, backtrace_only);
7190 proc_memstat_skip(bsd_info, FALSE);
7191 return kr;
7192 }
7193
7194 kern_return_t
7195 task_violated_guard(
7196 mach_exception_code_t code,
7197 mach_exception_subcode_t subcode,
7198 void *reason,
7199 bool backtrace_only)
7200 {
7201 return PROC_VIOLATED_GUARD__SEND_EXC_GUARD(code, subcode, reason, backtrace_only);
7202 }
7203
7204
7205 #if CONFIG_MEMORYSTATUS
7206
7207 bool
7208 task_get_memlimit_is_active(task_t task)
7209 {
7210 assert(task != NULL);
7211
7212 return os_atomic_load(&task->memlimit_flags, relaxed) & TASK_MEMLIMIT_IS_ACTIVE;
7213 }
7214
7215 void
7216 task_set_memlimit_is_active(task_t task, bool memlimit_is_active)
7217 {
7218 assert(task != NULL);
7219
7220 if (memlimit_is_active) {
7221 os_atomic_or(&task->memlimit_flags, TASK_MEMLIMIT_IS_ACTIVE, relaxed);
7222 } else {
7223 os_atomic_andnot(&task->memlimit_flags, TASK_MEMLIMIT_IS_ACTIVE, relaxed);
7224 }
7225 }
7226
7227 bool
7228 task_get_memlimit_is_fatal(task_t task)
7229 {
7230 assert(task != NULL);
7231
7232 return os_atomic_load(&task->memlimit_flags, relaxed) & TASK_MEMLIMIT_IS_FATAL;
7233 }
7234
7235 void
7236 task_set_memlimit_is_fatal(task_t task, bool memlimit_is_fatal)
7237 {
7238 assert(task != NULL);
7239
7240 if (memlimit_is_fatal) {
7241 os_atomic_or(&task->memlimit_flags, TASK_MEMLIMIT_IS_FATAL, relaxed);
7242 } else {
7243 os_atomic_andnot(&task->memlimit_flags, TASK_MEMLIMIT_IS_FATAL, relaxed);
7244 }
7245 }
7246
7247 uint64_t
7248 task_get_dirty_start(task_t task)
7249 {
7250 return task->memstat_dirty_start;
7251 }
7252
7253 void
7254 task_set_dirty_start(task_t task, uint64_t start)
7255 {
7256 task_lock(task);
7257 task->memstat_dirty_start = start;
7258 task_unlock(task);
7259 }
7260
7261 bool
7262 task_set_exc_resource_bit(task_t task, bool memlimit_is_active)
7263 {
7264 /*
7265 * Sets the specified EXC_RESOURCE bit if not set already, and returns
7266 * true if the bit was changed (i.e. it was 0 before).
7267 */
7268
7269 task_memlimit_flags_t memlimit_orig;
7270 task_memlimit_flags_t bit =
7271 memlimit_is_active ?
7272 TASK_MEMLIMIT_ACTIVE_EXC_RESOURCE :
7273 TASK_MEMLIMIT_INACTIVE_EXC_RESOURCE;
7274
7275 memlimit_orig = os_atomic_or_orig(&task->memlimit_flags, bit, acquire);
7276
7277 return !(memlimit_orig & bit);
7278 }
7279
7280 void
7281 task_reset_triggered_exc_resource(task_t task, bool memlimit_is_active)
7282 {
7283 task_memlimit_flags_t bit =
7284 memlimit_is_active ?
7285 TASK_MEMLIMIT_ACTIVE_EXC_RESOURCE :
7286 TASK_MEMLIMIT_INACTIVE_EXC_RESOURCE;
7287
7288 os_atomic_andnot(&task->memlimit_flags, bit, relaxed);
7289 }
7290
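/*
 * task_set_exc_resource_bit() and task_reset_triggered_exc_resource()
 * implement a once-per-limit latch; a sketch of the intended use
 * (illustrative):
 *
 *	if (task_set_exc_resource_bit(task, memlimit_is_active)) {
 *		// first violation of this active/inactive limit: deliver EXC_RESOURCE
 *	}
 *	// later, when a new limit is installed:
 *	task_reset_triggered_exc_resource(task, memlimit_is_active);
 */
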
7291 bool
7292 task_get_jetsam_realtime_audio(task_t task)
7293 {
7294 return task->task_jetsam_realtime_audio;
7295 }
7296
7297 void
7298 task_set_jetsam_realtime_audio(task_t task, bool realtime_audio)
7299 {
7300 task_lock(task);
7301 task->task_jetsam_realtime_audio = realtime_audio;
7302 task_unlock(task);
7303 }
7304
7305 #define HWM_USERCORE_MINSPACE 250 // free space (in MB) required *after* core file creation
7306
7307 void __attribute__((noinline))
7308 PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, send_exec_resource_options_t exception_options)
7309 {
7310 task_t task = current_task();
7311 int pid = 0;
7312 const char *procname = "unknown";
7313 const char *reason = "high watermark";
7314 mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
7315 boolean_t send_sync_exc_resource = FALSE;
7316 void *cur_bsd_info = get_bsdtask_info(current_task());
7317 int flavor = FLAVOR_HIGH_WATERMARK;
7318
7319 #ifdef MACH_BSD
7320 pid = proc_selfpid();
7321
7322 if (pid == 1) {
7323 /*
7324 * Cannot have ReportCrash analyzing
7325 * a suspended initproc.
7326 */
7327 return;
7328 }
7329
7330 if (cur_bsd_info != NULL) {
7331 procname = proc_name_address(cur_bsd_info);
7332 send_sync_exc_resource = proc_send_synchronous_EXC_RESOURCE(cur_bsd_info);
7333 }
7334 #endif
7335 #if CONFIG_COREDUMP
7336 if (hwm_user_cores) {
7337 int error;
7338 uint64_t starttime, end;
7339 clock_sec_t secs = 0;
7340 uint32_t microsecs = 0;
7341
7342 starttime = mach_absolute_time();
7343 /*
7344 * Trigger a coredump of this process. Don't proceed unless we know we won't
7345 * be filling up the disk; and ignore the core size resource limit for this
7346 * core file.
7347 */
7348 if ((error = coredump(cur_bsd_info, HWM_USERCORE_MINSPACE, COREDUMP_IGNORE_ULIMIT)) != 0) {
7349 printf("couldn't take coredump of %s[%d]: %d\n", procname, pid, error);
7350 }
7351 /*
7352 * coredump() leaves the task suspended.
7353 */
7354 task_resume_internal(current_task());
7355
7356 end = mach_absolute_time();
7357 absolutetime_to_microtime(end - starttime, &secs, &microsecs);
7358 printf("coredump of %s[%d] taken in %d secs %d microsecs\n",
7359 proc_name_address(cur_bsd_info), pid, (int)secs, microsecs);
7360 }
7361 #endif /* CONFIG_COREDUMP */
7362
7363 if (disable_exc_resource) {
7364 printf("process %s[%d] crossed memory high watermark (%d MB); EXC_RESOURCE "
7365 "suppressed by a boot-arg.\n", procname, pid, max_footprint_mb);
7366 return;
7367 }
7368
7369 /*
7370 * For the reason string, diagnostic limit is prioritized over fatal limit,
7371 * but for the EXC_RESOURCE flavor it's the other way round.
7372 */
7373 if (exception_options & EXEC_RESOURCE_DIAGNOSTIC) {
7374 reason = "diagnostics limit";
7375 if (!(exception_options & EXEC_RESOURCE_FATAL)) {
7376 flavor = FLAVOR_DIAG_MEMLIMIT;
7377 }
7378 } else if (exception_options & EXEC_RESOURCE_CONCLAVE) {
7379 reason = "conclave limit";
7380 flavor = FLAVOR_CONCLAVE_LIMIT;
7381 }
7382
7383 printf("process %s [%d] crossed memory %s (%d MB); EXC_RESOURCE "
7384 "\n", procname, pid, reason, max_footprint_mb);
7385
7386 /*
7387 * A task that has triggered an EXC_RESOURCE, should not be
7388 * jetsammed when the device is under memory pressure. Here
7389 * we set the P_MEMSTAT_SKIP flag so that the process
7390 * will be skipped if the memorystatus_thread wakes up.
7391 *
7392 * This is a debugging aid to ensure we can get a corpse before
7393 * the jetsam thread kills the process.
7394 * Note that proc_memstat_skip is a no-op on release kernels.
7395 */
7396 proc_memstat_skip(cur_bsd_info, TRUE);
7397
7398 code[0] = code[1] = 0;
7399 EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_MEMORY);
7400 EXC_RESOURCE_ENCODE_FLAVOR(code[0], flavor);
7401 EXC_RESOURCE_HWM_ENCODE_LIMIT(code[0], max_footprint_mb);
7402 /*
7403 * Do not generate a corpse fork if the violation is a fatal one
7404 * or the process wants synchronous EXC_RESOURCE exceptions.
7405 */
7406 if ((exception_options & EXEC_RESOURCE_FATAL) || send_sync_exc_resource || !exc_via_corpse_forking) {
7407 if (exception_options & EXEC_RESOURCE_FATAL) {
7408 vm_map_set_corpse_source(task->map);
7409 }
7410
7411 /* Do not send a EXC_RESOURCE if corpse_for_fatal_memkill is set */
7412 if (send_sync_exc_resource || !corpse_for_fatal_memkill) {
7413 /*
7414 * Use the _internal_ variant so that no user-space
7415 * process can resume our task from under us.
7416 */
7417 task_suspend_internal(task);
7418 exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
7419 task_resume_internal(task);
7420 }
7421 } else {
7422 if (disable_exc_resource_during_audio && audio_active && task->task_jetsam_realtime_audio) {
7423 printf("process %s[%d] crossed memory high watermark (%d MB); EXC_RESOURCE "
7424 "suppressed due to audio playback.\n", procname, pid, max_footprint_mb);
7425 } else {
7426 task_enqueue_exception_with_corpse(task, EXC_RESOURCE,
7427 code, EXCEPTION_CODE_MAX, NULL, FALSE);
7428 }
7429 }
7430
7431 /*
7432 * After the EXC_RESOURCE has been handled, we must clear the
7433 * P_MEMSTAT_SKIP flag so that the process can again be
7434 * considered for jetsam if the memorystatus_thread wakes up.
7435 */
7436 proc_memstat_skip(cur_bsd_info, FALSE); /* clear the flag */
7437 }
7438 /*
7439 * Callback invoked when a task exceeds its physical footprint limit.
7440 */
7441 void
7442 task_footprint_exceeded(int warning, __unused const void *param0, __unused const void *param1)
7443 {
7444 ledger_amount_t enforced_limit_mb = 0;
7445 ledger_amount_t enforced_limit = 0;
7446 #if CONFIG_DEFERRED_RECLAIM
7447 ledger_amount_t current_footprint;
7448 #endif /* CONFIG_DEFERRED_RECLAIM */
7449 task_t task;
7450 send_exec_resource_is_warning is_warning = IS_NOT_WARNING;
7451 boolean_t memlimit_is_active;
7452 send_exec_resource_is_fatal memlimit_is_fatal;
7453 send_exec_resource_is_diagnostics is_diag_mem_threshold = IS_NOT_DIAGNOSTICS;
7454 if (warning == LEDGER_WARNING_DIAG_MEM_THRESHOLD) {
7455 is_diag_mem_threshold = IS_DIAGNOSTICS;
7456 is_warning = IS_WARNING;
7457 } else if (warning == LEDGER_WARNING_DIPPED_BELOW) {
7458 /*
7459 * Task memory limits only provide a warning on the way up.
7460 */
7461 return;
7462 } else if (warning == LEDGER_WARNING_ROSE_ABOVE) {
7463 /*
7464 * This task is in danger of violating a memory limit:
7465 * it has exceeded a percentage level of the limit.
7466 */
7467 is_warning = IS_WARNING;
7468 } else {
7469 /*
7470 * The task has exceeded the physical footprint limit.
7471 * This is not a warning but a true limit violation.
7472 */
7473 is_warning = IS_NOT_WARNING;
7474 }
7475
7476 task = current_task();
7477
7478 #if DEBUG || DEVELOPMENT
7479 if (is_diag_mem_threshold == IS_DIAGNOSTICS) {
7480 ledger_get_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint, &enforced_limit);
7481 } else {
7482 ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &enforced_limit);
7483 }
7484 #else /* DEBUG || DEVELOPMENT */
7485 ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &enforced_limit);
7486 #endif /* !(DEBUG || DEVELOPMENT) */
7487 #if CONFIG_DEFERRED_RECLAIM
7488 if (!is_warning && vm_deferred_reclamation_task_has_ring(task)) {
7489 /*
7490 * Task is enrolled in deferred reclamation.
7491 * Do a reclaim to ensure it's really over its limit.
7492 */
7493 vm_deferred_reclamation_task_drain(task, RECLAIM_OPTIONS_NONE);
7494 ledger_get_balance(task->ledger, task_ledgers.phys_footprint, &current_footprint);
7495 if (current_footprint < enforced_limit) {
7496 return;
7497 }
7498 }
7499 #endif /* CONFIG_DEFERRED_RECLAIM */
7500 enforced_limit_mb = enforced_limit >> 20;
7501 memlimit_is_active = task_get_memlimit_is_active(task);
7502 memlimit_is_fatal = task_get_memlimit_is_fatal(task) == FALSE ? IS_NOT_FATAL : IS_FATAL;
7503 #if DEBUG || DEVELOPMENT
7504 if (is_diag_mem_threshold == IS_NOT_DIAGNOSTICS) {
7505 task_process_crossed_limit_no_diag(task, enforced_limit_mb, memlimit_is_fatal, memlimit_is_active, is_warning);
7506 } else {
7507 task_process_crossed_limit_diag(enforced_limit_mb);
7508 }
7509 #else /* DEBUG || DEVELOPMENT */
7510 task_process_crossed_limit_no_diag(task, enforced_limit_mb, memlimit_is_fatal, memlimit_is_active, is_warning);
7511 #endif /* !(DEBUG || DEVELOPMENT) */
7512 }
7513
7514 /*
7515 * Actions to perform when a process has crossed a memory watermark or has violated a fatal limit. */
7516 static inline void
7517 task_process_crossed_limit_no_diag(task_t task, ledger_amount_t ledger_limit_size, bool memlimit_is_fatal, bool memlimit_is_active, send_exec_resource_is_warning is_warning)
7518 {
7519 send_exec_resource_options_t exception_options = 0;
7520 if (memlimit_is_fatal) {
7521 exception_options |= EXEC_RESOURCE_FATAL;
7522 }
7523 /*
7524 * If this is an actual violation (not a warning), then generate EXC_RESOURCE exception.
7525 * We only generate the exception once per process per memlimit (active/inactive limit).
7526 * To enforce this, we monitor state based on the memlimit's active/inactive attribute
7527 * and we disable it by marking that memlimit as exception triggered.
7528 */
7529 if (is_warning == IS_NOT_WARNING && task_set_exc_resource_bit(task, memlimit_is_active)) {
7530 PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND((int)ledger_limit_size, exception_options);
7531 // If it was not a diag threshold (i.e. it was a memory limit), then we do not want more signalling;
7532 // however, if it was a diag limit, the user may reload a different limit and signal the violation again.
7533 memorystatus_log_exception((int)ledger_limit_size, memlimit_is_active, memlimit_is_fatal);
7534 }
7535 memorystatus_on_ledger_footprint_exceeded(is_warning == IS_NOT_WARNING ? FALSE : TRUE, memlimit_is_active, memlimit_is_fatal);
7536 }
7537
7538 /*
7539 * Callback invoked when a task exceeds its conclave memory limit.
7540 */
7541 void
7542 task_conclave_mem_limit_exceeded(__unused int warning, __unused const void *param0, __unused const void *param1)
7543 {
7544 ledger_amount_t max_footprint = 0;
7545 ledger_amount_t max_footprint_mb = 0;
7546
7547 task_t task = current_task();
7548
7549 ledger_get_limit(task->ledger, task_ledgers.conclave_mem, &max_footprint);
7550 max_footprint_mb = max_footprint >> 20;
7551
7552 /*
7553 * The conclave memory limit is always fatal.
7554 * For the moment, we assume conclave memory isn't tied to process memory
7555 * and so this doesn't participate in the once-per-process rule above.
7556 */
7557 PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND((int)max_footprint_mb, EXEC_RESOURCE_FATAL | EXEC_RESOURCE_CONCLAVE);
7558
7559 memorystatus_on_conclave_limit_exceeded((int)max_footprint_mb);
7560 }
7561
7562 #if DEBUG || DEVELOPMENT
7563 /**
7564 * Actions to take when a process has crossed the diagnostics limit
7565 */
7566 static inline void
7567 task_process_crossed_limit_diag(ledger_amount_t ledger_limit_size)
7568 {
7569 /*
7570 * If this is an actual violation (not a warning), then generate EXC_RESOURCE exception.
7571 * In the case of the diagnostics thresholds, the exception will be signaled only once, but the
7572 * inhibit / rearm mechanism is performed at the ledger level.
7573 */
7574 send_exec_resource_options_t exception_options = EXEC_RESOURCE_DIAGNOSTIC;
7575 PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND((int)ledger_limit_size, exception_options);
7576 memorystatus_log_diag_threshold_exception((int)ledger_limit_size);
7577 }
7578 #endif
7579
7580 extern int proc_check_footprint_priv(void);
7581
7582 kern_return_t
7583 task_set_phys_footprint_limit(
7584 task_t task,
7585 int new_limit_mb,
7586 int *old_limit_mb)
7587 {
7588 kern_return_t error;
7589
7590 boolean_t memlimit_is_active;
7591 boolean_t memlimit_is_fatal;
7592
7593 if ((error = proc_check_footprint_priv())) {
7594 return KERN_NO_ACCESS;
7595 }
7596
7597 /*
7598 * This call should probably be obsoleted.
7599 * But for now, we default to current state.
7600 */
7601 memlimit_is_active = task_get_memlimit_is_active(task);
7602 memlimit_is_fatal = task_get_memlimit_is_fatal(task);
7603
7604 return task_set_phys_footprint_limit_internal(task, new_limit_mb, old_limit_mb, memlimit_is_active, memlimit_is_fatal);
7605 }
7606
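/*
 * An illustrative call (requires footprint privilege; the limit is in MB,
 * and -1 removes it):
 *
 *	int old_mb = 0;
 *	kern_return_t kr = task_set_phys_footprint_limit(task, 256, &old_mb);
 */
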
7607 /*
7608 * Set the limit of diagnostics memory consumption for a concrete task
7609 */
7610 #if CONFIG_MEMORYSTATUS
7611 #if DEVELOPMENT || DEBUG
7612 kern_return_t
7613 task_set_diag_footprint_limit(
7614 task_t task,
7615 uint64_t new_limit_mb,
7616 uint64_t *old_limit_mb)
7617 {
7618 kern_return_t error;
7619
7620 if ((error = proc_check_footprint_priv())) {
7621 return KERN_NO_ACCESS;
7622 }
7623
7624 return task_set_diag_footprint_limit_internal(task, new_limit_mb, old_limit_mb);
7625 }
7626
7627 #endif // DEVELOPMENT || DEBUG
7628 #endif // CONFIG_MEMORYSTATUS
7629
7630 kern_return_t
7631 task_convert_phys_footprint_limit(
7632 int limit_mb,
7633 int *converted_limit_mb)
7634 {
7635 if (limit_mb == -1) {
7636 /*
7637 * No limit
7638 */
7639 if (max_task_footprint != 0) {
7640 *converted_limit_mb = (int)(max_task_footprint / 1024 / 1024); /* bytes to MB */
7641 } else {
7642 *converted_limit_mb = (int)(LEDGER_LIMIT_INFINITY >> 20);
7643 }
7644 } else {
7645 /* nothing to convert */
7646 *converted_limit_mb = limit_mb;
7647 }
7648 return KERN_SUCCESS;
7649 }
7650
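/*
 * A worked example of the conversion (illustrative): with
 * max_task_footprint unset, -1 converts to LEDGER_LIMIT_INFINITY >> 20
 * ("no limit", expressed in MB); any other value passes through unchanged:
 *
 *	int mb;
 *	task_convert_phys_footprint_limit(-1, &mb);  // mb == LEDGER_LIMIT_INFINITY >> 20
 *	task_convert_phys_footprint_limit(512, &mb); // mb == 512
 */
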
7651 kern_return_t
7652 task_set_phys_footprint_limit_internal(
7653 task_t task,
7654 int new_limit_mb,
7655 int *old_limit_mb,
7656 boolean_t memlimit_is_active,
7657 boolean_t memlimit_is_fatal)
7658 {
7659 ledger_amount_t old;
7660 kern_return_t ret;
7661 #if DEVELOPMENT || DEBUG
7662 diagthreshold_check_return diag_threshold_validity;
7663 #endif
7664 ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &old);
7665
7666 if (ret != KERN_SUCCESS) {
7667 return ret;
7668 }
7669 /**
7670 * Maybe we will need to re-enable the diag threshold; let's get the value
7671 * and the current status.
7672 */
7673 #if DEVELOPMENT || DEBUG
7674 diag_threshold_validity = task_check_memorythreshold_is_valid( task, new_limit_mb, false);
7675 /**
7676 * If the footprint and diagnostics threshold are going to be the same, let's disable the threshold.
7677 */
7678 if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7679 ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7680 } else if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7681 ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7682 }
7683 #endif
7684
7685 /*
7686 * Check that limit >> 20 will not give an "unexpected" 32-bit
7687 * result. There are, however, implicit assumptions that -1 mb limit
7688 * equates to LEDGER_LIMIT_INFINITY.
7689 */
7690 assert(((old & 0xFFF0000000000000LL) == 0) || (old == LEDGER_LIMIT_INFINITY));
7691
7692 if (old_limit_mb) {
7693 *old_limit_mb = (int)(old >> 20);
7694 }
7695
7696 if (new_limit_mb == -1) {
7697 /*
7698 * Caller wishes to remove the limit.
7699 */
7700 ledger_set_limit(task->ledger, task_ledgers.phys_footprint,
7701 max_task_footprint ? max_task_footprint : LEDGER_LIMIT_INFINITY,
7702 max_task_footprint ? (uint8_t)max_task_footprint_warning_level : 0);
7703
7704 task_lock(task);
7705 task_set_memlimit_is_active(task, memlimit_is_active);
7706 task_set_memlimit_is_fatal(task, memlimit_is_fatal);
7707 task_unlock(task);
7708 /**
7709 * If the diagnostics threshold was disabled and now we have a new limit, we have to re-enable it.
7710 */
7711 #if DEVELOPMENT || DEBUG
7712 if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7713 ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7714 } else if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7715 ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7716 }
7717 #endif
7718 return KERN_SUCCESS;
7719 }
7720
7721 #ifdef CONFIG_NOMONITORS
7722 return KERN_SUCCESS;
7723 #endif /* CONFIG_NOMONITORS */
7724
7725 task_lock(task);
7726
7727 if ((memlimit_is_active == task_get_memlimit_is_active(task)) &&
7728 (memlimit_is_fatal == task_get_memlimit_is_fatal(task)) &&
7729 (((ledger_amount_t)new_limit_mb << 20) == old)) {
7730 /*
7731 * memlimit state is not changing
7732 */
7733 task_unlock(task);
7734 return KERN_SUCCESS;
7735 }
7736
7737 task_set_memlimit_is_active(task, memlimit_is_active);
7738 task_set_memlimit_is_fatal(task, memlimit_is_fatal);
7739
7740 ledger_set_limit(task->ledger, task_ledgers.phys_footprint,
7741 (ledger_amount_t)new_limit_mb << 20, PHYS_FOOTPRINT_WARNING_LEVEL);
7742
7743 if (task == current_task()) {
7744 ledger_check_new_balance(current_thread(), task->ledger,
7745 task_ledgers.phys_footprint);
7746 }
7747
7748 task_unlock(task);
7749 #if DEVELOPMENT || DEBUG
7750 if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7751 ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7752 }
7753 #endif
7754
7755 return KERN_SUCCESS;
7756 }
7757
7758 #if RESETTABLE_DIAG_FOOTPRINT_LIMITS
7759 kern_return_t
7760 task_set_diag_footprint_limit_internal(
7761 task_t task,
7762 uint64_t new_limit_bytes,
7763 uint64_t *old_limit_bytes)
7764 {
7765 ledger_amount_t old = 0;
7766 kern_return_t ret = KERN_SUCCESS;
7767 diagthreshold_check_return diag_threshold_validity;
7768 ret = ledger_get_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint, &old);
7769
7770 if (ret != KERN_SUCCESS) {
7771 return ret;
7772 }
7773 /**
7774 * Maybe we will need to re-enable the diag threshold; let's get the value
7775 * and the current status.
7776 */
7777 diag_threshold_validity = task_check_memorythreshold_is_valid( task, new_limit_bytes >> 20, true);
7778 /**
7779 * If the footprint and diagnostics threshold are going to be the same, let's disable the threshold.
7780 */
7781 if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7782 ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7783 }
7784
7785 /*
7786 * Check that limit >> 20 will not give an "unexpected" 32-bit
7787 * result. There are, however, implicit assumptions that -1 mb limit
7788 * equates to LEDGER_LIMIT_INFINITY.
7789 */
7790 if (old_limit_bytes) {
7791 *old_limit_bytes = old;
7792 }
7793
7794 if (new_limit_bytes == -1) {
7795 /*
7796 * Caller wishes to remove the limit.
7797 */
7798 ledger_set_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint,
7799 LEDGER_LIMIT_INFINITY);
7800 /*
7801 * If the memory diagnostics flag was disabled, let's enable it again.
7802 */
7803 ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7804 return KERN_SUCCESS;
7805 }
7806
7807 #ifdef CONFIG_NOMONITORS
7808 return KERN_SUCCESS;
7809 #else
7810
7811 task_lock(task);
7812 ledger_set_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint,
7813 (ledger_amount_t)new_limit_bytes );
7814 if (task == current_task()) {
7815 ledger_check_new_balance(current_thread(), task->ledger,
7816 task_ledgers.phys_footprint);
7817 }
7818
7819 task_unlock(task);
7820 if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7821 ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7822 } else if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7823 ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7824 }
7825
7826 return KERN_SUCCESS;
7827 #endif /* CONFIG_NOMONITORS */
7828 }
7829
7830 kern_return_t
7831 task_get_diag_footprint_limit_internal(
7832 task_t task,
7833 uint64_t *new_limit_bytes,
7834 bool *threshold_disabled)
7835 {
7836 ledger_amount_t ledger_limit = 0; /* initialized in case the ledger query fails */
7837 kern_return_t ret = KERN_SUCCESS;
7838 if (new_limit_bytes == NULL || threshold_disabled == NULL) {
7839 return KERN_INVALID_ARGUMENT;
7840 }
7841 ret = ledger_get_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint, &ledger_limit);
7842 if (ledger_limit == LEDGER_LIMIT_INFINITY) {
7843 ledger_limit = -1;
7844 }
7845 if (ret == KERN_SUCCESS) {
7846 *new_limit_bytes = ledger_limit;
7847 ret = ledger_is_diag_threshold_enabled(task->ledger, task_ledgers.phys_footprint, threshold_disabled);
7848 }
7849 return ret;
7850 }
7851 #endif /* RESETTABLE_DIAG_FOOTPRINT_LIMITS */
7852
7853
7854 kern_return_t
7855 task_get_phys_footprint_limit(
7856 task_t task,
7857 int *limit_mb)
7858 {
7859 ledger_amount_t limit;
7860 kern_return_t ret;
7861
7862 ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &limit);
7863 if (ret != KERN_SUCCESS) {
7864 return ret;
7865 }
7866
7867 /*
7868 * Check that limit >> 20 will not give an "unexpected" signed, 32-bit
7869 * result. There are, however, implicit assumptions that -1 mb limit
7870 * equates to LEDGER_LIMIT_INFINITY.
7871 */
7872 assert(((limit & 0xFFF0000000000000LL) == 0) || (limit == LEDGER_LIMIT_INFINITY));
7873 *limit_mb = (int)(limit >> 20);
7874
7875 return KERN_SUCCESS;
7876 }
7877 #else /* CONFIG_MEMORYSTATUS */
7878 kern_return_t
7879 task_set_phys_footprint_limit(
7880 __unused task_t task,
7881 __unused int new_limit_mb,
7882 __unused int *old_limit_mb)
7883 {
7884 return KERN_FAILURE;
7885 }
7886
7887 kern_return_t
7888 task_get_phys_footprint_limit(
7889 __unused task_t task,
7890 __unused int *limit_mb)
7891 {
7892 return KERN_FAILURE;
7893 }
7894 #endif /* CONFIG_MEMORYSTATUS */
7895
7896 security_token_t *
7897 task_get_sec_token(task_t task)
7898 {
7899 return &task_get_ro(task)->task_tokens.sec_token;
7900 }
7901
7902 void
7903 task_set_sec_token(task_t task, security_token_t *token)
7904 {
7905 zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7906 task_tokens.sec_token, token);
7907 }
7908
7909 audit_token_t *
7910 task_get_audit_token(task_t task)
7911 {
7912 return &task_get_ro(task)->task_tokens.audit_token;
7913 }
7914
7915 void
7916 task_set_audit_token(task_t task, audit_token_t *token)
7917 {
7918 zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7919 task_tokens.audit_token, token);
7920 }
7921
7922 void
7923 task_set_tokens(task_t task, security_token_t *sec_token, audit_token_t *audit_token)
7924 {
7925 struct task_token_ro_data tokens;
7926
7927 tokens = task_get_ro(task)->task_tokens;
7928 tokens.sec_token = *sec_token;
7929 tokens.audit_token = *audit_token;
7930
7931 zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task), task_tokens,
7932 &tokens);
7933 }
7934
7935 boolean_t
7936 task_is_privileged(task_t task)
7937 {
7938 return task_get_sec_token(task)->val[0] == 0;
7939 }
7940
7941 #ifdef CONFIG_MACF
7942 uint8_t *
7943 task_get_mach_trap_filter_mask(task_t task)
7944 {
7945 return task_get_ro(task)->task_filters.mach_trap_filter_mask;
7946 }
7947
7948 void
7949 task_set_mach_trap_filter_mask(task_t task, uint8_t *mask)
7950 {
7951 zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7952 task_filters.mach_trap_filter_mask, &mask);
7953 }
7954
7955 uint8_t *
7956 task_get_mach_kobj_filter_mask(task_t task)
7957 {
7958 return task_get_ro(task)->task_filters.mach_kobj_filter_mask;
7959 }
7960
7961 mach_vm_address_t
7962 task_get_all_image_info_addr(task_t task)
7963 {
7964 return task->all_image_info_addr;
7965 }
7966
7967 void
7968 task_set_mach_kobj_filter_mask(task_t task, uint8_t *mask)
7969 {
7970 zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7971 task_filters.mach_kobj_filter_mask, &mask);
7972 }
7973
7974 #endif /* CONFIG_MACF */
7975
7976 void
7977 task_set_thread_limit(task_t task, uint16_t thread_limit)
7978 {
7979 assert(task != kernel_task);
7980 if (thread_limit <= TASK_MAX_THREAD_LIMIT) {
7981 task_lock(task);
7982 task->task_thread_limit = thread_limit;
7983 task_unlock(task);
7984 }
7985 }
7986
7987 kern_return_t
7988 task_get_conclave_mem_limit(task_t task, uint64_t *conclave_limit)
7989 {
7990 kern_return_t ret;
7991 ledger_amount_t max;
7992
7993 ret = ledger_get_limit(task->ledger, task_ledgers.conclave_mem, &max);
7994 if (ret != KERN_SUCCESS) {
7995 return ret;
7996 }
7997
7998 *conclave_limit = max;
7999
8000 return KERN_SUCCESS;
8001 }
8002
8003 kern_return_t
8004 task_set_conclave_mem_limit(task_t task, uint64_t conclave_limit)
8005 {
8006 kern_return_t error;
8007
8008 if ((error = proc_check_footprint_priv())) {
8009 (void) error;
8010 /* Following task_set_phys_footprint_limit, always returns KERN_NO_ACCESS. */
8011 return KERN_NO_ACCESS;
8012 }
8013
8014 task_lock(task);
8015
8016 ledger_set_limit(task->ledger, task_ledgers.conclave_mem,
8017 (ledger_amount_t)conclave_limit << 20, 0);
8018
8019 if (task == current_task()) {
8020 ledger_check_new_balance(current_thread(), task->ledger,
8021 task_ledgers.conclave_mem);
8022 }
8023
8024 task_unlock(task);
8025
8026 return KERN_SUCCESS;
8027 }
8028
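/*
 * Note that, as with the phys_footprint interface above, the limit passed
 * to task_set_conclave_mem_limit() is in MB (it is shifted left by 20
 * before reaching the ledger); an illustrative call:
 *
 *	kern_return_t kr = task_set_conclave_mem_limit(task, 64); // 64 MB
 */
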
8029 #if CONFIG_PROC_RESOURCE_LIMITS
8030 kern_return_t
8031 task_set_port_space_limits(task_t task, uint32_t soft_limit, uint32_t hard_limit)
8032 {
8033 return ipc_space_set_table_size_limits(task->itk_space, soft_limit, hard_limit);
8034 }
8035 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
8036
8037 #if XNU_TARGET_OS_OSX
8038 boolean_t
8039 task_has_system_version_compat_enabled(task_t task)
8040 {
8041 boolean_t enabled = FALSE;
8042
8043 task_lock(task);
8044 enabled = (task->t_flags & TF_SYS_VERSION_COMPAT);
8045 task_unlock(task);
8046
8047 return enabled;
8048 }
8049
8050 void
8051 task_set_system_version_compat_enabled(task_t task, boolean_t enable_system_version_compat)
8052 {
8053 assert(task == current_task());
8054 assert(task != kernel_task);
8055
8056 task_lock(task);
8057 if (enable_system_version_compat) {
8058 task->t_flags |= TF_SYS_VERSION_COMPAT;
8059 } else {
8060 task->t_flags &= ~TF_SYS_VERSION_COMPAT;
8061 }
8062 task_unlock(task);
8063 }
8064 #endif /* XNU_TARGET_OS_OSX */
8065
8066 /*
8067 * We need to export some functions to other components that
8068 * are currently implemented in macros within the osfmk
8069 * component. Just export them as functions of the same name.
8070 */
8071 boolean_t
8072 is_kerneltask(task_t t)
8073 {
8074 if (t == kernel_task) {
8075 return TRUE;
8076 }
8077
8078 return FALSE;
8079 }
8080
8081 boolean_t
8082 is_corpsefork(task_t t)
8083 {
8084 return task_is_a_corpse_fork(t);
8085 }
8086
8087 task_t
8088 current_task_early(void)
8089 {
8090 if (__improbable(startup_phase < STARTUP_SUB_EARLY_BOOT)) {
8091 if (current_thread()->t_tro == NULL) {
8092 return TASK_NULL;
8093 }
8094 }
8095 return get_threadtask(current_thread());
8096 }
8097
8098 task_t
8099 current_task(void)
8100 {
8101 return get_threadtask(current_thread());
8102 }
8103
8104 /* defined in bsd/kern/kern_prot.c */
8105 extern int get_audit_token_pid(audit_token_t *audit_token);
8106
8107 int
8108 task_pid(task_t task)
8109 {
8110 if (task) {
8111 return get_audit_token_pid(task_get_audit_token(task));
8112 }
8113 return -1;
8114 }
8115
8116 #if __has_feature(ptrauth_calls)
8117 /*
8118 * Get the shared region id and jop signing key for the task.
8119 * The function will allocate a kalloc buffer and return
8120 * it to caller, the caller needs to free it. This is used
8121 * for getting the information via task port.
8122 */
8123 char *
8124 task_get_vm_shared_region_id_and_jop_pid(task_t task, uint64_t *jop_pid)
8125 {
8126 size_t len;
8127 char *shared_region_id = NULL;
8128
8129 task_lock(task);
8130 if (task->shared_region_id == NULL) {
8131 task_unlock(task);
8132 return NULL;
8133 }
8134 len = strlen(task->shared_region_id) + 1;
8135
8136 /* don't hold task lock while allocating */
8137 task_unlock(task);
8138 shared_region_id = kalloc_data(len, Z_WAITOK);
8139 task_lock(task);
8140
8141 if (task->shared_region_id == NULL) {
8142 task_unlock(task);
8143 kfree_data(shared_region_id, len);
8144 return NULL;
8145 }
8146 assert(len == strlen(task->shared_region_id) + 1); /* should never change */
8147 strlcpy(shared_region_id, task->shared_region_id, len);
8148 task_unlock(task);
8149
8150 /* find key from its auth pager */
8151 if (jop_pid != NULL) {
8152 *jop_pid = shared_region_find_key(shared_region_id);
8153 }
8154
8155 return shared_region_id;
8156 }
8157
8158 /*
8159 * set the shared region id for a task
8160 */
8161 void
8162 task_set_shared_region_id(task_t task, char *id)
8163 {
8164 char *old_id;
8165
8166 task_lock(task);
8167 old_id = task->shared_region_id;
8168 task->shared_region_id = id;
8169 task->shared_region_auth_remapped = FALSE;
8170 task_unlock(task);
8171
8172 /* free any pre-existing shared region id */
8173 if (old_id != NULL) {
8174 shared_region_key_dealloc(old_id);
8175 kfree_data(old_id, strlen(old_id) + 1);
8176 }
8177 }
8178 #endif /* __has_feature(ptrauth_calls) */
8179
8180 /*
8181 * This routine finds a thread in a task by its unique id
8182 * Returns a referenced thread or THREAD_NULL if the thread was not found
8183 *
8184 * TODO: This is super inefficient - it's an O(threads in task) list walk!
8185 * We should make a tid hash, or transition all tid clients to thread ports
8186 *
8187 * Precondition: No locks held (will take task lock)
8188 */
8189 thread_t
8190 task_findtid(task_t task, uint64_t tid)
8191 {
8192 thread_t self = current_thread();
8193 thread_t found_thread = THREAD_NULL;
8194 thread_t iter_thread = THREAD_NULL;
8195
8196 /* Short-circuit the lookup if we're looking up ourselves */
8197 if (tid == self->thread_id || tid == TID_NULL) {
8198 assert(get_threadtask(self) == task);
8199
8200 thread_reference(self);
8201
8202 return self;
8203 }
8204
8205 task_lock(task);
8206
8207 queue_iterate(&task->threads, iter_thread, thread_t, task_threads) {
8208 if (iter_thread->thread_id == tid) {
8209 found_thread = iter_thread;
8210 thread_reference(found_thread);
8211 break;
8212 }
8213 }
8214
8215 task_unlock(task);
8216
8217 return found_thread;
8218 }
8219
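/*
 * The caller owns the returned reference; a typical lookup (illustrative):
 *
 *	thread_t th = task_findtid(task, tid);
 *	if (th != THREAD_NULL) {
 *		...
 *		thread_deallocate(th);
 *	}
 */
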
8220 int
8221 pid_from_task(task_t task)
8222 {
8223 int pid = -1;
8224 void *bsd_info = get_bsdtask_info(task);
8225
8226 if (bsd_info) {
8227 pid = proc_pid(bsd_info);
8228 } else {
8229 pid = task_pid(task);
8230 }
8231
8232 return pid;
8233 }
8234
8235 /*
8236 * Control the CPU usage monitor for a task.
8237 */
8238 kern_return_t
8239 task_cpu_usage_monitor_ctl(task_t task, uint32_t *flags)
8240 {
8241 int error = KERN_SUCCESS;
8242
8243 if (*flags & CPUMON_MAKE_FATAL) {
8244 task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_FATAL_CPUMON;
8245 } else {
8246 error = KERN_INVALID_ARGUMENT;
8247 }
8248
8249 return error;
8250 }
8251
8252 /*
8253 * Control the wakeups monitor for a task.
8254 */
8255 kern_return_t
8256 task_wakeups_monitor_ctl(task_t task, uint32_t *flags, int32_t *rate_hz)
8257 {
8258 ledger_t ledger = task->ledger;
8259
8260 task_lock(task);
8261 if (*flags & WAKEMON_GET_PARAMS) {
8262 ledger_amount_t limit;
8263 uint64_t period;
8264
8265 ledger_get_limit(ledger, task_ledgers.interrupt_wakeups, &limit);
8266 ledger_get_period(ledger, task_ledgers.interrupt_wakeups, &period);
8267
8268 if (limit != LEDGER_LIMIT_INFINITY) {
8269 /*
8270 * An active limit means the wakeups monitor is enabled.
8271 */
8272 *rate_hz = (int32_t)(limit / (int64_t)(period / NSEC_PER_SEC));
8273 *flags = WAKEMON_ENABLE;
8274 if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON) {
8275 *flags |= WAKEMON_MAKE_FATAL;
8276 }
8277 } else {
8278 *flags = WAKEMON_DISABLE;
8279 *rate_hz = -1;
8280 }
8281
8282 /*
8283 * If WAKEMON_GET_PARAMS is present in flags, all other flags are ignored.
8284 */
8285 task_unlock(task);
8286 return KERN_SUCCESS;
8287 }
8288
8289 if (*flags & WAKEMON_ENABLE) {
8290 if (*flags & WAKEMON_SET_DEFAULTS) {
8291 *rate_hz = task_wakeups_monitor_rate;
8292 }
8293
8294 #ifndef CONFIG_NOMONITORS
8295 if (*flags & WAKEMON_MAKE_FATAL) {
8296 task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON;
8297 }
8298 #endif /* CONFIG_NOMONITORS */
8299
8300 if (*rate_hz <= 0) {
8301 task_unlock(task);
8302 return KERN_INVALID_ARGUMENT;
8303 }
8304
8305 #ifndef CONFIG_NOMONITORS
8306 ledger_set_limit(ledger, task_ledgers.interrupt_wakeups, *rate_hz * task_wakeups_monitor_interval,
8307 (uint8_t)task_wakeups_monitor_ustackshots_trigger_pct);
8308 ledger_set_period(ledger, task_ledgers.interrupt_wakeups, task_wakeups_monitor_interval * NSEC_PER_SEC);
8309 ledger_enable_callback(ledger, task_ledgers.interrupt_wakeups);
8310 #endif /* CONFIG_NOMONITORS */
8311 } else if (*flags & WAKEMON_DISABLE) {
8312 /*
8313 * Caller wishes to disable wakeups monitor on the task.
8314 *
8315 * Remove the limit & callback on the wakeups ledger entry.
8316 */
8317 ledger_disable_refill(ledger, task_ledgers.interrupt_wakeups);
8318 ledger_disable_callback(ledger, task_ledgers.interrupt_wakeups);
8319 }
8320
8321 task_unlock(task);
8322 return KERN_SUCCESS;
8323 }
8324
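/*
 * An illustrative enable with the default rate, followed by a parameter
 * query:
 *
 *	uint32_t flags = WAKEMON_ENABLE | WAKEMON_SET_DEFAULTS;
 *	int32_t rate_hz = 0;
 *	task_wakeups_monitor_ctl(task, &flags, &rate_hz);
 *
 *	flags = WAKEMON_GET_PARAMS;
 *	task_wakeups_monitor_ctl(task, &flags, &rate_hz);
 */
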
8325 void
8326 task_wakeups_rate_exceeded(int warning, __unused const void *param0, __unused const void *param1)
8327 {
8328 if (warning == 0) {
8329 SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS();
8330 }
8331 }
8332
8333 TUNABLE(bool, enable_wakeup_reports, "enable_wakeup_reports", false); /* Enable wakeup reports. */
8334
8335 void __attribute__((noinline))
8336 SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void)
8337 {
8338 task_t task = current_task();
8339 int pid = 0;
8340 const char *procname = "unknown";
8341 boolean_t fatal;
8342 kern_return_t kr;
8343 #ifdef EXC_RESOURCE_MONITORS
8344 mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
8345 #endif /* EXC_RESOURCE_MONITORS */
8346 struct ledger_entry_info lei;
8347
8348 #ifdef MACH_BSD
8349 pid = proc_selfpid();
8350 if (get_bsdtask_info(task) != NULL) {
8351 procname = proc_name_address(get_bsdtask_info(current_task()));
8352 }
8353 #endif
8354
8355 ledger_get_entry_info(task->ledger, task_ledgers.interrupt_wakeups, &lei);
8356
8357 /*
8358 * Disable the exception notification so we don't overwhelm
8359 * the listener with an endless stream of redundant exceptions.
8360 * TODO: detect whether another thread is already reporting the violation.
8361 */
8362 uint32_t flags = WAKEMON_DISABLE;
8363 task_wakeups_monitor_ctl(task, &flags, NULL);
8364
8365 fatal = task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON;
8366 trace_resource_violation(RMON_CPUWAKES_VIOLATED, &lei);
8367 os_log(OS_LOG_DEFAULT, "process %s[%d] caught waking the CPU %llu times "
8368 "over ~%llu seconds, averaging %llu wakes / second and "
8369 "violating a %slimit of %llu wakes over %llu seconds.\n",
8370 procname, pid,
8371 lei.lei_balance, lei.lei_last_refill / NSEC_PER_SEC,
8372 lei.lei_last_refill == 0 ? 0 :
8373 (NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill),
8374 fatal ? "FATAL " : "",
8375 lei.lei_limit, lei.lei_refill_period / NSEC_PER_SEC);
8376
8377 if (enable_wakeup_reports) {
8378 kr = send_resource_violation(send_cpu_wakes_violation, task, &lei,
8379 fatal ? kRNFatalLimitFlag : 0);
8380 if (kr) {
8381 printf("send_resource_violation(CPU wakes, ...): error %#x\n", kr);
8382 }
8383 }
8384
8385 #ifdef EXC_RESOURCE_MONITORS
8386 if (disable_exc_resource) {
8387 printf("process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
8388 "suppressed by a boot-arg\n", procname, pid);
8389 return;
8390 }
8391 if (disable_exc_resource_during_audio && audio_active && task->task_jetsam_realtime_audio) {
8392 os_log(OS_LOG_DEFAULT, "process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
8393 "suppressed due to audio playback\n", procname, pid);
8394 return;
8395 }
8396 if (lei.lei_last_refill == 0) {
8397 os_log(OS_LOG_DEFAULT, "process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
8398 "suppressed due to lei.lei_last_refill = 0 \n", procname, pid);
8399 }
8400
8401 code[0] = code[1] = 0;
8402 EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_WAKEUPS);
8403 EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_WAKEUPS_MONITOR);
8404 EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_PERMITTED(code[0],
8405 NSEC_PER_SEC * lei.lei_limit / lei.lei_refill_period);
8406 EXC_RESOURCE_CPUMONITOR_ENCODE_OBSERVATION_INTERVAL(code[0],
8407 lei.lei_last_refill);
8408 EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_OBSERVED(code[1],
8409 NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill);
8410 exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
8411 #endif /* EXC_RESOURCE_MONITORS */
8412
8413 if (fatal) {
8414 task_terminate_internal(task);
8415 }
8416 }
8417
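/*
 * global_update_logical_writes() accumulates I/O into the shared counter
 * without a lock: if another CPU updates *global_write_count between the
 * read and the OSCompareAndSwap64(), the swap fails and the loop retries
 * with the fresh value. Crossing io_telemetry_limit resets the counter and
 * reports that telemetry is due.
 */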
8418 static boolean_t
8419 global_update_logical_writes(int64_t io_delta, int64_t *global_write_count)
8420 {
8421 int64_t old_count, new_count;
8422 boolean_t needs_telemetry;
8423
8424 do {
8425 new_count = old_count = *global_write_count;
8426 new_count += io_delta;
8427 if (new_count >= io_telemetry_limit) {
8428 new_count = 0;
8429 needs_telemetry = TRUE;
8430 } else {
8431 needs_telemetry = FALSE;
8432 }
8433 } while (!OSCompareAndSwap64(old_count, new_count, global_write_count));
8434 return needs_telemetry;
8435 }
8436
8437 void
8438 task_update_physical_writes(__unused task_t task, __unused task_physical_write_flavor_t flavor, __unused uint64_t io_size, __unused task_balance_flags_t flags)
8439 {
8440 #if CONFIG_PHYS_WRITE_ACCT
8441 if (!io_size) {
8442 return;
8443 }
8444
8445 /*
8446 * task == NULL means that we have to update kernel_task ledgers
8447 */
8448 if (!task) {
8449 task = kernel_task;
8450 }
8451
8452 KDBG((VMDBG_CODE(DBG_VM_PHYS_WRITE_ACCT)) | DBG_FUNC_NONE,
8453 task_pid(task), flavor, io_size, flags);
8454 DTRACE_IO4(physical_writes, struct task *, task, task_physical_write_flavor_t, flavor, uint64_t, io_size, task_balance_flags_t, flags);
8455
8456 if (flags & TASK_BALANCE_CREDIT) {
8457 if (flavor == TASK_PHYSICAL_WRITE_METADATA) {
8458 OSAddAtomic64(io_size, (SInt64 *)&(task->task_fs_metadata_writes));
8459 ledger_credit_nocheck(task->ledger, task_ledgers.fs_metadata_writes, io_size);
8460 }
8461 } else if (flags & TASK_BALANCE_DEBIT) {
8462 if (flavor == TASK_PHYSICAL_WRITE_METADATA) {
8463 OSAddAtomic64(-1 * io_size, (SInt64 *)&(task->task_fs_metadata_writes));
8464 ledger_debit_nocheck(task->ledger, task_ledgers.fs_metadata_writes, io_size);
8465 }
8466 }
8467 #endif /* CONFIG_PHYS_WRITE_ACCT */
8468 }
8469
8470 void
8471 task_update_logical_writes(task_t task, uint32_t io_size, int flags, void *vp)
8472 {
8473 int64_t io_delta = 0;
8474 int64_t * global_counter_to_update;
8475 boolean_t needs_telemetry = FALSE;
8476 boolean_t is_external_device = FALSE;
8477 int ledger_to_update = 0;
8478 struct task_writes_counters * writes_counters_to_update;
8479
8480 if ((!task) || (!io_size) || (!vp)) {
8481 return;
8482 }
8483
8484 KDBG((VMDBG_CODE(DBG_VM_DATA_WRITE)) | DBG_FUNC_NONE,
8485 task_pid(task), io_size, flags, (uintptr_t)VM_KERNEL_ADDRPERM(vp));
8486 DTRACE_IO4(logical_writes, struct task *, task, uint32_t, io_size, int, flags, vnode *, vp);
8487
8488 // Is the drive backing this vnode internal or external to the system?
8489 if (vnode_isonexternalstorage(vp) == false) {
8490 global_counter_to_update = &global_logical_writes_count;
8491 ledger_to_update = task_ledgers.logical_writes;
8492 writes_counters_to_update = &task->task_writes_counters_internal;
8493 is_external_device = FALSE;
8494 } else {
8495 global_counter_to_update = &global_logical_writes_to_external_count;
8496 ledger_to_update = task_ledgers.logical_writes_to_external;
8497 writes_counters_to_update = &task->task_writes_counters_external;
8498 is_external_device = TRUE;
8499 }
8500
8501 switch (flags) {
8502 case TASK_WRITE_IMMEDIATE:
8503 OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_immediate_writes));
8504 ledger_credit(task->ledger, ledger_to_update, io_size);
8505 if (!is_external_device) {
8506 coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
8507 }
8508 break;
8509 case TASK_WRITE_DEFERRED:
8510 OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_deferred_writes));
8511 ledger_credit(task->ledger, ledger_to_update, io_size);
8512 if (!is_external_device) {
8513 coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
8514 }
8515 break;
8516 case TASK_WRITE_INVALIDATED:
8517 OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_invalidated_writes));
8518 ledger_debit(task->ledger, ledger_to_update, io_size);
8519 if (!is_external_device) {
8520 coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, FALSE, io_size);
8521 }
8522 break;
8523 case TASK_WRITE_METADATA:
8524 OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_metadata_writes));
8525 ledger_credit(task->ledger, ledger_to_update, io_size);
8526 if (!is_external_device) {
8527 coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
8528 }
8529 break;
8530 }
8531
8532 io_delta = (flags == TASK_WRITE_INVALIDATED) ? ((int64_t)io_size * -1ll) : ((int64_t)io_size);
8533 if (io_telemetry_limit != 0) {
8534 /* If io_telemetry_limit is 0, disable global updates and I/O telemetry */
8535 needs_telemetry = global_update_logical_writes(io_delta, global_counter_to_update);
8536 if (needs_telemetry && !is_external_device) {
8537 act_set_io_telemetry_ast(current_thread());
8538 }
8539 }
8540 }
8541
8542 /*
8543 * Control the I/O monitor for a task.
8544 */
8545 kern_return_t
8546 task_io_monitor_ctl(task_t task, uint32_t *flags)
8547 {
8548 ledger_t ledger = task->ledger;
8549
8550 task_lock(task);
8551 if (*flags & IOMON_ENABLE) {
8552 /* Configure the physical I/O ledger */
8553 ledger_set_limit(ledger, task_ledgers.physical_writes, (task_iomon_limit_mb * 1024 * 1024), 0);
8554 ledger_set_period(ledger, task_ledgers.physical_writes, (task_iomon_interval_secs * NSEC_PER_SEC));
8555 } else if (*flags & IOMON_DISABLE) {
8556 /*
8557 * Caller wishes to disable I/O monitor on the task.
8558 */
8559 ledger_disable_refill(ledger, task_ledgers.physical_writes);
8560 ledger_disable_callback(ledger, task_ledgers.physical_writes);
8561 }
8562
8563 task_unlock(task);
8564 return KERN_SUCCESS;
8565 }
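/*
 * Illustrative sketch (not part of the original file): enabling the
 * physical-write monitor for the current task. The ledger refills every
 * task_iomon_interval_secs and fires its callback once more than
 * task_iomon_limit_mb of physical writes accumulate within one period.
 *
 *	uint32_t flags = IOMON_ENABLE;
 *	kern_return_t kr = task_io_monitor_ctl(current_task(), &flags);
 *	assert(kr == KERN_SUCCESS);
 */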
8566
8567 void
8568 task_io_rate_exceeded(int warning, const void *param0, __unused const void *param1)
8569 {
8570 if (warning == 0) {
8571 SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO((int)param0);
8572 }
8573 }
8574
8575 void __attribute__((noinline))
8576 SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor)
8577 {
8578 int pid = 0;
8579 task_t task = current_task();
8580 #ifdef EXC_RESOURCE_MONITORS
8581 mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
8582 #endif /* EXC_RESOURCE_MONITORS */
8583 struct ledger_entry_info lei = {};
8584 kern_return_t kr;
8585
8586 #ifdef MACH_BSD
8587 pid = proc_selfpid();
8588 #endif
8589 /*
8590 * Get the ledger entry info. We need to do this before disabling the exception
8591 * to get correct values for all fields.
8592 */
8593 switch (flavor) {
8594 case FLAVOR_IO_PHYSICAL_WRITES:
8595 ledger_get_entry_info(task->ledger, task_ledgers.physical_writes, &lei);
8596 break;
8597 }
8598
8599
8600 /*
8601 * Disable the exception notification so we don't overwhelm
8602 * the listener with an endless stream of redundant exceptions.
8603 * TODO: detect whether another thread is already reporting the violation.
8604 */
8605 uint32_t flags = IOMON_DISABLE;
8606 task_io_monitor_ctl(task, &flags);
8607
8608 if (flavor == FLAVOR_IO_LOGICAL_WRITES) {
8609 trace_resource_violation(RMON_LOGWRITES_VIOLATED, &lei);
8610 }
8611 os_log(OS_LOG_DEFAULT, "process [%d] caught causing excessive I/O (flavor: %d). Task I/O: %lld MB. [Limit : %lld MB per %lld secs]\n",
8612 pid, flavor, (lei.lei_balance / (1024 * 1024)), (lei.lei_limit / (1024 * 1024)), (lei.lei_refill_period / NSEC_PER_SEC));
8613
8614 kr = send_resource_violation(send_disk_writes_violation, task, &lei, kRNFlagsNone);
8615 if (kr) {
8616 printf("send_resource_violation(disk_writes, ...): error %#x\n", kr);
8617 }
8618
8619 #ifdef EXC_RESOURCE_MONITORS
8620 code[0] = code[1] = 0;
8621 EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_IO);
8622 EXC_RESOURCE_ENCODE_FLAVOR(code[0], flavor);
8623 EXC_RESOURCE_IO_ENCODE_INTERVAL(code[0], (lei.lei_refill_period / NSEC_PER_SEC));
8624 EXC_RESOURCE_IO_ENCODE_LIMIT(code[0], (lei.lei_limit / (1024 * 1024)));
8625 EXC_RESOURCE_IO_ENCODE_OBSERVED(code[1], (lei.lei_balance / (1024 * 1024)));
8626 exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
8627 #endif /* EXC_RESOURCE_MONITORS */
8628 }
8629
8630 void
8631 task_port_space_ast(__unused task_t task)
8632 {
8633 uint32_t current_size, soft_limit, hard_limit;
8634 assert(task == current_task());
8635 bool should_notify = ipc_space_check_table_size_limit(task->itk_space,
8636 &current_size, &soft_limit, &hard_limit);
8637 if (should_notify) {
8638 SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_MACH_PORTS(task, current_size, soft_limit, hard_limit);
8639 }
8640 }
8641
8642 #if CONFIG_PROC_RESOURCE_LIMITS
8643 static mach_port_t
8644 task_allocate_fatal_port(void)
8645 {
8646 mach_port_t task_fatal_port = MACH_PORT_NULL;
8647 task_id_token_t token;
8648
8649 kern_return_t kr = task_create_identity_token(current_task(), &token); /* Takes a reference on the token */
8650 if (kr) {
8651 return MACH_PORT_NULL;
8652 }
8653 task_fatal_port = ipc_kobject_alloc_port(token, IKOT_TASK_FATAL,
8654 IPC_KOBJECT_ALLOC_MAKE_SEND);
8655
8656 task_id_token_set_port(token, task_fatal_port);
8657
8658 return task_fatal_port;
8659 }
8660
8661 static void
8662 task_fatal_port_no_senders(ipc_port_t port, __unused mach_port_mscount_t mscount)
8663 {
8664 task_t task = TASK_NULL;
8665 kern_return_t kr;
8666
8667 task_id_token_t token = ipc_kobject_get_stable(port, IKOT_TASK_FATAL);
8668
8669 assert(token != NULL);
8670 if (token) {
8671 kr = task_identity_token_get_task_grp(token, &task, TASK_GRP_KERNEL); /* takes a reference on task */
8672 if (task) {
8673 task_bsdtask_kill(task);
8674 task_deallocate(task);
8675 }
8676 task_id_token_release(token); /* consumes ref given by notification */
8677 }
8678 }
8679 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
8680
8681 void __attribute__((noinline))
8682 SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_MACH_PORTS(task_t task, uint32_t current_size, uint32_t soft_limit, uint32_t hard_limit)
8683 {
8684 int pid = 0;
8685 char *procname = (char *) "unknown";
8686 __unused kern_return_t kr;
8687 __unused resource_notify_flags_t flags = kRNFlagsNone;
8688 __unused uint32_t limit;
8689 __unused mach_port_t task_fatal_port = MACH_PORT_NULL;
8690 mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
8691
8692 pid = proc_selfpid();
8693 if (get_bsdtask_info(task) != NULL) {
8694 procname = proc_name_address(get_bsdtask_info(task));
8695 }
8696
8697 /*
8698 * Only kernel_task and launchd are allowed to
8699 * have a really large IPC space.
8700 */
8701 if (pid == 0 || pid == 1) {
8702 return;
8703 }
8704
8705 os_log(OS_LOG_DEFAULT, "process %s[%d] caught allocating too many mach ports. \
8706 Num of ports allocated %u; \n", procname, pid, current_size);
8707
8708 /* Abort the process if it has hit the system-wide limit for ipc port table size */
8709 if (!hard_limit && !soft_limit) {
8710 code[0] = code[1] = 0;
8711 EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_PORTS);
8712 EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_PORT_SPACE_FULL);
8713 EXC_RESOURCE_PORTS_ENCODE_PORTS(code[0], current_size);
8714
8715 exception_info_t info = {
8716 .os_reason = OS_REASON_PORT_SPACE,
8717 .exception_type = EXC_RESOURCE,
8718 .mx_code = code[0],
8719 .mx_subcode = code[1]
8720 };
8721
8722 exit_with_mach_exception(current_proc(), info, PX_DEBUG_NO_HONOR);
8723 return;
8724 }
8725
8726 #if CONFIG_PROC_RESOURCE_LIMITS
8727 if (hard_limit > 0) {
8728 flags |= kRNHardLimitFlag;
8729 limit = hard_limit;
8730 task_fatal_port = task_allocate_fatal_port();
8731 if (!task_fatal_port) {
8732 os_log(OS_LOG_DEFAULT, "process %s[%d] Unable to create task token ident object", procname, pid);
8733 task_bsdtask_kill(task);
8734 }
8735 } else {
8736 flags |= kRNSoftLimitFlag;
8737 limit = soft_limit;
8738 }
8739
8740 kr = send_resource_violation_with_fatal_port(send_port_space_violation, task, (int64_t)current_size, (int64_t)limit, task_fatal_port, flags);
8741 if (kr) {
8742 os_log(OS_LOG_DEFAULT, "send_resource_violation(ports, ...): error %#x\n", kr);
8743 }
8744 if (task_fatal_port) {
8745 ipc_port_release_send(task_fatal_port);
8746 }
8747 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
8748 }
8749
8750 #if CONFIG_PROC_RESOURCE_LIMITS
8751 void
8752 task_kqworkloop_ast(task_t task, int current_size, int soft_limit, int hard_limit)
8753 {
8754 assert(task == current_task());
8755 return SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_KQWORKLOOPS(task, current_size, soft_limit, hard_limit);
8756 }
8757
8758 void __attribute__((noinline))
8759 SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_KQWORKLOOPS(task_t task, int current_size, int soft_limit, int hard_limit)
8760 {
8761 int pid = 0;
8762 char *procname = (char *) "unknown";
8763 #ifdef MACH_BSD
8764 pid = proc_selfpid();
8765 if (get_bsdtask_info(task) != NULL) {
8766 procname = proc_name_address(get_bsdtask_info(task));
8767 }
8768 #endif
8769 if (pid == 0 || pid == 1) {
8770 return;
8771 }
8772
8773 os_log(OS_LOG_DEFAULT, "process %s[%d] caught allocating too many kqworkloops. \
8774 Num of kqworkloops allocated %d; \n", procname, pid, current_size);
8775
8776 int limit = 0;
8777 resource_notify_flags_t flags = kRNFlagsNone;
8778 mach_port_t task_fatal_port = MACH_PORT_NULL;
8779 if (hard_limit) {
8780 flags |= kRNHardLimitFlag;
8781 limit = hard_limit;
8782
8783 task_fatal_port = task_allocate_fatal_port();
8784 if (task_fatal_port == MACH_PORT_NULL) {
8785 os_log(OS_LOG_DEFAULT, "process %s[%d] Unable to create task token ident object", procname, pid);
8786 task_bsdtask_kill(task);
8787 }
8788 } else {
8789 flags |= kRNSoftLimitFlag;
8790 limit = soft_limit;
8791 }
8792
8793 kern_return_t kr;
8794 kr = send_resource_violation_with_fatal_port(send_kqworkloops_violation, task, (int64_t)current_size, (int64_t)limit, task_fatal_port, flags);
8795 if (kr) {
8796 os_log(OS_LOG_DEFAULT, "send_resource_violation_with_fatal_port(kqworkloops, ...): error %#x\n", kr);
8797 }
8798 if (task_fatal_port) {
8799 ipc_port_release_send(task_fatal_port);
8800 }
8801 }
8802
8803
8804 void
8805 task_filedesc_ast(__unused task_t task, __unused int current_size, __unused int soft_limit, __unused int hard_limit)
8806 {
8807 assert(task == current_task());
8808 SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task, current_size, soft_limit, hard_limit);
8809 }
8810
8811 void __attribute__((noinline))
8812 SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task_t task, int current_size, int soft_limit, int hard_limit)
8813 {
8814 int pid = 0;
8815 char *procname = (char *) "unknown";
8816 kern_return_t kr;
8817 resource_notify_flags_t flags = kRNFlagsNone;
8818 int limit;
8819 mach_port_t task_fatal_port = MACH_PORT_NULL;
8820
8821 #ifdef MACH_BSD
8822 pid = proc_selfpid();
8823 if (get_bsdtask_info(task) != NULL) {
8824 procname = proc_name_address(get_bsdtask_info(task));
8825 }
8826 #endif
8827 /*
8828 * Only kernel_task and launchd are allowed to
8829 * have a really large number of file descriptors.
8830 */
8831 if (pid == 0 || pid == 1) {
8832 return;
8833 }
8834
8835 os_log(OS_LOG_DEFAULT, "process %s[%d] caught allocating too many file descriptors. \
8836 Num of fds allocated %d; \n", procname, pid, current_size);
8837
8838 if (hard_limit > 0) {
8839 flags |= kRNHardLimitFlag;
8840 limit = hard_limit;
8841 task_fatal_port = task_allocate_fatal_port();
8842 if (!task_fatal_port) {
8843 os_log(OS_LOG_DEFAULT, "process %s[%d] Unable to create task token ident object", procname, pid);
8844 task_bsdtask_kill(task);
8845 }
8846 } else {
8847 flags |= kRNSoftLimitFlag;
8848 limit = soft_limit;
8849 }
8850
8851 kr = send_resource_violation_with_fatal_port(send_file_descriptors_violation, task, (int64_t)current_size, (int64_t)limit, task_fatal_port, flags);
8852 if (kr) {
8853 os_log(OS_LOG_DEFAULT, "send_resource_violation_with_fatal_port(filedesc, ...): error %#x\n", kr);
8854 }
8855 if (task_fatal_port) {
8856 ipc_port_release_send(task_fatal_port);
8857 }
8858 }
8859 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
8860
8861 /* Placeholders for the task set/get voucher interfaces */
8862 kern_return_t
8863 task_get_mach_voucher(
8864 task_t task,
8865 mach_voucher_selector_t __unused which,
8866 ipc_voucher_t *voucher)
8867 {
8868 if (TASK_NULL == task) {
8869 return KERN_INVALID_TASK;
8870 }
8871
8872 *voucher = NULL;
8873 return KERN_SUCCESS;
8874 }
8875
8876 kern_return_t
8877 task_set_mach_voucher(
8878 task_t task,
8879 ipc_voucher_t __unused voucher)
8880 {
8881 if (TASK_NULL == task) {
8882 return KERN_INVALID_TASK;
8883 }
8884
8885 return KERN_SUCCESS;
8886 }
8887
8888 kern_return_t
8889 task_swap_mach_voucher(
8890 __unused task_t task,
8891 __unused ipc_voucher_t new_voucher,
8892 ipc_voucher_t *in_out_old_voucher)
8893 {
8894 /*
8895 * Currently this function is only called from a MIG generated
8896 * routine which doesn't release the reference on the voucher
8897 * addressed by in_out_old_voucher. To avoid leaking this reference,
8898 * a call to release it has been added here.
8899 */
8900 ipc_voucher_release(*in_out_old_voucher);
8901 OS_ANALYZER_SUPPRESS("81787115") return KERN_NOT_SUPPORTED;
8902 }
8903
8904 void
8905 task_set_gpu_role(task_t task, darwin_gpu_role_t gpu_role)
8906 {
8907 task_lock(task);
8908
8909 os_atomic_store(&task->t_gpu_role, gpu_role, relaxed);
8910
8911 KDBG(IMPORTANCE_CODE(IMP_SET_GPU_ROLE, 0), gpu_role);
8912
8913 task_unlock(task);
8914 }
8915
8916 darwin_gpu_role_t
8917 task_get_gpu_role(task_t task)
8918 {
8919 return os_atomic_load(&task->t_gpu_role, relaxed);
8920 }
8921
8922 boolean_t
8923 task_is_gpu_denied(task_t task)
8924 {
8925 return (os_atomic_load(&task->t_gpu_role, relaxed) == PRIO_DARWIN_GPU_DENY) ? TRUE : FALSE;
8926 }
8927
8928 /*
8929 * Task policy termination uses this path to clear the bit the final time
8930 * during the termination flow, and the TASK_POLICY_TERMINATED bit guarantees
8931 * that it won't be changed again on a terminated task.
8932 */
8933 bool
8934 task_set_game_mode_locked(task_t task, bool enabled)
8935 {
8936 task_lock_assert_owned(task);
8937
8938 if (enabled) {
8939 assert(proc_get_effective_task_policy(task, TASK_POLICY_TERMINATED) == 0);
8940 }
8941
8942 bool previously_enabled = task_get_game_mode(task);
8943 bool needs_update = false;
8944 uint32_t new_count = 0;
8945
8946 if (enabled) {
8947 task->t_flags |= TF_GAME_MODE;
8948 } else {
8949 task->t_flags &= ~TF_GAME_MODE;
8950 }
8951
8952 if (enabled && !previously_enabled) {
8953 if (task_coalition_adjust_game_mode_count(task, 1, &new_count) && (new_count == 1)) {
8954 needs_update = true;
8955 }
8956 } else if (!enabled && previously_enabled) {
8957 if (task_coalition_adjust_game_mode_count(task, -1, &new_count) && (new_count == 0)) {
8958 needs_update = true;
8959 }
8960 }
8961
8962 return needs_update;
8963 }
8964
8965 void
8966 task_set_game_mode(task_t task, bool enabled)
8967 {
8968 bool needs_update = false;
8969
8970 task_lock(task);
8971
8972 /* After termination, further updates are no longer effective */
8973 if (proc_get_effective_task_policy(task, TASK_POLICY_TERMINATED) == 0) {
8974 needs_update = task_set_game_mode_locked(task, enabled);
8975 }
8976
8977 task_unlock(task);
8978
8979 #if CONFIG_THREAD_GROUPS
8980 if (needs_update) {
8981 task_coalition_thread_group_game_mode_update(task);
8982 }
8983 #endif /* CONFIG_THREAD_GROUPS */
8984 }
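/*
 * Illustrative note (not part of the original file): task_set_game_mode() is
 * the normal, unlocked entry point; task_set_game_mode_locked() exists for
 * the task-policy termination path, which already holds the task lock. The
 * coalition thread-group update fires only on the coalition-wide 0 -> 1 and
 * 1 -> 0 transitions of the game-task count:
 *
 *	task_set_game_mode(task, true);  // first game task in the coalition
 *	                                 // triggers the thread-group update
 */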
8985
8986 bool
8987 task_get_game_mode(task_t task)
8988 {
8989 /* We don't need the lock to read this flag */
8990 return task->t_flags & TF_GAME_MODE;
8991 }
8992
8993 bool
8994 task_set_carplay_mode_locked(task_t task, bool enabled)
8995 {
8996 task_lock_assert_owned(task);
8997
8998 if (enabled) {
8999 assert(proc_get_effective_task_policy(task, TASK_POLICY_TERMINATED) == 0);
9000 }
9001
9002 bool previously_enabled = task_get_carplay_mode(task);
9003 bool needs_update = false;
9004 uint32_t new_count = 0;
9005
9006 if (enabled) {
9007 task->t_flags |= TF_CARPLAY_MODE;
9008 } else {
9009 task->t_flags &= ~TF_CARPLAY_MODE;
9010 }
9011
9012 if (enabled && !previously_enabled) {
9013 if (task_coalition_adjust_carplay_mode_count(task, 1, &new_count) && (new_count == 1)) {
9014 needs_update = true;
9015 }
9016 } else if (!enabled && previously_enabled) {
9017 if (task_coalition_adjust_carplay_mode_count(task, -1, &new_count) && (new_count == 0)) {
9018 needs_update = true;
9019 }
9020 }
9021 return needs_update;
9022 }
9023
9024 void
9025 task_set_carplay_mode(task_t task, bool enabled)
9026 {
9027 bool needs_update = false;
9028
9029 task_lock(task);
9030
9031 /* After termination, further updates are no longer effective */
9032 if (proc_get_effective_task_policy(task, TASK_POLICY_TERMINATED) == 0) {
9033 needs_update = task_set_carplay_mode_locked(task, enabled);
9034 }
9035
9036 task_unlock(task);
9037
9038 #if CONFIG_THREAD_GROUPS
9039 if (needs_update) {
9040 task_coalition_thread_group_carplay_mode_update(task);
9041 }
9042 #endif /* CONFIG_THREAD_GROUPS */
9043 }
9044
9045 bool
9046 task_get_carplay_mode(task_t task)
9047 {
9048 /* We don't need the lock to read this flag */
9049 return task->t_flags & TF_CARPLAY_MODE;
9050 }
9051
9052 uint64_t
9053 get_task_memory_region_count(task_t task)
9054 {
9055 vm_map_t map;
9056 map = (task == kernel_task) ? kernel_map: task->map;
9057 return (uint64_t)get_map_nentries(map);
9058 }
9059
9060 static void
9061 kdebug_trace_dyld_internal(uint32_t base_code,
9062 struct dyld_kernel_image_info *info)
9063 {
9064 static_assert(sizeof(info->uuid) >= 16);
9065
9066 #if defined(__LP64__)
9067 uint64_t *uuid = (uint64_t *)&(info->uuid);
9068
9069 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
9070 KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code), uuid[0],
9071 uuid[1], info->load_addr,
9072 (uint64_t)info->fsid.val[0] | ((uint64_t)info->fsid.val[1] << 32),
9073 0);
9074 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
9075 KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 1),
9076 (uint64_t)info->fsobjid.fid_objno |
9077 ((uint64_t)info->fsobjid.fid_generation << 32),
9078 0, 0, 0, 0);
9079 #else /* defined(__LP64__) */
9080 uint32_t *uuid = (uint32_t *)&(info->uuid);
9081
9082 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
9083 KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 2), uuid[0],
9084 uuid[1], uuid[2], uuid[3], 0);
9085 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
9086 KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 3),
9087 (uint32_t)info->load_addr, info->fsid.val[0], info->fsid.val[1],
9088 info->fsobjid.fid_objno, 0);
9089 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
9090 KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 4),
9091 info->fsobjid.fid_generation, 0, 0, 0, 0);
9092 #endif /* !defined(__LP64__) */
9093 }
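/*
 * Packing example (illustrative): on LP64, the two 32-bit fsid words are
 * folded into a single 64-bit tracepoint argument, low word first. With
 * fsid.val[0] == 0x1111 and fsid.val[1] == 0x2222 the emitted argument is
 * 0x0000222200001111; fsobjid packs fid_objno and fid_generation the same
 * way in the second event.
 */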
9094
9095 static kern_return_t
9096 kdebug_trace_dyld(task_t task, uint32_t base_code,
9097 vm_map_copy_t infos_copy, mach_msg_type_number_t infos_len)
9098 {
9099 kern_return_t kr;
9100 dyld_kernel_image_info_array_t infos;
9101 vm_map_offset_t map_data;
9102 vm_offset_t data;
9103
9104 if (!infos_copy) {
9105 return KERN_INVALID_ADDRESS;
9106 }
9107
9108 if (!kdebug_enable ||
9109 !kdebug_debugid_enabled(KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, 0))) {
9110 vm_map_copy_discard(infos_copy);
9111 return KERN_SUCCESS;
9112 }
9113
9114 if (task == NULL || task != current_task()) {
9115 return KERN_INVALID_TASK;
9116 }
9117
9118 kr = vm_map_copyout(ipc_kernel_map, &map_data, (vm_map_copy_t)infos_copy);
9119 if (kr != KERN_SUCCESS) {
9120 return kr;
9121 }
9122
9123 infos = CAST_DOWN(dyld_kernel_image_info_array_t, map_data);
9124
9125 for (mach_msg_type_number_t i = 0; i < infos_len; i++) {
9126 kdebug_trace_dyld_internal(base_code, &(infos[i]));
9127 }
9128
9129 data = CAST_DOWN(vm_offset_t, map_data);
9130 mach_vm_deallocate(ipc_kernel_map, data, infos_len * sizeof(infos[0]));
9131 return KERN_SUCCESS;
9132 }
9133
9134 kern_return_t
9135 task_register_dyld_image_infos(task_t task,
9136 dyld_kernel_image_info_array_t infos_copy,
9137 mach_msg_type_number_t infos_len)
9138 {
9139 return kdebug_trace_dyld(task, DBG_DYLD_UUID_MAP_A,
9140 (vm_map_copy_t)infos_copy, infos_len);
9141 }
9142
9143 kern_return_t
9144 task_unregister_dyld_image_infos(task_t task,
9145 dyld_kernel_image_info_array_t infos_copy,
9146 mach_msg_type_number_t infos_len)
9147 {
9148 return kdebug_trace_dyld(task, DBG_DYLD_UUID_UNMAP_A,
9149 (vm_map_copy_t)infos_copy, infos_len);
9150 }
9151
9152 kern_return_t
9153 task_get_dyld_image_infos(__unused task_t task,
9154 __unused dyld_kernel_image_info_array_t * dyld_images,
9155 __unused mach_msg_type_number_t * dyld_imagesCnt)
9156 {
9157 return KERN_NOT_SUPPORTED;
9158 }
9159
9160 kern_return_t
9161 task_register_dyld_shared_cache_image_info(task_t task,
9162 dyld_kernel_image_info_t cache_img,
9163 __unused boolean_t no_cache,
9164 __unused boolean_t private_cache)
9165 {
9166 if (task == NULL || task != current_task()) {
9167 return KERN_INVALID_TASK;
9168 }
9169
9170 kdebug_trace_dyld_internal(DBG_DYLD_UUID_SHARED_CACHE_A, &cache_img);
9171 return KERN_SUCCESS;
9172 }
9173
9174 kern_return_t
9175 task_register_dyld_set_dyld_state(__unused task_t task,
9176 __unused uint8_t dyld_state)
9177 {
9178 return KERN_NOT_SUPPORTED;
9179 }
9180
9181 kern_return_t
9182 task_register_dyld_get_process_state(__unused task_t task,
9183 __unused dyld_kernel_process_info_t * dyld_process_state)
9184 {
9185 return KERN_NOT_SUPPORTED;
9186 }
9187
9188 kern_return_t
9189 task_inspect(task_inspect_t task_insp, task_inspect_flavor_t flavor,
9190 task_inspect_info_t info_out, mach_msg_type_number_t *size_in_out)
9191 {
9192 #if CONFIG_PERVASIVE_CPI
9193 task_t task = (task_t)task_insp;
9194 kern_return_t kr = KERN_SUCCESS;
9195 mach_msg_type_number_t size;
9196
9197 if (task == TASK_NULL) {
9198 return KERN_INVALID_ARGUMENT;
9199 }
9200
9201 size = *size_in_out;
9202
9203 switch (flavor) {
9204 case TASK_INSPECT_BASIC_COUNTS: {
9205 struct task_inspect_basic_counts *bc =
9206 (struct task_inspect_basic_counts *)info_out;
9207 struct recount_usage stats = { 0 };
9208 if (size < TASK_INSPECT_BASIC_COUNTS_COUNT) {
9209 kr = KERN_INVALID_ARGUMENT;
9210 break;
9211 }
9212
9213 recount_sum(&recount_task_plan, task->tk_recount.rtk_lifetime, &stats);
9214 bc->instructions = recount_usage_instructions(&stats);
9215 bc->cycles = recount_usage_cycles(&stats);
9216 size = TASK_INSPECT_BASIC_COUNTS_COUNT;
9217 break;
9218 }
9219 default:
9220 kr = KERN_INVALID_ARGUMENT;
9221 break;
9222 }
9223
9224 if (kr == KERN_SUCCESS) {
9225 *size_in_out = size;
9226 }
9227 return kr;
9228 #else /* CONFIG_PERVASIVE_CPI */
9229 #pragma unused(task_insp, flavor, info_out, size_in_out)
9230 return KERN_NOT_SUPPORTED;
9231 #endif /* !CONFIG_PERVASIVE_CPI */
9232 }
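/*
 * Illustrative sketch (hypothetical caller, not part of the original file):
 * reading the lifetime instruction and cycle counts of the current task on a
 * CONFIG_PERVASIVE_CPI kernel:
 *
 *	struct task_inspect_basic_counts bc;
 *	mach_msg_type_number_t cnt = TASK_INSPECT_BASIC_COUNTS_COUNT;
 *	kern_return_t kr = task_inspect((task_inspect_t)current_task(),
 *	    TASK_INSPECT_BASIC_COUNTS, (task_inspect_info_t)&bc, &cnt);
 */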
9233
9234 #if CONFIG_SECLUDED_MEMORY
9235 int num_tasks_can_use_secluded_mem = 0;
9236
9237 void
9238 task_set_can_use_secluded_mem(
9239 task_t task,
9240 boolean_t can_use_secluded_mem)
9241 {
9242 if (!task->task_could_use_secluded_mem) {
9243 return;
9244 }
9245 task_lock(task);
9246 task_set_can_use_secluded_mem_locked(task, can_use_secluded_mem);
9247 task_unlock(task);
9248 }
9249
9250 void
9251 task_set_can_use_secluded_mem_locked(
9252 task_t task,
9253 boolean_t can_use_secluded_mem)
9254 {
9255 assert(task->task_could_use_secluded_mem);
9256 if (can_use_secluded_mem &&
9257 secluded_for_apps && /* global boot-arg */
9258 !task->task_can_use_secluded_mem) {
9259 assert(num_tasks_can_use_secluded_mem >= 0);
9260 OSAddAtomic(+1,
9261 (volatile SInt32 *)&num_tasks_can_use_secluded_mem);
9262 task->task_can_use_secluded_mem = TRUE;
9263 } else if (!can_use_secluded_mem &&
9264 task->task_can_use_secluded_mem) {
9265 assert(num_tasks_can_use_secluded_mem > 0);
9266 OSAddAtomic(-1,
9267 (volatile SInt32 *)&num_tasks_can_use_secluded_mem);
9268 task->task_can_use_secluded_mem = FALSE;
9269 }
9270 }
9271
9272 void
9273 task_set_could_use_secluded_mem(
9274 task_t task,
9275 boolean_t could_use_secluded_mem)
9276 {
9277 task->task_could_use_secluded_mem = !!could_use_secluded_mem;
9278 }
9279
9280 void
9281 task_set_could_also_use_secluded_mem(
9282 task_t task,
9283 boolean_t could_also_use_secluded_mem)
9284 {
9285 task->task_could_also_use_secluded_mem = !!could_also_use_secluded_mem;
9286 }
9287
9288 boolean_t
9289 task_can_use_secluded_mem(
9290 task_t task,
9291 boolean_t is_alloc)
9292 {
9293 if (task->task_can_use_secluded_mem) {
9294 assert(task->task_could_use_secluded_mem);
9295 assert(num_tasks_can_use_secluded_mem > 0);
9296 return TRUE;
9297 }
9298 if (task->task_could_also_use_secluded_mem &&
9299 num_tasks_can_use_secluded_mem > 0) {
9300 assert(num_tasks_can_use_secluded_mem > 0);
9301 return TRUE;
9302 }
9303
9304 /*
9305 * If a single task is using more than some large amount of
9306 * memory (i.e. secluded_shutoff_trigger) and is approaching
9307 * its task limit, allow it to dip into secluded and begin
9308 * suppression of rebuilding secluded memory until that task exits.
9309 */
9310 if (is_alloc && secluded_shutoff_trigger != 0) {
9311 uint64_t phys_used = get_task_phys_footprint(task);
9312 uint64_t limit = get_task_phys_footprint_limit(task);
9313 if (phys_used > secluded_shutoff_trigger &&
9314 limit > secluded_shutoff_trigger &&
9315 phys_used > limit - secluded_shutoff_headroom) {
9316 start_secluded_suppression(task);
9317 return TRUE;
9318 }
9319 }
9320
9321 return FALSE;
9322 }
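/*
 * Worked example (illustrative numbers): with secluded_shutoff_trigger at
 * 500 MB, secluded_shutoff_headroom at 100 MB, and a task footprint limit of
 * 2 GB, a task whose physical footprint exceeds 1.9 GB (limit - headroom)
 * satisfies all three conditions above, so its allocations may dip into
 * secluded memory and secluded-pool rebuilding stays suppressed until the
 * task exits.
 */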
9323
9324 boolean_t
9325 task_could_use_secluded_mem(
9326 task_t task)
9327 {
9328 return task->task_could_use_secluded_mem;
9329 }
9330
9331 boolean_t
9332 task_could_also_use_secluded_mem(
9333 task_t task)
9334 {
9335 return task->task_could_also_use_secluded_mem;
9336 }
9337 #endif /* CONFIG_SECLUDED_MEMORY */
9338
9339 queue_head_t *
9340 task_io_user_clients(task_t task)
9341 {
9342 return &task->io_user_clients;
9343 }
9344
9345 void
9346 task_set_message_app_suspended(task_t task, boolean_t enable)
9347 {
9348 task->message_app_suspended = enable;
9349 }
9350
9351 void
9352 task_copy_fields_for_exec(task_t dst_task, task_t src_task)
9353 {
9354 dst_task->vtimers = src_task->vtimers;
9355 }
9356
9357 #if DEVELOPMENT || DEBUG
9358 int vm_region_footprint = 0;
9359 #endif /* DEVELOPMENT || DEBUG */
9360
9361 boolean_t
9362 task_self_region_footprint(void)
9363 {
9364 #if DEVELOPMENT || DEBUG
9365 if (vm_region_footprint) {
9366 /* system-wide override */
9367 return TRUE;
9368 }
9369 #endif /* DEVELOPMENT || DEBUG */
9370 return current_task()->task_region_footprint;
9371 }
9372
9373 void
9374 task_self_region_footprint_set(
9375 boolean_t newval)
9376 {
9377 task_t curtask;
9378
9379 curtask = current_task();
9380 task_lock(curtask);
9381 if (newval) {
9382 curtask->task_region_footprint = TRUE;
9383 } else {
9384 curtask->task_region_footprint = FALSE;
9385 }
9386 task_unlock(curtask);
9387 }
9388
9389 int
9390 task_self_region_info_flags(void)
9391 {
9392 return current_task()->task_region_info_flags;
9393 }
9394
9395 kern_return_t
9396 task_self_region_info_flags_set(
9397 int newval)
9398 {
9399 task_t curtask;
9400 kern_return_t err = KERN_SUCCESS;
9401
9402 curtask = current_task();
9403 task_lock(curtask);
9404 curtask->task_region_info_flags = newval;
9405 /* check for overflow (flag added without increasing bitfield size?) */
9406 if (curtask->task_region_info_flags != newval) {
9407 err = KERN_INVALID_ARGUMENT;
9408 }
9409 task_unlock(curtask);
9410
9411 return err;
9412 }
9413
9414 void
9415 task_set_darkwake_mode(task_t task, boolean_t set_mode)
9416 {
9417 assert(task);
9418
9419 task_lock(task);
9420
9421 if (set_mode) {
9422 task->t_flags |= TF_DARKWAKE_MODE;
9423 } else {
9424 task->t_flags &= ~(TF_DARKWAKE_MODE);
9425 }
9426
9427 task_unlock(task);
9428 }
9429
9430 boolean_t
9431 task_get_darkwake_mode(task_t task)
9432 {
9433 assert(task);
9434 return (task->t_flags & TF_DARKWAKE_MODE) != 0;
9435 }
9436
9437 /*
9438 * Set task default behavior for EXC_GUARD variants that have settable behavior.
9439 *
9440 * Platform binaries typically have one behavior, third parties another -
9441 * but there are special exception we may need to account for.
9442 */
9443 void
9444 task_set_exc_guard_default(
9445 task_t task,
9446 const char *name,
9447 unsigned long namelen,
9448 boolean_t is_simulated,
9449 uint32_t platform,
9450 uint32_t sdk)
9451 {
9452 if (task_get_platform_restrictions_version(task) >= 1) {
9453 /* set exc guard default behavior for platform restrictions binaries */
9454 task->task_exc_guard = (task_exc_guard_default & TASK_EXC_GUARD_ALL);
9455
9456 if (1 == task_pid(task)) {
9457 /* special flags for inittask - deliver every instance as a corpse */
9458 task->task_exc_guard = _TASK_EXC_GUARD_ALL_CORPSE;
9459 } else if (task_exc_guard_default & TASK_EXC_GUARD_HONOR_NAMED_DEFAULTS) {
9460 /* honor by-name default setting overrides */
9461
9462 int count = sizeof(task_exc_guard_named_defaults) / sizeof(struct task_exc_guard_named_default);
9463
9464 for (int i = 0; i < count; i++) {
9465 const struct task_exc_guard_named_default *named_default =
9466 &task_exc_guard_named_defaults[i];
9467 if (strncmp(named_default->name, name, namelen) == 0 &&
9468 strlen(named_default->name) == namelen) {
9469 task->task_exc_guard = named_default->behavior;
9470 break;
9471 }
9472 }
9473 }
9474 } else {
9475 /* set exc guard default behavior for third-party code */
9476 task->task_exc_guard = ((task_exc_guard_default >> TASK_EXC_GUARD_THIRD_PARTY_DEFAULT_SHIFT) & TASK_EXC_GUARD_ALL);
9477 }
9478
9479 if (is_simulated) {
9480 /* If simulated and built against pre-iOS 15 SDK, disable all EXC_GUARD */
9481 if ((platform == PLATFORM_IOSSIMULATOR && sdk < 0xf0000) ||
9482 (platform == PLATFORM_TVOSSIMULATOR && sdk < 0xf0000) ||
9483 (platform == PLATFORM_WATCHOSSIMULATOR && sdk < 0x80000)) {
9484 task->task_exc_guard = TASK_EXC_GUARD_NONE;
9485 }
9486 }
9487 }
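/*
 * Illustrative sketch (hypothetical entry; the real table is defined
 * elsewhere in this file): the by-name override table scanned above pairs a
 * process name with a behavior bitmask, and the comparison requires an exact
 * length match:
 *
 *	static const struct task_exc_guard_named_default
 *	task_exc_guard_named_defaults[] = {
 *		{ .name = "examplebinary", .behavior = TASK_EXC_GUARD_NONE },
 *	};
 */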
9488
9489 kern_return_t
9490 task_get_exc_guard_behavior(
9491 task_t task,
9492 task_exc_guard_behavior_t *behaviorp)
9493 {
9494 if (task == TASK_NULL) {
9495 return KERN_INVALID_TASK;
9496 }
9497 *behaviorp = task->task_exc_guard;
9498 return KERN_SUCCESS;
9499 }
9500
9501 kern_return_t
9502 task_set_exc_guard_behavior(
9503 task_t task,
9504 task_exc_guard_behavior_t new_behavior)
9505 {
9506 if (task == TASK_NULL) {
9507 return KERN_INVALID_TASK;
9508 }
9509 if (new_behavior & ~TASK_EXC_GUARD_ALL) {
9510 return KERN_INVALID_VALUE;
9511 }
9512
9513 /* limit setting to that allowed for this config */
9514 new_behavior = new_behavior & task_exc_guard_config_mask;
9515
9516 #if !defined (DEBUG) && !defined (DEVELOPMENT)
9517 /* On release kernels, only allow _upgrading_ exc guard behavior */
9518 task_exc_guard_behavior_t cur_behavior;
9519
9520 os_atomic_rmw_loop(&task->task_exc_guard, cur_behavior, new_behavior, relaxed, {
9521 if ((cur_behavior & task_exc_guard_no_unset_mask) & ~(new_behavior & task_exc_guard_no_unset_mask)) {
9522 os_atomic_rmw_loop_give_up(return KERN_DENIED);
9523 }
9524
9525 if ((new_behavior & task_exc_guard_no_set_mask) & ~(cur_behavior & task_exc_guard_no_set_mask)) {
9526 os_atomic_rmw_loop_give_up(return KERN_DENIED);
9527 }
9528
9529 /* no restrictions on CORPSE bit */
9530 });
9531 #else
9532 task->task_exc_guard = new_behavior;
9533 #endif
9534 return KERN_SUCCESS;
9535 }
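/*
 * Illustrative restatement (not part of the original file) of the
 * release-kernel rules above: a behavior bit in task_exc_guard_no_unset_mask
 * cannot be cleared once set, a bit in task_exc_guard_no_set_mask cannot be
 * newly set, and the CORPSE bit may be toggled freely. So dropping an
 * already-set fatal bit returns KERN_DENIED, while adding further permitted
 * delivery bits succeeds.
 */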
9536
9537 kern_return_t
9538 task_set_corpse_forking_behavior(task_t task, task_corpse_forking_behavior_t behavior)
9539 {
9540 #if DEVELOPMENT || DEBUG
9541 if (task == TASK_NULL) {
9542 return KERN_INVALID_TASK;
9543 }
9544
9545 task_lock(task);
9546 if (behavior & TASK_CORPSE_FORKING_DISABLED_MEM_DIAG) {
9547 task->t_flags |= TF_NO_CORPSE_FORKING;
9548 } else {
9549 task->t_flags &= ~TF_NO_CORPSE_FORKING;
9550 }
9551 task_unlock(task);
9552
9553 return KERN_SUCCESS;
9554 #else
9555 (void)task;
9556 (void)behavior;
9557 return KERN_NOT_SUPPORTED;
9558 #endif
9559 }
9560
9561 boolean_t
9562 task_corpse_forking_disabled(task_t task)
9563 {
9564 boolean_t disabled = FALSE;
9565
9566 task_lock(task);
9567 disabled = (task->t_flags & TF_NO_CORPSE_FORKING);
9568 task_unlock(task);
9569
9570 return disabled;
9571 }
9572
9573 #if __arm64__
9574 extern int legacy_footprint_entitlement_mode;
9575 extern void memorystatus_act_on_legacy_footprint_entitlement(struct proc *, boolean_t);
9576 extern void memorystatus_act_on_ios13extended_footprint_entitlement(struct proc *);
9577
9578
9579 void
9580 task_set_legacy_footprint(
9581 task_t task)
9582 {
9583 task_lock(task);
9584 task->task_legacy_footprint = TRUE;
9585 task_unlock(task);
9586 }
9587
9588 void
9589 task_set_extra_footprint_limit(
9590 task_t task)
9591 {
9592 if (task->task_extra_footprint_limit) {
9593 return;
9594 }
9595 task_lock(task);
9596 if (task->task_extra_footprint_limit) {
9597 task_unlock(task);
9598 return;
9599 }
9600 task->task_extra_footprint_limit = TRUE;
9601 task_unlock(task);
9602 memorystatus_act_on_legacy_footprint_entitlement(get_bsdtask_info(task), TRUE);
9603 }
9604
9605 void
9606 task_set_ios13extended_footprint_limit(
9607 task_t task)
9608 {
9609 if (task->task_ios13extended_footprint_limit) {
9610 return;
9611 }
9612 task_lock(task);
9613 if (task->task_ios13extended_footprint_limit) {
9614 task_unlock(task);
9615 return;
9616 }
9617 task->task_ios13extended_footprint_limit = TRUE;
9618 task_unlock(task);
9619 memorystatus_act_on_ios13extended_footprint_entitlement(get_bsdtask_info(task));
9620 }
9621 #endif /* __arm64__ */
9622
9623 static inline ledger_amount_t
9624 task_ledger_get_balance(
9625 ledger_t ledger,
9626 int ledger_idx)
9627 {
9628 ledger_amount_t amount;
9629 amount = 0;
9630 ledger_get_balance(ledger, ledger_idx, &amount);
9631 return amount;
9632 }
9633
9634 /*
9635 * Gather the amount of memory counted in a task's footprint due to
9636 * being in a specific set of ledgers.
9637 */
9638 void
9639 task_ledgers_footprint(
9640 ledger_t ledger,
9641 ledger_amount_t *ledger_resident,
9642 ledger_amount_t *ledger_compressed)
9643 {
9644 *ledger_resident = 0;
9645 *ledger_compressed = 0;
9646
9647 /* purgeable non-volatile memory */
9648 *ledger_resident += task_ledger_get_balance(ledger, task_ledgers.purgeable_nonvolatile);
9649 *ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.purgeable_nonvolatile_compressed);
9650
9651 /* "default" tagged memory */
9652 *ledger_resident += task_ledger_get_balance(ledger, task_ledgers.tagged_footprint);
9653 *ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.tagged_footprint_compressed);
9654
9655 /* "network" currently never counts in the footprint... */
9656
9657 /* "media" tagged memory */
9658 *ledger_resident += task_ledger_get_balance(ledger, task_ledgers.media_footprint);
9659 *ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.media_footprint_compressed);
9660
9661 /* "graphics" tagged memory */
9662 *ledger_resident += task_ledger_get_balance(ledger, task_ledgers.graphics_footprint);
9663 *ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.graphics_footprint_compressed);
9664
9665 /* "neural" tagged memory */
9666 *ledger_resident += task_ledger_get_balance(ledger, task_ledgers.neural_footprint);
9667 *ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.neural_footprint_compressed);
9668 }
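/*
 * Illustrative sketch (not part of the original file): computing a task's
 * total tagged footprint contribution from its ledger:
 *
 *	ledger_amount_t resident, compressed;
 *	task_ledgers_footprint(task->ledger, &resident, &compressed);
 *	// total footprint contribution == resident + compressed
 */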
9669
9670 #if CONFIG_MEMORYSTATUS
9671 void
9672 task_ledger_settle_dirty_time(task_t t)
9673 {
9674 task_lock(t);
9675 task_ledger_settle_dirty_time_locked(t);
9676 task_unlock(t);
9677 }
9678
9679 /*
9680 * Credit any outstanding task dirty time to the ledger.
9681 * memstat_dirty_start is pushed forward to prevent any possibility of double
9682 * counting, making it safe to call this as often as necessary to ensure that
9683 * anyone reading the ledger gets up-to-date information.
9684 */
9685 void
9686 task_ledger_settle_dirty_time_locked(task_t t)
9687 {
9688 task_lock_assert_owned(t);
9689
9690 uint64_t start = t->memstat_dirty_start;
9691 if (start) {
9692 uint64_t now = mach_absolute_time();
9693
9694 uint64_t duration;
9695 absolutetime_to_nanoseconds(now - start, &duration);
9696
9697 ledger_t ledger = get_task_ledger(t);
9698 ledger_credit(ledger, task_ledgers.memorystatus_dirty_time, duration);
9699
9700 t->memstat_dirty_start = now;
9701 }
9702 }
9703 #endif /* CONFIG_MEMORYSTATUS */
9704
9705 void
9706 task_ledger_settle(task_t t)
9707 {
9708 #if CONFIG_MEMORYSTATUS
9709 task_lock(t);
9710 /* Settle memorystatus dirty time */
9711 task_ledger_settle_dirty_time_locked(t);
9712 task_unlock(t);
9713 #endif /* CONFIG_MEMORYSTATUS */
9714
9715 #if CONFIG_DEFERRED_RECLAIM
9716 vm_deferred_reclamation_settle_ledger(t);
9717 #endif /* CONFIG_DEFERRED_RECLAIM */
9718 }
9719
9720 void
9721 task_set_memory_ownership_transfer(
9722 task_t task,
9723 boolean_t value)
9724 {
9725 task_lock(task);
9726 task->task_can_transfer_memory_ownership = !!value;
9727 task_unlock(task);
9728 }
9729
9730 #if DEVELOPMENT || DEBUG
9731
9732 void
9733 task_set_no_footprint_for_debug(task_t task, boolean_t value)
9734 {
9735 task_lock(task);
9736 task->task_no_footprint_for_debug = !!value;
9737 task_unlock(task);
9738 }
9739
9740 int
9741 task_get_no_footprint_for_debug(task_t task)
9742 {
9743 return task->task_no_footprint_for_debug;
9744 }
9745
9746 #endif /* DEVELOPMENT || DEBUG */
9747
9748 void
9749 task_copy_vmobjects(task_t task, vm_object_query_t query, size_t len, size_t *num)
9750 {
9751 vm_object_t find_vmo;
9752 size_t size = 0;
9753
9754 /*
9755 * Allocate a save area for FP state before taking task_objq lock,
9756 * if necessary, to ensure that VM_KERNEL_ADDRHASH() doesn't cause
9757 * an FP state allocation while holding VM locks.
9758 */
9759 ml_fp_save_area_prealloc();
9760
9761 task_objq_lock(task);
9762 if (query != NULL) {
9763 queue_iterate(&task->task_objq, find_vmo, vm_object_t, task_objq)
9764 {
9765 vm_object_query_t p = &query[size++];
9766
9767 /* make sure to not overrun */
9768 if (size * sizeof(vm_object_query_data_t) > len) {
9769 --size;
9770 break;
9771 }
9772
9773 bzero(p, sizeof(*p));
9774 p->object_id = (vm_object_id_t) VM_KERNEL_ADDRHASH(find_vmo);
9775 p->virtual_size = find_vmo->internal ? find_vmo->vo_size : 0;
9776 p->resident_size = find_vmo->resident_page_count * PAGE_SIZE;
9777 p->wired_size = find_vmo->wired_page_count * PAGE_SIZE;
9778 p->reusable_size = find_vmo->reusable_page_count * PAGE_SIZE;
9779 p->vo_no_footprint = find_vmo->vo_no_footprint;
9780 p->vo_ledger_tag = find_vmo->vo_ledger_tag;
9781 p->purgable = find_vmo->purgable;
9782
9783 if (find_vmo->internal && find_vmo->pager_created && find_vmo->pager != NULL) {
9784 p->compressed_size = vm_compressor_pager_get_count(find_vmo->pager) * PAGE_SIZE;
9785 } else {
9786 p->compressed_size = 0;
9787 }
9788 }
9789 } else {
9790 size = (size_t)task->task_owned_objects;
9791 }
9792 task_objq_unlock(task);
9793
9794 *num = size;
9795 }
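/*
 * Sizing example (illustrative): if len is 3 * sizeof(vm_object_query_data_t),
 * the overrun guard above stops the iteration after the third owned object
 * even when the task owns more, and *num reports the number of entries
 * actually filled. Passing query == NULL instead returns the count of owned
 * objects without copying anything.
 */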
9796
9797 void
9798 task_get_owned_vmobjects(task_t task, size_t buffer_size, vmobject_list_output_t buffer, size_t* output_size, size_t* entries)
9799 {
9800 assert(output_size);
9801 assert(entries);
9802
9803 /* copy the vmobjects and vmobject data out of the task */
9804 if (buffer_size == 0) {
9805 task_copy_vmobjects(task, NULL, 0, entries);
9806 *output_size = (*entries > 0) ? *entries * sizeof(vm_object_query_data_t) + sizeof(*buffer) : 0;
9807 } else {
9808 assert(buffer);
9809 task_copy_vmobjects(task, &buffer->data[0], buffer_size - sizeof(*buffer), entries);
9810 buffer->entries = (uint64_t)*entries;
9811 *output_size = *entries * sizeof(vm_object_query_data_t) + sizeof(*buffer);
9812 }
9813 }
9814
9815 static void
9816 task_store_owned_vmobject_info(task_t to_task, task_t from_task)
9817 {
9818 size_t buffer_size;
9819 vmobject_list_output_t buffer;
9820 size_t output_size;
9821 size_t entries;
9822
9823 /* get the size, allocate a buffer, and populate */
9824 entries = 0;
9825 output_size = 0;
9826 task_get_owned_vmobjects(from_task, 0, NULL, &output_size, &entries);
9827
9828 if (output_size) {
9829 buffer_size = output_size;
9830 buffer = kalloc_data(buffer_size, Z_WAITOK);
9831
9832 if (buffer) {
9833 entries = 0;
9834 output_size = 0;
9835
9836 task_get_owned_vmobjects(from_task, buffer_size, buffer, &output_size, &entries);
9837
9838 task_lock(to_task);
9839
9840 if (!entries || (to_task->corpse_vmobject_list != NULL)) {
9841 kfree_data(buffer, buffer_size);
9842 task_unlock(to_task);
9843 return;
9844 }
9845
9846 to_task->corpse_vmobject_list = buffer;
9847 to_task->corpse_vmobject_list_size = buffer_size;
9848
9849 task_unlock(to_task);
9850 }
9851 }
9852 }
9853
9854 void
9855 task_set_filter_msg_flag(
9856 task_t task,
9857 boolean_t flag)
9858 {
9859 assert(task != TASK_NULL);
9860
9861 if (flag) {
9862 task_ro_flags_set(task, TFRO_FILTER_MSG);
9863 } else {
9864 task_ro_flags_clear(task, TFRO_FILTER_MSG);
9865 }
9866 }
9867
9868 boolean_t
9869 task_get_filter_msg_flag(
9870 task_t task)
9871 {
9872 if (!task) {
9873 return false;
9874 }
9875
9876 return (task_ro_flags_get(task) & TFRO_FILTER_MSG) ? TRUE : FALSE;
9877 }
9878 bool
9879 task_is_exotic(
9880 task_t task)
9881 {
9882 if (task == TASK_NULL) {
9883 return false;
9884 }
9885 return vm_map_is_exotic(get_task_map(task));
9886 }
9887
9888 bool
9889 task_is_alien(
9890 task_t task)
9891 {
9892 if (task == TASK_NULL) {
9893 return false;
9894 }
9895 return vm_map_is_alien(get_task_map(task));
9896 }
9897
9898
9899
9900 #if CONFIG_MACF
9901 uint8_t *
9902 mac_task_get_mach_filter_mask(task_t task)
9903 {
9904 assert(task);
9905 return task_get_mach_trap_filter_mask(task);
9906 }
9907
9908 uint8_t *
9909 mac_task_get_kobj_filter_mask(task_t task)
9910 {
9911 assert(task);
9912 return task_get_mach_kobj_filter_mask(task);
9913 }
9914
9915 /* Set the filter mask for Mach traps. */
9916 void
9917 mac_task_set_mach_filter_mask(task_t task, uint8_t *maskptr)
9918 {
9919 assert(task);
9920
9921 task_set_mach_trap_filter_mask(task, maskptr);
9922 }
9923
9924 /* Set the filter mask for kobject msgs. */
9925 void
9926 mac_task_set_kobj_filter_mask(task_t task, uint8_t *maskptr)
9927 {
9928 assert(task);
9929
9930 task_set_mach_kobj_filter_mask(task, maskptr);
9931 }
9932
9933 /* Hook for mach trap/sc filter evaluation policy. */
9934 SECURITY_READ_ONLY_LATE(mac_task_mach_filter_cbfunc_t) mac_task_mach_trap_evaluate = NULL;
9935
9936 /* Hook for kobj message filter evaluation policy. */
9937 SECURITY_READ_ONLY_LATE(mac_task_kobj_filter_cbfunc_t) mac_task_kobj_msg_evaluate = NULL;
9938
9939 /* Set the callback hooks for the filtering policy. */
9940 int
9941 mac_task_register_filter_callbacks(
9942 const mac_task_mach_filter_cbfunc_t mach_cbfunc,
9943 const mac_task_kobj_filter_cbfunc_t kobj_cbfunc)
9944 {
9945 if (mach_cbfunc != NULL) {
9946 if (mac_task_mach_trap_evaluate != NULL) {
9947 return KERN_FAILURE;
9948 }
9949 mac_task_mach_trap_evaluate = mach_cbfunc;
9950 }
9951 if (kobj_cbfunc != NULL) {
9952 if (mac_task_kobj_msg_evaluate != NULL) {
9953 return KERN_FAILURE;
9954 }
9955 mac_task_kobj_msg_evaluate = kobj_cbfunc;
9956 }
9957
9958 return KERN_SUCCESS;
9959 }
9960 #endif /* CONFIG_MACF */
9961
9962 #if CONFIG_ROSETTA
9963 bool
9964 task_is_translated(task_t task)
9965 {
9966 extern boolean_t proc_is_translated(struct proc* p);
9967 return task && proc_is_translated(get_bsdtask_info(task));
9968 }
9969 #endif
9970
9971 /* Task runtime security mitigations configuration. */
9972 #define TASK_SECURITY_CONFIG_HELPER_DEFINE(suffix, checked) \
9973 bool task_has_##suffix(task_t task) \
9974 { \
9975 assert(task); \
9976 return (task->security_config. suffix); \
9977 } \
9978 \
9979 void task_set_##suffix(task_t task) \
9980 { \
9981 assert(task);\
9982 task->security_config. suffix = true; \
9983 } \
9984 \
9985 void task_clear_##suffix(task_t task) \
9986 { \
9987 assert(task);\
9988 task->security_config. suffix = false; \
9989 }
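/*
 * Expansion example (illustrative): TASK_SECURITY_CONFIG_HELPER_DEFINE(tpro, true)
 * defines task_has_tpro(), task_set_tpro() and task_clear_tpro(), which read,
 * set and clear the task's security_config.tpro bit.
 */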
9990
9991 uint32_t
9992 task_get_security_config(task_t task)
9993 {
9994 assert(task);
9995 return (uint32_t)(task->security_config.value);
9996 }
9997
9998 TASK_SECURITY_CONFIG_HELPER_DEFINE(hardened_heap, true)
9999 TASK_SECURITY_CONFIG_HELPER_DEFINE(tpro, true)
10000
10001 uint8_t
10002 task_get_platform_restrictions_version(task_t task)
10003 {
10004 assert(task);
10005 return task->security_config.platform_restrictions_version;
10006 }
10007
10008 void
10009 task_set_platform_restrictions_version(task_t task, uint64_t version)
10010 {
10011 assert(task);
10012 /* platform_restrictions_version is a 3-bit field */
10013 if (version < 8) {
10014 task->security_config.platform_restrictions_version = (uint8_t)version;
10015 }
10016 }
10017
10018 uint8_t
10019 task_get_hardened_process_version(task_t task)
10020 {
10021 assert(task);
10022 return task->security_config.hardened_process_version;
10023 }
10024 void
10025 task_set_hardened_process_version(task_t task, uint64_t version)
10026 {
10027 assert(task);
10028 task->security_config.hardened_process_version = (uint8_t)version;
10029 }
10030
10031
10032
10033 #if __has_feature(ptrauth_calls)
10034 /* On FPAC, we want to deliver all PAC violations as fatal exceptions, regardless
10035 * of the enable_pac_exception boot-arg value or any other entitlements.
10036 * The only case where we allow non-fatal PAC exceptions on FPAC is for debugging,
10037 * which requires Developer Mode enabled.
10038 *
10039 * On non-FPAC hardware, we gate the decision behind entitlements and the
10040 * enable_pac_exception boot-arg.
10041 */
10042 extern int gARM_FEAT_FPAC;
10043 /*
10044 * Having the PAC_EXCEPTION_ENTITLEMENT entitlement means we always enforce all
10045 * of the PAC exception hardening: fatal exceptions and signed user state.
10046 */
10047 #define PAC_EXCEPTION_ENTITLEMENT "com.apple.private.pac.exception"
10048 /*
10049 * On non-FPAC hardware, when enable_pac_exception boot-arg is set to true,
10050 * processes can choose to get non-fatal PAC exception delivery by setting
10051 * the SKIP_PAC_EXCEPTION_ENTITLEMENT entitlement.
10052 */
10053 #define SKIP_PAC_EXCEPTION_ENTITLEMENT "com.apple.private.skip.pac.exception"
10054
10055 void
10056 task_set_pac_exception_fatal_flag(
10057 task_t task)
10058 {
10059 assert(task != TASK_NULL);
10060 bool pac_hardened_task = false;
10061 uint32_t set_flags = 0;
10062
10063 /*
10064 * We must not apply this security policy on tasks which have opted out of mach hardening to
10065 * avoid regressions in third party plugins and third party apps when using AMFI boot-args
10066 */
10067 ipc_space_policy_t pol = ipc_policy_for_task(task);
10068 bool platform_binary = pol & IPC_SPACE_POLICY_PLATFORM;
10069 #if XNU_TARGET_OS_OSX
10070 platform_binary &= !(pol & IPC_SPACE_POLICY_OPTED_OUT);
10071 #endif /* XNU_TARGET_OS_OSX */
10072
10073 /*
10074 * On non-FPAC hardware, we allow gating PAC exceptions behind
10075 * SKIP_PAC_EXCEPTION_ENTITLEMENT and the boot-arg.
10076 */
10077 if (!gARM_FEAT_FPAC && enable_pac_exception &&
10078 IOTaskHasEntitlement(task, SKIP_PAC_EXCEPTION_ENTITLEMENT)) {
10079 return;
10080 }
10081
10082 if (IOTaskHasEntitlement(task, PAC_EXCEPTION_ENTITLEMENT) ||
10083 (task_get_platform_restrictions_version(task) >= 1)) {
10084 pac_hardened_task = true;
10085 set_flags |= TFRO_PAC_ENFORCE_USER_STATE;
10086 }
10087
10088 /* On non-FPAC hardware, gate the fatal property behind entitlements and boot-arg. */
10089 if (pac_hardened_task ||
10090 ((enable_pac_exception || gARM_FEAT_FPAC) && platform_binary)) {
10091 set_flags |= TFRO_PAC_EXC_FATAL;
10092 }
10093
10094 if (set_flags != 0) {
10095 task_ro_flags_set(task, set_flags);
10096 }
10097 }
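/*
 * Illustrative summary (restating the policy above, not part of the original
 * file):
 *
 *	non-FPAC + enable_pac_exception + skip entitlement -> nothing set
 *	PAC entitlement, or platform restrictions >= 1     -> fatal + signed user state
 *	otherwise, platform binary on FPAC hardware (or
 *	with the enable_pac_exception boot-arg)            -> fatal only
 */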
10098
10099 bool
10100 task_is_pac_exception_fatal(
10101 task_t task)
10102 {
10103 assert(task != TASK_NULL);
10104 return !!(task_ro_flags_get(task) & TFRO_PAC_EXC_FATAL);
10105 }
10106 #endif /* __has_feature(ptrauth_calls) */
10107
10108 /*
10109 * FATAL_EXCEPTION_ENTITLEMENT, if present, will contain a list of
10110 * conditions for which access violations should deliver SIGKILL rather than
10111 * SIGSEGV. This is a hardening measure intended for use by applications
10112 * that are able to handle the stricter error handling behavior. Currently
10113 * this supports FATAL_EXCEPTION_ENTITLEMENT_JIT, which is documented in
10114 * user_fault_in_self_restrict_mode().
10115 */
10116 #define FATAL_EXCEPTION_ENTITLEMENT "com.apple.security.fatal-exceptions"
10117 #define FATAL_EXCEPTION_ENTITLEMENT_JIT "jit"
10118
10119
10120 void
10121 task_set_jit_flags(
10122 task_t task)
10123 {
10124 assert(task != TASK_NULL);
10125 if (IOTaskHasStringEntitlement(task, FATAL_EXCEPTION_ENTITLEMENT, FATAL_EXCEPTION_ENTITLEMENT_JIT)) {
10126 task_ro_flags_set(task, TFRO_JIT_EXC_FATAL);
10127 }
10128
10129 }
10130
10131 bool
10132 task_is_jit_exception_fatal(
10133 __unused task_t task)
10134 {
10135 #if !defined(XNU_PLATFORM_MacOSX)
10136 return true;
10137 #else
10138 assert(task != TASK_NULL);
10139 return !!(task_ro_flags_get(task) & TFRO_JIT_EXC_FATAL);
10140 #endif
10141 }
10142
10143 bool
10144 task_needs_user_signed_thread_state(
10145 task_t task)
10146 {
10147 assert(task != TASK_NULL);
10148 return !!(task_ro_flags_get(task) & TFRO_PAC_ENFORCE_USER_STATE);
10149 }
10150
10151 void
10152 task_set_tecs(task_t task)
10153 {
10154 if (task == TASK_NULL) {
10155 task = current_task();
10156 }
10157
10158 if (!machine_csv(CPUVN_CI)) {
10159 return;
10160 }
10161
10162 LCK_MTX_ASSERT(&task->lock, LCK_MTX_ASSERT_NOTOWNED);
10163
10164 task_lock(task);
10165
10166 task->t_flags |= TF_TECS;
10167
10168 thread_t thread;
10169 queue_iterate(&task->threads, thread, thread_t, task_threads) {
10170 machine_tecs(thread);
10171 }
10172 task_unlock(task);
10173 }
10174
10175 kern_return_t
task_test_sync_upcall(task_t task,ipc_port_t send_port)10176 task_test_sync_upcall(
10177 task_t task,
10178 ipc_port_t send_port)
10179 {
10180 #if DEVELOPMENT || DEBUG
10181 if (task != current_task() || !IPC_PORT_VALID(send_port)) {
10182 return KERN_INVALID_ARGUMENT;
10183 }
10184
10185 /* Block on sync kernel upcall on the given send port */
10186 mach_test_sync_upcall(send_port);
10187
10188 ipc_port_release_send(send_port);
10189 return KERN_SUCCESS;
10190 #else
10191 (void)task;
10192 (void)send_port;
10193 return KERN_NOT_SUPPORTED;
10194 #endif
10195 }
10196
10197 kern_return_t
task_test_async_upcall_propagation(task_t task,ipc_port_t send_port,int qos,int iotier)10198 task_test_async_upcall_propagation(
10199 task_t task,
10200 ipc_port_t send_port,
10201 int qos,
10202 int iotier)
10203 {
10204 #if DEVELOPMENT || DEBUG
10205 kern_return_t kr;
10206
10207 if (task != current_task() || !IPC_PORT_VALID(send_port)) {
10208 return KERN_INVALID_ARGUMENT;
10209 }
10210
10211 if (qos < THREAD_QOS_DEFAULT || qos > THREAD_QOS_USER_INTERACTIVE ||
10212 iotier < THROTTLE_LEVEL_START || iotier > THROTTLE_LEVEL_END) {
10213 return KERN_INVALID_ARGUMENT;
10214 }
10215
10216 struct thread_attr_for_ipc_propagation attr = {
10217 .tafip_iotier = iotier,
10218 .tafip_qos = qos
10219 };
10220
10221 /* Apply propagate attr to port */
10222 kr = ipc_port_propagate_thread_attr(send_port, attr);
10223 if (kr != KERN_SUCCESS) {
10224 return kr;
10225 }
10226
10227 thread_enable_send_importance(current_thread(), TRUE);
10228
10229 /* Perform an async kernel upcall on the given send port */
10230 mach_test_async_upcall(send_port);
10231 thread_enable_send_importance(current_thread(), FALSE);
10232
10233 ipc_port_release_send(send_port);
10234 return KERN_SUCCESS;
10235 #else
10236 (void)task;
10237 (void)send_port;
10238 (void)qos;
10239 (void)iotier;
10240 return KERN_NOT_SUPPORTED;
10241 #endif
10242 }
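
/*
 * Illustrative invocation of the test above (hypothetical values, chosen
 * from within the validated ranges):
 *
 *	kr = task_test_async_upcall_propagation(current_task(), port,
 *	    THREAD_QOS_USER_INITIATED, THROTTLE_LEVEL_TIER1);
 *
 * The attributes are stamped on the port before the upcall, so the kernel
 * upcall should observe the requested QoS and IO tier.
 */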

#if CONFIG_PROC_RESOURCE_LIMITS
mach_port_name_t
current_task_get_fatal_port_name(void)
{
	mach_port_t task_fatal_port = MACH_PORT_NULL;
	mach_port_name_t port_name = 0;

	task_fatal_port = task_allocate_fatal_port();

	if (task_fatal_port) {
		ipc_object_copyout(current_space(), task_fatal_port,
		    MACH_MSG_TYPE_PORT_SEND, IPC_OBJECT_COPYOUT_FLAGS_NONE,
		    NULL, &port_name);
	}

	return port_name;
}
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

#if defined(__x86_64__)
bool
curtask_get_insn_copy_optout(void)
{
	bool optout;
	task_t cur_task = current_task();

	task_lock(cur_task);
	optout = (cur_task->t_flags & TF_INSN_COPY_OPTOUT) ? true : false;
	task_unlock(cur_task);

	return optout;
}

void
curtask_set_insn_copy_optout(void)
{
	task_t cur_task = current_task();

	task_lock(cur_task);

	cur_task->t_flags |= TF_INSN_COPY_OPTOUT;

	thread_t thread;
	queue_iterate(&cur_task->threads, thread, thread_t, task_threads) {
		machine_thread_set_insn_copy_optout(thread);
	}
	task_unlock(cur_task);
}
#endif /* defined(__x86_64__) */

void
task_get_corpse_vmobject_list(task_t task, vmobject_list_output_t *list, size_t *list_size)
{
	assert(task);
	assert(list_size);

	*list = task->corpse_vmobject_list;
	*list_size = (size_t)task->corpse_vmobject_list_size;
}

__abortlike
static void
panic_proc_ro_task_backref_mismatch(task_t t, proc_ro_t ro)
{
	panic("proc_ro->task backref mismatch: t=%p, ro=%p, "
	    "proc_ro_task(ro)=%p", t, ro, proc_ro_task(ro));
}

proc_ro_t
task_get_ro(task_t t)
{
	proc_ro_t ro = (proc_ro_t)t->bsd_info_ro;

	zone_require_ro(ZONE_ID_PROC_RO, sizeof(struct proc_ro), ro);
	if (__improbable(proc_ro_task(ro) != t)) {
		panic_proc_ro_task_backref_mismatch(t, ro);
	}

	return ro;
}

uint32_t
task_ro_flags_get(task_t task)
{
	return task_get_ro(task)->t_flags_ro;
}

void
task_ro_flags_set(task_t task, uint32_t flags)
{
	zalloc_ro_update_field_atomic(ZONE_ID_PROC_RO, task_get_ro(task),
	    t_flags_ro, ZRO_ATOMIC_OR_32, flags);
}

void
task_ro_flags_clear(task_t task, uint32_t flags)
{
	zalloc_ro_update_field_atomic(ZONE_ID_PROC_RO, task_get_ro(task),
	    t_flags_ro, ZRO_ATOMIC_AND_32, ~flags);
}
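
/*
 * Illustrative sketch: callers pair these helpers to publish immutable
 * policy bits, as task_set_pac_exception_fatal_flag() does above:
 *
 *	task_ro_flags_set(task, TFRO_PAC_EXC_FATAL);
 *	...
 *	if (task_ro_flags_get(task) & TFRO_PAC_EXC_FATAL) { ... }
 *
 * The flags live in the read-only proc_ro zone, so updates must go through
 * zalloc_ro_update_field_atomic() rather than plain stores.
 */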

task_control_port_options_t
task_get_control_port_options(task_t task)
{
	return task_get_ro(task)->task_control_port_options;
}

/*
 * Intentionally static: calling this after the task has been started
 * has no effect, as control ports cannot go from immovable back to movable.
 */
static void
task_set_control_port_options(task_t task, task_control_port_options_t opts)
{
	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
	    task_control_port_options, &opts);
}

/*!
 * @function kdp_task_is_locked
 *
 * @abstract
 * Checks if the task is locked.
 *
 * @discussion
 * NOT SAFE: to be used only by the kernel debugger.
 *
 * @param task task to check
 *
 * @returns TRUE if the task is locked.
 */
boolean_t
kdp_task_is_locked(task_t task)
{
	return kdp_lck_mtx_lock_spin_is_acquired(&task->lock);
}

#if DEBUG || DEVELOPMENT
/**
 * Check whether a threshold limit is valid against the actual phys memory
 * limit. If the two are the same, race conditions may arise, so we have to
 * prevent that from happening.
 */
static diagthreshold_check_return
task_check_memorythreshold_is_valid(task_t task, uint64_t new_limit, bool is_diagnostics_value)
{
	int phys_limit_mb;
	kern_return_t ret_value;
	bool threshold_enabled;
	bool dummy;
	ret_value = ledger_is_diag_threshold_enabled(task->ledger, task_ledgers.phys_footprint, &threshold_enabled);
	if (ret_value != KERN_SUCCESS) {
		return ret_value;
	}
	if (is_diagnostics_value == true) {
		ret_value = task_get_phys_footprint_limit(task, &phys_limit_mb);
	} else {
		uint64_t diag_limit;
		ret_value = task_get_diag_footprint_limit_internal(task, &diag_limit, &dummy);
		phys_limit_mb = (int)(diag_limit >> 20);
	}
	if (ret_value != KERN_SUCCESS) {
		return ret_value;
	}
	if (phys_limit_mb == (int) new_limit) {
		if (threshold_enabled == false) {
			return THRESHOLD_IS_SAME_AS_LIMIT_FLAG_DISABLED;
		} else {
			return THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED;
		}
	}
	if (threshold_enabled == false) {
		return THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED;
	} else {
		return THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_ENABLED;
	}
}
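
/*
 * Worked example (illustrative): with a 100 MB phys-footprint limit and the
 * diag threshold enabled, requesting a new diagnostics threshold of 100 MB
 * yields THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED, while 150 MB yields
 * THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_ENABLED; the *_SAME_* results let
 * callers reject the racy equal-limit configuration.
 */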
#endif

#if CONFIG_EXCLAVES
kern_return_t
task_add_conclave(task_t task, void *vnode, int64_t off, const char *task_conclave_id)
{
	/*
	 * Only launchd or properly entitled tasks can attach tasks to
	 * conclaves.
	 */
	if (!exclaves_has_priv(current_task(), EXCLAVES_PRIV_CONCLAVE_SPAWN)) {
		return KERN_DENIED;
	}

	/*
	 * Only entitled tasks can have conclaves attached.
	 * Allow tasks which have the SPAWN privilege to also host conclaves.
	 * This allows xpc proxy to add a conclave before execing a daemon.
	 */
	if (!exclaves_has_priv_vnode(vnode, off, EXCLAVES_PRIV_CONCLAVE_HOST) &&
	    !exclaves_has_priv_vnode(vnode, off, EXCLAVES_PRIV_CONCLAVE_SPAWN)) {
		return KERN_DENIED;
	}

	return exclaves_conclave_attach(task_conclave_id, task);
}

kern_return_t
task_launch_conclave(mach_port_name_t port __unused)
{
	kern_return_t kr = KERN_FAILURE;
	assert3u(port, ==, MACH_PORT_NULL);
	exclaves_resource_t *conclave = task_get_conclave(current_task());
	if (conclave == NULL || exclaves_is_forwarding_resource(conclave)) {
		return kr;
	}

	kr = exclaves_conclave_launch(conclave);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	task_set_conclave_taint(current_task());

	return KERN_SUCCESS;
}

kern_return_t
task_inherit_conclave(task_t old_task, task_t new_task, void *vnode, int64_t off)
{
	if (old_task->conclave == NULL ||
	    !exclaves_conclave_is_attached(old_task->conclave)) {
		return KERN_SUCCESS;
	}

	/*
	 * Only launchd or properly entitled tasks can attach tasks to
	 * conclaves.
	 */
	if (!exclaves_has_priv(current_task(), EXCLAVES_PRIV_CONCLAVE_SPAWN)) {
		return KERN_DENIED;
	}

	/*
	 * Only entitled tasks can have conclaves attached.
	 */
	if (!exclaves_has_priv_vnode(vnode, off, EXCLAVES_PRIV_CONCLAVE_HOST)) {
		return KERN_DENIED;
	}

	return exclaves_conclave_inherit(old_task->conclave, old_task, new_task);
}

void
task_clear_conclave(task_t task)
{
	if (task->exclave_crash_info) {
		kfree_data(task->exclave_crash_info, CONCLAVE_CRASH_BUFFER_PAGECOUNT * PAGE_SIZE);
		task->exclave_crash_info = NULL;
	}

	if (task->conclave == NULL) {
		return;
	}

	/*
	 * XXX
	 * This should only fail if either the conclave is in an unexpected
	 * state (i.e. not ATTACHED) or if the wrong port is supplied.
	 * We should re-visit this and make sure we guarantee the above
	 * constraints.
	 */
	__assert_only kern_return_t ret =
	    exclaves_conclave_detach(task->conclave, task);
	assert3u(ret, ==, KERN_SUCCESS);
}

void
task_stop_conclave(task_t task, bool gather_crash_bt)
{
	thread_t thread = current_thread();

	if (task->conclave == NULL || exclaves_is_forwarding_resource(task->conclave)) {
		return;
	}

	if (task_should_panic_on_exit_due_to_conclave_taint(task)) {
		panic("Conclave tainted task %p terminated\n", task);
	}

	/* Stash the task on the current thread for conclave teardown */
	thread->conclave_stop_task = task;

	__assert_only kern_return_t ret =
	    exclaves_conclave_stop(task->conclave, gather_crash_bt);

	thread->conclave_stop_task = TASK_NULL;

	assert3u(ret, ==, KERN_SUCCESS);
}

void
task_suspend_conclave(task_t task)
{
	thread_t thread = current_thread();

	if (task->conclave == NULL || exclaves_is_forwarding_resource(task->conclave)) {
		return;
	}

	/* Stash the task on the current thread for conclave teardown */
	thread->conclave_stop_task = task;

	__assert_only kern_return_t ret =
	    exclaves_conclave_suspend(task->conclave);

	thread->conclave_stop_task = TASK_NULL;

	assert3u(ret, ==, KERN_SUCCESS);
}

void
task_resume_conclave(task_t task)
{
	thread_t thread = current_thread();

	if (task->conclave == NULL || exclaves_is_forwarding_resource(task->conclave)) {
		return;
	}

	/* Stash the task on the current thread for conclave teardown */
	thread->conclave_stop_task = task;

	__assert_only kern_return_t ret =
	    exclaves_conclave_resume(task->conclave);

	thread->conclave_stop_task = TASK_NULL;

	assert3u(ret, ==, KERN_SUCCESS);
}

kern_return_t
task_stop_conclave_upcall(void)
{
	task_t task = current_task();
	if (task->conclave == NULL || exclaves_is_forwarding_resource(task->conclave)) {
		return KERN_INVALID_TASK;
	}

	return exclaves_conclave_stop_upcall(task->conclave);
}

kern_return_t
task_stop_conclave_upcall_complete(void)
{
	task_t task = current_task();
	thread_t thread = current_thread();

	if (!(thread->th_exclaves_state & TH_EXCLAVES_STOP_UPCALL_PENDING)) {
		return KERN_SUCCESS;
	}

	assert3p(task->conclave, !=, NULL);

	return exclaves_conclave_stop_upcall_complete(task->conclave, task);
}

kern_return_t
task_suspend_conclave_upcall(uint64_t *scid_list, size_t scid_list_count)
{
	task_t task = current_task();
	thread_t thread;
	size_t scid_count = 0;
	kern_return_t kr;
	if (task->conclave == NULL || exclaves_is_forwarding_resource(task->conclave)) {
		return KERN_INVALID_TASK;
	}

	kr = task_hold_and_wait(task, false);

	task_lock(task);
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		if (thread->th_exclaves_state & TH_EXCLAVES_RPC) {
			scid_list[scid_count++] = thread->th_exclaves_ipc_ctx.scid;
			if (scid_count >= scid_list_count) {
				break;
			}
		}
	}

	task_unlock(task);
	return kr;
}

kern_return_t
task_crash_info_conclave_upcall(task_t task, const struct conclave_sharedbuffer_t *shared_buf,
    uint32_t length)
{
	if (task->conclave == NULL || exclaves_is_forwarding_resource(task->conclave)) {
		return KERN_INVALID_TASK;
	}

	/* Allocate a kernel buffer and memcpy the shared pages into it */
	int task_crash_info_buffer_size = 0;
	uint8_t *task_crash_info_buffer;

	if (!length) {
		printf("Conclave upcall: task_crash_info_conclave_upcall did not return any page addresses\n");
		return KERN_INVALID_ARGUMENT;
	}

	task_crash_info_buffer_size = CONCLAVE_CRASH_BUFFER_PAGECOUNT * PAGE_SIZE;
	assert3u(task_crash_info_buffer_size, >=, length);

	task_crash_info_buffer = kalloc_data(task_crash_info_buffer_size, Z_WAITOK);
	if (!task_crash_info_buffer) {
		panic("task_crash_info_conclave_upcall: cannot allocate buffer for task_info shared memory");
	}

	uint8_t *dst = task_crash_info_buffer;
	uint32_t remaining = length;
	for (size_t i = 0; i < CONCLAVE_CRASH_BUFFER_PAGECOUNT; i++) {
		if (remaining) {
			memcpy(dst, (uint8_t *)phystokv((pmap_paddr_t)shared_buf->physaddr[i]), PAGE_SIZE);
			remaining = (remaining >= PAGE_SIZE) ? remaining - PAGE_SIZE : 0;
			dst += PAGE_SIZE;
		}
	}

	task_lock(task);
	if (task->exclave_crash_info == NULL && task->active) {
		task->exclave_crash_info = task_crash_info_buffer;
		task->exclave_crash_info_length = length;
		task_crash_info_buffer = NULL;
	}
	task_unlock(task);

	if (task_crash_info_buffer) {
		kfree_data(task_crash_info_buffer, task_crash_info_buffer_size);
	}

	return KERN_SUCCESS;
}
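
/*
 * Worked example (illustrative, assuming 16 KB pages): for length = 20 KB,
 * the copy loop above copies two full pages; "remaining" goes
 * 20 KB -> 4 KB -> 0, and any later pages in the shared buffer are ignored.
 */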

exclaves_resource_t *
task_get_conclave(task_t task)
{
	return task->conclave;
}

extern boolean_t IOPMRootDomainGetWillShutdown(void);

/* Do not taint processes when they talk to a conclave, so the system does not panic when they exit. */
TUNABLE(bool, disable_conclave_taint, "disable_conclave_taint", true);
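
/*
 * Illustrative: booting with "disable_conclave_taint=0" re-enables tainting,
 * so a conclave-tainted task exiting outside of shutdown will hit the panic
 * in task_stop_conclave() via
 * task_should_panic_on_exit_due_to_conclave_taint() below.
 */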

static bool
task_should_panic_on_exit_due_to_conclave_taint(task_t task)
{
	/* Check if the boot-arg to disable conclave taint is set */
	if (disable_conclave_taint) {
		return false;
	}

	/* Check if the system is shutting down */
	if (IOPMRootDomainGetWillShutdown()) {
		return false;
	}

	return task_is_conclave_tainted(task);
}

static bool
task_is_conclave_tainted(task_t task)
{
	return (task->t_exclave_state & TES_CONCLAVE_TAINTED) != 0 &&
	    !(task->t_exclave_state & TES_CONCLAVE_UNTAINTABLE);
}

static void
task_set_conclave_taint(task_t task)
{
	os_atomic_or(&task->t_exclave_state, TES_CONCLAVE_TAINTED, relaxed);
}

void
task_set_conclave_untaintable(task_t task)
{
	os_atomic_or(&task->t_exclave_state, TES_CONCLAVE_UNTAINTABLE, relaxed);
}

void
task_add_conclave_crash_info(task_t task, void *crash_info_ptr)
{
	__block kern_return_t error = KERN_SUCCESS;
	tb_error_t tberr = TB_ERROR_SUCCESS;
	void *crash_info;
	uint32_t crash_info_length = 0;

	if (task->conclave == NULL || exclaves_is_forwarding_resource(task->conclave)) {
		return;
	}

	if (task->exclave_crash_info_length == 0) {
		return;
	}

	error = kcdata_add_container_marker(crash_info_ptr, KCDATA_TYPE_CONTAINER_BEGIN,
	    STACKSHOT_KCCONTAINER_EXCLAVES, 0);
	if (error != KERN_SUCCESS) {
		return;
	}

	crash_info = task->exclave_crash_info;
	crash_info_length = task->exclave_crash_info_length;

	tberr = stackshot_stackshotresult__unmarshal(crash_info,
	    (uint64_t)crash_info_length, ^(stackshot_stackshotresult_s result){
		error = stackshot_exclaves_process_stackshot(&result, crash_info_ptr, false);
		if (error != KERN_SUCCESS) {
			printf("task_add_conclave_crash_info: error processing stackshot result %d\n", error);
		}
	});
	if (tberr != TB_ERROR_SUCCESS) {
		printf("task_conclave_crash: task_add_conclave_crash_info could not unmarshal stackshot data 0x%x\n", tberr);
		error = KERN_FAILURE;
	}

	kcdata_add_container_marker(crash_info_ptr, KCDATA_TYPE_CONTAINER_END,
	    STACKSHOT_KCCONTAINER_EXCLAVES, 0);
}

#endif /* CONFIG_EXCLAVES */

/* defined in bsd/kern/kern_proc.c */
extern void proc_name(int pid, char *buf, int size);
extern const char *proc_best_name(struct proc *p);

void
task_procname(task_t task, char *buf, int size)
{
	proc_name(task_pid(task), buf, size);
}

const char *
task_best_name(task_t task)
{
	return proc_best_name(task_get_proc_raw(task));
}