/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * File:    kern/task.c
 * Author:  Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
 *          David Black
 *
 *      Task management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL). All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 * Copyright (c) 2005 SPARTA, Inc.
 */

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_priv.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <mach/mach_vm.h>
#include <mach/semaphore.h>
#include <mach/task_info.h>
#include <mach/task_inspect.h>
#include <mach/task_special_ports.h>
#include <mach/sdt.h>
#include <mach/mach_test_upcall.h>

#include <ipc/ipc_importance.h>
#include <ipc/ipc_types.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_hash.h>
#include <ipc/ipc_policy.h>

#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/coalition.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/kern_cdata.h>
#include <kern/processor.h>
#include <kern/recount.h>
#include <kern/sched_prim.h> /* for thread_wakeup */
#include <kern/ipc_tt.h>
#include <kern/host.h>
#include <kern/clock.h>
#include <kern/timer.h>
#include <kern/assert.h>
#include <kern/affinity.h>
#include <kern/exc_resource.h>
#include <kern/machine.h>
#include <kern/policy_internal.h>
#include <kern/restartable.h>
#include <kern/ipc_kobject.h>

#include <corpses/task_corpse.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif

#if CONFIG_PERVASIVE_CPI
#include <kern/monotonic.h>
#include <machine/monotonic.h>
#endif /* CONFIG_PERVASIVE_CPI */

#if CONFIG_EXCLAVES
#include "exclaves_boot.h"
#include "exclaves_resource.h"
#include "exclaves_inspection.h"
#include "exclaves_conclave.h"
#endif /* CONFIG_EXCLAVES */

#include <os/log.h>

#include <vm/pmap.h>
#include <vm/vm_map_xnu.h>
#include <vm/vm_kern_xnu.h> /* for kernel_map, ipc_kernel_map */
#include <vm/vm_pageout_xnu.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_xnu.h>
#include <vm/vm_compressor_pager_xnu.h>
#include <vm/vm_reclaim_xnu.h>
#include <vm/vm_compressor_xnu.h>

#include <sys/kdebug.h>
#include <sys/proc_ro.h>
#include <sys/resource.h>
#include <sys/signalvar.h> /* for coredump */
#include <sys/bsdtask_info.h>
#include <sys/kdebug_triage.h>
#include <sys/code_signing.h> /* for is_address_space_debugged */
#include <sys/reason.h>

/*
 * Exported interfaces
 */

#include <mach/task_server.h>
#include <mach/mach_host_server.h>
#include <mach/mach_port_server.h>

#include <vm/vm_shared_region_xnu.h>

#include <libkern/OSDebug.h>
#include <libkern/OSAtomic.h>
#include <libkern/section_keywords.h>

#include <mach-o/loader.h>
#include <kdp/kdp_dyld.h>

#include <kern/sfi.h> /* picks up ledger.h */

#if CONFIG_MACF
#include <security/mac_mach_internal.h>
#endif

#include <IOKit/IOBSD.h>
#include <kdp/processor_core.h>

#if defined (__arm64__)
#include <pexpert/arm64/board_config.h>
#endif

#include <string.h>

#if KPERF
extern int kpc_force_all_ctrs(task_t, int);
#endif

SECURITY_READ_ONLY_LATE(task_t) kernel_task;

int64_t next_taskuniqueid = 0;
const size_t task_alignment = _Alignof(struct task);
extern const size_t proc_alignment;
extern size_t proc_struct_size;
extern size_t proc_and_task_size;
size_t task_struct_size;

extern int large_corpse_count;

extern boolean_t proc_send_synchronous_EXC_RESOURCE(void *p);
extern boolean_t proc_is_simulated(const proc_t);

static void task_port_no_senders(ipc_port_t, mach_msg_type_number_t);
static void task_port_with_flavor_no_senders(ipc_port_t, mach_msg_type_number_t);
static void task_suspension_no_senders(ipc_port_t, mach_msg_type_number_t);
static inline void task_zone_init(void);

static void task_store_owned_vmobject_info(task_t to_task, task_t from_task);
static void task_set_control_port_options(task_t task, task_control_port_options_t opts);

#if CONFIG_EXCLAVES
static bool task_should_panic_on_exit_due_to_conclave_taint(task_t task);
static bool task_is_conclave_tainted(task_t task);
static void task_set_conclave_taint(task_t task);
kern_return_t task_crash_info_conclave_upcall(task_t task,
    const struct conclave_sharedbuffer_t *shared_buf, uint32_t length);
#endif /* CONFIG_EXCLAVES */

IPC_KOBJECT_DEFINE(IKOT_TASK_NAME,
    .iko_op_movable_send = true);
IPC_KOBJECT_DEFINE(IKOT_TASK_CONTROL,
    .iko_op_no_senders = task_port_no_senders,
    .iko_op_movable_send = true, /* see ipc_should_mark_immovable_send */
    .iko_op_label_free = ipc_kobject_label_free);
IPC_KOBJECT_DEFINE(IKOT_TASK_READ,
    .iko_op_no_senders = task_port_with_flavor_no_senders,
    .iko_op_label_free = ipc_kobject_label_free);
IPC_KOBJECT_DEFINE(IKOT_TASK_INSPECT,
    .iko_op_no_senders = task_port_with_flavor_no_senders);
IPC_KOBJECT_DEFINE(IKOT_TASK_RESUME,
    .iko_op_movable_send = true,
    .iko_op_no_senders = task_suspension_no_senders);

#if CONFIG_PROC_RESOURCE_LIMITS
static void task_fatal_port_no_senders(ipc_port_t, mach_msg_type_number_t);
static mach_port_t task_allocate_fatal_port(void);

IPC_KOBJECT_DEFINE(IKOT_TASK_FATAL,
    .iko_op_movable_send = true,
    .iko_op_stable = true,
    .iko_op_no_senders = task_fatal_port_no_senders);

extern void task_id_token_set_port(task_id_token_t token, ipc_port_t port);
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

/* Flag set by core audio when audio is playing. Used to stifle EXC_RESOURCE generation when active. */
int audio_active = 0;

/*
 * structure for tracking zone usage
 * Used either one per task/thread for all zones or <per-task,per-zone>.
 */
typedef struct zinfo_usage_store_t {
    /* These fields may be updated atomically, and so must be 8 byte aligned */
    uint64_t alloc __attribute__((aligned(8))); /* allocation counter */
    uint64_t free __attribute__((aligned(8))); /* free counter */
} zinfo_usage_store_t;
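
/*
 * A minimal illustrative helper (a sketch, not part of the original source):
 * because the counters above may be updated concurrently without a lock, an
 * accounting path would use 64-bit atomics, which is why both fields carry an
 * explicit 8-byte alignment attribute. Assumes os_atomic_add() as provided by
 * <os/atomic_private.h>.
 */
static inline void
zinfo_usage_record_alloc(zinfo_usage_store_t *usage, uint64_t size)
{
    /* relaxed ordering suffices for statistics-only counters */
    os_atomic_add(&usage->alloc, size, relaxed);
}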

/**
 * Return codes related to diag threshold and memory limit
 */
__options_decl(diagthreshold_check_return, int, {
    THRESHOLD_IS_SAME_AS_LIMIT_FLAG_DISABLED = 0,
    THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED = 1,
    THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED = 2,
    THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_ENABLED = 3,
});

/**
 * Return codes related to diag threshold and memory limit
 */
__options_decl(current_, int, {
    THRESHOLD_IS_SAME_AS_LIMIT = 0,
    THRESHOLD_IS_NOT_SAME_AS_LIMIT = 1
});

zinfo_usage_store_t tasks_tkm_private;
zinfo_usage_store_t tasks_tkm_shared;

/* A container to accumulate statistics for expired tasks */
expired_task_statistics_t dead_task_statistics;
LCK_SPIN_DECLARE_ATTR(dead_task_statistics_lock, &task_lck_grp, &task_lck_attr);

ledger_template_t task_ledger_template = NULL;

/* global lock for task_dyld_process_info_notify_{register, deregister, get_trap} */
LCK_GRP_DECLARE(g_dyldinfo_mtx_grp, "g_dyldinfo");
LCK_MTX_DECLARE(g_dyldinfo_mtx, &g_dyldinfo_mtx_grp);

SECURITY_READ_ONLY_LATE(struct _task_ledger_indices) task_ledgers __attribute__((used)) =
{.cpu_time = -1,
 .tkm_private = -1,
 .tkm_shared = -1,
 .phys_mem = -1,
 .wired_mem = -1,
 .internal = -1,
 .iokit_mapped = -1,
 .external = -1,
 .reusable = -1,
 .alternate_accounting = -1,
 .alternate_accounting_compressed = -1,
 .page_table = -1,
 .phys_footprint = -1,
 .internal_compressed = -1,
 .purgeable_volatile = -1,
 .purgeable_nonvolatile = -1,
 .purgeable_volatile_compressed = -1,
 .purgeable_nonvolatile_compressed = -1,
 .tagged_nofootprint = -1,
 .tagged_footprint = -1,
 .tagged_nofootprint_compressed = -1,
 .tagged_footprint_compressed = -1,
 .network_volatile = -1,
 .network_nonvolatile = -1,
 .network_volatile_compressed = -1,
 .network_nonvolatile_compressed = -1,
 .media_nofootprint = -1,
 .media_footprint = -1,
 .media_nofootprint_compressed = -1,
 .media_footprint_compressed = -1,
 .graphics_nofootprint = -1,
 .graphics_footprint = -1,
 .graphics_nofootprint_compressed = -1,
 .graphics_footprint_compressed = -1,
 .neural_nofootprint = -1,
 .neural_footprint = -1,
 .neural_nofootprint_compressed = -1,
 .neural_footprint_compressed = -1,
 .neural_nofootprint_total = -1,
 .platform_idle_wakeups = -1,
 .interrupt_wakeups = -1,
#if CONFIG_SCHED_SFI
 .sfi_wait_times = { 0 /* initialized at runtime */},
#endif /* CONFIG_SCHED_SFI */
 .cpu_time_billed_to_me = -1,
 .cpu_time_billed_to_others = -1,
 .physical_writes = -1,
 .logical_writes = -1,
 .logical_writes_to_external = -1,
 .pages_grabbed = -1,
 .pages_grabbed_kern = -1,
 .pages_grabbed_iopl = -1,
 .pages_grabbed_upl = -1,
#if CONFIG_FREEZE
 .frozen_to_swap = -1,
#endif /* CONFIG_FREEZE */
 .energy_billed_to_me = -1,
 .energy_billed_to_others = -1,
#if CONFIG_PHYS_WRITE_ACCT
 .fs_metadata_writes = -1,
#endif /* CONFIG_PHYS_WRITE_ACCT */
#if CONFIG_MEMORYSTATUS
 .memorystatus_dirty_time = -1,
#endif /* CONFIG_MEMORYSTATUS */
 .swapins = -1,
 .conclave_mem = -1, };

/* System sleep state */
boolean_t tasks_suspend_state;

__options_decl(send_exec_resource_is_fatal, bool, {
    IS_NOT_FATAL = false,
    IS_FATAL = true
});

__options_decl(send_exec_resource_is_diagnostics, bool, {
    IS_NOT_DIAGNOSTICS = false,
    IS_DIAGNOSTICS = true
});

__options_decl(send_exec_resource_is_warning, bool, {
    IS_NOT_WARNING = false,
    IS_WARNING = true
});

__options_decl(send_exec_resource_options_t, uint8_t, {
    EXEC_RESOURCE_FATAL = 0x01,
    EXEC_RESOURCE_DIAGNOSTIC = 0x02,
    EXEC_RESOURCE_WARNING = 0x04,
    EXEC_RESOURCE_CONCLAVE = 0x08 // A side memory limit independent of the main footprint.
});

/**
 * Actions to take when a process has reached the memory limit or the diagnostics threshold limits
 */
static inline void task_process_crossed_limit_no_diag(task_t task, ledger_amount_t ledger_limit_size, bool memlimit_is_fatal, bool memlimit_is_active, send_exec_resource_is_warning is_warning);
#if DEBUG || DEVELOPMENT
static inline void task_process_crossed_limit_diag(ledger_amount_t ledger_limit_size);
#endif
void init_task_ledgers(void);
void task_footprint_exceeded(int warning, __unused const void *param0, __unused const void *param1);
void task_wakeups_rate_exceeded(int warning, __unused const void *param0, __unused const void *param1);
void task_io_rate_exceeded(int warning, const void *param0, __unused const void *param1);
void task_conclave_mem_limit_exceeded(int warning, __unused const void *param0, __unused const void *param1);
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void);
void __attribute__((noinline)) PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, send_exec_resource_options_t exception_options);
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor);
#if CONFIG_PROC_RESOURCE_LIMITS
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task_t task, int current_size, int soft_limit, int hard_limit);
mach_port_name_t current_task_get_fatal_port_name(void);
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_KQWORKLOOPS(task_t task, int current_size, int soft_limit, int hard_limit);
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

kern_return_t task_suspend_internal(task_t);
kern_return_t task_resume_internal(task_t);
static kern_return_t task_start_halt_locked(task_t task, boolean_t should_mark_corpse);

extern kern_return_t iokit_task_terminate(task_t task, int phase);
extern void iokit_task_app_suspended_changed(task_t task);

extern kern_return_t exception_deliver(thread_t, exception_type_t, mach_exception_data_t, mach_msg_type_number_t, struct exception_action *, lck_mtx_t *);
extern void bsd_copythreadname(void *dst_uth, void *src_uth);
extern kern_return_t thread_resume(thread_t thread);

// Condition to include diag footprints
#define RESETTABLE_DIAG_FOOTPRINT_LIMITS ((DEBUG || DEVELOPMENT) && CONFIG_MEMORYSTATUS)

// Warn tasks when they hit 80% of their memory limit.
#define PHYS_FOOTPRINT_WARNING_LEVEL 80

#define TASK_WAKEUPS_MONITOR_DEFAULT_LIMIT 150 /* wakeups per second */
#define TASK_WAKEUPS_MONITOR_DEFAULT_INTERVAL 300 /* in seconds. */

/*
 * Level (in terms of percentage of the limit) at which the wakeups monitor triggers telemetry.
 *
 * (ie when the task's wakeups rate exceeds 70% of the limit, start taking user
 * stacktraces, aka micro-stackshots)
 */
#define TASK_WAKEUPS_MONITOR_DEFAULT_USTACKSHOTS_TRIGGER 70
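
/*
 * Worked example with the defaults above: at the default limit of 150
 * wakeups/sec and a 70% trigger, micro-stackshot telemetry starts once a
 * task sustains more than 150 * 70 / 100 = 105 wakeups per second.
 */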

int task_wakeups_monitor_interval; /* In seconds. Time period over which wakeups rate is observed */
int task_wakeups_monitor_rate; /* In hz. Maximum allowable wakeups per task before EXC_RESOURCE is sent */

unsigned int task_wakeups_monitor_ustackshots_trigger_pct; /* Percentage. Level at which we start gathering telemetry. */

TUNABLE(bool, disable_exc_resource, "disable_exc_resource", false); /* Global override to suppress EXC_RESOURCE for resource monitor violations. */
TUNABLE(bool, disable_exc_resource_during_audio, "disable_exc_resource_during_audio", true); /* Global override to suppress EXC_RESOURCE while audio is active */

ledger_amount_t max_task_footprint = 0; /* Per-task limit on physical memory consumption in bytes */
unsigned int max_task_footprint_warning_level = 0; /* Per-task limit warning percentage */

/*
 * Configure per-task memory limit.
 * The boot-arg is interpreted as Megabytes,
 * and takes precedence over the device tree.
 * Setting the boot-arg to 0 disables task limits.
 */
TUNABLE_DT_WRITEABLE(int, max_task_footprint_mb, "/defaults", "kern.max_task_pmem", "max_task_pmem", 0, TUNABLE_DT_NONE);
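
/*
 * Example (illustrative): booting with max_task_pmem=1024 caps each task's
 * physical footprint at 1024MB, overriding any device-tree value; passing
 * max_task_pmem=0 disables the per-task limit entirely.
 */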

/* I/O Monitor Limits */
#define IOMON_DEFAULT_LIMIT (20480ull) /* MB of logical/physical I/O */
#define IOMON_DEFAULT_INTERVAL (86400ull) /* in seconds */

uint64_t task_iomon_limit_mb; /* Per-task I/O monitor limit in MBs */
uint64_t task_iomon_interval_secs; /* Per-task I/O monitor interval in secs */

#define IO_TELEMETRY_DEFAULT_LIMIT (10ll * 1024ll * 1024ll)
int64_t io_telemetry_limit; /* Threshold to take a microstackshot (0 indicates I/O telemetry is turned off) */
int64_t global_logical_writes_count = 0; /* Global count for logical writes */
int64_t global_logical_writes_to_external_count = 0; /* Global count for logical writes to external storage */
static boolean_t global_update_logical_writes(int64_t, int64_t*);

#if DEBUG || DEVELOPMENT
static diagthreshold_check_return task_check_memorythreshold_is_valid(task_t task, uint64_t new_limit, bool is_diagnostics_value);
#endif
#define TASK_MAX_THREAD_LIMIT 256

#if MACH_ASSERT
int pmap_ledgers_panic = 1;
int pmap_ledgers_panic_leeway = 3;
#endif /* MACH_ASSERT */

int task_max = CONFIG_TASK_MAX; /* Max number of tasks */

#if CONFIG_COREDUMP
int hwm_user_cores = 0; /* high watermark violations generate user core files */
#endif

#ifdef MACH_BSD
extern uint32_t proc_platform(const struct proc *);
extern uint32_t proc_sdk(struct proc *);
extern void proc_getexecutableuuid(void *, unsigned char *, unsigned long);
extern int proc_pid(struct proc *p);
extern int proc_selfpid(void);
extern struct proc *current_proc(void);
extern char *proc_name_address(struct proc *p);
extern uint64_t get_dispatchqueue_offset_from_proc(void *);
extern int kevent_proc_copy_uptrs(void *proc, uint64_t *buf, uint32_t bufsize);
extern void workq_proc_suspended(struct proc *p);
extern void workq_proc_resumed(struct proc *p);
extern struct proc *kernproc;
extern void * XNU_PTRAUTH_SIGNED_PTR("initproc") initproc;

#if CONFIG_MEMORYSTATUS
extern void proc_memstat_skip(struct proc* p, boolean_t set);
extern void memorystatus_on_ledger_footprint_exceeded(int warning, bool memlimit_is_active, bool memlimit_is_fatal);
extern void memorystatus_log_exception(const int max_footprint_mb, bool memlimit_is_active, bool memlimit_is_fatal);
extern void memorystatus_log_diag_threshold_exception(const int diag_threshold_value);
extern void memorystatus_on_conclave_limit_exceeded(const int max_footprint_mb);
extern boolean_t memorystatus_allowed_vm_map_fork(task_t task, bool *is_large);
extern uint64_t memorystatus_available_memory_internal(struct proc *p);

#if DEVELOPMENT || DEBUG
extern void memorystatus_abort_vm_map_fork(task_t);
#endif

#endif /* CONFIG_MEMORYSTATUS */

#endif /* MACH_BSD */

/* Boot-arg that turns on fatal pac exception delivery for all first-party apps */
static TUNABLE(bool, enable_pac_exception, "enable_pac_exception", false);

/*
 * Defaults for controllable EXC_GUARD behaviors
 *
 * Internal builds are fatal by default (except BRIDGE).
 * Create an alternate set of defaults for special processes by name.
 */
struct task_exc_guard_named_default {
    char *name;
    uint32_t behavior;
};
#define _TASK_EXC_GUARD_MP_CORPSE (TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_MP_CORPSE)
#define _TASK_EXC_GUARD_MP_ONCE (_TASK_EXC_GUARD_MP_CORPSE | TASK_EXC_GUARD_MP_ONCE)
#define _TASK_EXC_GUARD_MP_FATAL (TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_MP_FATAL)

#define _TASK_EXC_GUARD_VM_CORPSE (TASK_EXC_GUARD_VM_DELIVER | TASK_EXC_GUARD_VM_CORPSE)
#define _TASK_EXC_GUARD_VM_ONCE (_TASK_EXC_GUARD_VM_CORPSE | TASK_EXC_GUARD_VM_ONCE)
#define _TASK_EXC_GUARD_VM_FATAL (TASK_EXC_GUARD_VM_DELIVER | TASK_EXC_GUARD_VM_FATAL)

#define _TASK_EXC_GUARD_ALL_CORPSE (_TASK_EXC_GUARD_MP_CORPSE | _TASK_EXC_GUARD_VM_CORPSE)
#define _TASK_EXC_GUARD_ALL_ONCE (_TASK_EXC_GUARD_MP_ONCE | _TASK_EXC_GUARD_VM_ONCE)
#define _TASK_EXC_GUARD_ALL_FATAL (_TASK_EXC_GUARD_MP_FATAL | _TASK_EXC_GUARD_VM_FATAL)
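
/*
 * For reference, the composites above expand as follows:
 * _TASK_EXC_GUARD_MP_ONCE is MP_DELIVER | MP_CORPSE | MP_ONCE (deliver the
 * exception, generate a corpse, then disable further mach-port guard
 * exceptions for the task), and _TASK_EXC_GUARD_ALL_FATAL is
 * (MP_DELIVER | MP_FATAL) | (VM_DELIVER | VM_FATAL).
 */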

/* cannot turn off FATAL and DELIVER bit if set */
uint32_t task_exc_guard_no_unset_mask = TASK_EXC_GUARD_MP_FATAL | TASK_EXC_GUARD_VM_FATAL |
    TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_VM_DELIVER;
/* cannot turn on ONCE bit if unset */
uint32_t task_exc_guard_no_set_mask = TASK_EXC_GUARD_MP_ONCE | TASK_EXC_GUARD_VM_ONCE;

#if !defined(XNU_TARGET_OS_BRIDGE)

uint32_t task_exc_guard_default = _TASK_EXC_GUARD_ALL_FATAL;
uint32_t task_exc_guard_config_mask = TASK_EXC_GUARD_MP_ALL | TASK_EXC_GUARD_VM_ALL;
/*
 * These "by-process-name" default overrides are intended to be a short-term fix to
 * quickly get over races between changes introducing new EXC_GUARD raising behaviors
 * in some process and a change in default behavior for same. We should ship with
 * these lists empty (by fixing the bugs, or explicitly changing the task's EXC_GUARD
 * exception behavior via task_set_exc_guard_behavior()).
 *
 * XXX Remember to add/remove TASK_EXC_GUARD_HONOR_NAMED_DEFAULTS back to
 * task_exc_guard_default when transitioning this list between empty and
 * non-empty.
 */
static struct task_exc_guard_named_default task_exc_guard_named_defaults[] = {};

#else /* !defined(XNU_TARGET_OS_BRIDGE) */

uint32_t task_exc_guard_default = _TASK_EXC_GUARD_ALL_ONCE;
uint32_t task_exc_guard_config_mask = TASK_EXC_GUARD_MP_ALL | TASK_EXC_GUARD_VM_ALL;
static struct task_exc_guard_named_default task_exc_guard_named_defaults[] = {};

#endif /* !defined(XNU_TARGET_OS_BRIDGE) */

/* Forwards */

static bool task_hold_locked(task_t task);
static void task_wait_locked(task_t task, boolean_t until_not_runnable);
static void task_release_locked(task_t task);
extern task_t proc_get_task_raw(void *proc);
extern void task_ref_hold_proc_task_struct(task_t task);
extern void task_release_proc_task_struct(task_t task, proc_ro_t proc_ro);

static void task_synchronizer_destroy_all(task_t task);
static os_ref_count_t
task_add_turnstile_watchports_locked(
    task_t task,
    struct task_watchports *watchports,
    struct task_watchport_elem **previous_elem_array,
    ipc_port_t *portwatch_ports,
    uint32_t portwatch_count);

static os_ref_count_t
task_remove_turnstile_watchports_locked(
    task_t task,
    struct task_watchports *watchports,
    ipc_port_t *port_freelist);

static struct task_watchports *
task_watchports_alloc_init(
    task_t task,
    thread_t thread,
    uint32_t count);

static void
task_watchports_deallocate(
    struct task_watchports *watchports);

void
task_set_64bit(
    task_t task,
    boolean_t is_64bit,
    boolean_t is_64bit_data)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__arm64__)
    thread_t thread;
#endif /* defined(__i386__) || defined(__x86_64__) || defined(__arm64__) */

    task_lock(task);

    /*
     * Switching to/from 64-bit address spaces
     */
    if (is_64bit) {
        if (!task_has_64Bit_addr(task)) {
            task_set_64Bit_addr(task);
        }
    } else {
        if (task_has_64Bit_addr(task)) {
            task_clear_64Bit_addr(task);
        }
    }

    /*
     * Switching to/from 64-bit register state.
     */
    if (is_64bit_data) {
        if (task_has_64Bit_data(task)) {
            goto out;
        }

        task_set_64Bit_data(task);
    } else {
        if (!task_has_64Bit_data(task)) {
            goto out;
        }

        task_clear_64Bit_data(task);
    }

    /* FIXME: On x86, the thread save state flavor can diverge from the
     * task's 64-bit feature flag due to the 32-bit/64-bit register save
     * state dichotomy. Since we can be pre-empted in this interval,
     * certain routines may observe the thread as being in an inconsistent
     * state with respect to its task's 64-bitness.
     */

#if defined(__x86_64__) || defined(__arm64__)
    queue_iterate(&task->threads, thread, thread_t, task_threads) {
        thread_mtx_lock(thread);
        machine_thread_switch_addrmode(thread);
        thread_mtx_unlock(thread);
    }
#endif /* defined(__x86_64__) || defined(__arm64__) */

out:
    task_unlock(task);
}

bool
task_get_64bit_addr(task_t task)
{
    return task_has_64Bit_addr(task);
}

bool
task_get_64bit_data(task_t task)
{
    return task_has_64Bit_data(task);
}

void
task_set_platform_binary(
    task_t task,
    boolean_t is_platform)
{
    if (is_platform) {
        task_ro_flags_set(task, TFRO_PLATFORM);
    } else {
        task_ro_flags_clear(task, TFRO_PLATFORM);
    }
    assert(task->map);
    if (task->map) {
        vm_map_lock(task->map);
        vm_map_set_platform_binary(task->map, (bool)is_platform);
        vm_map_unlock(task->map);
    }
}

#if XNU_TARGET_OS_OSX
#if DEVELOPMENT || DEBUG
SECURITY_READ_ONLY_LATE(bool) AMFI_bootarg_disable_mach_hardening = false;
#endif /* DEVELOPMENT || DEBUG */

void
task_disable_mach_hardening(task_t task)
{
    task_ro_flags_set(task, TFRO_MACH_HARDENING_OPT_OUT);
}

bool
task_opted_out_mach_hardening(task_t task)
{
    return task_ro_flags_get(task) & TFRO_MACH_HARDENING_OPT_OUT;
}
#endif /* XNU_TARGET_OS_OSX */

boolean_t
task_get_platform_binary(task_t task)
{
    return (task_ro_flags_get(task) & TFRO_PLATFORM) != 0;
}

boolean_t
task_is_a_corpse(task_t task)
{
    return (task_ro_flags_get(task) & TFRO_CORPSE) != 0;
}

boolean_t
task_is_ipc_active(task_t task)
{
    return task->ipc_active;
}

bool
task_is_immovable_no_assert(task_t task)
{
    task_control_port_options_t opt = task_get_control_port_options(task);
    return !!(opt & TASK_CONTROL_PORT_IMMOVABLE_MASK);
}

bool
task_is_immovable(task_t task)
{
    task_control_port_options_t opt = task_get_control_port_options(task);
    assert(opt != TASK_CONTROL_PORT_OPTIONS_INVALID);
    return !!(opt & TASK_CONTROL_PORT_IMMOVABLE_MASK);
}

void
task_set_corpse(task_t task)
{
    return task_ro_flags_set(task, TFRO_CORPSE);
}

/*
 * Set or clear per-task TF_CA_CLIENT_WI flag according to specified argument.
 * Returns "false" if flag is already set, and "true" in other cases.
 */
bool
task_set_ca_client_wi(
    task_t task,
    boolean_t set_or_clear)
{
    bool ret = true;
    task_lock(task);
    if (set_or_clear) {
        /* Tasks can have only one CA_CLIENT work interval */
        if (task->t_flags & TF_CA_CLIENT_WI) {
            ret = false;
        } else {
            task->t_flags |= TF_CA_CLIENT_WI;
        }
    } else {
        task->t_flags &= ~TF_CA_CLIENT_WI;
    }
    task_unlock(task);
    return ret;
}

/*
 * task_set_dyld_info() is called at most three times.
 * 1) at task struct creation to set addr/size to zero.
 * 2) in mach_loader.c to set the location of the __all_image_info section in the loaded dyld
 * 3) from dyld itself to update the location of all_image_info
 * For security, any calls after that are ignored. The TF_DYLD_ALL_IMAGE_FINAL bit is used to determine state.
 */
kern_return_t
task_set_dyld_info(
    task_t task,
    mach_vm_address_t addr,
    mach_vm_size_t size,
    bool finalize_value)
{
    mach_vm_address_t end;
    if (os_add_overflow(addr, size, &end)) {
        return KERN_FAILURE;
    }

    task_lock(task);
    /* don't accept updates if all_image_info_addr is final */
    if ((task->t_flags & TF_DYLD_ALL_IMAGE_FINAL) == 0) {
        bool inputNonZero = ((addr != 0) || (size != 0));
        bool currentNonZero = ((task->all_image_info_addr != 0) || (task->all_image_info_size != 0));
        task->all_image_info_addr = addr;
        task->all_image_info_size = size;
        /* can only change from a non-zero value to another non-zero once */
        if ((inputNonZero && currentNonZero) || finalize_value) {
            task->t_flags |= TF_DYLD_ALL_IMAGE_FINAL;
        }
        task_unlock(task);
        return KERN_SUCCESS;
    } else {
        task_unlock(task);
        return KERN_FAILURE;
    }
}
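
/*
 * Illustrative call sequence (hypothetical variable names), matching the
 * lifecycle described above:
 *
 *    task_set_dyld_info(task, 0, 0, false);            // 1) zeroed at creation
 *    task_set_dyld_info(task, dyld_info, size, false); // 2) set by mach_loader.c
 *    task_set_dyld_info(task, new_info, size, false);  // 3) dyld's update; value becomes final
 *    task_set_dyld_info(task, addr, size, false);      // ignored: returns KERN_FAILURE
 */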

bool
task_donates_own_pages(
    task_t task)
{
    return task->donates_own_pages;
}

void
task_set_mach_header_address(
    task_t task,
    mach_vm_address_t addr)
{
    task_lock(task);
    task->mach_header_vm_address = addr;
    task_unlock(task);
}

void
task_bank_reset(__unused task_t task)
{
    if (task->bank_context != NULL) {
        bank_task_destroy(task);
    }
}

/*
 * NOTE: This should only be called when the P_LINTRANSIT
 * flag is set (the proc_trans lock is held) on the
 * proc associated with the task.
 */
void
task_bank_init(__unused task_t task)
{
    if (task->bank_context != NULL) {
        panic("Task bank init called with non null bank context for task: %p and bank_context: %p", task, task->bank_context);
    }
    bank_task_initialize(task);
}

void
task_set_did_exec_flag(task_t task)
{
    task->t_procflags |= TPF_DID_EXEC;
}

void
task_clear_exec_copy_flag(task_t task)
{
    task->t_procflags &= ~TPF_EXEC_COPY;
}

event_t
task_get_return_wait_event(task_t task)
{
    return (event_t)&task->returnwait_inheritor;
}

void
task_clear_return_wait(task_t task, uint32_t flags)
{
    if (flags & TCRW_CLEAR_INITIAL_WAIT) {
        thread_wakeup(task_get_return_wait_event(task));
    }

    if (flags & TCRW_CLEAR_FINAL_WAIT) {
        is_write_lock(task->itk_space);

        task->t_returnwaitflags &= ~TRW_LRETURNWAIT;
        task->returnwait_inheritor = NULL;

        if (flags & TCRW_CLEAR_EXEC_COMPLETE) {
            task->t_returnwaitflags &= ~TRW_LEXEC_COMPLETE;
        }

        if (task->t_returnwaitflags & TRW_LRETURNWAITER) {
            struct turnstile *turnstile = turnstile_prepare_hash((uintptr_t) task_get_return_wait_event(task),
                TURNSTILE_ULOCK);

            waitq_wakeup64_all(&turnstile->ts_waitq,
                CAST_EVENT64_T(task_get_return_wait_event(task)),
                THREAD_AWAKENED, WAITQ_UPDATE_INHERITOR);

            turnstile_update_inheritor_complete(turnstile, TURNSTILE_INTERLOCK_HELD);

            turnstile_complete_hash((uintptr_t) task_get_return_wait_event(task), TURNSTILE_ULOCK);
            turnstile_cleanup();
            task->t_returnwaitflags &= ~TRW_LRETURNWAITER;
        }
        is_write_unlock(task->itk_space);
    }
}

/*
 * Set default behavior for a task's control ports
 *
 * Nothing locked. This is safe because it is called before
 * ipc_task_enable, so no one has access to the task yet.
 */
void
task_set_ctrl_port_default(
    task_t task,
    thread_t thread)
{
    ipc_space_policy_t pol = ipc_policy_for_task(task);
    bool movable_allowed = mac_task_check_get_movable_control_port_during_spawn(task) == 0;
    bool is_simulated = pol & IPC_SPACE_POLICY_SIMULATED;
    bool is_translated = false;
    task_control_port_options_t opts = TASK_CONTROL_PORT_OPTIONS_NONE;

    /* verify it is called before ipc_task_enable */
    assert(!task->ipc_active);

    if (movable_allowed || is_simulated || is_translated) {
        /* Disable control port hardening for entitled||simulated binaries */
        opts = TASK_CONTROL_PORT_OPTIONS_NONE;
    } else if (ipc_should_apply_policy(pol, IPC_POLICY_ENHANCED_V1)) {
        /* set control port options for 1p code, inherited from parent task by default */
        if (ipc_control_port_options & ICP_OPTIONS_IMMOVABLE_1P_HARD) {
            opts |= TASK_CONTROL_PORT_IMMOVABLE_HARD;
        }
    } else {
        /* set control port options for 3p code, inherited from parent task by default */
        if (ipc_control_port_options & ICP_OPTIONS_IMMOVABLE_3P_HARD) {
            opts |= TASK_CONTROL_PORT_IMMOVABLE_HARD;
        }
    }

    /* see `copyout_should_mark_immovable_send`, which consumes these flags */
    task_set_control_port_options(task, opts);

    /*
     * now that we have marked the task as immovable, copyout the task/thread ports
     * again so that they get marked as immovable on copyout
     */
    ipc_task_copyout_control_port(task);
    /* consumed by ipc_thread_set_immovable_pinned */
    thread_reference(thread);
    ipc_thread_set_immovable_pinned(thread);
}

void __attribute__((noreturn))
task_wait_to_return(void)
{
    task_t task = current_task();
    uint8_t returnwaitflags;

    is_write_lock(task->itk_space);

    if (task->t_returnwaitflags & TRW_LRETURNWAIT) {
        struct turnstile *turnstile = turnstile_prepare_hash((uintptr_t) task_get_return_wait_event(task),
            TURNSTILE_ULOCK);

        do {
            task->t_returnwaitflags |= TRW_LRETURNWAITER;
            turnstile_update_inheritor(turnstile, task->returnwait_inheritor,
                (TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_THREAD));

            waitq_assert_wait64(&turnstile->ts_waitq,
                CAST_EVENT64_T(task_get_return_wait_event(task)),
                THREAD_UNINT, TIMEOUT_WAIT_FOREVER);

            is_write_unlock(task->itk_space);

            turnstile_update_inheritor_complete(turnstile, TURNSTILE_INTERLOCK_NOT_HELD);

            thread_block(THREAD_CONTINUE_NULL);

            is_write_lock(task->itk_space);
        } while (task->t_returnwaitflags & TRW_LRETURNWAIT);

        turnstile_complete_hash((uintptr_t) task_get_return_wait_event(task), TURNSTILE_ULOCK);
    }

    returnwaitflags = task->t_returnwaitflags;
    is_write_unlock(task->itk_space);
    turnstile_cleanup();

    /**
     * In posix_spawn() path, process_signature() is guaranteed to complete
     * when the "second wait" is cleared. Call out to execute whatever depends
     * on the result of that before we return to EL0.
     */
    task_post_signature_processing_hook(task);
#if CONFIG_MACF
    /*
     * Before jumping to userspace and allowing this process
     * to execute any code, make sure its credentials are cached,
     * and notify any interested parties.
     */
    extern void current_cached_proc_cred_update(void);

    current_cached_proc_cred_update();
    if (returnwaitflags & TRW_LEXEC_COMPLETE) {
        mac_proc_notify_exec_complete(current_proc());
    }
#endif

    thread_bootstrap_return();
}

/**
 * A callout by task_wait_to_return on the main thread of a newly spawned task
 * after process_signature() is completed by the parent task.
 *
 * @param task The newly spawned task
 */
void
task_post_signature_processing_hook(task_t task)
{
    ml_task_post_signature_processing_hook(task);
}

bool
task_is_initproc(task_t task)
{
    return get_bsdtask_info(task) == initproc;
}

boolean_t
task_is_exec_copy(task_t task)
{
    return task_is_exec_copy_internal(task);
}

boolean_t
task_did_exec(task_t task)
{
    return task_did_exec_internal(task);
}

boolean_t
task_is_active(task_t task)
{
    return task->active;
}

boolean_t
task_is_halting(task_t task)
{
    return task->halting;
}

void
task_init(void)
{
    if (max_task_footprint_mb != 0) {
#if CONFIG_MEMORYSTATUS
        if (max_task_footprint_mb < 50) {
            printf("Warning: max_task_pmem %d below minimum.\n",
                max_task_footprint_mb);
            max_task_footprint_mb = 50;
        }
        printf("Limiting task physical memory footprint to %d MB\n",
            max_task_footprint_mb);

        max_task_footprint = (ledger_amount_t)max_task_footprint_mb * 1024 * 1024; // Convert MB to bytes

        /*
         * Configure the per-task memory limit warning level.
         * This is computed as a percentage.
         */
        max_task_footprint_warning_level = 0;

        if (max_mem < 0x40000000) {
            /*
             * On devices with < 1GB of memory:
             * -- set warnings to 50MB below the per-task limit.
             */
            if (max_task_footprint_mb > 50) {
                max_task_footprint_warning_level = ((max_task_footprint_mb - 50) * 100) / max_task_footprint_mb;
            }
        } else {
            /*
             * On devices with >= 1GB of memory:
             * -- set warnings to 100MB below the per-task limit.
             */
            if (max_task_footprint_mb > 100) {
                max_task_footprint_warning_level = ((max_task_footprint_mb - 100) * 100) / max_task_footprint_mb;
            }
        }
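
        /*
         * Worked example (illustrative): with max_task_footprint_mb = 500 on
         * a >= 1GB device, the warning level computes to
         * (500 - 100) * 100 / 500 = 80%, i.e. the warning fires 100MB below
         * the 500MB limit.
         */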

        /*
         * Never allow warning level to land below the default.
         */
        if (max_task_footprint_warning_level < PHYS_FOOTPRINT_WARNING_LEVEL) {
            max_task_footprint_warning_level = PHYS_FOOTPRINT_WARNING_LEVEL;
        }

        printf("Limiting task physical memory warning to %d%%\n", max_task_footprint_warning_level);

#else
        printf("Warning: max_task_pmem specified, but jetsam not configured; ignoring.\n");
#endif /* CONFIG_MEMORYSTATUS */
    }

#if DEVELOPMENT || DEBUG
    PE_parse_boot_argn("task_exc_guard_default",
        &task_exc_guard_default,
        sizeof(task_exc_guard_default));
#endif /* DEVELOPMENT || DEBUG */

#if CONFIG_COREDUMP
    if (!PE_parse_boot_argn("hwm_user_cores", &hwm_user_cores,
        sizeof(hwm_user_cores))) {
        hwm_user_cores = 0;
    }
#endif

    proc_init_cpumon_params();

    if (!PE_parse_boot_argn("task_wakeups_monitor_rate", &task_wakeups_monitor_rate, sizeof(task_wakeups_monitor_rate))) {
        task_wakeups_monitor_rate = TASK_WAKEUPS_MONITOR_DEFAULT_LIMIT;
    }

    if (!PE_parse_boot_argn("task_wakeups_monitor_interval", &task_wakeups_monitor_interval, sizeof(task_wakeups_monitor_interval))) {
        task_wakeups_monitor_interval = TASK_WAKEUPS_MONITOR_DEFAULT_INTERVAL;
    }

    if (!PE_parse_boot_argn("task_wakeups_monitor_ustackshots_trigger_pct", &task_wakeups_monitor_ustackshots_trigger_pct,
        sizeof(task_wakeups_monitor_ustackshots_trigger_pct))) {
        task_wakeups_monitor_ustackshots_trigger_pct = TASK_WAKEUPS_MONITOR_DEFAULT_USTACKSHOTS_TRIGGER;
    }

    if (!PE_parse_boot_argn("task_iomon_limit_mb", &task_iomon_limit_mb, sizeof(task_iomon_limit_mb))) {
        task_iomon_limit_mb = IOMON_DEFAULT_LIMIT;
    }

    if (!PE_parse_boot_argn("task_iomon_interval_secs", &task_iomon_interval_secs, sizeof(task_iomon_interval_secs))) {
        task_iomon_interval_secs = IOMON_DEFAULT_INTERVAL;
    }

    if (!PE_parse_boot_argn("io_telemetry_limit", &io_telemetry_limit, sizeof(io_telemetry_limit))) {
        io_telemetry_limit = IO_TELEMETRY_DEFAULT_LIMIT;
    }

    /*
     * If we have coalitions, coalition_init() will call init_task_ledgers() as it
     * sets up the ledgers for the default coalition. If we don't have coalitions,
     * then we have to call it now.
     */
#if CONFIG_COALITIONS
    assert(task_ledger_template);
#else /* CONFIG_COALITIONS */
    init_task_ledgers();
#endif /* CONFIG_COALITIONS */

    task_ref_init();
    task_zone_init();

#ifdef __LP64__
    boolean_t is_64bit = TRUE;
#else
    boolean_t is_64bit = FALSE;
#endif

    kernproc = (struct proc *)zalloc_flags(proc_task_zone, Z_WAITOK | Z_ZERO);
    kernel_task = proc_get_task_raw(kernproc);

    /*
     * Create the kernel task as the first task.
     */
    if (task_create_internal(TASK_NULL, NULL, NULL, FALSE, is_64bit,
        is_64bit, TF_NONE, TF_NONE, TPF_NONE, TWF_NONE, kernel_task) != KERN_SUCCESS) {
        panic("task_init");
    }

#if HAS_MTE && CONFIG_KERNEL_TAGGING
    task_set_sec(kernel_task);
#endif /* HAS_MTE && CONFIG_KERNEL_TAGGING */


    vm_map_setup(get_task_map(kernel_task), kernel_task);

    ipc_task_enable(kernel_task);

#if defined(HAS_APPLE_PAC)
    kernel_task->rop_pid = ml_default_rop_pid();
    kernel_task->jop_pid = ml_default_jop_pid();
    // kernel_task never runs at EL0, but machine_thread_state_convert_from/to_user() relies on
    // disable_user_jop to be false for kernel threads (e.g. in exception delivery on thread_exception_daemon)
    ml_task_set_disable_user_jop(kernel_task, FALSE);
#endif

    vm_map_deallocate(kernel_task->map);
    kernel_task->map = kernel_map;
}

static inline void
task_zone_init(void)
{
    proc_struct_size = roundup(proc_struct_size, task_alignment);
    task_struct_size = roundup(sizeof(struct task), proc_alignment);
    proc_and_task_size = proc_struct_size + task_struct_size;

    proc_task_zone = zone_create_ext("proc_task", proc_and_task_size,
        ZC_ZFREE_CLEARMEM | ZC_SEQUESTER, ZONE_ID_PROC_TASK, NULL); /* sequester is needed for proc_rele() */
}
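
/*
 * A minimal sketch of why the rounding above matters. Assumption: the proc
 * and task structures are co-allocated back to back from proc_task_zone,
 * with the task immediately following the proc, so rounding
 * proc_struct_size up to task_alignment keeps the embedded task aligned.
 * A hypothetical accessor would then be:
 *
 *    task_t
 *    proc_get_task_raw_sketch(void *proc)
 *    {
 *        return (task_t)((uintptr_t)proc + proc_struct_size);
 *    }
 */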

/*
 * Task ledgers
 * ------------
 *
 * phys_footprint
 *   Physical footprint: This is the sum of:
 *   + (internal - alternate_accounting)
 *   + (internal_compressed - alternate_accounting_compressed)
 *   + iokit_mapped
 *   + purgeable_nonvolatile
 *   + purgeable_nonvolatile_compressed
 *   + page_table
 *
 * internal
 *   The task's anonymous memory, which on iOS is always resident.
 *
 * internal_compressed
 *   Amount of this task's internal memory which is held by the compressor.
 *   Such memory is no longer actually resident for the task [i.e., resident in its pmap],
 *   and could be either decompressed back into memory, or paged out to storage, depending
 *   on our implementation.
 *
 * iokit_mapped
 *   IOKit mappings: The total size of all IOKit mappings in this task [regardless of
 *   clean/dirty or internal/external state].
 *
 * alternate_accounting
 *   The number of internal dirty pages which are part of IOKit mappings. By definition, these pages
 *   are counted in both internal *and* iokit_mapped, so we must subtract them from the total to avoid
 *   double counting.
 *
 * pages_grabbed
 *   pages_grabbed counts all page grabs in a task. It is also broken out into three subtypes
 *   which track UPL, IOPL and Kernel page grabs.
 */
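
/*
 * A minimal sketch (illustrative only, not used by the kernel) of the
 * phys_footprint arithmetic documented above, computed from the individual
 * ledger balances via ledger_get_balance():
 */
static inline ledger_amount_t
task_phys_footprint_sketch(task_t task)
{
    ledger_amount_t internal = 0, internal_comp = 0;
    ledger_amount_t alt = 0, alt_comp = 0;
    ledger_amount_t iokit = 0, purg_nv = 0, purg_nv_comp = 0, ptbl = 0;

    /* each call reads one entry's current balance from the task's ledger */
    ledger_get_balance(task->ledger, task_ledgers.internal, &internal);
    ledger_get_balance(task->ledger, task_ledgers.internal_compressed, &internal_comp);
    ledger_get_balance(task->ledger, task_ledgers.alternate_accounting, &alt);
    ledger_get_balance(task->ledger, task_ledgers.alternate_accounting_compressed, &alt_comp);
    ledger_get_balance(task->ledger, task_ledgers.iokit_mapped, &iokit);
    ledger_get_balance(task->ledger, task_ledgers.purgeable_nonvolatile, &purg_nv);
    ledger_get_balance(task->ledger, task_ledgers.purgeable_nonvolatile_compressed, &purg_nv_comp);
    ledger_get_balance(task->ledger, task_ledgers.page_table, &ptbl);

    /* the sum documented in the comment block above */
    return (internal - alt) + (internal_comp - alt_comp) +
           iokit + purg_nv + purg_nv_comp + ptbl;
}
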
void
init_task_ledgers(void)
{
    ledger_template_t t;

    assert(task_ledger_template == NULL);
    assert(kernel_task == TASK_NULL);

#if MACH_ASSERT
    PE_parse_boot_argn("pmap_ledgers_panic",
        &pmap_ledgers_panic,
        sizeof(pmap_ledgers_panic));
    PE_parse_boot_argn("pmap_ledgers_panic_leeway",
        &pmap_ledgers_panic_leeway,
        sizeof(pmap_ledgers_panic_leeway));
#endif /* MACH_ASSERT */

    if ((t = ledger_template_create("Per-task ledger")) == NULL) {
        panic("couldn't create task ledger template");
    }

    task_ledgers.cpu_time = ledger_entry_add(t, "cpu_time", "sched", "ns");
    task_ledgers.tkm_private = ledger_entry_add(t, "tkm_private",
        "physmem", "bytes");
    task_ledgers.tkm_shared = ledger_entry_add(t, "tkm_shared", "physmem",
        "bytes");
    task_ledgers.phys_mem = ledger_entry_add(t, "phys_mem", "physmem",
        "bytes");
    task_ledgers.wired_mem = ledger_entry_add(t, "wired_mem", "physmem",
        "bytes");
    task_ledgers.conclave_mem = ledger_entry_add_with_flags(t, "conclave_mem", "physmem", "bytes",
        LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE | LEDGER_ENTRY_ALLOW_DEBIT);
    task_ledgers.internal = ledger_entry_add(t, "internal", "physmem",
        "bytes");
    task_ledgers.iokit_mapped = ledger_entry_add_with_flags(t, "iokit_mapped", "mappings",
        "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.alternate_accounting = ledger_entry_add_with_flags(t, "alternate_accounting", "physmem",
        "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.alternate_accounting_compressed = ledger_entry_add_with_flags(t, "alternate_accounting_compressed", "physmem",
        "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.page_table = ledger_entry_add_with_flags(t, "page_table", "physmem",
        "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.phys_footprint = ledger_entry_add(t, "phys_footprint", "physmem",
        "bytes");
    task_ledgers.internal_compressed = ledger_entry_add(t, "internal_compressed", "physmem",
        "bytes");
    task_ledgers.reusable = ledger_entry_add(t, "reusable", "physmem", "bytes");
    task_ledgers.external = ledger_entry_add(t, "external", "physmem", "bytes");
    task_ledgers.purgeable_volatile = ledger_entry_add_with_flags(t, "purgeable_volatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.purgeable_nonvolatile = ledger_entry_add_with_flags(t, "purgeable_nonvolatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.purgeable_volatile_compressed = ledger_entry_add_with_flags(t, "purgeable_volatile_compress", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.purgeable_nonvolatile_compressed = ledger_entry_add_with_flags(t, "purgeable_nonvolatile_compress", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.pages_grabbed = ledger_entry_add_with_flags(t, "pages_grabbed", "physmem", "count", LEDGER_ENTRY_USE_COUNTER);
    task_ledgers.pages_grabbed_kern = ledger_entry_add_with_flags(t, "pages_grabbed_kern", "physmem", "count", LEDGER_ENTRY_USE_COUNTER);
    task_ledgers.pages_grabbed_iopl = ledger_entry_add_with_flags(t, "pages_grabbed_iopl", "physmem", "count", LEDGER_ENTRY_USE_COUNTER);
    task_ledgers.pages_grabbed_upl = ledger_entry_add_with_flags(t, "pages_grabbed_upl", "physmem", "count", LEDGER_ENTRY_USE_COUNTER);
    task_ledgers.tagged_nofootprint = ledger_entry_add_with_flags(t, "tagged_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.tagged_footprint = ledger_entry_add_with_flags(t, "tagged_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.tagged_nofootprint_compressed = ledger_entry_add_with_flags(t, "tagged_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.tagged_footprint_compressed = ledger_entry_add_with_flags(t, "tagged_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.network_volatile = ledger_entry_add_with_flags(t, "network_volatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.network_nonvolatile = ledger_entry_add_with_flags(t, "network_nonvolatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.network_volatile_compressed = ledger_entry_add_with_flags(t, "network_volatile_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.network_nonvolatile_compressed = ledger_entry_add_with_flags(t, "network_nonvolatile_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.media_nofootprint = ledger_entry_add_with_flags(t, "media_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.media_footprint = ledger_entry_add_with_flags(t, "media_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.media_nofootprint_compressed = ledger_entry_add_with_flags(t, "media_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.media_footprint_compressed = ledger_entry_add_with_flags(t, "media_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.graphics_nofootprint = ledger_entry_add_with_flags(t, "graphics_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.graphics_footprint = ledger_entry_add_with_flags(t, "graphics_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.graphics_nofootprint_compressed = ledger_entry_add_with_flags(t, "graphics_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.graphics_footprint_compressed = ledger_entry_add_with_flags(t, "graphics_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.neural_nofootprint = ledger_entry_add_with_flags(t, "neural_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.neural_footprint = ledger_entry_add_with_flags(t, "neural_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.neural_nofootprint_compressed = ledger_entry_add_with_flags(t, "neural_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.neural_footprint_compressed = ledger_entry_add_with_flags(t, "neural_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
    task_ledgers.neural_nofootprint_total = ledger_entry_add(t, "neural_nofootprint_total", "physmem", "bytes");

#if CONFIG_DEFERRED_RECLAIM
    task_ledgers.est_reclaimable = ledger_entry_add_with_flags(t, "est_reclaimable", "virtmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
#endif /* CONFIG_DEFERRED_RECLAIM */

#if CONFIG_FREEZE
    task_ledgers.frozen_to_swap = ledger_entry_add(t, "frozen_to_swap", "physmem", "bytes");
#endif /* CONFIG_FREEZE */

    task_ledgers.platform_idle_wakeups = ledger_entry_add(t, "platform_idle_wakeups", "power",
        "count");
    task_ledgers.interrupt_wakeups = ledger_entry_add(t, "interrupt_wakeups", "power",
        "count");

#if CONFIG_SCHED_SFI
    sfi_class_id_t class_id, ledger_alias;
    for (class_id = SFI_CLASS_UNSPECIFIED; class_id < MAX_SFI_CLASS_ID; class_id++) {
        task_ledgers.sfi_wait_times[class_id] = -1;
    }

    /* don't account for UNSPECIFIED */
    for (class_id = SFI_CLASS_UNSPECIFIED + 1; class_id < MAX_SFI_CLASS_ID; class_id++) {
        ledger_alias = sfi_get_ledger_alias_for_class(class_id);
        if (ledger_alias != SFI_CLASS_UNSPECIFIED) {
            /* Check to see if alias has been registered yet */
            if (task_ledgers.sfi_wait_times[ledger_alias] != -1) {
                task_ledgers.sfi_wait_times[class_id] = task_ledgers.sfi_wait_times[ledger_alias];
            } else {
                /* Otherwise, initialize it first */
                task_ledgers.sfi_wait_times[class_id] = task_ledgers.sfi_wait_times[ledger_alias] = sfi_ledger_entry_add(t, ledger_alias);
            }
        } else {
            task_ledgers.sfi_wait_times[class_id] = sfi_ledger_entry_add(t, class_id);
        }

        if (task_ledgers.sfi_wait_times[class_id] < 0) {
            panic("couldn't create entries for task ledger template for SFI class 0x%x", class_id);
        }
    }

    assert(task_ledgers.sfi_wait_times[MAX_SFI_CLASS_ID - 1] != -1);
#endif /* CONFIG_SCHED_SFI */

    task_ledgers.cpu_time_billed_to_me = ledger_entry_add(t, "cpu_time_billed_to_me", "sched", "ns");
    task_ledgers.cpu_time_billed_to_others = ledger_entry_add(t, "cpu_time_billed_to_others", "sched", "ns");
    task_ledgers.physical_writes = ledger_entry_add(t, "physical_writes", "res", "bytes");
    task_ledgers.logical_writes = ledger_entry_add(t, "logical_writes", "res", "bytes");
    task_ledgers.logical_writes_to_external = ledger_entry_add(t, "logical_writes_to_external", "res", "bytes");
#if CONFIG_PHYS_WRITE_ACCT
    task_ledgers.fs_metadata_writes = ledger_entry_add(t, "fs_metadata_writes", "res", "bytes");
#endif /* CONFIG_PHYS_WRITE_ACCT */
    task_ledgers.energy_billed_to_me = ledger_entry_add(t, "energy_billed_to_me", "power", "nj");
    task_ledgers.energy_billed_to_others = ledger_entry_add(t, "energy_billed_to_others", "power", "nj");

#if CONFIG_MEMORYSTATUS
    task_ledgers.memorystatus_dirty_time = ledger_entry_add(t, "memorystatus_dirty_time", "physmem", "ns");
#endif /* CONFIG_MEMORYSTATUS */

    task_ledgers.swapins = ledger_entry_add_with_flags(t, "swapins", "physmem", "bytes",
        LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);

    if ((task_ledgers.cpu_time < 0) ||
        (task_ledgers.tkm_private < 0) ||
        (task_ledgers.tkm_shared < 0) ||
        (task_ledgers.phys_mem < 0) ||
        (task_ledgers.wired_mem < 0) ||
        (task_ledgers.conclave_mem < 0) ||
        (task_ledgers.internal < 0) ||
        (task_ledgers.external < 0) ||
        (task_ledgers.reusable < 0) ||
        (task_ledgers.iokit_mapped < 0) ||
        (task_ledgers.alternate_accounting < 0) ||
        (task_ledgers.alternate_accounting_compressed < 0) ||
        (task_ledgers.page_table < 0) ||
        (task_ledgers.phys_footprint < 0) ||
        (task_ledgers.internal_compressed < 0) ||
        (task_ledgers.purgeable_volatile < 0) ||
        (task_ledgers.purgeable_nonvolatile < 0) ||
        (task_ledgers.purgeable_volatile_compressed < 0) ||
        (task_ledgers.purgeable_nonvolatile_compressed < 0) ||
        (task_ledgers.tagged_nofootprint < 0) ||
        (task_ledgers.tagged_footprint < 0) ||
        (task_ledgers.tagged_nofootprint_compressed < 0) ||
        (task_ledgers.tagged_footprint_compressed < 0) ||
#if CONFIG_FREEZE
        (task_ledgers.frozen_to_swap < 0) ||
#endif /* CONFIG_FREEZE */
        (task_ledgers.network_volatile < 0) ||
        (task_ledgers.network_nonvolatile < 0) ||
        (task_ledgers.network_volatile_compressed < 0) ||
        (task_ledgers.network_nonvolatile_compressed < 0) ||
        (task_ledgers.media_nofootprint < 0) ||
        (task_ledgers.media_footprint < 0) ||
        (task_ledgers.media_nofootprint_compressed < 0) ||
        (task_ledgers.media_footprint_compressed < 0) ||
        (task_ledgers.graphics_nofootprint < 0) ||
        (task_ledgers.graphics_footprint < 0) ||
        (task_ledgers.graphics_nofootprint_compressed < 0) ||
        (task_ledgers.graphics_footprint_compressed < 0) ||
        (task_ledgers.neural_nofootprint < 0) ||
        (task_ledgers.neural_footprint < 0) ||
        (task_ledgers.neural_nofootprint_compressed < 0) ||
        (task_ledgers.neural_footprint_compressed < 0) ||
        (task_ledgers.neural_nofootprint_total < 0) ||
        (task_ledgers.platform_idle_wakeups < 0) ||
        (task_ledgers.interrupt_wakeups < 0) ||
        (task_ledgers.cpu_time_billed_to_me < 0) || (task_ledgers.cpu_time_billed_to_others < 0) ||
        (task_ledgers.physical_writes < 0) ||
        (task_ledgers.logical_writes < 0) ||
        (task_ledgers.logical_writes_to_external < 0) ||
#if CONFIG_PHYS_WRITE_ACCT
        (task_ledgers.fs_metadata_writes < 0) ||
#endif /* CONFIG_PHYS_WRITE_ACCT */
#if CONFIG_MEMORYSTATUS
        (task_ledgers.memorystatus_dirty_time < 0) ||
#endif /* CONFIG_MEMORYSTATUS */
        (task_ledgers.energy_billed_to_me < 0) ||
        (task_ledgers.energy_billed_to_others < 0) ||
        (task_ledgers.swapins < 0)
        ) {
        panic("couldn't create entries for task ledger template");
    }

    ledger_track_credit_only(t, task_ledgers.phys_footprint);
    ledger_track_credit_only(t, task_ledgers.internal);
    ledger_track_credit_only(t, task_ledgers.external);
    ledger_track_credit_only(t, task_ledgers.reusable);

    ledger_track_maximum(t, task_ledgers.phys_footprint, 60);
    ledger_track_maximum(t, task_ledgers.phys_mem, 60);
    ledger_track_maximum(t, task_ledgers.internal, 60);
    ledger_track_maximum(t, task_ledgers.internal_compressed, 60);
    ledger_track_maximum(t, task_ledgers.reusable, 60);
    ledger_track_maximum(t, task_ledgers.external, 60);
    ledger_track_maximum(t, task_ledgers.neural_nofootprint_total, 60);
#if MACH_ASSERT
    if (pmap_ledgers_panic) {
        ledger_panic_on_negative(t, task_ledgers.phys_footprint);
        ledger_panic_on_negative(t, task_ledgers.conclave_mem);
        ledger_panic_on_negative(t, task_ledgers.page_table);
        ledger_panic_on_negative(t, task_ledgers.internal);
        ledger_panic_on_negative(t, task_ledgers.iokit_mapped);
        ledger_panic_on_negative(t, task_ledgers.alternate_accounting);
        ledger_panic_on_negative(t, task_ledgers.alternate_accounting_compressed);
        ledger_panic_on_negative(t, task_ledgers.purgeable_volatile);
        ledger_panic_on_negative(t, task_ledgers.purgeable_nonvolatile);
        ledger_panic_on_negative(t, task_ledgers.purgeable_volatile_compressed);
        ledger_panic_on_negative(t, task_ledgers.purgeable_nonvolatile_compressed);
#if CONFIG_PHYS_WRITE_ACCT
        ledger_panic_on_negative(t, task_ledgers.fs_metadata_writes);
#endif /* CONFIG_PHYS_WRITE_ACCT */

        ledger_panic_on_negative(t, task_ledgers.tagged_nofootprint);
        ledger_panic_on_negative(t, task_ledgers.tagged_footprint);
        ledger_panic_on_negative(t, task_ledgers.tagged_nofootprint_compressed);
        ledger_panic_on_negative(t, task_ledgers.tagged_footprint_compressed);
        ledger_panic_on_negative(t, task_ledgers.network_volatile);
        ledger_panic_on_negative(t, task_ledgers.network_nonvolatile);
        ledger_panic_on_negative(t, task_ledgers.network_volatile_compressed);
        ledger_panic_on_negative(t, task_ledgers.network_nonvolatile_compressed);
1502 ledger_panic_on_negative(t, task_ledgers.media_nofootprint);
1503 ledger_panic_on_negative(t, task_ledgers.media_footprint);
1504 ledger_panic_on_negative(t, task_ledgers.media_nofootprint_compressed);
1505 ledger_panic_on_negative(t, task_ledgers.media_footprint_compressed);
1506 ledger_panic_on_negative(t, task_ledgers.graphics_nofootprint);
1507 ledger_panic_on_negative(t, task_ledgers.graphics_footprint);
1508 ledger_panic_on_negative(t, task_ledgers.graphics_nofootprint_compressed);
1509 ledger_panic_on_negative(t, task_ledgers.graphics_footprint_compressed);
1510 ledger_panic_on_negative(t, task_ledgers.neural_nofootprint);
1511 ledger_panic_on_negative(t, task_ledgers.neural_footprint);
1512 ledger_panic_on_negative(t, task_ledgers.neural_nofootprint_compressed);
1513 ledger_panic_on_negative(t, task_ledgers.neural_footprint_compressed);
1514 }
1515 #endif /* MACH_ASSERT */
1516
1517 #if CONFIG_MEMORYSTATUS
1518 ledger_set_callback(t, task_ledgers.phys_footprint, task_footprint_exceeded, NULL, NULL);
1519 ledger_set_callback(t, task_ledgers.conclave_mem, task_conclave_mem_limit_exceeded, NULL, NULL);
1520 #endif /* CONFIG_MEMORYSTATUS */
1521
1522 ledger_set_callback(t, task_ledgers.interrupt_wakeups,
1523 task_wakeups_rate_exceeded, NULL, NULL);
1524 ledger_set_callback(t, task_ledgers.physical_writes, task_io_rate_exceeded, (void *)FLAVOR_IO_PHYSICAL_WRITES, NULL);
1525
1526 #if CONFIG_SPTM || !XNU_MONITOR
1527 ledger_template_complete(t);
1528 #else /* CONFIG_SPTM || !XNU_MONITOR */
1529 ledger_template_complete_secure_alloc(t);
1530 #endif /* CONFIG_SPTM || !XNU_MONITOR */
1531 task_ledger_template = t;
1532 }
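/*
 * Illustrative sketch, not part of the build: once the template is
 * published in task_ledger_template, per-task ledgers are minted from it
 * and entries are charged by the indices recorded in task_ledgers.
 * The `pages` variable below is hypothetical.
 *
 *	ledger_t l = ledger_instantiate(task_ledger_template,
 *	    LEDGER_CREATE_ACTIVE_ENTRIES);
 *	if (l != NULL) {
 *		int pages = 4;
 *		ledger_credit(l, task_ledgers.phys_mem, ptoa(pages));
 *		ledger_debit(l, task_ledgers.phys_mem, ptoa(pages));
 *		ledger_dereference(l);
 *	}
 */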
1533
1534 /* Create a task, but leave the task ports disabled */
1535 kern_return_t
1536 task_create_internal(
1537 task_t parent_task, /* Null-able */
1538 proc_ro_t proc_ro,
1539 coalition_t *parent_coalitions __unused,
1540 boolean_t inherit_memory,
1541 boolean_t is_64bit,
1542 boolean_t is_64bit_data,
1543 uint32_t t_flags,
1544 uint32_t t_flags_ro,
1545 uint32_t t_procflags,
1546 uint8_t t_returnwaitflags,
1547 task_t child_task)
1548 {
1549 task_t new_task;
1550 vm_shared_region_t shared_region;
1551 ledger_t ledger = NULL;
1552 struct task_ro_data task_ro_data = {};
1553 uint32_t parent_t_flags_ro = 0;
1554
1555 new_task = child_task;
1556
1557 if (task_ref_count_init(new_task) != KERN_SUCCESS) {
1558 return KERN_RESOURCE_SHORTAGE;
1559 }
1560
1561 /* allocate with active entries */
1562 assert(task_ledger_template != NULL);
1563 ledger = ledger_instantiate(task_ledger_template, LEDGER_CREATE_ACTIVE_ENTRIES);
1564 if (ledger == NULL) {
1565 task_ref_count_fini(new_task);
1566 return KERN_RESOURCE_SHORTAGE;
1567 }
1568
1569 counter_alloc(&(new_task->faults));
1570
1571 #if defined(HAS_APPLE_PAC)
1572 const uint8_t disable_user_jop = inherit_memory ? parent_task->disable_user_jop : FALSE;
1573 ml_task_set_rop_pid(new_task, parent_task, inherit_memory);
1574 ml_task_set_jop_pid(new_task, parent_task, inherit_memory, disable_user_jop);
1575 ml_task_set_disable_user_jop(new_task, disable_user_jop);
1576 #endif
1577
1578
1579 new_task->ledger = ledger;
1580
1581 /* if inherit_memory is true, parent_task MUST not be NULL */
1582 if (!(t_flags & TF_CORPSE_FORK) && inherit_memory) {
1583 #if CONFIG_DEFERRED_RECLAIM
1584 if (parent_task->deferred_reclamation_metadata) {
1585 /*
1586 * Prevent concurrent reclaims while we're forking the parent_task's map,
1587 * so that the child's map is in sync with the forked reclamation
1588 * metadata.
1589 */
1590 vm_deferred_reclamation_ring_own(
1591 parent_task->deferred_reclamation_metadata);
1592 }
1593 #endif /* CONFIG_DEFERRED_RECLAIM */
1594 new_task->map = vm_map_fork(ledger, parent_task->map, 0);
1595 #if CONFIG_DEFERRED_RECLAIM
1596 if (new_task->map != NULL &&
1597 parent_task->deferred_reclamation_metadata) {
1598 new_task->deferred_reclamation_metadata =
1599 vm_deferred_reclamation_task_fork(new_task,
1600 parent_task->deferred_reclamation_metadata);
1601 }
1602 if (parent_task->deferred_reclamation_metadata) {
1603 vm_deferred_reclamation_ring_disown(
1604 parent_task->deferred_reclamation_metadata);
1605 }
1606 #endif /* CONFIG_DEFERRED_RECLAIM */
1607 } else {
1608 unsigned int pmap_flags = is_64bit ? PMAP_CREATE_64BIT : 0;
1609 pmap_t pmap = pmap_create_options(ledger, 0, pmap_flags);
1610 vm_map_t new_map;
1611
1612 if (pmap == NULL) {
1613 counter_free(&new_task->faults);
1614 ledger_dereference(ledger);
1615 task_ref_count_fini(new_task);
1616 return KERN_RESOURCE_SHORTAGE;
1617 }
1618 new_map = vm_map_create_options(pmap,
1619 (vm_map_offset_t)(VM_MIN_ADDRESS),
1620 (vm_map_offset_t)(VM_MAX_ADDRESS),
1621 VM_MAP_CREATE_PAGEABLE);
1622 if (parent_task) {
1623 vm_map_inherit_limits(new_map, parent_task->map);
1624 }
1625 new_task->map = new_map;
1626 }
1627
1628 if (new_task->map == NULL) {
1629 counter_free(&new_task->faults);
1630 ledger_dereference(ledger);
1631 task_ref_count_fini(new_task);
1632 return KERN_RESOURCE_SHORTAGE;
1633 }
1634
1635 lck_mtx_init(&new_task->lock, &task_lck_grp, &task_lck_attr);
1636 queue_init(&new_task->threads);
1637 new_task->suspend_count = 0;
1638 new_task->thread_count = 0;
1639 new_task->active_thread_count = 0;
1640 new_task->user_stop_count = 0;
1641 new_task->legacy_stop_count = 0;
1642 new_task->active = TRUE;
1643 new_task->halting = FALSE;
1644 new_task->priv_flags = 0;
1645 new_task->t_flags = t_flags;
1646 task_ro_data.t_flags_ro = t_flags_ro;
1647 new_task->t_procflags = t_procflags;
1648 new_task->t_returnwaitflags = t_returnwaitflags;
1649 new_task->returnwait_inheritor = current_thread();
1650 new_task->importance = 0;
1651 new_task->crashed_thread_id = 0;
1652 new_task->watchports = NULL;
1653 new_task->t_rr_ranges = NULL;
1654
1655 new_task->bank_context = NULL;
1656
1657 if (parent_task) {
1658 parent_t_flags_ro = task_ro_flags_get(parent_task);
1659 }
1660
1661 if (parent_task && inherit_memory) {
1662 #if __has_feature(ptrauth_calls)
1663 /* Inherit the pac exception flags from parent if in fork */
1664 task_ro_data.t_flags_ro |= (parent_t_flags_ro & (TFRO_PAC_ENFORCE_USER_STATE |
1665 TFRO_PAC_EXC_FATAL));
1666 #endif /* __has_feature(ptrauth_calls) */
1667 /* Inherit the platform restrictions flags from parent if in fork */
1668 task_ro_data.t_flags_ro |= parent_t_flags_ro & (TFRO_PLATFORM | TFRO_JIT_EXC_FATAL);
1669 #if XNU_TARGET_OS_OSX
1670 task_ro_data.t_flags_ro |= parent_t_flags_ro & TFRO_MACH_HARDENING_OPT_OUT;
1671 #endif /* XNU_TARGET_OS_OSX */
1672
1673 /* task_security_config options are always inherited on fork */
1674 new_task->security_config = parent_task->security_config;
1675 }
1676
1677 #ifdef MACH_BSD
1678 new_task->corpse_info = NULL;
1679 #endif /* MACH_BSD */
1680
1681 /* kern_task, which is not created by this function, has unique id 0; ids assigned here start at 1. */
1682 task_set_uniqueid(new_task);
1683
1684 #if CONFIG_MACF
1685 set_task_crash_label(new_task, NULL);
1686
1687 task_ro_data.task_filters.mach_trap_filter_mask = NULL;
1688 task_ro_data.task_filters.mach_kobj_filter_mask = NULL;
1689 #endif
1690
1691 #if CONFIG_MEMORYSTATUS
1692 if (max_task_footprint != 0) {
1693 ledger_set_limit(ledger, task_ledgers.phys_footprint, max_task_footprint, PHYS_FOOTPRINT_WARNING_LEVEL);
1694 }
1695 #endif /* CONFIG_MEMORYSTATUS */
1696
1697 if (task_wakeups_monitor_rate != 0) {
1698 uint32_t flags = WAKEMON_ENABLE | WAKEMON_SET_DEFAULTS;
1699 int32_t rate; // Ignored because of WAKEMON_SET_DEFAULTS
1700 task_wakeups_monitor_ctl(new_task, &flags, &rate);
1701 }
1702
1703 #if CONFIG_IO_ACCOUNTING
1704 uint32_t flags = IOMON_ENABLE;
1705 task_io_monitor_ctl(new_task, &flags);
1706 #endif /* CONFIG_IO_ACCOUNTING */
1707
1708 machine_task_init(new_task, parent_task, inherit_memory);
1709
1710 new_task->task_debug = NULL;
1711
1712 #if DEVELOPMENT || DEBUG
1713 new_task->task_unnested = FALSE;
1714 new_task->task_disconnected_count = 0;
1715 #endif
1716 queue_init(&new_task->semaphore_list);
1717 new_task->semaphores_owned = 0;
1718
1719 new_task->vtimers = 0;
1720
1721 new_task->shared_region = NULL;
1722
1723 new_task->affinity_space = NULL;
1724
1725 #if CONFIG_CPU_COUNTERS
1726 new_task->t_kpc = 0;
1727 #endif /* CONFIG_CPU_COUNTERS */
1728
1729 new_task->pidsuspended = FALSE;
1730 new_task->frozen = FALSE;
1731 new_task->changing_freeze_state = FALSE;
1732 new_task->rusage_cpu_flags = 0;
1733 new_task->rusage_cpu_percentage = 0;
1734 new_task->rusage_cpu_interval = 0;
1735 new_task->rusage_cpu_deadline = 0;
1736 new_task->rusage_cpu_callt = NULL;
1737 #if MACH_ASSERT
1738 new_task->suspends_outstanding = 0;
1739 #endif
1740 recount_task_init(&new_task->tk_recount);
1741
1742 #if HYPERVISOR
1743 new_task->hv_task_target = NULL;
1744 #endif /* HYPERVISOR */
1745
1746 #if CONFIG_TASKWATCH
1747 queue_init(&new_task->task_watchers);
1748 new_task->num_taskwatchers = 0;
1749 new_task->watchapplying = 0;
1750 #endif /* CONFIG_TASKWATCH */
1751
1752 new_task->mem_notify_reserved = 0;
1753
1754 new_task->requested_policy = default_task_requested_policy;
1755 new_task->effective_policy = default_task_effective_policy;
1756
1757 new_task->task_shared_region_slide = -1;
1758
1759 if (parent_task != NULL) {
1760 task_ro_data.task_tokens.sec_token = *task_get_sec_token(parent_task);
1761 task_ro_data.task_tokens.audit_token = *task_get_audit_token(parent_task);
1762
1763 task_ro_data.t_flags_ro |= parent_t_flags_ro & TFRO_FILTER_MSG;
1764 #if CONFIG_MACF
1765 if (!(t_flags & TF_CORPSE_FORK)) {
1766 task_ro_data.task_filters.mach_trap_filter_mask = task_get_mach_trap_filter_mask(parent_task);
1767 task_ro_data.task_filters.mach_kobj_filter_mask = task_get_mach_kobj_filter_mask(parent_task);
1768 }
1769 #endif
1770 } else {
1771 task_ro_data.task_tokens.sec_token = KERNEL_SECURITY_TOKEN;
1772 task_ro_data.task_tokens.audit_token = KERNEL_AUDIT_TOKEN;
1773 }
1774 /* set in task_set_ctrl_port_default */
1775 task_ro_data.task_control_port_options = TASK_CONTROL_PORT_OPTIONS_INVALID;
1776
1777 /* must be set before task_importance_init_from_parent: */
1778 if (proc_ro != NULL) {
1779 new_task->bsd_info_ro = proc_ro_ref_task(proc_ro, new_task, &task_ro_data);
1780 } else {
1781 new_task->bsd_info_ro = proc_ro_alloc(NULL, NULL, new_task, &task_ro_data);
1782 }
1783
1784 ipc_task_init(new_task, parent_task);
1785
1786 task_importance_init_from_parent(new_task, parent_task);
1787
1788 new_task->corpse_vmobject_list = NULL;
1789
1790 if (parent_task != TASK_NULL) {
1791 /* inherit the parent's shared region */
1792 shared_region = vm_shared_region_get(parent_task);
1793 if (shared_region != NULL) {
1794 vm_shared_region_set(new_task, shared_region);
1795 }
1796
1797 #if __has_feature(ptrauth_calls)
1798 /* use parent's shared_region_id */
1799 char *shared_region_id = task_get_vm_shared_region_id_and_jop_pid(parent_task, NULL);
1800 if (shared_region_id != NULL) {
1801 shared_region_key_alloc(shared_region_id, FALSE, 0); /* get a reference */
1802 }
1803 task_set_shared_region_id(new_task, shared_region_id);
1804 #endif /* __has_feature(ptrauth_calls) */
1805
1806 if (task_has_64Bit_addr(parent_task)) {
1807 task_set_64Bit_addr(new_task);
1808 }
1809
1810 if (task_has_64Bit_data(parent_task)) {
1811 task_set_64Bit_data(new_task);
1812 }
1813
1814 if (inherit_memory) {
1815 new_task->all_image_info_addr = parent_task->all_image_info_addr;
1816 new_task->all_image_info_size = parent_task->all_image_info_size;
1817 if (parent_task->t_flags & TF_DYLD_ALL_IMAGE_FINAL) {
1818 new_task->t_flags |= TF_DYLD_ALL_IMAGE_FINAL;
1819 }
1820 }
1821 new_task->mach_header_vm_address = 0;
1822
1823 if (inherit_memory && parent_task->affinity_space) {
1824 task_affinity_create(parent_task, new_task);
1825 }
1826
1827 new_task->pset_hint = parent_task->pset_hint = task_choose_pset(parent_task);
1828
1829 new_task->task_exc_guard = parent_task->task_exc_guard;
1830 if (parent_task->t_flags & TF_NO_SMT) {
1831 new_task->t_flags |= TF_NO_SMT;
1832 }
1833
1834 if (parent_task->t_flags & TF_USE_PSET_HINT_CLUSTER_TYPE) {
1835 new_task->t_flags |= TF_USE_PSET_HINT_CLUSTER_TYPE;
1836 }
1837
1838 if (parent_task->t_flags & TF_TECS) {
1839 new_task->t_flags |= TF_TECS;
1840 }
1841
1842 #if defined(__x86_64__)
1843 if (parent_task->t_flags & TF_INSN_COPY_OPTOUT) {
1844 new_task->t_flags |= TF_INSN_COPY_OPTOUT;
1845 }
1846 #endif
1847
1848 #if HAS_MTE || HAS_MTE_EMULATION_SHIMS
1849 /*
1850 * On inherit_memory, inherit sec-enabled and sec-inherit,
1851 * and enable them on the address space. The fork() case
1852 * is independent of the inheritance rules, as we must
1853 * support a parent duplicating the VA space and accessing
1854 * tagged memory in the child.
1855 */
1856 if (inherit_memory) {
1857 if (task_has_sec(parent_task)) {
1858 task_set_sec(new_task);
1859 vm_map_set_sec_enabled(get_task_map(new_task));
1860 }
1861 if (task_has_sec_user_data(parent_task)) {
1862 task_set_sec_user_data(new_task);
1863 }
1864 if (task_has_sec_soft_mode(parent_task)) {
1865 task_set_sec_soft_mode(new_task);
1866 }
1867 #if DEVELOPMENT || DEBUG
1868 /*
1869 * The following configuration options are only
1870 * available for debugging.
1871 */
1872 if (task_has_sec_inherit(parent_task)) {
1873 task_set_sec_inherit(new_task);
1874 }
1875 if (task_has_sec_never_check(parent_task)) {
1876 task_set_sec_never_check(new_task);
1877 vm_map_set_sec_disabled(get_task_map(new_task));
1878 }
1879 #endif /* DEVELOPMENT || DEBUG */
1880 }
1881 #endif /* HAS_MTE || HAS_MTE_EMULATION_SHIMS */
1882
1883 new_task->priority = BASEPRI_DEFAULT;
1884 new_task->max_priority = MAXPRI_USER;
1885 } else {
1886 #ifdef __LP64__
1887 if (is_64bit) {
1888 task_set_64Bit_addr(new_task);
1889 }
1890 #endif
1891
1892 if (is_64bit_data) {
1893 task_set_64Bit_data(new_task);
1894 }
1895
1896 new_task->all_image_info_addr = (mach_vm_address_t)0;
1897 new_task->all_image_info_size = (mach_vm_size_t)0;
1898
1899 new_task->pset_hint = PROCESSOR_SET_NULL;
1900
1901 new_task->task_exc_guard = TASK_EXC_GUARD_NONE;
1902
1903 if (new_task == kernel_task) {
1904 new_task->priority = BASEPRI_KERNEL;
1905 new_task->max_priority = MAXPRI_KERNEL;
1906 } else {
1907 new_task->priority = BASEPRI_DEFAULT;
1908 new_task->max_priority = MAXPRI_USER;
1909 }
1910 }
1911
1912 bzero(new_task->coalition, sizeof(new_task->coalition));
1913 for (int i = 0; i < COALITION_NUM_TYPES; i++) {
1914 queue_chain_init(new_task->task_coalition[i]);
1915 }
1916
1917 /* Allocate I/O Statistics */
1918 new_task->task_io_stats = kalloc_data(sizeof(struct io_stat_info),
1919 Z_WAITOK | Z_ZERO | Z_NOFAIL);
1920
1921 bzero(&(new_task->cpu_time_eqos_stats), sizeof(new_task->cpu_time_eqos_stats));
1922 bzero(&(new_task->cpu_time_rqos_stats), sizeof(new_task->cpu_time_rqos_stats));
1923
1924 bzero(&new_task->extmod_statistics, sizeof(new_task->extmod_statistics));
1925
1926 counter_alloc(&(new_task->pageins));
1927 counter_alloc(&(new_task->cow_faults));
1928 counter_alloc(&(new_task->messages_sent));
1929 counter_alloc(&(new_task->messages_received));
1930
1931 /* Copy resource accounting info from the parent for a corpse-forked task. */
1932 if (parent_task != NULL && (t_flags & TF_CORPSE_FORK)) {
1933 task_rollup_accounting_info(new_task, parent_task);
1934 task_store_owned_vmobject_info(new_task, parent_task);
1935 } else {
1936 /* Initialize to zero for standard fork/spawn case */
1937 new_task->total_runnable_time = 0;
1938 new_task->syscalls_mach = 0;
1939 new_task->syscalls_unix = 0;
1940 new_task->c_switch = 0;
1941 new_task->p_switch = 0;
1942 new_task->ps_switch = 0;
1943 new_task->decompressions = 0;
1944 new_task->low_mem_notified_warn = 0;
1945 new_task->low_mem_notified_critical = 0;
1946 new_task->purged_memory_warn = 0;
1947 new_task->purged_memory_critical = 0;
1948 new_task->low_mem_privileged_listener = 0;
1949 os_atomic_store(&new_task->memlimit_flags, 0, relaxed);
1950 new_task->task_timer_wakeups_bin_1 = 0;
1951 new_task->task_timer_wakeups_bin_2 = 0;
1952 new_task->task_gpu_ns = 0;
1953 new_task->task_writes_counters_internal.task_immediate_writes = 0;
1954 new_task->task_writes_counters_internal.task_deferred_writes = 0;
1955 new_task->task_writes_counters_internal.task_invalidated_writes = 0;
1956 new_task->task_writes_counters_internal.task_metadata_writes = 0;
1957 new_task->task_writes_counters_external.task_immediate_writes = 0;
1958 new_task->task_writes_counters_external.task_deferred_writes = 0;
1959 new_task->task_writes_counters_external.task_invalidated_writes = 0;
1960 new_task->task_writes_counters_external.task_metadata_writes = 0;
1961 #if CONFIG_PHYS_WRITE_ACCT
1962 new_task->task_fs_metadata_writes = 0;
1963 #endif /* CONFIG_PHYS_WRITE_ACCT */
1964 }
1965
1966
1967 new_task->donates_own_pages = FALSE;
1968 #if CONFIG_COALITIONS
1969 if (!(t_flags & TF_CORPSE_FORK)) {
1970 /* TODO: there is no graceful failure path here... */
1971 if (parent_coalitions && parent_coalitions[COALITION_TYPE_RESOURCE]) {
1972 coalitions_adopt_task(parent_coalitions, new_task);
1973 if (parent_coalitions[COALITION_TYPE_JETSAM]) {
1974 new_task->donates_own_pages = coalition_is_swappable(parent_coalitions[COALITION_TYPE_JETSAM]);
1975 }
1976 } else if (parent_task && parent_task->coalition[COALITION_TYPE_RESOURCE]) {
1977 /*
1978 * all tasks at least have a resource coalition, so
1979 * if the parent has one then inherit all coalitions
1980 * the parent is a part of
1981 */
1982 coalitions_adopt_task(parent_task->coalition, new_task);
1983 if (parent_task->coalition[COALITION_TYPE_JETSAM]) {
1984 new_task->donates_own_pages = coalition_is_swappable(parent_task->coalition[COALITION_TYPE_JETSAM]);
1985 }
1986 } else {
1987 /* TODO: assert that new_task will be PID 1 (launchd) */
1988 coalitions_adopt_init_task(new_task);
1989 }
1990 /*
1991 * on exec, we need to transfer the coalition roles from the
1992 * parent task to the exec copy task.
1993 */
1994 if (parent_task && (t_procflags & TPF_EXEC_COPY)) {
1995 int coal_roles[COALITION_NUM_TYPES];
1996 task_coalition_roles(parent_task, coal_roles);
1997 (void)coalitions_set_roles(new_task->coalition, new_task, coal_roles);
1998 }
1999 } else {
2000 coalitions_adopt_corpse_task(new_task);
2001 }
2002
2003 if (new_task->coalition[COALITION_TYPE_RESOURCE] == COALITION_NULL) {
2004 panic("created task is not a member of a resource coalition");
2005 }
2006 task_set_coalition_member(new_task);
2007 #endif /* CONFIG_COALITIONS */
2008
2009 if (parent_task != TASK_NULL) {
2010 /* task_policy_create queries the adopted coalition */
2011 task_policy_create(new_task, parent_task);
2012 }
2013
2014 new_task->dispatchqueue_offset = 0;
2015 if (parent_task != NULL) {
2016 new_task->dispatchqueue_offset = parent_task->dispatchqueue_offset;
2017 }
2018
2019 new_task->task_can_transfer_memory_ownership = FALSE;
2020 new_task->task_volatile_objects = 0;
2021 new_task->task_nonvolatile_objects = 0;
2022 new_task->task_objects_disowning = FALSE;
2023 new_task->task_objects_disowned = FALSE;
2024 new_task->task_owned_objects = 0;
2025 queue_init(&new_task->task_objq);
2026
2027 #if CONFIG_FREEZE
2028 queue_init(&new_task->task_frozen_cseg_q);
2029 #endif /* CONFIG_FREEZE */
2030
2031 task_objq_lock_init(new_task);
2032
2033 #if __arm64__
2034 new_task->task_legacy_footprint = FALSE;
2035 new_task->task_extra_footprint_limit = FALSE;
2036 new_task->task_ios13extended_footprint_limit = FALSE;
2037 #endif /* __arm64__ */
2038 new_task->task_region_footprint = FALSE;
2039 new_task->task_has_crossed_thread_limit = FALSE;
2040 new_task->task_thread_limit = 0;
2041 #if CONFIG_SECLUDED_MEMORY
2042 new_task->task_can_use_secluded_mem = FALSE;
2043 new_task->task_could_use_secluded_mem = FALSE;
2044 new_task->task_could_also_use_secluded_mem = FALSE;
2045 new_task->task_suppressed_secluded = FALSE;
2046 #endif /* CONFIG_SECLUDED_MEMORY */
2047
2048
2049 /*
2050 * t_flags is set up above. But since we don't
2051 * support darkwake mode being set that way
2052 * currently, we clear it out here explicitly.
2053 */
2054 new_task->t_flags &= ~(TF_DARKWAKE_MODE);
2055
2056 queue_init(&new_task->io_user_clients);
2057 new_task->loadTag = 0;
2058
2059 lck_mtx_lock(&tasks_threads_lock);
2060 queue_enter(&tasks, new_task, task_t, tasks);
2061 tasks_count++;
2062 if (tasks_suspend_state) {
2063 task_suspend_internal(new_task);
2064 }
2065 lck_mtx_unlock(&tasks_threads_lock);
2066 task_ref_hold_proc_task_struct(new_task);
2067
2068 return KERN_SUCCESS;
2069 }
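/*
 * Illustrative sketch, not part of the build: a fork-style caller passes
 * the parent task and a pre-allocated child task. All variable names are
 * hypothetical, and the flag arguments are left as zero for brevity.
 *
 *	kern_return_t kr;
 *	kr = task_create_internal(parent, proc_ro, coalitions,
 *	    TRUE,                          -- inherit_memory
 *	    task_has_64Bit_addr(parent),
 *	    task_has_64Bit_data(parent),
 *	    0, 0, 0, 0,                    -- t_flags .. t_returnwaitflags
 *	    child);
 *	if (kr != KERN_SUCCESS) {
 *		return kr;                 -- child was not initialized
 *	}
 */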
2070
2071 /*
2072 * task_rollup_accounting_info
2073 *
2074 * Roll up accounting stats. Used to roll up stats
2075 * for the exec copy task and for corpse fork.
2076 */
2077 void
2078 task_rollup_accounting_info(task_t to_task, task_t from_task)
2079 {
2080 assert(from_task != to_task);
2081
2082 recount_task_copy(&to_task->tk_recount, &from_task->tk_recount);
2083 to_task->total_runnable_time = from_task->total_runnable_time;
2084 counter_add(&to_task->faults, counter_load(&from_task->faults));
2085 counter_add(&to_task->pageins, counter_load(&from_task->pageins));
2086 counter_add(&to_task->cow_faults, counter_load(&from_task->cow_faults));
2087 counter_add(&to_task->messages_sent, counter_load(&from_task->messages_sent));
2088 counter_add(&to_task->messages_received, counter_load(&from_task->messages_received));
2089 to_task->decompressions = from_task->decompressions;
2090 to_task->syscalls_mach = from_task->syscalls_mach;
2091 to_task->syscalls_unix = from_task->syscalls_unix;
2092 to_task->c_switch = from_task->c_switch;
2093 to_task->p_switch = from_task->p_switch;
2094 to_task->ps_switch = from_task->ps_switch;
2095 to_task->extmod_statistics = from_task->extmod_statistics;
2096 to_task->low_mem_notified_warn = from_task->low_mem_notified_warn;
2097 to_task->low_mem_notified_critical = from_task->low_mem_notified_critical;
2098 to_task->purged_memory_warn = from_task->purged_memory_warn;
2099 to_task->purged_memory_critical = from_task->purged_memory_critical;
2100 to_task->low_mem_privileged_listener = from_task->low_mem_privileged_listener;
2101 *to_task->task_io_stats = *from_task->task_io_stats;
2102 to_task->cpu_time_eqos_stats = from_task->cpu_time_eqos_stats;
2103 to_task->cpu_time_rqos_stats = from_task->cpu_time_rqos_stats;
2104 to_task->task_timer_wakeups_bin_1 = from_task->task_timer_wakeups_bin_1;
2105 to_task->task_timer_wakeups_bin_2 = from_task->task_timer_wakeups_bin_2;
2106 to_task->task_gpu_ns = from_task->task_gpu_ns;
2107 to_task->task_writes_counters_internal.task_immediate_writes = from_task->task_writes_counters_internal.task_immediate_writes;
2108 to_task->task_writes_counters_internal.task_deferred_writes = from_task->task_writes_counters_internal.task_deferred_writes;
2109 to_task->task_writes_counters_internal.task_invalidated_writes = from_task->task_writes_counters_internal.task_invalidated_writes;
2110 to_task->task_writes_counters_internal.task_metadata_writes = from_task->task_writes_counters_internal.task_metadata_writes;
2111 to_task->task_writes_counters_external.task_immediate_writes = from_task->task_writes_counters_external.task_immediate_writes;
2112 to_task->task_writes_counters_external.task_deferred_writes = from_task->task_writes_counters_external.task_deferred_writes;
2113 to_task->task_writes_counters_external.task_invalidated_writes = from_task->task_writes_counters_external.task_invalidated_writes;
2114 to_task->task_writes_counters_external.task_metadata_writes = from_task->task_writes_counters_external.task_metadata_writes;
2115 #if CONFIG_PHYS_WRITE_ACCT
2116 to_task->task_fs_metadata_writes = from_task->task_fs_metadata_writes;
2117 #endif /* CONFIG_PHYS_WRITE_ACCT */
2118
2119 #if CONFIG_MEMORYSTATUS
2120 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.memorystatus_dirty_time);
2121 #endif /* CONFIG_MEMORYSTATUS */
2122
2123 /* Skip ledger roll up for memory accounting entries */
2124 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time);
2125 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.platform_idle_wakeups);
2126 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.interrupt_wakeups);
2127 #if CONFIG_SCHED_SFI
2128 for (sfi_class_id_t class_id = SFI_CLASS_UNSPECIFIED; class_id < MAX_SFI_CLASS_ID; class_id++) {
2129 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.sfi_wait_times[class_id]);
2130 }
2131 #endif
2132 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time_billed_to_me);
2133 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time_billed_to_others);
2134 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.physical_writes);
2135 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.logical_writes);
2136 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.energy_billed_to_me);
2137 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.energy_billed_to_others);
2138 }
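/*
 * Conceptually, each ledger_rollup_entry() call folds one entry's
 * activity from the source ledger into the destination; a rough model of
 * the effect, with hypothetical field names:
 *
 *	to->entries[i].credit += from->entries[i].credit;
 *	to->entries[i].debit  += from->entries[i].debit;
 */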
2139
2140 /*
2141 * task_deallocate_internal:
2142 *
2143 * Drop a reference on a task.
2144 * Don't call this directly.
2145 */
2146 extern void task_deallocate_internal(task_t task, os_ref_count_t refs);
2147 void
2148 task_deallocate_internal(
2149 task_t task,
2150 os_ref_count_t refs)
2151 {
2152 ledger_amount_t credit, debit, interrupt_wakeups, platform_idle_wakeups;
2153
2154 if (task == TASK_NULL) {
2155 return;
2156 }
2157
2158 #if IMPORTANCE_INHERITANCE
2159 if (refs == 1) {
2160 /*
2161 * If the last ref potentially comes from the task's importance,
2162 * disconnect it. But more task refs may be added before
2163 * that completes, so wait for the reference to go to zero
2164 * naturally (this may happen via a recursive task_deallocate()
2165 * from the ipc_importance_disconnect_task() call).
2166 */
2167 if (IIT_NULL != task->task_imp_base) {
2168 ipc_importance_disconnect_task(task);
2169 }
2170 return;
2171 }
2172 #endif /* IMPORTANCE_INHERITANCE */
2173
2174 if (refs > 0) {
2175 return;
2176 }
2177
2178 /*
2179 * The task should be dead at this point. Ensure other resources,
2180 * like threads, are gone before we trash the world.
2181 */
2182 assert(queue_empty(&task->threads));
2183 assert(get_bsdtask_info(task) == NULL);
2184 assert(!is_active(task->itk_space));
2185 assert(!task->active);
2186 assert(task->active_thread_count == 0);
2187 assert(!task_get_game_mode(task));
2188 assert(!task_get_carplay_mode(task));
2189
2190 lck_mtx_lock(&tasks_threads_lock);
2191 assert(terminated_tasks_count > 0);
2192 queue_remove(&terminated_tasks, task, task_t, tasks);
2193 terminated_tasks_count--;
2194 lck_mtx_unlock(&tasks_threads_lock);
2195
2196 /*
2197 * remove the reference on bank context
2198 */
2199 task_bank_reset(task);
2200
2201 kfree_data(task->task_io_stats, sizeof(struct io_stat_info));
2202
2203 /*
2204 * Give the machine dependent code a chance
2205 * to perform cleanup before ripping apart
2206 * the task.
2207 */
2208 machine_task_terminate(task);
2209
2210 ipc_task_terminate(task);
2211
2212 /* let IOKit know (phase 2 of termination) */
2213 iokit_task_terminate(task, 2);
2214
2215 /* Unregister task from userspace coredumps on panic */
2216 kern_unregister_userspace_coredump(task);
2217
2218 if (task->affinity_space) {
2219 task_affinity_deallocate(task);
2220 }
2221
2222 #if MACH_ASSERT
2223 if (task->ledger != NULL &&
2224 task->map != NULL &&
2225 task->map->pmap != NULL &&
2226 task->map->pmap->ledger != NULL) {
2227 assert(task->ledger == task->map->pmap->ledger);
2228 }
2229 #endif /* MACH_ASSERT */
2230
2231 vm_owned_objects_disown(task);
2232 assert(task->task_objects_disowned);
2233 if (task->task_owned_objects != 0) {
2234 panic("task_deallocate(%p): "
2235 "volatile_objects=%d nonvolatile_objects=%d owned=%d\n",
2236 task,
2237 task->task_volatile_objects,
2238 task->task_nonvolatile_objects,
2239 task->task_owned_objects);
2240 }
2241
2242 #if CONFIG_DEFERRED_RECLAIM
2243 /*
2244 * Remove this task's reclaim buffer from the global queues.
2245 */
2246 if (task->deferred_reclamation_metadata != NULL) {
2247 vm_deferred_reclamation_buffer_deallocate(task->deferred_reclamation_metadata);
2248 task->deferred_reclamation_metadata = NULL;
2249 }
2250 #endif /* CONFIG_DEFERRED_RECLAIM */
2251
2252 vm_map_deallocate(task->map);
2253 if (task->is_large_corpse) {
2254 assert(large_corpse_count > 0);
2255 OSDecrementAtomic(&large_corpse_count);
2256 task->is_large_corpse = false;
2257 }
2258 is_release(task->itk_space);
2259
2260 if (task->t_rr_ranges) {
2261 restartable_ranges_release(task->t_rr_ranges);
2262 }
2263
2264 ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
2265 &interrupt_wakeups, &debit);
2266 ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
2267 &platform_idle_wakeups, &debit);
2268
2269 struct recount_times_mach sum = { 0 };
2270 struct recount_times_mach p_only = { 0 };
2271 recount_task_times_perf_only(task, &sum, &p_only);
2272 #if CONFIG_PERVASIVE_ENERGY
2273 uint64_t energy = recount_task_energy_nj(task);
2274 #endif /* CONFIG_PERVASIVE_ENERGY */
2275 recount_task_deinit(&task->tk_recount);
2276
2277 /* Accumulate statistics for dead tasks */
2278 lck_spin_lock(&dead_task_statistics_lock);
2279 dead_task_statistics.total_user_time += sum.rtm_user;
2280 dead_task_statistics.total_system_time += sum.rtm_system;
2281
2282 dead_task_statistics.task_interrupt_wakeups += interrupt_wakeups;
2283 dead_task_statistics.task_platform_idle_wakeups += platform_idle_wakeups;
2284
2285 dead_task_statistics.task_timer_wakeups_bin_1 += task->task_timer_wakeups_bin_1;
2286 dead_task_statistics.task_timer_wakeups_bin_2 += task->task_timer_wakeups_bin_2;
2287 dead_task_statistics.total_ptime += p_only.rtm_user + p_only.rtm_system;
2288 dead_task_statistics.total_pset_switches += task->ps_switch;
2289 dead_task_statistics.task_gpu_ns += task->task_gpu_ns;
2290 #if CONFIG_PERVASIVE_ENERGY
2291 dead_task_statistics.task_energy += energy;
2292 #endif /* CONFIG_PERVASIVE_ENERGY */
2293
2294 lck_spin_unlock(&dead_task_statistics_lock);
2295 lck_mtx_destroy(&task->lock, &task_lck_grp);
2296
2297 if (!ledger_get_entries(task->ledger, task_ledgers.tkm_private, &credit,
2298 &debit)) {
2299 OSAddAtomic64(credit, (int64_t *)&tasks_tkm_private.alloc);
2300 OSAddAtomic64(debit, (int64_t *)&tasks_tkm_private.free);
2301 }
2302 if (!ledger_get_entries(task->ledger, task_ledgers.tkm_shared, &credit,
2303 &debit)) {
2304 OSAddAtomic64(credit, (int64_t *)&tasks_tkm_shared.alloc);
2305 OSAddAtomic64(debit, (int64_t *)&tasks_tkm_shared.free);
2306 }
2307 ledger_dereference(task->ledger);
2308
2309 counter_free(&task->faults);
2310 counter_free(&task->pageins);
2311 counter_free(&task->cow_faults);
2312 counter_free(&task->messages_sent);
2313 counter_free(&task->messages_received);
2314
2315 #if CONFIG_COALITIONS
2316 task_release_coalitions(task);
2317 #endif /* CONFIG_COALITIONS */
2318
2319 bzero(task->coalition, sizeof(task->coalition));
2320
2321 #if MACH_BSD
2322 /* clean up collected information since the last reference to the task is gone */
2323 if (task->corpse_info) {
2324 void *corpse_info_kernel = kcdata_memory_get_begin_addr(task->corpse_info);
2325 task_crashinfo_destroy(task->corpse_info);
2326 task->corpse_info = NULL;
2327 kfree_data(corpse_info_kernel, CORPSEINFO_ALLOCATION_SIZE);
2328 }
2329 #endif
2330
2331 #if CONFIG_MACF
2332 if (get_task_crash_label(task)) {
2333 mac_exc_free_label(get_task_crash_label(task));
2334 set_task_crash_label(task, NULL);
2335 }
2336 #endif
2337
2338 assert(queue_empty(&task->task_objq));
2339 task_objq_lock_destroy(task);
2340
2341 if (task->corpse_vmobject_list) {
2342 kfree_data(task->corpse_vmobject_list,
2343 (vm_size_t)task->corpse_vmobject_list_size);
2344 }
2345
2346 task_ref_count_fini(task);
2347 proc_ro_erase_task(task->bsd_info_ro);
2348 task_release_proc_task_struct(task, task->bsd_info_ro);
2349 }
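/*
 * Illustrative sketch, not part of the build: the task_deallocate()
 * family drops one reference and forwards the resulting count, so the
 * teardown above only runs once the count reaches zero. A simplified
 * model of such a wrapper (field and helper names approximate):
 *
 *	void
 *	task_deallocate(task_t task)
 *	{
 *		if (task != TASK_NULL) {
 *			task_deallocate_internal(task,
 *			    os_ref_release(&task->ref_count));
 *		}
 *	}
 */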
2350
2351 /*
2352 * task_name_deallocate_mig:
2353 *
2354 * Drop a reference on a task name.
2355 */
2356 void
2357 task_name_deallocate_mig(
2358 task_name_t task_name)
2359 {
2360 return task_deallocate_grp((task_t)task_name, TASK_GRP_MIG);
2361 }
2362
2363 /*
2364 * task_policy_set_deallocate_mig:
2365 *
2366 * Drop a reference on a task policy set port.
2367 */
2368 void
2369 task_policy_set_deallocate_mig(task_policy_set_t task_policy_set)
2370 {
2371 return task_deallocate_grp((task_t)task_policy_set, TASK_GRP_MIG);
2372 }
2373
2374 /*
2375 * task_policy_get_deallocate_mig:
2376 *
2377 * Drop a reference on a task policy get port.
2378 */
2379 void
2380 task_policy_get_deallocate_mig(task_policy_get_t task_policy_get)
2381 {
2382 return task_deallocate_grp((task_t)task_policy_get, TASK_GRP_MIG);
2383 }
2384
2385 /*
2386 * task_inspect_deallocate_mig:
2387 *
2388 * Drop a task inspection reference.
2389 */
2390 void
2391 task_inspect_deallocate_mig(
2392 task_inspect_t task_inspect)
2393 {
2394 return task_deallocate_grp((task_t)task_inspect, TASK_GRP_MIG);
2395 }
2396
2397 /*
2398 * task_read_deallocate_mig:
2399 *
2400 * Drop a reference on task read port.
2401 */
2402 void
2403 task_read_deallocate_mig(
2404 task_read_t task_read)
2405 {
2406 return task_deallocate_grp((task_t)task_read, TASK_GRP_MIG);
2407 }
2408
2409 /*
2410 * task_suspension_token_deallocate:
2411 *
2412 * Drop a reference on a task suspension token.
2413 */
2414 void
2415 task_suspension_token_deallocate(
2416 task_suspension_token_t token)
2417 {
2418 return task_deallocate((task_t)token);
2419 }
2420
2421 void
2422 task_suspension_token_deallocate_grp(
2423 task_suspension_token_t token,
2424 task_grp_t grp)
2425 {
2426 return task_deallocate_grp((task_t)token, grp);
2427 }
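/*
 * These MIG deallocation entry points are deliberately thin: the
 * MIG-generated server code consumes one reference per message, and each
 * wrapper forwards to task_deallocate_grp() with TASK_GRP_MIG so the
 * drop can be attributed to MIG. A hypothetical new flavor would follow
 * the same shape (task_foo_t is a made-up type for illustration):
 *
 *	void
 *	task_foo_deallocate_mig(task_foo_t task_foo)
 *	{
 *		return task_deallocate_grp((task_t)task_foo, TASK_GRP_MIG);
 *	}
 */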
2428
2429 /*
2430 * task_collect_crash_info:
2431 *
2432 * collect crash info from bsd and mach based data
2433 */
2434 kern_return_t
2435 task_collect_crash_info(
2436 task_t task,
2437 #ifdef CONFIG_MACF
2438 struct label *crash_label,
2439 #endif
2440 int is_corpse_fork)
2441 {
2442 kern_return_t kr = KERN_SUCCESS;
2443
2444 kcdata_descriptor_t crash_data = NULL;
2445 kcdata_descriptor_t crash_data_release = NULL;
2446 mach_msg_type_number_t size = CORPSEINFO_ALLOCATION_SIZE;
2447 mach_vm_offset_t crash_data_ptr = 0;
2448 void *crash_data_kernel = NULL;
2449 void *crash_data_kernel_release = NULL;
2450 #if CONFIG_MACF
2451 struct label *label, *free_label;
2452 #endif
2453
2454 if (!corpses_enabled()) {
2455 return KERN_NOT_SUPPORTED;
2456 }
2457
2458 #if CONFIG_MACF
2459 free_label = label = mac_exc_create_label(NULL);
2460 #endif
2461
2462 task_lock(task);
2463
2464 assert(is_corpse_fork || get_bsdtask_info(task) != NULL);
2465 if (task->corpse_info == NULL && (is_corpse_fork || get_bsdtask_info(task) != NULL)) {
2466 #if CONFIG_MACF
2467 /* Set the crash label, used by the exception delivery mac hook */
2468 free_label = get_task_crash_label(task); // Most likely NULL.
2469 set_task_crash_label(task, label);
2470 mac_exc_update_task_crash_label(task, crash_label);
2471 #endif
2472 task_unlock(task);
2473
2474 crash_data_kernel = kalloc_data(CORPSEINFO_ALLOCATION_SIZE,
2475 Z_WAITOK | Z_ZERO);
2476 if (crash_data_kernel == NULL) {
2477 kr = KERN_RESOURCE_SHORTAGE;
2478 goto out_no_lock;
2479 }
2480 crash_data_ptr = (mach_vm_offset_t) crash_data_kernel;
2481
2482 /* Do not get a corpse ref for corpse fork */
2483 crash_data = task_crashinfo_alloc_init((mach_vm_address_t)crash_data_ptr, size,
2484 is_corpse_fork ? 0 : CORPSE_CRASHINFO_HAS_REF,
2485 KCFLAG_USE_MEMCOPY);
2486 if (crash_data) {
2487 task_lock(task);
2488 crash_data_release = task->corpse_info;
2489 crash_data_kernel_release = kcdata_memory_get_begin_addr(crash_data_release);
2490 task->corpse_info = crash_data;
2491
2492 task_unlock(task);
2493 kr = KERN_SUCCESS;
2494 } else {
2495 kfree_data(crash_data_kernel,
2496 CORPSEINFO_ALLOCATION_SIZE);
2497 kr = KERN_FAILURE;
2498 }
2499
2500 if (crash_data_release != NULL) {
2501 task_crashinfo_destroy(crash_data_release);
2502 }
2503 kfree_data(crash_data_kernel_release, CORPSEINFO_ALLOCATION_SIZE);
2504 } else {
2505 task_unlock(task);
2506 }
2507
2508 out_no_lock:
2509 #if CONFIG_MACF
2510 if (free_label != NULL) {
2511 mac_exc_free_label(free_label);
2512 }
2513 #endif
2514 return kr;
2515 }
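/*
 * Illustrative sketch, not part of the build: task_mark_corpse() below is
 * the canonical caller; with CONFIG_MACF the crash label is allocated
 * first and handed in (error handling elided, names hypothetical):
 *
 *	struct label *l = mac_exc_create_label_for_proc(proc);
 *	kr = task_collect_crash_info(task, l, FALSE);
 *	mac_exc_free_label(l);
 */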
2516
2517 /*
2518 * task_deliver_crash_notification:
2519 *
2520 * Makes outcall to registered host port for a corpse.
2521 */
2522 kern_return_t
2523 task_deliver_crash_notification(
2524 task_t corpse, /* corpse or corpse fork */
2525 thread_t thread,
2526 exception_type_t etype,
2527 mach_exception_subcode_t subcode)
2528 {
2529 kcdata_descriptor_t crash_info = corpse->corpse_info;
2530 thread_t th_iter = NULL;
2531 kern_return_t kr = KERN_SUCCESS;
2532 wait_interrupt_t wsave;
2533 mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
2534 ipc_port_t corpse_port;
2535
2536 if (crash_info == NULL) {
2537 return KERN_FAILURE;
2538 }
2539
2540 assert(task_is_a_corpse(corpse));
2541
2542 task_lock(corpse);
2543
2544 /*
2545 * Always populate code[0] as the effective exception type for EXC_CORPSE_NOTIFY.
2546 * Crash reporters should derive whether it's fatal from the corpse blob.
2547 */
2548 code[0] = etype;
2549 code[1] = subcode;
2550
2551 queue_iterate(&corpse->threads, th_iter, thread_t, task_threads)
2552 {
2553 if (th_iter->corpse_dup == FALSE) {
2554 ipc_thread_reset(th_iter);
2555 }
2556 }
2557 task_unlock(corpse);
2558
2559 /* Arm the no-sender notification for taskport */
2560 task_reference(corpse);
2561 corpse_port = convert_corpse_to_port_and_nsrequest(corpse);
2562
2563 wsave = thread_interrupt_level(THREAD_UNINT);
2564 kr = exception_triage_thread(EXC_CORPSE_NOTIFY, code, EXCEPTION_CODE_MAX, thread);
2565 if (kr != KERN_SUCCESS) {
2566 printf("Failed to send exception EXC_CORPSE_NOTIFY. error code: %d for pid %d\n", kr, task_pid(corpse));
2567 }
2568
2569 (void)thread_interrupt_level(wsave);
2570
2571 /*
2572 * Drop the send right on corpse port, will fire the
2573 * no-sender notification if exception deliver failed.
2574 */
2575 ipc_port_release_send(corpse_port);
2576 return kr;
2577 }
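/*
 * For EXC_CORPSE_NOTIFY the code words are laid out as:
 *
 *	code[0] = etype;     -- original exception type, e.g. EXC_CRASH
 *	code[1] = subcode;   -- exception-specific detail
 *
 * (A minimal sketch; per the comment above, consumers should treat the
 * corpse kcdata blob as the authoritative record.)
 */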
2578
2579 /*
2580 * task_terminate:
2581 *
2582 * Terminate the specified task. See comments on thread_terminate
2583 * (kern/thread.c) about problems with terminating the "current task."
2584 */
2585
2586 kern_return_t
2587 task_terminate(
2588 task_t task)
2589 {
2590 if (task == TASK_NULL) {
2591 return KERN_INVALID_ARGUMENT;
2592 }
2593
2594 if (get_bsdtask_info(task)) {
2595 return KERN_FAILURE;
2596 }
2597
2598 return task_terminate_internal(task);
2599 }
2600
2601 #if MACH_ASSERT
2602 extern int proc_pid(struct proc *);
2603 extern void proc_name_kdp(struct proc *p, char *buf, int size);
2604 #endif /* MACH_ASSERT */
2605
2606 static void
2607 __unused task_partial_reap(task_t task, __unused int pid)
2608 {
2609 unsigned int reclaimed_resident = 0;
2610 unsigned int reclaimed_compressed = 0;
2611 uint64_t task_page_count;
2612
2613 task_page_count = (get_task_phys_footprint(task) / PAGE_SIZE_64);
2614
2615 KDBG(VMDBG_CODE(DBG_VM_MAP_PARTIAL_REAP) | DBG_FUNC_START,
2616 pid, task_page_count);
2617
2618 vm_map_partial_reap(task->map, &reclaimed_resident, &reclaimed_compressed);
2619
2620 KDBG(VMDBG_CODE(DBG_VM_MAP_PARTIAL_REAP) | DBG_FUNC_END,
2621 pid, reclaimed_resident, reclaimed_compressed);
2622 }
2623
2624 /*
2625 * task_mark_corpse:
2626 *
2627 * Mark the task as a corpse. Called by the crashing thread.
2628 */
2629 kern_return_t
2630 task_mark_corpse(task_t task)
2631 {
2632 kern_return_t kr = KERN_SUCCESS;
2633 thread_t self_thread;
2634 (void) self_thread;
2635 wait_interrupt_t wsave;
2636 #if CONFIG_MACF
2637 struct label *crash_label = NULL;
2638 #endif
2639
2640 assert(task != kernel_task);
2641 assert(task == current_task());
2642 assert(!task_is_a_corpse(task));
2643
2644 #if CONFIG_MACF
2645 crash_label = mac_exc_create_label_for_proc((struct proc*)get_bsdtask_info(task));
2646 #endif
2647
2648 kr = task_collect_crash_info(task,
2649 #if CONFIG_MACF
2650 crash_label,
2651 #endif
2652 FALSE);
2653 if (kr != KERN_SUCCESS) {
2654 goto out;
2655 }
2656
2657 /* Store owned vmobjects so we can access them after being marked as corpse */
2658 task_store_owned_vmobject_info(task, task);
2659
2660 self_thread = current_thread();
2661
2662 wsave = thread_interrupt_level(THREAD_UNINT);
2663 task_lock(task);
2664
2665 /*
2666 * Check if any other thread called task_terminate_internal
2667 * and made the task inactive before we could mark it for
2668 * corpse pending report. Bail out if the task is inactive.
2669 */
2670 if (!task->active) {
2671 kcdata_descriptor_t crash_data_release = task->corpse_info;
2672 void *crash_data_kernel_release = kcdata_memory_get_begin_addr(crash_data_release);
2673
2674 task->corpse_info = NULL;
2675 task_unlock(task);
2676
2677 if (crash_data_release != NULL) {
2678 task_crashinfo_destroy(crash_data_release);
2679 }
2680 kfree_data(crash_data_kernel_release, CORPSEINFO_ALLOCATION_SIZE);
2681 return KERN_TERMINATED;
2682 }
2683
2684 /*
2685 * ipc_task_reset() has moved to the last thread_terminate_self(): rdar://75737960.
2686 * Disable old ports here instead.
2687 *
2688 * The vm_map and ipc_space must exist until this function returns,
2689 * convert_port_to_{map,space}_with_flavor relies on this behavior.
2690 *
2691 * Note this must be done before we mark the port as a corpse,
2692 * so that task_port_no_senders() can determine if the no-senders
2693 * is for a real corpse or not.
2694 */
2695 ipc_task_disable(task);
2696
2697 task_set_corpse_pending_report(task);
2698 task_set_corpse(task);
2699 task->crashed_thread_id = thread_tid(self_thread);
2700
2701 kr = task_start_halt_locked(task, TRUE);
2702 assert(kr == KERN_SUCCESS);
2703
2704 task_set_uniqueid(task);
2705
2706 task_unlock(task);
2707
2708 /* let IOKit know (phase 1 of termination) */
2709 iokit_task_terminate(task, 1);
2710
2711 /* terminate the ipc space */
2712 ipc_space_terminate(task->itk_space);
2713
2714 /* Add it to global corpse task list */
2715 task_add_to_corpse_task_list(task);
2716
2717 thread_terminate_internal(self_thread);
2718
2719 (void) thread_interrupt_level(wsave);
2720 assert(task->halting == TRUE);
2721
2722 out:
2723 #if CONFIG_MACF
2724 mac_exc_free_label(crash_label);
2725 #endif
2726 return kr;
2727 }
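/*
 * Illustrative sketch, not part of the build: a crashing thread's
 * exception path might convert its own task into a corpse before the
 * notification is delivered (error handling elided):
 *
 *	kern_return_t kr = task_mark_corpse(current_task());
 *	if (kr == KERN_SUCCESS) {
 *		-- this thread is now terminating; the corpse is reported
 *		-- via task_deliver_crash_notification() above
 *	}
 */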
2728
2729 /*
2730 * task_set_uniqueid
2731 *
2732 * Set task uniqueid to systemwide unique 64 bit value
2733 */
2734 void
2735 task_set_uniqueid(task_t task)
2736 {
2737 task->task_uniqueid = OSIncrementAtomic64(&next_taskuniqueid);
2738 }
2739
2740 /*
2741 * task_clear_corpse
2742 *
2743 * Clears the corpse pending bit on task.
2744 * Removes inspection bit on the threads.
2745 */
2746 void
2747 task_clear_corpse(task_t task)
2748 {
2749 thread_t th_iter = NULL;
2750
2751 task_lock(task);
2752 queue_iterate(&task->threads, th_iter, thread_t, task_threads)
2753 {
2754 thread_mtx_lock(th_iter);
2755 th_iter->inspection = FALSE;
2756 ipc_thread_disable(th_iter);
2757 thread_mtx_unlock(th_iter);
2758 }
2759
2760 thread_terminate_crashed_threads();
2761 /* remove the pending corpse report flag */
2762 task_clear_corpse_pending_report(task);
2763
2764 task_unlock(task);
2765 }
2766
2767 /*
2768 * task_port_no_senders
2769 *
2770 * Called whenever the Mach port system detects no-senders on
2771 * a control task port.
2772 *
2773 * Only task ports for corpses need to take action on it,
2774 * and each notification that comes in should terminate
2775 * the task (corpse).
2776 */
2777 static void
2778 task_port_no_senders(ipc_port_t port, __unused mach_port_mscount_t mscount)
2779 {
2780 bool is_corpse = false;
2781 task_t task;
2782
2783 ip_mq_lock(port);
2784 task = ipc_kobject_get_locked(port, IKOT_TASK_CONTROL);
2785 if (task == TASK_NULL || !task_is_a_corpse(task)) {
2786 task = TASK_NULL;
2787 } else {
2788 task_reference_mig(task);
2789 }
2790 ip_mq_unlock(port);
2791
2792 /*
2793 * The task might be a corpse; we must inspect this under
2794 * the itk_lock to resolve the race with task_mark_corpse():
2795 *
2796 * If the task associated with the port is NULL under the itk_lock(),
2797 * then the port was a former IKOT_TASK_CONTROL port and we should
2798 * leave it alone.
2799 *
2800 * TODO: we should really make corpses use their own IKOT_TASK_CORPSE
2801 * port type instead of these hacks.
2802 */
2803 if (task) {
2804 itk_lock(task);
2805 ip_mq_lock(port);
2806 assert(task_is_a_corpse(task));
2807 is_corpse = (ipc_kobject_get_locked(port, IKOT_TASK_CONTROL) !=
2808 TASK_NULL);
2809 ip_mq_unlock(port);
2810 itk_unlock(task);
2811 task_deallocate_mig(task);
2812 }
2813
2814 if (is_corpse) {
2815 /* Remove the task from global corpse task list */
2816 task_remove_from_corpse_task_list(task);
2817
2818 task_clear_corpse(task);
2819 vm_map_unset_corpse_source(task->map);
2820 task_terminate_internal(task);
2821 }
2822 }
2823
2824 /*
2825 * task_port_with_flavor_no_senders
2826 *
2827 * Called whenever the Mach port system detects no-senders on
2828 * the task inspect or read port. These ports are allocated lazily and
2829 * should be deallocated here when there are no senders remaining.
2830 */
2831 static void
2832 task_port_with_flavor_no_senders(ipc_port_t port, mach_port_mscount_t mscount)
2833 {
2834 task_t task;
2835 mach_task_flavor_t flavor;
2836 ipc_kobject_type_t kotype;
2837
2838 ip_mq_lock(port);
2839 if (!ipc_kobject_is_mscount_current_locked(port, mscount)) {
2840 ip_mq_unlock(port);
2841 return;
2842 }
2843
2844 kotype = ip_type(port);
2845 assert((IKOT_TASK_READ == kotype) || (IKOT_TASK_INSPECT == kotype));
2846 task = ipc_kobject_get_locked(port, kotype);
2847 if (task != TASK_NULL) {
2848 task_reference(task);
2849 }
2850 ip_mq_unlock(port);
2851
2852 if (task == TASK_NULL) {
2853 /* The task is exiting or disabled; it will eventually deallocate the port */
2854 return;
2855 }
2856
2857 if (kotype == IKOT_TASK_READ) {
2858 flavor = TASK_FLAVOR_READ;
2859 } else {
2860 flavor = TASK_FLAVOR_INSPECT;
2861 }
2862
2863 itk_lock(task);
2864 ip_mq_lock(port);
2865
2866 /*
2867 * If the port is no longer active, then ipc_task_terminate() ran
2868 * and destroyed the kobject already. Just deallocate the task
2869 * ref we took and go away.
2870 *
2871 * It is also possible that several nsrequests are in flight;
2872 * only one shall NULL-out the port entry, and that one
2873 * gets to dealloc the port.
2874 *
2875 * Check for a stale no-senders notification. A call to any function
2876 * that vends out send rights to this port could resurrect it between
2877 * this notification being generated and actually being handled here.
2878 */
2879 if (task->itk_task_ports[flavor] != port ||
2880 !ipc_kobject_is_mscount_current_locked(port, mscount)) {
2881 ip_mq_unlock(port);
2882 itk_unlock(task);
2883 task_deallocate(task);
2884 return;
2885 }
2886
2887 task->itk_task_ports[flavor] = IP_NULL;
2888 itk_unlock(task);
2889
2890 ipc_kobject_dealloc_port_and_unlock(port, mscount, kotype);
2891
2892 task_deallocate(task);
2893 }
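/*
 * The stale-notification dance above generalizes to any lazily allocated
 * kobject port: re-validate both the registered port pointer and the
 * make-send count under the object lock before tearing the port down.
 * A condensed model, where still_registered() and unregister() are
 * hypothetical stand-ins for the itk_task_ports[flavor] checks above:
 *
 *	itk_lock(task);
 *	ip_mq_lock(port);
 *	if (!still_registered(task, port) ||
 *	    !ipc_kobject_is_mscount_current_locked(port, mscount)) {
 *		ip_mq_unlock(port);
 *		itk_unlock(task);
 *		return;                       -- stale notification
 *	}
 *	unregister(task, port);               -- NULL the table slot
 *	itk_unlock(task);
 *	ipc_kobject_dealloc_port_and_unlock(port, mscount, kotype);
 */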
2894
2895 /*
2896 * task_wait_till_threads_terminate_locked
2897 *
2898 * Wait till all the threads in the task are terminated.
2899 * Might release the task lock and re-acquire it.
2900 */
2901 void
2902 task_wait_till_threads_terminate_locked(task_t task)
2903 {
2904 /* wait for all the threads in the task to terminate */
2905 while (task->active_thread_count != 0) {
2906 assert_wait((event_t)&task->active_thread_count, THREAD_UNINT);
2907 task_unlock(task);
2908 thread_block(THREAD_CONTINUE_NULL);
2909
2910 task_lock(task);
2911 }
2912 }
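/*
 * This is the classic event-wait idiom: the address of
 * task->active_thread_count doubles as the wait event, so the
 * thread-termination side must issue a matching wakeup once the count
 * drops to zero. A sketch of the waker, not the exact termination code:
 *
 *	if (--task->active_thread_count == 0) {
 *		thread_wakeup((event_t)&task->active_thread_count);
 *	}
 */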
2913
2914 /*
2915 * task_duplicate_map_and_threads
2916 *
2917 * Copy the vm map of the source task.
2918 * Copy active threads from the source task to the destination task.
2919 * The source task is suspended during the copy.
2920 */
2921 kern_return_t
2922 task_duplicate_map_and_threads(
2923 task_t task,
2924 void *p,
2925 task_t new_task,
2926 thread_t *thread_ret,
2927 uint64_t **udata_buffer,
2928 int *size,
2929 int *num_udata,
2930 bool for_exception)
2931 {
2932 kern_return_t kr = KERN_SUCCESS;
2933 int active;
2934 thread_t thread, self, thread_return = THREAD_NULL;
2935 thread_t new_thread = THREAD_NULL, first_thread = THREAD_NULL;
2936 thread_t *thread_array;
2937 uint32_t active_thread_count = 0, array_count = 0, i;
2938 vm_map_t oldmap;
2939 uint64_t *buffer = NULL;
2940 int buf_size = 0;
2941 int est_knotes = 0, num_knotes = 0;
2942
2943 self = current_thread();
2944
2945 /*
2946 * Suspend the task to copy thread state; use the internal
2947 * variant so that no user-space process can resume
2948 * the task out from under us.
2949 */
2950 kr = task_suspend_internal(task);
2951 if (kr != KERN_SUCCESS) {
2952 return kr;
2953 }
2954
2955 if (task->map->disable_vmentry_reuse == TRUE) {
2956 /*
2957 * Quite likely GuardMalloc (or some debugging tool)
2958 * is being used on this task. And it has gone through
2959 * its limit. Making a corpse will likely encounter
2960 * a lot of VM entries that will need COW.
2961 *
2962 * Skip it.
2963 */
2964 #if DEVELOPMENT || DEBUG
2965 memorystatus_abort_vm_map_fork(task);
2966 #endif
2967 ktriage_record(thread_tid(self), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_CORPSE, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_CORPSE_FAIL_LIBGMALLOC), 0 /* arg */);
2968 task_resume_internal(task);
2969 return KERN_FAILURE;
2970 }
2971
2972 /* Check with VM if vm_map_fork is allowed for this task */
2973 bool is_large = false;
2974 if (memorystatus_allowed_vm_map_fork(task, &is_large)) {
2975 /* Set up the new task's vm map: switch from the parent task's map to its COW map */
2976 oldmap = new_task->map;
2977 new_task->map = vm_map_fork(new_task->ledger,
2978 task->map,
2979 (VM_MAP_FORK_SHARE_IF_INHERIT_NONE |
2980 VM_MAP_FORK_PRESERVE_PURGEABLE |
2981 VM_MAP_FORK_CORPSE_FOOTPRINT |
2982 VM_MAP_FORK_SHARE_IF_OWNED));
2983 if (new_task->map) {
2984 new_task->is_large_corpse = is_large;
2985 vm_map_deallocate(oldmap);
2986
2987 /* copy ledgers that impact the memory footprint */
2988 vm_map_copy_footprint_ledgers(task, new_task);
2989
2990 /* Get all the udata pointers from kqueue */
2991 est_knotes = kevent_proc_copy_uptrs(p, NULL, 0);
2992 if (est_knotes > 0) {
2993 buf_size = (est_knotes + 32) * sizeof(uint64_t);
2994 buffer = kalloc_data(buf_size, Z_WAITOK);
2995 num_knotes = kevent_proc_copy_uptrs(p, buffer, buf_size);
2996 if (num_knotes > est_knotes + 32) {
2997 num_knotes = est_knotes + 32;
2998 }
2999 }
3000 } else {
3001 if (is_large) {
3002 assert(large_corpse_count > 0);
3003 OSDecrementAtomic(&large_corpse_count);
3004 }
3005 new_task->map = oldmap;
3006 #if DEVELOPMENT || DEBUG
3007 memorystatus_abort_vm_map_fork(task);
3008 #endif
3009 task_resume_internal(task);
3010 return KERN_NO_SPACE;
3011 }
3012 } else if (!for_exception) {
3013 #if DEVELOPMENT || DEBUG
3014 memorystatus_abort_vm_map_fork(task);
3015 #endif
3016 task_resume_internal(task);
3017 return KERN_NO_SPACE;
3018 }
3019
3020 active_thread_count = task->active_thread_count;
3021 if (active_thread_count == 0) {
3022 kfree_data(buffer, buf_size);
3023 task_resume_internal(task);
3024 return KERN_FAILURE;
3025 }
3026
3027 thread_array = kalloc_type(thread_t, active_thread_count, Z_WAITOK);
3028
3029 /* Collect the active threads under the task lock, then drop it before calling thread_create_with_continuation */
3030 task_lock(task);
3031 queue_iterate(&task->threads, thread, thread_t, task_threads) {
3032 /* Skip inactive threads */
3033 active = thread->active;
3034 if (!active) {
3035 continue;
3036 }
3037
3038 if (array_count >= active_thread_count) {
3039 break;
3040 }
3041
3042 thread_array[array_count++] = thread;
3043 thread_reference(thread);
3044 }
3045 task_unlock(task);
3046
3047 for (i = 0; i < array_count; i++) {
3048 kr = thread_create_with_continuation(new_task, &new_thread, (thread_continue_t)thread_corpse_continue);
3049 if (kr != KERN_SUCCESS) {
3050 break;
3051 }
3052
3053 /* Equivalent of current thread in corpse */
3054 if (thread_array[i] == self) {
3055 thread_return = new_thread;
3056 new_task->crashed_thread_id = thread_tid(new_thread);
3057 } else if (first_thread == NULL) {
3058 first_thread = new_thread;
3059 } else {
3060 /* drop the extra ref returned by thread_create_with_continuation */
3061 thread_deallocate(new_thread);
3062 }
3063
3064 kr = thread_dup2(thread_array[i], new_thread);
3065 if (kr != KERN_SUCCESS) {
3066 thread_mtx_lock(new_thread);
3067 new_thread->corpse_dup = TRUE;
3068 thread_mtx_unlock(new_thread);
3069 continue;
3070 }
3071
3072 /* Copy thread name */
3073 bsd_copythreadname(get_bsdthread_info(new_thread),
3074 get_bsdthread_info(thread_array[i]));
3075 new_thread->thread_tag = thread_array[i]->thread_tag &
3076 ~THREAD_TAG_USER_JOIN;
3077 thread_copy_resource_info(new_thread, thread_array[i]);
3078 }
3079
3080 /* return the first thread if we couldn't find the equivalent of current */
3081 if (thread_return == THREAD_NULL) {
3082 thread_return = first_thread;
3083 } else if (first_thread != THREAD_NULL) {
3084 /* drop the extra ref returned by thread_create_with_continuation */
3085 thread_deallocate(first_thread);
3086 }
3087
3088 task_resume_internal(task);
3089
3090 for (i = 0; i < array_count; i++) {
3091 thread_deallocate(thread_array[i]);
3092 }
3093 kfree_type(thread_t, active_thread_count, thread_array);
3094
3095 if (kr == KERN_SUCCESS) {
3096 *thread_ret = thread_return;
3097 *udata_buffer = buffer;
3098 *size = buf_size;
3099 *num_udata = num_knotes;
3100 } else {
3101 if (thread_return != THREAD_NULL) {
3102 thread_deallocate(thread_return);
3103 }
3104 kfree_data(buffer, buf_size);
3105 }
3106
3107 return kr;
3108 }
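
/*
 * The udata snapshot in the corpse-creation path above uses a common
 * two-pass sizing idiom: a first call with a NULL buffer returns an
 * estimate, the allocation is padded with slack (32 entries here) to
 * absorb knotes registered between the two calls, and the second
 * call's result is clamped to the buffer's capacity. A minimal sketch
 * of the same idiom (illustrative only; the `fill' callback is
 * hypothetical and stands in for kevent_proc_copy_uptrs):
 */
#if 0
static uint64_t *
snapshot_with_slack(int (*fill)(uint64_t *buf, int bufsize), int *count_out)
{
	int est = fill(NULL, 0);                  /* pass 1: estimate only */
	if (est <= 0) {
		*count_out = 0;
		return NULL;
	}
	int capacity = est + 32;                  /* slack for racing inserts */
	int bytes = capacity * (int)sizeof(uint64_t);
	uint64_t *buf = kalloc_data(bytes, Z_WAITOK);
	int n = fill(buf, bytes);                 /* pass 2: fill for real */
	*count_out = MIN(n, capacity);            /* clamp to what fits */
	return buf;                               /* caller kfree_data()s */
}
#endif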
3109
3110 #if CONFIG_SECLUDED_MEMORY
3111 extern void task_set_can_use_secluded_mem_locked(
3112 task_t task,
3113 boolean_t can_use_secluded_mem);
3114 #endif /* CONFIG_SECLUDED_MEMORY */
3115
3116 #if MACH_ASSERT
3117 int debug4k_panic_on_terminate = 0;
3118 #endif /* MACH_ASSERT */
3119 kern_return_t
3120 task_terminate_internal(
3121 task_t task)
3122 {
3123 thread_t thread, self;
3124 task_t self_task;
3125 boolean_t interrupt_save;
3126 int pid = 0;
3127
3128 assert(task != kernel_task);
3129
3130 self = current_thread();
3131 self_task = current_task();
3132
3133 /*
3134 * Get the task locked and make sure that we are not racing
3135 * with someone else trying to terminate us.
3136 */
3137 if (task == self_task) {
3138 task_lock(task);
3139 } else if (task < self_task) {
3140 task_lock(task);
3141 task_lock(self_task);
3142 } else {
3143 task_lock(self_task);
3144 task_lock(task);
3145 }
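
/*
 * The address-ordered locking above is the standard ABBA-deadlock
 * avoidance pattern: any two threads that each need both task locks
 * always take them in the same (pointer) order, so neither can hold
 * one lock while waiting for the other in the reverse order. A
 * generic sketch of the same pattern (illustrative only, not from
 * this file):
 */
#if 0
static void
lock_two_tasks_ordered(task_t a, task_t b)
{
	if (a == b) {
		task_lock(a);                 /* same task: lock once */
	} else if ((uintptr_t)a < (uintptr_t)b) {
		task_lock(a);                 /* lower address first... */
		task_lock(b);                 /* ...then higher, always */
	} else {
		task_lock(b);
		task_lock(a);
	}
}
#endif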
3146
3147 #if CONFIG_SECLUDED_MEMORY
3148 if (task->task_can_use_secluded_mem) {
3149 task_set_can_use_secluded_mem_locked(task, FALSE);
3150 }
3151 task->task_could_use_secluded_mem = FALSE;
3152 task->task_could_also_use_secluded_mem = FALSE;
3153
3154 if (task->task_suppressed_secluded) {
3155 stop_secluded_suppression(task);
3156 }
3157 #endif /* CONFIG_SECLUDED_MEMORY */
3158
3159 if (!task->active) {
3160 /*
3161 * Task is already being terminated.
3162 * Just return an error. If we are dying, this will
3163 * just get us to our AST special handler and that
3164 * will get us to finalize the termination of ourselves.
3165 */
3166 task_unlock(task);
3167 if (self_task != task) {
3168 task_unlock(self_task);
3169 }
3170
3171 return KERN_FAILURE;
3172 }
3173
3174 if (task_corpse_pending_report(task)) {
3175 /*
3176 * Task is marked for reporting as a corpse.
3177 * Just return an error. This will
3178 * just get us to our AST special handler, and that
3179 * will get us to finish the path to death.
3180 */
3181 task_unlock(task);
3182 if (self_task != task) {
3183 task_unlock(self_task);
3184 }
3185
3186 return KERN_FAILURE;
3187 }
3188
3189 if (self_task != task) {
3190 task_unlock(self_task);
3191 }
3192
3193 /*
3194 * Make sure the current thread does not get aborted out of
3195 * the waits inside these operations.
3196 */
3197 interrupt_save = thread_interrupt_level(THREAD_UNINT);
3198
3199 /*
3200 * Indicate that we want all the threads to stop executing
3201 * at user space by holding the task (we would have held
3202 * each thread independently in thread_terminate_internal -
3203 * but this way we may be more likely to already find it
3204 * held there). Mark the task inactive, and prevent
3205 * further task operations via the task port.
3206 *
3207 * The vm_map and ipc_space must exist until this function returns,
3208 * convert_port_to_{map,space}_with_flavor relies on this behavior.
3209 */
3210 bool first_suspension __unused = task_hold_locked(task);
3211 task->active = FALSE;
3212 ipc_task_disable(task);
3213
3214 #if CONFIG_EXCLAVES
3215 /* before the conclave can be suspended */
3216 exclaves_conclave_prepare_teardown(task);
3217
3218 //rdar://139307390, first suspension might not have done conclave suspend.
3219 first_suspension = true;
3220 if (first_suspension) {
3221 task_unlock(task);
3222 task_suspend_conclave(task);
3223 task_lock(task);
3224 }
3225 #endif /* CONFIG_EXCLAVES */
3226
3227
3228 /*
3229 * Terminate each thread in the task.
3230 */
3231 queue_iterate(&task->threads, thread, thread_t, task_threads) {
3232 thread_terminate_internal(thread);
3233 }
3234
3235 #ifdef MACH_BSD
3236 void *bsd_info = get_bsdtask_info(task);
3237 if (bsd_info != NULL) {
3238 pid = proc_pid(bsd_info);
3239 }
3240 #endif /* MACH_BSD */
3241
3242 task_unlock(task);
3243
3244 #if CONFIG_EXCLAVES
3245 task_stop_conclave(task, false);
3246 #endif /* CONFIG_EXCLAVES */
3247
3248 proc_set_task_policy(task, TASK_POLICY_ATTRIBUTE,
3249 TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
3250
3251 /* Early object reap phase */
3252
3253 // PR-17045188: Revisit implementation
3254 // task_partial_reap(task, pid);
3255
3256 #if CONFIG_TASKWATCH
3257 /*
3258 * remove all task watchers
3259 */
3260 task_removewatchers(task);
3261
3262 #endif /* CONFIG_TASKWATCH */
3263
3264 /*
3265 * Destroy all synchronizers owned by the task.
3266 */
3267 task_synchronizer_destroy_all(task);
3268
3269 /*
3270 * Clear the watchport boost on the task.
3271 */
3272 task_remove_turnstile_watchports(task);
3273
3274 /* let IOKit know (phase 1) */
3275 iokit_task_terminate(task, 1);
3276
3277 /*
3278 * Destroy the IPC space, leaving just a reference for it.
3279 */
3280 ipc_space_terminate(task->itk_space);
3281
3282 #if 00
3283 /* if some ledgers go negative on tear-down again... */
3284 ledger_disable_panic_on_negative(task->map->pmap->ledger,
3285 task_ledgers.phys_footprint);
3286 ledger_disable_panic_on_negative(task->map->pmap->ledger,
3287 task_ledgers.internal);
3288 ledger_disable_panic_on_negative(task->map->pmap->ledger,
3289 task_ledgers.iokit_mapped);
3290 ledger_disable_panic_on_negative(task->map->pmap->ledger,
3291 task_ledgers.alternate_accounting);
3292 ledger_disable_panic_on_negative(task->map->pmap->ledger,
3293 task_ledgers.alternate_accounting_compressed);
3294 #endif
3295
3296 /*
3297 * If the current thread is a member of the task
3298 * being terminated, then the last reference to
3299 * the task will not be dropped until the thread
3300 * is finally reaped. To avoid incurring the
3301 * expense of removing the address space regions
3302 * at reap time, we do it explicitly here.
3303 */
3304
3305 #if MACH_ASSERT
3306 /*
3307 * Identify the pmap's process, in case the pmap ledgers drift
3308 * and we have to report it.
3309 */
3310 char procname[17];
3311 void *proc = get_bsdtask_info(task);
3312 if (proc) {
3313 pid = proc_pid(proc);
3314 proc_name_kdp(proc, procname, sizeof(procname));
3315 } else {
3316 pid = 0;
3317 strlcpy(procname, "<unknown>", sizeof(procname));
3318 }
3319 pmap_set_process(task->map->pmap, pid, procname);
3320 if (vm_map_page_shift(task->map) < (int)PAGE_SHIFT) {
3321 DEBUG4K_LIFE("map %p procname: %s\n", task->map, procname);
3322 if (debug4k_panic_on_terminate) {
3323 panic("DEBUG4K: %s:%d %d[%s] map %p", __FUNCTION__, __LINE__, pid, procname, task->map);
3324 }
3325 }
3326 #endif /* MACH_ASSERT */
3327
3328 vm_map_terminate(task->map);
3329
3330 /* release our shared region */
3331 vm_shared_region_set(task, NULL);
3332
3333 #if __has_feature(ptrauth_calls)
3334 task_set_shared_region_id(task, NULL);
3335 #endif /* __has_feature(ptrauth_calls) */
3336
3337 lck_mtx_lock(&tasks_threads_lock);
3338 queue_remove(&tasks, task, task_t, tasks);
3339 queue_enter(&terminated_tasks, task, task_t, tasks);
3340 tasks_count--;
3341 terminated_tasks_count++;
3342 lck_mtx_unlock(&tasks_threads_lock);
3343
3344 /*
3345 * We no longer need to guard against being aborted, so restore
3346 * the previous interruptible state.
3347 */
3348 thread_interrupt_level(interrupt_save);
3349
3350 #if CONFIG_CPU_COUNTERS
3351 /* force the task to release all ctrs */
3352 if (task->t_kpc & TASK_KPC_FORCED_ALL_CTRS) {
3353 kpc_force_all_ctrs(task, 0);
3354 }
3355 #endif /* CONFIG_CPU_COUNTERS */
3356
3357 #if CONFIG_COALITIONS
3358 /*
3359 * Leave the coalition for a corpse task or a task that
3360 * never had any active threads (e.g. fork or exec failure).
3361 * For a task with active threads, the task will be removed
3362 * from the coalition by the last terminating thread.
3363 */
3364 if (task->active_thread_count == 0) {
3365 coalitions_remove_task(task);
3366 }
3367 #endif
3368
3369 #if CONFIG_FREEZE
3370 extern int vm_compressor_available;
3371 if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE && vm_compressor_available) {
3372 task_disown_frozen_csegs(task);
3373 assert(queue_empty(&task->task_frozen_cseg_q));
3374 }
3375 #endif /* CONFIG_FREEZE */
3376
3377
3378 /*
3379 * Get rid of the task active reference on itself.
3380 */
3381 task_deallocate_grp(task, TASK_GRP_INTERNAL);
3382
3383 return KERN_SUCCESS;
3384 }
3385
3386 void
3387 tasks_system_suspend(boolean_t suspend)
3388 {
3389 task_t task;
3390
3391 KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SUSPEND_USERSPACE) |
3392 (suspend ? DBG_FUNC_START : DBG_FUNC_END));
3393
3394 lck_mtx_lock(&tasks_threads_lock);
3395 assert(tasks_suspend_state != suspend);
3396 tasks_suspend_state = suspend;
3397 queue_iterate(&tasks, task, task_t, tasks) {
3398 if (task == kernel_task) {
3399 continue;
3400 }
3401 if (task_is_driver(task)) {
3402 continue;
3403 }
3404 suspend ? task_suspend_internal(task) : task_resume_internal(task);
3405 }
3406 lck_mtx_unlock(&tasks_threads_lock);
3407 }
3408
3409 /*
3410 * task_start_halt:
3411 *
3412 * Shut the current task down (except for the current thread) in
3413 * preparation for dramatic changes to the task (probably exec).
3414 * We hold the task and mark all other threads in the task for
3415 * termination.
3416 */
3417 kern_return_t
3418 task_start_halt(task_t task)
3419 {
3420 kern_return_t kr = KERN_SUCCESS;
3421 task_lock(task);
3422 kr = task_start_halt_locked(task, FALSE);
3423 task_unlock(task);
3424 return kr;
3425 }
3426
3427 static kern_return_t
3428 task_start_halt_locked(task_t task, boolean_t should_mark_corpse)
3429 {
3430 thread_t thread, self;
3431 uint64_t dispatchqueue_offset;
3432
3433 assert(task != kernel_task);
3434
3435 self = current_thread();
3436
3437 if (task != get_threadtask(self) && !task_is_a_corpse_fork(task)) {
3438 return KERN_INVALID_ARGUMENT;
3439 }
3440
3441 if (!should_mark_corpse &&
3442 (task->halting || !task->active || !self->active)) {
3443 /*
3444 * Task or current thread is already being terminated.
3445 * Hurry up and return out of the current kernel context
3446 * so that we run our AST special handler to terminate
3447 * ourselves. If should_mark_corpse is set, the corpse
3448 * creation might have raced with exec; let the corpse
3449 * creation continue. Once the current thread reaches AST,
3450 * the thread in exec will be woken up from task_complete_halt.
3451 * Exec will fail because the proc was marked for exit.
3452 * Once the thread in exec reaches AST, it will call proc_exit
3453 * and deliver the EXC_CORPSE_NOTIFY.
3454 */
3455 return KERN_FAILURE;
3456 }
3457
3458 /* Thread creation will fail after this point of no return. */
3459 task->halting = TRUE;
3460
3461 /*
3462 * Mark all the threads to keep them from starting any more
3463 * user-level execution. The thread_terminate_internal code
3464 * would do this on a thread by thread basis anyway, but this
3465 * gives us a better chance of not having to wait there.
3466 */
3467 bool first_suspension __unused = task_hold_locked(task);
3468
3469 #if CONFIG_EXCLAVES
3470 if (should_mark_corpse) {
3471 void *crash_info_ptr = task_get_corpseinfo(task);
3472 queue_iterate(&task->threads, thread, thread_t, task_threads) {
3473 if (crash_info_ptr != NULL && thread->th_exclaves_ipc_ctx.ipcb != NULL) {
3474 struct thread_crash_exclaves_info info = { 0 };
3475
3476 info.tcei_flags = kExclaveRPCActive;
3477 info.tcei_scid = thread->th_exclaves_ipc_ctx.scid;
3478 info.tcei_thread_id = thread->thread_id;
3479
3480 kcdata_push_data(crash_info_ptr,
3481 STACKSHOT_KCTYPE_KERN_EXCLAVES_CRASH_THREADINFO,
3482 sizeof(struct thread_crash_exclaves_info), &info);
3483 }
3484 }
3485 }
3486 //rdar://139307390, first suspension might not have done conclave suspend.
3487 first_suspension = true;
3488 if (first_suspension || should_mark_corpse) {
3489 task_unlock(task);
3490
3491 /* before we can tear down the conclave */
3492 exclaves_conclave_prepare_teardown(task);
3493
3494 if (first_suspension) {
3495 task_suspend_conclave(task);
3496 }
3497
3498 if (should_mark_corpse) {
3499 task_stop_conclave(task, true);
3500 }
3501 task_lock(task);
3502 }
3503 #endif /* CONFIG_EXCLAVES */
3504
3505 dispatchqueue_offset = get_dispatchqueue_offset_from_proc(get_bsdtask_info(task));
3506 /*
3507 * Terminate all the other threads in the task.
3508 */
3509 queue_iterate(&task->threads, thread, thread_t, task_threads)
3510 {
3511 /*
3512 * Remove priority throttles so that threads terminate in a timely
3513 * manner. This has to be done after task_hold_locked() traps all
3514 * threads to AST, but before threads are marked inactive in
3515 * thread_terminate_internal(). Takes the thread mutex lock.
3516 *
3517 * We need the task_is_a_corpse() check so that we don't accidentally
3518 * update policy for tasks that are doing posix_spawn().
3519 *
3520 * See: thread_policy_update_tasklocked().
3521 */
3522 if (task_is_a_corpse(task)) {
3523 proc_set_thread_policy(thread, TASK_POLICY_ATTRIBUTE,
3524 TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
3525 }
3526
3527 if (should_mark_corpse) {
3528 thread_mtx_lock(thread);
3529 thread->inspection = TRUE;
3530 thread_mtx_unlock(thread);
3531 }
3532 if (thread != self) {
3533 thread_terminate_internal(thread);
3534 }
3535 }
3536 task->dispatchqueue_offset = dispatchqueue_offset;
3537
3538 task_release_locked(task);
3539
3540 return KERN_SUCCESS;
3541 }
3542
3543
3544 /*
3545 * task_complete_halt:
3546 *
3547 * Complete task halt by waiting for threads to terminate, then clean
3548 * up task resources (VM, port namespace, etc...) and then let the
3549 * current thread go in the (practically empty) task context.
3550 *
3551 * Note: the task->halting flag is not cleared, in order to avoid
3552 * creation of a new thread in the old exec'ed task.
3553 */
3554 void
3555 task_complete_halt(task_t task)
3556 {
3557 task_lock(task);
3558 assert(task->halting);
3559 assert(task == current_task());
3560
3561 /*
3562 * Wait for the other threads to get shut down.
3563 * When the last other thread is reaped, we'll be
3564 * woken up.
3565 */
3566 if (task->thread_count > 1) {
3567 assert_wait((event_t)&task->halting, THREAD_UNINT);
3568 task_unlock(task);
3569 thread_block(THREAD_CONTINUE_NULL);
3570 } else {
3571 task_unlock(task);
3572 }
3573
3574 #if CONFIG_DEFERRED_RECLAIM
3575 if (task->deferred_reclamation_metadata) {
3576 vm_deferred_reclamation_buffer_deallocate(
3577 task->deferred_reclamation_metadata);
3578 task->deferred_reclamation_metadata = NULL;
3579 }
3580 #endif /* CONFIG_DEFERRED_RECLAIM */
3581
3582 /*
3583 * Give the machine dependent code a chance
3584 * to perform cleanup of task-level resources
3585 * associated with the current thread before
3586 * ripping apart the task.
3587 */
3588 machine_task_terminate(task);
3589
3590 /*
3591 * Destroy all synchronizers owned by the task.
3592 */
3593 task_synchronizer_destroy_all(task);
3594
3595 /* let IOKit know (phase 1) */
3596 iokit_task_terminate(task, 1);
3597
3598 /*
3599 * Terminate the IPC space. A long time ago,
3600 * this used to be ipc_space_clean() which would
3601 * keep the space active but hollow it.
3602 *
3603 * We really do not need those semantics, given that
3604 * tasks die with exec now.
3605 */
3606 ipc_space_terminate(task->itk_space);
3607
3608 /*
3609 * Clean out the address space, as we are going to be
3610 * getting a new one.
3611 */
3612 vm_map_terminate(task->map);
3613
3614 /*
3615 * Kick out any IOKitUser handles to the task. At best they're stale,
3616 * at worst someone is racing a SUID exec.
3617 */
3618 /* let IOKit know (phase 2) */
3619 iokit_task_terminate(task, 2);
3620 }
3621
3622 #ifdef CONFIG_TASK_SUSPEND_STATS
3623
3624 static void
3625 _task_mark_suspend_source(task_t task)
3626 {
3627 int idx;
3628 task_suspend_stats_t stats;
3629 task_suspend_source_t source;
3630 task_lock_assert_owned(task);
3631 stats = &task->t_suspend_stats;
3632
3633 idx = stats->tss_count % TASK_SUSPEND_SOURCES_MAX;
3634 source = &task->t_suspend_sources[idx];
3635 bzero(source, sizeof(*source));
3636
3637 source->tss_time = mach_absolute_time();
3638 source->tss_tid = current_thread()->thread_id;
3639 source->tss_pid = task_pid(current_task());
3640 strlcpy(source->tss_procname, task_best_name(current_task()),
3641 sizeof(source->tss_procname));
3642
3643 stats->tss_count++;
3644 }
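
/*
 * t_suspend_sources is a fixed-size ring indexed by
 * tss_count % TASK_SUSPEND_SOURCES_MAX, so once tss_count exceeds the
 * ring size the oldest record is overwritten. A reader that wants the
 * surviving records oldest-first can walk the ring like this
 * (illustrative sketch, assumes the task lock is held):
 */
#if 0
static void
walk_suspend_sources_oldest_first(task_t task)
{
	task_suspend_stats_t stats = &task->t_suspend_stats;
	int valid = (int)MIN(stats->tss_count, TASK_SUSPEND_SOURCES_MAX);
	int start = (stats->tss_count >= TASK_SUSPEND_SOURCES_MAX) ?
	    (int)(stats->tss_count % TASK_SUSPEND_SOURCES_MAX) : 0;

	for (int k = 0; k < valid; k++) {
		int idx = (start + k) % TASK_SUSPEND_SOURCES_MAX;
		task_suspend_source_t src = &task->t_suspend_sources[idx];
		/* oldest-to-newest: src->tss_pid, src->tss_procname, ... */
		(void)src;
	}
}
#endif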
3645
3646 static inline void
3647 _task_mark_suspend_start(task_t task)
3648 {
3649 task_lock_assert_owned(task);
3650 task->t_suspend_stats.tss_last_start = mach_absolute_time();
3651 }
3652
3653 static inline void
3654 _task_mark_suspend_end(task_t task)
3655 {
3656 task_lock_assert_owned(task);
3657 task->t_suspend_stats.tss_last_end = mach_absolute_time();
3658 task->t_suspend_stats.tss_duration += (task->t_suspend_stats.tss_last_end -
3659 task->t_suspend_stats.tss_last_start);
3660 }
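
/*
 * tss_duration accumulates in mach_absolute_time() units, which are
 * timebase-dependent. A consumer reporting the total suspended time
 * in nanoseconds would convert it, e.g. (illustrative sketch):
 */
#if 0
static uint64_t
task_total_suspended_ns(task_t task)
{
	uint64_t ns;

	/* convert mach absolute time units to nanoseconds */
	absolutetime_to_nanoseconds(task->t_suspend_stats.tss_duration, &ns);
	return ns;
}
#endif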
3661
3662 static kern_return_t
3663 _task_get_suspend_stats_locked(task_t task, task_suspend_stats_t stats)
3664 {
3665 if (task == TASK_NULL || stats == NULL) {
3666 return KERN_INVALID_ARGUMENT;
3667 }
3668 task_lock_assert_owned(task);
3669 memcpy(stats, &task->t_suspend_stats, sizeof(task->t_suspend_stats));
3670 return KERN_SUCCESS;
3671 }
3672
3673 static kern_return_t
3674 _task_get_suspend_sources_locked(task_t task, task_suspend_source_t sources)
3675 {
3676 if (task == TASK_NULL || sources == NULL) {
3677 return KERN_INVALID_ARGUMENT;
3678 }
3679 task_lock_assert_owned(task);
3680 memcpy(sources, task->t_suspend_sources,
3681 sizeof(struct task_suspend_source_s) * TASK_SUSPEND_SOURCES_MAX);
3682 return KERN_SUCCESS;
3683 }
3684
3685 #endif /* CONFIG_TASK_SUSPEND_STATS */
3686
3687 kern_return_t
3688 task_get_suspend_stats(task_t task, task_suspend_stats_t stats)
3689 {
3690 #ifdef CONFIG_TASK_SUSPEND_STATS
3691 kern_return_t kr;
3692 if (task == TASK_NULL || stats == NULL) {
3693 return KERN_INVALID_ARGUMENT;
3694 }
3695 task_lock(task);
3696 kr = _task_get_suspend_stats_locked(task, stats);
3697 task_unlock(task);
3698 return kr;
3699 #else /* CONFIG_TASK_SUSPEND_STATS */
3700 (void)task;
3701 (void)stats;
3702 return KERN_NOT_SUPPORTED;
3703 #endif
3704 }
3705
3706 kern_return_t
3707 task_get_suspend_stats_kdp(task_t task, task_suspend_stats_t stats)
3708 {
3709 #ifdef CONFIG_TASK_SUSPEND_STATS
3710 if (task == TASK_NULL || stats == NULL) {
3711 return KERN_INVALID_ARGUMENT;
3712 }
3713 memcpy(stats, &task->t_suspend_stats, sizeof(task->t_suspend_stats));
3714 return KERN_SUCCESS;
3715 #else /* CONFIG_TASK_SUSPEND_STATS */
3716 #pragma unused(task, stats)
3717 return KERN_NOT_SUPPORTED;
3718 #endif /* CONFIG_TASK_SUSPEND_STATS */
3719 }
3720
3721 kern_return_t
3722 task_get_suspend_sources(task_t task, task_suspend_source_array_t sources)
3723 {
3724 #ifdef CONFIG_TASK_SUSPEND_STATS
3725 kern_return_t kr;
3726 if (task == TASK_NULL || sources == NULL) {
3727 return KERN_INVALID_ARGUMENT;
3728 }
3729 task_lock(task);
3730 kr = _task_get_suspend_sources_locked(task, sources);
3731 task_unlock(task);
3732 return kr;
3733 #else /* CONFIG_TASK_SUSPEND_STATS */
3734 (void)task;
3735 (void)sources;
3736 return KERN_NOT_SUPPORTED;
3737 #endif
3738 }
3739
3740 kern_return_t
3741 task_get_suspend_sources_kdp(task_t task, task_suspend_source_array_t sources)
3742 {
3743 #ifdef CONFIG_TASK_SUSPEND_STATS
3744 if (task == TASK_NULL || sources == NULL) {
3745 return KERN_INVALID_ARGUMENT;
3746 }
3747 memcpy(sources, task->t_suspend_sources,
3748 sizeof(struct task_suspend_source_s) * TASK_SUSPEND_SOURCES_MAX);
3749 return KERN_SUCCESS;
3750 #else /* CONFIG_TASK_SUSPEND_STATS */
3751 #pragma unused(task, sources)
3752 return KERN_NOT_SUPPORTED;
3753 #endif
3754 }
3755
3756 kern_return_t
3757 task_set_cs_auxiliary_info(task_t task, uint64_t info)
3758 {
3759 if (task == TASK_NULL) {
3760 return KERN_INVALID_ARGUMENT;
3761 }
3762
3763 task->task_cs_auxiliary_info = info;
3764 return KERN_SUCCESS;
3765 }
3766
3767 uint64_t
3768 task_get_cs_auxiliary_info_kdp(task_t task)
3769 {
3770 if (task == TASK_NULL) {
3771 return 0;
3772 }
3773 return task->task_cs_auxiliary_info;
3774 }
3775
3776 /*
3777 * task_hold_locked:
3778 *
3779 * Suspend execution of the specified task.
3780 * This is a recursive-style suspension of the task; a count of
3781 * suspends is maintained.
3782 *
3783 * CONDITIONS: the task is locked and active.
3784 * Returns true if this was the first suspension.
3785 */
3786 bool
3787 task_hold_locked(
3788 task_t task)
3789 {
3790 thread_t thread;
3791 void *bsd_info = get_bsdtask_info(task);
3792
3793 assert(task->active);
3794
3795 if (task->suspend_count++ > 0) {
3796 return false;
3797 }
3798
3799 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_SUSPENSION, MACH_TASK_SUSPEND),
3800 task_pid(task), task->user_stop_count, task->pidsuspended);
3801
3802 if (bsd_info) {
3803 workq_proc_suspended(bsd_info);
3804 }
3805
3806 /*
3807 * Iterate through all the threads and hold them.
3808 */
3809 queue_iterate(&task->threads, thread, thread_t, task_threads) {
3810 thread_mtx_lock(thread);
3811 thread_hold(thread);
3812 thread_mtx_unlock(thread);
3813 }
3814
3815 #ifdef CONFIG_TASK_SUSPEND_STATS
3816 _task_mark_suspend_start(task);
3817 #endif
3818 return true;
3819 }
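
/*
 * Holds are counted: only the 0->1 transition of suspend_count (the
 * `true' return above) actually stops the threads, and every
 * task_hold_locked() must be balanced by one task_release_locked().
 * A sketch of the usual pairing (illustrative only):
 */
#if 0
static void
with_task_quiesced(task_t task)
{
	task_lock(task);
	if (task->active) {
		(void)task_hold_locked(task);   /* count 0->1 stops threads */
		task_wait_locked(task, FALSE);  /* wait for them to stop */
		/* ... inspect the quiesced task here ... */
		task_release_locked(task);      /* count 1->0 restarts them */
	}
	task_unlock(task);
}
#endif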
3820
3821 /*
3822 * task_hold_and_wait
3823 *
3824 * Same as the internal routine above, except that it must lock
3825 * and verify that the task is active. This differs from task_suspend
3826 * in that it places a kernel hold on the task rather than just a
3827 * user-level hold. This keeps users from over-resuming and setting
3828 * it running out from under the kernel.
3829 *
3830 * CONDITIONS: the caller holds a reference on the task
3831 */
3832 kern_return_t
3833 task_hold_and_wait(
3834 task_t task,
3835 bool suspend_conclave __unused)
3836 {
3837 if (task == TASK_NULL) {
3838 return KERN_INVALID_ARGUMENT;
3839 }
3840
3841 task_lock(task);
3842 if (!task->active) {
3843 task_unlock(task);
3844 return KERN_FAILURE;
3845 }
3846
3847 #ifdef CONFIG_TASK_SUSPEND_STATS
3848 _task_mark_suspend_source(task);
3849 #endif /* CONFIG_TASK_SUSPEND_STATS */
3850
3851 bool first_suspension __unused = task_hold_locked(task);
3852
3853 #if CONFIG_EXCLAVES
3854 //rdar://139307390, first suspension might not have done conclave suspend.
3855 first_suspension = true;
3856 if (suspend_conclave && first_suspension) {
3857 task_unlock(task);
3858 task_suspend_conclave(task);
3859 task_lock(task);
3860 /*
3861 * If the task terminated/resumed before we could wait on its
3862 * threads, it is a race we lost; treat it as if the termination/
3863 * resume happened after the wait, and return SUCCESS.
3864 */
3865 if (!task->active || task->suspend_count <= 0) {
3866 task_unlock(task);
3867 return KERN_SUCCESS;
3868 }
3869 }
3870 #endif /* CONFIG_EXCLAVES */
3871
3872 task_wait_locked(task, FALSE);
3873 task_unlock(task);
3874
3875 return KERN_SUCCESS;
3876 }
3877
3878 /*
3879 * task_wait_locked:
3880 *
3881 * Wait for all threads in task to stop.
3882 *
3883 * Conditions:
3884 * Called with task locked, active, and held.
3885 */
3886 void
3887 task_wait_locked(
3888 task_t task,
3889 boolean_t until_not_runnable)
3890 {
3891 thread_t thread, self;
3892
3893 assert(task->active);
3894 assert(task->suspend_count > 0);
3895
3896 self = current_thread();
3897
3898 /*
3899 * Iterate through all the threads and wait for them to
3900 * stop. Do not wait for the current thread if it is within
3901 * the task.
3902 */
3903 queue_iterate(&task->threads, thread, thread_t, task_threads) {
3904 if (thread != self) {
3905 thread_wait(thread, until_not_runnable);
3906 }
3907 }
3908 }
3909
3910 boolean_t
3911 task_is_app_suspended(task_t task)
3912 {
3913 return task->pidsuspended;
3914 }
3915
3916 /*
3917 * task_release_locked:
3918 *
3919 * Release a kernel hold on a task.
3920 *
3921 * CONDITIONS: the task is locked and active
3922 */
3923 void
3924 task_release_locked(
3925 task_t task)
3926 {
3927 thread_t thread;
3928 void *bsd_info = get_bsdtask_info(task);
3929
3930 assert(task->active);
3931 assert(task->suspend_count > 0);
3932
3933 if (--task->suspend_count > 0) {
3934 return;
3935 }
3936
3937 if (bsd_info) {
3938 workq_proc_resumed(bsd_info);
3939 }
3940
3941 queue_iterate(&task->threads, thread, thread_t, task_threads) {
3942 thread_mtx_lock(thread);
3943 thread_release(thread);
3944 thread_mtx_unlock(thread);
3945 }
3946
3947 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_SUSPENSION, MACH_TASK_RESUME) | DBG_FUNC_NONE, task_pid(task));
3948
3949 #if CONFIG_TASK_SUSPEND_STATS
3950 _task_mark_suspend_end(task);
3951 #endif
3952
3953 //rdar://139307390.
3954 #if 0
3955 #if CONFIG_EXCLAVES
3956 task_unlock(task);
3957 task_resume_conclave(task);
3958 task_lock(task);
3959 #endif /* CONFIG_EXCLAVES */
3960 #endif
3961 }
3962
3963 /*
3964 * task_release:
3965 *
3966 * Same as the internal routine above, except that it must lock
3967 * and verify that the task is active.
3968 *
3969 * CONDITIONS: The caller holds a reference to the task
3970 */
3971 kern_return_t
3972 task_release(
3973 task_t task)
3974 {
3975 if (task == TASK_NULL) {
3976 return KERN_INVALID_ARGUMENT;
3977 }
3978
3979 task_lock(task);
3980
3981 if (!task->active) {
3982 task_unlock(task);
3983
3984 return KERN_FAILURE;
3985 }
3986
3987 task_release_locked(task);
3988 task_unlock(task);
3989
3990 return KERN_SUCCESS;
3991 }
3992
3993 static kern_return_t
3994 task_threads_internal(
3995 task_t task,
3996 thread_act_array_t *threads_out,
3997 mach_msg_type_number_t *countp,
3998 mach_thread_flavor_t flavor)
3999 {
4000 mach_msg_type_number_t actual, count, count_needed;
4001 thread_act_array_t thread_list;
4002 thread_t thread;
4003 unsigned int i;
4004
4005 count = 0;
4006 thread_list = NULL;
4007
4008 if (task == TASK_NULL) {
4009 return KERN_INVALID_ARGUMENT;
4010 }
4011
4012 assert(flavor <= THREAD_FLAVOR_INSPECT);
4013
4014 for (;;) {
4015 task_lock(task);
4016 if (!task->active) {
4017 task_unlock(task);
4018
4019 mach_port_array_free(thread_list, count);
4020 return KERN_FAILURE;
4021 }
4022
4023 count_needed = actual = task->thread_count;
4024 if (count_needed <= count) {
4025 break;
4026 }
4027
4028 /* unlock the task and allocate more memory */
4029 task_unlock(task);
4030
4031 mach_port_array_free(thread_list, count);
4032 count = count_needed;
4033 thread_list = mach_port_array_alloc(count, Z_WAITOK);
4034
4035 if (thread_list == NULL) {
4036 return KERN_RESOURCE_SHORTAGE;
4037 }
4038 }
4039
4040 i = 0;
4041 queue_iterate(&task->threads, thread, thread_t, task_threads) {
4042 assert(i < actual);
4043 thread_reference(thread);
4044 ((thread_t *)thread_list)[i++] = thread;
4045 }
4046
4047 count_needed = actual;
4048
4049 /* can unlock task now that we've got the thread refs */
4050 task_unlock(task);
4051
4052 if (actual == 0) {
4053 /* no threads, so return null pointer and deallocate memory */
4054
4055 mach_port_array_free(thread_list, count);
4056
4057 *threads_out = NULL;
4058 *countp = 0;
4059 } else {
4060 /* if we allocated too much, must copy */
4061 if (count_needed < count) {
4062 mach_port_array_t newaddr;
4063
4064 newaddr = mach_port_array_alloc(count_needed, Z_WAITOK);
4065 if (newaddr == NULL) {
4066 for (i = 0; i < actual; ++i) {
4067 thread_deallocate(((thread_t *)thread_list)[i]);
4068 }
4069 mach_port_array_free(thread_list, count);
4070 return KERN_RESOURCE_SHORTAGE;
4071 }
4072
4073 bcopy(thread_list, newaddr, count_needed * sizeof(thread_t));
4074 mach_port_array_free(thread_list, count);
4075 thread_list = newaddr;
4076 }
4077
4078 /* do the conversion that MIG should handle */
4079 convert_thread_array_to_ports(thread_list, actual, flavor);
4080
4081 *threads_out = thread_list;
4082 *countp = actual;
4083 }
4084
4085 return KERN_SUCCESS;
4086 }
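
/*
 * The for (;;) loop above is the classic lock/measure/unlock/grow/
 * retry pattern: thread_count can only be trusted while the task lock
 * is held, but the allocation may block, so it is done unlocked and
 * the measurement is repeated. A generic sketch of the idiom
 * (illustrative only; current_item_count() is hypothetical):
 */
#if 0
static void **
snapshot_grow_retry(lck_mtx_t *lock, uint32_t *count_out)
{
	void **list = NULL;
	uint32_t have = 0, need;

	for (;;) {
		lck_mtx_lock(lock);
		need = current_item_count();      /* only valid while locked */
		if (need <= have) {
			break;                    /* big enough: stay locked */
		}
		lck_mtx_unlock(lock);             /* too small: grow unlocked */
		kfree_type(void *, have, list);
		have = need;
		list = kalloc_type(void *, have, Z_WAITOK);
	}
	/* ... copy up to `need' items while still holding the lock ... */
	lck_mtx_unlock(lock);
	*count_out = need;
	return list;
}
#endif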
4087
4088
4089 kern_return_t
4090 task_threads_from_user(
4091 mach_port_t port,
4092 thread_act_array_t *threads_out,
4093 mach_msg_type_number_t *count)
4094 {
4095 ipc_kobject_type_t kotype;
4096 kern_return_t kr;
4097
4098 task_t task = convert_port_to_task_inspect_no_eval(port);
4099
4100 if (task == TASK_NULL) {
4101 return KERN_INVALID_ARGUMENT;
4102 }
4103
4104 kotype = ip_type(port);
4105
4106 switch (kotype) {
4107 case IKOT_TASK_CONTROL:
4108 kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_CONTROL);
4109 break;
4110 case IKOT_TASK_READ:
4111 kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_READ);
4112 break;
4113 case IKOT_TASK_INSPECT:
4114 kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_INSPECT);
4115 break;
4116 default:
4117 panic("strange kobject type");
4118 break;
4119 }
4120
4121 task_deallocate(task);
4122 return kr;
4123 }
4124
4125 #define TASK_HOLD_NORMAL 0
4126 #define TASK_HOLD_PIDSUSPEND 1
4127 #define TASK_HOLD_LEGACY 2
4128 #define TASK_HOLD_LEGACY_ALL 3
4129
4130 static kern_return_t
4131 place_task_hold(
4132 task_t task,
4133 int mode)
4134 {
4135 if (!task->active && !task_is_a_corpse(task)) {
4136 return KERN_FAILURE;
4137 }
4138
4139 /* Return success for corpse task */
4140 if (task_is_a_corpse(task)) {
4141 return KERN_SUCCESS;
4142 }
4143
4144 #if MACH_ASSERT
4145 current_task()->suspends_outstanding++;
4146 #endif
4147
4148 if (mode == TASK_HOLD_LEGACY) {
4149 task->legacy_stop_count++;
4150 }
4151
4152 #ifdef CONFIG_TASK_SUSPEND_STATS
4153 _task_mark_suspend_source(task);
4154 #endif /* CONFIG_TASK_SUSPEND_STATS */
4155
4156 if (task->user_stop_count++ > 0) {
4157 /*
4158 * If the stop count was positive, the task is
4159 * already stopped and we can exit.
4160 */
4161 return KERN_SUCCESS;
4162 }
4163
4164 /*
4165 * Put a kernel-level hold on the threads in the task (all
4166 * user-level task suspensions added together represent a
4167 * single kernel-level hold). We then wait for the threads
4168 * to stop executing user code.
4169 */
4170 bool first_suspension __unused = task_hold_locked(task);
4171
4172 //rdar://139307390, do not suspend conclave on task suspend.
4173 #if 0
4174 #if CONFIG_EXCLAVES
4175 if (first_suspension) {
4176 task_unlock(task);
4177 task_suspend_conclave(task);
4178
4179 /*
4180 * If the task terminated/resumed before we could wait on its
4181 * threads, it is a race we lost; treat it as if the termination/
4182 * resume happened after the wait, and return SUCCESS.
4183 */
4184 task_lock(task);
4185 if (!task->active || task->suspend_count <= 0) {
4186 return KERN_SUCCESS;
4187 }
4188 }
4189 #endif /* CONFIG_EXCLAVES */
4190 #endif
4191
4192 task_wait_locked(task, FALSE);
4193
4194 return KERN_SUCCESS;
4195 }
4196
4197 static kern_return_t
4198 release_task_hold(
4199 task_t task,
4200 int mode)
4201 {
4202 boolean_t release = FALSE;
4203
4204 if (!task->active && !task_is_a_corpse(task)) {
4205 return KERN_FAILURE;
4206 }
4207
4208 /* Return success for corpse task */
4209 if (task_is_a_corpse(task)) {
4210 return KERN_SUCCESS;
4211 }
4212
4213 if (mode == TASK_HOLD_PIDSUSPEND) {
4214 if (task->pidsuspended == FALSE) {
4215 return KERN_FAILURE;
4216 }
4217 task->pidsuspended = FALSE;
4218 }
4219
4220 if (task->user_stop_count > (task->pidsuspended ? 1 : 0)) {
4221 #if MACH_ASSERT
4222 /*
4223 * This is obviously not robust; if we suspend one task and then resume a different one,
4224 * we'll fly under the radar. This is only meant to catch the common case of a crashed
4225 * or buggy suspender.
4226 */
4227 current_task()->suspends_outstanding--;
4228 #endif
4229
4230 if (mode == TASK_HOLD_LEGACY_ALL) {
4231 if (task->legacy_stop_count >= task->user_stop_count) {
4232 task->user_stop_count = 0;
4233 release = TRUE;
4234 } else {
4235 task->user_stop_count -= task->legacy_stop_count;
4236 }
4237 task->legacy_stop_count = 0;
4238 } else {
4239 if (mode == TASK_HOLD_LEGACY && task->legacy_stop_count > 0) {
4240 task->legacy_stop_count--;
4241 }
4242 if (--task->user_stop_count == 0) {
4243 release = TRUE;
4244 }
4245 }
4246 } else {
4247 return KERN_FAILURE;
4248 }
4249
4250 /*
4251 * Release the task if necessary.
4252 */
4253 if (release) {
4254 task_release_locked(task);
4255 }
4256
4257 return KERN_SUCCESS;
4258 }
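
/*
 * Worked example of the bookkeeping above (illustrative): suppose
 * user_stop_count == 3, of which legacy_stop_count == 2, the task is
 * not pidsuspended, and three successive releases arrive:
 *
 *   1) TASK_HOLD_LEGACY:     user 3 -> 2, legacy 2 -> 1; no release.
 *   2) TASK_HOLD_LEGACY_ALL: legacy (1) < user (2), so user 2 -> 1,
 *                            legacy -> 0; still no release.
 *   3) TASK_HOLD_NORMAL:     user 1 -> 0, so release == TRUE and
 *                            task_release_locked() restarts the task.
 */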
4259
4260 boolean_t
4261 get_task_suspended(task_t task)
4262 {
4263 return 0 != task->user_stop_count;
4264 }
4265
4266 /*
4267 * task_suspend:
4268 *
4269 * Implement an (old-fashioned) user-level suspension on a task.
4270 *
4271 * Because the user isn't expecting to have to manage a suspension
4272 * token, we'll track it for him in the kernel in the form of a naked
4273 * send right to the task's resume port. All such send rights
4274 * account for a single suspension against the task (unlike task_suspend2()
4275 * where each caller gets a unique suspension count represented by a
4276 * unique send-once right).
4277 *
4278 * Conditions:
4279 * The caller holds a reference to the task
4280 */
4281 kern_return_t
4282 task_suspend(
4283 task_t task)
4284 {
4285 kern_return_t kr;
4286 mach_port_t port;
4287 mach_port_name_t name;
4288
4289 if (task == TASK_NULL || task == kernel_task) {
4290 return KERN_INVALID_ARGUMENT;
4291 }
4292
4293 /*
4294 * place a legacy hold on the task.
4295 */
4296 task_lock(task);
4297 kr = place_task_hold(task, TASK_HOLD_LEGACY);
4298 task_unlock(task);
4299
4300 if (kr != KERN_SUCCESS) {
4301 return kr;
4302 }
4303
4304 /*
4305 * Claim a send right on the task resume port, and request a no-senders
4306 * notification on that port (if none outstanding).
4307 */
4308 itk_lock(task);
4309 port = task->itk_resume;
4310 if (port == IP_NULL) {
4311 port = ipc_kobject_alloc_port(task, IKOT_TASK_RESUME,
4312 IPC_KOBJECT_ALLOC_MAKE_SEND);
4313 task->itk_resume = port;
4314 } else {
4315 (void)ipc_kobject_make_send(port, task, IKOT_TASK_RESUME);
4316 }
4317 itk_unlock(task);
4318
4319 /*
4320 * Copyout the send right into the calling task's IPC space. It won't know it is there,
4321 * but we'll look it up when calling a traditional resume. Any IPC operations that
4322 * deallocate the send right will auto-release the suspension.
4323 */
4324 if (IP_VALID(port)) {
4325 kr = ipc_object_copyout(current_space(), port,
4326 MACH_MSG_TYPE_MOVE_SEND, IPC_OBJECT_COPYOUT_FLAGS_NONE,
4327 NULL, &name);
4328 } else {
4329 kr = KERN_SUCCESS;
4330 }
4331 if (kr != KERN_SUCCESS) {
4332 printf("warning: %s(%d) failed to copyout suspension "
4333 "token for pid %d with error: %d\n",
4334 proc_name_address(get_bsdtask_info(current_task())),
4335 proc_pid(get_bsdtask_info(current_task())),
4336 task_pid(task), kr);
4337 }
4338
4339 return kr;
4340 }
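
/*
 * Illustrative user-space pairing for this legacy interface (a
 * sketch, assuming a caller that already obtained the task port,
 * e.g. via task_for_pid(); not part of this file):
 */
#if 0
#include <mach/mach.h>

static kern_return_t
pause_and_inspect(task_t target)
{
	kern_return_t kr = task_suspend(target);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* ... inspect the stopped task ... */
	return task_resume(target);   /* releases the tracked send right */
}
#endif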
4341
4342 /*
4343 * task_resume:
4344 * Release a user hold on a task.
4345 *
4346 * Conditions:
4347 * The caller holds a reference to the task
4348 */
4349 kern_return_t
4350 task_resume(
4351 task_t task)
4352 {
4353 kern_return_t kr;
4354 mach_port_name_t resume_port_name;
4355 ipc_entry_t resume_port_entry;
4356 ipc_space_t space = current_task()->itk_space;
4357
4358 if (task == TASK_NULL || task == kernel_task) {
4359 return KERN_INVALID_ARGUMENT;
4360 }
4361
4362 /* release a legacy task hold */
4363 task_lock(task);
4364 kr = release_task_hold(task, TASK_HOLD_LEGACY);
4365 task_unlock(task);
4366
4367 itk_lock(task); /* for itk_resume */
4368 is_write_lock(space); /* spin lock */
4369 if (is_active(space) && IP_VALID(task->itk_resume) &&
4370 ipc_hash_lookup(space, ip_to_object(task->itk_resume), &resume_port_name, &resume_port_entry) == TRUE) {
4371 /*
4372 * We found a suspension token in the caller's IPC space. Release a send right to indicate that
4373 * we are holding one less legacy hold on the task from this caller. If the release failed,
4374 * go ahead and drop all the rights, as someone either already released our holds or the task
4375 * is gone.
4376 */
4377 itk_unlock(task);
4378 if (kr == KERN_SUCCESS) {
4379 ipc_right_dealloc(space, resume_port_name, resume_port_entry);
4380 } else {
4381 ipc_right_destroy(space, resume_port_name, resume_port_entry);
4382 }
4383 /* space unlocked */
4384 } else {
4385 itk_unlock(task);
4386 is_write_unlock(space);
4387 if (kr == KERN_SUCCESS) {
4388 printf("warning: %s(%d) performed out-of-band resume on pid %d\n",
4389 proc_name_address(get_bsdtask_info(current_task())), proc_pid(get_bsdtask_info(current_task())),
4390 task_pid(task));
4391 }
4392 }
4393
4394 return kr;
4395 }
4396
4397 /*
4398 * Suspend the target task.
4399 * Making/holding a token/reference/port is the caller's responsibility.
4400 */
4401 kern_return_t
4402 task_suspend_internal(task_t task)
4403 {
4404 kern_return_t kr;
4405
4406 if (task == TASK_NULL || task == kernel_task) {
4407 return KERN_INVALID_ARGUMENT;
4408 }
4409
4410 task_lock(task);
4411 kr = place_task_hold(task, TASK_HOLD_NORMAL);
4412 task_unlock(task);
4413 return kr;
4414 }
4415
4416 /*
4417 * Suspend the target task, and return a suspension token. The token
4418 * represents a reference on the suspended task.
4419 */
4420 static kern_return_t
4421 task_suspend2_grp(
4422 task_t task,
4423 task_suspension_token_t *suspend_token,
4424 task_grp_t grp)
4425 {
4426 kern_return_t kr;
4427
4428 kr = task_suspend_internal(task);
4429 if (kr != KERN_SUCCESS) {
4430 *suspend_token = TASK_NULL;
4431 return kr;
4432 }
4433
4434 /*
4435 * Take a reference on the target task and return that to the caller
4436 * as a "suspension token," which can be converted into an SO right to
4437 * the now-suspended task's resume port.
4438 */
4439 task_reference_grp(task, grp);
4440 *suspend_token = task;
4441
4442 return KERN_SUCCESS;
4443 }
4444
4445 kern_return_t
4446 task_suspend2_mig(
4447 task_t task,
4448 task_suspension_token_t *suspend_token)
4449 {
4450 return task_suspend2_grp(task, suspend_token, TASK_GRP_MIG);
4451 }
4452
4453 kern_return_t
4454 task_suspend2_external(
4455 task_t task,
4456 task_suspension_token_t *suspend_token)
4457 {
4458 return task_suspend2_grp(task, suspend_token, TASK_GRP_EXTERNAL);
4459 }
4460
4461 /*
4462 * Resume the task
4463 * (reference/token/port management is caller's responsibility).
4464 */
4465 kern_return_t
4466 task_resume_internal(
4467 task_suspension_token_t task)
4468 {
4469 kern_return_t kr;
4470
4471 if (task == TASK_NULL || task == kernel_task) {
4472 return KERN_INVALID_ARGUMENT;
4473 }
4474
4475 task_lock(task);
4476 kr = release_task_hold(task, TASK_HOLD_NORMAL);
4477 task_unlock(task);
4478 return kr;
4479 }
4480
4481 /*
4482 * Resume the task using a suspension token. Consumes the token's ref.
4483 */
4484 static kern_return_t
4485 task_resume2_grp(
4486 task_suspension_token_t task,
4487 task_grp_t grp)
4488 {
4489 kern_return_t kr;
4490
4491 kr = task_resume_internal(task);
4492 task_suspension_token_deallocate_grp(task, grp);
4493
4494 return kr;
4495 }
4496
4497 kern_return_t
4498 task_resume2_mig(
4499 task_suspension_token_t task)
4500 {
4501 return task_resume2_grp(task, TASK_GRP_MIG);
4502 }
4503
4504 kern_return_t
4505 task_resume2_external(
4506 task_suspension_token_t task)
4507 {
4508 return task_resume2_grp(task, TASK_GRP_EXTERNAL);
4509 }
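
/*
 * Illustrative user-space pairing for the token interface (a sketch,
 * not from this file). Unlike task_suspend(), every task_suspend2()
 * caller gets its own token, so independent suspenders cannot resume
 * the task out from under one another:
 */
#if 0
#include <mach/mach.h>

static kern_return_t
pause_with_token(task_t target)
{
	task_suspension_token_t token = TASK_NULL;
	kern_return_t kr = task_suspend2(target, &token);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* ... inspect the stopped task ... */
	return task_resume2(token);   /* consumes the token's reference */
}
#endif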
4510
4511 static void
4512 task_suspension_no_senders(ipc_port_t port, mach_port_mscount_t mscount)
4513 {
4514 task_t task = convert_port_to_task_suspension_token(port);
4515
4516 if (task == TASK_NULL) {
4517 return;
4518 }
4519
4520 if (task == kernel_task) {
4521 task_suspension_token_deallocate(task);
4522 return;
4523 }
4524
4525 task_lock(task);
4526
4527 if (ipc_kobject_is_mscount_current(port, mscount)) {
4528 /* release all the [remaining] outstanding legacy holds */
4529 release_task_hold(task, TASK_HOLD_LEGACY_ALL);
4530 }
4531
4532 task_unlock(task);
4533
4534 task_suspension_token_deallocate(task); /* drop token reference */
4535 }
4536
4537 /*
4538 * Fires when a send-once right made
4539 * by convert_task_suspension_token_to_port() dies.
4540 */
4541 void
4542 task_suspension_send_once(ipc_port_t port)
4543 {
4544 task_t task = convert_port_to_task_suspension_token(port);
4545
4546 if (task == TASK_NULL || task == kernel_task) {
4547 return; /* nothing to do */
4548 }
4549
4550 /* release the hold held by this specific send-once right */
4551 task_lock(task);
4552 release_task_hold(task, TASK_HOLD_NORMAL);
4553 task_unlock(task);
4554
4555 task_suspension_token_deallocate(task); /* drop token reference */
4556 }
4557
4558 static kern_return_t
4559 task_pidsuspend_locked(task_t task)
4560 {
4561 kern_return_t kr;
4562
4563 if (task->pidsuspended) {
4564 kr = KERN_FAILURE;
4565 goto out;
4566 }
4567
4568 task->pidsuspended = TRUE;
4569
4570 kr = place_task_hold(task, TASK_HOLD_PIDSUSPEND);
4571 if (kr != KERN_SUCCESS) {
4572 task->pidsuspended = FALSE;
4573 }
4574 out:
4575 return kr;
4576 }
4577
4578
4579 /*
4580 * task_pidsuspend:
4581 *
4582 * Suspends a task by placing a hold on its threads.
4583 *
4584 * Conditions:
4585 * The caller holds a reference to the task
4586 */
4587 kern_return_t
4588 task_pidsuspend(
4589 task_t task)
4590 {
4591 kern_return_t kr;
4592
4593 if (task == TASK_NULL || task == kernel_task) {
4594 return KERN_INVALID_ARGUMENT;
4595 }
4596
4597 task_lock(task);
4598
4599 kr = task_pidsuspend_locked(task);
4600
4601 task_unlock(task);
4602
4603 if ((KERN_SUCCESS == kr) && task->message_app_suspended) {
4604 iokit_task_app_suspended_changed(task);
4605 vm_deferred_reclamation_task_suspend(task);
4606 }
4607
4608 return kr;
4609 }
4610
4611 /*
4612 * task_pidresume:
4613 * Resumes a previously suspended task.
4614 *
4615 * Conditions:
4616 * The caller holds a reference to the task
4617 */
4618 kern_return_t
4619 task_pidresume(
4620 task_t task)
4621 {
4622 kern_return_t kr;
4623
4624 if (task == TASK_NULL || task == kernel_task) {
4625 return KERN_INVALID_ARGUMENT;
4626 }
4627
4628 task_lock(task);
4629
4630 #if CONFIG_FREEZE
4631
4632 while (task->changing_freeze_state) {
4633 assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
4634 task_unlock(task);
4635 thread_block(THREAD_CONTINUE_NULL);
4636
4637 task_lock(task);
4638 }
4639 task->changing_freeze_state = TRUE;
4640 #endif
4641
4642 kr = release_task_hold(task, TASK_HOLD_PIDSUSPEND);
4643
4644 task_unlock(task);
4645
4646 if ((KERN_SUCCESS == kr) && task->message_app_suspended) {
4647 iokit_task_app_suspended_changed(task);
4648 }
4649
4650 #if CONFIG_FREEZE
4651
4652 task_lock(task);
4653
4654 if (kr == KERN_SUCCESS) {
4655 task->frozen = FALSE;
4656 }
4657 task->changing_freeze_state = FALSE;
4658 thread_wakeup(&task->changing_freeze_state);
4659
4660 task_unlock(task);
4661 #endif
4662
4663 return kr;
4664 }
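
/*
 * The changing_freeze_state loop above is the kernel's standard
 * condition-wait idiom: re-check the predicate with the lock held,
 * assert_wait() on the event, drop the lock, block, then re-take the
 * lock before checking again. Generic sketch (illustrative only):
 */
#if 0
static void
wait_until_flag_clears(task_t task, boolean_t *flag)
{
	task_lock(task);
	while (*flag) {
		assert_wait((event_t)flag, THREAD_UNINT);
		task_unlock(task);
		thread_block(THREAD_CONTINUE_NULL);
		task_lock(task);
	}
	task_unlock(task);
}
#endif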
4665
4666 os_refgrp_decl(static, task_watchports_refgrp, "task_watchports", NULL);
4667
4668 /*
4669 * task_add_turnstile_watchports:
4670 * Setup watchports to boost the main thread of the task.
4671 *
4672 * Arguments:
4673 * task: task being spawned
4674 * thread: main thread of task
4675 * portwatch_ports: array of watchports
4676 * portwatch_count: number of watchports
4677 *
4678 * Conditions:
4679 * Nothing locked.
4680 */
4681 void
4682 task_add_turnstile_watchports(
4683 task_t task,
4684 thread_t thread,
4685 ipc_port_t *portwatch_ports,
4686 uint32_t portwatch_count)
4687 {
4688 struct task_watchports *watchports = NULL;
4689 struct task_watchport_elem *previous_elem_array[TASK_MAX_WATCHPORT_COUNT] = {};
4690 os_ref_count_t refs;
4691
4692 /* Check if the task has terminated */
4693 if (!task->active) {
4694 return;
4695 }
4696
4697 assert(portwatch_count <= TASK_MAX_WATCHPORT_COUNT);
4698
4699 watchports = task_watchports_alloc_init(task, thread, portwatch_count);
4700
4701 /* Lock the ipc space */
4702 is_write_lock(task->itk_space);
4703
4704 /* Setup watchports to boost the main thread */
4705 refs = task_add_turnstile_watchports_locked(task,
4706 watchports, previous_elem_array, portwatch_ports,
4707 portwatch_count);
4708
4709 /* Drop the space lock */
4710 is_write_unlock(task->itk_space);
4711
4712 if (refs == 0) {
4713 task_watchports_deallocate(watchports);
4714 }
4715
4716 /* Drop the ref on previous_elem_array */
4717 for (uint32_t i = 0; i < portwatch_count && previous_elem_array[i] != NULL; i++) {
4718 task_watchport_elem_deallocate(previous_elem_array[i]);
4719 }
4720 }
4721
4722 /*
4723 * task_remove_turnstile_watchports:
4724 * Clear all turnstile boost on the task from watchports.
4725 *
4726 * Arguments:
4727 * task: task being terminated
4728 *
4729 * Conditions:
4730 * Nothing locked.
4731 */
4732 void
4733 task_remove_turnstile_watchports(
4734 task_t task)
4735 {
4736 os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4737 struct task_watchports *watchports = NULL;
4738 ipc_port_t port_freelist[TASK_MAX_WATCHPORT_COUNT] = {};
4739 uint32_t portwatch_count;
4740
4741 /* Lock the ipc space */
4742 is_write_lock(task->itk_space);
4743
4744 /* Check if a watchport boost exists */
4745 if (task->watchports == NULL) {
4746 is_write_unlock(task->itk_space);
4747 return;
4748 }
4749 watchports = task->watchports;
4750 portwatch_count = watchports->tw_elem_array_count;
4751
4752 refs = task_remove_turnstile_watchports_locked(task, watchports,
4753 port_freelist);
4754
4755 is_write_unlock(task->itk_space);
4756
4757 /* Drop all the port references */
4758 for (uint32_t i = 0; i < portwatch_count && port_freelist[i] != NULL; i++) {
4759 ip_release(port_freelist[i]);
4760 }
4761
4762 /* Clear the task and thread references for task_watchport */
4763 if (refs == 0) {
4764 task_watchports_deallocate(watchports);
4765 }
4766 }
4767
4768 /*
4769 * task_transfer_turnstile_watchports:
4770 * Transfer all watchport turnstile boost from old task to new task.
4771 *
4772 * Arguments:
4773 * old_task: task calling exec
4774 * new_task: new exec'ed task
4775 * thread: main thread of new task
4776 *
4777 * Conditions:
4778 * Nothing locked.
4779 */
4780 void
4781 task_transfer_turnstile_watchports(
4782 task_t old_task,
4783 task_t new_task,
4784 thread_t new_thread)
4785 {
4786 struct task_watchports *old_watchports = NULL;
4787 struct task_watchports *new_watchports = NULL;
4788 os_ref_count_t old_refs = TASK_MAX_WATCHPORT_COUNT;
4789 os_ref_count_t new_refs = TASK_MAX_WATCHPORT_COUNT;
4790 uint32_t portwatch_count;
4791
4792 if (old_task->watchports == NULL || !new_task->active) {
4793 return;
4794 }
4795
4796 /* Get the watch port count from the old task */
4797 is_write_lock(old_task->itk_space);
4798 if (old_task->watchports == NULL) {
4799 is_write_unlock(old_task->itk_space);
4800 return;
4801 }
4802
4803 portwatch_count = old_task->watchports->tw_elem_array_count;
4804 is_write_unlock(old_task->itk_space);
4805
4806 new_watchports = task_watchports_alloc_init(new_task, new_thread, portwatch_count);
4807
4808 /* Lock the ipc space for old task */
4809 is_write_lock(old_task->itk_space);
4810
4811 /* Lock the ipc space for new task */
4812 is_write_lock(new_task->itk_space);
4813
4814 /* Check if a watchport boost exists */
4815 if (old_task->watchports == NULL || !new_task->active) {
4816 is_write_unlock(new_task->itk_space);
4817 is_write_unlock(old_task->itk_space);
4818 (void)task_watchports_release(new_watchports);
4819 task_watchports_deallocate(new_watchports);
4820 return;
4821 }
4822
4823 old_watchports = old_task->watchports;
4824 assert(portwatch_count == old_task->watchports->tw_elem_array_count);
4825
4826 /* Setup new task watchports */
4827 new_task->watchports = new_watchports;
4828
4829 for (uint32_t i = 0; i < portwatch_count; i++) {
4830 ipc_port_t port = old_watchports->tw_elem[i].twe_port;
4831
4832 if (port == NULL) {
4833 task_watchport_elem_clear(&new_watchports->tw_elem[i]);
4834 continue;
4835 }
4836
4837 /* Lock the port and check if it has the entry */
4838 ip_mq_lock(port);
4839
4840 task_watchport_elem_init(&new_watchports->tw_elem[i], new_task, port);
4841
4842 if (ipc_port_replace_watchport_elem_conditional_locked(port,
4843 &old_watchports->tw_elem[i], &new_watchports->tw_elem[i]) == KERN_SUCCESS) {
4844 task_watchport_elem_clear(&old_watchports->tw_elem[i]);
4845
4846 task_watchports_retain(new_watchports);
4847 old_refs = task_watchports_release(old_watchports);
4848
4849 /* Check if all ports are cleaned */
4850 if (old_refs == 0) {
4851 old_task->watchports = NULL;
4852 }
4853 } else {
4854 task_watchport_elem_clear(&new_watchports->tw_elem[i]);
4855 }
4856 /* port unlocked by ipc_port_replace_watchport_elem_conditional_locked */
4857 }
4858
4859 /* Drop the reference on new task_watchports struct returned by task_watchports_alloc_init */
4860 new_refs = task_watchports_release(new_watchports);
4861 if (new_refs == 0) {
4862 new_task->watchports = NULL;
4863 }
4864
4865 is_write_unlock(new_task->itk_space);
4866 is_write_unlock(old_task->itk_space);
4867
4868 /* Clear the task and thread references for old_watchport */
4869 if (old_refs == 0) {
4870 task_watchports_deallocate(old_watchports);
4871 }
4872
4873 /* Clear the task and thread references for new_watchport */
4874 if (new_refs == 0) {
4875 task_watchports_deallocate(new_watchports);
4876 }
4877 }
4878
4879 /*
4880 * task_add_turnstile_watchports_locked:
4881 * Setup watchports to boost the main thread of the task.
4882 *
4883 * Arguments:
4884 * task: task to boost
4885 * watchports: watchport structure to be attached to the task
4886 * previous_elem_array: an array of old watchport_elem to be returned to caller
4887 * portwatch_ports: array of watchports
4888 * portwatch_count: number of watchports
4889 *
4890 * Conditions:
4891 * ipc space of the task locked.
4892 * returns array of old watchport_elem in previous_elem_array
4893 */
4894 static os_ref_count_t
4895 task_add_turnstile_watchports_locked(
4896 task_t task,
4897 struct task_watchports *watchports,
4898 struct task_watchport_elem **previous_elem_array,
4899 ipc_port_t *portwatch_ports,
4900 uint32_t portwatch_count)
4901 {
4902 os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4903
4904 /* Check if the task is still active */
4905 if (!task->active) {
4906 refs = task_watchports_release(watchports);
4907 return refs;
4908 }
4909
4910 assert(task->watchports == NULL);
4911 task->watchports = watchports;
4912
4913 for (uint32_t i = 0, j = 0; i < portwatch_count; i++) {
4914 ipc_port_t port = portwatch_ports[i];
4915
4916 task_watchport_elem_init(&watchports->tw_elem[i], task, port);
4917 if (port == NULL) {
4918 task_watchport_elem_clear(&watchports->tw_elem[i]);
4919 continue;
4920 }
4921
4922 ip_mq_lock(port);
4923
4924 /* Check if port is in valid state to be setup as watchport */
4925 if (ipc_port_add_watchport_elem_locked(port, &watchports->tw_elem[i],
4926 &previous_elem_array[j]) != KERN_SUCCESS) {
4927 task_watchport_elem_clear(&watchports->tw_elem[i]);
4928 continue;
4929 }
4930 /* port unlocked on return */
4931
4932 ip_reference(port);
4933 task_watchports_retain(watchports);
4934 if (previous_elem_array[j] != NULL) {
4935 j++;
4936 }
4937 }
4938
4939 /* Drop the reference on task_watchport struct returned by os_ref_init */
4940 refs = task_watchports_release(watchports);
4941 if (refs == 0) {
4942 task->watchports = NULL;
4943 }
4944
4945 return refs;
4946 }
4947
4948 /*
4949 * task_remove_turnstile_watchports_locked:
4950 * Clear all turnstile boost on the task from watchports.
4951 *
4952 * Arguments:
4953 * task: task to remove watchports from
4954 * watchports: watchports structure for the task
4955 * port_freelist: array of ports returned with ref to caller
4956 *
4958 * Conditions:
4959 * ipc space of the task locked.
4960 * array of ports with refs are returned in port_freelist
4961 */
4962 static os_ref_count_t
4963 task_remove_turnstile_watchports_locked(
4964 task_t task,
4965 struct task_watchports *watchports,
4966 ipc_port_t *port_freelist)
4967 {
4968 os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4969
4970 for (uint32_t i = 0, j = 0; i < watchports->tw_elem_array_count; i++) {
4971 ipc_port_t port = watchports->tw_elem[i].twe_port;
4972 if (port == NULL) {
4973 continue;
4974 }
4975
4976 /* Lock the port and check if it has the entry */
4977 ip_mq_lock(port);
4978 if (ipc_port_clear_watchport_elem_internal_conditional_locked(port,
4979 &watchports->tw_elem[i]) == KERN_SUCCESS) {
4980 task_watchport_elem_clear(&watchports->tw_elem[i]);
4981 port_freelist[j++] = port;
4982 refs = task_watchports_release(watchports);
4983
4984 /* Check if all ports are cleaned */
4985 if (refs == 0) {
4986 task->watchports = NULL;
4987 break;
4988 }
4989 }
4990 /* mqueue and port unlocked by ipc_port_clear_watchport_elem_internal_conditional_locked */
4991 }
4992 return refs;
4993 }
4994
4995 /*
4996 * task_watchports_alloc_init:
4997 * Allocate and initialize task watchport struct.
4998 *
4999 * Conditions:
5000 * Nothing locked.
5001 */
5002 static struct task_watchports *
5003 task_watchports_alloc_init(
5004 task_t task,
5005 thread_t thread,
5006 uint32_t count)
5007 {
5008 struct task_watchports *watchports = kalloc_type(struct task_watchports,
5009 struct task_watchport_elem, count, Z_WAITOK | Z_ZERO | Z_NOFAIL);
5010
5011 task_reference(task);
5012 thread_reference(thread);
5013 watchports->tw_task = task;
5014 watchports->tw_thread = thread;
5015 watchports->tw_elem_array_count = count;
5016 os_ref_init(&watchports->tw_refcount, &task_watchports_refgrp);
5017
5018 return watchports;
5019 }
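/*
 * The two-type kalloc_type()/kfree_type() form above allocates a single
 * block holding one struct task_watchports header followed by `count`
 * task_watchport_elem entries (a flexible-array-style layout). A minimal
 * pairing sketch, assuming valid task/thread pointers (the routine takes
 * its own references on both):
 *
 *	struct task_watchports *tw;
 *	tw = task_watchports_alloc_init(task, thread, portwatch_count);
 *	...
 *	task_watchports_deallocate(tw);   (freed with the same count)
 */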
5020
5021 /*
5022 * task_watchports_deallocate:
5023 * Deallocate task watchport struct.
5024 *
5025 * Conditions:
5026 * Nothing locked.
5027 */
5028 static void
5029 task_watchports_deallocate(
5030 struct task_watchports *watchports)
5031 {
5032 uint32_t portwatch_count = watchports->tw_elem_array_count;
5033
5034 task_deallocate(watchports->tw_task);
5035 thread_deallocate(watchports->tw_thread);
5036 kfree_type(struct task_watchports, struct task_watchport_elem,
5037 portwatch_count, watchports);
5038 }
5039
5040 /*
5041 * task_watchport_elem_deallocate:
5042 * Deallocate task watchport element and release its ref on task_watchport.
5043 *
5044 * Conditions:
5045 * Nothing locked.
5046 */
5047 void
5048 task_watchport_elem_deallocate(
5049 struct task_watchport_elem *watchport_elem)
5050 {
5051 os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
5052 task_t task = watchport_elem->twe_task;
5053 struct task_watchports *watchports = NULL;
5054 ipc_port_t port = NULL;
5055
5056 assert(task != NULL);
5057
5058 /* Take the space lock to modify the element */
5059 is_write_lock(task->itk_space);
5060
5061 watchports = task->watchports;
5062 assert(watchports != NULL);
5063
5064 port = watchport_elem->twe_port;
5065 assert(port != NULL);
5066
5067 task_watchport_elem_clear(watchport_elem);
5068 refs = task_watchports_release(watchports);
5069
5070 if (refs == 0) {
5071 task->watchports = NULL;
5072 }
5073
5074 is_write_unlock(task->itk_space);
5075
5076 ip_release(port);
5077 if (refs == 0) {
5078 task_watchports_deallocate(watchports);
5079 }
5080 }
5081
5082 /*
5083 * task_has_watchports:
5084 * Return TRUE if task has watchport boosts.
5085 *
5086 * Conditions:
5087 * Nothing locked.
5088 */
5089 boolean_t
5090 task_has_watchports(task_t task)
5091 {
5092 return task->watchports != NULL;
5093 }
5094
5095 #if DEVELOPMENT || DEBUG
5096
5097 extern void IOSleep(int);
5098
5099 kern_return_t
5100 task_disconnect_page_mappings(task_t task)
5101 {
5102 int n;
5103
5104 if (task == TASK_NULL || task == kernel_task) {
5105 return KERN_INVALID_ARGUMENT;
5106 }
5107
5108 /*
5109 * This function strips all of the mappings from the pmap for the
5110 * specified task, forcing the task to re-fault all of the pages it
5111 * is actively using. This lets us approximate the true working set
5112 * of the specified task. We only engage if at least one of the
5113 * threads in the task is runnable, but we want to sweep continuously
5114 * (at least for a while; the limit is arbitrarily set at 100 sweeps
5115 * and should be revisited as we gain experience) to get a better
5116 * view into which areas within a page are being visited, as opposed
5117 * to only seeing the first fault of a page after the task becomes
5118 * runnable. In the future this may block until awakened by a thread
5119 * in this task being made runnable, but for now we are periodically
5120 * polled by the user-level debug tool driving the sysctl.
5121 */
5124 for (n = 0; n < 100; n++) {
5125 thread_t thread;
5126 boolean_t runnable;
5127 boolean_t do_unnest;
5128 int page_count;
5129
5130 runnable = FALSE;
5131 do_unnest = FALSE;
5132
5133 task_lock(task);
5134
5135 queue_iterate(&task->threads, thread, thread_t, task_threads) {
5136 if (thread->state & TH_RUN) {
5137 runnable = TRUE;
5138 break;
5139 }
5140 }
5141 if (n == 0) {
5142 task->task_disconnected_count++;
5143 }
5144
5145 if (task->task_unnested == FALSE) {
5146 if (runnable == TRUE) {
5147 task->task_unnested = TRUE;
5148 do_unnest = TRUE;
5149 }
5150 }
5151 task_unlock(task);
5152
5153 if (runnable == FALSE) {
5154 break;
5155 }
5156
5157 KDBG_RELEASE((MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_START,
5158 task, do_unnest, task->task_disconnected_count);
5159
5160 page_count = vm_map_disconnect_page_mappings(task->map, do_unnest);
5161
5162 KDBG_RELEASE((MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_END,
5163 task, page_count);
5164
5165 if ((n % 5) == 4) {
5166 IOSleep(1);
5167 }
5168 }
5169 return KERN_SUCCESS;
5170 }
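/*
 * Illustrative only: the user-level debug tool mentioned above drives
 * this routine through a sysctl. A minimal polling sketch, assuming a
 * hypothetical "kern.task_disconnect_mappings" sysctl wired to this
 * routine (the real sysctl name is not shown in this file):
 *
 *	while (sampling) {
 *		sysctlbyname("kern.task_disconnect_mappings",
 *		    NULL, NULL, &pid, sizeof(pid));
 *		usleep(interval_us);
 *	}
 */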
5171
5172 #endif
5173
5174
5175 #if CONFIG_FREEZE
5176
5177 /*
5178 * task_freeze:
5179 *
5180 * Freeze a task.
5181 *
5182 * Conditions:
5183 * The caller holds a reference to the task
5184 */
5185 extern struct freezer_context freezer_context_global;
5186
5187 kern_return_t
5188 task_freeze(
5189 task_t task,
5190 uint32_t *purgeable_count,
5191 uint32_t *wired_count,
5192 uint32_t *clean_count,
5193 uint32_t *dirty_count,
5194 uint32_t dirty_budget,
5195 uint32_t *shared_count,
5196 int *freezer_error_code,
5197 boolean_t eval_only)
5198 {
5199 kern_return_t kr = KERN_SUCCESS;
5200
5201 if (task == TASK_NULL || task == kernel_task) {
5202 return KERN_INVALID_ARGUMENT;
5203 }
5204
5205 task_lock(task);
5206
5207 while (task->changing_freeze_state) {
5208 assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
5209 task_unlock(task);
5210 thread_block(THREAD_CONTINUE_NULL);
5211
5212 task_lock(task);
5213 }
5214 if (task->frozen) {
5215 task_unlock(task);
5216 return KERN_FAILURE;
5217 }
5218 task->changing_freeze_state = TRUE;
5219
5220 freezer_context_global.freezer_ctx_task = task;
5221
5222 task_unlock(task);
5223
5224 #if CONFIG_DEFERRED_RECLAIM
5225 if (vm_deferred_reclamation_task_has_ring(task)) {
5226 kr = vm_deferred_reclamation_task_drain(task, RECLAIM_OPTIONS_NONE);
5227 if (kr != KERN_SUCCESS) {
5228 os_log_error(OS_LOG_DEFAULT, "Failed to drain reclamation ring prior to freezing (%d)\n", kr);
5229 }
5230 }
5231 #endif /* CONFIG_DEFERRED_RECLAIM */
5232
5233 kr = vm_map_freeze(task,
5234 purgeable_count,
5235 wired_count,
5236 clean_count,
5237 dirty_count,
5238 dirty_budget,
5239 shared_count,
5240 freezer_error_code,
5241 eval_only);
5242
5243 task_lock(task);
5244
5245 if ((kr == KERN_SUCCESS) && (eval_only == FALSE)) {
5246 task->frozen = TRUE;
5247
5248 freezer_context_global.freezer_ctx_task = NULL;
5249 freezer_context_global.freezer_ctx_uncompressed_pages = 0;
5250
5251 if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
5252 /*
5253 * reset the counter tracking the # of swapped compressed pages
5254 * because we are now done with this freeze session and task.
5255 */
5256
5257 *dirty_count = (uint32_t) (freezer_context_global.freezer_ctx_swapped_bytes / PAGE_SIZE_64); /* used to track pageouts */
5258 }
5259
5260 freezer_context_global.freezer_ctx_swapped_bytes = 0;
5261 }
5262
5263 task->changing_freeze_state = FALSE;
5264 thread_wakeup(&task->changing_freeze_state);
5265
5266 task_unlock(task);
5267
5268 if (VM_CONFIG_COMPRESSOR_IS_PRESENT &&
5269 (kr == KERN_SUCCESS) &&
5270 (eval_only == FALSE)) {
5271 vm_wake_compactor_swapper();
5272 /*
5273 * We do an explicit wakeup of the swapout thread here
5274 * because the compact_and_swap routines don't have
5275 * knowledge about these kind of "per-task packed c_segs"
5276 * and so will not be evaluating whether we need to do
5277 * a wakeup there.
5278 */
5279 thread_wakeup((event_t)&vm_swapout_thread);
5280 }
5281
5282 return kr;
5283 }
5284
5285 /*
5286 * task_thaw:
5287 *
5288 * Thaw a currently frozen task.
5289 *
5290 * Conditions:
5291 * The caller holds a reference to the task
5292 */
5293 kern_return_t
5294 task_thaw(
5295 task_t task)
5296 {
5297 if (task == TASK_NULL || task == kernel_task) {
5298 return KERN_INVALID_ARGUMENT;
5299 }
5300
5301 task_lock(task);
5302
5303 while (task->changing_freeze_state) {
5304 assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
5305 task_unlock(task);
5306 thread_block(THREAD_CONTINUE_NULL);
5307
5308 task_lock(task);
5309 }
5310 if (!task->frozen) {
5311 task_unlock(task);
5312 return KERN_FAILURE;
5313 }
5314 task->frozen = FALSE;
5315
5316 task_unlock(task);
5317
5318 return KERN_SUCCESS;
5319 }
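/*
 * A minimal caller-side sketch of the freeze/thaw pairing, assuming the
 * caller holds a task reference. Passing eval_only=TRUE only evaluates
 * what could be frozen (task->frozen is not set); a later call with
 * eval_only=FALSE performs the actual freeze:
 *
 *	uint32_t purgeable, wired, clean, dirty, shared;
 *	int ferr;
 *	kr = task_freeze(task, &purgeable, &wired, &clean, &dirty,
 *	    dirty_budget, &shared, &ferr, FALSE);
 *	...
 *	if (kr == KERN_SUCCESS) {
 *		kr = task_thaw(task);
 *	}
 */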
5320
5321 void
5322 task_update_frozen_to_swap_acct(task_t task, int64_t amount, freezer_acct_op_t op)
5323 {
5324 /*
5325 * We don't assert that the task lock is held because we call this
5326 * routine from the decompression path and we won't be holding the
5327 * task lock. However, since we are in the context of the task we are
5328 * safe.
5329 * In the case of the task_freeze path, we call it from behind the task
5330 * lock but we don't need to because we have a reference on the proc
5331 * being frozen.
5332 */
5333
5334 assert(task);
5335 if (amount == 0) {
5336 return;
5337 }
5338
5339 if (op == CREDIT_TO_SWAP) {
5340 ledger_credit_nocheck(task->ledger, task_ledgers.frozen_to_swap, amount);
5341 } else if (op == DEBIT_FROM_SWAP) {
5342 ledger_debit_nocheck(task->ledger, task_ledgers.frozen_to_swap, amount);
5343 } else {
5344 panic("task_update_frozen_to_swap_acct: Invalid ledger op");
5345 }
5346 }
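/*
 * Illustrative call sites for the accounting above (a sketch, not an
 * exhaustive list): the swapout path credits the bytes it writes out
 * for a frozen task, and the swapin/decompression path debits them:
 *
 *	task_update_frozen_to_swap_acct(task, bytes, CREDIT_TO_SWAP);
 *	...
 *	task_update_frozen_to_swap_acct(task, bytes, DEBIT_FROM_SWAP);
 */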
5347 #endif /* CONFIG_FREEZE */
5348
5349 kern_return_t
5350 task_set_security_tokens(
5351 task_t task,
5352 security_token_t sec_token,
5353 audit_token_t audit_token,
5354 host_priv_t host_priv)
5355 {
5356 ipc_port_t host_port = IP_NULL;
5357 kern_return_t kr;
5358
5359 if (task == TASK_NULL) {
5360 return KERN_INVALID_ARGUMENT;
5361 }
5362
5363 task_lock(task);
5364 task_set_tokens(task, &sec_token, &audit_token);
5365 task_unlock(task);
5366
5367 if (host_priv != HOST_PRIV_NULL) {
5368 kr = host_get_host_priv_port(host_priv, &host_port);
5369 } else {
5370 kr = host_get_host_port(host_priv_self(), &host_port);
5371 }
5372 assert(kr == KERN_SUCCESS);
5373
5374 kr = task_set_special_port_internal(task, TASK_HOST_PORT, host_port);
5375 return kr;
5376 }
5377
5378 kern_return_t
5379 task_send_trace_memory(
5380 __unused task_t target_task,
5381 __unused uint32_t pid,
5382 __unused uint64_t uniqueid)
5383 {
5384 return KERN_INVALID_ARGUMENT;
5385 }
5386
5387 /*
5388 * This routine was added, pretty much exclusively, for registering the
5389 * RPC glue vector for in-kernel short circuited tasks. Rather than
5390 * removing it completely, I have only disabled that feature (which was
5391 * the only feature at the time). It just appears that we are going to
5392 * want to add some user data to tasks in the future (i.e. bsd info,
5393 * task names, etc...), so I left it in the formal task interface.
5394 */
5395 kern_return_t
5396 task_set_info(
5397 task_t task,
5398 task_flavor_t flavor,
5399 __unused task_info_t task_info_in, /* pointer to IN array */
5400 __unused mach_msg_type_number_t task_info_count)
5401 {
5402 if (task == TASK_NULL) {
5403 return KERN_INVALID_ARGUMENT;
5404 }
5405 switch (flavor) {
5406 #if CONFIG_ATM
5407 case TASK_TRACE_MEMORY_INFO:
5408 return KERN_NOT_SUPPORTED;
5409 #endif // CONFIG_ATM
5410 default:
5411 return KERN_INVALID_ARGUMENT;
5412 }
5413 }
5414
5415 static void
5416 _task_fill_times(task_t task, time_value_t *user_time, time_value_t *sys_time)
5417 {
5418 clock_sec_t sec;
5419 clock_usec_t usec;
5420
5421 struct recount_times_mach times = recount_task_terminated_times(task);
5422 absolutetime_to_microtime(times.rtm_user, &sec, &usec);
5423 user_time->seconds = (typeof(user_time->seconds))sec;
5424 user_time->microseconds = usec;
5425 absolutetime_to_microtime(times.rtm_system, &sec, &usec);
5426 sys_time->seconds = (typeof(sys_time->seconds))sec;
5427 sys_time->microseconds = usec;
5428 }
5429
5430 int radar_20146450 = 1;
5431 kern_return_t
5432 task_info(
5433 task_t task,
5434 task_flavor_t flavor,
5435 task_info_t task_info_out,
5436 mach_msg_type_number_t *task_info_count)
5437 {
5438 kern_return_t error = KERN_SUCCESS;
5439 mach_msg_type_number_t original_task_info_count;
5440 bool is_kernel_task = (task == kernel_task);
5441
5442 if (task == TASK_NULL) {
5443 return KERN_INVALID_ARGUMENT;
5444 }
5445
5446 original_task_info_count = *task_info_count;
5447 task_lock(task);
5448
5449 if (task != current_task() && !task->active) {
5450 task_unlock(task);
5451 return KERN_INVALID_ARGUMENT;
5452 }
5453
5454
5455 switch (flavor) {
5456 case TASK_BASIC_INFO_32:
5457 case TASK_BASIC2_INFO_32:
5458 #if defined(__arm64__)
5459 case TASK_BASIC_INFO_64:
5460 #endif
5461 {
5462 task_basic_info_32_t basic_info;
5463 ledger_amount_t tmp;
5464
5465 if (*task_info_count < TASK_BASIC_INFO_32_COUNT) {
5466 error = KERN_INVALID_ARGUMENT;
5467 break;
5468 }
5469
5470 basic_info = (task_basic_info_32_t)task_info_out;
5471
5472 basic_info->virtual_size = (typeof(basic_info->virtual_size))
5473 vm_map_adjusted_size(is_kernel_task ? kernel_map : task->map);
5474 if (flavor == TASK_BASIC2_INFO_32) {
5475 /*
5476 * The "BASIC2" flavor gets the maximum resident
5477 * size instead of the current resident size...
5478 */
5479 ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, &tmp);
5480 } else {
5481 ledger_get_balance(task->ledger, task_ledgers.phys_mem, &tmp);
5482 }
5483 basic_info->resident_size = (natural_t) MIN((ledger_amount_t) UINT32_MAX, tmp);
5484
5485 _task_fill_times(task, &basic_info->user_time,
5486 &basic_info->system_time);
5487
5488 basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5489 basic_info->suspend_count = task->user_stop_count;
5490
5491 *task_info_count = TASK_BASIC_INFO_32_COUNT;
5492 break;
5493 }
5494
5495 #if defined(__arm64__)
5496 case TASK_BASIC_INFO_64_2:
5497 {
5498 task_basic_info_64_2_t basic_info;
5499
5500 if (*task_info_count < TASK_BASIC_INFO_64_2_COUNT) {
5501 error = KERN_INVALID_ARGUMENT;
5502 break;
5503 }
5504
5505 basic_info = (task_basic_info_64_2_t)task_info_out;
5506
5507 basic_info->virtual_size = vm_map_adjusted_size(is_kernel_task ?
5508 kernel_map : task->map);
5509 ledger_get_balance(task->ledger, task_ledgers.phys_mem,
5510 (ledger_amount_t *)&basic_info->resident_size);
5511 basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5512 basic_info->suspend_count = task->user_stop_count;
5513 _task_fill_times(task, &basic_info->user_time,
5514 &basic_info->system_time);
5515
5516 *task_info_count = TASK_BASIC_INFO_64_2_COUNT;
5517 break;
5518 }
5519
5520 #else /* defined(__arm64__) */
5521 case TASK_BASIC_INFO_64:
5522 {
5523 task_basic_info_64_t basic_info;
5524
5525 if (*task_info_count < TASK_BASIC_INFO_64_COUNT) {
5526 error = KERN_INVALID_ARGUMENT;
5527 break;
5528 }
5529
5530 basic_info = (task_basic_info_64_t)task_info_out;
5531
5532 basic_info->virtual_size = vm_map_adjusted_size(is_kernel_task ?
5533 kernel_map : task->map);
5534 ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *)&basic_info->resident_size);
5535 basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5536 basic_info->suspend_count = task->user_stop_count;
5537 _task_fill_times(task, &basic_info->user_time,
5538 &basic_info->system_time);
5539
5540 *task_info_count = TASK_BASIC_INFO_64_COUNT;
5541 break;
5542 }
5543 #endif /* defined(__arm64__) */
5544
5545 case MACH_TASK_BASIC_INFO:
5546 {
5547 mach_task_basic_info_t basic_info;
5548
5549 if (*task_info_count < MACH_TASK_BASIC_INFO_COUNT) {
5550 error = KERN_INVALID_ARGUMENT;
5551 break;
5552 }
5553
5554 basic_info = (mach_task_basic_info_t)task_info_out;
5555
5556 basic_info->virtual_size = vm_map_adjusted_size(is_kernel_task ?
5557 kernel_map : task->map);
5558 ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &basic_info->resident_size);
5559 ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &basic_info->resident_size_max);
5560 basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5561 basic_info->suspend_count = task->user_stop_count;
5562 _task_fill_times(task, &basic_info->user_time,
5563 &basic_info->system_time);
5564
5565 *task_info_count = MACH_TASK_BASIC_INFO_COUNT;
5566 break;
5567 }
5568
5569 case TASK_THREAD_TIMES_INFO:
5570 {
5571 task_thread_times_info_t times_info;
5572 thread_t thread;
5573
5574 if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
5575 error = KERN_INVALID_ARGUMENT;
5576 break;
5577 }
5578
5579 times_info = (task_thread_times_info_t)task_info_out;
5580 times_info->user_time = (time_value_t){ 0 };
5581 times_info->system_time = (time_value_t){ 0 };
5582
5583 queue_iterate(&task->threads, thread, thread_t, task_threads) {
5584 if ((thread->options & TH_OPT_IDLE_THREAD) == 0) {
5585 time_value_t user_time, system_time;
5586
5587 thread_read_times(thread, &user_time, &system_time, NULL);
5588 time_value_add(&times_info->user_time, &user_time);
5589 time_value_add(&times_info->system_time, &system_time);
5590 }
5591 }
5592
5593 *task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
5594 break;
5595 }
5596
5597 case TASK_ABSOLUTETIME_INFO:
5598 {
5599 task_absolutetime_info_t info;
5600
5601 if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT) {
5602 error = KERN_INVALID_ARGUMENT;
5603 break;
5604 }
5605
5606 info = (task_absolutetime_info_t)task_info_out;
5607
5608 struct recount_times_mach term_times =
5609 recount_task_terminated_times(task);
5610 struct recount_times_mach total_times = recount_task_times(task);
5611
5612 info->total_user = total_times.rtm_user;
5613 info->total_system = total_times.rtm_system;
5614 info->threads_user = total_times.rtm_user - term_times.rtm_user;
5615 info->threads_system = total_times.rtm_system - term_times.rtm_system;
5616
5617 *task_info_count = TASK_ABSOLUTETIME_INFO_COUNT;
5618 break;
5619 }
5620
5621 case TASK_DYLD_INFO:
5622 {
5623 task_dyld_info_t info;
5624
5625 /*
5626 * We added the format field to TASK_DYLD_INFO output. For
5627 * temporary backward compatibility, accept the fact that
5628 * clients may ask for the old version - distinguished by the
5629 * size of the expected result structure.
5630 */
5631 #define TASK_LEGACY_DYLD_INFO_COUNT \
5632 offsetof(struct task_dyld_info, all_image_info_format)/sizeof(natural_t)
5633
5634 if (*task_info_count < TASK_LEGACY_DYLD_INFO_COUNT) {
5635 error = KERN_INVALID_ARGUMENT;
5636 break;
5637 }
5638
5639 info = (task_dyld_info_t)task_info_out;
5640 info->all_image_info_addr = task->all_image_info_addr;
5641 info->all_image_info_size = task->all_image_info_size;
5642
5643 /* only set format on output for those expecting it */
5644 if (*task_info_count >= TASK_DYLD_INFO_COUNT) {
5645 info->all_image_info_format = task_has_64Bit_addr(task) ?
5646 TASK_DYLD_ALL_IMAGE_INFO_64 :
5647 TASK_DYLD_ALL_IMAGE_INFO_32;
5648 *task_info_count = TASK_DYLD_INFO_COUNT;
5649 } else {
5650 *task_info_count = TASK_LEGACY_DYLD_INFO_COUNT;
5651 }
5652 break;
5653 }
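/*
 * A user-space sketch of querying this flavor with the current count;
 * callers built against the legacy layout pass
 * TASK_LEGACY_DYLD_INFO_COUNT instead and do not receive the
 * all_image_info_format field:
 *
 *	struct task_dyld_info dyld_info;
 *	mach_msg_type_number_t count = TASK_DYLD_INFO_COUNT;
 *	kr = task_info(mach_task_self(), TASK_DYLD_INFO,
 *	    (task_info_t)&dyld_info, &count);
 */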
5654
5655 case TASK_EXTMOD_INFO:
5656 {
5657 task_extmod_info_t info;
5658 void *p;
5659
5660 if (*task_info_count < TASK_EXTMOD_INFO_COUNT) {
5661 error = KERN_INVALID_ARGUMENT;
5662 break;
5663 }
5664
5665 info = (task_extmod_info_t)task_info_out;
5666
5667 p = get_bsdtask_info(task);
5668 if (p) {
5669 proc_getexecutableuuid(p, info->task_uuid, sizeof(info->task_uuid));
5670 } else {
5671 bzero(info->task_uuid, sizeof(info->task_uuid));
5672 }
5673 info->extmod_statistics = task->extmod_statistics;
5674 *task_info_count = TASK_EXTMOD_INFO_COUNT;
5675
5676 break;
5677 }
5678
5679 case TASK_KERNELMEMORY_INFO:
5680 {
5681 task_kernelmemory_info_t tkm_info;
5682 ledger_amount_t credit, debit;
5683
5684 if (*task_info_count < TASK_KERNELMEMORY_INFO_COUNT) {
5685 error = KERN_INVALID_ARGUMENT;
5686 break;
5687 }
5688
5689 tkm_info = (task_kernelmemory_info_t) task_info_out;
5690 tkm_info->total_palloc = 0;
5691 tkm_info->total_pfree = 0;
5692 tkm_info->total_salloc = 0;
5693 tkm_info->total_sfree = 0;
5694
5695 if (task == kernel_task) {
5696 /*
5697 * All shared allocs/frees from other tasks count against
5698 * the kernel private memory usage. If we are looking up
5699 * info for the kernel task, gather from everywhere.
5700 */
5701 task_unlock(task);
5702
5703 /* start by accounting for all the terminated tasks against the kernel */
5704 tkm_info->total_palloc = tasks_tkm_private.alloc + tasks_tkm_shared.alloc;
5705 tkm_info->total_pfree = tasks_tkm_private.free + tasks_tkm_shared.free;
5706
5707 /* count all other task/thread shared alloc/free against the kernel */
5708 lck_mtx_lock(&tasks_threads_lock);
5709
5710 /* XXX this really shouldn't be using the function parameter 'task' as a local var! */
5711 queue_iterate(&tasks, task, task_t, tasks) {
5712 if (task == kernel_task) {
5713 if (ledger_get_entries(task->ledger,
5714 task_ledgers.tkm_private, &credit,
5715 &debit) == KERN_SUCCESS) {
5716 tkm_info->total_palloc += credit;
5717 tkm_info->total_pfree += debit;
5718 }
5719 }
5720 if (!ledger_get_entries(task->ledger,
5721 task_ledgers.tkm_shared, &credit, &debit)) {
5722 tkm_info->total_palloc += credit;
5723 tkm_info->total_pfree += debit;
5724 }
5725 }
5726 lck_mtx_unlock(&tasks_threads_lock);
5727 } else {
5728 if (!ledger_get_entries(task->ledger,
5729 task_ledgers.tkm_private, &credit, &debit)) {
5730 tkm_info->total_palloc = credit;
5731 tkm_info->total_pfree = debit;
5732 }
5733 if (!ledger_get_entries(task->ledger,
5734 task_ledgers.tkm_shared, &credit, &debit)) {
5735 tkm_info->total_salloc = credit;
5736 tkm_info->total_sfree = debit;
5737 }
5738 task_unlock(task);
5739 }
5740
5741 *task_info_count = TASK_KERNELMEMORY_INFO_COUNT;
5742 return KERN_SUCCESS;
5743 }
5744
5745 /* OBSOLETE */
5746 case TASK_SCHED_FIFO_INFO:
5747 {
5748 if (*task_info_count < POLICY_FIFO_BASE_COUNT) {
5749 error = KERN_INVALID_ARGUMENT;
5750 break;
5751 }
5752
5753 error = KERN_INVALID_POLICY;
5754 break;
5755 }
5756
5757 /* OBSOLETE */
5758 case TASK_SCHED_RR_INFO:
5759 {
5760 policy_rr_base_t rr_base;
5761 uint32_t quantum_time;
5762 uint64_t quantum_ns;
5763
5764 if (*task_info_count < POLICY_RR_BASE_COUNT) {
5765 error = KERN_INVALID_ARGUMENT;
5766 break;
5767 }
5768
5769 rr_base = (policy_rr_base_t) task_info_out;
5770
5771 if (task != kernel_task) {
5772 error = KERN_INVALID_POLICY;
5773 break;
5774 }
5775
5776 rr_base->base_priority = task->priority;
5777
5778 quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
5779 absolutetime_to_nanoseconds(quantum_time, &quantum_ns);
5780
5781 rr_base->quantum = (uint32_t)(quantum_ns / 1000 / 1000);
5782
5783 *task_info_count = POLICY_RR_BASE_COUNT;
5784 break;
5785 }
5786
5787 /* OBSOLETE */
5788 case TASK_SCHED_TIMESHARE_INFO:
5789 {
5790 policy_timeshare_base_t ts_base;
5791
5792 if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT) {
5793 error = KERN_INVALID_ARGUMENT;
5794 break;
5795 }
5796
5797 ts_base = (policy_timeshare_base_t) task_info_out;
5798
5799 if (task == kernel_task) {
5800 error = KERN_INVALID_POLICY;
5801 break;
5802 }
5803
5804 ts_base->base_priority = task->priority;
5805
5806 *task_info_count = POLICY_TIMESHARE_BASE_COUNT;
5807 break;
5808 }
5809
5810 case TASK_SECURITY_TOKEN:
5811 {
5812 security_token_t *sec_token_p;
5813
5814 if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
5815 error = KERN_INVALID_ARGUMENT;
5816 break;
5817 }
5818
5819 sec_token_p = (security_token_t *) task_info_out;
5820
5821 *sec_token_p = *task_get_sec_token(task);
5822
5823 *task_info_count = TASK_SECURITY_TOKEN_COUNT;
5824 break;
5825 }
5826
5827 case TASK_AUDIT_TOKEN:
5828 {
5829 audit_token_t *audit_token_p;
5830
5831 if (*task_info_count < TASK_AUDIT_TOKEN_COUNT) {
5832 error = KERN_INVALID_ARGUMENT;
5833 break;
5834 }
5835
5836 audit_token_p = (audit_token_t *) task_info_out;
5837
5838 *audit_token_p = *task_get_audit_token(task);
5839
5840 *task_info_count = TASK_AUDIT_TOKEN_COUNT;
5841 break;
5842 }
5843
5844 case TASK_SCHED_INFO:
5845 error = KERN_INVALID_ARGUMENT;
5846 break;
5847
5848 case TASK_EVENTS_INFO:
5849 {
5850 task_events_info_t events_info;
5851 thread_t thread;
5852 uint64_t n_syscalls_mach, n_syscalls_unix, n_csw;
5853
5854 if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
5855 error = KERN_INVALID_ARGUMENT;
5856 break;
5857 }
5858
5859 events_info = (task_events_info_t) task_info_out;
5860
5861
5862 events_info->faults = (int32_t) MIN(counter_load(&task->faults), INT32_MAX);
5863 events_info->pageins = (int32_t) MIN(counter_load(&task->pageins), INT32_MAX);
5864 events_info->cow_faults = (int32_t) MIN(counter_load(&task->cow_faults), INT32_MAX);
5865 events_info->messages_sent = (int32_t) MIN(counter_load(&task->messages_sent), INT32_MAX);
5866 events_info->messages_received = (int32_t) MIN(counter_load(&task->messages_received), INT32_MAX);
5867
5868 n_syscalls_mach = task->syscalls_mach;
5869 n_syscalls_unix = task->syscalls_unix;
5870 n_csw = task->c_switch;
5871
5872 queue_iterate(&task->threads, thread, thread_t, task_threads) {
5873 n_csw += thread->c_switch;
5874 n_syscalls_mach += thread->syscalls_mach;
5875 n_syscalls_unix += thread->syscalls_unix;
5876 }
5877
5878 events_info->syscalls_mach = (int32_t) MIN(n_syscalls_mach, INT32_MAX);
5879 events_info->syscalls_unix = (int32_t) MIN(n_syscalls_unix, INT32_MAX);
5880 events_info->csw = (int32_t) MIN(n_csw, INT32_MAX);
5881
5882 *task_info_count = TASK_EVENTS_INFO_COUNT;
5883 break;
5884 }
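/*
 * Note: the 64-bit task/thread counters above are saturated into the
 * legacy int32_t fields of task_events_info via MIN(value, INT32_MAX)
 * rather than truncated, so a long-running task reports INT32_MAX
 * instead of a wrapped (possibly negative) value.
 */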
5885 case TASK_AFFINITY_TAG_INFO:
5886 {
5887 if (*task_info_count < TASK_AFFINITY_TAG_INFO_COUNT) {
5888 error = KERN_INVALID_ARGUMENT;
5889 break;
5890 }
5891
5892 error = task_affinity_info(task, task_info_out, task_info_count);
5893 break;
5894 }
5895 case TASK_POWER_INFO:
5896 {
5897 if (*task_info_count < TASK_POWER_INFO_COUNT) {
5898 error = KERN_INVALID_ARGUMENT;
5899 break;
5900 }
5901
5902 task_power_info_locked(task, (task_power_info_t)task_info_out, NULL, NULL, NULL);
5903 break;
5904 }
5905
5906 case TASK_POWER_INFO_V2:
5907 {
5908 if (*task_info_count < TASK_POWER_INFO_V2_COUNT_OLD) {
5909 error = KERN_INVALID_ARGUMENT;
5910 break;
5911 }
5912 task_power_info_v2_t tpiv2 = (task_power_info_v2_t) task_info_out;
5913 task_power_info_locked(task, &tpiv2->cpu_energy, &tpiv2->gpu_energy, tpiv2, NULL);
5914 break;
5915 }
5916
5917 case TASK_VM_INFO:
5918 case TASK_VM_INFO_PURGEABLE:
5919 {
5920 task_vm_info_t vm_info;
5921 vm_map_t map;
5922 ledger_amount_t tmp_amount;
5923
5924 struct proc *p;
5925 uint32_t platform, sdk;
5926
5927 vmlp_api_start(TASK_INFO); /* this is the only case that is relevant to the lock */
5928
5929 p = current_proc();
5930 platform = proc_platform(p);
5931 sdk = proc_sdk(p);
5932 if (original_task_info_count > TASK_VM_INFO_COUNT) {
5933 /*
5934 * Some iOS apps pass an incorrect value for
5935 * task_info_count, expressed in number of bytes
5936 * instead of number of "natural_t" elements, which
5937 * can lead to binary compatibility issues (including
5938 * stack corruption) when the data structure is
5939 * expanded in the future.
5940 * Let's make this potential issue visible by
5941 * logging about it...
5942 */
5943 if (!proc_is_simulated(p)) {
5944 os_log(OS_LOG_DEFAULT, "%s[%d] task_info: possibly invalid "
5945 "task_info_count %d > TASK_VM_INFO_COUNT=%d on platform %d sdk "
5946 "%d.%d.%d - please use TASK_VM_INFO_COUNT",
5947 proc_name_address(p), proc_pid(p),
5948 original_task_info_count, TASK_VM_INFO_COUNT,
5949 platform, (sdk >> 16), ((sdk >> 8) & 0xff), (sdk & 0xff));
5950 }
5951 DTRACE_VM4(suspicious_task_vm_info_count,
5952 mach_msg_type_number_t, original_task_info_count,
5953 mach_msg_type_number_t, TASK_VM_INFO_COUNT,
5954 uint32_t, platform,
5955 uint32_t, sdk);
5956 }
5957 #if __arm64__
5958 if (original_task_info_count > TASK_VM_INFO_REV2_COUNT &&
5959 platform == PLATFORM_IOS &&
5960 sdk != 0 &&
5961 (sdk >> 16) <= 12) {
5962 /*
5963 * Some iOS apps pass an incorrect value for
5964 * task_info_count, expressed in number of bytes
5965 * instead of number of "natural_t" elements.
5966 * For the sake of backwards binary compatibility
5967 * for apps built with an iOS12 or older SDK and using
5968 * the "rev2" data structure, let's fix task_info_count
5969 * for them, to avoid stomping past the actual end
5970 * of their buffer.
5971 */
5972 #if DEVELOPMENT || DEBUG
5973 printf("%s:%d %d[%s] rdar://49484582 task_info_count %d -> %d "
5974 "platform %d sdk %d.%d.%d\n", __FUNCTION__, __LINE__, proc_pid(p),
5975 proc_name_address(p), original_task_info_count,
5976 TASK_VM_INFO_REV2_COUNT, platform, (sdk >> 16),
5977 ((sdk >> 8) & 0xff), (sdk & 0xff));
5978 #endif /* DEVELOPMENT || DEBUG */
5979 DTRACE_VM4(workaround_task_vm_info_count,
5980 mach_msg_type_number_t, original_task_info_count,
5981 mach_msg_type_number_t, TASK_VM_INFO_REV2_COUNT,
5982 uint32_t, platform,
5983 uint32_t, sdk);
5984 original_task_info_count = TASK_VM_INFO_REV2_COUNT;
5985 *task_info_count = original_task_info_count;
5986 }
5987 if (original_task_info_count > TASK_VM_INFO_REV5_COUNT &&
5988 platform == PLATFORM_IOS &&
5989 sdk != 0 &&
5990 (sdk >> 16) <= 15) {
5991 /*
5992 * Some iOS apps pass an incorrect value for
5993 * task_info_count, expressed in number of bytes
5994 * instead of number of "natural_t" elements.
5995 */
5996 printf("%s:%d %d[%s] task_info_count=%d > TASK_VM_INFO_REV5_COUNT=%d "
5997 "platform %d sdk %d.%d.%d\n", __FUNCTION__, __LINE__, proc_pid(p),
5998 proc_name_address(p), original_task_info_count,
5999 TASK_VM_INFO_REV5_COUNT, platform, (sdk >> 16),
6000 ((sdk >> 8) & 0xff), (sdk & 0xff));
6001 DTRACE_VM4(workaround_task_vm_info_count,
6002 mach_msg_type_number_t, original_task_info_count,
6003 mach_msg_type_number_t, TASK_VM_INFO_REV5_COUNT,
6004 uint32_t, platform,
6005 uint32_t, sdk);
6006 #if DEVELOPMENT || DEBUG
6007 /*
6008 * For the sake of internal builds livability,
6009 * work around this user-space bug by capping the
6010 * buffer's size to what it was with the iOS15 SDK.
6011 */
6012 original_task_info_count = TASK_VM_INFO_REV5_COUNT;
6013 *task_info_count = original_task_info_count;
6014 #endif /* DEVELOPMENT || DEBUG */
6015 }
6016
6017 if (original_task_info_count > TASK_VM_INFO_REV7_COUNT &&
6018 platform == PLATFORM_IOS &&
6019 sdk != 0 &&
6020 (sdk >> 16) == 17) {
6021 /*
6022 * Some iOS apps still pass an incorrect value for
6023 * task_info_count, expressed in number of bytes
6024 * instead of number of "natural_t" elements.
6025 */
6026 printf("%s:%d %d[%s] task_info_count=%d > TASK_VM_INFO_REV7_COUNT=%d "
6027 "platform %d sdk %d.%d.%d\n", __FUNCTION__, __LINE__, proc_pid(p),
6028 proc_name_address(p), original_task_info_count,
6029 TASK_VM_INFO_REV7_COUNT, platform, (sdk >> 16),
6030 ((sdk >> 8) & 0xff), (sdk & 0xff));
6031 DTRACE_VM4(workaround_task_vm_info_count,
6032 mach_msg_type_number_t, original_task_info_count,
6033 mach_msg_type_number_t, TASK_VM_INFO_REV6_COUNT,
6034 uint32_t, platform,
6035 uint32_t, sdk);
6036 #if DEVELOPMENT || DEBUG
6037 /*
6038 * For the sake of internal builds livability,
6039 * work around this user-space bug by capping the
6040 * buffer's size to what it was with the iOS15 and iOS16 SDKs.
6041 */
6042 original_task_info_count = TASK_VM_INFO_REV6_COUNT;
6043 *task_info_count = original_task_info_count;
6044 #endif /* DEVELOPMENT || DEBUG */
6045 }
6046 #endif /* __arm64__ */
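/*
 * The user-space bug being worked around above, in a nutshell
 * (illustrative):
 *
 *	mach_msg_type_number_t count;
 *	count = sizeof(task_vm_info_data_t);	<- wrong: size in bytes
 *	count = TASK_VM_INFO_COUNT;		<- right: natural_t elements
 *
 * Passing the byte count makes the kernel believe the buffer has room
 * for a future, larger revision of the structure, and the kernel could
 * then write past the end of the caller's actual buffer.
 */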
6047
6048 if (*task_info_count < TASK_VM_INFO_REV0_COUNT) {
6049 error = KERN_INVALID_ARGUMENT;
6050 vmlp_api_end(TASK_INFO, error);
6051 break;
6052 }
6053
6054 vm_info = (task_vm_info_t)task_info_out;
6055
6056 /*
6057 * Do not hold both the task and map locks,
6058 * so convert the task lock into a map reference,
6059 * drop the task lock, then lock the map.
6060 */
6061 if (is_kernel_task) {
6062 map = kernel_map;
6063 task_unlock(task);
6064 /* no lock, no reference */
6065 } else {
6066 map = task->map;
6067 vm_map_reference(map);
6068 task_unlock(task);
6069 vm_map_lock_read(map);
6070 }
6071
6072 vmlp_range_event_all(map);
6073
6074 vm_info->virtual_size = (typeof(vm_info->virtual_size))vm_map_adjusted_size(map);
6075 vm_info->region_count = map->hdr.nentries;
6076 vm_info->page_size = vm_map_page_size(map);
6077
6078 ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &vm_info->resident_size);
6079 ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &vm_info->resident_size_peak);
6080
6081 vm_info->device = 0;
6082 vm_info->device_peak = 0;
6083 ledger_get_balance(task->ledger, task_ledgers.external, (ledger_amount_t *) &vm_info->external);
6084 ledger_get_lifetime_max(task->ledger, task_ledgers.external, (ledger_amount_t *) &vm_info->external_peak);
6085 ledger_get_balance(task->ledger, task_ledgers.internal, (ledger_amount_t *) &vm_info->internal);
6086 ledger_get_lifetime_max(task->ledger, task_ledgers.internal, (ledger_amount_t *) &vm_info->internal_peak);
6087 ledger_get_balance(task->ledger, task_ledgers.reusable, (ledger_amount_t *) &vm_info->reusable);
6088 ledger_get_lifetime_max(task->ledger, task_ledgers.reusable, (ledger_amount_t *) &vm_info->reusable_peak);
6089 ledger_get_balance(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t*) &vm_info->compressed);
6090 ledger_get_lifetime_max(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t*) &vm_info->compressed_peak);
6091 ledger_get_entries(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t*) &vm_info->compressed_lifetime, &tmp_amount);
6092 ledger_get_balance(task->ledger, task_ledgers.neural_nofootprint_total, (ledger_amount_t *) &vm_info->ledger_tag_neural_nofootprint_total);
6093 ledger_get_lifetime_max(task->ledger, task_ledgers.neural_nofootprint_total, (ledger_amount_t *) &vm_info->ledger_tag_neural_nofootprint_peak);
6094
6095 vm_info->purgeable_volatile_pmap = 0;
6096 vm_info->purgeable_volatile_resident = 0;
6097 vm_info->purgeable_volatile_virtual = 0;
6098 if (is_kernel_task) {
6099 /*
6100 * We do not maintain the detailed stats for the
6101 * kernel_pmap, so just count everything as
6102 * "internal"...
6103 */
6104 vm_info->internal = vm_info->resident_size;
6105 /*
6106 * ... but since the memory held by the VM compressor
6107 * in the kernel address space ought to be attributed
6108 * to user-space tasks, we subtract it from "internal"
6109 * to give memory reporting tools a more accurate idea
6110 * of what the kernel itself is actually using, instead
6111 * of making it look like the kernel is leaking memory
6112 * when the system is under memory pressure.
6113 */
6114 vm_info->internal -= (VM_PAGE_COMPRESSOR_COUNT *
6115 PAGE_SIZE);
6116 } else {
6117 mach_vm_size_t volatile_virtual_size;
6118 mach_vm_size_t volatile_resident_size;
6119 mach_vm_size_t volatile_compressed_size;
6120 mach_vm_size_t volatile_pmap_size;
6121 mach_vm_size_t volatile_compressed_pmap_size;
6122 kern_return_t kr;
6123
6124 if (flavor == TASK_VM_INFO_PURGEABLE) {
6125 kr = vm_map_query_volatile(
6126 map,
6127 &volatile_virtual_size,
6128 &volatile_resident_size,
6129 &volatile_compressed_size,
6130 &volatile_pmap_size,
6131 &volatile_compressed_pmap_size);
6132 if (kr == KERN_SUCCESS) {
6133 vm_info->purgeable_volatile_pmap =
6134 volatile_pmap_size;
6135 if (radar_20146450) {
6136 vm_info->compressed -=
6137 volatile_compressed_pmap_size;
6138 }
6139 vm_info->purgeable_volatile_resident =
6140 volatile_resident_size;
6141 vm_info->purgeable_volatile_virtual =
6142 volatile_virtual_size;
6143 }
6144 }
6145 }
6146 *task_info_count = TASK_VM_INFO_REV0_COUNT;
6147
6148 if (original_task_info_count >= TASK_VM_INFO_REV2_COUNT) {
6149 /* must be captured while we still have the map lock */
6150 vm_info->min_address = map->min_offset;
6151 vm_info->max_address = map->max_offset;
6152 }
6153
6154 /*
6155 * Done with vm map things, can drop the map lock and reference,
6156 * and take the task lock back.
6157 *
6158 * Re-validate that the task didn't die on us.
6159 */
6160 if (!is_kernel_task) {
6161 vm_map_unlock_read(map);
6162 vm_map_deallocate(map);
6163 }
6164 map = VM_MAP_NULL;
6165
6166 task_lock(task);
6167
6168 if ((task != current_task()) && (!task->active)) {
6169 error = KERN_INVALID_ARGUMENT;
6170 vmlp_api_end(TASK_INFO, error);
6171 break;
6172 }
6173
6174 if (original_task_info_count >= TASK_VM_INFO_REV1_COUNT) {
6175 vm_info->phys_footprint =
6176 (mach_vm_size_t) get_task_phys_footprint(task);
6177 *task_info_count = TASK_VM_INFO_REV1_COUNT;
6178 }
6179 if (original_task_info_count >= TASK_VM_INFO_REV2_COUNT) {
6180 /* data was captured above */
6181 *task_info_count = TASK_VM_INFO_REV2_COUNT;
6182 }
6183
6184 if (original_task_info_count >= TASK_VM_INFO_REV3_COUNT) {
6185 ledger_get_lifetime_max(task->ledger,
6186 task_ledgers.phys_footprint,
6187 &vm_info->ledger_phys_footprint_peak);
6188 ledger_get_balance(task->ledger,
6189 task_ledgers.purgeable_nonvolatile,
6190 &vm_info->ledger_purgeable_nonvolatile);
6191 ledger_get_balance(task->ledger,
6192 task_ledgers.purgeable_nonvolatile_compressed,
6193 &vm_info->ledger_purgeable_novolatile_compressed);
6194 ledger_get_balance(task->ledger,
6195 task_ledgers.purgeable_volatile,
6196 &vm_info->ledger_purgeable_volatile);
6197 ledger_get_balance(task->ledger,
6198 task_ledgers.purgeable_volatile_compressed,
6199 &vm_info->ledger_purgeable_volatile_compressed);
6200 ledger_get_balance(task->ledger,
6201 task_ledgers.network_nonvolatile,
6202 &vm_info->ledger_tag_network_nonvolatile);
6203 ledger_get_balance(task->ledger,
6204 task_ledgers.network_nonvolatile_compressed,
6205 &vm_info->ledger_tag_network_nonvolatile_compressed);
6206 ledger_get_balance(task->ledger,
6207 task_ledgers.network_volatile,
6208 &vm_info->ledger_tag_network_volatile);
6209 ledger_get_balance(task->ledger,
6210 task_ledgers.network_volatile_compressed,
6211 &vm_info->ledger_tag_network_volatile_compressed);
6212 ledger_get_balance(task->ledger,
6213 task_ledgers.media_footprint,
6214 &vm_info->ledger_tag_media_footprint);
6215 ledger_get_balance(task->ledger,
6216 task_ledgers.media_footprint_compressed,
6217 &vm_info->ledger_tag_media_footprint_compressed);
6218 ledger_get_balance(task->ledger,
6219 task_ledgers.media_nofootprint,
6220 &vm_info->ledger_tag_media_nofootprint);
6221 ledger_get_balance(task->ledger,
6222 task_ledgers.media_nofootprint_compressed,
6223 &vm_info->ledger_tag_media_nofootprint_compressed);
6224 ledger_get_balance(task->ledger,
6225 task_ledgers.graphics_footprint,
6226 &vm_info->ledger_tag_graphics_footprint);
6227 ledger_get_balance(task->ledger,
6228 task_ledgers.graphics_footprint_compressed,
6229 &vm_info->ledger_tag_graphics_footprint_compressed);
6230 ledger_get_balance(task->ledger,
6231 task_ledgers.graphics_nofootprint,
6232 &vm_info->ledger_tag_graphics_nofootprint);
6233 ledger_get_balance(task->ledger,
6234 task_ledgers.graphics_nofootprint_compressed,
6235 &vm_info->ledger_tag_graphics_nofootprint_compressed);
6236 ledger_get_balance(task->ledger,
6237 task_ledgers.neural_footprint,
6238 &vm_info->ledger_tag_neural_footprint);
6239 ledger_get_balance(task->ledger,
6240 task_ledgers.neural_footprint_compressed,
6241 &vm_info->ledger_tag_neural_footprint_compressed);
6242 ledger_get_balance(task->ledger,
6243 task_ledgers.neural_nofootprint,
6244 &vm_info->ledger_tag_neural_nofootprint);
6245 ledger_get_balance(task->ledger,
6246 task_ledgers.neural_nofootprint_compressed,
6247 &vm_info->ledger_tag_neural_nofootprint_compressed);
6248 *task_info_count = TASK_VM_INFO_REV3_COUNT;
6249 }
6250 if (original_task_info_count >= TASK_VM_INFO_REV4_COUNT) {
6251 if (get_bsdtask_info(task)) {
6252 vm_info->limit_bytes_remaining =
6253 memorystatus_available_memory_internal(get_bsdtask_info(task));
6254 } else {
6255 vm_info->limit_bytes_remaining = 0;
6256 }
6257 *task_info_count = TASK_VM_INFO_REV4_COUNT;
6258 }
6259 if (original_task_info_count >= TASK_VM_INFO_REV5_COUNT) {
6260 thread_t thread;
6261 uint64_t total = task->decompressions;
6262 queue_iterate(&task->threads, thread, thread_t, task_threads) {
6263 total += thread->decompressions;
6264 }
6265 vm_info->decompressions = (int32_t) MIN(total, INT32_MAX);
6266 *task_info_count = TASK_VM_INFO_REV5_COUNT;
6267 }
6268 if (original_task_info_count >= TASK_VM_INFO_REV6_COUNT) {
6269 ledger_get_balance(task->ledger, task_ledgers.swapins,
6270 &vm_info->ledger_swapins);
6271 *task_info_count = TASK_VM_INFO_REV6_COUNT;
6272 }
6273 if (original_task_info_count >= TASK_VM_INFO_REV7_COUNT) {
6274 ledger_get_balance(task->ledger,
6275 task_ledgers.neural_nofootprint_total,
6276 &vm_info->ledger_tag_neural_nofootprint_total);
6277 ledger_get_lifetime_max(task->ledger,
6278 task_ledgers.neural_nofootprint_total,
6279 &vm_info->ledger_tag_neural_nofootprint_peak);
6280 *task_info_count = TASK_VM_INFO_REV7_COUNT;
6281 }
6282
6283 vmlp_api_end(TASK_INFO, error);
6284 break;
6285 }
6286
6287 case TASK_WAIT_STATE_INFO:
6288 {
6289 /*
6290 * Deprecated flavor. Currently allowing some results until all users
6291 * stop calling it. The results may not be accurate.
6292 */
6293 task_wait_state_info_t wait_state_info;
6294 uint64_t total_sfi_ledger_val = 0;
6295
6296 if (*task_info_count < TASK_WAIT_STATE_INFO_COUNT) {
6297 error = KERN_INVALID_ARGUMENT;
6298 break;
6299 }
6300
6301 wait_state_info = (task_wait_state_info_t) task_info_out;
6302
6303 wait_state_info->total_wait_state_time = 0;
6304 bzero(wait_state_info->_reserved, sizeof(wait_state_info->_reserved));
6305
6306 #if CONFIG_SCHED_SFI
6307 int i, prev_lentry = -1;
6308 int64_t val_credit, val_debit;
6309
6310 for (i = 0; i < MAX_SFI_CLASS_ID; i++) {
6311 val_credit = 0;
6312 /*
6313 * checking with prev_lentry != entry ensures adjacent classes
6314 * which share the same ledger do not add wait times twice.
6315 * Note: Use ledger() call to get data for each individual sfi class.
6316 */
6317 if (prev_lentry != task_ledgers.sfi_wait_times[i] &&
6318 KERN_SUCCESS == ledger_get_entries(task->ledger,
6319 task_ledgers.sfi_wait_times[i], &val_credit, &val_debit)) {
6320 total_sfi_ledger_val += val_credit;
6321 }
6322 prev_lentry = task_ledgers.sfi_wait_times[i];
6323 }
6324
6325 #endif /* CONFIG_SCHED_SFI */
6326 wait_state_info->total_wait_sfi_state_time = total_sfi_ledger_val;
6327 *task_info_count = TASK_WAIT_STATE_INFO_COUNT;
6328
6329 break;
6330 }
6331 case TASK_VM_INFO_PURGEABLE_ACCOUNT:
6332 {
6333 #if DEVELOPMENT || DEBUG
6334 pvm_account_info_t acnt_info;
6335
6336 if (*task_info_count < PVM_ACCOUNT_INFO_COUNT) {
6337 error = KERN_INVALID_ARGUMENT;
6338 break;
6339 }
6340
6341 if (task_info_out == NULL) {
6342 error = KERN_INVALID_ARGUMENT;
6343 break;
6344 }
6345
6346 acnt_info = (pvm_account_info_t) task_info_out;
6347
6348 error = vm_purgeable_account(task, acnt_info);
6349
6350 *task_info_count = PVM_ACCOUNT_INFO_COUNT;
6351
6352 break;
6353 #else /* DEVELOPMENT || DEBUG */
6354 error = KERN_NOT_SUPPORTED;
6355 break;
6356 #endif /* DEVELOPMENT || DEBUG */
6357 }
6358 case TASK_FLAGS_INFO:
6359 {
6360 task_flags_info_t flags_info;
6361
6362 if (*task_info_count < TASK_FLAGS_INFO_COUNT) {
6363 error = KERN_INVALID_ARGUMENT;
6364 break;
6365 }
6366
6367 flags_info = (task_flags_info_t)task_info_out;
6368
6369 /* only publish the 64-bit flag of the task */
6370 flags_info->flags = task->t_flags & (TF_64B_ADDR | TF_64B_DATA);
6371
6372 *task_info_count = TASK_FLAGS_INFO_COUNT;
6373 break;
6374 }
6375
6376 case TASK_DEBUG_INFO_INTERNAL:
6377 {
6378 #if DEVELOPMENT || DEBUG
6379 task_debug_info_internal_t dbg_info;
6380 ipc_space_t space = task->itk_space;
6381 if (*task_info_count < TASK_DEBUG_INFO_INTERNAL_COUNT) {
6382 error = KERN_NOT_SUPPORTED;
6383 break;
6384 }
6385
6386 if (task_info_out == NULL) {
6387 error = KERN_INVALID_ARGUMENT;
6388 break;
6389 }
6390 dbg_info = (task_debug_info_internal_t) task_info_out;
6391 dbg_info->ipc_space_size = 0;
6392
6393 if (space) {
6394 smr_ipc_enter();
6395 ipc_entry_table_t table = smr_entered_load(&space->is_table);
6396 if (table) {
6397 dbg_info->ipc_space_size =
6398 ipc_entry_table_count(table);
6399 }
6400 smr_ipc_leave();
6401 }
6402
6403 dbg_info->suspend_count = task->suspend_count;
6404
6405 error = KERN_SUCCESS;
6406 *task_info_count = TASK_DEBUG_INFO_INTERNAL_COUNT;
6407 break;
6408 #else /* DEVELOPMENT || DEBUG */
6409 error = KERN_NOT_SUPPORTED;
6410 break;
6411 #endif /* DEVELOPMENT || DEBUG */
6412 }
6413 case TASK_SUSPEND_STATS_INFO:
6414 {
6415 #if CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG)
6416 if (*task_info_count < TASK_SUSPEND_STATS_INFO_COUNT || task_info_out == NULL) {
6417 error = KERN_INVALID_ARGUMENT;
6418 break;
6419 }
6420 error = _task_get_suspend_stats_locked(task, (task_suspend_stats_t)task_info_out);
6421 *task_info_count = TASK_SUSPEND_STATS_INFO_COUNT;
6422 break;
6423 #else /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6424 error = KERN_NOT_SUPPORTED;
6425 break;
6426 #endif /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6427 }
6428 case TASK_SUSPEND_SOURCES_INFO:
6429 {
6430 #if CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG)
6431 if (*task_info_count < TASK_SUSPEND_SOURCES_INFO_COUNT || task_info_out == NULL) {
6432 error = KERN_INVALID_ARGUMENT;
6433 break;
6434 }
6435 error = _task_get_suspend_sources_locked(task, (task_suspend_source_t)task_info_out);
6436 *task_info_count = TASK_SUSPEND_SOURCES_INFO_COUNT;
6437 break;
6438 #else /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6439 error = KERN_NOT_SUPPORTED;
6440 break;
6441 #endif /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6442 }
6443 case TASK_SECURITY_CONFIG_INFO:
6444 {
6445 task_security_config_info_t security_config;
6446
6447 if (*task_info_count < TASK_SECURITY_CONFIG_INFO_COUNT) {
6448 error = KERN_INVALID_ARGUMENT;
6449 break;
6450 }
6451
6452 security_config = (task_security_config_info_t)task_info_out;
6453 security_config->config = (uint32_t)task->security_config.value;
6454
6455 *task_info_count = TASK_SECURITY_CONFIG_INFO_COUNT;
6456 break;
6457 }
6458 case TASK_IPC_SPACE_POLICY_INFO:
6459 {
6460 task_ipc_space_policy_info_t ipc_space_config;
6461
6462 if (*task_info_count < TASK_IPC_SPACE_POLICY_INFO_COUNT) {
6463 error = KERN_INVALID_ARGUMENT;
6464 break;
6465 }
6466
6467 ipc_space_config = (task_ipc_space_policy_info_t)task_info_out;
6468 struct ipc_space *space = task->itk_space;
6469 if (space) {
6470 ipc_space_config->space_policy = (uint32_t)space->is_policy;
6471 *task_info_count = TASK_IPC_SPACE_POLICY_INFO_COUNT;
6472 }
6473 break;
6474 }
6475 default:
6476 error = KERN_INVALID_ARGUMENT;
6477 }
6478
6479 task_unlock(task);
6480 return error;
6481 }
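/*
 * A minimal user-space usage sketch for one common flavor (assuming the
 * caller is querying itself):
 *
 *	mach_task_basic_info_data_t binfo;
 *	mach_msg_type_number_t count = MACH_TASK_BASIC_INFO_COUNT;
 *	kern_return_t kr = task_info(mach_task_self(), MACH_TASK_BASIC_INFO,
 *	    (task_info_t)&binfo, &count);
 *	if (kr == KERN_SUCCESS) {
 *		use binfo.resident_size, binfo.virtual_size, ...
 *	}
 */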
6482
6483 /*
6484 * task_info_from_user
6485 *
6486 * When task_info is called from user space,
6487 * this function is executed as the MIG server side
6488 * instead of calling directly into task_info.
6489 * This makes it possible to perform additional
6490 * security checks on task_port.
6491 *
6492 * In the case of TASK_DYLD_INFO, we require the more-privileged
6493 * task_read_port, not the less-privileged task_name_port.
6494 *
6495 */
6496 kern_return_t
6497 task_info_from_user(
6498 mach_port_t task_port,
6499 task_flavor_t flavor,
6500 task_info_t task_info_out,
6501 mach_msg_type_number_t *task_info_count)
6502 {
6503 task_t task;
6504 kern_return_t ret;
6505
6506 if (flavor == TASK_DYLD_INFO) {
6507 task = convert_port_to_task_read(task_port);
6508 } else {
6509 task = convert_port_to_task_name(task_port);
6510 }
6511
6512 ret = task_info(task, flavor, task_info_out, task_info_count);
6513
6514 task_deallocate(task);
6515
6516 return ret;
6517 }
6518
6519 /*
6520 * Routine: task_dyld_process_info_update_helper
6521 *
6522 * Release send rights in release_ports.
6523 *
6524 * If no active ports are found in the task's dyld notifier array, unset the
6525 * magic value in user space to indicate this.
6526 *
6527 * Condition:
6528 * task's itk_lock is locked, and is unlocked upon return.
6529 * Global g_dyldinfo_mtx is locked, and is unlocked upon return.
6530 */
6531 void
6532 task_dyld_process_info_update_helper(
6533 task_t task,
6534 size_t active_count,
6535 vm_map_address_t magic_addr, /* a userspace address */
6536 ipc_port_t *release_ports,
6537 size_t release_count)
6538 {
6539 void *notifiers_ptr = NULL;
6540
6541 assert(release_count <= DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT);
6542
6543 if (active_count == 0) {
6544 assert(task->itk_dyld_notify != NULL);
6545 notifiers_ptr = task->itk_dyld_notify;
6546 task->itk_dyld_notify = NULL;
6547 itk_unlock(task);
6548
6549 kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
6550 (void)copyoutmap_atomic32(task->map, MACH_PORT_NULL, magic_addr); /* unset magic */
6551 } else {
6552 itk_unlock(task);
6553 (void)copyoutmap_atomic32(task->map, (mach_port_name_t)DYLD_PROCESS_INFO_NOTIFY_MAGIC,
6554 magic_addr); /* reset magic */
6555 }
6556
6557 lck_mtx_unlock(&g_dyldinfo_mtx);
6558
6559 for (size_t i = 0; i < release_count; i++) {
6560 ipc_port_release_send(release_ports[i]);
6561 }
6562 }
6563
6564 /*
6565 * Routine: task_dyld_process_info_notify_register
6566 *
6567 * Insert a send right to target task's itk_dyld_notify array. Allocate kernel
6568 * memory for the array if it's the first port to be registered. Also cleanup
6569 * any dead rights found in the array.
6570 *
6571 * Consumes sright if it returns KERN_SUCCESS; otherwise MIG will destroy it.
6572 *
6573 * Args:
6574 * task: Target task for the registration.
6575 * sright: A send right.
6576 *
6577 * Returns:
6578 * KERN_SUCCESS: Registration succeeded.
6579 * KERN_INVALID_TASK: task is invalid.
6580 * KERN_INVALID_RIGHT: sright is invalid.
6581 * KERN_DENIED: Security policy denied this call.
6582 * KERN_RESOURCE_SHORTAGE: Kernel memory allocation failed.
6583 * KERN_NO_SPACE: No available notifier port slot left for this task.
6584 * KERN_RIGHT_EXISTS: The notifier port is already registered and active.
6585 *
6586 * For other error codes, see task_info().
6587 *
6588 * See Also:
6589 * task_dyld_process_info_notify_get_trap() in mach_kernelrpc.c
6590 */
6591 kern_return_t
6592 task_dyld_process_info_notify_register(
6593 task_t task,
6594 ipc_port_t sright)
6595 {
6596 struct task_dyld_info dyld_info;
6597 mach_msg_type_number_t info_count = TASK_DYLD_INFO_COUNT;
6598 ipc_port_t release_ports[DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT];
6599 uint32_t release_count = 0, active_count = 0;
6600 mach_vm_address_t ports_addr; /* a user space address */
6601 kern_return_t kr;
6602 boolean_t right_exists = false;
6603 ipc_port_t *notifiers_ptr = NULL;
6604 ipc_port_t *portp;
6605
6606 if (task == TASK_NULL || task == kernel_task) {
6607 return KERN_INVALID_TASK;
6608 }
6609
6610 if (!ipc_can_stash_naked_send(sright)) {
6611 return KERN_INVALID_RIGHT;
6612 }
6613
6614 if (!IP_VALID(sright)) {
6615 return KERN_INVALID_RIGHT;
6616 }
6617
6618 #if CONFIG_MACF
6619 if (mac_task_check_dyld_process_info_notify_register()) {
6620 return KERN_DENIED;
6621 }
6622 #endif
6623
6624 kr = task_info(task, TASK_DYLD_INFO, (task_info_t)&dyld_info, &info_count);
6625 if (kr) {
6626 return kr;
6627 }
6628
6629 if (dyld_info.all_image_info_format == TASK_DYLD_ALL_IMAGE_INFO_32) {
6630 ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6631 offsetof(struct user32_dyld_all_image_infos, notifyMachPorts));
6632 } else {
6633 ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6634 offsetof(struct user64_dyld_all_image_infos, notifyMachPorts));
6635 }
6636
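/*
 * Allocate the notifier array before taking any locks, since kalloc may
 * block. After locking we re-check: if another thread installed an array
 * in the meantime, our copy is freed on the way out; if the array we
 * observed above was torn down while we were unlocked, drop the locks
 * and retry with a fresh allocation.
 */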
6637 retry:
6638 if (task->itk_dyld_notify == NULL) {
6639 notifiers_ptr = kalloc_type(ipc_port_t,
6640 DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT,
6641 Z_WAITOK | Z_ZERO | Z_NOFAIL);
6642 }
6643
6644 lck_mtx_lock(&g_dyldinfo_mtx);
6645 itk_lock(task);
6646
6647 if (task->itk_dyld_notify == NULL) {
6648 if (notifiers_ptr == NULL) {
6649 itk_unlock(task);
6650 lck_mtx_unlock(&g_dyldinfo_mtx);
6651 goto retry;
6652 }
6653 task->itk_dyld_notify = notifiers_ptr;
6654 notifiers_ptr = NULL;
6655 }
6656
6657 assert(task->itk_dyld_notify != NULL);
6658 /* First pass: clear dead names and check for duplicate registration */
6659 for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
6660 portp = &task->itk_dyld_notify[slot];
6661 if (*portp != IPC_PORT_NULL && !ip_active(*portp)) {
6662 release_ports[release_count++] = *portp;
6663 *portp = IPC_PORT_NULL;
6664 } else if (*portp == sright) {
6665 /* the port is already registered and is active */
6666 right_exists = true;
6667 }
6668
6669 if (*portp != IPC_PORT_NULL) {
6670 active_count++;
6671 }
6672 }
6673
6674 if (right_exists) {
6675 /* skip second pass */
6676 kr = KERN_RIGHT_EXISTS;
6677 goto out;
6678 }
6679
6680 /* Second pass: register the port */
6681 kr = KERN_NO_SPACE;
6682 for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
6683 portp = &task->itk_dyld_notify[slot];
6684 if (*portp == IPC_PORT_NULL) {
6685 *portp = sright;
6686 active_count++;
6687 kr = KERN_SUCCESS;
6688 break;
6689 }
6690 }
6691
6692 out:
6693 assert(active_count > 0);
6694
6695 task_dyld_process_info_update_helper(task, active_count,
6696 (vm_map_address_t)ports_addr, release_ports, release_count);
6697 /* itk_lock, g_dyldinfo_mtx are unlocked upon return */
6698
6699 kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
6700
6701 return kr;
6702 }
6703
6704 /*
6705 * Routine: task_dyld_process_info_notify_deregister
6706 *
6707 * Remove a send right in target task's itk_dyld_notify array matching the receive
6708 * right name passed in. Deallocate kernel memory for the array if it's the last port to
6709 * be deregistered, or all ports have died. Also cleanup any dead rights found in the array.
6710 *
6711 * Does not consume any reference.
6712 *
6713 * Args:
6714 * task: Target task for the deregistration.
6715 * rcv_name: The name denoting the receive right in caller's space.
6716 *
6717 * Returns:
6718 * KERN_SUCCESS: A matching entry was found and deregistration succeeded.
6719 * KERN_INVALID_TASK: task is invalid.
6720 * KERN_INVALID_NAME: name is invalid.
6721 * KERN_DENIED: Security policy denied this call.
6722 * KERN_FAILURE: A matching entry is not found.
6723 * KERN_INVALID_RIGHT: The name passed in does not represent a valid rcv right.
6724 *
6725 * For other error codes, see task_info().
6726 *
6727 * See Also:
6728 * task_dyld_process_info_notify_get_trap() in mach_kernelrpc.c
6729 */
6730 kern_return_t
6731 task_dyld_process_info_notify_deregister(
6732 task_t task,
6733 mach_port_name_t rcv_name)
6734 {
6735 struct task_dyld_info dyld_info;
6736 mach_msg_type_number_t info_count = TASK_DYLD_INFO_COUNT;
6737 ipc_port_t release_ports[DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT];
6738 uint32_t release_count = 0, active_count = 0;
6739 boolean_t port_found = false;
6740 mach_vm_address_t ports_addr; /* a user space address */
6741 ipc_port_t sright;
6742 kern_return_t kr;
6743 ipc_port_t *portp;
6744
6745 if (task == TASK_NULL || task == kernel_task) {
6746 return KERN_INVALID_TASK;
6747 }
6748
6749 if (!MACH_PORT_VALID(rcv_name)) {
6750 return KERN_INVALID_NAME;
6751 }
6752
6753 #if CONFIG_MACF
6754 if (mac_task_check_dyld_process_info_notify_register()) {
6755 return KERN_DENIED;
6756 }
6757 #endif
6758
6759 kr = task_info(task, TASK_DYLD_INFO, (task_info_t)&dyld_info, &info_count);
6760 if (kr) {
6761 return kr;
6762 }
6763
6764 if (dyld_info.all_image_info_format == TASK_DYLD_ALL_IMAGE_INFO_32) {
6765 ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6766 offsetof(struct user32_dyld_all_image_infos, notifyMachPorts));
6767 } else {
6768 ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6769 offsetof(struct user64_dyld_all_image_infos, notifyMachPorts));
6770 }
6771
6772 kr = ipc_port_translate_receive(current_space(), rcv_name, &sright); /* does not produce port ref */
6773 if (kr) {
6774 return KERN_INVALID_RIGHT;
6775 }
6776
6777 ip_reference(sright);
6778 ip_mq_unlock(sright);
6779
6780 assert(sright != IPC_PORT_NULL);
6781
6782 lck_mtx_lock(&g_dyldinfo_mtx);
6783 itk_lock(task);
6784
6785 if (task->itk_dyld_notify == NULL) {
6786 itk_unlock(task);
6787 lck_mtx_unlock(&g_dyldinfo_mtx);
6788 ip_release(sright);
6789 return KERN_FAILURE;
6790 }
6791
6792 for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
6793 portp = &task->itk_dyld_notify[slot];
6794 if (*portp == sright) {
6795 release_ports[release_count++] = *portp;
6796 *portp = IPC_PORT_NULL;
6797 port_found = true;
6798 } else if ((*portp != IPC_PORT_NULL && !ip_active(*portp))) {
6799 release_ports[release_count++] = *portp;
6800 *portp = IPC_PORT_NULL;
6801 }
6802
6803 if (*portp != IPC_PORT_NULL) {
6804 active_count++;
6805 }
6806 }
6807
6808 task_dyld_process_info_update_helper(task, active_count,
6809 (vm_map_address_t)ports_addr, release_ports, release_count);
6810 /* itk_lock, g_dyldinfo_mtx are unlocked upon return */
6811
6812 ip_release(sright);
6813
6814 return port_found ? KERN_SUCCESS : KERN_FAILURE;
6815 }
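
/*
 * Both the register and deregister paths above share a slot-scan idiom:
 * walk the fixed-size itk_dyld_notify array once, moving dead rights to a
 * local release list (so they can be released after the locks are dropped),
 * and count the surviving entries.  A minimal sketch of that idiom in
 * isolation -- the array and its size are stand-ins for the task fields
 * used above:
 */
#if 0 /* Illustrative sketch only -- not part of the build. */
static uint32_t
dyld_notify_slot_prune(ipc_port_t slots[], int nslots,
    ipc_port_t release_ports[], uint32_t *release_count)
{
	uint32_t active_count = 0;

	for (int slot = 0; slot < nslots; slot++) {
		if (slots[slot] != IPC_PORT_NULL && !ip_active(slots[slot])) {
			/* defer ip_release() until all locks are dropped */
			release_ports[(*release_count)++] = slots[slot];
			slots[slot] = IPC_PORT_NULL;
		}
		if (slots[slot] != IPC_PORT_NULL) {
			active_count++;
		}
	}
	return active_count;
}
#endif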
6816
6817 /*
6818  * task_power_info_locked
6819 *
6820 * Returns power stats for the task.
6821 * Note: Called with task locked.
6822 */
6823 void
6824 task_power_info_locked(
6825 task_t task,
6826 task_power_info_t info,
6827 gpu_energy_data_t ginfo,
6828 task_power_info_v2_t infov2,
6829 struct task_power_info_extra *extra_info)
6830 {
6831 thread_t thread;
6832 ledger_amount_t tmp;
6833
6834 uint64_t runnable_time_sum = 0;
6835
6836 task_lock_assert_owned(task);
6837
6838 ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
6839 (ledger_amount_t *)&info->task_interrupt_wakeups, &tmp);
6840 ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
6841 (ledger_amount_t *)&info->task_platform_idle_wakeups, &tmp);
6842
6843 info->task_timer_wakeups_bin_1 = task->task_timer_wakeups_bin_1;
6844 info->task_timer_wakeups_bin_2 = task->task_timer_wakeups_bin_2;
6845
6846 struct recount_usage usage = { 0 };
6847 struct recount_usage usage_perf = { 0 };
6848 recount_task_usage_perf_only(task, &usage, &usage_perf);
6849
6850 info->total_user = usage.ru_metrics[RCT_LVL_USER].rm_time_mach;
6851 info->total_system = recount_usage_system_time_mach(&usage);
6852 runnable_time_sum = task->total_runnable_time;
6853
6854 if (ginfo) {
6855 ginfo->task_gpu_utilisation = task->task_gpu_ns;
6856 }
6857
6858 if (infov2) {
6859 infov2->task_ptime = recount_usage_time_mach(&usage_perf);
6860 infov2->task_pset_switches = task->ps_switch;
6861 #if CONFIG_PERVASIVE_ENERGY
6862 infov2->task_energy = usage.ru_energy_nj;
6863 #endif /* CONFIG_PERVASIVE_ENERGY */
6864 }
6865
6866 queue_iterate(&task->threads, thread, thread_t, task_threads) {
6867 spl_t x;
6868
6869 if (thread->options & TH_OPT_IDLE_THREAD) {
6870 continue;
6871 }
6872
6873 x = splsched();
6874 thread_lock(thread);
6875
6876 info->task_timer_wakeups_bin_1 += thread->thread_timer_wakeups_bin_1;
6877 info->task_timer_wakeups_bin_2 += thread->thread_timer_wakeups_bin_2;
6878
6879 if (infov2) {
6880 infov2->task_pset_switches += thread->ps_switch;
6881 }
6882
6883 runnable_time_sum += timer_grab(&thread->runnable_timer);
6884
6885 if (ginfo) {
6886 ginfo->task_gpu_utilisation += ml_gpu_stat(thread);
6887 }
6888 thread_unlock(thread);
6889 splx(x);
6890 }
6891
6892 if (extra_info) {
6893 extra_info->runnable_time = runnable_time_sum;
6894 #if CONFIG_PERVASIVE_CPI
6895 extra_info->cycles = recount_usage_cycles(&usage);
6896 extra_info->instructions = recount_usage_instructions(&usage);
6897 extra_info->pcycles = recount_usage_cycles(&usage_perf);
6898 extra_info->pinstructions = recount_usage_instructions(&usage_perf);
6899 extra_info->user_ptime = usage_perf.ru_metrics[RCT_LVL_USER].rm_time_mach;
6900 extra_info->system_ptime = recount_usage_system_time_mach(&usage_perf);
6901 #endif // CONFIG_PERVASIVE_CPI
6902 #if CONFIG_PERVASIVE_ENERGY
6903 extra_info->energy = usage.ru_energy_nj;
6904 extra_info->penergy = usage_perf.ru_energy_nj;
6905 #endif // CONFIG_PERVASIVE_ENERGY
6906 #if RECOUNT_SECURE_METRICS
6907 if (PE_i_can_has_debugger(NULL)) {
6908 extra_info->secure_time = usage.ru_metrics[RCT_LVL_SECURE].rm_time_mach;
6909 extra_info->secure_ptime = usage_perf.ru_metrics[RCT_LVL_SECURE].rm_time_mach;
6910 }
6911 #endif // RECOUNT_SECURE_METRICS
6912 }
6913 }
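
/*
 * task_power_info_locked() asserts ownership of the task lock because it
 * walks task->threads.  A hypothetical caller therefore brackets it with
 * task_lock()/task_unlock():
 */
#if 0 /* Illustrative sketch only -- not part of the build. */
static void
sample_task_power(task_t task, task_power_info_data_t *info)
{
	task_lock(task);
	/* v1 info only; the GPU, v2 and extra out-params are optional (NULL) */
	task_power_info_locked(task, info, NULL, NULL, NULL);
	task_unlock(task);
}
#endif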
6914
6915 /*
6916 * task_gpu_utilisation
6917 *
6918  * Returns the total GPU time used by all the threads of the task
6919  * (both dead and alive).
6920 */
6921 uint64_t
6922 task_gpu_utilisation(
6923 task_t task)
6924 {
6925 uint64_t gpu_time = 0;
6926 #if defined(__x86_64__)
6927 thread_t thread;
6928
6929 task_lock(task);
6930 gpu_time += task->task_gpu_ns;
6931
6932 queue_iterate(&task->threads, thread, thread_t, task_threads) {
6933 spl_t x;
6934 x = splsched();
6935 thread_lock(thread);
6936 gpu_time += ml_gpu_stat(thread);
6937 thread_unlock(thread);
6938 splx(x);
6939 }
6940
6941 task_unlock(task);
6942 #else /* defined(__x86_64__) */
6943 /* silence compiler warning */
6944 (void)task;
6945 #endif /* defined(__x86_64__) */
6946 return gpu_time;
6947 }
6948
6949 /* This function updates the cpu time in the arrays for each
6950 * effective and requested QoS class
6951 */
6952 void
6953 task_update_cpu_time_qos_stats(
6954 task_t task,
6955 uint64_t *eqos_stats,
6956 uint64_t *rqos_stats)
6957 {
6958 if (!eqos_stats && !rqos_stats) {
6959 return;
6960 }
6961
6962 task_lock(task);
6963 thread_t thread;
6964 queue_iterate(&task->threads, thread, thread_t, task_threads) {
6965 if (thread->options & TH_OPT_IDLE_THREAD) {
6966 continue;
6967 }
6968
6969 thread_update_qos_cpu_time(thread);
6970 }
6971
6972 if (eqos_stats) {
6973 eqos_stats[THREAD_QOS_DEFAULT] += task->cpu_time_eqos_stats.cpu_time_qos_default;
6974 eqos_stats[THREAD_QOS_MAINTENANCE] += task->cpu_time_eqos_stats.cpu_time_qos_maintenance;
6975 eqos_stats[THREAD_QOS_BACKGROUND] += task->cpu_time_eqos_stats.cpu_time_qos_background;
6976 eqos_stats[THREAD_QOS_UTILITY] += task->cpu_time_eqos_stats.cpu_time_qos_utility;
6977 eqos_stats[THREAD_QOS_LEGACY] += task->cpu_time_eqos_stats.cpu_time_qos_legacy;
6978 eqos_stats[THREAD_QOS_USER_INITIATED] += task->cpu_time_eqos_stats.cpu_time_qos_user_initiated;
6979 eqos_stats[THREAD_QOS_USER_INTERACTIVE] += task->cpu_time_eqos_stats.cpu_time_qos_user_interactive;
6980 }
6981
6982 if (rqos_stats) {
6983 rqos_stats[THREAD_QOS_DEFAULT] += task->cpu_time_rqos_stats.cpu_time_qos_default;
6984 rqos_stats[THREAD_QOS_MAINTENANCE] += task->cpu_time_rqos_stats.cpu_time_qos_maintenance;
6985 rqos_stats[THREAD_QOS_BACKGROUND] += task->cpu_time_rqos_stats.cpu_time_qos_background;
6986 rqos_stats[THREAD_QOS_UTILITY] += task->cpu_time_rqos_stats.cpu_time_qos_utility;
6987 rqos_stats[THREAD_QOS_LEGACY] += task->cpu_time_rqos_stats.cpu_time_qos_legacy;
6988 rqos_stats[THREAD_QOS_USER_INITIATED] += task->cpu_time_rqos_stats.cpu_time_qos_user_initiated;
6989 rqos_stats[THREAD_QOS_USER_INTERACTIVE] += task->cpu_time_rqos_stats.cpu_time_qos_user_interactive;
6990 }
6991
6992 task_unlock(task);
6993 }
6994
6995 kern_return_t
6996 task_purgable_info(
6997 task_t task,
6998 task_purgable_info_t *stats)
6999 {
7000 if (task == TASK_NULL || stats == NULL) {
7001 return KERN_INVALID_ARGUMENT;
7002 }
7003 /* Take task reference */
7004 task_reference(task);
7005 vm_purgeable_stats((vm_purgeable_info_t)stats, task);
7006 /* Drop task reference */
7007 task_deallocate(task);
7008 return KERN_SUCCESS;
7009 }
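
/*
 * task_purgable_info() shows the minimal reference discipline for querying
 * a task the caller may not otherwise hold: take a task reference so the
 * task cannot be reclaimed mid-query, then drop it.  A hypothetical query
 * helper follows the same shape:
 */
#if 0 /* Illustrative sketch only -- not part of the build. */
static kern_return_t
query_task_example(task_t task)
{
	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}
	task_reference(task);
	/* ... task-owned structures may be inspected safely here ... */
	task_deallocate(task);
	return KERN_SUCCESS;
}
#endif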
7010
7011 void
7012 task_vtimer_set(
7013 task_t task,
7014 integer_t which)
7015 {
7016 thread_t thread;
7017 spl_t x;
7018
7019 task_lock(task);
7020
7021 task->vtimers |= which;
7022
7023 switch (which) {
7024 case TASK_VTIMER_USER:
7025 queue_iterate(&task->threads, thread, thread_t, task_threads) {
7026 x = splsched();
7027 thread_lock(thread);
7028 struct recount_times_mach times = recount_thread_times(thread);
7029 thread->vtimer_user_save = times.rtm_user;
7030 thread_unlock(thread);
7031 splx(x);
7032 }
7033 break;
7034
7035 case TASK_VTIMER_PROF:
7036 queue_iterate(&task->threads, thread, thread_t, task_threads) {
7037 x = splsched();
7038 thread_lock(thread);
7039 thread->vtimer_prof_save = recount_thread_time_mach(thread);
7040 thread_unlock(thread);
7041 splx(x);
7042 }
7043 break;
7044
7045 case TASK_VTIMER_RLIM:
7046 queue_iterate(&task->threads, thread, thread_t, task_threads) {
7047 x = splsched();
7048 thread_lock(thread);
7049 thread->vtimer_rlim_save = recount_thread_time_mach(thread);
7050 thread_unlock(thread);
7051 splx(x);
7052 }
7053 break;
7054 }
7055
7056 task_unlock(task);
7057 }
7058
7059 void
7060 task_vtimer_clear(
7061 task_t task,
7062 integer_t which)
7063 {
7064 task_lock(task);
7065
7066 task->vtimers &= ~which;
7067
7068 task_unlock(task);
7069 }
7070
7071 void
7072 task_vtimer_update(
7073 __unused
7074 task_t task,
7075 integer_t which,
7076 uint32_t *microsecs)
7077 {
7078 thread_t thread = current_thread();
7079 uint32_t tdelt = 0;
7080 clock_sec_t secs = 0;
7081 uint64_t tsum;
7082
7083 assert(task == current_task());
7084
7085 spl_t s = splsched();
7086 thread_lock(thread);
7087
7088 if ((task->vtimers & which) != (uint32_t)which) {
7089 thread_unlock(thread);
7090 splx(s);
7091 return;
7092 }
7093
7094 switch (which) {
7095 case TASK_VTIMER_USER:;
7096 struct recount_times_mach times = recount_thread_times(thread);
7097 tsum = times.rtm_user;
7098 tdelt = (uint32_t)(tsum - thread->vtimer_user_save);
7099 thread->vtimer_user_save = tsum;
7100 absolutetime_to_microtime(tdelt, &secs, microsecs);
7101 break;
7102
7103 case TASK_VTIMER_PROF:
7104 tsum = recount_current_thread_time_mach();
7105 tdelt = (uint32_t)(tsum - thread->vtimer_prof_save);
7106 absolutetime_to_microtime(tdelt, &secs, microsecs);
7107 /* if the time delta is smaller than a usec, ignore */
7108 if (*microsecs != 0) {
7109 thread->vtimer_prof_save = tsum;
7110 }
7111 break;
7112
7113 case TASK_VTIMER_RLIM:
7114 tsum = recount_current_thread_time_mach();
7115 tdelt = (uint32_t)(tsum - thread->vtimer_rlim_save);
7116 thread->vtimer_rlim_save = tsum;
7117 absolutetime_to_microtime(tdelt, &secs, microsecs);
7118 break;
7119 }
7120
7121 thread_unlock(thread);
7122 splx(s);
7123 }
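
/*
 * The vtimer routines are used in pairs: task_vtimer_set() snapshots a
 * per-thread baseline, and task_vtimer_update() -- which must run on a
 * thread of the task itself -- reports the delta since that baseline in
 * microseconds.  A hypothetical use of the TASK_VTIMER_USER timer:
 */
#if 0 /* Illustrative sketch only -- not part of the build. */
static uint32_t
vtimer_user_elapsed_usecs(void)
{
	task_t task = current_task();
	uint32_t usecs = 0;

	task_vtimer_set(task, TASK_VTIMER_USER);
	/* ... the current thread executes in user mode for a while ... */
	task_vtimer_update(task, TASK_VTIMER_USER, &usecs);
	return usecs;
}
#endif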
7124
7125 uint64_t
7126 get_task_dispatchqueue_offset(
7127 task_t task)
7128 {
7129 return task->dispatchqueue_offset;
7130 }
7131
7132 void
7133 task_synchronizer_destroy_all(task_t task)
7134 {
7135 /*
7136 * Destroy owned semaphores
7137 */
7138 semaphore_destroy_all(task);
7139 }
7140
7141 /*
7142 * Install default (machine-dependent) initial thread state
7143 * on the task. Subsequent thread creation will have this initial
7144 * state set on the thread by machine_thread_inherit_taskwide().
7145 * Flavors and structures are exactly the same as those to thread_set_state()
7146 */
7147 kern_return_t
7148 task_set_state(
7149 task_t task,
7150 int flavor,
7151 thread_state_t state,
7152 mach_msg_type_number_t state_count)
7153 {
7154 kern_return_t ret;
7155
7156 if (task == TASK_NULL) {
7157 return KERN_INVALID_ARGUMENT;
7158 }
7159
7160 task_lock(task);
7161
7162 if (!task->active) {
7163 task_unlock(task);
7164 return KERN_FAILURE;
7165 }
7166
7167 ret = machine_task_set_state(task, flavor, state, state_count);
7168
7169 task_unlock(task);
7170 return ret;
7171 }
7172
7173 /*
7174 * Examine the default (machine-dependent) initial thread state
7175 * on the task, as set by task_set_state(). Flavors and structures
7176 * are exactly the same as those passed to thread_get_state().
7177 */
7178 kern_return_t
7179 task_get_state(
7180 task_t task,
7181 int flavor,
7182 thread_state_t state,
7183 mach_msg_type_number_t *state_count)
7184 {
7185 kern_return_t ret;
7186
7187 if (task == TASK_NULL) {
7188 return KERN_INVALID_ARGUMENT;
7189 }
7190
7191 task_lock(task);
7192
7193 if (!task->active) {
7194 task_unlock(task);
7195 return KERN_FAILURE;
7196 }
7197
7198 ret = machine_task_get_state(task, flavor, state, state_count);
7199
7200 task_unlock(task);
7201 return ret;
7202 }
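
/*
 * task_set_state()/task_get_state() round-trip the machine-dependent
 * default thread state.  A hypothetical round-trip; the flavor is
 * machine-dependent, and ARM_THREAD_STATE64 is used purely as an example:
 */
#if 0 /* Illustrative sketch only -- not part of the build. */
static kern_return_t
roundtrip_default_state(task_t task, thread_state_t state,
    mach_msg_type_number_t count)
{
	kern_return_t kr;

	kr = task_set_state(task, ARM_THREAD_STATE64, state, count);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* count is in/out on the get side */
	return task_get_state(task, ARM_THREAD_STATE64, state, &count);
}
#endif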
7203
7204
7205 static kern_return_t __attribute__((noinline, not_tail_called))
7206 PROC_VIOLATED_GUARD__SEND_EXC_GUARD(
7207 mach_exception_code_t code,
7208 mach_exception_subcode_t subcode,
7209 void *reason,
7210 boolean_t backtrace_only)
7211 {
7212 #ifdef MACH_BSD
7213 if (1 == proc_selfpid()) {
7214 return KERN_NOT_SUPPORTED; // initproc is immune
7215 }
7216 #endif
7217 mach_exception_data_type_t codes[EXCEPTION_CODE_MAX] = {
7218 [0] = code,
7219 [1] = subcode,
7220 };
7221 task_t task = current_task();
7222 kern_return_t kr;
7223 void *bsd_info = get_bsdtask_info(task);
7224
7225 /* (See jetsam-related comments below) */
7226
7227 proc_memstat_skip(bsd_info, TRUE);
7228 kr = task_enqueue_exception_with_corpse(task, EXC_GUARD, codes, 2, reason, backtrace_only);
7229 proc_memstat_skip(bsd_info, FALSE);
7230 return kr;
7231 }
7232
7233 kern_return_t
7234 task_violated_guard(
7235 mach_exception_code_t code,
7236 mach_exception_subcode_t subcode,
7237 void *reason,
7238 bool backtrace_only)
7239 {
7240 return PROC_VIOLATED_GUARD__SEND_EXC_GUARD(code, subcode, reason, backtrace_only);
7241 }
7242
7243
7244 #if CONFIG_MEMORYSTATUS
7245
7246 bool
7247 task_get_memlimit_is_active(task_t task)
7248 {
7249 assert(task != NULL);
7250
7251 return os_atomic_load(&task->memlimit_flags, relaxed) & TASK_MEMLIMIT_IS_ACTIVE;
7252 }
7253
7254 void
7255 task_set_memlimit_is_active(task_t task, bool memlimit_is_active)
7256 {
7257 assert(task != NULL);
7258
7259 if (memlimit_is_active) {
7260 os_atomic_or(&task->memlimit_flags, TASK_MEMLIMIT_IS_ACTIVE, relaxed);
7261 } else {
7262 os_atomic_andnot(&task->memlimit_flags, TASK_MEMLIMIT_IS_ACTIVE, relaxed);
7263 }
7264 }
7265
7266 bool
7267 task_get_memlimit_is_fatal(task_t task)
7268 {
7269 assert(task != NULL);
7270
7271 return os_atomic_load(&task->memlimit_flags, relaxed) & TASK_MEMLIMIT_IS_FATAL;
7272 }
7273
7274 void
7275 task_set_memlimit_is_fatal(task_t task, bool memlimit_is_fatal)
7276 {
7277 assert(task != NULL);
7278
7279 if (memlimit_is_fatal) {
7280 os_atomic_or(&task->memlimit_flags, TASK_MEMLIMIT_IS_FATAL, relaxed);
7281 } else {
7282 os_atomic_andnot(&task->memlimit_flags, TASK_MEMLIMIT_IS_FATAL, relaxed);
7283 }
7284 }
7285
7286 uint64_t
7287 task_get_dirty_start(task_t task)
7288 {
7289 return task->memstat_dirty_start;
7290 }
7291
7292 void
7293 task_set_dirty_start(task_t task, uint64_t start)
7294 {
7295 task_lock(task);
7296 task->memstat_dirty_start = start;
7297 task_unlock(task);
7298 }
7299
7300 bool
7301 task_set_exc_resource_bit(task_t task, bool memlimit_is_active)
7302 {
7303 /*
7304 * Sets the specified EXC_RESOURCE bit if not set already, and returns
7305 * true if the bit was changed (i.e. it was 0 before).
7306 */
7307
7308 task_memlimit_flags_t memlimit_orig;
7309 task_memlimit_flags_t bit =
7310 memlimit_is_active ?
7311 TASK_MEMLIMIT_ACTIVE_EXC_RESOURCE :
7312 TASK_MEMLIMIT_INACTIVE_EXC_RESOURCE;
7313
7314 memlimit_orig = os_atomic_or_orig(&task->memlimit_flags, bit, acquire);
7315
7316 return !(memlimit_orig & bit);
7317 }
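
/*
 * task_set_exc_resource_bit() gets its once-only semantics from
 * os_atomic_or_orig(), which returns the flag word as it was before the
 * OR: in a race, exactly one caller observes the bit clear.  The same
 * pattern in isolation, on a hypothetical flag word:
 */
#if 0 /* Illustrative sketch only -- not part of the build. */
static bool
claim_bit_once(task_memlimit_flags_t *flags, task_memlimit_flags_t bit)
{
	/* orig has `bit` clear only for the first claimant */
	task_memlimit_flags_t orig = os_atomic_or_orig(flags, bit, acquire);

	return !(orig & bit);
}
#endif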
7318
7319 void
7320 task_reset_triggered_exc_resource(task_t task, bool memlimit_is_active)
7321 {
7322 task_memlimit_flags_t bit =
7323 memlimit_is_active ?
7324 TASK_MEMLIMIT_ACTIVE_EXC_RESOURCE :
7325 TASK_MEMLIMIT_INACTIVE_EXC_RESOURCE;
7326
7327 os_atomic_andnot(&task->memlimit_flags, bit, relaxed);
7328 }
7329
7330 bool
7331 task_get_jetsam_realtime_audio(task_t task)
7332 {
7333 return task->task_jetsam_realtime_audio;
7334 }
7335
7336 void
7337 task_set_jetsam_realtime_audio(task_t task, bool realtime_audio)
7338 {
7339 task_lock(task);
7340 task->task_jetsam_realtime_audio = realtime_audio;
7341 task_unlock(task);
7342 }
7343
7344 #define HWM_USERCORE_MINSPACE 250 // free space (in MB) required *after* core file creation
7345
7346 void __attribute__((noinline))
7347 PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, send_exec_resource_options_t exception_options)
7348 {
7349 task_t task = current_task();
7350 int pid = 0;
7351 const char *procname = "unknown";
7352 const char *reason = "high watermark";
7353 mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
7354 boolean_t send_sync_exc_resource = FALSE;
7355 void *cur_bsd_info = get_bsdtask_info(current_task());
7356 int flavor = FLAVOR_HIGH_WATERMARK;
7357
7358 #ifdef MACH_BSD
7359 pid = proc_selfpid();
7360
7361 if (pid == 1) {
7362 /*
7363 * Cannot have ReportCrash analyzing
7364 * a suspended initproc.
7365 */
7366 return;
7367 }
7368
7369 if (cur_bsd_info != NULL) {
7370 procname = proc_name_address(cur_bsd_info);
7371 send_sync_exc_resource = proc_send_synchronous_EXC_RESOURCE(cur_bsd_info);
7372 }
7373 #endif
7374 #if CONFIG_COREDUMP
7375 if (hwm_user_cores) {
7376 int error;
7377 uint64_t starttime, end;
7378 clock_sec_t secs = 0;
7379 uint32_t microsecs = 0;
7380
7381 starttime = mach_absolute_time();
7382 /*
7383 * Trigger a coredump of this process. Don't proceed unless we know we won't
7384 * be filling up the disk; and ignore the core size resource limit for this
7385 * core file.
7386 */
7387 if ((error = coredump(cur_bsd_info, HWM_USERCORE_MINSPACE, COREDUMP_IGNORE_ULIMIT)) != 0) {
7388 printf("couldn't take coredump of %s[%d]: %d\n", procname, pid, error);
7389 }
7390 /*
7391 * coredump() leaves the task suspended.
7392 */
7393 task_resume_internal(current_task());
7394
7395 end = mach_absolute_time();
7396 		absolutetime_to_microtime(end - starttime, &secs, &microsecs);
7397 printf("coredump of %s[%d] taken in %d secs %d microsecs\n",
7398 proc_name_address(cur_bsd_info), pid, (int)secs, microsecs);
7399 }
7400 #endif /* CONFIG_COREDUMP */
7401
7402 if (disable_exc_resource) {
7403 printf("process %s[%d] crossed memory high watermark (%d MB); EXC_RESOURCE "
7404 "suppressed by a boot-arg.\n", procname, pid, max_footprint_mb);
7405 return;
7406 }
7407
7408 /*
7409 * For the reason string, diagnostic limit is prioritized over fatal limit,
7410 * but for the EXC_RESOURCE flavor it's the other way round.
7411 */
7412 if (exception_options & EXEC_RESOURCE_DIAGNOSTIC) {
7413 reason = "diagnostics limit";
7414 if (!(exception_options & EXEC_RESOURCE_FATAL)) {
7415 flavor = FLAVOR_DIAG_MEMLIMIT;
7416 }
7417 } else if (exception_options & EXEC_RESOURCE_CONCLAVE) {
7418 reason = "conclave limit";
7419 flavor = FLAVOR_CONCLAVE_LIMIT;
7420 }
7421
7422 printf("process %s [%d] crossed memory %s (%d MB); EXC_RESOURCE "
7423 "\n", procname, pid, reason, max_footprint_mb);
7424
7425 /*
7426 * A task that has triggered an EXC_RESOURCE, should not be
7427 * jetsammed when the device is under memory pressure. Here
7428 * we set the P_MEMSTAT_SKIP flag so that the process
7429 * will be skipped if the memorystatus_thread wakes up.
7430 *
7431 * This is a debugging aid to ensure we can get a corpse before
7432 * the jetsam thread kills the process.
7433 * Note that proc_memstat_skip is a no-op on release kernels.
7434 */
7435 proc_memstat_skip(cur_bsd_info, TRUE);
7436
7437 code[0] = code[1] = 0;
7438 EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_MEMORY);
7439 EXC_RESOURCE_ENCODE_FLAVOR(code[0], flavor);
7440 EXC_RESOURCE_HWM_ENCODE_LIMIT(code[0], max_footprint_mb);
7441 /*
7442 * Do not generate a corpse fork if the violation is a fatal one
7443 * or the process wants synchronous EXC_RESOURCE exceptions.
7444 */
7445 if ((exception_options & EXEC_RESOURCE_FATAL) || send_sync_exc_resource || !exc_via_corpse_forking) {
7446 if (exception_options & EXEC_RESOURCE_FATAL) {
7447 vm_map_set_corpse_source(task->map);
7448 }
7449
7450 /* Do not send a EXC_RESOURCE if corpse_for_fatal_memkill is set */
7451 if (send_sync_exc_resource || !corpse_for_fatal_memkill) {
7452 /*
7453 * Use the _internal_ variant so that no user-space
7454 * process can resume our task from under us.
7455 */
7456 task_suspend_internal(task);
7457 exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
7458 task_resume_internal(task);
7459 }
7460 } else {
7461 if (disable_exc_resource_during_audio && audio_active && task->task_jetsam_realtime_audio) {
7462 printf("process %s[%d] crossed memory high watermark (%d MB); EXC_RESOURCE "
7463 "suppressed due to audio playback.\n", procname, pid, max_footprint_mb);
7464 } else {
7465 task_enqueue_exception_with_corpse(task, EXC_RESOURCE,
7466 code, EXCEPTION_CODE_MAX, NULL, FALSE);
7467 }
7468 }
7469
7470 /*
7471 * After the EXC_RESOURCE has been handled, we must clear the
7472 * P_MEMSTAT_SKIP flag so that the process can again be
7473 * considered for jetsam if the memorystatus_thread wakes up.
7474 */
7475 proc_memstat_skip(cur_bsd_info, FALSE); /* clear the flag */
7476 }
7477 /*
7478 * Callback invoked when a task exceeds its physical footprint limit.
7479 */
7480 void
7481 task_footprint_exceeded(int warning, __unused const void *param0, __unused const void *param1)
7482 {
7483 ledger_amount_t enforced_limit_mb = 0;
7484 ledger_amount_t enforced_limit = 0;
7485 #if CONFIG_DEFERRED_RECLAIM
7486 ledger_amount_t current_footprint;
7487 #endif /* CONFIG_DEFERRED_RECLAIM */
7488 task_t task;
7489 send_exec_resource_is_warning is_warning = IS_NOT_WARNING;
7490 boolean_t memlimit_is_active;
7491 send_exec_resource_is_fatal memlimit_is_fatal;
7492 send_exec_resource_is_diagnostics is_diag_mem_threshold = IS_NOT_DIAGNOSTICS;
7493 if (warning == LEDGER_WARNING_DIAG_MEM_THRESHOLD) {
7494 is_diag_mem_threshold = IS_DIAGNOSTICS;
7495 is_warning = IS_WARNING;
7496 } else if (warning == LEDGER_WARNING_DIPPED_BELOW) {
7497 /*
7498 * Task memory limits only provide a warning on the way up.
7499 */
7500 return;
7501 } else if (warning == LEDGER_WARNING_ROSE_ABOVE) {
7502 /*
7503 		 * This task is in danger of violating a memory limit:
7504 		 * it has exceeded a percentage level of the limit.
7505 */
7506 is_warning = IS_WARNING;
7507 } else {
7508 /*
7509 * The task has exceeded the physical footprint limit.
7510 * This is not a warning but a true limit violation.
7511 */
7512 is_warning = IS_NOT_WARNING;
7513 }
7514
7515 task = current_task();
7516
7517 #if DEBUG || DEVELOPMENT
7518 if (is_diag_mem_threshold == IS_DIAGNOSTICS) {
7519 ledger_get_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint, &enforced_limit);
7520 } else {
7521 ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &enforced_limit);
7522 }
7523 #else /* DEBUG || DEVELOPMENT */
7524 ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &enforced_limit);
7525 #endif /* !(DEBUG || DEVELOPMENT) */
7526 #if CONFIG_DEFERRED_RECLAIM
7527 if (!is_warning && vm_deferred_reclamation_task_has_ring(task)) {
7528 /*
7529 * Task is enrolled in deferred reclamation.
7530 * Do a reclaim to ensure it's really over its limit.
7531 */
7532 vm_deferred_reclamation_task_drain(task, RECLAIM_OPTIONS_NONE);
7533 		ledger_get_balance(task->ledger, task_ledgers.phys_footprint, &current_footprint);
7534 if (current_footprint < enforced_limit) {
7535 return;
7536 }
7537 }
7538 #endif /* CONFIG_DEFERRED_RECLAIM */
7539 enforced_limit_mb = enforced_limit >> 20;
7540 memlimit_is_active = task_get_memlimit_is_active(task);
7541 memlimit_is_fatal = task_get_memlimit_is_fatal(task) == FALSE ? IS_NOT_FATAL : IS_FATAL;
7542 #if DEBUG || DEVELOPMENT
7543 if (is_diag_mem_threshold == IS_NOT_DIAGNOSTICS) {
7544 task_process_crossed_limit_no_diag(task, enforced_limit_mb, memlimit_is_fatal, memlimit_is_active, is_warning);
7545 } else {
7546 task_process_crossed_limit_diag(enforced_limit_mb);
7547 }
7548 if ((enforced_limit_mb & EXC_RESOURCE_HWM_LIMIT_MASK) != enforced_limit_mb) {
7549 os_log_error(OS_LOG_DEFAULT, "EXC_RESOURCE limit %d above maximum-encodable limit %d; logs may be inaccurate\n",
7550 (int) enforced_limit_mb, (int) EXC_RESOURCE_HWM_LIMIT_MASK);
7551 }
7552 #else /* DEBUG || DEVELOPMENT */
7553 task_process_crossed_limit_no_diag(task, enforced_limit_mb, memlimit_is_fatal, memlimit_is_active, is_warning);
7554 #endif /* !(DEBUG || DEVELOPMENT) */
7555 }
7556
7557 /*
7558  * Actions to perform when a process has crossed its watermark or has exceeded a fatal memory limit. */
7559 static inline void
7560 task_process_crossed_limit_no_diag(task_t task, ledger_amount_t ledger_limit_size, bool memlimit_is_fatal, bool memlimit_is_active, send_exec_resource_is_warning is_warning)
7561 {
7562 send_exec_resource_options_t exception_options = 0;
7563 if (memlimit_is_fatal) {
7564 exception_options |= EXEC_RESOURCE_FATAL;
7565 }
7566 /*
7567 * If this is an actual violation (not a warning), then generate EXC_RESOURCE exception.
7568 * We only generate the exception once per process per memlimit (active/inactive limit).
7569 * To enforce this, we monitor state based on the memlimit's active/inactive attribute
7570 * and we disable it by marking that memlimit as exception triggered.
7571 */
7572 if (is_warning == IS_NOT_WARNING && task_set_exc_resource_bit(task, memlimit_is_active)) {
7573 PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND((int)ledger_limit_size, exception_options);
7574 		// If it was not a diag threshold (i.e. it was a memory limit), then we do not want more signalling;
7575 		// however, if it was a diag limit, the user may load a different limit and signal the violation again.
7576 memorystatus_log_exception((int)ledger_limit_size, memlimit_is_active, memlimit_is_fatal);
7577 }
7578 memorystatus_on_ledger_footprint_exceeded(is_warning == IS_NOT_WARNING ? FALSE : TRUE, memlimit_is_active, memlimit_is_fatal);
7579 }
7580
7581 /*
7582 * Callback invoked when a task exceeds its conclave memory limit.
7583 */
7584 void
7585 task_conclave_mem_limit_exceeded(__unused int warning, __unused const void *param0, __unused const void *param1)
7586 {
7587 ledger_amount_t max_footprint = 0;
7588 ledger_amount_t max_footprint_mb = 0;
7589
7590 task_t task = current_task();
7591
7592 ledger_get_limit(task->ledger, task_ledgers.conclave_mem, &max_footprint);
7593 max_footprint_mb = max_footprint >> 20;
7594
7595 /*
7596 * The conclave memory limit is always fatal.
7597 * For the moment, we assume conclave memory isn't tied to process memory
7598 * and so this doesn't participate in the once-per-process rule above.
7599 */
7600 PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND((int)max_footprint_mb, EXEC_RESOURCE_FATAL | EXEC_RESOURCE_CONCLAVE);
7601
7602 memorystatus_on_conclave_limit_exceeded((int)max_footprint_mb);
7603 }
7604
7605 #if DEBUG || DEVELOPMENT
7606 /**
7607 * Actions to take when a process has crossed the diagnostics limit
7608 */
7609 static inline void
7610 task_process_crossed_limit_diag(ledger_amount_t ledger_limit_size)
7611 {
7612 /*
7613 * If this is an actual violation (not a warning), then generate EXC_RESOURCE exception.
7614 * In the case of the diagnostics thresholds, the exception will be signaled only once, but the
7615  * inhibit / rearm mechanism is performed at ledger level.
7616 */
7617 send_exec_resource_options_t exception_options = EXEC_RESOURCE_DIAGNOSTIC;
7618 PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND((int)ledger_limit_size, exception_options);
7619 memorystatus_log_diag_threshold_exception((int)ledger_limit_size);
7620 }
7621 #endif
7622
7623 extern int proc_check_footprint_priv(void);
7624
7625 kern_return_t
7626 task_set_phys_footprint_limit(
7627 task_t task,
7628 int new_limit_mb,
7629 int *old_limit_mb)
7630 {
7631 kern_return_t error;
7632
7633 boolean_t memlimit_is_active;
7634 boolean_t memlimit_is_fatal;
7635
7636 if ((error = proc_check_footprint_priv())) {
7637 return KERN_NO_ACCESS;
7638 }
7639
7640 /*
7641 * This call should probably be obsoleted.
7642 * But for now, we default to current state.
7643 */
7644 memlimit_is_active = task_get_memlimit_is_active(task);
7645 memlimit_is_fatal = task_get_memlimit_is_fatal(task);
7646
7647 return task_set_phys_footprint_limit_internal(task, new_limit_mb, old_limit_mb, memlimit_is_active, memlimit_is_fatal);
7648 }
7649
7650 /*
7651 * Set the limit of diagnostics memory consumption for a concrete task
7652 */
7653 #if CONFIG_MEMORYSTATUS
7654 #if DEVELOPMENT || DEBUG
7655 kern_return_t
7656 task_set_diag_footprint_limit(
7657 task_t task,
7658 uint64_t new_limit_mb,
7659 uint64_t *old_limit_mb)
7660 {
7661 kern_return_t error;
7662
7663 if ((error = proc_check_footprint_priv())) {
7664 return KERN_NO_ACCESS;
7665 }
7666
7667 return task_set_diag_footprint_limit_internal(task, new_limit_mb, old_limit_mb);
7668 }
7669
7670 #endif // DEVELOPMENT || DEBUG
7671 #endif // CONFIG_MEMORYSTATUS
7672
7673 kern_return_t
7674 task_convert_phys_footprint_limit(
7675 int limit_mb,
7676 int *converted_limit_mb)
7677 {
7678 if (limit_mb == -1) {
7679 /*
7680 * No limit
7681 */
7682 if (max_task_footprint != 0) {
7683 *converted_limit_mb = (int)(max_task_footprint / 1024 / 1024); /* bytes to MB */
7684 } else {
7685 *converted_limit_mb = (int)(LEDGER_LIMIT_INFINITY >> 20);
7686 }
7687 } else {
7688 /* nothing to convert */
7689 *converted_limit_mb = limit_mb;
7690 }
7691 return KERN_SUCCESS;
7692 }
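
/*
 * The MB conversions in this file are plain power-of-two shifts
 * (1 MB == 1 << 20 bytes), with -1 reserved to mean "no limit".  Worked
 * examples for the routine above:
 */
#if 0 /* Illustrative sketch only -- not part of the build. */
static void
convert_examples(void)
{
	int mb = 0;

	task_convert_phys_footprint_limit(512, &mb);
	/* mb == 512: finite limits pass through unchanged */

	task_convert_phys_footprint_limit(-1, &mb);
	/* mb == max_task_footprint in MB if one is configured,
	 * else LEDGER_LIMIT_INFINITY >> 20 */
}
#endif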
7693
7694 kern_return_t
7695 task_set_phys_footprint_limit_internal(
7696 task_t task,
7697 int new_limit_mb,
7698 int *old_limit_mb,
7699 boolean_t memlimit_is_active,
7700 boolean_t memlimit_is_fatal)
7701 {
7702 ledger_amount_t old;
7703 kern_return_t ret;
7704 #if DEVELOPMENT || DEBUG
7705 diagthreshold_check_return diag_threshold_validity;
7706 #endif
7707 ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &old);
7708
7709 if (ret != KERN_SUCCESS) {
7710 return ret;
7711 }
7712 /**
7713  * We may need to re-enable the diag threshold, so get its value
7714  * and current status.
7715 */
7716 #if DEVELOPMENT || DEBUG
7717 diag_threshold_validity = task_check_memorythreshold_is_valid( task, new_limit_mb, false);
7718 /**
7719  * If the footprint and diagnostics threshold are going to be the same, disable the threshold.
7720 */
7721 if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7722 ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7723 } else if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7724 ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7725 }
7726 #endif
7727
7728 /*
7729 * Check that limit >> 20 will not give an "unexpected" 32-bit
7730 * result. There are, however, implicit assumptions that -1 mb limit
7731 * equates to LEDGER_LIMIT_INFINITY.
7732 */
7733 assert(((old & 0xFFF0000000000000LL) == 0) || (old == LEDGER_LIMIT_INFINITY));
7734
7735 if (old_limit_mb) {
7736 *old_limit_mb = (int)(old >> 20);
7737 }
7738
7739 if (new_limit_mb == -1) {
7740 /*
7741 * Caller wishes to remove the limit.
7742 */
7743 ledger_set_limit(task->ledger, task_ledgers.phys_footprint,
7744 max_task_footprint ? max_task_footprint : LEDGER_LIMIT_INFINITY,
7745 max_task_footprint ? (uint8_t)max_task_footprint_warning_level : 0);
7746
7747 task_lock(task);
7748 task_set_memlimit_is_active(task, memlimit_is_active);
7749 task_set_memlimit_is_fatal(task, memlimit_is_fatal);
7750 task_unlock(task);
7751 /**
7752 	 * If the diagnostics threshold was disabled and we now have a new limit, we have to re-enable it.
7753 */
7754 #if DEVELOPMENT || DEBUG
7755 if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7756 ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7757 } else if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7758 ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7759 }
7760 #endif
7761 return KERN_SUCCESS;
7762 }
7763
7764 #ifdef CONFIG_NOMONITORS
7765 return KERN_SUCCESS;
7766 #endif /* CONFIG_NOMONITORS */
7767
7768 task_lock(task);
7769
7770 if ((memlimit_is_active == task_get_memlimit_is_active(task)) &&
7771 (memlimit_is_fatal == task_get_memlimit_is_fatal(task)) &&
7772 (((ledger_amount_t)new_limit_mb << 20) == old)) {
7773 /*
7774 * memlimit state is not changing
7775 */
7776 task_unlock(task);
7777 return KERN_SUCCESS;
7778 }
7779
7780 task_set_memlimit_is_active(task, memlimit_is_active);
7781 task_set_memlimit_is_fatal(task, memlimit_is_fatal);
7782
7783 ledger_set_limit(task->ledger, task_ledgers.phys_footprint,
7784 (ledger_amount_t)new_limit_mb << 20, PHYS_FOOTPRINT_WARNING_LEVEL);
7785
7786 if (task == current_task()) {
7787 ledger_check_new_balance(current_thread(), task->ledger,
7788 task_ledgers.phys_footprint);
7789 }
7790
7791 task_unlock(task);
7792 #if DEVELOPMENT || DEBUG
7793 if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7794 ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7795 }
7796 #endif
7797
7798 return KERN_SUCCESS;
7799 }
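
/*
 * A hypothetical memorystatus-style caller of the routine above,
 * installing a 100 MB active, fatal footprint limit and capturing the
 * previous limit:
 */
#if 0 /* Illustrative sketch only -- not part of the build. */
static kern_return_t
install_fatal_limit(task_t task)
{
	int old_mb = 0;

	return task_set_phys_footprint_limit_internal(task,
	           100,      /* new limit, in MB */
	           &old_mb,  /* previous limit returned, in MB */
	           TRUE,     /* memlimit_is_active */
	           TRUE);    /* memlimit_is_fatal */
}
#endif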
7800
7801 #if RESETTABLE_DIAG_FOOTPRINT_LIMITS
7802 kern_return_t
7803 task_set_diag_footprint_limit_internal(
7804 task_t task,
7805 uint64_t new_limit_bytes,
7806 uint64_t *old_limit_bytes)
7807 {
7808 ledger_amount_t old = 0;
7809 kern_return_t ret = KERN_SUCCESS;
7810 diagthreshold_check_return diag_threshold_validity;
7811 ret = ledger_get_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint, &old);
7812
7813 if (ret != KERN_SUCCESS) {
7814 return ret;
7815 }
7816 /**
7817  * We may need to re-enable the diag threshold, so get its value
7818  * and current status.
7819 */
7820 diag_threshold_validity = task_check_memorythreshold_is_valid( task, new_limit_bytes >> 20, true);
7821 /**
7822  * If the footprint and diagnostics threshold are going to be the same, disable the threshold.
7823 */
7824 if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7825 ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7826 }
7827
7828 /*
7829 * Check that limit >> 20 will not give an "unexpected" 32-bit
7830 * result. There are, however, implicit assumptions that -1 mb limit
7831 * equates to LEDGER_LIMIT_INFINITY.
7832 */
7833 if (old_limit_bytes) {
7834 *old_limit_bytes = old;
7835 }
7836
7837 if (new_limit_bytes == -1) {
7838 /*
7839 * Caller wishes to remove the limit.
7840 */
7841 ledger_set_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint,
7842 LEDGER_LIMIT_INFINITY);
7843 /*
7844 	 * If the memory diagnostics flag was disabled, enable it again.
7845 */
7846 ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7847 return KERN_SUCCESS;
7848 }
7849
7850 #ifdef CONFIG_NOMONITORS
7851 return KERN_SUCCESS;
7852 #else
7853
7854 task_lock(task);
7855 ledger_set_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint,
7856 (ledger_amount_t)new_limit_bytes );
7857 if (task == current_task()) {
7858 ledger_check_new_balance(current_thread(), task->ledger,
7859 task_ledgers.phys_footprint);
7860 }
7861
7862 task_unlock(task);
7863 if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7864 ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7865 } else if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7866 ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7867 }
7868
7869 return KERN_SUCCESS;
7870 #endif /* CONFIG_NOMONITORS */
7871 }
7872
7873 kern_return_t
7874 task_get_diag_footprint_limit_internal(
7875 task_t task,
7876 uint64_t *new_limit_bytes,
7877 bool *threshold_disabled)
7878 {
7879 ledger_amount_t ledger_limit;
7880 kern_return_t ret = KERN_SUCCESS;
7881 if (new_limit_bytes == NULL || threshold_disabled == NULL) {
7882 return KERN_INVALID_ARGUMENT;
7883 }
7884 ret = ledger_get_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint, &ledger_limit);
7885 if (ledger_limit == LEDGER_LIMIT_INFINITY) {
7886 ledger_limit = -1;
7887 }
7888 if (ret == KERN_SUCCESS) {
7889 *new_limit_bytes = ledger_limit;
7890 ret = ledger_is_diag_threshold_enabled(task->ledger, task_ledgers.phys_footprint, threshold_disabled);
7891 }
7892 return ret;
7893 }
7894 #endif /* RESETTABLE_DIAG_FOOTPRINT_LIMITS */
7895
7896
7897 kern_return_t
7898 task_get_phys_footprint_limit(
7899 task_t task,
7900 int *limit_mb)
7901 {
7902 ledger_amount_t limit;
7903 kern_return_t ret;
7904
7905 ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &limit);
7906 if (ret != KERN_SUCCESS) {
7907 return ret;
7908 }
7909
7910 /*
7911 * Check that limit >> 20 will not give an "unexpected" signed, 32-bit
7912 * result. There are, however, implicit assumptions that -1 mb limit
7913 * equates to LEDGER_LIMIT_INFINITY.
7914 */
7915 assert(((limit & 0xFFF0000000000000LL) == 0) || (limit == LEDGER_LIMIT_INFINITY));
7916 *limit_mb = (int)(limit >> 20);
7917
7918 return KERN_SUCCESS;
7919 }
7920 #else /* CONFIG_MEMORYSTATUS */
7921 kern_return_t
7922 task_set_phys_footprint_limit(
7923 __unused task_t task,
7924 __unused int new_limit_mb,
7925 __unused int *old_limit_mb)
7926 {
7927 return KERN_FAILURE;
7928 }
7929
7930 kern_return_t
7931 task_get_phys_footprint_limit(
7932 __unused task_t task,
7933 __unused int *limit_mb)
7934 {
7935 return KERN_FAILURE;
7936 }
7937 #endif /* CONFIG_MEMORYSTATUS */
7938
7939 security_token_t *
7940 task_get_sec_token(task_t task)
7941 {
7942 return &task_get_ro(task)->task_tokens.sec_token;
7943 }
7944
7945 void
7946 task_set_sec_token(task_t task, security_token_t *token)
7947 {
7948 zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7949 task_tokens.sec_token, token);
7950 }
7951
7952 audit_token_t *
7953 task_get_audit_token(task_t task)
7954 {
7955 return &task_get_ro(task)->task_tokens.audit_token;
7956 }
7957
7958 void
7959 task_set_audit_token(task_t task, audit_token_t *token)
7960 {
7961 zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7962 task_tokens.audit_token, token);
7963 }
7964
7965 void
7966 task_set_tokens(task_t task, security_token_t *sec_token, audit_token_t *audit_token)
7967 {
7968 struct task_token_ro_data tokens;
7969
7970 tokens = task_get_ro(task)->task_tokens;
7971 tokens.sec_token = *sec_token;
7972 tokens.audit_token = *audit_token;
7973
7974 zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task), task_tokens,
7975 &tokens);
7976 }
7977
7978 boolean_t
7979 task_is_privileged(task_t task)
7980 {
7981 return task_get_sec_token(task)->val[0] == 0;
7982 }
7983
7984 #ifdef CONFIG_MACF
7985 uint8_t *
7986 task_get_mach_trap_filter_mask(task_t task)
7987 {
7988 return task_get_ro(task)->task_filters.mach_trap_filter_mask;
7989 }
7990
7991 void
7992 task_set_mach_trap_filter_mask(task_t task, uint8_t *mask)
7993 {
7994 zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7995 task_filters.mach_trap_filter_mask, &mask);
7996 }
7997
7998 uint8_t *
7999 task_get_mach_kobj_filter_mask(task_t task)
8000 {
8001 return task_get_ro(task)->task_filters.mach_kobj_filter_mask;
8002 }
8003
8004 mach_vm_address_t
8005 task_get_all_image_info_addr(task_t task)
8006 {
8007 return task->all_image_info_addr;
8008 }
8009
8010 void
8011 task_set_mach_kobj_filter_mask(task_t task, uint8_t *mask)
8012 {
8013 zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
8014 task_filters.mach_kobj_filter_mask, &mask);
8015 }
8016
8017 #endif /* CONFIG_MACF */
8018
8019 void
8020 task_set_thread_limit(task_t task, uint16_t thread_limit)
8021 {
8022 assert(task != kernel_task);
8023 if (thread_limit <= TASK_MAX_THREAD_LIMIT) {
8024 task_lock(task);
8025 task->task_thread_limit = thread_limit;
8026 task_unlock(task);
8027 }
8028 }
8029
8030 kern_return_t
8031 task_get_conclave_mem_limit(task_t task, uint64_t *conclave_limit)
8032 {
8033 kern_return_t ret;
8034 ledger_amount_t max;
8035
8036 ret = ledger_get_limit(task->ledger, task_ledgers.conclave_mem, &max);
8037 if (ret != KERN_SUCCESS) {
8038 return ret;
8039 }
8040
8041 *conclave_limit = max;
8042
8043 return KERN_SUCCESS;
8044 }
8045
8046 kern_return_t
8047 task_set_conclave_mem_limit(task_t task, uint64_t conclave_limit)
8048 {
8049 kern_return_t error;
8050
8051 if ((error = proc_check_footprint_priv())) {
8052 (void) error;
8053 /* Following task_set_phys_footprint_limit, always returns KERN_NO_ACCESS. */
8054 return KERN_NO_ACCESS;
8055 }
8056
8057 task_lock(task);
8058
8059 ledger_set_limit(task->ledger, task_ledgers.conclave_mem,
8060 (ledger_amount_t)conclave_limit << 20, 0);
8061
8062 if (task == current_task()) {
8063 ledger_check_new_balance(current_thread(), task->ledger,
8064 task_ledgers.conclave_mem);
8065 }
8066
8067 task_unlock(task);
8068
8069 return KERN_SUCCESS;
8070 }
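
/*
 * Note the unit asymmetry above: task_set_conclave_mem_limit() takes MB
 * and installs the ledger limit in bytes (<< 20), while
 * task_get_conclave_mem_limit() returns the raw ledger value in bytes.
 * A hypothetical round-trip (assumes the caller holds footprint
 * privilege, otherwise the set returns KERN_NO_ACCESS):
 */
#if 0 /* Illustrative sketch only -- not part of the build. */
static void
conclave_limit_roundtrip(task_t task)
{
	uint64_t bytes = 0;

	task_set_conclave_mem_limit(task, 32);      /* 32 MB */
	task_get_conclave_mem_limit(task, &bytes);  /* bytes == 32ULL << 20 */
}
#endif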
8071
8072 #if CONFIG_PROC_RESOURCE_LIMITS
8073 kern_return_t
8074 task_set_port_space_limits(task_t task, uint32_t soft_limit, uint32_t hard_limit)
8075 {
8076 return ipc_space_set_table_size_limits(task->itk_space, soft_limit, hard_limit);
8077 }
8078 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
8079
8080 #if XNU_TARGET_OS_OSX
8081 boolean_t
8082 task_has_system_version_compat_enabled(task_t task)
8083 {
8084 boolean_t enabled = FALSE;
8085
8086 task_lock(task);
8087 enabled = (task->t_flags & TF_SYS_VERSION_COMPAT);
8088 task_unlock(task);
8089
8090 return enabled;
8091 }
8092
8093 void
8094 task_set_system_version_compat_enabled(task_t task, boolean_t enable_system_version_compat)
8095 {
8096 assert(task == current_task());
8097 assert(task != kernel_task);
8098
8099 task_lock(task);
8100 if (enable_system_version_compat) {
8101 task->t_flags |= TF_SYS_VERSION_COMPAT;
8102 } else {
8103 task->t_flags &= ~TF_SYS_VERSION_COMPAT;
8104 }
8105 task_unlock(task);
8106 }
8107 #endif /* XNU_TARGET_OS_OSX */
8108
8109 /*
8110 * We need to export some functions to other components that
8111 * are currently implemented in macros within the osfmk
8112 * component. Just export them as functions of the same name.
8113 */
8114 boolean_t
8115 is_kerneltask(task_t t)
8116 {
8117 if (t == kernel_task) {
8118 return TRUE;
8119 }
8120
8121 return FALSE;
8122 }
8123
8124 boolean_t
8125 is_corpsefork(task_t t)
8126 {
8127 return task_is_a_corpse_fork(t);
8128 }
8129
8130 task_t
8131 current_task_early(void)
8132 {
8133 if (__improbable(startup_phase < STARTUP_SUB_EARLY_BOOT)) {
8134 if (current_thread()->t_tro == NULL) {
8135 return TASK_NULL;
8136 }
8137 }
8138 return get_threadtask(current_thread());
8139 }
8140
8141 task_t
8142 current_task(void)
8143 {
8144 return get_threadtask(current_thread());
8145 }
8146
8147 /* defined in bsd/kern/kern_prot.c */
8148 extern int get_audit_token_pid(audit_token_t *audit_token);
8149
8150 int
8151 task_pid(task_t task)
8152 {
8153 if (task) {
8154 return get_audit_token_pid(task_get_audit_token(task));
8155 }
8156 return -1;
8157 }
8158
8159 #if __has_feature(ptrauth_calls)
8160 /*
8161 * Get the shared region id and jop signing key for the task.
8162  * The function will allocate a kalloc buffer and return
8163  * it to the caller; the caller must free it. This is used
8164  * for getting the information via the task port.
8165 */
8166 char *
8167 task_get_vm_shared_region_id_and_jop_pid(task_t task, uint64_t *jop_pid)
8168 {
8169 size_t len;
8170 char *shared_region_id = NULL;
8171
8172 task_lock(task);
8173 if (task->shared_region_id == NULL) {
8174 task_unlock(task);
8175 return NULL;
8176 }
8177 len = strlen(task->shared_region_id) + 1;
8178
8179 /* don't hold task lock while allocating */
8180 task_unlock(task);
8181 shared_region_id = kalloc_data(len, Z_WAITOK);
8182 task_lock(task);
8183
8184 if (task->shared_region_id == NULL) {
8185 task_unlock(task);
8186 kfree_data(shared_region_id, len);
8187 return NULL;
8188 }
8189 assert(len == strlen(task->shared_region_id) + 1); /* should never change */
8190 strlcpy(shared_region_id, task->shared_region_id, len);
8191 task_unlock(task);
8192
8193 /* find key from its auth pager */
8194 if (jop_pid != NULL) {
8195 *jop_pid = shared_region_find_key(shared_region_id);
8196 }
8197
8198 return shared_region_id;
8199 }
8200
8201 /*
8202 * set the shared region id for a task
8203 */
8204 void
8205 task_set_shared_region_id(task_t task, char *id)
8206 {
8207 char *old_id;
8208
8209 task_lock(task);
8210 old_id = task->shared_region_id;
8211 task->shared_region_id = id;
8212 task->shared_region_auth_remapped = FALSE;
8213 task_unlock(task);
8214
8215 /* free any pre-existing shared region id */
8216 if (old_id != NULL) {
8217 shared_region_key_dealloc(old_id);
8218 kfree_data(old_id, strlen(old_id) + 1);
8219 }
8220 }
8221 #endif /* __has_feature(ptrauth_calls) */
8222
8223 /*
8224 * This routine finds a thread in a task by its unique id
8225 * Returns a referenced thread or THREAD_NULL if the thread was not found
8226 *
8227 * TODO: This is super inefficient - it's an O(threads in task) list walk!
8228 * We should make a tid hash, or transition all tid clients to thread ports
8229 *
8230 * Precondition: No locks held (will take task lock)
8231 */
8232 thread_t
8233 task_findtid(task_t task, uint64_t tid)
8234 {
8235 thread_t self = current_thread();
8236 thread_t found_thread = THREAD_NULL;
8237 thread_t iter_thread = THREAD_NULL;
8238
8239 /* Short-circuit the lookup if we're looking up ourselves */
8240 if (tid == self->thread_id || tid == TID_NULL) {
8241 assert(get_threadtask(self) == task);
8242
8243 thread_reference(self);
8244
8245 return self;
8246 }
8247
8248 task_lock(task);
8249
8250 queue_iterate(&task->threads, iter_thread, thread_t, task_threads) {
8251 if (iter_thread->thread_id == tid) {
8252 found_thread = iter_thread;
8253 thread_reference(found_thread);
8254 break;
8255 }
8256 }
8257
8258 task_unlock(task);
8259
8260 return found_thread;
8261 }
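
/*
 * task_findtid() returns a +1 thread reference (or THREAD_NULL); callers
 * own that reference and must thread_deallocate() it.  A hypothetical
 * caller:
 */
#if 0 /* Illustrative sketch only -- not part of the build. */
static bool
task_has_tid(task_t task, uint64_t tid)
{
	thread_t thread = task_findtid(task, tid);

	if (thread == THREAD_NULL) {
		return false;
	}
	/* ... the thread may be used safely here ... */
	thread_deallocate(thread);
	return true;
}
#endif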
8262
8263 int
8264 pid_from_task(task_t task)
8265 {
8266 int pid = -1;
8267 void *bsd_info = get_bsdtask_info(task);
8268
8269 if (bsd_info) {
8270 pid = proc_pid(bsd_info);
8271 } else {
8272 pid = task_pid(task);
8273 }
8274
8275 return pid;
8276 }
8277
8278 /*
8279 * Control the CPU usage monitor for a task.
8280 */
8281 kern_return_t
8282 task_cpu_usage_monitor_ctl(task_t task, uint32_t *flags)
8283 {
8284 int error = KERN_SUCCESS;
8285
8286 if (*flags & CPUMON_MAKE_FATAL) {
8287 task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_FATAL_CPUMON;
8288 } else {
8289 error = KERN_INVALID_ARGUMENT;
8290 }
8291
8292 return error;
8293 }
8294
8295 /*
8296 * Control the wakeups monitor for a task.
8297 */
8298 kern_return_t
8299 task_wakeups_monitor_ctl(task_t task, uint32_t *flags, int32_t *rate_hz)
8300 {
8301 ledger_t ledger = task->ledger;
8302
8303 task_lock(task);
8304 if (*flags & WAKEMON_GET_PARAMS) {
8305 ledger_amount_t limit;
8306 uint64_t period;
8307
8308 ledger_get_limit(ledger, task_ledgers.interrupt_wakeups, &limit);
8309 ledger_get_period(ledger, task_ledgers.interrupt_wakeups, &period);
8310
8311 if (limit != LEDGER_LIMIT_INFINITY) {
8312 /*
8313 * An active limit means the wakeups monitor is enabled.
8314 */
8315 *rate_hz = (int32_t)(limit / (int64_t)(period / NSEC_PER_SEC));
8316 *flags = WAKEMON_ENABLE;
8317 if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON) {
8318 *flags |= WAKEMON_MAKE_FATAL;
8319 }
8320 } else {
8321 *flags = WAKEMON_DISABLE;
8322 *rate_hz = -1;
8323 }
8324
8325 /*
8326 * If WAKEMON_GET_PARAMS is present in flags, all other flags are ignored.
8327 */
8328 task_unlock(task);
8329 return KERN_SUCCESS;
8330 }
8331
8332 if (*flags & WAKEMON_ENABLE) {
8333 if (*flags & WAKEMON_SET_DEFAULTS) {
8334 *rate_hz = task_wakeups_monitor_rate;
8335 }
8336
8337 #ifndef CONFIG_NOMONITORS
8338 if (*flags & WAKEMON_MAKE_FATAL) {
8339 task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON;
8340 }
8341 #endif /* CONFIG_NOMONITORS */
8342
8343 if (*rate_hz <= 0) {
8344 task_unlock(task);
8345 return KERN_INVALID_ARGUMENT;
8346 }
8347
8348 #ifndef CONFIG_NOMONITORS
8349 ledger_set_limit(ledger, task_ledgers.interrupt_wakeups, *rate_hz * task_wakeups_monitor_interval,
8350 (uint8_t)task_wakeups_monitor_ustackshots_trigger_pct);
8351 ledger_set_period(ledger, task_ledgers.interrupt_wakeups, task_wakeups_monitor_interval * NSEC_PER_SEC);
8352 ledger_enable_callback(ledger, task_ledgers.interrupt_wakeups);
8353 #endif /* CONFIG_NOMONITORS */
8354 } else if (*flags & WAKEMON_DISABLE) {
8355 /*
8356 * Caller wishes to disable wakeups monitor on the task.
8357 *
8358 * Remove the limit & callback on the wakeups ledger entry.
8359 */
8360 ledger_disable_refill(ledger, task_ledgers.interrupt_wakeups);
8361 ledger_disable_callback(ledger, task_ledgers.interrupt_wakeups);
8362 }
8363
8364 task_unlock(task);
8365 return KERN_SUCCESS;
8366 }
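
/*
 * task_wakeups_monitor_ctl() is driven by the WAKEMON_* flag bits.  Two
 * hypothetical calls: enable the monitor at the default rate and make it
 * fatal, then read the parameters back:
 */
#if 0 /* Illustrative sketch only -- not part of the build. */
static void
wakemon_example(task_t task)
{
	uint32_t flags;
	int32_t rate_hz = 0;

	flags = WAKEMON_ENABLE | WAKEMON_SET_DEFAULTS | WAKEMON_MAKE_FATAL;
	task_wakeups_monitor_ctl(task, &flags, &rate_hz);

	flags = WAKEMON_GET_PARAMS;  /* all other flags are ignored */
	task_wakeups_monitor_ctl(task, &flags, &rate_hz);
	/* flags now reports WAKEMON_ENABLE (and WAKEMON_MAKE_FATAL),
	 * rate_hz the configured wakeups-per-second limit */
}
#endif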
8367
8368 void
8369 task_wakeups_rate_exceeded(int warning, __unused const void *param0, __unused const void *param1)
8370 {
8371 if (warning == 0) {
8372 SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS();
8373 }
8374 }
8375
8376 TUNABLE(bool, enable_wakeup_reports, "enable_wakeup_reports", false); /* Enable wakeup reports. */
8377
8378 void __attribute__((noinline))
8379 SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void)
8380 {
8381 task_t task = current_task();
8382 int pid = 0;
8383 const char *procname = "unknown";
8384 boolean_t fatal;
8385 kern_return_t kr;
8386 #ifdef EXC_RESOURCE_MONITORS
8387 mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
8388 #endif /* EXC_RESOURCE_MONITORS */
8389 struct ledger_entry_info lei;
8390
8391 #ifdef MACH_BSD
8392 pid = proc_selfpid();
8393 if (get_bsdtask_info(task) != NULL) {
8394 procname = proc_name_address(get_bsdtask_info(current_task()));
8395 }
8396 #endif
8397
8398 ledger_get_entry_info(task->ledger, task_ledgers.interrupt_wakeups, &lei);
8399
8400 /*
8401 * Disable the exception notification so we don't overwhelm
8402 * the listener with an endless stream of redundant exceptions.
8403 * TODO: detect whether another thread is already reporting the violation.
8404 */
8405 uint32_t flags = WAKEMON_DISABLE;
8406 task_wakeups_monitor_ctl(task, &flags, NULL);
8407
8408 fatal = task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON;
8409 trace_resource_violation(RMON_CPUWAKES_VIOLATED, &lei);
8410 os_log(OS_LOG_DEFAULT, "process %s[%d] caught waking the CPU %llu times "
8411 "over ~%llu seconds, averaging %llu wakes / second and "
8412 "violating a %slimit of %llu wakes over %llu seconds.\n",
8413 procname, pid,
8414 lei.lei_balance, lei.lei_last_refill / NSEC_PER_SEC,
8415 lei.lei_last_refill == 0 ? 0 :
8416 (NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill),
8417 fatal ? "FATAL " : "",
8418 lei.lei_limit, lei.lei_refill_period / NSEC_PER_SEC);
8419
8420 if (enable_wakeup_reports) {
8421 kr = send_resource_violation(send_cpu_wakes_violation, task, &lei,
8422 fatal ? kRNFatalLimitFlag : 0);
8423 if (kr) {
8424 printf("send_resource_violation(CPU wakes, ...): error %#x\n", kr);
8425 }
8426 }
8427
8428 #ifdef EXC_RESOURCE_MONITORS
8429 if (disable_exc_resource) {
8430 printf("process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
8431 "suppressed by a boot-arg\n", procname, pid);
8432 return;
8433 }
8434 if (disable_exc_resource_during_audio && audio_active && task->task_jetsam_realtime_audio) {
8435 os_log(OS_LOG_DEFAULT, "process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
8436 "suppressed due to audio playback\n", procname, pid);
8437 return;
8438 }
8439 if (lei.lei_last_refill == 0) {
8440 os_log(OS_LOG_DEFAULT, "process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
8441 "suppressed due to lei.lei_last_refill = 0 \n", procname, pid);
8442 }
8443
8444 code[0] = code[1] = 0;
8445 EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_WAKEUPS);
8446 EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_WAKEUPS_MONITOR);
8447 EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_PERMITTED(code[0],
8448 NSEC_PER_SEC * lei.lei_limit / lei.lei_refill_period);
8449 EXC_RESOURCE_CPUMONITOR_ENCODE_OBSERVATION_INTERVAL(code[0],
8450 lei.lei_last_refill);
8451 EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_OBSERVED(code[1],
8452 NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill);
8453 exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
8454 #endif /* EXC_RESOURCE_MONITORS */
8455
8456 if (fatal) {
8457 task_terminate_internal(task);
8458 }
8459 }
8460
8461 static boolean_t
8462 global_update_logical_writes(int64_t io_delta, int64_t *global_write_count)
8463 {
8464 int64_t old_count, new_count;
8465 boolean_t needs_telemetry;
8466
8467 do {
8468 new_count = old_count = *global_write_count;
8469 new_count += io_delta;
8470 if (new_count >= io_telemetry_limit) {
8471 new_count = 0;
8472 needs_telemetry = TRUE;
8473 } else {
8474 needs_telemetry = FALSE;
8475 }
8476 } while (!OSCompareAndSwap64(old_count, new_count, global_write_count));
8477 return needs_telemetry;
8478 }
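
/*
 * Illustrative note (not part of the original sources): the loop above is a
 * classic lock-free accumulate-and-reset. Concurrent writers fold their byte
 * deltas into one shared 64-bit counter with OSCompareAndSwap64; whichever
 * writer pushes the running total past io_telemetry_limit resets it to zero
 * and is the single caller told to emit telemetry, so each limit crossing is
 * reported exactly once. A caller would look like:
 *
 *     if (global_update_logical_writes(io_delta, &global_logical_writes_count)) {
 *         act_set_io_telemetry_ast(current_thread());
 *     }
 *
 * which is how task_update_logical_writes() below consumes it.
 */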

void
task_update_physical_writes(__unused task_t task, __unused task_physical_write_flavor_t flavor, __unused uint64_t io_size, __unused task_balance_flags_t flags)
{
#if CONFIG_PHYS_WRITE_ACCT
    if (!io_size) {
        return;
    }

    /*
     * task == NULL means that we have to update kernel_task ledgers
     */
    if (!task) {
        task = kernel_task;
    }

    KDBG((VMDBG_CODE(DBG_VM_PHYS_WRITE_ACCT)) | DBG_FUNC_NONE,
        task_pid(task), flavor, io_size, flags);
    DTRACE_IO4(physical_writes, struct task *, task, task_physical_write_flavor_t, flavor, uint64_t, io_size, task_balance_flags_t, flags);

    if (flags & TASK_BALANCE_CREDIT) {
        if (flavor == TASK_PHYSICAL_WRITE_METADATA) {
            OSAddAtomic64(io_size, (SInt64 *)&(task->task_fs_metadata_writes));
            ledger_credit_nocheck(task->ledger, task_ledgers.fs_metadata_writes, io_size);
        }
    } else if (flags & TASK_BALANCE_DEBIT) {
        if (flavor == TASK_PHYSICAL_WRITE_METADATA) {
            OSAddAtomic64(-1 * io_size, (SInt64 *)&(task->task_fs_metadata_writes));
            ledger_debit_nocheck(task->ledger, task_ledgers.fs_metadata_writes, io_size);
        }
    }
#endif /* CONFIG_PHYS_WRITE_ACCT */
}

void
task_update_logical_writes(task_t task, uint32_t io_size, int flags, void *vp)
{
    int64_t io_delta = 0;
    int64_t *global_counter_to_update;
    boolean_t needs_telemetry = FALSE;
    boolean_t is_external_device = FALSE;
    int ledger_to_update = 0;
    struct task_writes_counters *writes_counters_to_update;

    if ((!task) || (!io_size) || (!vp)) {
        return;
    }

    KDBG((VMDBG_CODE(DBG_VM_DATA_WRITE)) | DBG_FUNC_NONE,
        task_pid(task), io_size, flags, (uintptr_t)VM_KERNEL_ADDRPERM(vp));
    DTRACE_IO4(logical_writes, struct task *, task, uint32_t, io_size, int, flags, vnode *, vp);

    // Is the drive backing this vnode internal or external to the system?
    if (vnode_isonexternalstorage(vp) == false) {
        global_counter_to_update = &global_logical_writes_count;
        ledger_to_update = task_ledgers.logical_writes;
        writes_counters_to_update = &task->task_writes_counters_internal;
        is_external_device = FALSE;
    } else {
        global_counter_to_update = &global_logical_writes_to_external_count;
        ledger_to_update = task_ledgers.logical_writes_to_external;
        writes_counters_to_update = &task->task_writes_counters_external;
        is_external_device = TRUE;
    }

    switch (flags) {
    case TASK_WRITE_IMMEDIATE:
        OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_immediate_writes));
        ledger_credit(task->ledger, ledger_to_update, io_size);
        if (!is_external_device) {
            coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
        }
        break;
    case TASK_WRITE_DEFERRED:
        OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_deferred_writes));
        ledger_credit(task->ledger, ledger_to_update, io_size);
        if (!is_external_device) {
            coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
        }
        break;
    case TASK_WRITE_INVALIDATED:
        OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_invalidated_writes));
        ledger_debit(task->ledger, ledger_to_update, io_size);
        if (!is_external_device) {
            coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, FALSE, io_size);
        }
        break;
    case TASK_WRITE_METADATA:
        OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_metadata_writes));
        ledger_credit(task->ledger, ledger_to_update, io_size);
        if (!is_external_device) {
            coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
        }
        break;
    }

    io_delta = (flags == TASK_WRITE_INVALIDATED) ? ((int64_t)io_size * -1ll) : ((int64_t)io_size);
    if (io_telemetry_limit != 0) {
        /* If io_telemetry_limit is 0, disable global updates and I/O telemetry */
        needs_telemetry = global_update_logical_writes(io_delta, global_counter_to_update);
        if (needs_telemetry && !is_external_device) {
            act_set_io_telemetry_ast(current_thread());
        }
    }
}

/*
 * Control the I/O monitor for a task.
 */
kern_return_t
task_io_monitor_ctl(task_t task, uint32_t *flags)
{
    ledger_t ledger = task->ledger;

    task_lock(task);
    if (*flags & IOMON_ENABLE) {
        /* Configure the physical I/O ledger */
        ledger_set_limit(ledger, task_ledgers.physical_writes, (task_iomon_limit_mb * 1024 * 1024), 0);
        ledger_set_period(ledger, task_ledgers.physical_writes, (task_iomon_interval_secs * NSEC_PER_SEC));
    } else if (*flags & IOMON_DISABLE) {
        /*
         * Caller wishes to disable I/O monitor on the task.
         */
        ledger_disable_refill(ledger, task_ledgers.physical_writes);
        ledger_disable_callback(ledger, task_ledgers.physical_writes);
    }

    task_unlock(task);
    return KERN_SUCCESS;
}
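
/*
 * Usage sketch (illustrative only): the monitor is armed and torn down by
 * passing a flag word; the limit and period come from the task_iomon_limit_mb
 * and task_iomon_interval_secs globals rather than from the caller:
 *
 *     uint32_t flags = IOMON_ENABLE;
 *     task_io_monitor_ctl(task, &flags);   // task_iomon_limit_mb MB per interval
 *     ...
 *     flags = IOMON_DISABLE;
 *     task_io_monitor_ctl(task, &flags);   // stop refills and callbacks
 */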

void
task_io_rate_exceeded(int warning, const void *param0, __unused const void *param1)
{
    if (warning == 0) {
        SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO((int)param0);
    }
}

void __attribute__((noinline))
SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor)
{
    int pid = 0;
    task_t task = current_task();
#ifdef EXC_RESOURCE_MONITORS
    mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
#endif /* EXC_RESOURCE_MONITORS */
    struct ledger_entry_info lei = {};
    kern_return_t kr;

#ifdef MACH_BSD
    pid = proc_selfpid();
#endif
    /*
     * Get the ledger entry info. We need to do this before disabling the exception
     * to get correct values for all fields.
     */
    switch (flavor) {
    case FLAVOR_IO_PHYSICAL_WRITES:
        ledger_get_entry_info(task->ledger, task_ledgers.physical_writes, &lei);
        break;
    }

    /*
     * Disable the exception notification so we don't overwhelm
     * the listener with an endless stream of redundant exceptions.
     * TODO: detect whether another thread is already reporting the violation.
     */
    uint32_t flags = IOMON_DISABLE;
    task_io_monitor_ctl(task, &flags);

    if (flavor == FLAVOR_IO_LOGICAL_WRITES) {
        trace_resource_violation(RMON_LOGWRITES_VIOLATED, &lei);
    }
    os_log(OS_LOG_DEFAULT, "process [%d] caught causing excessive I/O (flavor: %d). Task I/O: %lld MB. [Limit : %lld MB per %lld secs]\n",
        pid, flavor, (lei.lei_balance / (1024 * 1024)), (lei.lei_limit / (1024 * 1024)), (lei.lei_refill_period / NSEC_PER_SEC));

    kr = send_resource_violation(send_disk_writes_violation, task, &lei, kRNFlagsNone);
    if (kr) {
        printf("send_resource_violation(disk_writes, ...): error %#x\n", kr);
    }

#ifdef EXC_RESOURCE_MONITORS
    code[0] = code[1] = 0;
    EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_IO);
    EXC_RESOURCE_ENCODE_FLAVOR(code[0], flavor);
    EXC_RESOURCE_IO_ENCODE_INTERVAL(code[0], (lei.lei_refill_period / NSEC_PER_SEC));
    EXC_RESOURCE_IO_ENCODE_LIMIT(code[0], (lei.lei_limit / (1024 * 1024)));
    EXC_RESOURCE_IO_ENCODE_OBSERVED(code[1], (lei.lei_balance / (1024 * 1024)));
    exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
#endif /* EXC_RESOURCE_MONITORS */
}

void
task_port_space_ast(__unused task_t task)
{
    uint32_t current_size, soft_limit, hard_limit;
    assert(task == current_task());
    bool should_notify = ipc_space_check_table_size_limit(task->itk_space,
        &current_size, &soft_limit, &hard_limit);
    if (should_notify) {
        SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_MACH_PORTS(task, current_size, soft_limit, hard_limit);
    }
}

#if CONFIG_PROC_RESOURCE_LIMITS
static mach_port_t
task_allocate_fatal_port(void)
{
    mach_port_t task_fatal_port = MACH_PORT_NULL;
    task_id_token_t token;

    kern_return_t kr = task_create_identity_token(current_task(), &token); /* Takes a reference on the token */
    if (kr) {
        return MACH_PORT_NULL;
    }
    task_fatal_port = ipc_kobject_alloc_port(token, IKOT_TASK_FATAL,
        IPC_KOBJECT_ALLOC_MAKE_SEND);

    task_id_token_set_port(token, task_fatal_port);

    return task_fatal_port;
}
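
/*
 * Lifecycle sketch for the fatal port (descriptive, not normative): the port
 * wraps a task identity token and is created with a single send right. That
 * right is shipped to userspace inside the resource-violation notification;
 * once every send right is gone, the no-senders handler below resolves the
 * token back to a task reference and delivers the fatal kill, so the kill is
 * deferred until the notification has actually been consumed or destroyed.
 */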

static void
task_fatal_port_no_senders(ipc_port_t port, __unused mach_port_mscount_t mscount)
{
    task_t task = TASK_NULL;
    kern_return_t kr;

    task_id_token_t token = ipc_kobject_get_stable(port, IKOT_TASK_FATAL);

    assert(token != NULL);
    if (token) {
        kr = task_identity_token_get_task_grp(token, &task, TASK_GRP_KERNEL); /* takes a reference on task */
        if (task) {
            task_bsdtask_kill(task);
            task_deallocate(task);
        }
        task_id_token_release(token); /* consumes ref given by notification */
    }
}
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

void __attribute__((noinline))
SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_MACH_PORTS(task_t task, uint32_t current_size, uint32_t soft_limit, uint32_t hard_limit)
{
    int pid = 0;
    char *procname = (char *) "unknown";
    __unused kern_return_t kr;
    __unused resource_notify_flags_t flags = kRNFlagsNone;
    __unused uint32_t limit;
    __unused mach_port_t task_fatal_port = MACH_PORT_NULL;
    mach_exception_data_type_t code[EXCEPTION_CODE_MAX];

    pid = proc_selfpid();
    if (get_bsdtask_info(task) != NULL) {
        procname = proc_name_address(get_bsdtask_info(task));
    }

    /*
     * Only kernel_task and launchd may be allowed to
     * have really large ipc space.
     */
    if (pid == 0 || pid == 1) {
        return;
    }

    os_log(OS_LOG_DEFAULT, "process %s[%d] caught allocating too many mach ports. \
        Num of ports allocated %u; \n", procname, pid, current_size);

    /* Abort the process if it has hit the system-wide limit for ipc port table size */
    if (!hard_limit && !soft_limit) {
        code[0] = code[1] = 0;
        EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_PORTS);
        EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_PORT_SPACE_FULL);
        EXC_RESOURCE_PORTS_ENCODE_PORTS(code[0], current_size);

        exception_info_t info = {
            .os_reason = OS_REASON_PORT_SPACE,
            .exception_type = EXC_RESOURCE,
            .mx_code = code[0],
            .mx_subcode = code[1]
        };

        exit_with_mach_exception(current_proc(), info, PX_DEBUG_NO_HONOR);
        return;
    }

#if CONFIG_PROC_RESOURCE_LIMITS
    if (hard_limit > 0) {
        flags |= kRNHardLimitFlag;
        limit = hard_limit;
        task_fatal_port = task_allocate_fatal_port();
        if (!task_fatal_port) {
            os_log(OS_LOG_DEFAULT, "process %s[%d] Unable to create task token ident object", procname, pid);
            task_bsdtask_kill(task);
        }
    } else {
        flags |= kRNSoftLimitFlag;
        limit = soft_limit;
    }

    kr = send_resource_violation_with_fatal_port(send_port_space_violation, task, (int64_t)current_size, (int64_t)limit, task_fatal_port, flags);
    if (kr) {
        os_log(OS_LOG_DEFAULT, "send_resource_violation(ports, ...): error %#x\n", kr);
    }
    if (task_fatal_port) {
        ipc_port_release_send(task_fatal_port);
    }
#endif /* CONFIG_PROC_RESOURCE_LIMITS */
}

#if CONFIG_PROC_RESOURCE_LIMITS
void
task_kqworkloop_ast(task_t task, int current_size, int soft_limit, int hard_limit)
{
    assert(task == current_task());
    return SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_KQWORKLOOPS(task, current_size, soft_limit, hard_limit);
}

void __attribute__((noinline))
SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_KQWORKLOOPS(task_t task, int current_size, int soft_limit, int hard_limit)
{
    int pid = 0;
    char *procname = (char *) "unknown";
#ifdef MACH_BSD
    pid = proc_selfpid();
    if (get_bsdtask_info(task) != NULL) {
        procname = proc_name_address(get_bsdtask_info(task));
    }
#endif
    if (pid == 0 || pid == 1) {
        return;
    }

    os_log(OS_LOG_DEFAULT, "process %s[%d] caught allocating too many kqworkloops. \
        Num of kqworkloops allocated %u; \n", procname, pid, current_size);

    int limit = 0;
    resource_notify_flags_t flags = kRNFlagsNone;
    mach_port_t task_fatal_port = MACH_PORT_NULL;
    if (hard_limit) {
        flags |= kRNHardLimitFlag;
        limit = hard_limit;

        task_fatal_port = task_allocate_fatal_port();
        if (task_fatal_port == MACH_PORT_NULL) {
            os_log(OS_LOG_DEFAULT, "process %s[%d] Unable to create task token ident object", procname, pid);
            task_bsdtask_kill(task);
        }
    } else {
        flags |= kRNSoftLimitFlag;
        limit = soft_limit;
    }

    kern_return_t kr;
    kr = send_resource_violation_with_fatal_port(send_kqworkloops_violation, task, (int64_t)current_size, (int64_t)limit, task_fatal_port, flags);
    if (kr) {
        os_log(OS_LOG_DEFAULT, "send_resource_violation_with_fatal_port(kqworkloops, ...): error %#x\n", kr);
    }
    if (task_fatal_port) {
        ipc_port_release_send(task_fatal_port);
    }
}


void
task_filedesc_ast(__unused task_t task, __unused int current_size, __unused int soft_limit, __unused int hard_limit)
{
    assert(task == current_task());
    SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task, current_size, soft_limit, hard_limit);
}

void __attribute__((noinline))
SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task_t task, int current_size, int soft_limit, int hard_limit)
{
    int pid = 0;
    char *procname = (char *) "unknown";
    kern_return_t kr;
    resource_notify_flags_t flags = kRNFlagsNone;
    int limit;
    mach_port_t task_fatal_port = MACH_PORT_NULL;

#ifdef MACH_BSD
    pid = proc_selfpid();
    if (get_bsdtask_info(task) != NULL) {
        procname = proc_name_address(get_bsdtask_info(task));
    }
#endif
    /*
     * Only kernel_task and launchd may be allowed to
     * have really large ipc space.
     */
    if (pid == 0 || pid == 1) {
        return;
    }

    os_log(OS_LOG_DEFAULT, "process %s[%d] caught allocating too many file descriptors. \
        Num of fds allocated %u; \n", procname, pid, current_size);

    if (hard_limit > 0) {
        flags |= kRNHardLimitFlag;
        limit = hard_limit;
        task_fatal_port = task_allocate_fatal_port();
        if (!task_fatal_port) {
            os_log(OS_LOG_DEFAULT, "process %s[%d] Unable to create task token ident object", procname, pid);
            task_bsdtask_kill(task);
        }
    } else {
        flags |= kRNSoftLimitFlag;
        limit = soft_limit;
    }

    kr = send_resource_violation_with_fatal_port(send_file_descriptors_violation, task, (int64_t)current_size, (int64_t)limit, task_fatal_port, flags);
    if (kr) {
        os_log(OS_LOG_DEFAULT, "send_resource_violation_with_fatal_port(filedesc, ...): error %#x\n", kr);
    }
    if (task_fatal_port) {
        ipc_port_release_send(task_fatal_port);
    }
}
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

/* Placeholders for the task set/get voucher interfaces */
kern_return_t
task_get_mach_voucher(
    task_t task,
    mach_voucher_selector_t __unused which,
    ipc_voucher_t *voucher)
{
    if (TASK_NULL == task) {
        return KERN_INVALID_TASK;
    }

    *voucher = NULL;
    return KERN_SUCCESS;
}

kern_return_t
task_set_mach_voucher(
    task_t task,
    ipc_voucher_t __unused voucher)
{
    if (TASK_NULL == task) {
        return KERN_INVALID_TASK;
    }

    return KERN_SUCCESS;
}

kern_return_t
task_swap_mach_voucher(
    __unused task_t task,
    __unused ipc_voucher_t new_voucher,
    ipc_voucher_t *in_out_old_voucher)
{
    /*
     * Currently this function is only called from a MIG generated
     * routine which doesn't release the reference on the voucher
     * addressed by in_out_old_voucher. To avoid leaking this reference,
     * a call to release it has been added here.
     */
    ipc_voucher_release(*in_out_old_voucher);
    OS_ANALYZER_SUPPRESS("81787115") return KERN_NOT_SUPPORTED;
}

void
task_set_gpu_role(task_t task, darwin_gpu_role_t gpu_role)
{
    task_lock(task);

    os_atomic_store(&task->t_gpu_role, gpu_role, relaxed);

    KDBG(IMPORTANCE_CODE(IMP_SET_GPU_ROLE, 0), gpu_role);

    task_unlock(task);
}

darwin_gpu_role_t
task_get_gpu_role(task_t task)
{
    return os_atomic_load(&task->t_gpu_role, relaxed);
}

boolean_t
task_is_gpu_denied(task_t task)
{
    return (os_atomic_load(&task->t_gpu_role, relaxed) == PRIO_DARWIN_GPU_DENY) ? TRUE : FALSE;
}

/*
 * Task policy termination uses this path to clear the bit the final time
 * during the termination flow, and the TASK_POLICY_TERMINATED bit guarantees
 * that it won't be changed again on a terminated task.
 */
bool
task_set_game_mode_locked(task_t task, bool enabled)
{
    task_lock_assert_owned(task);

    if (enabled) {
        assert(proc_get_effective_task_policy(task, TASK_POLICY_TERMINATED) == 0);
    }

    bool previously_enabled = task_get_game_mode(task);
    bool needs_update = false;
    uint32_t new_count = 0;

    if (enabled) {
        task->t_flags |= TF_GAME_MODE;
    } else {
        task->t_flags &= ~TF_GAME_MODE;
    }

    if (enabled && !previously_enabled) {
        if (task_coalition_adjust_game_mode_count(task, 1, &new_count) && (new_count == 1)) {
            needs_update = true;
        }
    } else if (!enabled && previously_enabled) {
        if (task_coalition_adjust_game_mode_count(task, -1, &new_count) && (new_count == 0)) {
            needs_update = true;
        }
    }

    return needs_update;
}

void
task_set_game_mode(task_t task, bool enabled)
{
    bool needs_update = false;

    task_lock(task);

    /* After termination, further updates are no longer effective */
    if (proc_get_effective_task_policy(task, TASK_POLICY_TERMINATED) == 0) {
        needs_update = task_set_game_mode_locked(task, enabled);
    }

    task_unlock(task);

#if CONFIG_THREAD_GROUPS
    if (needs_update) {
        task_coalition_thread_group_game_mode_update(task);
    }
#endif /* CONFIG_THREAD_GROUPS */
}

bool
task_get_game_mode(task_t task)
{
    /* We don't need the lock to read this flag */
    return task->t_flags & TF_GAME_MODE;
}

bool
task_set_carplay_mode_locked(task_t task, bool enabled)
{
    task_lock_assert_owned(task);

    if (enabled) {
        assert(proc_get_effective_task_policy(task, TASK_POLICY_TERMINATED) == 0);
    }

    bool previously_enabled = task_get_carplay_mode(task);
    bool needs_update = false;
    uint32_t new_count = 0;

    if (enabled) {
        task->t_flags |= TF_CARPLAY_MODE;
    } else {
        task->t_flags &= ~TF_CARPLAY_MODE;
    }

    if (enabled && !previously_enabled) {
        if (task_coalition_adjust_carplay_mode_count(task, 1, &new_count) && (new_count == 1)) {
            needs_update = true;
        }
    } else if (!enabled && previously_enabled) {
        if (task_coalition_adjust_carplay_mode_count(task, -1, &new_count) && (new_count == 0)) {
            needs_update = true;
        }
    }
    return needs_update;
}

void
task_set_carplay_mode(task_t task, bool enabled)
{
    bool needs_update = false;

    task_lock(task);

    /* After termination, further updates are no longer effective */
    if (proc_get_effective_task_policy(task, TASK_POLICY_TERMINATED) == 0) {
        needs_update = task_set_carplay_mode_locked(task, enabled);
    }

    task_unlock(task);

#if CONFIG_THREAD_GROUPS
    if (needs_update) {
        task_coalition_thread_group_carplay_mode_update(task);
    }
#endif /* CONFIG_THREAD_GROUPS */
}

bool
task_get_carplay_mode(task_t task)
{
    /* We don't need the lock to read this flag */
    return task->t_flags & TF_CARPLAY_MODE;
}

uint64_t
get_task_memory_region_count(task_t task)
{
    vm_map_t map;
    map = (task == kernel_task) ? kernel_map : task->map;
    return (uint64_t)get_map_nentries(map);
}

static void
kdebug_trace_dyld_internal(uint32_t base_code,
    struct dyld_kernel_image_info *info)
{
    static_assert(sizeof(info->uuid) >= 16);

#if defined(__LP64__)
    uint64_t *uuid = (uint64_t *)&(info->uuid);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code), uuid[0],
        uuid[1], info->load_addr,
        (uint64_t)info->fsid.val[0] | ((uint64_t)info->fsid.val[1] << 32),
        0);
    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 1),
        (uint64_t)info->fsobjid.fid_objno |
        ((uint64_t)info->fsobjid.fid_generation << 32),
        0, 0, 0, 0);
#else /* defined(__LP64__) */
    uint32_t *uuid = (uint32_t *)&(info->uuid);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 2), uuid[0],
        uuid[1], uuid[2], uuid[3], 0);
    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 3),
        (uint32_t)info->load_addr, info->fsid.val[0], info->fsid.val[1],
        info->fsobjid.fid_objno, 0);
    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 4),
        info->fsobjid.fid_generation, 0, 0, 0, 0);
#endif /* !defined(__LP64__) */
}
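
/*
 * For reference, the LP64 path above packs the payload as (illustrative
 * layout, read off the calls):
 *
 *     event base_code + 0: uuid[0..7] | uuid[8..15] | load_addr | fsid (lo|hi)
 *     event base_code + 1: fid_objno | fid_generation << 32
 *
 * The 32-bit variant needs three events (+2, +3, +4) because each kdebug
 * argument is only register-sized there.
 */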

static kern_return_t
kdebug_trace_dyld(task_t task, uint32_t base_code,
    vm_map_copy_t infos_copy, mach_msg_type_number_t infos_len)
{
    kern_return_t kr;
    dyld_kernel_image_info_array_t infos;
    vm_map_offset_t map_data;
    vm_offset_t data;

    if (!infos_copy) {
        return KERN_INVALID_ADDRESS;
    }

    if (!kdebug_enable ||
        !kdebug_debugid_enabled(KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, 0))) {
        vm_map_copy_discard(infos_copy);
        return KERN_SUCCESS;
    }

    if (task == NULL || task != current_task()) {
        return KERN_INVALID_TASK;
    }

    kr = vm_map_copyout(ipc_kernel_map, &map_data, (vm_map_copy_t)infos_copy);
    if (kr != KERN_SUCCESS) {
        return kr;
    }

    infos = CAST_DOWN(dyld_kernel_image_info_array_t, map_data);

    for (mach_msg_type_number_t i = 0; i < infos_len; i++) {
        kdebug_trace_dyld_internal(base_code, &(infos[i]));
    }

    data = CAST_DOWN(vm_offset_t, map_data);
    mach_vm_deallocate(ipc_kernel_map, data, infos_len * sizeof(infos[0]));
    return KERN_SUCCESS;
}

kern_return_t
task_register_dyld_image_infos(task_t task,
    dyld_kernel_image_info_array_t infos_copy,
    mach_msg_type_number_t infos_len)
{
    return kdebug_trace_dyld(task, DBG_DYLD_UUID_MAP_A,
               (vm_map_copy_t)infos_copy, infos_len);
}

kern_return_t
task_unregister_dyld_image_infos(task_t task,
    dyld_kernel_image_info_array_t infos_copy,
    mach_msg_type_number_t infos_len)
{
    return kdebug_trace_dyld(task, DBG_DYLD_UUID_UNMAP_A,
               (vm_map_copy_t)infos_copy, infos_len);
}

kern_return_t
task_get_dyld_image_infos(__unused task_t task,
    __unused dyld_kernel_image_info_array_t *dyld_images,
    __unused mach_msg_type_number_t *dyld_imagesCnt)
{
    return KERN_NOT_SUPPORTED;
}

kern_return_t
task_register_dyld_shared_cache_image_info(task_t task,
    dyld_kernel_image_info_t cache_img,
    __unused boolean_t no_cache,
    __unused boolean_t private_cache)
{
    if (task == NULL || task != current_task()) {
        return KERN_INVALID_TASK;
    }

    kdebug_trace_dyld_internal(DBG_DYLD_UUID_SHARED_CACHE_A, &cache_img);
    return KERN_SUCCESS;
}

kern_return_t
task_register_dyld_set_dyld_state(__unused task_t task,
    __unused uint8_t dyld_state)
{
    return KERN_NOT_SUPPORTED;
}

kern_return_t
task_register_dyld_get_process_state(__unused task_t task,
    __unused dyld_kernel_process_info_t *dyld_process_state)
{
    return KERN_NOT_SUPPORTED;
}

kern_return_t
task_inspect(task_inspect_t task_insp, task_inspect_flavor_t flavor,
    task_inspect_info_t info_out, mach_msg_type_number_t *size_in_out)
{
#if CONFIG_PERVASIVE_CPI
    task_t task = (task_t)task_insp;
    kern_return_t kr = KERN_SUCCESS;
    mach_msg_type_number_t size;

    if (task == TASK_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    size = *size_in_out;

    switch (flavor) {
    case TASK_INSPECT_BASIC_COUNTS: {
        struct task_inspect_basic_counts *bc =
            (struct task_inspect_basic_counts *)info_out;
        struct recount_usage stats = { 0 };
        if (size < TASK_INSPECT_BASIC_COUNTS_COUNT) {
            kr = KERN_INVALID_ARGUMENT;
            break;
        }

        recount_sum(&recount_task_plan, task->tk_recount.rtk_lifetime, &stats);
        bc->instructions = recount_usage_instructions(&stats);
        bc->cycles = recount_usage_cycles(&stats);
        size = TASK_INSPECT_BASIC_COUNTS_COUNT;
        break;
    }
    default:
        kr = KERN_INVALID_ARGUMENT;
        break;
    }

    if (kr == KERN_SUCCESS) {
        *size_in_out = size;
    }
    return kr;
#else /* CONFIG_PERVASIVE_CPI */
#pragma unused(task_insp, flavor, info_out, size_in_out)
    return KERN_NOT_SUPPORTED;
#endif /* !CONFIG_PERVASIVE_CPI */
}

#if CONFIG_SECLUDED_MEMORY
int num_tasks_can_use_secluded_mem = 0;

void
task_set_can_use_secluded_mem(
    task_t task,
    boolean_t can_use_secluded_mem)
{
    if (!task->task_could_use_secluded_mem) {
        return;
    }
    task_lock(task);
    task_set_can_use_secluded_mem_locked(task, can_use_secluded_mem);
    task_unlock(task);
}

void
task_set_can_use_secluded_mem_locked(
    task_t task,
    boolean_t can_use_secluded_mem)
{
    assert(task->task_could_use_secluded_mem);
    if (can_use_secluded_mem &&
        secluded_for_apps && /* global boot-arg */
        !task->task_can_use_secluded_mem) {
        assert(num_tasks_can_use_secluded_mem >= 0);
        OSAddAtomic(+1,
            (volatile SInt32 *)&num_tasks_can_use_secluded_mem);
        task->task_can_use_secluded_mem = TRUE;
    } else if (!can_use_secluded_mem &&
        task->task_can_use_secluded_mem) {
        assert(num_tasks_can_use_secluded_mem > 0);
        OSAddAtomic(-1,
            (volatile SInt32 *)&num_tasks_can_use_secluded_mem);
        task->task_can_use_secluded_mem = FALSE;
    }
}

void
task_set_could_use_secluded_mem(
    task_t task,
    boolean_t could_use_secluded_mem)
{
    task->task_could_use_secluded_mem = !!could_use_secluded_mem;
}

void
task_set_could_also_use_secluded_mem(
    task_t task,
    boolean_t could_also_use_secluded_mem)
{
    task->task_could_also_use_secluded_mem = !!could_also_use_secluded_mem;
}

boolean_t
task_can_use_secluded_mem(
    task_t task,
    boolean_t is_alloc)
{
    if (task->task_can_use_secluded_mem) {
        assert(task->task_could_use_secluded_mem);
        assert(num_tasks_can_use_secluded_mem > 0);
        return TRUE;
    }
    if (task->task_could_also_use_secluded_mem &&
        num_tasks_can_use_secluded_mem > 0) {
        assert(num_tasks_can_use_secluded_mem > 0);
        return TRUE;
    }

    /*
     * If a single task is using more than some large amount of
     * memory (i.e. secluded_shutoff_trigger) and is approaching
     * its task limit, allow it to dip into secluded and begin
     * suppression of rebuilding secluded memory until that task exits.
     */
    if (is_alloc && secluded_shutoff_trigger != 0) {
        uint64_t phys_used = get_task_phys_footprint(task);
        uint64_t limit = get_task_phys_footprint_limit(task);
        if (phys_used > secluded_shutoff_trigger &&
            limit > secluded_shutoff_trigger &&
            phys_used > limit - secluded_shutoff_headroom) {
            start_secluded_suppression(task);
            return TRUE;
        }
    }

    return FALSE;
}

boolean_t
task_could_use_secluded_mem(
    task_t task)
{
    return task->task_could_use_secluded_mem;
}

boolean_t
task_could_also_use_secluded_mem(
    task_t task)
{
    return task->task_could_also_use_secluded_mem;
}
#endif /* CONFIG_SECLUDED_MEMORY */

queue_head_t *
task_io_user_clients(task_t task)
{
    return &task->io_user_clients;
}

void
task_set_message_app_suspended(task_t task, boolean_t enable)
{
    task->message_app_suspended = enable;
}

void
task_copy_fields_for_exec(task_t dst_task, task_t src_task)
{
    dst_task->vtimers = src_task->vtimers;
}

#if DEVELOPMENT || DEBUG
int vm_region_footprint = 0;
#endif /* DEVELOPMENT || DEBUG */

boolean_t
task_self_region_footprint(void)
{
#if DEVELOPMENT || DEBUG
    if (vm_region_footprint) {
        /* system-wide override */
        return TRUE;
    }
#endif /* DEVELOPMENT || DEBUG */
    return current_task()->task_region_footprint;
}

void
task_self_region_footprint_set(
    boolean_t newval)
{
    task_t curtask;

    curtask = current_task();
    task_lock(curtask);
    if (newval) {
        curtask->task_region_footprint = TRUE;
    } else {
        curtask->task_region_footprint = FALSE;
    }
    task_unlock(curtask);
}

int
task_self_region_info_flags(void)
{
    return current_task()->task_region_info_flags;
}

kern_return_t
task_self_region_info_flags_set(
    int newval)
{
    task_t curtask;
    kern_return_t err = KERN_SUCCESS;

    curtask = current_task();
    task_lock(curtask);
    curtask->task_region_info_flags = newval;
    /* check for overflow (flag added without increasing bitfield size?) */
    if (curtask->task_region_info_flags != newval) {
        err = KERN_INVALID_ARGUMENT;
    }
    task_unlock(curtask);

    return err;
}
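
/*
 * Note (illustrative): the store-then-compare above is a cheap guard against
 * a value that does not fit the bitfield. If task_region_info_flags were,
 * say, a 1-bit field, storing newval = 0x2 would read back as 0 and the
 * caller gets KERN_INVALID_ARGUMENT instead of a silent truncation.
 */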

void
task_set_darkwake_mode(task_t task, boolean_t set_mode)
{
    assert(task);

    task_lock(task);

    if (set_mode) {
        task->t_flags |= TF_DARKWAKE_MODE;
    } else {
        task->t_flags &= ~(TF_DARKWAKE_MODE);
    }

    task_unlock(task);
}

boolean_t
task_get_darkwake_mode(task_t task)
{
    assert(task);
    return (task->t_flags & TF_DARKWAKE_MODE) != 0;
}

/*
 * Set task default behavior for EXC_GUARD variants that have settable behavior.
 *
 * Platform binaries typically have one behavior, third parties another -
 * but there are special exceptions we may need to account for.
 */
void
task_set_exc_guard_default(
    task_t task,
    const char *name,
    unsigned long namelen,
    boolean_t is_simulated,
    uint32_t platform,
    uint32_t sdk)
{
    if (task_get_platform_restrictions_version(task) >= 1) {
        /* set exc guard default behavior for platform restrictions binaries */
        task->task_exc_guard = (task_exc_guard_default & TASK_EXC_GUARD_ALL);

        if (1 == task_pid(task)) {
            /* special flags for inittask - deliver every instance as corpse */
            task->task_exc_guard = _TASK_EXC_GUARD_ALL_CORPSE;
        } else if (task_exc_guard_default & TASK_EXC_GUARD_HONOR_NAMED_DEFAULTS) {
            /* honor by-name default setting overrides */

            int count = sizeof(task_exc_guard_named_defaults) / sizeof(struct task_exc_guard_named_default);

            for (int i = 0; i < count; i++) {
                const struct task_exc_guard_named_default *named_default =
                    &task_exc_guard_named_defaults[i];
                if (strncmp(named_default->name, name, namelen) == 0 &&
                    strlen(named_default->name) == namelen) {
                    task->task_exc_guard = named_default->behavior;
                    break;
                }
            }
        }
    } else {
        /* set exc guard default behavior for third-party code */
        task->task_exc_guard = ((task_exc_guard_default >> TASK_EXC_GUARD_THIRD_PARTY_DEFAULT_SHIFT) & TASK_EXC_GUARD_ALL);
    }

    if (is_simulated) {
        /* If simulated and built against pre-iOS 15 SDK, disable all EXC_GUARD */
        if ((platform == PLATFORM_IOSSIMULATOR && sdk < 0xf0000) ||
            (platform == PLATFORM_TVOSSIMULATOR && sdk < 0xf0000) ||
            (platform == PLATFORM_WATCHOSSIMULATOR && sdk < 0x80000)) {
            task->task_exc_guard = TASK_EXC_GUARD_NONE;
        }
    }
}

kern_return_t
task_get_exc_guard_behavior(
    task_t task,
    task_exc_guard_behavior_t *behaviorp)
{
    if (task == TASK_NULL) {
        return KERN_INVALID_TASK;
    }
    *behaviorp = task->task_exc_guard;
    return KERN_SUCCESS;
}

kern_return_t
task_set_exc_guard_behavior(
    task_t task,
    task_exc_guard_behavior_t new_behavior)
{
    if (task == TASK_NULL) {
        return KERN_INVALID_TASK;
    }
    if (new_behavior & ~TASK_EXC_GUARD_ALL) {
        return KERN_INVALID_VALUE;
    }

    /* limit setting to that allowed for this config */
    new_behavior = new_behavior & task_exc_guard_config_mask;

#if !defined (DEBUG) && !defined (DEVELOPMENT)
    /* On release kernels, only allow _upgrading_ exc guard behavior */
    task_exc_guard_behavior_t cur_behavior;

    os_atomic_rmw_loop(&task->task_exc_guard, cur_behavior, new_behavior, relaxed, {
        if ((cur_behavior & task_exc_guard_no_unset_mask) & ~(new_behavior & task_exc_guard_no_unset_mask)) {
            os_atomic_rmw_loop_give_up(return KERN_DENIED);
        }

        if ((new_behavior & task_exc_guard_no_set_mask) & ~(cur_behavior & task_exc_guard_no_set_mask)) {
            os_atomic_rmw_loop_give_up(return KERN_DENIED);
        }

        /* no restrictions on CORPSE bit */
    });
#else
    task->task_exc_guard = new_behavior;
#endif
    return KERN_SUCCESS;
}
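
/*
 * Worked example for the release-kernel loop above (hypothetical masks): if
 * task_exc_guard_no_unset_mask = 0x1 and task_exc_guard_no_set_mask = 0x2,
 * a task currently at 0x1 may not move to 0x0 (bit 0 cannot be cleared once
 * set), and a task currently at 0x0 may not move to 0x2 (bit 1 cannot be
 * newly set); the CORPSE bit is exempt from both checks. The rmw loop
 * retries until the compare-exchange lands on an unchanged current value.
 */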

kern_return_t
task_set_corpse_forking_behavior(task_t task, task_corpse_forking_behavior_t behavior)
{
#if DEVELOPMENT || DEBUG
    if (task == TASK_NULL) {
        return KERN_INVALID_TASK;
    }

    task_lock(task);
    if (behavior & TASK_CORPSE_FORKING_DISABLED_MEM_DIAG) {
        task->t_flags |= TF_NO_CORPSE_FORKING;
    } else {
        task->t_flags &= ~TF_NO_CORPSE_FORKING;
    }
    task_unlock(task);

    return KERN_SUCCESS;
#else
    (void)task;
    (void)behavior;
    return KERN_NOT_SUPPORTED;
#endif
}

boolean_t
task_corpse_forking_disabled(task_t task)
{
    boolean_t disabled = FALSE;

    task_lock(task);
    disabled = (task->t_flags & TF_NO_CORPSE_FORKING);
    task_unlock(task);

    return disabled;
}

#if __arm64__
extern int legacy_footprint_entitlement_mode;
extern void memorystatus_act_on_legacy_footprint_entitlement(struct proc *, boolean_t);
extern void memorystatus_act_on_ios13extended_footprint_entitlement(struct proc *);


void
task_set_legacy_footprint(
    task_t task)
{
    task_lock(task);
    task->task_legacy_footprint = TRUE;
    task_unlock(task);
}

void
task_set_extra_footprint_limit(
    task_t task)
{
    if (task->task_extra_footprint_limit) {
        return;
    }
    task_lock(task);
    if (task->task_extra_footprint_limit) {
        task_unlock(task);
        return;
    }
    task->task_extra_footprint_limit = TRUE;
    task_unlock(task);
    memorystatus_act_on_legacy_footprint_entitlement(get_bsdtask_info(task), TRUE);
}

void
task_set_ios13extended_footprint_limit(
    task_t task)
{
    if (task->task_ios13extended_footprint_limit) {
        return;
    }
    task_lock(task);
    if (task->task_ios13extended_footprint_limit) {
        task_unlock(task);
        return;
    }
    task->task_ios13extended_footprint_limit = TRUE;
    task_unlock(task);
    memorystatus_act_on_ios13extended_footprint_entitlement(get_bsdtask_info(task));
}
#endif /* __arm64__ */

static inline ledger_amount_t
task_ledger_get_balance(
    ledger_t ledger,
    int ledger_idx)
{
    ledger_amount_t amount;
    amount = 0;
    ledger_get_balance(ledger, ledger_idx, &amount);
    return amount;
}

/*
 * Gather the amount of memory counted in a task's footprint due to
 * being in a specific set of ledgers.
 */
void
task_ledgers_footprint(
    ledger_t ledger,
    ledger_amount_t *ledger_resident,
    ledger_amount_t *ledger_compressed)
{
    *ledger_resident = 0;
    *ledger_compressed = 0;

    /* purgeable non-volatile memory */
    *ledger_resident += task_ledger_get_balance(ledger, task_ledgers.purgeable_nonvolatile);
    *ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.purgeable_nonvolatile_compressed);

    /* "default" tagged memory */
    *ledger_resident += task_ledger_get_balance(ledger, task_ledgers.tagged_footprint);
    *ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.tagged_footprint_compressed);

    /* "network" currently never counts in the footprint... */

    /* "media" tagged memory */
    *ledger_resident += task_ledger_get_balance(ledger, task_ledgers.media_footprint);
    *ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.media_footprint_compressed);

    /* "graphics" tagged memory */
    *ledger_resident += task_ledger_get_balance(ledger, task_ledgers.graphics_footprint);
    *ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.graphics_footprint_compressed);

    /* "neural" tagged memory */
    *ledger_resident += task_ledger_get_balance(ledger, task_ledgers.neural_footprint);
    *ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.neural_footprint_compressed);
}
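
/*
 * Equivalently (illustrative summary of the sums above):
 *
 *     resident   = purgeable_nonvolatile + tagged + media + graphics + neural
 *     compressed = the matching *_compressed ledger balances
 *
 * with "network"-tagged memory deliberately excluded from both totals.
 */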

#if CONFIG_MEMORYSTATUS
void
task_ledger_settle_dirty_time(task_t t)
{
    task_lock(t);
    task_ledger_settle_dirty_time_locked(t);
    task_unlock(t);
}

/*
 * Credit any outstanding task dirty time to the ledger.
 * memstat_dirty_start is pushed forward to prevent any possibility of double
 * counting, making it safe to call this as often as necessary to ensure that
 * anyone reading the ledger gets up-to-date information.
 */
void
task_ledger_settle_dirty_time_locked(task_t t)
{
    task_lock_assert_owned(t);

    uint64_t start = t->memstat_dirty_start;
    if (start) {
        uint64_t now = mach_absolute_time();

        uint64_t duration;
        absolutetime_to_nanoseconds(now - start, &duration);

        ledger_t ledger = get_task_ledger(t);
        ledger_credit(ledger, task_ledgers.memorystatus_dirty_time, duration);

        t->memstat_dirty_start = now;
    }
}
#endif /* CONFIG_MEMORYSTATUS */
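
/*
 * Example timeline for the settle logic above (illustrative numbers): if
 * memstat_dirty_start == t0 and the task is still dirty at t1, the call
 * credits (t1 - t0) nanoseconds and moves memstat_dirty_start to t1; a later
 * settle at t2 credits only (t2 - t1). Pushing the start forward is what
 * makes back-to-back settles safe from double counting.
 */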

void
task_ledger_settle(task_t t)
{
#if CONFIG_MEMORYSTATUS
    task_lock(t);
    /* Settle memorystatus dirty time */
    task_ledger_settle_dirty_time_locked(t);
    task_unlock(t);
#endif /* CONFIG_MEMORYSTATUS */

#if CONFIG_DEFERRED_RECLAIM
    vm_deferred_reclamation_settle_ledger(t);
#endif /* CONFIG_DEFERRED_RECLAIM */
}

void
task_set_memory_ownership_transfer(
    task_t task,
    boolean_t value)
{
    task_lock(task);
    task->task_can_transfer_memory_ownership = !!value;
    task_unlock(task);
}

#if DEVELOPMENT || DEBUG

void
task_set_no_footprint_for_debug(task_t task, boolean_t value)
{
    task_lock(task);
    task->task_no_footprint_for_debug = !!value;
    task_unlock(task);
}

int
task_get_no_footprint_for_debug(task_t task)
{
    return task->task_no_footprint_for_debug;
}

#endif /* DEVELOPMENT || DEBUG */

void
task_copy_vmobjects(task_t task, vm_object_query_t query, size_t len, size_t *num)
{
    vm_object_t find_vmo;
    size_t size = 0;

    /*
     * Allocate a save area for FP state before taking task_objq lock,
     * if necessary, to ensure that VM_KERNEL_ADDRHASH() doesn't cause
     * an FP state allocation while holding VM locks.
     */
    ml_fp_save_area_prealloc();

    task_objq_lock(task);
    if (query != NULL) {
        queue_iterate(&task->task_objq, find_vmo, vm_object_t, task_objq)
        {
            vm_object_query_t p = &query[size++];

            /* make sure to not overrun */
            if (size * sizeof(vm_object_query_data_t) > len) {
                --size;
                break;
            }

            bzero(p, sizeof(*p));
            p->object_id = (vm_object_id_t) VM_KERNEL_ADDRHASH(find_vmo);
            p->virtual_size = find_vmo->internal ? find_vmo->vo_size : 0;
            p->resident_size = find_vmo->resident_page_count * PAGE_SIZE;
            p->wired_size = find_vmo->wired_page_count * PAGE_SIZE;
            p->reusable_size = find_vmo->reusable_page_count * PAGE_SIZE;
            p->vo_no_footprint = find_vmo->vo_no_footprint;
            p->vo_ledger_tag = find_vmo->vo_ledger_tag;
            p->purgable = find_vmo->purgable;

            if (find_vmo->internal && find_vmo->pager_created && find_vmo->pager != NULL) {
                p->compressed_size = vm_compressor_pager_get_count(find_vmo->pager) * PAGE_SIZE;
            } else {
                p->compressed_size = 0;
            }
        }
    } else {
        size = (size_t)task->task_owned_objects;
    }
    task_objq_unlock(task);

    *num = size;
}

void
task_get_owned_vmobjects(task_t task, size_t buffer_size, vmobject_list_output_t buffer, size_t *output_size, size_t *entries)
{
    assert(output_size);
    assert(entries);

    /* copy the vmobjects and vmobject data out of the task */
    if (buffer_size == 0) {
        task_copy_vmobjects(task, NULL, 0, entries);
        *output_size = (*entries > 0) ? *entries * sizeof(vm_object_query_data_t) + sizeof(*buffer) : 0;
    } else {
        assert(buffer);
        task_copy_vmobjects(task, &buffer->data[0], buffer_size - sizeof(*buffer), entries);
        buffer->entries = (uint64_t)*entries;
        *output_size = *entries * sizeof(vm_object_query_data_t) + sizeof(*buffer);
    }
}
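
/*
 * Typical two-call pattern (sketch; task_store_owned_vmobject_info() below
 * does exactly this):
 *
 *     size_t out_size = 0, entries = 0;
 *     task_get_owned_vmobjects(task, 0, NULL, &out_size, &entries);  // size probe
 *     vmobject_list_output_t buf = kalloc_data(out_size, Z_WAITOK);
 *     task_get_owned_vmobjects(task, out_size, buf, &out_size, &entries);
 *
 * The probe call only reads task_owned_objects; the second call fills buf,
 * and may legitimately return a different entry count since the task_objq
 * lock is dropped between the two calls.
 */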

static void
task_store_owned_vmobject_info(task_t to_task, task_t from_task)
{
    size_t buffer_size;
    vmobject_list_output_t buffer;
    size_t output_size;
    size_t entries;

    /* get the size, allocate a buffer, and populate */
    entries = 0;
    output_size = 0;
    task_get_owned_vmobjects(from_task, 0, NULL, &output_size, &entries);

    if (output_size) {
        buffer_size = output_size;
        buffer = kalloc_data(buffer_size, Z_WAITOK);

        if (buffer) {
            entries = 0;
            output_size = 0;

            task_get_owned_vmobjects(from_task, buffer_size, buffer, &output_size, &entries);

            task_lock(to_task);

            if (!entries || (to_task->corpse_vmobject_list != NULL)) {
                kfree_data(buffer, buffer_size);
                task_unlock(to_task);
                return;
            }

            to_task->corpse_vmobject_list = buffer;
            to_task->corpse_vmobject_list_size = buffer_size;

            task_unlock(to_task);
        }
    }
}

void
task_set_filter_msg_flag(
    task_t task,
    boolean_t flag)
{
    assert(task != TASK_NULL);

    if (flag) {
        task_ro_flags_set(task, TFRO_FILTER_MSG);
    } else {
        task_ro_flags_clear(task, TFRO_FILTER_MSG);
    }
}

boolean_t
task_get_filter_msg_flag(
    task_t task)
{
    if (!task) {
        return false;
    }

    return (task_ro_flags_get(task) & TFRO_FILTER_MSG) ? TRUE : FALSE;
}

bool
task_is_exotic(
    task_t task)
{
    if (task == TASK_NULL) {
        return false;
    }
    return vm_map_is_exotic(get_task_map(task));
}

bool
task_is_alien(
    task_t task)
{
    if (task == TASK_NULL) {
        return false;
    }
    return vm_map_is_alien(get_task_map(task));
}


#if CONFIG_MACF
uint8_t *
mac_task_get_mach_filter_mask(task_t task)
{
    assert(task);
    return task_get_mach_trap_filter_mask(task);
}

uint8_t *
mac_task_get_kobj_filter_mask(task_t task)
{
    assert(task);
    return task_get_mach_kobj_filter_mask(task);
}

/* Set the filter mask for Mach traps. */
void
mac_task_set_mach_filter_mask(task_t task, uint8_t *maskptr)
{
    assert(task);

    task_set_mach_trap_filter_mask(task, maskptr);
}

/* Set the filter mask for kobject msgs. */
void
mac_task_set_kobj_filter_mask(task_t task, uint8_t *maskptr)
{
    assert(task);

    task_set_mach_kobj_filter_mask(task, maskptr);
}

/* Hook for mach trap/sc filter evaluation policy. */
SECURITY_READ_ONLY_LATE(mac_task_mach_filter_cbfunc_t) mac_task_mach_trap_evaluate = NULL;

/* Hook for kobj message filter evaluation policy. */
SECURITY_READ_ONLY_LATE(mac_task_kobj_filter_cbfunc_t) mac_task_kobj_msg_evaluate = NULL;

/* Set the callback hooks for the filtering policy. */
int
mac_task_register_filter_callbacks(
    const mac_task_mach_filter_cbfunc_t mach_cbfunc,
    const mac_task_kobj_filter_cbfunc_t kobj_cbfunc)
{
    if (mach_cbfunc != NULL) {
        if (mac_task_mach_trap_evaluate != NULL) {
            return KERN_FAILURE;
        }
        mac_task_mach_trap_evaluate = mach_cbfunc;
    }
    if (kobj_cbfunc != NULL) {
        if (mac_task_kobj_msg_evaluate != NULL) {
            return KERN_FAILURE;
        }
        mac_task_kobj_msg_evaluate = kobj_cbfunc;
    }

    return KERN_SUCCESS;
}
#endif /* CONFIG_MACF */

#if CONFIG_ROSETTA
bool
task_is_translated(task_t task)
{
    extern boolean_t proc_is_translated(struct proc* p);
    return task && proc_is_translated(get_bsdtask_info(task));
}
#endif

/* Task runtime security mitigations configuration. */
#define TASK_SECURITY_CONFIG_HELPER_DEFINE(suffix, checked) \
    bool task_has_##suffix(task_t task) \
    { \
        assert(task); \
        return (task->security_config. suffix); \
    } \
    \
    void task_set_##suffix(task_t task) \
    { \
        assert(task); \
        task->security_config. suffix = true; \
    } \
    \
    void task_clear_##suffix(task_t task) \
    { \
        assert(task); \
        task->security_config. suffix = false; \
    }
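
/*
 * For example, the invocation TASK_SECURITY_CONFIG_HELPER_DEFINE(tpro, true)
 * below expands to task_has_tpro(), task_set_tpro() and task_clear_tpro(),
 * each reading or writing the matching bit of task->security_config. (Note
 * that the second macro argument is currently unused by the expansion.)
 */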
10033
10034 uint32_t
task_get_security_config(task_t task)10035 task_get_security_config(task_t task)
10036 {
10037 assert(task);
10038 return (uint32_t)(task->security_config.value);
10039 }
10040
TASK_SECURITY_CONFIG_HELPER_DEFINE(hardened_heap,true)10041 TASK_SECURITY_CONFIG_HELPER_DEFINE(hardened_heap, true)
10042 TASK_SECURITY_CONFIG_HELPER_DEFINE(tpro, true)
10043 TASK_SECURITY_CONFIG_HELPER_DEFINE(guard_objects, true)
10044
10045 uint8_t
10046 task_get_platform_restrictions_version(task_t task)
10047 {
10048 assert(task);
10049 return task->security_config.platform_restrictions_version;
10050 }
10051
10052 void
task_set_platform_restrictions_version(task_t task,uint64_t version)10053 task_set_platform_restrictions_version(task_t task, uint64_t version)
10054 {
10055 assert(task);
10056 /* platform_restrictions_version is a 3-bit field */
10057 if (version < 8) {
10058 task->security_config.platform_restrictions_version = (uint8_t)version;
10059 }
10060 }
10061
10062 uint8_t
task_get_hardened_process_version(task_t task)10063 task_get_hardened_process_version(task_t task)
10064 {
10065 assert(task);
10066 return task->security_config.hardened_process_version;
10067 }
10068 void
task_set_hardened_process_version(task_t task,uint64_t version)10069 task_set_hardened_process_version(task_t task, uint64_t version)
10070 {
10071 assert(task);
10072 task->security_config.hardened_process_version = (uint8_t)version;
10073 }
10074
10075 #if HAS_MTE || HAS_MTE_EMULATION_SHIMS
10076 /*
10077 * task_has_sec() (really: task_has_mte()) means:
10078 *
10079 * 1. task->map allows vm_allocate(VM_FLAGS_MTE); i.e., you can create *new*
10080 * tagged memory in that map.
10081 * 2. When this task is running, MTE tag checking is enabled (SCTLR.ATA0=1).
10082 * 3. task is subject to VM restriction policies.
10083 */
10084 TASK_SECURITY_CONFIG_HELPER_DEFINE(sec, false)
10085
10086 #define TASK_MTE_POLICY_HELPER_DEFINE(suffix, policy) \
10087 bool task_has_sec_##suffix(task_t task) \
10088 { \
10089 if (__improbable(!task)) { \
10090 panic("NULL task in %s", __func__); \
10091 } \
10092 if (__improbable(task == kernel_task)) { \
10093 return false; \
10094 } \
10095 if (__improbable(!task_has_sec(task))) { \
10096 return false; \
10097 } \
10098 return ((os_atomic_load(&task->task_sec_policy, relaxed)) & policy) != 0; \
10099 } \
10100 void task_set_sec_##suffix(task_t task) \
10101 { \
10102 os_atomic_or(&task->task_sec_policy, policy, relaxed); \
10103 }
10104
10105 TASK_MTE_POLICY_HELPER_DEFINE(soft_mode, TASK_SEC_POLICY_SOFT_MODE);
10106 TASK_MTE_POLICY_HELPER_DEFINE(user_data, TASK_SEC_POLICY_USER_DATA);
10107 TASK_MTE_POLICY_HELPER_DEFINE(inherit, TASK_SEC_POLICY_INHERIT);
10108 TASK_MTE_POLICY_HELPER_DEFINE(never_check, TASK_SEC_POLICY_NEVER_CHECK);
10109 TASK_MTE_POLICY_HELPER_DEFINE(restrict_receiving_aliases_to_tagged_memory, TASK_SEC_POLICY_RESTRICT_RECEIVING_ALIASES_TO_TAGGED_MEMORY);
10110
10111 uint32_t
task_get_sec_policy(task_t task)10112 task_get_sec_policy(task_t task)
10113 {
10114 assert(task);
10115 return (uint32_t)(task->task_sec_policy);
10116 }
10117
10118 void
task_clear_sec_policy(task_t task)10119 task_clear_sec_policy(task_t task)
10120 {
10121 os_atomic_store(&task->task_sec_policy, TASK_SEC_POLICY_NONE, relaxed);
10122 }
10123
10124 void
task_clear_sec_soft_mode(task_t task)10125 task_clear_sec_soft_mode(task_t task)
10126 {
10127 os_atomic_andnot(&task->task_sec_policy, TASK_SEC_POLICY_SOFT_MODE, relaxed);
10128 }
10129
10130 bool
current_task_has_sec_enabled(void)10131 current_task_has_sec_enabled(void)
10132 {
10133 task_t task = current_task_early();
10134 if (!task) {
10135 /* an early boot thread is always a kernel thread */
10136 #if CONFIG_KERNEL_TAGGING
10137 return true;
10138 #else /* !CONFIG_KERNEL_TAGGING */
10139 return false;
10140 #endif /* !CONFIG_KERNEL_TAGGING */
10141 }
10142 return task_has_sec(task);
10143 }
10144 #endif /* HAS_MTE || HAS_MTE_EMULATION_SHIMS */
10145
10146
#if __has_feature(ptrauth_calls)
/*
 * On FPAC hardware, we want to deliver all PAC violations as fatal exceptions,
 * regardless of the enable_pac_exception boot-arg value or any other
 * entitlements. The only case where we allow non-fatal PAC exceptions on FPAC
 * is for debugging, which requires Developer Mode to be enabled.
 *
 * On non-FPAC hardware, we gate the decision behind entitlements and the
 * enable_pac_exception boot-arg.
 */
extern int gARM_FEAT_FPAC;
/*
 * Having the PAC_EXCEPTION_ENTITLEMENT entitlement means we always enforce all
 * of the PAC exception hardening: fatal exceptions and signed user state.
 */
#define PAC_EXCEPTION_ENTITLEMENT "com.apple.private.pac.exception"
/*
 * On non-FPAC hardware, when the enable_pac_exception boot-arg is set to true,
 * processes can choose to get non-fatal PAC exception delivery by setting
 * the SKIP_PAC_EXCEPTION_ENTITLEMENT entitlement.
 */
#define SKIP_PAC_EXCEPTION_ENTITLEMENT "com.apple.private.skip.pac.exception"
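
/*
 * Illustrative summary of the resulting policy, derived from
 * task_set_pac_exception_fatal_flag() below (boot-arg and entitlement
 * names as defined above):
 *
 *	non-FPAC + enable_pac_exception + SKIP_PAC_EXCEPTION_ENTITLEMENT:
 *		no flags set (non-fatal PAC exception delivery)
 *	PAC_EXCEPTION_ENTITLEMENT, or platform restrictions version >= 1:
 *		TFRO_PAC_ENFORCE_USER_STATE | TFRO_PAC_EXC_FATAL
 *	otherwise, platform binary on FPAC (or with enable_pac_exception):
 *		TFRO_PAC_EXC_FATAL
 */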

void
task_set_pac_exception_fatal_flag(
	task_t task)
{
	assert(task != TASK_NULL);
	bool pac_hardened_task = false;
	uint32_t set_flags = 0;

	/*
	 * We must not apply this security policy to tasks that have opted out of
	 * mach hardening, to avoid regressions in third-party plugins and apps
	 * when AMFI boot-args are in use.
	 */
	ipc_space_policy_t pol = ipc_policy_for_task(task);
	bool platform_binary = pol & IPC_SPACE_POLICY_PLATFORM;
#if XNU_TARGET_OS_OSX
	platform_binary &= !(pol & IPC_SPACE_POLICY_OPTED_OUT);
#endif /* XNU_TARGET_OS_OSX */

	/*
	 * On non-FPAC hardware, we allow gating PAC exceptions behind
	 * SKIP_PAC_EXCEPTION_ENTITLEMENT and the boot-arg.
	 */
	if (!gARM_FEAT_FPAC && enable_pac_exception &&
	    IOTaskHasEntitlement(task, SKIP_PAC_EXCEPTION_ENTITLEMENT)) {
		return;
	}

	if (IOTaskHasEntitlement(task, PAC_EXCEPTION_ENTITLEMENT) ||
	    (task_get_platform_restrictions_version(task) >= 1)) {
		pac_hardened_task = true;
		set_flags |= TFRO_PAC_ENFORCE_USER_STATE;
	}

	/* On non-FPAC hardware, gate the fatal property behind entitlements and the boot-arg. */
	if (pac_hardened_task ||
	    ((enable_pac_exception || gARM_FEAT_FPAC) && platform_binary)) {
		set_flags |= TFRO_PAC_EXC_FATAL;
	}

	if (set_flags != 0) {
		task_ro_flags_set(task, set_flags);
	}
}

bool
task_is_pac_exception_fatal(
	task_t task)
{
	assert(task != TASK_NULL);
	return !!(task_ro_flags_get(task) & TFRO_PAC_EXC_FATAL);
}
#endif /* __has_feature(ptrauth_calls) */

/*
 * FATAL_EXCEPTION_ENTITLEMENT, if present, contains a list of conditions for
 * which access violations should deliver SIGKILL rather than SIGSEGV. This is
 * a hardening measure intended for use by applications that are able to
 * handle the stricter error-handling behavior. Currently this supports
 * FATAL_EXCEPTION_ENTITLEMENT_JIT, which is documented in
 * user_fault_in_self_restrict_mode().
 */
#define FATAL_EXCEPTION_ENTITLEMENT "com.apple.security.fatal-exceptions"
#define FATAL_EXCEPTION_ENTITLEMENT_JIT "jit"
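
/*
 * A hypothetical entitlements fragment that opts a process into fatal JIT
 * exceptions might look like this (illustrative sketch only; the exact
 * plist shape is an assumption, while the key and value come from the
 * definitions above):
 *
 *	<key>com.apple.security.fatal-exceptions</key>
 *	<array>
 *		<string>jit</string>
 *	</array>
 */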

void
task_set_jit_flags(
	task_t task)
{
	assert(task != TASK_NULL);
	if (IOTaskHasStringEntitlement(task, FATAL_EXCEPTION_ENTITLEMENT, FATAL_EXCEPTION_ENTITLEMENT_JIT)) {
		task_ro_flags_set(task, TFRO_JIT_EXC_FATAL);
	}
}

bool
task_is_jit_exception_fatal(
	__unused task_t task)
{
#if !defined(XNU_PLATFORM_MacOSX)
	return true;
#else
	assert(task != TASK_NULL);
	return !!(task_ro_flags_get(task) & TFRO_JIT_EXC_FATAL);
#endif
}

bool
task_needs_user_signed_thread_state(
	task_t task)
{
	assert(task != TASK_NULL);
	return !!(task_ro_flags_get(task) & TFRO_PAC_ENFORCE_USER_STATE);
}

void
task_set_tecs(task_t task)
{
	if (task == TASK_NULL) {
		task = current_task();
	}

	if (!machine_csv(CPUVN_CI)) {
		return;
	}

	LCK_MTX_ASSERT(&task->lock, LCK_MTX_ASSERT_NOTOWNED);

	task_lock(task);

	task->t_flags |= TF_TECS;

	thread_t thread;
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		machine_tecs(thread);
	}
	task_unlock(task);
}
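
/*
 * Note that passing TASK_NULL targets the calling task, so callers can
 * simply write, e.g.:
 *
 *	task_set_tecs(TASK_NULL);	// applies to current_task()
 */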

kern_return_t
task_test_sync_upcall(
	task_t task,
	ipc_port_t send_port)
{
#if DEVELOPMENT || DEBUG
	if (task != current_task() || !IPC_PORT_VALID(send_port)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Block on a sync kernel upcall on the given send port */
	mach_test_sync_upcall(send_port);

	ipc_port_release_send(send_port);
	return KERN_SUCCESS;
#else
	(void)task;
	(void)send_port;
	return KERN_NOT_SUPPORTED;
#endif
}

kern_return_t
task_test_async_upcall_propagation(
	task_t task,
	ipc_port_t send_port,
	int qos,
	int iotier)
{
#if DEVELOPMENT || DEBUG
	kern_return_t kr;

	if (task != current_task() || !IPC_PORT_VALID(send_port)) {
		return KERN_INVALID_ARGUMENT;
	}

	if (qos < THREAD_QOS_DEFAULT || qos > THREAD_QOS_USER_INTERACTIVE ||
	    iotier < THROTTLE_LEVEL_START || iotier > THROTTLE_LEVEL_END) {
		return KERN_INVALID_ARGUMENT;
	}

	struct thread_attr_for_ipc_propagation attr = {
		.tafip_iotier = iotier,
		.tafip_qos = qos
	};

	/* Apply the propagated attributes to the port */
	kr = ipc_port_propagate_thread_attr(send_port, attr);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	thread_enable_send_importance(current_thread(), TRUE);

	/* Perform an async kernel upcall on the given send port */
	mach_test_async_upcall(send_port);
	thread_enable_send_importance(current_thread(), FALSE);

	ipc_port_release_send(send_port);
	return KERN_SUCCESS;
#else
	(void)task;
	(void)send_port;
	(void)qos;
	(void)iotier;
	return KERN_NOT_SUPPORTED;
#endif
}

#if CONFIG_PROC_RESOURCE_LIMITS
mach_port_name_t
current_task_get_fatal_port_name(void)
{
	mach_port_t task_fatal_port = MACH_PORT_NULL;
	mach_port_name_t port_name = 0;

	task_fatal_port = task_allocate_fatal_port();

	if (task_fatal_port) {
		ipc_object_copyout(current_space(), task_fatal_port,
		    MACH_MSG_TYPE_PORT_SEND, IPC_OBJECT_COPYOUT_FLAGS_NONE,
		    NULL, &port_name);
	}

	return port_name;
}
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

#if defined(__x86_64__)
bool
curtask_get_insn_copy_optout(void)
{
	bool optout;
	task_t cur_task = current_task();

	task_lock(cur_task);
	optout = (cur_task->t_flags & TF_INSN_COPY_OPTOUT) ? true : false;
	task_unlock(cur_task);

	return optout;
}

void
curtask_set_insn_copy_optout(void)
{
	task_t cur_task = current_task();

	task_lock(cur_task);

	cur_task->t_flags |= TF_INSN_COPY_OPTOUT;

	thread_t thread;
	queue_iterate(&cur_task->threads, thread, thread_t, task_threads) {
		machine_thread_set_insn_copy_optout(thread);
	}
	task_unlock(cur_task);
}
#endif /* defined(__x86_64__) */

void
task_get_corpse_vmobject_list(task_t task, vmobject_list_output_t *list, size_t *list_size)
{
	assert(task);
	assert(list_size);

	*list = task->corpse_vmobject_list;
	*list_size = (size_t)task->corpse_vmobject_list_size;
}

__abortlike
static void
panic_proc_ro_task_backref_mismatch(task_t t, proc_ro_t ro)
{
	panic("proc_ro->task backref mismatch: t=%p, ro=%p, "
	    "proc_ro_task(ro)=%p", t, ro, proc_ro_task(ro));
}

proc_ro_t
task_get_ro(task_t t)
{
	proc_ro_t ro = (proc_ro_t)t->bsd_info_ro;

	zone_require_ro(ZONE_ID_PROC_RO, sizeof(struct proc_ro), ro);
	if (__improbable(proc_ro_task(ro) != t)) {
		panic_proc_ro_task_backref_mismatch(t, ro);
	}

	return ro;
}

uint32_t
task_ro_flags_get(task_t task)
{
	return task_get_ro(task)->t_flags_ro;
}

void
task_ro_flags_set(task_t task, uint32_t flags)
{
	zalloc_ro_update_field_atomic(ZONE_ID_PROC_RO, task_get_ro(task),
	    t_flags_ro, ZRO_ATOMIC_OR_32, flags);
}

void
task_ro_flags_clear(task_t task, uint32_t flags)
{
	zalloc_ro_update_field_atomic(ZONE_ID_PROC_RO, task_get_ro(task),
	    t_flags_ro, ZRO_ATOMIC_AND_32, ~flags);
}
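
/*
 * Usage sketch (mirrors call sites elsewhere in this file): the read-only
 * flag word is updated through zalloc_ro_update_field_atomic() because
 * proc_ro is allocated from a read-only zone and cannot be stored to
 * directly:
 *
 *	task_ro_flags_set(task, TFRO_PAC_EXC_FATAL);
 *	assert(task_ro_flags_get(task) & TFRO_PAC_EXC_FATAL);
 */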

task_control_port_options_t
task_get_control_port_options(task_t task)
{
	return task_get_ro(task)->task_control_port_options;
}

/*
 * Intentionally static: calling this after the task has started has no
 * effect, since control ports cannot go from immovable back to movable.
 */
static void
task_set_control_port_options(task_t task, task_control_port_options_t opts)
{
	zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
	    task_control_port_options, &opts);
}

/*!
 * @function kdp_task_is_locked
 *
 * @abstract
 * Checks whether the task lock is held.
 *
 * @discussion
 * NOT SAFE: To be used only by the kernel debugger.
 *
 * @param task task to check
 *
 * @returns TRUE if the task is locked.
 */
boolean_t
kdp_task_is_locked(task_t task)
{
	return kdp_lck_mtx_lock_spin_is_acquired(&task->lock);
}

#if DEBUG || DEVELOPMENT
/**
 * Check whether a threshold limit is valid against the actual physical
 * memory limit. If the two are the same, race conditions may arise, so we
 * have to prevent that from happening.
 */
static diagthreshold_check_return
task_check_memorythreshold_is_valid(task_t task, uint64_t new_limit, bool is_diagnostics_value)
{
	int phys_limit_mb;
	kern_return_t ret_value;
	bool threshold_enabled;
	bool dummy;
	ret_value = ledger_is_diag_threshold_enabled(task->ledger, task_ledgers.phys_footprint, &threshold_enabled);
	if (ret_value != KERN_SUCCESS) {
		return ret_value;
	}
	if (is_diagnostics_value == true) {
		ret_value = task_get_phys_footprint_limit(task, &phys_limit_mb);
	} else {
		uint64_t diag_limit;
		ret_value = task_get_diag_footprint_limit_internal(task, &diag_limit, &dummy);
		phys_limit_mb = (int)(diag_limit >> 20);
	}
	if (ret_value != KERN_SUCCESS) {
		return ret_value;
	}
	if (phys_limit_mb == (int) new_limit) {
		if (threshold_enabled == false) {
			return THRESHOLD_IS_SAME_AS_LIMIT_FLAG_DISABLED;
		} else {
			return THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED;
		}
	}
	if (threshold_enabled == false) {
		return THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED;
	} else {
		return THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_ENABLED;
	}
}
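
/*
 * Summary of the four possible outcomes above (illustrative):
 *
 *	limit == new_limit, threshold enabled  -> THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED
 *	limit == new_limit, threshold disabled -> THRESHOLD_IS_SAME_AS_LIMIT_FLAG_DISABLED
 *	limit != new_limit, threshold enabled  -> THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_ENABLED
 *	limit != new_limit, threshold disabled -> THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED
 */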
#endif

#if CONFIG_EXCLAVES
kern_return_t
task_add_conclave(task_t task, void *vnode, int64_t off, const char *task_conclave_id)
{
	/*
	 * Only launchd or properly entitled tasks can attach tasks to
	 * conclaves.
	 */
	if (!exclaves_has_priv(current_task(), EXCLAVES_PRIV_CONCLAVE_SPAWN)) {
		return KERN_DENIED;
	}

	/*
	 * Only entitled tasks can have conclaves attached.
	 * Allow tasks which have the SPAWN privilege to also host conclaves.
	 * This allows xpcproxy to add a conclave before execing a daemon.
	 */
	if (!exclaves_has_priv_vnode(vnode, off, EXCLAVES_PRIV_CONCLAVE_HOST) &&
	    !exclaves_has_priv_vnode(vnode, off, EXCLAVES_PRIV_CONCLAVE_SPAWN)) {
		return KERN_DENIED;
	}

	return exclaves_conclave_attach(task_conclave_id, task);
}

kern_return_t
task_launch_conclave(mach_port_name_t port __unused)
{
	kern_return_t kr = KERN_FAILURE;
	assert3u(port, ==, MACH_PORT_NULL);
	exclaves_resource_t *conclave = task_get_conclave(current_task());
	if (conclave == NULL || exclaves_is_forwarding_resource(conclave)) {
		return kr;
	}

	kr = exclaves_conclave_launch(conclave);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	task_set_conclave_taint(current_task());

	return KERN_SUCCESS;
}

kern_return_t
task_inherit_conclave(task_t old_task, task_t new_task, void *vnode, int64_t off)
{
	if (old_task->conclave == NULL ||
	    !exclaves_conclave_is_attached(old_task->conclave)) {
		return KERN_SUCCESS;
	}

	/*
	 * Only launchd or properly entitled tasks can attach tasks to
	 * conclaves.
	 */
	if (!exclaves_has_priv(current_task(), EXCLAVES_PRIV_CONCLAVE_SPAWN)) {
		return KERN_DENIED;
	}

	/*
	 * Only entitled tasks can have conclaves attached.
	 */
	if (!exclaves_has_priv_vnode(vnode, off, EXCLAVES_PRIV_CONCLAVE_HOST)) {
		return KERN_DENIED;
	}

	return exclaves_conclave_inherit(old_task->conclave, old_task, new_task);
}

void
task_clear_conclave(task_t task)
{
	if (task->exclave_crash_info) {
		kfree_data(task->exclave_crash_info, CONCLAVE_CRASH_BUFFER_PAGECOUNT * PAGE_SIZE);
		task->exclave_crash_info = NULL;
	}

	if (task->conclave == NULL) {
		return;
	}

	/*
	 * XXX
	 * This should only fail if either the conclave is in an unexpected
	 * state (i.e. not ATTACHED) or if the wrong port is supplied.
	 * We should revisit this and make sure we guarantee the above
	 * constraints.
	 */
	__assert_only kern_return_t ret =
	    exclaves_conclave_detach(task->conclave, task);
	assert3u(ret, ==, KERN_SUCCESS);
}

void
task_stop_conclave(task_t task, bool gather_crash_bt)
{
	thread_t thread = current_thread();

	if (task->conclave == NULL || exclaves_is_forwarding_resource(task->conclave)) {
		return;
	}

	if (task_should_panic_on_exit_due_to_conclave_taint(task)) {
		panic("Conclave tainted task %p terminated\n", task);
	}

	/* Stash the task on the current thread for conclave teardown */
	thread->conclave_stop_task = task;

	__assert_only kern_return_t ret =
	    exclaves_conclave_stop(task->conclave, gather_crash_bt);

	thread->conclave_stop_task = TASK_NULL;

	assert3u(ret, ==, KERN_SUCCESS);
}

void
task_suspend_conclave(task_t task)
{
	thread_t thread = current_thread();

	if (task->conclave == NULL || exclaves_is_forwarding_resource(task->conclave)) {
		return;
	}

	/* Stash the task on the current thread for conclave teardown */
	thread->conclave_stop_task = task;

	__assert_only kern_return_t ret =
	    exclaves_conclave_suspend(task->conclave);

	thread->conclave_stop_task = TASK_NULL;

	assert3u(ret, ==, KERN_SUCCESS);
}

void
task_resume_conclave(task_t task)
{
	thread_t thread = current_thread();

	if (task->conclave == NULL || exclaves_is_forwarding_resource(task->conclave)) {
		return;
	}

	/* Stash the task on the current thread for conclave teardown */
	thread->conclave_stop_task = task;

	__assert_only kern_return_t ret =
	    exclaves_conclave_resume(task->conclave);

	thread->conclave_stop_task = TASK_NULL;

	assert3u(ret, ==, KERN_SUCCESS);
}

kern_return_t
task_stop_conclave_upcall(void)
{
	task_t task = current_task();
	if (task->conclave == NULL || exclaves_is_forwarding_resource(task->conclave)) {
		return KERN_INVALID_TASK;
	}

	return exclaves_conclave_stop_upcall(task->conclave);
}

kern_return_t
task_stop_conclave_upcall_complete(void)
{
	task_t task = current_task();
	thread_t thread = current_thread();

	if (!(thread->th_exclaves_state & TH_EXCLAVES_STOP_UPCALL_PENDING)) {
		return KERN_SUCCESS;
	}

	assert3p(task->conclave, !=, NULL);

	return exclaves_conclave_stop_upcall_complete(task->conclave, task);
}

kern_return_t
task_suspend_conclave_upcall(uint64_t *scid_list, size_t scid_list_count)
{
	task_t task = current_task();
	thread_t thread;
	size_t scid_count = 0;
	kern_return_t kr;
	if (task->conclave == NULL || exclaves_is_forwarding_resource(task->conclave)) {
		return KERN_INVALID_TASK;
	}

	kr = task_hold_and_wait(task, false);

	task_lock(task);
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		if (thread->th_exclaves_state & TH_EXCLAVES_RPC) {
			scid_list[scid_count++] = thread->th_exclaves_ipc_ctx.scid;
			if (scid_count >= scid_list_count) {
				break;
			}
		}
	}

	task_unlock(task);
	return kr;
}

kern_return_t
task_crash_info_conclave_upcall(task_t task, const struct conclave_sharedbuffer_t *shared_buf,
    uint32_t length)
{
	if (task->conclave == NULL || exclaves_is_forwarding_resource(task->conclave)) {
		return KERN_INVALID_TASK;
	}

	/* Allocate a kernel buffer and memcpy the shared pages into it */
	int task_crash_info_buffer_size = 0;
	uint8_t *task_crash_info_buffer;

	if (!length) {
		printf("Conclave upcall: task_crash_info_conclave_upcall did not return any page addresses\n");
		return KERN_INVALID_ARGUMENT;
	}

	task_crash_info_buffer_size = CONCLAVE_CRASH_BUFFER_PAGECOUNT * PAGE_SIZE;
	assert3u(task_crash_info_buffer_size, >=, length);

	task_crash_info_buffer = kalloc_data(task_crash_info_buffer_size, Z_WAITOK);
	if (!task_crash_info_buffer) {
		panic("task_crash_info_conclave_upcall: cannot allocate buffer for task_info shared memory");
	}

	uint8_t *dst = task_crash_info_buffer;
	uint32_t remaining = length;
	for (size_t i = 0; i < CONCLAVE_CRASH_BUFFER_PAGECOUNT; i++) {
		if (remaining) {
			memcpy(dst, (uint8_t *)phystokv((pmap_paddr_t)shared_buf->physaddr[i]), PAGE_SIZE);
			remaining = (remaining >= PAGE_SIZE) ? remaining - PAGE_SIZE : 0;
			dst += PAGE_SIZE;
		}
	}

	task_lock(task);
	if (task->exclave_crash_info == NULL && task->active) {
		task->exclave_crash_info = task_crash_info_buffer;
		task->exclave_crash_info_length = length;
		task_crash_info_buffer = NULL;
	}
	task_unlock(task);

	if (task_crash_info_buffer) {
		kfree_data(task_crash_info_buffer, task_crash_info_buffer_size);
	}

	return KERN_SUCCESS;
}

exclaves_resource_t *
task_get_conclave(task_t task)
{
	return task->conclave;
}

extern boolean_t IOPMRootDomainGetWillShutdown(void);

/* Do not taint processes when they talk to a conclave, so the system does not panic when they exit. */
TUNABLE(bool, disable_conclave_taint, "disable_conclave_taint", true);

static bool
task_should_panic_on_exit_due_to_conclave_taint(task_t task)
{
	/* Check if the boot-arg to disable conclave taint is set */
	if (disable_conclave_taint) {
		return false;
	}

	/* Check if the system is shutting down */
	if (IOPMRootDomainGetWillShutdown()) {
		return false;
	}

	return task_is_conclave_tainted(task);
}

static bool
task_is_conclave_tainted(task_t task)
{
	return (task->t_exclave_state & TES_CONCLAVE_TAINTED) != 0 &&
	    !(task->t_exclave_state & TES_CONCLAVE_UNTAINTABLE);
}

static void
task_set_conclave_taint(task_t task)
{
	os_atomic_or(&task->t_exclave_state, TES_CONCLAVE_TAINTED, relaxed);
}

void
task_set_conclave_untaintable(task_t task)
{
	os_atomic_or(&task->t_exclave_state, TES_CONCLAVE_UNTAINTABLE, relaxed);
}
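
/*
 * Taint life cycle, as a sketch of the functions above: a task becomes
 * tainted when it launches a conclave (task_set_conclave_taint()), and a
 * tainted task panics the system on exit (see task_stop_conclave()) unless
 * the disable_conclave_taint boot-arg is set, the system is already
 * shutting down, or the task has been marked untaintable.
 */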

void
task_add_conclave_crash_info(task_t task, void *crash_info_ptr)
{
	__block kern_return_t error = KERN_SUCCESS;
	tb_error_t tberr = TB_ERROR_SUCCESS;
	void *crash_info;
	uint32_t crash_info_length = 0;

	if (task->conclave == NULL || exclaves_is_forwarding_resource(task->conclave)) {
		return;
	}

	if (task->exclave_crash_info_length == 0) {
		return;
	}

	error = kcdata_add_container_marker(crash_info_ptr, KCDATA_TYPE_CONTAINER_BEGIN,
	    STACKSHOT_KCCONTAINER_EXCLAVES, 0);
	if (error != KERN_SUCCESS) {
		return;
	}

	crash_info = task->exclave_crash_info;
	crash_info_length = task->exclave_crash_info_length;

	tberr = stackshot_stackshotresult__unmarshal(crash_info,
	    (uint64_t)crash_info_length, ^(stackshot_stackshotresult_s result){
		error = stackshot_exclaves_process_stackshot(&result, crash_info_ptr, false);
		if (error != KERN_SUCCESS) {
			printf("task_add_conclave_crash_info: error processing stackshot result %d\n", error);
		}
	});
	if (tberr != TB_ERROR_SUCCESS) {
		printf("task_conclave_crash: task_add_conclave_crash_info could not unmarshal stackshot data 0x%x\n", tberr);
		error = KERN_FAILURE;
		goto error_exit;
	}

error_exit:
	kcdata_add_container_marker(crash_info_ptr, KCDATA_TYPE_CONTAINER_END,
	    STACKSHOT_KCCONTAINER_EXCLAVES, 0);
}

#endif /* CONFIG_EXCLAVES */

/* defined in bsd/kern/kern_proc.c */
extern void proc_name(int pid, char *buf, int size);
extern const char *proc_best_name(struct proc *p);

void
task_procname(task_t task, char *buf, int size)
{
	proc_name(task_pid(task), buf, size);
}

const char *
task_best_name(task_t task)
{
	return proc_best_name(task_get_proc_raw(task));
}

#if HAS_MTE
/*
 * Set an AST_SYNTHESIZE_MACH exception on the task.
 * This AST will consult the saved address in the vm_map and synthesize a
 * proper MTE Mach exception from it.
 */
void
task_set_ast_mte_synthesize_mach_exception(task_t task)
{
	task_lock(task);

	if (!task->active) {
		task_unlock(task);
		return;
	}

	spl_t s = splsched();
	/* Set an AST on each of the task's threads, sending IPIs if needed */
	thread_t thread;
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		if (thread == current_thread()) {
			thread_ast_set(thread, AST_SYNTHESIZE_MACH);
			ast_propagate(thread);
		} else {
			processor_t processor;

			thread_lock(thread);
			thread_ast_set(thread, AST_SYNTHESIZE_MACH);
			processor = thread->last_processor;
			if (processor != PROCESSOR_NULL &&
			    processor->state == PROCESSOR_RUNNING &&
			    processor->active_thread == thread) {
				cause_ast_check(processor);
			}
			thread_unlock(thread);
		}
	}
	splx(s);

	task_unlock(task);
}
#endif /* HAS_MTE */