1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: task.h
60 * Author: Avadis Tevanian, Jr.
61 *
62 * This file contains the structure definitions for tasks.
63 *
64 */
65 /*
66 * Copyright (c) 1993 The University of Utah and
67 * the Computer Systems Laboratory (CSL). All rights reserved.
68 *
69 * Permission to use, copy, modify and distribute this software and its
70 * documentation is hereby granted, provided that both the copyright
71 * notice and this permission notice appear in all copies of the
72 * software, derivative works or modified versions, and any portions
73 * thereof, and that both notices appear in supporting documentation.
74 *
75 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
76 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
77 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
78 *
79 * CSL requests users of this software to return to [email protected] any
80 * improvements that they make and grant CSL redistribution rights.
81 *
82 */
83 /*
84 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
85 * support for mandatory and extensible security protections. This notice
86 * is included in support of clause 2.2 (b) of the Apple Public License,
87 * Version 2.0.
88 * Copyright (c) 2005 SPARTA, Inc.
89 */
90
91 #ifndef _KERN_TASK_H_
92 #define _KERN_TASK_H_
93
94 #include <kern/kern_types.h>
95 #include <kern/task_ref.h>
96 #include <mach/mach_types.h>
97 #include <sys/cdefs.h>
98
99 #ifdef XNU_KERNEL_PRIVATE
100 #include <kern/btlog.h>
101 #include <kern/kern_cdata.h>
102 #include <mach/sfi_class.h>
103 #include <kern/counter.h>
104 #include <kern/cs_blobs.h>
105 #include <kern/queue.h>
106 #include <kern/recount.h>
107 #include <sys/kern_sysctl.h>
108 #include <sys/resource_private.h>
109
110 #if CONFIG_EXCLAVES
111 #include <mach/exclaves.h>
112 #endif /* CONFIG_EXCLAVES */
113 #endif /* XNU_KERNEL_PRIVATE */
114
115 #ifdef MACH_KERNEL_PRIVATE
116 #include <mach/boolean.h>
117 #include <mach/port.h>
118 #include <mach/time_value.h>
119 #include <mach/message.h>
120 #include <mach/mach_param.h>
121 #include <mach/task_info.h>
122 #include <mach/exception_types.h>
123 #include <mach/vm_statistics.h>
124 #include <machine/task.h>
125
126 #include <kern/cpu_data.h>
127 #include <kern/queue.h>
128 #include <kern/exception.h>
129 #include <kern/locks.h>
130 #include <security/_label.h>
131 #include <ipc/ipc_port.h>
132
133 #include <kern/thread.h>
134 #include <mach/coalition.h>
135 #include <stdatomic.h>
136 #include <os/refcnt.h>
137
#if CONFIG_DEFERRED_RECLAIM
/* Opaque handle to a task's deferred-reclamation ring metadata (defined in VM). */
typedef struct vm_deferred_reclamation_metadata_s *vm_deferred_reclamation_metadata_t;
#endif /* CONFIG_DEFERRED_RECLAIM */
141
/*
 * CPU time accumulated by a task, broken out per QoS class.
 * One counter per QoS level; updated under the task lock (see the
 * cpu_time_eqos_stats / cpu_time_rqos_stats fields of struct task).
 * NOTE(review): units appear to be absolute time — confirm at the update sites.
 */
struct _cpu_time_qos_stats {
	uint64_t cpu_time_qos_default;
	uint64_t cpu_time_qos_maintenance;
	uint64_t cpu_time_qos_background;
	uint64_t cpu_time_qos_utility;
	uint64_t cpu_time_qos_legacy;
	uint64_t cpu_time_qos_user_initiated;
	uint64_t cpu_time_qos_user_interactive;
};
151
/*
 * Per-task I/O write counters, kept in two flavors on struct task
 * (task_writes_counters_internal / task_writes_counters_external).
 */
struct task_writes_counters {
	uint64_t task_immediate_writes;
	uint64_t task_deferred_writes;
	uint64_t task_invalidated_writes;
	uint64_t task_metadata_writes;
};
158
/*
 * A set of one-bit flags recording policy-update actions that have been
 * pended and still need to be performed.  The anonymous union lets a
 * caller test/clear all pending bits at once through tpt_value.
 */
struct task_pend_token {
	union {
		struct {
			uint32_t tpt_update_sockets      :1,
			    tpt_update_timers            :1,
			    tpt_update_watchers          :1,
			    tpt_update_live_donor        :1,
			    tpt_update_coal_sfi          :1,
			    tpt_update_throttle          :1,
			    tpt_update_thread_sfi        :1,
			    tpt_force_recompute_pri      :1,
			    tpt_update_tg_ui_flag        :1,
			    tpt_update_turnstile         :1,
			    tpt_update_tg_app_flag       :1,
			    tpt_update_game_mode         :1,
			    tpt_update_carplay_mode      :1,
			    tpt_update_appnap            :1;
		};
		/* aggregate view of all pending bits above */
		uint32_t tpt_value;
	};
};

typedef struct task_pend_token task_pend_token_s;
typedef struct task_pend_token *task_pend_token_t;
183
/*
 * Per-task runtime security-mitigation configuration (see the
 * security_config field of struct task).  The anonymous union allows
 * the whole configuration to be read/written as a single 32-bit value.
 */
struct task_security_config {
	union {
		struct {
			uint16_t hardened_heap: 1,
			    tpro: 1,
#if HAS_MTE || HAS_MTE_EMULATION_SHIMS
			    sec: 1,
#else /* HAS_MTE || HAS_MTE_EMULATION_SHIMS */
			    /* placeholder keeping the layout identical when MTE is absent */
			    reserved: 1,
#endif
			    platform_restrictions_version: 3,
			    script_restrictions: 1,
			    ipc_containment_vessel: 1,
			    guard_objects: 1;
			uint8_t hardened_process_version;
		};
		/* aggregate view of the bitfields above */
		uint32_t value;
	};
};

typedef struct task_security_config task_security_config_s;
205
206 struct task_watchports;
207 #include <bank/bank_internal.h>
208
209 struct ucred;
210
211 #ifdef MACH_BSD
212 struct proc;
213 struct proc_ro;
214 #endif
215
/*
 * Flags describing how a task's memory limit is applied and enforced
 * (stored atomically in the memlimit_flags field of struct task).
 */
__options_closed_decl(task_memlimit_flags_t, uint32_t, {
	/* if set, use active attributes, otherwise use inactive attributes */
	TASK_MEMLIMIT_IS_ACTIVE = 0x01,
	/* if set, exceeding current memlimit will prove fatal to the task */
	TASK_MEMLIMIT_IS_FATAL = 0x02,
	/* if set, suppress exc_resource exception when task exceeds active memory limit */
	TASK_MEMLIMIT_ACTIVE_EXC_RESOURCE = 0x04,
	/* if set, suppress exc_resource exception when task exceeds inactive memory limit */
	TASK_MEMLIMIT_INACTIVE_EXC_RESOURCE = 0x08
});
226
/*
 * The Mach task control structure.  Fields are protected by the locks
 * noted in the comments below: the task lock (lock), the IPC lock
 * (itk_lock_data), the object queue lock (task_objq_lock), the
 * coalition lock, or the BSD proc lock, as indicated per field.
 */
struct task {
	/* Synchronization/destruction information */
	decl_lck_mtx_data(, lock); /* Task's lock */
	os_refcnt_t ref_count; /* Number of references to me */

#if DEVELOPMENT || DEBUG
	struct os_refgrp *ref_group;
	lck_spin_t ref_group_lock;
#endif /* DEVELOPMENT || DEBUG */

	bool active; /* Task has not been terminated */
	bool ipc_active; /* IPC with the task ports is allowed */
	bool halting; /* Task is being halted */
	bool message_app_suspended; /* Let iokit know when pidsuspended */

	/* Virtual timers */
	uint32_t vtimers;
	uint32_t loadTag; /* dext ID used for logging identity */

	/* Globally unique id to identify tasks and corpses */
	uint64_t task_uniqueid;

	/* Miscellaneous */
	vm_map_t XNU_PTRAUTH_SIGNED_PTR("task.map") map; /* Address space description */
	queue_chain_t tasks; /* global list of tasks */
	struct task_watchports *watchports; /* watchports passed in spawn */
	turnstile_inheritor_t returnwait_inheritor; /* inheritor for task_wait */

	/* Threads in this task */
	queue_head_t threads;
	struct restartable_ranges *t_rr_ranges;

	processor_set_t pset_hint;
	struct affinity_space *affinity_space;

	int thread_count;
	uint32_t active_thread_count;
	int suspend_count; /* Internal scheduling only */
#ifdef CONFIG_TASK_SUSPEND_STATS
	struct task_suspend_stats_s t_suspend_stats; /* suspension statistics for this task */
	task_suspend_source_array_t t_suspend_sources; /* array of suspender debug info for this task */
#endif /* CONFIG_TASK_SUSPEND_STATS */

	/* User-visible scheduling information */
	integer_t user_stop_count; /* outstanding stops */
	integer_t legacy_stop_count; /* outstanding legacy stops */

	int16_t priority; /* base priority for threads */
	int16_t max_priority; /* maximum priority for threads */

	integer_t importance; /* priority offset (BSD 'nice' value) */

	/* Statistics */
	uint64_t total_runnable_time;

	struct recount_task tk_recount;

	/* IPC structures */
	decl_lck_mtx_data(, itk_lock_data);
	/*
	 * Different flavors of task port.
	 * These flavors TASK_FLAVOR_* are defined in mach_types.h
	 */
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_task_ports") itk_task_ports[TASK_SELF_PORT_COUNT];
#if CONFIG_CSR
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_settable_self") itk_settable_self; /* a send right */
#endif /* CONFIG_CSR */
	struct exception_action exc_actions[EXC_TYPES_COUNT];
	/* special exception port used by task_register_hardened_exception_handler */
	struct hardened_exception_action hardened_exception_action;
	/* a send right each valid element */
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_host") itk_host; /* a send right */
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_bootstrap") itk_bootstrap; /* a send right */
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_debug_control") itk_debug_control; /* send right for debugmode communications */
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_task_access") itk_task_access; /* and another send right */
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_resume") itk_resume; /* a receive right to resume this task */
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_registered") itk_registered[TASK_PORT_REGISTER_MAX];
	/* all send rights */
	ipc_port_t * XNU_PTRAUTH_SIGNED_PTR("task.itk_dyld_notify") itk_dyld_notify; /* lazy send rights array of size DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT */
#if CONFIG_PROC_RESOURCE_LIMITS
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_resource_notify") itk_resource_notify; /* a send right to the resource notify port */
#endif /* CONFIG_PROC_RESOURCE_LIMITS */
	struct ipc_space * XNU_PTRAUTH_SIGNED_PTR("task.itk_space") itk_space;

	ledger_t ledger;
	/* Synchronizer ownership information */
	queue_head_t semaphore_list; /* list of owned semaphores */
	int semaphores_owned; /* number of semaphores owned */

	unsigned int priv_flags; /* privilege resource flags */
#define VM_BACKING_STORE_PRIV 0x1

	MACHINE_TASK

	counter_t faults; /* faults counter */
	counter_t pageins; /* pageins counter */
	counter_t cow_faults; /* copy on write fault counter */
	counter_t messages_sent; /* messages sent counter */
	counter_t messages_received; /* messages received counter */
	uint32_t decompressions; /* decompression counter (from threads that already terminated) */
	uint32_t syscalls_mach; /* mach system call counter */
	uint32_t syscalls_unix; /* unix system call counter */
	uint32_t c_switch; /* total context switches */
	uint32_t p_switch; /* total processor switches */
	uint32_t ps_switch; /* total pset switches */

#ifdef MACH_BSD
	struct proc_ro * bsd_info_ro;
#endif
	kcdata_descriptor_t corpse_info;
	uint64_t crashed_thread_id;
	queue_chain_t corpse_tasks;
#ifdef CONFIG_MACF
	struct label * crash_label;
#endif
	volatile uint32_t t_flags; /* general-purpose task flags protected by task_lock (TL) */
#define TF_NONE 0
#define TF_64B_ADDR 0x00000001 /* task has 64-bit addressing */
#define TF_64B_DATA 0x00000002 /* task has 64-bit data registers */
#define TF_CPUMON_WARNING 0x00000004 /* task has at least one thread in CPU usage warning zone */
#define TF_WAKEMON_WARNING 0x00000008 /* task is in wakeups monitor warning zone */
#define TF_TELEMETRY (TF_CPUMON_WARNING | TF_WAKEMON_WARNING) /* task is a telemetry participant */
#define TF_GPU_DENIED 0x00000010 /* task is not allowed to access the GPU */
#define TF_PENDING_CORPSE 0x00000040 /* task corpse has not been reported yet */
#define TF_CORPSE_FORK 0x00000080 /* task is a forked corpse */
#define TF_CA_CLIENT_WI 0x00000800 /* task has CA_CLIENT work interval */
#define TF_DARKWAKE_MODE 0x00001000 /* task is in darkwake mode */
#define TF_NO_SMT 0x00002000 /* task threads must not be paired with SMT threads */
#define TF_SYS_VERSION_COMPAT 0x00008000 /* shim task accesses to OS version data (macOS - app compatibility) */
#define TF_TECS 0x00020000 /* task threads must enable CPU security */
#if defined(__x86_64__)
#define TF_INSN_COPY_OPTOUT 0x00040000 /* task threads opt out of unhandled-fault instruction stream collection */
#endif
#define TF_COALITION_MEMBER 0x00080000 /* task is a member of a coalition */
#define TF_NO_CORPSE_FORKING 0x00100000 /* do not fork a corpse for this task */
#define TF_USE_PSET_HINT_CLUSTER_TYPE 0x00200000 /* bind task to task->pset_hint->pset_cluster_type */
#define TF_DYLD_ALL_IMAGE_FINAL 0x00400000 /* all_image_info_addr can no longer be changed */
#define TF_HASPROC 0x00800000 /* task points to a proc */
#define TF_GAME_MODE 0x40000000 /* Set the game mode bit for CLPC */
#define TF_CARPLAY_MODE 0x80000000 /* Set the carplay mode bit for CLPC */

/*
 * WARNING: These TF_ and TFRO_ flags are NOT automatically inherited by a child of fork
 * If you believe something should be inherited, you must manually inherit the flags in `task_create_internal`
 */

/*
 * RO-protected flags:
 */
#define TFRO_CORPSE 0x00000020 /* task is a corpse */
#if XNU_TARGET_OS_OSX
#define TFRO_MACH_HARDENING_OPT_OUT 0x00000040 /* task might load third party plugins on macOS and should be opted out of mach hardening */
#endif /* XNU_TARGET_OS_OSX */
#define TFRO_PLATFORM 0x00000080 /* task is a platform binary */

#define TFRO_FILTER_MSG 0x00004000 /* task calls into message filter callback before sending a message */
#define TFRO_PAC_EXC_FATAL 0x00010000 /* task is marked a corpse if a PAC exception occurs */
#define TFRO_JIT_EXC_FATAL 0x00020000 /* kill the task on access violations from privileged JIT code */
#define TFRO_PAC_ENFORCE_USER_STATE 0x01000000 /* Enforce user and kernel signed thread state */
#if CONFIG_EXCLAVES
#define TFRO_HAS_KD_ACCESS 0x02000000 /* Access to the kernel exclave resource domain */
#endif /* CONFIG_EXCLAVES */
#define TFRO_FREEZE_EXCEPTION_PORTS 0x04000000 /* Setting new exception ports on the task/thread is disallowed */
#if CONFIG_EXCLAVES
#define TFRO_HAS_SENSOR_MIN_ON_TIME_ACCESS 0x08000000 /* Access to sensor minimum on time call */
#endif /* CONFIG_EXCLAVES */

/*
 * Task is running within a 64-bit address space.
 */
#define task_has_64Bit_addr(task)       \
	(((task)->t_flags & TF_64B_ADDR) != 0)
#define task_set_64Bit_addr(task)       \
	((task)->t_flags |= TF_64B_ADDR)
#define task_clear_64Bit_addr(task)     \
	((task)->t_flags &= ~TF_64B_ADDR)

/*
 * Task is using 64-bit machine state.
 */
#define task_has_64Bit_data(task)       \
	(((task)->t_flags & TF_64B_DATA) != 0)
#define task_set_64Bit_data(task)       \
	((task)->t_flags |= TF_64B_DATA)
#define task_clear_64Bit_data(task)     \
	((task)->t_flags &= ~TF_64B_DATA)

#define task_corpse_pending_report(task)        \
	(((task)->t_flags & TF_PENDING_CORPSE) != 0)

#define task_set_corpse_pending_report(task)    \
	((task)->t_flags |= TF_PENDING_CORPSE)

#define task_clear_corpse_pending_report(task)  \
	((task)->t_flags &= ~TF_PENDING_CORPSE)

#define task_is_a_corpse_fork(task)     \
	(((task)->t_flags & TF_CORPSE_FORK) != 0)

#define task_set_coalition_member(task)         \
	((task)->t_flags |= TF_COALITION_MEMBER)

#define task_clear_coalition_member(task)       \
	((task)->t_flags &= ~TF_COALITION_MEMBER)

#define task_is_coalition_member(task)          \
	(((task)->t_flags & TF_COALITION_MEMBER) != 0)

#define task_has_proc(task)     \
	(((task)->t_flags & TF_HASPROC) != 0)

#define task_set_has_proc(task)         \
	((task)->t_flags |= TF_HASPROC)

#define task_clear_has_proc(task)       \
	((task)->t_flags &= ~TF_HASPROC)

	uint32_t t_procflags; /* general-purpose task flags protected by proc_lock (PL) */
#define TPF_NONE 0
#define TPF_DID_EXEC 0x00000001 /* task has been execed to a new task */
#define TPF_EXEC_COPY 0x00000002 /* task is the new copy of an exec */

#define task_did_exec_internal(task)            \
	(((task)->t_procflags & TPF_DID_EXEC) != 0)

#define task_is_exec_copy_internal(task)        \
	(((task)->t_procflags & TPF_EXEC_COPY) != 0)

	mach_vm_address_t all_image_info_addr; /* dyld __all_image_info */
	mach_vm_size_t all_image_info_size; /* section location and size */

#if CONFIG_CPU_COUNTERS
#define TASK_KPC_FORCED_ALL_CTRS 0x2 /* Bit in "t_kpc" signifying this task forced all counters */
	uint32_t t_kpc; /* kpc flags */
#endif /* CONFIG_CPU_COUNTERS */

	_Atomic darwin_gpu_role_t t_gpu_role;

	bool pidsuspended; /* pid_suspend called; no threads can execute */
	bool frozen; /* frozen; private resident pages committed to swap */
	bool changing_freeze_state; /* in the process of freezing or thawing */
	bool is_large_corpse;
	uint16_t policy_ru_cpu          :4,
	    policy_ru_cpu_ext           :4,
	    applied_ru_cpu              :4,
	    applied_ru_cpu_ext          :4;
	uint8_t rusage_cpu_flags;
	uint8_t rusage_cpu_percentage; /* Task-wide CPU limit percentage */
	uint8_t rusage_cpu_perthr_percentage; /* Per-thread CPU limit percentage */
#if MACH_ASSERT
	int8_t suspends_outstanding; /* suspends this task performed in excess of resumes */
#endif
	uint8_t t_returnwaitflags;
#define TWF_NONE 0
#define TRW_LRETURNWAIT 0x01 /* task is waiting for fork/posix_spawn/exec to complete */
#define TRW_LRETURNWAITER 0x02 /* task is waiting for TRW_LRETURNWAIT to get cleared */
#define TRW_LEXEC_COMPLETE 0x04 /* thread should call exec complete */

#if CONFIG_EXCLAVES
	uint8_t t_exclave_state;
#define TES_NONE 0
#define TES_CONCLAVE_TAINTED 0x01 /* Task has talked to conclave, xnu has tainted the process */
#define TES_CONCLAVE_UNTAINTABLE 0x02 /* Task can not be tainted by xnu when it talks to conclave */
#endif /* CONFIG_EXCLAVES */

#if __has_feature(ptrauth_calls)
	bool shared_region_auth_remapped; /* authenticated sections ready for use */
	char *shared_region_id; /* determines which ptr auth key to use */
#endif /* __has_feature(ptrauth_calls) */
	struct vm_shared_region *shared_region;

	uint64_t rusage_cpu_interval; /* Task-wide CPU limit interval */
	uint64_t rusage_cpu_perthr_interval; /* Per-thread CPU limit interval */
	uint64_t rusage_cpu_deadline;
	thread_call_t rusage_cpu_callt;
#if CONFIG_TASKWATCH
	queue_head_t task_watchers; /* app state watcher threads */
	int num_taskwatchers;
	int watchapplying;
#endif /* CONFIG_TASKWATCH */

	struct bank_task *bank_context; /* pointer to per task bank structure */

#if IMPORTANCE_INHERITANCE
	struct ipc_importance_task *task_imp_base; /* Base of IPC importance chain */
#endif /* IMPORTANCE_INHERITANCE */

	vm_extmod_statistics_data_t extmod_statistics;

	struct task_requested_policy requested_policy;
	struct task_effective_policy effective_policy;

	struct task_pend_token pended_coalition_changes;

	/*
	 * Can be merged with imp_donor bits, once the IMPORTANCE_INHERITANCE macro goes away.
	 */
	uint32_t low_mem_notified_warn :1, /* warning low memory notification is sent to the task */
	    low_mem_notified_critical :1, /* critical low memory notification is sent to the task */
	    purged_memory_warn :1, /* purgeable memory of the task is purged for warning level pressure */
	    purged_memory_critical :1, /* purgeable memory of the task is purged for critical level pressure */
	    low_mem_privileged_listener :1, /* if set, task would like to know about pressure changes before other tasks on the system */
	    mem_notify_reserved :27; /* reserved for future use */

	task_memlimit_flags_t _Atomic memlimit_flags;

	io_stat_info_t task_io_stats;

	struct task_writes_counters task_writes_counters_internal;
	struct task_writes_counters task_writes_counters_external;

	/*
	 * The cpu_time_qos_stats fields are protected by the task lock
	 */
	struct _cpu_time_qos_stats cpu_time_eqos_stats;
	struct _cpu_time_qos_stats cpu_time_rqos_stats;

	/* Statistics accumulated for terminated threads from this task */
	uint32_t task_timer_wakeups_bin_1;
	uint32_t task_timer_wakeups_bin_2;
	uint64_t task_gpu_ns;

	uint8_t task_can_transfer_memory_ownership;
#if DEVELOPMENT || DEBUG
	uint8_t task_no_footprint_for_debug;
#endif
	uint8_t task_objects_disowning;
	uint8_t task_objects_disowned;
	/* # of purgeable volatile VM objects owned by this task: */
	int task_volatile_objects;
	/* # of purgeable but not volatile VM objects owned by this task: */
	int task_nonvolatile_objects;
	int task_owned_objects;
	queue_head_t task_objq;
	decl_lck_mtx_data(, task_objq_lock); /* protects "task_objq" */

	unsigned int task_thread_limit:16;
#if __arm64__
	unsigned int task_legacy_footprint:1;
	unsigned int task_extra_footprint_limit:1;
	unsigned int task_ios13extended_footprint_limit:1;
#endif /* __arm64__ */
	unsigned int task_region_footprint:1;
	unsigned int task_region_info_flags:1;
	unsigned int task_has_crossed_thread_limit:1;
	unsigned int task_rr_in_flight:1; /* a t_rr_synchronize() is in flight */
	unsigned int task_jetsam_realtime_audio:1;

	/*
	 * A task's coalition set is "adopted" in task_create_internal
	 * and unset in task_deallocate_internal, so each array member
	 * can be referenced without the task lock.
	 * Note: these fields are protected by coalition->lock,
	 * not the task lock.
	 */
	coalition_t coalition[COALITION_NUM_TYPES];
	queue_chain_t task_coalition[COALITION_NUM_TYPES];
	uint64_t dispatchqueue_offset;

#if DEVELOPMENT || DEBUG
	boolean_t task_unnested;
	int task_disconnected_count;
#endif

#if HYPERVISOR
	void * XNU_PTRAUTH_SIGNED_PTR("task.hv_task_target") hv_task_target; /* hypervisor virtual machine object associated with this task */
#endif /* HYPERVISOR */

#if CONFIG_SECLUDED_MEMORY
	uint8_t task_can_use_secluded_mem;
	uint8_t task_could_use_secluded_mem;
	uint8_t task_could_also_use_secluded_mem;
	uint8_t task_suppressed_secluded;
#endif /* CONFIG_SECLUDED_MEMORY */

	task_exc_guard_behavior_t task_exc_guard;
	mach_vm_address_t mach_header_vm_address;

	queue_head_t io_user_clients;

#if CONFIG_FREEZE
	queue_head_t task_frozen_cseg_q; /* queue of csegs frozen to NAND */
#endif /* CONFIG_FREEZE */
	boolean_t donates_own_pages; /* pages land on the special Q (only swappable pages on iPadOS, early swap on macOS) */
	uint32_t task_shared_region_slide; /* cached here to avoid locking during telemetry */
#if CONFIG_PHYS_WRITE_ACCT
	uint64_t task_fs_metadata_writes;
#endif /* CONFIG_PHYS_WRITE_ACCT */
	uuid_t task_shared_region_uuid;
#if CONFIG_MEMORYSTATUS
	uint64_t memstat_dirty_start; /* last abstime transition into the dirty band or last call to task_ledger_settle_dirty_time while dirty */
#endif /* CONFIG_MEMORYSTATUS */
	vmobject_list_output_t corpse_vmobject_list;
	uint64_t corpse_vmobject_list_size;
#if CONFIG_DEFERRED_RECLAIM
	vm_deferred_reclamation_metadata_t deferred_reclamation_metadata; /* Protected by the task lock */
#endif /* CONFIG_DEFERRED_RECLAIM */

#if CONFIG_EXCLAVES
	void * XNU_PTRAUTH_SIGNED_PTR("task.conclave") conclave;
	void * XNU_PTRAUTH_SIGNED_PTR("task.exclave_crash_info") exclave_crash_info;
	uint32_t exclave_crash_info_length;
#endif /* CONFIG_EXCLAVES */

	/* Auxiliary code-signing information */
	uint64_t task_cs_auxiliary_info;

	/* Runtime security mitigations */
	task_security_config_s security_config;

#define CONFIG_LARGE_SIZE_TELEMETRY (!XNU_TARGET_OS_OSX)
#if CONFIG_LARGE_SIZE_TELEMETRY
	/* Guard objects telemetry. */
	_Atomic vm_map_size_t large_allocation_size;
#endif
};
643
644 ZONE_DECLARE_ID(ZONE_ID_PROC_TASK, void *);
645 extern zone_t proc_task_zone;
646
647 extern task_control_port_options_t task_get_control_port_options(task_t task);
648
649 /*
650 * EXC_GUARD default delivery behavior for optional Mach port and VM guards.
651 * Applied to new tasks at creation time.
652 */
653 extern task_exc_guard_behavior_t task_exc_guard_default;
654 extern size_t proc_and_task_size;
655 extern void *get_bsdtask_info(task_t t);
656 extern void *task_get_proc_raw(task_t task);
657 static inline void
task_require(struct task * task)658 task_require(struct task *task)
659 {
660 zone_id_require(ZONE_ID_PROC_TASK, proc_and_task_size, task_get_proc_raw(task));
661 }
662
/* Task lock: guards the task state marked (TL) above. */
#define task_lock(task)                 lck_mtx_lock(&(task)->lock)
#define task_lock_assert_owned(task)    LCK_MTX_ASSERT(&(task)->lock, LCK_MTX_ASSERT_OWNED)
#define task_lock_try(task)             lck_mtx_try_lock(&(task)->lock)
#define task_unlock(task)               lck_mtx_unlock(&(task)->lock)

/* Object-queue lock: guards the task_objq list of VM objects owned by the task. */
#define task_objq_lock_init(task)       lck_mtx_init(&(task)->task_objq_lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define task_objq_lock_destroy(task)    lck_mtx_destroy(&(task)->task_objq_lock, &vm_object_lck_grp)
#define task_objq_lock(task)            lck_mtx_lock(&(task)->task_objq_lock)
#define task_objq_lock_assert_owned(task)       LCK_MTX_ASSERT(&(task)->task_objq_lock, LCK_MTX_ASSERT_OWNED)
#define task_objq_lock_try(task)        lck_mtx_try_lock(&(task)->task_objq_lock)
#define task_objq_unlock(task)          lck_mtx_unlock(&(task)->task_objq_lock)

/* IPC lock: guards the itk_* port fields of struct task. */
#define itk_lock_init(task)     lck_mtx_init(&(task)->itk_lock_data, &ipc_lck_grp, &ipc_lck_attr)
#define itk_lock_destroy(task)  lck_mtx_destroy(&(task)->itk_lock_data, &ipc_lck_grp)
#define itk_lock(task)          lck_mtx_lock(&(task)->itk_lock_data)
#define itk_unlock(task)        lck_mtx_unlock(&(task)->itk_lock_data)

/* task clear return wait flags (see t_returnwaitflags / TRW_* above) */
#define TCRW_CLEAR_INITIAL_WAIT   0x1
#define TCRW_CLEAR_FINAL_WAIT     0x2
#define TCRW_CLEAR_EXEC_COMPLETE  0x4
#define TCRW_CLEAR_ALL_WAIT       (TCRW_CLEAR_INITIAL_WAIT | TCRW_CLEAR_FINAL_WAIT)
685
686 /* Initialize task module */
687 extern void task_init(void);
688
689 /* coalition_init() calls this to initialize ledgers before task_init() */
690 extern void init_task_ledgers(void);
691
692 extern task_t current_task(void) __pure2;
693
694 __pure2
695 static inline ipc_space_t
current_space(void)696 current_space(void)
697 {
698 return current_task()->itk_space;
699 }
700
701 extern bool task_is_driver(task_t task);
702 extern uint32_t task_ro_flags_get(task_t task);
703 extern void task_ro_flags_set(task_t task, uint32_t flags);
704 extern void task_ro_flags_clear(task_t task, uint32_t flags);
705
706 extern lck_attr_t task_lck_attr;
707 extern lck_grp_t task_lck_grp;
708
/*
 * One watched port: associates a port (and its port-destroyed request)
 * with the owning task.  Locking noted per field.
 */
struct task_watchport_elem {
	task_t                          twe_task;
	ipc_port_t                      twe_port;     /* (Space lock) */
	ipc_port_t XNU_PTRAUTH_SIGNED_PTR("twe_pdrequest") twe_pdrequest;
};
714
/*
 * Refcounted container for the set of ports a task watches
 * (passed in at spawn; see the watchports field of struct task).
 * Fields marked "tw_refcount == 0" are only safe to touch once the
 * last reference is dropped.
 */
struct task_watchports {
	os_refcnt_t                     tw_refcount;           /* (Space lock) */
	task_t                          tw_task;               /* (Space lock) & tw_refcount == 0 */
	thread_t                        tw_thread;             /* (Space lock) & tw_refcount == 0 */
	uint32_t                        tw_elem_array_count;   /* (Space lock) */
	struct task_watchport_elem      tw_elem[];             /* (Space lock) & (Portlock) & (mq lock) */
};
722
/* Reference counting for struct task_watchports. */
#define task_watchports_retain(x)   (os_ref_retain(&(x)->tw_refcount))
#define task_watchports_release(x)  (os_ref_release(&(x)->tw_refcount))

/* Initialize a watchport element; pdrequest starts out empty. */
#define task_watchport_elem_init(elem, task, port) \
do {                                               \
	(elem)->twe_task = (task);                     \
	(elem)->twe_port = (port);                     \
	(elem)->twe_pdrequest = IP_NULL;               \
} while(0)

/* Reset an element to the empty state. */
#define task_watchport_elem_clear(elem) task_watchport_elem_init((elem), NULL, NULL)
734
735 extern void
736 task_add_turnstile_watchports(
737 task_t task,
738 thread_t thread,
739 ipc_port_t *portwatch_ports,
740 uint32_t portwatch_count);
741
742 extern void
743 task_watchport_elem_deallocate(
744 struct task_watchport_elem *watchport_elem);
745
746 extern boolean_t
747 task_has_watchports(task_t task);
748
749 void
750 task_dyld_process_info_update_helper(
751 task_t task,
752 size_t active_count,
753 vm_map_address_t magic_addr,
754 ipc_port_t *release_ports,
755 size_t release_count);
756
757 extern kern_return_t
758 task_suspend2_mig(
759 task_t task,
760 task_suspension_token_t *suspend_token);
761
762 extern kern_return_t
763 task_suspend2_external(
764 task_t task,
765 task_suspension_token_t *suspend_token);
766
767 extern kern_return_t
768 task_resume2_mig(
769 task_suspension_token_t suspend_token);
770
771 extern kern_return_t
772 task_resume2_external(
773 task_suspension_token_t suspend_token);
774
775 extern void
776 task_suspension_token_deallocate_grp(
777 task_suspension_token_t suspend_token,
778 task_grp_t grp);
779
780 extern ipc_port_t
781 convert_task_to_port_with_flavor(
782 task_t task,
783 mach_task_flavor_t flavor,
784 task_grp_t grp);
785
786 extern task_t current_task_early(void) __pure2;
787
788 #else /* MACH_KERNEL_PRIVATE */
789
790 __BEGIN_DECLS
791
792 extern task_t current_task(void) __pure2;
793
794 extern bool task_is_driver(task_t task);
795
796 #define TF_NONE 0
797
798 #define TWF_NONE 0
799 #define TRW_LRETURNWAIT 0x01 /* task is waiting for fork/posix_spawn/exec to complete */
800 #define TRW_LRETURNWAITER 0x02 /* task is waiting for TRW_LRETURNWAIT to get cleared */
801 #define TRW_LEXEC_COMPLETE 0x04 /* thread should call exec complete */
802
803 /* task clear return wait flags */
804 #define TCRW_CLEAR_INITIAL_WAIT 0x1
805 #define TCRW_CLEAR_FINAL_WAIT 0x2
806 #define TCRW_CLEAR_EXEC_COMPLETE 0x4
807 #define TCRW_CLEAR_ALL_WAIT (TCRW_CLEAR_INITIAL_WAIT | TCRW_CLEAR_FINAL_WAIT)
808
809
810 #define TPF_NONE 0
811 #define TPF_EXEC_COPY 0x00000002 /* task is the new copy of an exec */
812
813
814 __END_DECLS
815
816 #endif /* MACH_KERNEL_PRIVATE */
817
818 __BEGIN_DECLS
819
820 #ifdef KERNEL_PRIVATE
821 extern boolean_t task_is_app_suspended(task_t task);
822 extern bool task_is_exotic(task_t task);
823 extern bool task_is_alien(task_t task);
824 extern boolean_t task_get_platform_binary(task_t task);
825 #endif /* KERNEL_PRIVATE */
826
827 #ifdef XNU_KERNEL_PRIVATE
828
829 /* Hold all threads in a task, Wait for task to stop running, just to get off CPU */
830 extern kern_return_t task_hold_and_wait(
831 task_t task,
832 bool suspend_conclave);
833
834 /* Release hold on all threads in a task */
835 extern kern_return_t task_release(
836 task_t task);
837
838 /* Suspend/resume a task where the kernel owns the suspend count */
839 extern kern_return_t task_suspend_internal( task_t task);
840 extern kern_return_t task_resume_internal( task_t task);
841
842 /* Suspends a task by placing a hold on its threads */
843 extern kern_return_t task_pidsuspend(
844 task_t task);
845
846 /* Resumes a previously paused task */
847 extern kern_return_t task_pidresume(
848 task_t task);
849
850 extern kern_return_t task_send_trace_memory(
851 task_t task,
852 uint32_t pid,
853 uint64_t uniqueid);
854
855 extern void task_remove_turnstile_watchports(
856 task_t task);
857
858 extern void task_transfer_turnstile_watchports(
859 task_t old_task,
860 task_t new_task,
861 thread_t new_thread);
862
863 extern kern_return_t
864 task_violated_guard(mach_exception_code_t, mach_exception_subcode_t, void *, bool);
865
866 #if DEVELOPMENT || DEBUG
867
868 extern kern_return_t task_disconnect_page_mappings(
869 task_t task);
870 #endif /* DEVELOPMENT || DEBUG */
871
872 extern void tasks_system_suspend(boolean_t suspend);
873
874 #if CONFIG_FREEZE
875
876 /* Freeze a task's resident pages */
877 extern kern_return_t task_freeze(
878 task_t task,
879 uint32_t *purgeable_count,
880 uint32_t *wired_count,
881 uint32_t *clean_count,
882 uint32_t *dirty_count,
883 uint32_t dirty_budget,
884 uint32_t *shared_count,
885 int *freezer_error_code,
886 boolean_t eval_only);
887
888 /* Thaw a currently frozen task */
889 extern kern_return_t task_thaw(
890 task_t task);
891
/*
 * Direction of a frozen-to-swap ledger adjustment; consumed by
 * task_update_frozen_to_swap_acct() below.
 */
typedef enum {
	CREDIT_TO_SWAP = 1,
	DEBIT_FROM_SWAP = 2
} freezer_acct_op_t;
896
897 extern void task_update_frozen_to_swap_acct(
898 task_t task,
899 int64_t amount,
900 freezer_acct_op_t op);
901
902 #endif /* CONFIG_FREEZE */
903
904 /* Halt all other threads in the current task */
905 extern kern_return_t task_start_halt(
906 task_t task);
907
908 /* Wait for other threads to halt and free halting task resources */
909 extern void task_complete_halt(
910 task_t task);
911
912 extern kern_return_t task_terminate_internal(
913 task_t task);
914
915 struct proc_ro;
916 typedef struct proc_ro *proc_ro_t;
917
918 extern kern_return_t task_create_internal(
919 task_t parent_task,
920 proc_ro_t proc_ro,
921 coalition_t *parent_coalitions,
922 boolean_t inherit_memory,
923 boolean_t is_64bit,
924 boolean_t is_64bit_data,
925 uint32_t t_flags,
926 uint32_t t_flags_ro,
927 uint32_t procflags,
928 uint8_t t_returnwaitflags,
929 task_t child_task);
930
931 extern kern_return_t task_set_special_port_internal(
932 task_t task,
933 int which,
934 ipc_port_t port);
935
936 extern kern_return_t task_set_security_tokens(
937 task_t task,
938 security_token_t sec_token,
939 audit_token_t audit_token,
940 host_priv_t host_priv);
941
942 extern kern_return_t task_info(
943 task_t task,
944 task_flavor_t flavor,
945 task_info_t task_info_out,
946 mach_msg_type_number_t *task_info_count);
947
948 /*
949 * Additional fields that aren't exposed through `task_power_info` but needed
950 * by clients of `task_power_info_locked`.
951 */
struct task_power_info_extra {
	uint64_t cycles;        /* CPU cycles retired by the task */
	uint64_t instructions;  /* instructions retired by the task */
	uint64_t pcycles;       /* presumably the P-core subset of cycles — TODO confirm */
	uint64_t pinstructions; /* presumably the P-core subset of instructions — TODO confirm */
	uint64_t user_ptime;    /* presumably user-mode time on P-cores — TODO confirm */
	uint64_t system_ptime;  /* presumably system-mode time on P-cores — TODO confirm */
	uint64_t runnable_time; /* NOTE(review): looks like time threads spent runnable — confirm units */
	uint64_t energy;        /* energy consumed (units not visible here) */
	uint64_t penergy;       /* presumably the P-core subset of energy — TODO confirm */
	uint64_t secure_time;   /* presumably time in secure execution — TODO confirm */
	uint64_t secure_ptime;  /* presumably secure-execution time on P-cores — TODO confirm */
};
965
966 void task_power_info_locked(
967 task_t task,
968 task_power_info_t info,
969 gpu_energy_data_t gpu_energy,
970 task_power_info_v2_t infov2,
971 struct task_power_info_extra *extra_info);
972
973 extern uint64_t task_gpu_utilisation(
974 task_t task);
975
976 extern void task_update_cpu_time_qos_stats(
977 task_t task,
978 uint64_t *eqos_stats,
979 uint64_t *rqos_stats);
980
981 extern void task_vtimer_set(
982 task_t task,
983 integer_t which);
984
985 extern void task_vtimer_clear(
986 task_t task,
987 integer_t which);
988
989 extern void task_vtimer_update(
990 task_t task,
991 integer_t which,
992 uint32_t *microsecs);
993
994 #define TASK_VTIMER_USER 0x01
995 #define TASK_VTIMER_PROF 0x02
996 #define TASK_VTIMER_RLIM 0x04
997
998 extern void task_set_64bit(
999 task_t task,
1000 boolean_t is_64bit,
1001 boolean_t is_64bit_data);
1002
1003 extern bool task_get_64bit_addr(
1004 task_t task);
1005
1006 extern bool task_get_64bit_data(
1007 task_t task);
1008
1009 extern void task_set_platform_binary(
1010 task_t task,
1011 boolean_t is_platform);
1012
1013 #if XNU_TARGET_OS_OSX
1014 #if DEVELOPMENT || DEBUG
1015 /* Disables task identity security hardening (*_set_exception_ports policy)
1016 * for all tasks if amfi_get_out_of_my_way is set. */
1017 extern bool AMFI_bootarg_disable_mach_hardening;
1018 #endif /* DEVELOPMENT || DEBUG */
1019 extern void task_disable_mach_hardening(
1020 task_t task);
1021
1022 extern bool task_opted_out_mach_hardening(
1023 task_t task);
1024 #endif /* XNU_TARGET_OS_OSX */
1025
1026 extern boolean_t task_is_a_corpse(
1027 task_t task);
1028
1029 extern boolean_t task_is_ipc_active(
1030 task_t task);
1031
1032 extern bool
1033 task_is_immovable_no_assert(task_t task);
1034
1035 extern bool task_is_immovable(
1036 task_t task);
1037
1038 extern void task_set_corpse(
1039 task_t task);
1040
1041 extern void task_set_exc_guard_default(
1042 task_t task,
1043 const char *name,
1044 unsigned long namelen,
1045 boolean_t is_simulated,
1046 uint32_t platform,
1047 uint32_t sdk);
1048
1049 extern bool task_set_ca_client_wi(
1050 task_t task,
1051 boolean_t ca_client_wi);
1052
1053 extern kern_return_t task_set_dyld_info(
1054 task_t task,
1055 mach_vm_address_t addr,
1056 mach_vm_size_t size,
1057 bool finalize_value);
1058
1059 extern void task_set_mach_header_address(
1060 task_t task,
1061 mach_vm_address_t addr);
1062
1063 extern void task_set_uniqueid(task_t task);
1064
1065 /* Get number of activations in a task */
1066 extern int get_task_numacts(
1067 task_t task);
1068
1069 extern bool task_donates_own_pages(
1070 task_t task);
1071
1072 struct label;
1073 extern kern_return_t task_collect_crash_info(
1074 task_t task,
1075 #if CONFIG_MACF
1076 struct label *crash_label,
1077 #endif
1078 int is_corpse_fork);
1079 void task_wait_till_threads_terminate_locked(task_t task);
1080
1081 /* JMM - should just be temporary (implementation in bsd_kern still) */
1082 extern void set_bsdtask_info(task_t, void *);
1083 extern uint32_t set_task_loadTag(task_t task, uint32_t loadTag);
1084 extern vm_map_t get_task_map_reference(task_t);
1085 extern vm_map_t swap_task_map(task_t, thread_t, vm_map_t);
1086 extern pmap_t get_task_pmap(task_t);
1087 extern uint64_t get_task_resident_size(task_t);
1088 extern uint64_t get_task_compressed(task_t);
1089 extern uint64_t get_task_resident_max(task_t);
1090 extern uint64_t get_task_phys_footprint(task_t);
#if CONFIG_LEDGER_INTERVAL_MAX
extern uint64_t get_task_phys_footprint_interval_max(task_t, int reset);
#endif /* CONFIG_LEDGER_INTERVAL_MAX */
1094 extern uint64_t get_task_phys_footprint_lifetime_max(task_t);
1095 extern uint64_t get_task_phys_footprint_limit(task_t);
1096 extern uint64_t get_task_neural_nofootprint_total(task_t task);
#if CONFIG_LEDGER_INTERVAL_MAX
extern uint64_t get_task_neural_nofootprint_total_interval_max(task_t, int reset);
#endif /* CONFIG_LEDGER_INTERVAL_MAX */
1100 extern uint64_t get_task_neural_nofootprint_total_lifetime_max(task_t);
1101 extern uint64_t get_task_purgeable_size(task_t);
1102 extern uint64_t get_task_cpu_time(task_t);
1103 extern uint64_t get_task_dispatchqueue_offset(task_t);
1104 extern uint64_t get_task_dispatchqueue_serialno_offset(task_t);
1105 extern uint64_t get_task_dispatchqueue_label_offset(task_t);
1106 extern uint64_t get_task_uniqueid(task_t task);
1107 extern int get_task_version(task_t task);
1108
1109 extern uint64_t get_task_internal(task_t);
1110 extern uint64_t get_task_internal_compressed(task_t);
1111 extern uint64_t get_task_purgeable_nonvolatile(task_t);
1112 extern uint64_t get_task_purgeable_nonvolatile_compressed(task_t);
1113 extern uint64_t get_task_iokit_mapped(task_t);
1114 extern uint64_t get_task_alternate_accounting(task_t);
1115 extern uint64_t get_task_alternate_accounting_compressed(task_t);
1116 extern uint64_t get_task_memory_region_count(task_t);
1117 extern uint64_t get_task_page_table(task_t);
1118 #if CONFIG_FREEZE
1119 extern uint64_t get_task_frozen_to_swap(task_t);
1120 #endif
1121 extern uint64_t get_task_network_nonvolatile(task_t);
1122 extern uint64_t get_task_network_nonvolatile_compressed(task_t);
1123 extern uint64_t get_task_wired_mem(task_t);
1124 extern uint32_t get_task_loadTag(task_t task);
1125
1126 extern uint64_t get_task_tagged_footprint(task_t task);
1127 extern uint64_t get_task_tagged_footprint_compressed(task_t task);
1128 extern uint64_t get_task_media_footprint(task_t task);
1129 extern uint64_t get_task_media_footprint_compressed(task_t task);
1130 extern uint64_t get_task_graphics_footprint(task_t task);
1131 extern uint64_t get_task_graphics_footprint_compressed(task_t task);
1132 extern uint64_t get_task_neural_footprint(task_t task);
1133 extern uint64_t get_task_neural_footprint_compressed(task_t task);
1134
1135 extern kern_return_t task_convert_phys_footprint_limit(int, int *);
1136 extern kern_return_t task_set_phys_footprint_limit_internal(task_t, int, int *, boolean_t, boolean_t);
1137 extern kern_return_t task_get_phys_footprint_limit(task_t task, int *limit_mb);
1138 #if DEBUG || DEVELOPMENT
1139 #if CONFIG_MEMORYSTATUS
1140 extern kern_return_t task_set_diag_footprint_limit_internal(task_t, uint64_t, uint64_t *);
1141 extern kern_return_t task_get_diag_footprint_limit_internal(task_t, uint64_t *, bool *);
1142 extern kern_return_t task_set_diag_footprint_limit(task_t task, uint64_t new_limit_mb, uint64_t *old_limit_mb);
1143 #endif /* CONFIG_MEMORYSTATUS */
1144 #endif /* DEBUG || DEVELOPMENT */
1145 extern kern_return_t task_get_conclave_mem_limit(task_t, uint64_t *conclave_limit);
1146 extern kern_return_t task_set_conclave_mem_limit(task_t, uint64_t conclave_limit);
1147
1148 extern security_token_t *task_get_sec_token(task_t task);
1149 extern void task_set_sec_token(task_t task, security_token_t *token);
1150 extern audit_token_t *task_get_audit_token(task_t task);
1151 extern void task_set_audit_token(task_t task, audit_token_t *token);
1152 extern void task_set_tokens(task_t task, security_token_t *sec_token, audit_token_t *audit_token);
1153 extern boolean_t task_is_privileged(task_t task);
1154 extern uint8_t *task_get_mach_trap_filter_mask(task_t task);
1155 extern void task_set_mach_trap_filter_mask(task_t task, uint8_t *mask);
1156 extern uint8_t *task_get_mach_kobj_filter_mask(task_t task);
1157 extern void task_set_mach_kobj_filter_mask(task_t task, uint8_t *mask);
1158 extern mach_vm_address_t task_get_all_image_info_addr(task_t task);
1159
1160 /* Jetsam memlimit attributes */
1161 extern bool task_get_memlimit_is_active(task_t task);
1162 extern bool task_get_memlimit_is_fatal(task_t task);
1163 extern void task_set_memlimit_is_active(task_t task, bool memlimit_is_active);
1164 extern void task_set_memlimit_is_fatal(task_t task, bool memlimit_is_fatal);
1165 extern bool task_set_exc_resource_bit(task_t task, bool memlimit_is_active);
1166 extern void task_reset_triggered_exc_resource(task_t task, bool memlimit_is_active);
1167 extern bool task_get_jetsam_realtime_audio(task_t task);
1168 extern void task_set_jetsam_realtime_audio(task_t task, bool realtime_audio);
1169
1170 extern uint64_t task_get_dirty_start(task_t task);
1171 extern void task_set_dirty_start(task_t task, uint64_t start);
1172
1173 extern void task_set_thread_limit(task_t task, uint16_t thread_limit);
1174 #if CONFIG_PROC_RESOURCE_LIMITS
1175 extern kern_return_t task_set_port_space_limits(task_t task, uint32_t soft_limit, uint32_t hard_limit);
1176 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
1177 extern void task_port_space_ast(task_t task);
1178
1179 #if XNU_TARGET_OS_OSX
1180 extern boolean_t task_has_system_version_compat_enabled(task_t task);
1181 extern void task_set_system_version_compat_enabled(task_t task, boolean_t enable_system_version_compat);
1182 #endif
1183
1184 extern boolean_t is_kerneltask(task_t task);
1185 extern boolean_t is_corpsefork(task_t task);
1186
1187 extern kern_return_t check_actforsig(task_t task, thread_t thread, int setast);
1188
1189 extern kern_return_t machine_task_get_state(
1190 task_t task,
1191 int flavor,
1192 thread_state_t state,
1193 mach_msg_type_number_t *state_count);
1194
1195 extern kern_return_t machine_task_set_state(
1196 task_t task,
1197 int flavor,
1198 thread_state_t state,
1199 mach_msg_type_number_t state_count);
1200
1201 extern void machine_task_terminate(task_t task);
1202
1203 extern kern_return_t machine_task_process_signature(task_t task, uint32_t platform, uint32_t sdk, char const **error_msg);
1204
/*
 * Indices of the entries in the per-task ledger.  Each field stores the
 * ledger-entry index for one accounted quantity; the struct instance is
 * the global `task_ledgers` declared further below.
 */
struct _task_ledger_indices {
	/* CPU and basic memory accounting */
	int cpu_time;
	int tkm_private;
	int tkm_shared;
	int phys_mem;
	int wired_mem;
	int conclave_mem;
	int internal;
	int iokit_mapped;
	int external;
	int reusable;
	int alternate_accounting;
	int alternate_accounting_compressed;
	int page_table;
	int phys_footprint;
	int internal_compressed;
	/* purgeable memory, by volatility and compression state */
	int purgeable_volatile;
	int purgeable_nonvolatile;
	int purgeable_volatile_compressed;
	int purgeable_nonvolatile_compressed;
	/* tagged/network/media/graphics/neural memory, split by whether the
	 * bytes count toward the footprint and by compression state */
	int tagged_nofootprint;
	int tagged_footprint;
	int tagged_nofootprint_compressed;
	int tagged_footprint_compressed;
	int network_volatile;
	int network_nonvolatile;
	int network_volatile_compressed;
	int network_nonvolatile_compressed;
	int media_nofootprint;
	int media_footprint;
	int media_nofootprint_compressed;
	int media_footprint_compressed;
	int graphics_nofootprint;
	int graphics_footprint;
	int graphics_nofootprint_compressed;
	int graphics_footprint_compressed;
	int neural_nofootprint;
	int neural_footprint;
	int neural_nofootprint_compressed;
	int neural_footprint_compressed;
	int neural_nofootprint_total;
	/* wakeup and scheduling statistics */
	int platform_idle_wakeups;
	int interrupt_wakeups;
#if CONFIG_SCHED_SFI
	int sfi_wait_times[MAX_SFI_CLASS_ID]; /* one entry per SFI class */
#endif /* CONFIG_SCHED_SFI */
	/* resource-accounting attribution (bank) and I/O */
	int cpu_time_billed_to_me;
	int cpu_time_billed_to_others;
	int physical_writes;
	int logical_writes;
	int logical_writes_to_external;
	int energy_billed_to_me;
	int energy_billed_to_others;
#if CONFIG_MEMORYSTATUS
	int memorystatus_dirty_time;
#endif /* CONFIG_MEMORYSTATUS */
	int pages_grabbed;
	int pages_grabbed_kern;
	int pages_grabbed_iopl;
	int pages_grabbed_upl;
#if CONFIG_DEFERRED_RECLAIM
	int est_reclaimable;
#endif /* CONFIG_DEFERRED_RECLAIM */
#if CONFIG_FREEZE
	int frozen_to_swap;
#endif /* CONFIG_FREEZE */
#if CONFIG_PHYS_WRITE_ACCT
	int fs_metadata_writes;
#endif /* CONFIG_PHYS_WRITE_ACCT */
	int swapins;
};
1276
1277 /*
1278 * Each runtime security mitigation that we support for userland processes
1279 * is tracked in the task security configuration and managed by the following
1280 * helpers.
1281 */
/*
 * Declares the accessor set for one runtime security mitigation flag:
 * task_has_<suffix>(), task_set_<suffix>(), task_clear_<suffix>() and
 * task_no_set_<suffix>().  The final declarator deliberately omits its
 * trailing semicolon so that the invocation site supplies it
 * (e.g. TASK_SECURITY_CONFIG_HELPER_DECLARE(tpro);).
 *
 * Fix: the last continuation line previously ended with a stray '\',
 * which made the macro silently absorb the following source line —
 * harmless while that line was blank, but fragile against future edits.
 */
#define TASK_SECURITY_CONFIG_HELPER_DECLARE(suffix) \
	extern bool task_has_##suffix(task_t); \
	extern void task_set_##suffix(task_t); \
	extern void task_clear_##suffix(task_t); \
	extern void task_no_set_##suffix(task_t task)

1288 extern uint32_t task_get_security_config(task_t);
1289
1290 TASK_SECURITY_CONFIG_HELPER_DECLARE(hardened_heap);
1291 TASK_SECURITY_CONFIG_HELPER_DECLARE(tpro);
1292 TASK_SECURITY_CONFIG_HELPER_DECLARE(guard_objects);
1293
1294 uint8_t task_get_platform_restrictions_version(task_t task);
1295 void task_set_platform_restrictions_version(task_t task, uint64_t version);
1296 uint8_t task_get_hardened_process_version(task_t task);
1297 void task_set_hardened_process_version(task_t task, uint64_t version);
1298
1299 #if HAS_MTE || HAS_MTE_EMULATION_SHIMS
1300 TASK_SECURITY_CONFIG_HELPER_DECLARE(sec);
1301 /*
1302 * Definitions need to be visible on bsd/
1303 */
1304
/*
 * Declares the has/set accessor pair for one MTE policy bit:
 * task_has_sec_<suffix>() and task_set_sec_<suffix>().  The last
 * declarator omits its ';' so the invocation site supplies it.
 */
#define TASK_MTE_POLICY_HELPER_DECLARE(suffix) \
	extern bool task_has_sec_##suffix(task_t); \
	extern void task_set_sec_##suffix(task_t)
1308
1309 TASK_MTE_POLICY_HELPER_DECLARE(soft_mode);
1310 TASK_MTE_POLICY_HELPER_DECLARE(user_data);
1311 TASK_MTE_POLICY_HELPER_DECLARE(inherit);
1312 TASK_MTE_POLICY_HELPER_DECLARE(never_check);
1313 TASK_MTE_POLICY_HELPER_DECLARE(restrict_receiving_aliases_to_tagged_memory);
1314
1315 extern bool current_task_has_sec_enabled(void);
1316 extern void task_clear_sec_policy(task_t);
1317 extern uint32_t task_get_sec_policy(task_t);
1318 extern void task_clear_sec_soft_mode(task_t task);
1319 #endif /* HAS_MTE || HAS_MTE_EMULATION_SHIMS */
1320
1321 /*
1322 * Many of the task ledger entries use a reduced feature set
1323 * (specifically they just use LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE)
1324 * and are stored in a smaller entry structure.
1325 * That structure is an implementation detail of the ledger.
1326 * But on PPL systems, the task ledger's memory is managed by the PPL
1327 * and it has to determine the size of the task ledger at compile time.
1328 * This define specifies the number of small entries so the PPL can
1329 * properly determine the ledger's size.
1330 *
1331 * If you add a new entry with only the
1332 * LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE | LEDGER_ENTRY_ALLOW_INACTIVE
1333 * flags, you need to increment this count.
1334 * Otherwise, PPL systems will panic at boot.
1335 */
#if CONFIG_DEFERRED_RECLAIM
#define TASK_LEDGER_NUM_SMALL_INDICES 34
#else /* CONFIG_DEFERRED_RECLAIM */
#define TASK_LEDGER_NUM_SMALL_INDICES 33
#endif /* CONFIG_DEFERRED_RECLAIM */
1341 extern struct _task_ledger_indices task_ledgers;
1342
1343 /* requires task to be unlocked, returns a referenced thread */
1344 thread_t task_findtid(task_t task, uint64_t tid);
1345 int pid_from_task(task_t task);
1346
1347 extern kern_return_t task_wakeups_monitor_ctl(task_t task, uint32_t *rate_hz, int32_t *flags);
1348 extern kern_return_t task_cpu_usage_monitor_ctl(task_t task, uint32_t *flags);
1349 extern void task_rollup_accounting_info(task_t new_task, task_t parent_task);
1350 extern kern_return_t task_io_monitor_ctl(task_t task, uint32_t *flags);
1351 extern void task_set_did_exec_flag(task_t task);
1352 extern void task_clear_exec_copy_flag(task_t task);
1353 extern bool task_is_initproc(task_t task);
1354 extern boolean_t task_is_exec_copy(task_t);
1355 extern boolean_t task_did_exec(task_t task);
1356 extern boolean_t task_is_active(task_t task);
1357 extern boolean_t task_is_halting(task_t task);
1358 extern void task_clear_return_wait(task_t task, uint32_t flags);
1359 extern void task_set_ctrl_port_default(task_t task, thread_t thread);
1360 extern void task_wait_to_return(void) __attribute__((noreturn));
1361 extern void task_post_signature_processing_hook(task_t task);
1362 extern event_t task_get_return_wait_event(task_t task);
1363
1364 extern void task_bank_reset(task_t task);
1365 extern void task_bank_init(task_t task);
1366
1367 #if CONFIG_MEMORYSTATUS
1368 extern void task_ledger_settle_dirty_time(task_t t);
1369 extern void task_ledger_settle_dirty_time_locked(task_t t);
1370 #endif /* CONFIG_MEMORYSTATUS */
1371 extern void task_ledger_settle(task_t t);
1372
1373 #if CONFIG_ARCADE
1374 extern void task_prep_arcade(task_t task, thread_t thread);
1375 #endif /* CONFIG_ARCADE */
1376
1377 extern int task_pid(task_t task);
1378
1379 #if __has_feature(ptrauth_calls)
1380 char *task_get_vm_shared_region_id_and_jop_pid(task_t task, uint64_t *);
1381 void task_set_shared_region_id(task_t task, char *id);
1382 #endif /* __has_feature(ptrauth_calls) */
1383
1384 extern boolean_t task_has_assertions(task_t task);
1385 /* End task_policy */
1386
1387 extern void task_set_gpu_role(task_t task, darwin_gpu_role_t gpu_role);
1388 extern boolean_t task_is_gpu_denied(task_t task);
1389 /* Returns PRIO_DARWIN_GPU values defined in sys/resource_private.h */
1390 extern darwin_gpu_role_t task_get_gpu_role(task_t task);
1391
1392 extern void task_set_game_mode(task_t task, bool enabled);
1393 /* returns true if update must be pushed to coalition (Automatically handled by task_set_game_mode) */
1394 extern bool task_set_game_mode_locked(task_t task, bool enabled);
1395 extern bool task_get_game_mode(task_t task);
1396
1397 extern void task_set_carplay_mode(task_t task, bool enabled);
1398 /* returns true if update must be pushed to coalition (Automatically handled by task_set_carplay_mode) */
1399 extern bool task_set_carplay_mode_locked(task_t task, bool enabled);
1400 extern bool task_get_carplay_mode(task_t task);
1401
1402 extern queue_head_t * task_io_user_clients(task_t task);
1403 extern void task_set_message_app_suspended(task_t task, boolean_t enable);
1404
1405 extern void task_copy_fields_for_exec(task_t dst_task, task_t src_task);
1406
1407 extern void task_copy_vmobjects(task_t task, vm_object_query_t query, size_t len, size_t *num);
1408 extern void task_get_owned_vmobjects(task_t task, size_t buffer_size, vmobject_list_output_t buffer, size_t* output_size, size_t* entries);
1409
1410 extern void task_set_filter_msg_flag(task_t task, boolean_t flag);
1411 extern boolean_t task_get_filter_msg_flag(task_t task);
1412
1413 #if __has_feature(ptrauth_calls)
1414 extern bool task_is_pac_exception_fatal(task_t task);
1415 extern void task_set_pac_exception_fatal_flag(task_t task);
1416 #endif /*__has_feature(ptrauth_calls)*/
1417
1418 extern bool task_is_jit_exception_fatal(task_t task);
1419 extern void task_set_jit_flags(task_t task);
1420
1421 extern bool task_needs_user_signed_thread_state(task_t task);
1422 extern void task_set_tecs(task_t task);
1423 extern void task_get_corpse_vmobject_list(task_t task, vmobject_list_output_t* list, size_t* list_size);
1424
1425 extern boolean_t task_corpse_forking_disabled(task_t task);
1426
1427 void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_MACH_PORTS(task_t task,
1428 uint32_t current_size, uint32_t soft_limit, uint32_t hard_limit);
1429
1430 extern int get_task_cdhash(task_t task, char cdhash[CS_CDHASH_LEN]);
1431
1432 extern boolean_t kdp_task_is_locked(task_t task);
1433
1434 /* redeclaration from task_server.h for the sake of kern_exec.c */
1435 extern kern_return_t _kernelrpc_mach_ports_register3(
1436 task_t task,
1437 mach_port_t port1,
1438 mach_port_t port2,
1439 mach_port_t port3);
1440
1441 /* Kernel side prototypes for MIG routines */
1442 extern kern_return_t task_get_exception_ports(
1443 task_t task,
1444 exception_mask_t exception_mask,
1445 exception_mask_array_t masks,
1446 mach_msg_type_number_t *CountCnt,
1447 exception_port_array_t ports,
1448 exception_behavior_array_t behaviors,
1449 thread_state_flavor_array_t flavors);
1450
1451 #if CONFIG_EXCLAVES
1452 int task_add_conclave(task_t task, void *, int64_t, const char *task_conclave_id);
1453 kern_return_t task_inherit_conclave(task_t old_task, task_t new_task, void *vnode, int64_t off);
1454 kern_return_t task_launch_conclave(mach_port_name_t port);
1455 void task_clear_conclave(task_t task);
1456 void task_stop_conclave(task_t task, bool gather_crash_bt);
1457 void task_suspend_conclave(task_t task);
1458 void task_resume_conclave(task_t task);
1459 kern_return_t task_stop_conclave_upcall(void);
1460 kern_return_t task_stop_conclave_upcall_complete(void);
1461 kern_return_t task_suspend_conclave_upcall(uint64_t *, size_t);
1462 struct conclave_sharedbuffer_t;
1463 kern_return_t task_crash_info_conclave_upcall(task_t task,
1464 const struct conclave_sharedbuffer_t *shared_buf, uint32_t length);
1465 typedef struct exclaves_resource exclaves_resource_t;
1466 exclaves_resource_t *task_get_conclave(task_t task);
1467 void task_set_conclave_untaintable(task_t task);
1468 void task_add_conclave_crash_info(task_t task, void *crash_info_ptr);
1469 //Changing this would also warrant a change in ConclaveSharedBuffer
1470 #define CONCLAVE_CRASH_BUFFER_PAGECOUNT 2
1471
1472 #endif /* CONFIG_EXCLAVES */
1473
1474 #endif /* XNU_KERNEL_PRIVATE */
1475 #ifdef KERNEL_PRIVATE
1476
1477 extern void *get_bsdtask_info(task_t);
1478 extern void *get_bsdthreadtask_info(thread_t);
1479 extern void task_bsdtask_kill(task_t);
1480 extern vm_map_t get_task_map(task_t);
1481 extern ledger_t get_task_ledger(task_t);
1482
1483 extern boolean_t get_task_pidsuspended(task_t);
1484 extern boolean_t get_task_suspended(task_t);
1485 extern boolean_t get_task_frozen(task_t);
1486
1487 /*
1488 * Flavors of convert_task_to_port. XNU callers get convert_task_to_port_kernel,
1489 * external callers get convert_task_to_port_external.
1490 */
1491 extern ipc_port_t convert_task_to_port(task_t);
1492 extern ipc_port_t convert_task_to_port_kernel(task_t);
1493 extern ipc_port_t convert_task_to_port_external(task_t);
1494 extern void convert_task_array_to_ports(task_array_t, size_t, mach_task_flavor_t);
1495
1496 extern ipc_port_t convert_task_read_to_port(task_t);
1497 extern ipc_port_t convert_task_read_to_port_kernel(task_read_t);
1498 extern ipc_port_t convert_task_read_to_port_external(task_t);
1499
1500 extern ipc_port_t convert_task_inspect_to_port(task_inspect_t);
1501 extern ipc_port_t convert_task_name_to_port(task_name_t);
1502
1503 extern ipc_port_t convert_corpse_to_port_and_nsrequest(task_t task);
1504
1505 extern ipc_port_t convert_task_suspension_token_to_port(task_suspension_token_t task);
1506 /* Convert from a port (in this case, an SO right to a task's resume port) to a task. */
1507 extern task_suspension_token_t convert_port_to_task_suspension_token(ipc_port_t port);
1508
1509 extern void task_suspension_send_once(ipc_port_t port);
1510
1511 #define TASK_WRITE_IMMEDIATE 0x1
1512 #define TASK_WRITE_DEFERRED 0x2
1513 #define TASK_WRITE_INVALIDATED 0x4
1514 #define TASK_WRITE_METADATA 0x8
1515 extern void task_update_logical_writes(task_t task, uint32_t io_size, int flags, void *vp);
1516
/*
 * Whether task_update_physical_writes() credits or debits the
 * corresponding balance.
 */
__enum_decl(task_balance_flags_t, uint8_t, {
	TASK_BALANCE_CREDIT = 0x1,
	TASK_BALANCE_DEBIT = 0x2,
});
1521
/* Flavor of physical write being accounted (currently metadata only). */
__enum_decl(task_physical_write_flavor_t, uint8_t, {
	TASK_PHYSICAL_WRITE_METADATA = 0x1,
});
1525 extern void task_update_physical_writes(task_t task, task_physical_write_flavor_t flavor,
1526 uint64_t io_size, task_balance_flags_t flags);
1527
1528 #if CONFIG_SECLUDED_MEMORY
1529 extern void task_set_can_use_secluded_mem(
1530 task_t task,
1531 boolean_t can_use_secluded_mem);
1532 extern void task_set_could_use_secluded_mem(
1533 task_t task,
1534 boolean_t could_use_secluded_mem);
1535 extern void task_set_could_also_use_secluded_mem(
1536 task_t task,
1537 boolean_t could_also_use_secluded_mem);
1538 extern boolean_t task_can_use_secluded_mem(
1539 task_t task,
1540 boolean_t is_allocate);
1541 extern boolean_t task_could_use_secluded_mem(task_t task);
1542 extern boolean_t task_could_also_use_secluded_mem(task_t task);
1543 #endif /* CONFIG_SECLUDED_MEMORY */
1544
1545 extern void task_set_darkwake_mode(task_t, boolean_t);
1546 extern boolean_t task_get_darkwake_mode(task_t);
1547
1548 #if __arm64__
1549 extern void task_set_legacy_footprint(task_t task);
1550 extern void task_set_extra_footprint_limit(task_t task);
1551 extern void task_set_ios13extended_footprint_limit(task_t task);
1552 #endif /* __arm64__ */
1553
1554 #if CONFIG_MACF
1555 extern struct label *get_task_crash_label(task_t task);
1556 extern void set_task_crash_label(task_t task, struct label *label);
1557 #endif /* CONFIG_MACF */
1558
1559 /* task_find_region_details() */
1560 __options_closed_decl(find_region_details_options_t, uint32_t, {
1561 FIND_REGION_DETAILS_OPTIONS_NONE = 0x00000000,
1562 FIND_REGION_DETAILS_AT_OFFSET = 0x00000001,
1563 FIND_REGION_DETAILS_GET_VNODE = 0x00000002,
1564 });
1565 #define FIND_REGION_DETAILS_OPTIONS_ALL ( \
1566 FIND_REGION_DETAILS_AT_OFFSET | \
1567 FIND_REGION_DETAILS_GET_VNODE \
1568 )
1569 extern int task_find_region_details(
1570 task_t task,
1571 vm_map_offset_t offset,
1572 find_region_details_options_t options,
1573 uintptr_t *vp_p, /* caller must call vnode_put(vp) when done */
1574 uint32_t *vid_p,
1575 bool *is_mapped_shared_p,
1576 uint64_t *start_p,
1577 uint64_t *len_p);
1578
1579
1580 #endif /* KERNEL_PRIVATE */
1581
1582 extern task_t kernel_task;
1583
1584 extern void task_name_deallocate_mig(
1585 task_name_t task_name);
1586
1587 extern void task_policy_set_deallocate_mig(
1588 task_policy_set_t task_policy_set);
1589
1590 extern void task_policy_get_deallocate_mig(
1591 task_policy_get_t task_policy_get);
1592
1593 extern void task_inspect_deallocate_mig(
1594 task_inspect_t task_inspect);
1595
1596 extern void task_read_deallocate_mig(
1597 task_read_t task_read);
1598
1599 extern void task_suspension_token_deallocate(
1600 task_suspension_token_t token);
1601
1602 extern boolean_t task_self_region_footprint(void);
1603 extern void task_self_region_footprint_set(boolean_t newval);
1604
1605 /* VM_REGION_INFO_FLAGS defined in vm_region.h */
1606 extern int task_self_region_info_flags(void);
1607 extern kern_return_t task_self_region_info_flags_set(int newval);
1608
1609 extern void task_ledgers_footprint(ledger_t ledger,
1610 ledger_amount_t *ledger_resident,
1611 ledger_amount_t *ledger_compressed);
1612 extern void task_set_memory_ownership_transfer(
1613 task_t task,
1614 boolean_t value);
1615
1616 #if DEVELOPMENT || DEBUG
1617 extern void task_set_no_footprint_for_debug(
1618 task_t task,
1619 boolean_t value);
1620 extern int task_get_no_footprint_for_debug(
1621 task_t task);
1622 #endif /* DEVELOPMENT || DEBUG */
1623
1624 #ifdef KERNEL_PRIVATE
1625 extern kern_return_t task_get_suspend_stats(task_t task, task_suspend_stats_t stats);
1626 extern kern_return_t task_get_suspend_stats_kdp(task_t task, task_suspend_stats_t stats);
1627 #endif /* KERNEL_PRIVATE*/
1628
1629 #ifdef XNU_KERNEL_PRIVATE
1630 extern kern_return_t task_get_suspend_sources(task_t task, task_suspend_source_array_t sources);
1631 extern kern_return_t task_get_suspend_sources_kdp(task_t task, task_suspend_source_array_t sources);
1632 #endif /* XNU_KERNEL_PRIVATE */
1633
1634 #if CONFIG_ROSETTA
1635 extern bool task_is_translated(task_t task);
1636 #endif
1637
1638
1639 #ifdef MACH_KERNEL_PRIVATE
1640
1641 void task_procname(task_t task, char *buf, int size);
1642 const char *task_best_name(task_t task);
1643
1644 #endif /* MACH_KERNEL_PRIVATE */
1645
1646 #if HAS_MTE
1647 /* Must be callable from IOKit as it sometimes has need to asynchronously
1648 * terminate tasks. Takes the task lock.
1649 */
1650 void task_set_ast_mte_synthesize_mach_exception(task_t task);
1651 #endif /* HAS_MTE */
1652
1653
1654 #ifdef KERNEL_PRIVATE
1655 kern_return_t task_set_cs_auxiliary_info(task_t task, uint64_t info);
1656 uint64_t task_get_cs_auxiliary_info_kdp(task_t task);
1657 #endif /* KERNEL_PRIVATE */
1658
1659 __END_DECLS
1660
1661 #endif /* _KERN_TASK_H_ */
1662