xref: /xnu-12377.61.12/osfmk/kern/task.h (revision 4d495c6e23c53686cf65f45067f79024cf5dcee8)
1 /*
2  * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_FREE_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	task.h
60  *	Author:	Avadis Tevanian, Jr.
61  *
62  *	This file contains the structure definitions for tasks.
63  *
64  */
65 /*
66  * Copyright (c) 1993 The University of Utah and
67  * the Computer Systems Laboratory (CSL).  All rights reserved.
68  *
69  * Permission to use, copy, modify and distribute this software and its
70  * documentation is hereby granted, provided that both the copyright
71  * notice and this permission notice appear in all copies of the
72  * software, derivative works or modified versions, and any portions
73  * thereof, and that both notices appear in supporting documentation.
74  *
75  * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
76  * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
77  * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
78  *
79  * CSL requests users of this software to return to [email protected] any
80  * improvements that they make and grant CSL redistribution rights.
81  *
82  */
83 /*
84  * NOTICE: This file was modified by McAfee Research in 2004 to introduce
85  * support for mandatory and extensible security protections.  This notice
86  * is included in support of clause 2.2 (b) of the Apple Public License,
87  * Version 2.0.
88  * Copyright (c) 2005 SPARTA, Inc.
89  */
90 
91 #ifndef _KERN_TASK_H_
92 #define _KERN_TASK_H_
93 
94 #include <kern/kern_types.h>
95 #include <kern/task_ref.h>
96 #include <mach/mach_types.h>
97 #include <sys/cdefs.h>
98 
99 #ifdef XNU_KERNEL_PRIVATE
100 #include <kern/btlog.h>
101 #include <kern/kern_cdata.h>
102 #include <mach/sfi_class.h>
103 #include <kern/counter.h>
104 #include <kern/cs_blobs.h>
105 #include <kern/queue.h>
106 #include <kern/recount.h>
107 #include <sys/kern_sysctl.h>
108 #include <sys/resource_private.h>
109 
110 #if CONFIG_EXCLAVES
111 #include <mach/exclaves.h>
112 #endif /* CONFIG_EXCLAVES */
113 #endif /* XNU_KERNEL_PRIVATE */
114 
115 #ifdef  MACH_KERNEL_PRIVATE
116 #include <mach/boolean.h>
117 #include <mach/port.h>
118 #include <mach/time_value.h>
119 #include <mach/message.h>
120 #include <mach/mach_param.h>
121 #include <mach/task_info.h>
122 #include <mach/exception_types.h>
123 #include <mach/vm_statistics.h>
124 #include <machine/task.h>
125 
126 #include <kern/cpu_data.h>
127 #include <kern/queue.h>
128 #include <kern/exception.h>
129 #include <kern/locks.h>
130 #include <security/_label.h>
131 #include <ipc/ipc_port.h>
132 
133 #include <kern/thread.h>
134 #include <mach/coalition.h>
135 #include <stdatomic.h>
136 #include <os/refcnt.h>
137 
#if CONFIG_DEFERRED_RECLAIM
/* Opaque handle to a task's deferred-reclamation metadata (defined in the VM layer). */
typedef struct vm_deferred_reclamation_metadata_s *vm_deferred_reclamation_metadata_t;
#endif /* CONFIG_DEFERRED_RECLAIM */
141 
/*
 * Accumulated CPU time bucketed by QoS class, one counter per class.
 * NOTE(review): units are not established in this header (presumably
 * Mach absolute-time or nanoseconds — confirm against the code that
 * updates task->cpu_time_eqos_stats / cpu_time_rqos_stats).
 */
struct _cpu_time_qos_stats {
	uint64_t cpu_time_qos_default;
	uint64_t cpu_time_qos_maintenance;
	uint64_t cpu_time_qos_background;
	uint64_t cpu_time_qos_utility;
	uint64_t cpu_time_qos_legacy;
	uint64_t cpu_time_qos_user_initiated;
	uint64_t cpu_time_qos_user_interactive;
};
151 
/*
 * Per-task counters of write operations, split by category.
 * Two instances live in struct task (internal vs. external writes).
 */
struct task_writes_counters {
	uint64_t task_immediate_writes;
	uint64_t task_deferred_writes;
	uint64_t task_invalidated_writes;
	uint64_t task_metadata_writes;
};
158 
/*
 * Set of pending policy-update actions accumulated while holding a task
 * lock, so the actual work can be performed later (presumably after the
 * lock is dropped — confirm against the task-policy code that consumes
 * these bits).  The anonymous union lets callers test/clear all pending
 * bits at once via tpt_value.
 */
struct task_pend_token {
	union {
		struct {
			uint32_t        tpt_update_sockets      :1,
			    tpt_update_timers       :1,
			    tpt_update_watchers     :1,
			    tpt_update_live_donor   :1,
			    tpt_update_coal_sfi     :1,
			    tpt_update_throttle     :1,
			    tpt_update_thread_sfi   :1,
			    tpt_force_recompute_pri :1,
			    tpt_update_tg_ui_flag   :1,
			    tpt_update_turnstile    :1,
			    tpt_update_tg_app_flag  :1,
			    tpt_update_game_mode    :1,
			    tpt_update_carplay_mode :1,
			    tpt_update_appnap       :1;
		};
		uint32_t tpt_value;     /* all pending bits as one word */
	};
};

typedef struct task_pend_token task_pend_token_s;
typedef struct task_pend_token *task_pend_token_t;
183 
/*
 * Per-task runtime security-mitigation configuration (hardened heap,
 * TPRO, platform/script restrictions, etc.).  The anonymous union
 * exposes the whole configuration as a single 32-bit `value` for bulk
 * copy/compare.  The `sec` bit only exists when MTE (or its emulation
 * shims) is available; otherwise that bit position is reserved.
 */
struct task_security_config {
	union {
		struct {
			uint16_t hardened_heap: 1,
			    tpro: 1,
#if HAS_MTE || HAS_MTE_EMULATION_SHIMS
			    sec: 1,
#else /* HAS_MTE || HAS_MTE_EMULATION_SHIMS */
			reserved: 1,
#endif
			platform_restrictions_version: 3,
			    script_restrictions: 1,
			    ipc_containment_vessel: 1,
			    guard_objects: 1;
			uint8_t hardened_process_version;
		};
		uint32_t value;
	};
};

typedef struct task_security_config task_security_config_s;
205 
206 struct task_watchports;
207 #include <bank/bank_internal.h>
208 
209 struct ucred;
210 
211 #ifdef MACH_BSD
212 struct proc;
213 struct proc_ro;
214 #endif
215 
/* Flags qualifying a task's memory limit (stored atomically in task->memlimit_flags). */
__options_closed_decl(task_memlimit_flags_t, uint32_t, {
	/* if set, use active attributes, otherwise use inactive attributes */
	TASK_MEMLIMIT_IS_ACTIVE             = 0x01,
	/* if set, exceeding current memlimit will prove fatal to the task */
	TASK_MEMLIMIT_IS_FATAL              = 0x02,
	/* if set, suppress exc_resource exception when task exceeds active memory limit */
	TASK_MEMLIMIT_ACTIVE_EXC_RESOURCE   = 0x04,
	/* if set, suppress exc_resource exception when task exceeds inactive memory limit */
	TASK_MEMLIMIT_INACTIVE_EXC_RESOURCE = 0x08
});
226 
/*
 * struct task - the Mach task (process-level) control block.
 *
 * Unless a field's comment names another lock, mutable state is
 * protected by the task lock (`lock` below); (TL)/(PL) annotations name
 * the protecting lock explicitly.  The layout is kernel-ABI sensitive:
 * do not reorder or resize fields without auditing all users.
 */
struct task {
	/* Synchronization/destruction information */
	decl_lck_mtx_data(, lock);      /* Task's lock */
	os_refcnt_t     ref_count;      /* Number of references to me */

#if DEVELOPMENT || DEBUG
	struct os_refgrp *ref_group;
	lck_spin_t        ref_group_lock;
#endif /* DEVELOPMENT || DEBUG */

	bool            active;         /* Task has not been terminated */
	bool            ipc_active;     /* IPC with the task ports is allowed */
	bool            halting;        /* Task is being halted */
	bool            message_app_suspended;  /* Let iokit know when pidsuspended */

	/* Virtual timers */
	uint32_t        vtimers;
	uint32_t loadTag; /* dext ID used for logging identity */

	/* Globally unique id to identify tasks and corpses */
	uint64_t        task_uniqueid;

	/* Miscellaneous */
	vm_map_t        XNU_PTRAUTH_SIGNED_PTR("task.map") map; /* Address space description */
	queue_chain_t   tasks;  /* global list of tasks */
	struct task_watchports *watchports; /* watchports passed in spawn */
	turnstile_inheritor_t returnwait_inheritor; /* inheritor for task_wait */

	/* Threads in this task */
	queue_head_t            threads;
	struct restartable_ranges *t_rr_ranges;

	processor_set_t         pset_hint;
	struct affinity_space   *affinity_space;

	int                     thread_count;
	uint32_t                active_thread_count;
	int                     suspend_count;  /* Internal scheduling only */
#ifdef CONFIG_TASK_SUSPEND_STATS
	struct task_suspend_stats_s t_suspend_stats; /* suspension statistics for this task */
	task_suspend_source_array_t t_suspend_sources; /* array of suspender debug info for this task */
#endif /* CONFIG_TASK_SUSPEND_STATS */

	/* User-visible scheduling information */
	integer_t               user_stop_count;        /* outstanding stops */
	integer_t               legacy_stop_count;      /* outstanding legacy stops */

	int16_t                 priority;               /* base priority for threads */
	int16_t                 max_priority;           /* maximum priority for threads */

	integer_t               importance;             /* priority offset (BSD 'nice' value) */

	/* Statistics */
	uint64_t                total_runnable_time;

	struct recount_task     tk_recount;

	/* IPC structures */
	decl_lck_mtx_data(, itk_lock_data);
	/*
	 * Different flavors of task port.
	 * These flavors TASK_FLAVOR_* are defined in mach_types.h
	 */
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_task_ports") itk_task_ports[TASK_SELF_PORT_COUNT];
#if CONFIG_CSR
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_settable_self") itk_settable_self;   /* a send right */
#endif /* CONFIG_CSR */
	struct exception_action exc_actions[EXC_TYPES_COUNT];
	/* special exception port used by task_register_hardened_exception_handler */
	struct hardened_exception_action hardened_exception_action;
	/* a send right for each valid element */
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_host") itk_host;                     /* a send right */
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_bootstrap") itk_bootstrap;           /* a send right */
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_debug_control") itk_debug_control;   /* send right for debugmode communications */
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_task_access") itk_task_access;       /* and another send right */
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_resume") itk_resume;                 /* a receive right to resume this task */
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_registered") itk_registered[TASK_PORT_REGISTER_MAX];
	/* all send rights */
	ipc_port_t * XNU_PTRAUTH_SIGNED_PTR("task.itk_dyld_notify") itk_dyld_notify; /* lazy send rights array of size DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT */
#if CONFIG_PROC_RESOURCE_LIMITS
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_resource_notify") itk_resource_notify; /* a send right to the resource notify port */
#endif /* CONFIG_PROC_RESOURCE_LIMITS */
	struct ipc_space * XNU_PTRAUTH_SIGNED_PTR("task.itk_space") itk_space;

	ledger_t        ledger;
	/* Synchronizer ownership information */
	queue_head_t    semaphore_list;         /* list of owned semaphores   */
	int             semaphores_owned;       /* number of semaphores owned */

	unsigned int    priv_flags;                     /* privilege resource flags */
#define VM_BACKING_STORE_PRIV   0x1

	MACHINE_TASK

	counter_t faults;             /* faults counter */
	counter_t pageins;            /* pageins counter */
	counter_t cow_faults;         /* copy on write fault counter */
	counter_t messages_sent;      /* messages sent counter */
	counter_t messages_received;  /* messages received counter */
	uint32_t decompressions;      /* decompression counter (from threads that already terminated) */
	uint32_t syscalls_mach;       /* mach system call counter */
	uint32_t syscalls_unix;       /* unix system call counter */
	uint32_t c_switch;            /* total context switches */
	uint32_t p_switch;            /* total processor switches */
	uint32_t ps_switch;           /* total pset switches */

#ifdef  MACH_BSD
	struct proc_ro *                bsd_info_ro;
#endif
	kcdata_descriptor_t             corpse_info;
	uint64_t                        crashed_thread_id;
	queue_chain_t                   corpse_tasks;
#ifdef CONFIG_MACF
	struct label *                  crash_label;
#endif
	volatile uint32_t t_flags;                                      /* general-purpose task flags protected by task_lock (TL) */
#define TF_NONE                 0
#define TF_64B_ADDR             0x00000001                              /* task has 64-bit addressing */
#define TF_64B_DATA             0x00000002                              /* task has 64-bit data registers */
#define TF_CPUMON_WARNING       0x00000004                              /* task has at least one thread in CPU usage warning zone */
#define TF_WAKEMON_WARNING      0x00000008                              /* task is in wakeups monitor warning zone */
#define TF_TELEMETRY            (TF_CPUMON_WARNING | TF_WAKEMON_WARNING) /* task is a telemetry participant */
#define TF_GPU_DENIED           0x00000010                              /* task is not allowed to access the GPU */
#define TF_PENDING_CORPSE       0x00000040                              /* task corpse has not been reported yet */
#define TF_CORPSE_FORK          0x00000080                              /* task is a forked corpse */
#define TF_CA_CLIENT_WI         0x00000800                              /* task has CA_CLIENT work interval */
#define TF_DARKWAKE_MODE        0x00001000                              /* task is in darkwake mode */
#define TF_NO_SMT               0x00002000                              /* task threads must not be paired with SMT threads */
#define TF_SYS_VERSION_COMPAT   0x00008000                              /* shim task accesses to OS version data (macOS - app compatibility) */
#define TF_TECS                 0x00020000                              /* task threads must enable CPU security */
#if defined(__x86_64__)
#define TF_INSN_COPY_OPTOUT     0x00040000                              /* task threads opt out of unhandled-fault instruction stream collection */
#endif
#define TF_COALITION_MEMBER     0x00080000                              /* task is a member of a coalition */
#define TF_NO_CORPSE_FORKING    0x00100000                              /* do not fork a corpse for this task */
#define TF_USE_PSET_HINT_CLUSTER_TYPE 0x00200000                        /* bind task to task->pset_hint->pset_cluster_type */
#define TF_DYLD_ALL_IMAGE_FINAL   0x00400000                            /* all_image_info_addr can no longer be changed */
#define TF_HASPROC              0x00800000                              /* task points to a proc */
#define TF_GAME_MODE            0x40000000                              /* Set the game mode bit for CLPC */
#define TF_CARPLAY_MODE         0x80000000                              /* Set the carplay mode bit for CLPC */

/*
 * WARNING: These TF_ and TFRO_ flags are NOT automatically inherited by a child of fork
 * If you believe something should be inherited, you must manually inherit the flags in `task_create_internal`
 */

/*
 * RO-protected flags:
 */
#define TFRO_CORPSE                     0x00000020                      /* task is a corpse */
#if XNU_TARGET_OS_OSX
#define TFRO_MACH_HARDENING_OPT_OUT     0x00000040                      /* task might load third party plugins on macOS and should be opted out of mach hardening */
#endif /* XNU_TARGET_OS_OSX */
#define TFRO_PLATFORM                   0x00000080                      /* task is a platform binary */

#define TFRO_FILTER_MSG                 0x00004000                      /* task calls into message filter callback before sending a message */
#define TFRO_PAC_EXC_FATAL              0x00010000                      /* task is marked a corpse if a PAC exception occurs */
#define TFRO_JIT_EXC_FATAL              0x00020000                      /* kill the task on access violations from privileged JIT code */
#define TFRO_PAC_ENFORCE_USER_STATE     0x01000000                      /* Enforce user and kernel signed thread state */
#if CONFIG_EXCLAVES
#define TFRO_HAS_KD_ACCESS              0x02000000                      /* Access to the kernel exclave resource domain  */
#endif /* CONFIG_EXCLAVES */
#define TFRO_FREEZE_EXCEPTION_PORTS     0x04000000                      /* Setting new exception ports on the task/thread is disallowed */
#if CONFIG_EXCLAVES
#define TFRO_HAS_SENSOR_MIN_ON_TIME_ACCESS     0x08000000               /* Access to sensor minimum on time call  */
#endif /* CONFIG_EXCLAVES */

/*
 * Task is running within a 64-bit address space.
 */
#define task_has_64Bit_addr(task)       \
	(((task)->t_flags & TF_64B_ADDR) != 0)
#define task_set_64Bit_addr(task)       \
	((task)->t_flags |= TF_64B_ADDR)
#define task_clear_64Bit_addr(task)     \
	((task)->t_flags &= ~TF_64B_ADDR)

/*
 * Task is using 64-bit machine state.
 */
#define task_has_64Bit_data(task)       \
	(((task)->t_flags & TF_64B_DATA) != 0)
#define task_set_64Bit_data(task)       \
	((task)->t_flags |= TF_64B_DATA)
#define task_clear_64Bit_data(task)     \
	((task)->t_flags &= ~TF_64B_DATA)

#define task_corpse_pending_report(task)        \
	 (((task)->t_flags & TF_PENDING_CORPSE) != 0)

#define task_set_corpse_pending_report(task)       \
	 ((task)->t_flags |= TF_PENDING_CORPSE)

#define task_clear_corpse_pending_report(task)       \
	 ((task)->t_flags &= ~TF_PENDING_CORPSE)

#define task_is_a_corpse_fork(task)     \
	(((task)->t_flags & TF_CORPSE_FORK) != 0)

#define task_set_coalition_member(task)      \
	((task)->t_flags |= TF_COALITION_MEMBER)

#define task_clear_coalition_member(task)    \
	((task)->t_flags &= ~TF_COALITION_MEMBER)

#define task_is_coalition_member(task)       \
	(((task)->t_flags & TF_COALITION_MEMBER) != 0)

#define task_has_proc(task) \
	(((task)->t_flags & TF_HASPROC) != 0)

#define task_set_has_proc(task) \
	((task)->t_flags |= TF_HASPROC)

#define task_clear_has_proc(task) \
	((task)->t_flags &= ~TF_HASPROC)

	uint32_t t_procflags;                                            /* general-purpose task flags protected by proc_lock (PL) */
#define TPF_NONE                 0
#define TPF_DID_EXEC             0x00000001                              /* task has been execed to a new task */
#define TPF_EXEC_COPY            0x00000002                              /* task is the new copy of an exec */

#define task_did_exec_internal(task)            \
	(((task)->t_procflags & TPF_DID_EXEC) != 0)

#define task_is_exec_copy_internal(task)        \
	(((task)->t_procflags & TPF_EXEC_COPY) != 0)

	mach_vm_address_t       all_image_info_addr; /* dyld __all_image_info     */
	mach_vm_size_t          all_image_info_size; /* section location and size */

#if CONFIG_CPU_COUNTERS
#define TASK_KPC_FORCED_ALL_CTRS        0x2     /* Bit in "t_kpc" signifying this task forced all counters */
	uint32_t t_kpc; /* kpc flags */
#endif /* CONFIG_CPU_COUNTERS */

	_Atomic darwin_gpu_role_t       t_gpu_role;

	bool pidsuspended; /* pid_suspend called; no threads can execute */
	bool frozen;       /* frozen; private resident pages committed to swap */
	bool changing_freeze_state;        /* in the process of freezing or thawing */
	bool     is_large_corpse;
	uint16_t policy_ru_cpu          :4,
	    policy_ru_cpu_ext      :4,
	    applied_ru_cpu         :4,
	    applied_ru_cpu_ext     :4;
	uint8_t  rusage_cpu_flags;
	uint8_t  rusage_cpu_percentage;         /* Task-wide CPU limit percentage */
	uint8_t  rusage_cpu_perthr_percentage;  /* Per-thread CPU limit percentage */
#if MACH_ASSERT
	int8_t          suspends_outstanding;   /* suspends this task performed in excess of resumes */
#endif
	uint8_t                  t_returnwaitflags;      /* TRW_* bits below */
#define TWF_NONE                 0
#define TRW_LRETURNWAIT          0x01           /* task is waiting for fork/posix_spawn/exec to complete */
#define TRW_LRETURNWAITER        0x02           /* task is waiting for TRW_LRETURNWAIT to get cleared */
#define TRW_LEXEC_COMPLETE       0x04           /* thread should call exec complete */

#if CONFIG_EXCLAVES
	uint8_t                  t_exclave_state;        /* TES_* bits below */
#define TES_NONE                 0
#define TES_CONCLAVE_TAINTED     0x01           /* Task has talked to conclave, xnu has tainted the process */
#define TES_CONCLAVE_UNTAINTABLE 0x02           /* Task can not be tainted by xnu when it talks to conclave */
#endif /* CONFIG_EXCLAVES */

#if __has_feature(ptrauth_calls)
	bool                            shared_region_auth_remapped;    /* authenticated sections ready for use */
	char                            *shared_region_id;              /* determines which ptr auth key to use */
#endif /* __has_feature(ptrauth_calls) */
	struct vm_shared_region         *shared_region;

	uint64_t rusage_cpu_interval;           /* Task-wide CPU limit interval */
	uint64_t rusage_cpu_perthr_interval;    /* Per-thread CPU limit interval */
	uint64_t rusage_cpu_deadline;
	thread_call_t rusage_cpu_callt;
#if CONFIG_TASKWATCH
	queue_head_t    task_watchers;          /* app state watcher threads */
	int     num_taskwatchers;
	int             watchapplying;
#endif /* CONFIG_TASKWATCH */

	struct bank_task *bank_context;  /* pointer to per task bank structure */

#if IMPORTANCE_INHERITANCE
	struct ipc_importance_task  *task_imp_base;     /* Base of IPC importance chain */
#endif /* IMPORTANCE_INHERITANCE */

	vm_extmod_statistics_data_t     extmod_statistics;

	struct task_requested_policy requested_policy;
	struct task_effective_policy effective_policy;

	struct task_pend_token pended_coalition_changes;

	/*
	 * Can be merged with imp_donor bits, once the IMPORTANCE_INHERITANCE macro goes away.
	 */
	uint32_t        low_mem_notified_warn           :1,     /* warning low memory notification is sent to the task */
	    low_mem_notified_critical       :1,                 /* critical low memory notification is sent to the task */
	    purged_memory_warn              :1,                 /* purgeable memory of the task is purged for warning level pressure */
	    purged_memory_critical          :1,                 /* purgeable memory of the task is purged for critical level pressure */
	    low_mem_privileged_listener     :1,                 /* if set, task would like to know about pressure changes before other tasks on the system */
	    mem_notify_reserved             :27;                /* reserved for future use */

	task_memlimit_flags_t _Atomic memlimit_flags;

	io_stat_info_t          task_io_stats;

	struct task_writes_counters task_writes_counters_internal;
	struct task_writes_counters task_writes_counters_external;

	/*
	 * The cpu_time_qos_stats fields are protected by the task lock
	 */
	struct _cpu_time_qos_stats      cpu_time_eqos_stats;
	struct _cpu_time_qos_stats      cpu_time_rqos_stats;

	/* Statistics accumulated for terminated threads from this task */
	uint32_t        task_timer_wakeups_bin_1;
	uint32_t        task_timer_wakeups_bin_2;
	uint64_t        task_gpu_ns;

	uint8_t         task_can_transfer_memory_ownership;
#if DEVELOPMENT || DEBUG
	uint8_t         task_no_footprint_for_debug;
#endif
	uint8_t         task_objects_disowning;
	uint8_t         task_objects_disowned;
	/* # of purgeable volatile VM objects owned by this task: */
	int             task_volatile_objects;
	/* # of purgeable but not volatile VM objects owned by this task: */
	int             task_nonvolatile_objects;
	int             task_owned_objects;
	queue_head_t    task_objq;
	decl_lck_mtx_data(, task_objq_lock); /* protects "task_objq" */

	unsigned int    task_thread_limit:16;
#if __arm64__
	unsigned int    task_legacy_footprint:1;
	unsigned int    task_extra_footprint_limit:1;
	unsigned int    task_ios13extended_footprint_limit:1;
#endif /* __arm64__ */
	unsigned int    task_region_footprint:1;
	unsigned int    task_region_info_flags:1;
	unsigned int    task_has_crossed_thread_limit:1;
	unsigned int    task_rr_in_flight:1; /* a t_rr_synchronize() is in flight */
	unsigned int    task_jetsam_realtime_audio:1;

	/*
	 * A task's coalition set is "adopted" in task_create_internal
	 * and unset in task_deallocate_internal, so each array member
	 * can be referenced without the task lock.
	 * Note: these fields are protected by coalition->lock,
	 *       not the task lock.
	 */
	coalition_t     coalition[COALITION_NUM_TYPES];
	queue_chain_t   task_coalition[COALITION_NUM_TYPES];
	uint64_t        dispatchqueue_offset;

#if DEVELOPMENT || DEBUG
	boolean_t       task_unnested;
	int             task_disconnected_count;
#endif

#if HYPERVISOR
	void * XNU_PTRAUTH_SIGNED_PTR("task.hv_task_target") hv_task_target; /* hypervisor virtual machine object associated with this task */
#endif /* HYPERVISOR */

#if CONFIG_SECLUDED_MEMORY
	uint8_t task_can_use_secluded_mem;
	uint8_t task_could_use_secluded_mem;
	uint8_t task_could_also_use_secluded_mem;
	uint8_t task_suppressed_secluded;
#endif /* CONFIG_SECLUDED_MEMORY */

	task_exc_guard_behavior_t task_exc_guard;
	mach_vm_address_t mach_header_vm_address;

	queue_head_t    io_user_clients;

#if CONFIG_FREEZE
	queue_head_t   task_frozen_cseg_q;  /* queue of csegs frozen to NAND */
#endif /* CONFIG_FREEZE */
	boolean_t       donates_own_pages; /* pages land on the special Q (only swappable pages on iPadOS, early swap on macOS) */
	uint32_t task_shared_region_slide;   /* cached here to avoid locking during telemetry */
#if CONFIG_PHYS_WRITE_ACCT
	uint64_t        task_fs_metadata_writes;
#endif /* CONFIG_PHYS_WRITE_ACCT */
	uuid_t   task_shared_region_uuid;
#if CONFIG_MEMORYSTATUS
	uint64_t        memstat_dirty_start; /* last abstime transition into the dirty band or last call to task_ledger_settle_dirty_time  while dirty */
#endif /* CONFIG_MEMORYSTATUS */
	vmobject_list_output_t corpse_vmobject_list;
	uint64_t corpse_vmobject_list_size;
#if CONFIG_DEFERRED_RECLAIM
	vm_deferred_reclamation_metadata_t deferred_reclamation_metadata; /* Protected by the task lock */
#endif /* CONFIG_DEFERRED_RECLAIM */

#if CONFIG_EXCLAVES
	void * XNU_PTRAUTH_SIGNED_PTR("task.conclave") conclave;
	void * XNU_PTRAUTH_SIGNED_PTR("task.exclave_crash_info") exclave_crash_info;
	uint32_t exclave_crash_info_length;
#endif /* CONFIG_EXCLAVES */

	/* Auxiliary code-signing information */
	uint64_t task_cs_auxiliary_info;

	/* Runtime security mitigations */
	task_security_config_s security_config;
};
637 
638 ZONE_DECLARE_ID(ZONE_ID_PROC_TASK, void *);
639 extern zone_t proc_task_zone;
640 
641 extern task_control_port_options_t task_get_control_port_options(task_t task);
642 
643 /*
644  * EXC_GUARD default delivery behavior for optional Mach port and VM guards.
645  * Applied to new tasks at creation time.
646  */
647 extern task_exc_guard_behavior_t task_exc_guard_default;
648 extern size_t proc_and_task_size;
649 extern void  *get_bsdtask_info(task_t t);
650 extern void *task_get_proc_raw(task_t task);
/*
 * Validate that `task` is a genuine allocation from the proc_task zone
 * (defense against forged task pointers); presumably panics on failure —
 * see zone_id_require().
 */
static inline void
task_require(struct task *task)
{
	zone_id_require(ZONE_ID_PROC_TASK, proc_and_task_size, task_get_proc_raw(task));
}
656 
/* Task lock: guards the task fields annotated (TL) above. */
#define task_lock(task)                 lck_mtx_lock(&(task)->lock)
#define task_lock_assert_owned(task)    LCK_MTX_ASSERT(&(task)->lock, LCK_MTX_ASSERT_OWNED)
#define task_lock_try(task)             lck_mtx_try_lock(&(task)->lock)
#define task_unlock(task)               lck_mtx_unlock(&(task)->lock)
661 
/* task_objq lock: protects the task's owned-VM-object queue ("task_objq"). */
#define task_objq_lock_init(task)       lck_mtx_init(&(task)->task_objq_lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define task_objq_lock_destroy(task)    lck_mtx_destroy(&(task)->task_objq_lock, &vm_object_lck_grp)
#define task_objq_lock(task)            lck_mtx_lock(&(task)->task_objq_lock)
#define task_objq_lock_assert_owned(task)       LCK_MTX_ASSERT(&(task)->task_objq_lock, LCK_MTX_ASSERT_OWNED)
#define task_objq_lock_try(task)        lck_mtx_try_lock(&(task)->task_objq_lock)
#define task_objq_unlock(task)          lck_mtx_unlock(&(task)->task_objq_lock)
668 
/* itk lock: protects the task's IPC port fields (itk_*). */
#define itk_lock_init(task)     lck_mtx_init(&(task)->itk_lock_data, &ipc_lck_grp, &ipc_lck_attr)
#define itk_lock_destroy(task)  lck_mtx_destroy(&(task)->itk_lock_data, &ipc_lck_grp)
#define itk_lock(task)          lck_mtx_lock(&(task)->itk_lock_data)
#define itk_unlock(task)        lck_mtx_unlock(&(task)->itk_lock_data)
673 
/* task clear return wait flags (selects which TRW_* waits to release) */
#define TCRW_CLEAR_INITIAL_WAIT   0x1
#define TCRW_CLEAR_FINAL_WAIT     0x2
#define TCRW_CLEAR_EXEC_COMPLETE  0x4
#define TCRW_CLEAR_ALL_WAIT       (TCRW_CLEAR_INITIAL_WAIT | TCRW_CLEAR_FINAL_WAIT)
679 
680 /* Initialize task module */
681 extern void             task_init(void);
682 
683 /* coalition_init() calls this to initialize ledgers before task_init() */
684 extern void             init_task_ledgers(void);
685 
686 extern task_t   current_task(void) __pure2;
687 
/* Return the IPC space of the calling thread's task. */
__pure2
static inline ipc_space_t
current_space(void)
{
	return current_task()->itk_space;
}
694 
695 extern bool task_is_driver(task_t task);
696 extern uint32_t task_ro_flags_get(task_t task);
697 extern void task_ro_flags_set(task_t task, uint32_t flags);
698 extern void task_ro_flags_clear(task_t task, uint32_t flags);
699 
700 extern lck_attr_t      task_lck_attr;
701 extern lck_grp_t       task_lck_grp;
702 
/*
 * One watchport registration linking a task to a watched port.
 * Fields annotated "(Space lock)" are protected by the IPC space lock.
 */
struct task_watchport_elem {
	task_t                          twe_task;
	ipc_port_t                      twe_port;     /* (Space lock) */
	/* pdrequest: presumably the port-destroyed notification request
	 * port (cleared to IP_NULL by task_watchport_elem_init) — confirm.
	 * Pointer is ptrauth-signed on arm64e. */
	ipc_port_t XNU_PTRAUTH_SIGNED_PTR("twe_pdrequest") twe_pdrequest;
};
708 
/*
 * Reference-counted set of watchport elements for a task/thread pair.
 * tw_elem is a C99 flexible array member with tw_elem_array_count
 * entries.  Per the original annotations, fields marked
 * "& tw_refcount == 0" are only touched once the refcount has dropped.
 */
struct task_watchports {
	os_refcnt_t                     tw_refcount;           /* (Space lock) */
	task_t                          tw_task;               /* (Space lock) & tw_refcount == 0 */
	thread_t                        tw_thread;             /* (Space lock) & tw_refcount == 0 */
	uint32_t                        tw_elem_array_count;   /* (Space lock) */
	struct task_watchport_elem      tw_elem[];             /* (Space lock) & (Portlock) & (mq lock) */
};
716 
717 #define task_watchports_retain(x)   (os_ref_retain(&(x)->tw_refcount))
718 #define task_watchports_release(x)  (os_ref_release(&(x)->tw_refcount))
719 
/*
 * Initialize a task_watchport_elem: bind it to (task, port) and clear
 * its pdrequest pointer.  do/while(0) makes the multi-statement macro
 * safe as a single statement (e.g. in an unbraced if).
 */
#define task_watchport_elem_init(elem, task, port) \
do {                                               \
	(elem)->twe_task = (task);                 \
	(elem)->twe_port = (port);                 \
	(elem)->twe_pdrequest = IP_NULL;           \
} while(0)
726 
727 #define task_watchport_elem_clear(elem) task_watchport_elem_init((elem), NULL, NULL)
728 
729 extern void
730 task_add_turnstile_watchports(
731 	task_t          task,
732 	thread_t        thread,
733 	ipc_port_t      *portwatch_ports,
734 	uint32_t        portwatch_count);
735 
736 extern void
737 task_watchport_elem_deallocate(
738 	struct          task_watchport_elem *watchport_elem);
739 
740 extern boolean_t
741 task_has_watchports(task_t task);
742 
743 void
744 task_dyld_process_info_update_helper(
745 	task_t                  task,
746 	size_t                  active_count,
747 	vm_map_address_t        magic_addr,
748 	ipc_port_t             *release_ports,
749 	size_t                  release_count);
750 
751 extern kern_return_t
752 task_suspend2_mig(
753 	task_t                  task,
754 	task_suspension_token_t *suspend_token);
755 
756 extern kern_return_t
757 task_suspend2_external(
758 	task_t                  task,
759 	task_suspension_token_t *suspend_token);
760 
761 extern kern_return_t
762 task_resume2_mig(
763 	task_suspension_token_t suspend_token);
764 
765 extern kern_return_t
766 task_resume2_external(
767 	task_suspension_token_t suspend_token);
768 
769 extern void
770 task_suspension_token_deallocate_grp(
771 	task_suspension_token_t suspend_token,
772 	task_grp_t              grp);
773 
774 extern ipc_port_t
775 convert_task_to_port_with_flavor(
776 	task_t                  task,
777 	mach_task_flavor_t      flavor,
778 	task_grp_t              grp);
779 
780 extern task_t   current_task_early(void) __pure2;
781 
782 #else   /* MACH_KERNEL_PRIVATE */
783 
784 __BEGIN_DECLS
785 
786 extern task_t   current_task(void) __pure2;
787 
788 extern bool task_is_driver(task_t task);
789 
790 #define TF_NONE                 0
791 
792 #define TWF_NONE                 0
793 #define TRW_LRETURNWAIT          0x01           /* task is waiting for fork/posix_spawn/exec to complete */
794 #define TRW_LRETURNWAITER        0x02           /* task is waiting for TRW_LRETURNWAIT to get cleared */
795 #define TRW_LEXEC_COMPLETE       0x04           /* thread should call exec complete */
796 
797 /* task clear return wait flags */
798 #define TCRW_CLEAR_INITIAL_WAIT   0x1
799 #define TCRW_CLEAR_FINAL_WAIT     0x2
800 #define TCRW_CLEAR_EXEC_COMPLETE  0x4
801 #define TCRW_CLEAR_ALL_WAIT       (TCRW_CLEAR_INITIAL_WAIT | TCRW_CLEAR_FINAL_WAIT)
802 
803 
804 #define TPF_NONE                0
805 #define TPF_EXEC_COPY           0x00000002                              /* task is the new copy of an exec */
806 
807 
808 __END_DECLS
809 
810 #endif  /* MACH_KERNEL_PRIVATE */
811 
812 __BEGIN_DECLS
813 
814 #ifdef KERNEL_PRIVATE
815 extern boolean_t                task_is_app_suspended(task_t task);
816 extern bool task_is_exotic(task_t task);
817 extern bool task_is_alien(task_t task);
818 extern boolean_t task_get_platform_binary(task_t task);
819 #endif /* KERNEL_PRIVATE */
820 
821 #ifdef  XNU_KERNEL_PRIVATE
822 
823 /* Hold all threads in a task, Wait for task to stop running, just to get off CPU */
824 extern kern_return_t task_hold_and_wait(
825 	task_t          task,
826 	bool            suspend_conclave);
827 
828 /* Release hold on all threads in a task */
829 extern kern_return_t    task_release(
830 	task_t          task);
831 
832 /* Suspend/resume a task where the kernel owns the suspend count */
833 extern kern_return_t    task_suspend_internal(          task_t          task);
834 extern kern_return_t    task_resume_internal(           task_t          task);
835 
836 /* Suspends a task by placing a hold on its threads */
837 extern kern_return_t    task_pidsuspend(
838 	task_t          task);
839 
840 /* Resumes a previously paused task */
841 extern kern_return_t    task_pidresume(
842 	task_t          task);
843 
844 extern kern_return_t    task_send_trace_memory(
845 	task_t          task,
846 	uint32_t        pid,
847 	uint64_t        uniqueid);
848 
849 extern void             task_remove_turnstile_watchports(
850 	task_t          task);
851 
852 extern void             task_transfer_turnstile_watchports(
853 	task_t          old_task,
854 	task_t          new_task,
855 	thread_t        new_thread);
856 
857 extern kern_return_t
858     task_violated_guard(mach_exception_code_t, mach_exception_subcode_t, void *, bool);
859 
860 #if DEVELOPMENT || DEBUG
861 
862 extern kern_return_t    task_disconnect_page_mappings(
863 	task_t          task);
864 #endif /* DEVELOPMENT || DEBUG */
865 
866 extern void                     tasks_system_suspend(boolean_t suspend);
867 
868 #if CONFIG_FREEZE
869 
870 /* Freeze a task's resident pages */
871 extern kern_return_t    task_freeze(
872 	task_t          task,
873 	uint32_t        *purgeable_count,
874 	uint32_t        *wired_count,
875 	uint32_t        *clean_count,
876 	uint32_t        *dirty_count,
877 	uint32_t        dirty_budget,
878 	uint32_t        *shared_count,
879 	int             *freezer_error_code,
880 	boolean_t       eval_only);
881 
882 /* Thaw a currently frozen task */
883 extern kern_return_t    task_thaw(
884 	task_t          task);
885 
/* Direction of a frozen-to-swap ledger adjustment; see
 * task_update_frozen_to_swap_acct() below. */
typedef enum {
	CREDIT_TO_SWAP = 1,
	DEBIT_FROM_SWAP = 2
} freezer_acct_op_t;
890 
891 extern void task_update_frozen_to_swap_acct(
892 	task_t  task,
893 	int64_t amount,
894 	freezer_acct_op_t op);
895 
896 #endif /* CONFIG_FREEZE */
897 
898 /* Halt all other threads in the current task */
899 extern kern_return_t    task_start_halt(
900 	task_t          task);
901 
902 /* Wait for other threads to halt and free halting task resources */
903 extern void             task_complete_halt(
904 	task_t          task);
905 
906 extern kern_return_t    task_terminate_internal(
907 	task_t                  task);
908 
909 struct proc_ro;
910 typedef struct proc_ro *proc_ro_t;
911 
912 extern kern_return_t    task_create_internal(
913 	task_t          parent_task,
914 	proc_ro_t       proc_ro,
915 	coalition_t     *parent_coalitions,
916 	boolean_t       inherit_memory,
917 	boolean_t       is_64bit,
918 	boolean_t       is_64bit_data,
919 	uint32_t        t_flags,
920 	uint32_t        t_flags_ro,
921 	uint32_t        procflags,
922 	uint8_t         t_returnwaitflags,
923 	task_t          child_task);
924 
925 extern kern_return_t    task_set_special_port_internal(
926 	task_t                  task,
927 	int                     which,
928 	ipc_port_t              port);
929 
930 extern kern_return_t task_set_security_tokens(
931 	task_t                  task,
932 	security_token_t        sec_token,
933 	audit_token_t           audit_token,
934 	host_priv_t             host_priv);
935 
936 extern kern_return_t    task_info(
937 	task_t                  task,
938 	task_flavor_t           flavor,
939 	task_info_t             task_info_out,
940 	mach_msg_type_number_t  *task_info_count);
941 
942 /*
943  * Additional fields that aren't exposed through `task_power_info` but needed
944  * by clients of `task_power_info_locked`.
945  */
struct task_power_info_extra {
	uint64_t cycles;         /* CPU cycles consumed by the task */
	uint64_t instructions;   /* instructions retired by the task */
	/* NOTE(review): the p-prefixed fields appear to be the
	 * performance-core (P-cluster) counterparts of the fields above —
	 * confirm against task_power_info_locked()'s implementation. */
	uint64_t pcycles;
	uint64_t pinstructions;
	uint64_t user_ptime;
	uint64_t system_ptime;
	uint64_t runnable_time;  /* time spent runnable (units not shown here; presumably mach_absolute_time units — TODO confirm) */
	uint64_t energy;         /* energy billed to the task */
	uint64_t penergy;
	uint64_t secure_time;    /* presumably time in secure/exclave execution — confirm */
	uint64_t secure_ptime;
};
959 
960 void task_power_info_locked(
961 	task_t                        task,
962 	task_power_info_t             info,
963 	gpu_energy_data_t             gpu_energy,
964 	task_power_info_v2_t          infov2,
965 	struct task_power_info_extra *extra_info);
966 
967 extern uint64_t         task_gpu_utilisation(
968 	task_t   task);
969 
970 extern void             task_update_cpu_time_qos_stats(
971 	task_t   task,
972 	uint64_t *eqos_stats,
973 	uint64_t *rqos_stats);
974 
975 extern void             task_vtimer_set(
976 	task_t          task,
977 	integer_t       which);
978 
979 extern void             task_vtimer_clear(
980 	task_t          task,
981 	integer_t       which);
982 
983 extern void             task_vtimer_update(
984 	task_t          task,
985 	integer_t       which,
986 	uint32_t        *microsecs);
987 
988 #define TASK_VTIMER_USER                0x01
989 #define TASK_VTIMER_PROF                0x02
990 #define TASK_VTIMER_RLIM                0x04
991 
992 extern void             task_set_64bit(
993 	task_t          task,
994 	boolean_t       is_64bit,
995 	boolean_t       is_64bit_data);
996 
997 extern bool             task_get_64bit_addr(
998 	task_t task);
999 
1000 extern bool             task_get_64bit_data(
1001 	task_t task);
1002 
1003 extern void     task_set_platform_binary(
1004 	task_t task,
1005 	boolean_t is_platform);
1006 
1007 #if XNU_TARGET_OS_OSX
1008 #if DEVELOPMENT || DEBUG
1009 /* Disables task identity security hardening (*_set_exception_ports policy)
1010  * for all tasks if amfi_get_out_of_my_way is set. */
1011 extern bool AMFI_bootarg_disable_mach_hardening;
1012 #endif /* DEVELOPMENT || DEBUG */
1013 extern void             task_disable_mach_hardening(
1014 	task_t task);
1015 
1016 extern bool     task_opted_out_mach_hardening(
1017 	task_t task);
1018 #endif /* XNU_TARGET_OS_OSX */
1019 
1020 extern boolean_t task_is_a_corpse(
1021 	task_t task);
1022 
1023 extern boolean_t task_is_ipc_active(
1024 	task_t task);
1025 
1026 extern bool
1027 task_is_immovable_no_assert(task_t task);
1028 
1029 extern bool task_is_immovable(
1030 	task_t task);
1031 
1032 extern void task_set_corpse(
1033 	task_t task);
1034 
1035 extern void     task_set_exc_guard_default(
1036 	task_t task,
1037 	const char *name,
1038 	unsigned long namelen,
1039 	boolean_t is_simulated,
1040 	uint32_t platform,
1041 	uint32_t sdk);
1042 
1043 extern bool     task_set_ca_client_wi(
1044 	task_t task,
1045 	boolean_t ca_client_wi);
1046 
1047 extern kern_return_t task_set_dyld_info(
1048 	task_t            task,
1049 	mach_vm_address_t addr,
1050 	mach_vm_size_t    size,
1051 	bool              finalize_value);
1052 
1053 extern void task_set_mach_header_address(
1054 	task_t task,
1055 	mach_vm_address_t addr);
1056 
1057 extern void task_set_uniqueid(task_t task);
1058 
1059 /* Get number of activations in a task */
1060 extern int              get_task_numacts(
1061 	task_t          task);
1062 
1063 extern bool task_donates_own_pages(
1064 	task_t task);
1065 
1066 struct label;
1067 extern kern_return_t task_collect_crash_info(
1068 	task_t task,
1069 #if CONFIG_MACF
1070 	struct label *crash_label,
1071 #endif
1072 	int is_corpse_fork);
1073 void task_wait_till_threads_terminate_locked(task_t task);
1074 
1075 /* JMM - should just be temporary (implementation in bsd_kern still) */
1076 extern void     set_bsdtask_info(task_t, void *);
1077 extern uint32_t set_task_loadTag(task_t task, uint32_t loadTag);
1078 extern vm_map_t get_task_map_reference(task_t);
1079 extern vm_map_t swap_task_map(task_t, thread_t, vm_map_t);
1080 extern pmap_t   get_task_pmap(task_t);
1081 extern uint64_t get_task_resident_size(task_t);
1082 extern uint64_t get_task_compressed(task_t);
1083 extern uint64_t get_task_resident_max(task_t);
1084 extern uint64_t get_task_phys_footprint(task_t);
#if CONFIG_LEDGER_INTERVAL_MAX
extern uint64_t get_task_phys_footprint_interval_max(task_t, int reset);
#endif /* CONFIG_LEDGER_INTERVAL_MAX */
1088 extern uint64_t get_task_phys_footprint_lifetime_max(task_t);
1089 extern uint64_t get_task_phys_footprint_limit(task_t);
1090 extern uint64_t get_task_neural_nofootprint_total(task_t task);
#if CONFIG_LEDGER_INTERVAL_MAX
extern uint64_t get_task_neural_nofootprint_total_interval_max(task_t, int reset);
#endif /* CONFIG_LEDGER_INTERVAL_MAX */
1094 extern uint64_t get_task_neural_nofootprint_total_lifetime_max(task_t);
1095 extern uint64_t get_task_purgeable_size(task_t);
1096 extern uint64_t get_task_cpu_time(task_t);
1097 extern uint64_t get_task_dispatchqueue_offset(task_t);
1098 extern uint64_t get_task_dispatchqueue_serialno_offset(task_t);
1099 extern uint64_t get_task_dispatchqueue_label_offset(task_t);
1100 extern uint64_t get_task_uniqueid(task_t task);
1101 extern int      get_task_version(task_t task);
1102 
1103 extern uint64_t get_task_internal(task_t);
1104 extern uint64_t get_task_internal_compressed(task_t);
1105 extern uint64_t get_task_purgeable_nonvolatile(task_t);
1106 extern uint64_t get_task_purgeable_nonvolatile_compressed(task_t);
1107 extern uint64_t get_task_iokit_mapped(task_t);
1108 extern uint64_t get_task_alternate_accounting(task_t);
1109 extern uint64_t get_task_alternate_accounting_compressed(task_t);
1110 extern uint64_t get_task_memory_region_count(task_t);
1111 extern uint64_t get_task_page_table(task_t);
1112 #if CONFIG_FREEZE
1113 extern uint64_t get_task_frozen_to_swap(task_t);
1114 #endif
1115 extern uint64_t get_task_network_nonvolatile(task_t);
1116 extern uint64_t get_task_network_nonvolatile_compressed(task_t);
1117 extern uint64_t get_task_wired_mem(task_t);
1118 extern uint32_t get_task_loadTag(task_t task);
1119 
1120 extern uint64_t get_task_tagged_footprint(task_t task);
1121 extern uint64_t get_task_tagged_footprint_compressed(task_t task);
1122 extern uint64_t get_task_media_footprint(task_t task);
1123 extern uint64_t get_task_media_footprint_compressed(task_t task);
1124 extern uint64_t get_task_graphics_footprint(task_t task);
1125 extern uint64_t get_task_graphics_footprint_compressed(task_t task);
1126 extern uint64_t get_task_neural_footprint(task_t task);
1127 extern uint64_t get_task_neural_footprint_compressed(task_t task);
1128 
1129 extern kern_return_t task_convert_phys_footprint_limit(int, int *);
1130 extern kern_return_t task_set_phys_footprint_limit_internal(task_t, int, int *, boolean_t, boolean_t);
1131 extern kern_return_t task_get_phys_footprint_limit(task_t task, int *limit_mb);
1132 #if DEBUG || DEVELOPMENT
1133 #if CONFIG_MEMORYSTATUS
1134 extern kern_return_t task_set_diag_footprint_limit_internal(task_t, uint64_t, uint64_t *);
1135 extern kern_return_t task_get_diag_footprint_limit_internal(task_t, uint64_t *, bool *);
1136 extern kern_return_t task_set_diag_footprint_limit(task_t task, uint64_t new_limit_mb, uint64_t *old_limit_mb);
1137 #endif /* CONFIG_MEMORYSTATUS */
1138 #endif /* DEBUG || DEVELOPMENT */
1139 extern kern_return_t task_get_conclave_mem_limit(task_t, uint64_t *conclave_limit);
1140 extern kern_return_t task_set_conclave_mem_limit(task_t, uint64_t conclave_limit);
1141 
1142 extern security_token_t *task_get_sec_token(task_t task);
1143 extern void task_set_sec_token(task_t task, security_token_t *token);
1144 extern audit_token_t *task_get_audit_token(task_t task);
1145 extern void task_set_audit_token(task_t task, audit_token_t *token);
1146 extern void task_set_tokens(task_t task, security_token_t *sec_token, audit_token_t *audit_token);
1147 extern boolean_t task_is_privileged(task_t task);
1148 extern uint8_t *task_get_mach_trap_filter_mask(task_t task);
1149 extern void task_set_mach_trap_filter_mask(task_t task, uint8_t *mask);
1150 extern uint8_t *task_get_mach_kobj_filter_mask(task_t task);
1151 extern void task_set_mach_kobj_filter_mask(task_t task, uint8_t *mask);
1152 extern mach_vm_address_t task_get_all_image_info_addr(task_t task);
1153 
1154 /* Jetsam memlimit attributes */
1155 extern bool task_get_memlimit_is_active(task_t task);
1156 extern bool task_get_memlimit_is_fatal(task_t task);
1157 extern void task_set_memlimit_is_active(task_t task, bool memlimit_is_active);
1158 extern void task_set_memlimit_is_fatal(task_t task, bool memlimit_is_fatal);
1159 extern bool task_set_exc_resource_bit(task_t task, bool memlimit_is_active);
1160 extern void task_reset_triggered_exc_resource(task_t task, bool memlimit_is_active);
1161 extern bool task_get_jetsam_realtime_audio(task_t task);
1162 extern void task_set_jetsam_realtime_audio(task_t task, bool realtime_audio);
1163 
1164 extern uint64_t task_get_dirty_start(task_t task);
1165 extern void task_set_dirty_start(task_t task, uint64_t start);
1166 
1167 extern void task_set_thread_limit(task_t task, uint16_t thread_limit);
1168 #if CONFIG_PROC_RESOURCE_LIMITS
1169 extern kern_return_t task_set_port_space_limits(task_t task, uint32_t soft_limit, uint32_t hard_limit);
1170 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
1171 extern void task_port_space_ast(task_t task);
1172 
1173 #if XNU_TARGET_OS_OSX
1174 extern boolean_t task_has_system_version_compat_enabled(task_t task);
1175 extern void task_set_system_version_compat_enabled(task_t task, boolean_t enable_system_version_compat);
1176 #endif
1177 
1178 extern boolean_t        is_kerneltask(task_t task);
1179 extern boolean_t        is_corpsefork(task_t task);
1180 
1181 extern kern_return_t check_actforsig(task_t task, thread_t thread, int setast);
1182 
1183 extern kern_return_t machine_task_get_state(
1184 	task_t task,
1185 	int flavor,
1186 	thread_state_t state,
1187 	mach_msg_type_number_t *state_count);
1188 
1189 extern kern_return_t machine_task_set_state(
1190 	task_t task,
1191 	int flavor,
1192 	thread_state_t state,
1193 	mach_msg_type_number_t state_count);
1194 
1195 extern void machine_task_terminate(task_t task);
1196 
1197 extern kern_return_t machine_task_process_signature(task_t task, uint32_t platform, uint32_t sdk, char const **error_msg);
1198 
/*
 * Indices of the entries in the per-task ledger template.  Each field
 * names one ledger entry; the value is that entry's index within the
 * task ledger (presumably filled in by init_task_ledgers() — confirm).
 * The global instance is `task_ledgers`, declared further below.
 */
struct _task_ledger_indices {
	int cpu_time;
	int tkm_private;
	int tkm_shared;
	int phys_mem;
	int wired_mem;
	int conclave_mem;
	int internal;
	int iokit_mapped;
	int external;
	int reusable;
	int alternate_accounting;
	int alternate_accounting_compressed;
	int page_table;
	int phys_footprint;
	int internal_compressed;
	int purgeable_volatile;
	int purgeable_nonvolatile;
	int purgeable_volatile_compressed;
	int purgeable_nonvolatile_compressed;
	int tagged_nofootprint;
	int tagged_footprint;
	int tagged_nofootprint_compressed;
	int tagged_footprint_compressed;
	int network_volatile;
	int network_nonvolatile;
	int network_volatile_compressed;
	int network_nonvolatile_compressed;
	int media_nofootprint;
	int media_footprint;
	int media_nofootprint_compressed;
	int media_footprint_compressed;
	int graphics_nofootprint;
	int graphics_footprint;
	int graphics_nofootprint_compressed;
	int graphics_footprint_compressed;
	int neural_nofootprint;
	int neural_footprint;
	int neural_nofootprint_compressed;
	int neural_footprint_compressed;
	int neural_nofootprint_total;
	int platform_idle_wakeups;
	int interrupt_wakeups;
#if CONFIG_SCHED_SFI
	int sfi_wait_times[MAX_SFI_CLASS_ID];
#endif /* CONFIG_SCHED_SFI */
	int cpu_time_billed_to_me;
	int cpu_time_billed_to_others;
	int physical_writes;
	int logical_writes;
	int logical_writes_to_external;
	int energy_billed_to_me;
	int energy_billed_to_others;
#if CONFIG_MEMORYSTATUS
	int memorystatus_dirty_time;
#endif /* CONFIG_MEMORYSTATUS */
	int pages_grabbed;
	int pages_grabbed_kern;
	int pages_grabbed_iopl;
	int pages_grabbed_upl;
#if CONFIG_DEFERRED_RECLAIM
	int est_reclaimable;
#endif /* CONFIG_DEFERRED_RECLAIM */
#if CONFIG_FREEZE
	int frozen_to_swap;
#endif /* CONFIG_FREEZE */
#if CONFIG_PHYS_WRITE_ACCT
	int fs_metadata_writes;
#endif /* CONFIG_PHYS_WRITE_ACCT */
	int swapins;
};
1270 
1271 /*
1272  * Each runtime security mitigation that we support for userland processes
1273  * is tracked in the task security configuration and managed by the following
1274  * helpers.
1275  */
/*
 * Declare the accessors for one runtime-security mitigation named by
 * `suffix`: task_has_*, task_set_*, task_clear_*, task_no_set_*.
 * The final declaration intentionally omits its semicolon so that use
 * sites supply it, e.g. TASK_SECURITY_CONFIG_HELPER_DECLARE(tpro);
 *
 * Fixes: the last continuation line used spaces instead of tabs, and a
 * stray trailing backslash continued the macro onto the blank line
 * that follows it.
 */
#define TASK_SECURITY_CONFIG_HELPER_DECLARE(suffix) \
	extern bool task_has_##suffix(task_t); \
	extern void task_set_##suffix(task_t); \
	extern void task_clear_##suffix(task_t); \
	extern void task_no_set_##suffix(task_t task)
1281 
1282 extern uint32_t task_get_security_config(task_t);
1283 
1284 TASK_SECURITY_CONFIG_HELPER_DECLARE(hardened_heap);
1285 TASK_SECURITY_CONFIG_HELPER_DECLARE(tpro);
1286 TASK_SECURITY_CONFIG_HELPER_DECLARE(guard_objects);
1287 
1288 uint8_t task_get_platform_restrictions_version(task_t task);
1289 void    task_set_platform_restrictions_version(task_t task, uint64_t version);
1290 uint8_t task_get_hardened_process_version(task_t task);
1291 void    task_set_hardened_process_version(task_t task, uint64_t version);
1292 
1293 #if HAS_MTE || HAS_MTE_EMULATION_SHIMS
1294 TASK_SECURITY_CONFIG_HELPER_DECLARE(sec);
1295 /*
1296  * Definitions need to be visible on bsd/
1297  */
1298 
/*
 * Declare the accessors for one MTE policy bit named by `suffix`:
 * task_has_sec_* and task_set_sec_*.  The last declaration omits its
 * semicolon; the macro's use site supplies it.
 *
 * Fix: continuation lines used mixed space/tab indentation; normalized
 * to tabs to match the sibling macro above.
 */
#define TASK_MTE_POLICY_HELPER_DECLARE(suffix)  \
	extern bool task_has_sec_##suffix(task_t); \
	extern void task_set_sec_##suffix(task_t)
1302 
1303 TASK_MTE_POLICY_HELPER_DECLARE(soft_mode);
1304 TASK_MTE_POLICY_HELPER_DECLARE(user_data);
1305 TASK_MTE_POLICY_HELPER_DECLARE(inherit);
1306 TASK_MTE_POLICY_HELPER_DECLARE(never_check);
1307 TASK_MTE_POLICY_HELPER_DECLARE(restrict_receiving_aliases_to_tagged_memory);
1308 
1309 extern bool current_task_has_sec_enabled(void);
1310 extern void task_clear_sec_policy(task_t);
1311 extern uint32_t task_get_sec_policy(task_t);
1312 #endif /* HAS_MTE || HAS_MTE_EMULATION_SHIMS */
1313 
1314 /*
1315  * Many of the task ledger entries use a reduced feature set
1316  * (specifically they just use LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE)
1317  * and are stored in a smaller entry structure.
1318  * That structure is an implementation detail of the ledger.
1319  * But on PPL systems, the task ledger's memory is managed by the PPL
1320  * and it has to determine the size of the task ledger at compile time.
1321  * This define specifies the number of small entries so the PPL can
1322  * properly determine the ledger's size.
1323  *
1324  * If you add a new entry with only the
1325  * LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE | LEDGER_ENTRY_ALLOW_INACTIVE
1326  * flags, you need to increment this count.
1327  * Otherwise, PPL systems will panic at boot.
1328  */
1329 #if CONFIG_DEFERRED_RECLAIM
1330 #define TASK_LEDGER_NUM_SMALL_INDICES 34
1331 #else /* CONFIG_DEFERRED_RECLAIM */
1332 #define TASK_LEDGER_NUM_SMALL_INDICES 33
1333 #endif /* !CONFIG_DEFERRED_RECLAIM */
1334 extern struct _task_ledger_indices task_ledgers;
1335 
1336 /* requires task to be unlocked, returns a referenced thread */
1337 thread_t task_findtid(task_t task, uint64_t tid);
1338 int pid_from_task(task_t task);
1339 
1340 extern kern_return_t task_wakeups_monitor_ctl(task_t task, uint32_t *rate_hz, int32_t *flags);
1341 extern kern_return_t task_cpu_usage_monitor_ctl(task_t task, uint32_t *flags);
1342 extern void task_rollup_accounting_info(task_t new_task, task_t parent_task);
1343 extern kern_return_t task_io_monitor_ctl(task_t task, uint32_t *flags);
1344 extern void task_set_did_exec_flag(task_t task);
1345 extern void task_clear_exec_copy_flag(task_t task);
1346 extern bool task_is_initproc(task_t task);
1347 extern boolean_t task_is_exec_copy(task_t);
1348 extern boolean_t task_did_exec(task_t task);
1349 extern boolean_t task_is_active(task_t task);
1350 extern boolean_t task_is_halting(task_t task);
1351 extern void task_clear_return_wait(task_t task, uint32_t flags);
1352 extern void task_set_ctrl_port_default(task_t task, thread_t thread);
1353 extern void task_wait_to_return(void) __attribute__((noreturn));
1354 extern void task_post_signature_processing_hook(task_t task);
1355 extern event_t task_get_return_wait_event(task_t task);
1356 
1357 extern void task_bank_reset(task_t task);
1358 extern void task_bank_init(task_t task);
1359 
1360 #if CONFIG_MEMORYSTATUS
1361 extern void task_ledger_settle_dirty_time(task_t t);
1362 extern void task_ledger_settle_dirty_time_locked(task_t t);
1363 #endif /* CONFIG_MEMORYSTATUS */
1364 extern void task_ledger_settle(task_t t);
1365 
1366 #if CONFIG_ARCADE
1367 extern void task_prep_arcade(task_t task, thread_t thread);
1368 #endif /* CONFIG_ARCADE */
1369 
1370 extern int task_pid(task_t task);
1371 
1372 #if __has_feature(ptrauth_calls)
1373 char *task_get_vm_shared_region_id_and_jop_pid(task_t task, uint64_t *);
1374 void task_set_shared_region_id(task_t task, char *id);
1375 #endif /* __has_feature(ptrauth_calls) */
1376 
1377 extern boolean_t task_has_assertions(task_t task);
1378 /* End task_policy */
1379 
1380 extern void      task_set_gpu_role(task_t task, darwin_gpu_role_t gpu_role);
1381 extern boolean_t task_is_gpu_denied(task_t task);
1382 /* Returns PRIO_DARWIN_GPU values defined in sys/resource_private.h */
1383 extern darwin_gpu_role_t      task_get_gpu_role(task_t task);
1384 
1385 extern void task_set_game_mode(task_t task, bool enabled);
1386 /* returns true if update must be pushed to coalition (Automatically handled by task_set_game_mode) */
1387 extern bool task_set_game_mode_locked(task_t task, bool enabled);
1388 extern bool task_get_game_mode(task_t task);
1389 
1390 extern void task_set_carplay_mode(task_t task, bool enabled);
1391 /* returns true if update must be pushed to coalition (Automatically handled by task_set_carplay_mode) */
1392 extern bool task_set_carplay_mode_locked(task_t task, bool enabled);
1393 extern bool task_get_carplay_mode(task_t task);
1394 
1395 extern queue_head_t * task_io_user_clients(task_t task);
1396 extern void     task_set_message_app_suspended(task_t task, boolean_t enable);
1397 
1398 extern void task_copy_fields_for_exec(task_t dst_task, task_t src_task);
1399 
1400 extern void task_copy_vmobjects(task_t task, vm_object_query_t query, size_t len, size_t *num);
1401 extern void task_get_owned_vmobjects(task_t task, size_t buffer_size, vmobject_list_output_t buffer, size_t* output_size, size_t* entries);
1402 
1403 extern void task_set_filter_msg_flag(task_t task, boolean_t flag);
1404 extern boolean_t task_get_filter_msg_flag(task_t task);
1405 
1406 #if __has_feature(ptrauth_calls)
1407 extern bool task_is_pac_exception_fatal(task_t task);
1408 extern void task_set_pac_exception_fatal_flag(task_t task);
1409 #endif /*__has_feature(ptrauth_calls)*/
1410 
1411 extern bool task_is_jit_exception_fatal(task_t task);
1412 extern void task_set_jit_flags(task_t task);
1413 
1414 extern bool task_needs_user_signed_thread_state(task_t task);
1415 extern void task_set_tecs(task_t task);
1416 extern void task_get_corpse_vmobject_list(task_t task, vmobject_list_output_t* list, size_t* list_size);
1417 
1418 extern boolean_t task_corpse_forking_disabled(task_t task);
1419 
1420 void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_MACH_PORTS(task_t task,
1421     uint32_t current_size, uint32_t soft_limit, uint32_t hard_limit);
1422 
1423 extern int get_task_cdhash(task_t task, char cdhash[CS_CDHASH_LEN]);
1424 
1425 extern boolean_t kdp_task_is_locked(task_t task);
1426 
1427 /* redeclaration from task_server.h for the sake of kern_exec.c */
1428 extern kern_return_t _kernelrpc_mach_ports_register3(
1429 	task_t                  task,
1430 	mach_port_t             port1,
1431 	mach_port_t             port2,
1432 	mach_port_t             port3);
1433 
1434 /* Kernel side prototypes for MIG routines */
1435 extern kern_return_t task_get_exception_ports(
1436 	task_t                          task,
1437 	exception_mask_t                exception_mask,
1438 	exception_mask_array_t          masks,
1439 	mach_msg_type_number_t          *CountCnt,
1440 	exception_port_array_t          ports,
1441 	exception_behavior_array_t      behaviors,
1442 	thread_state_flavor_array_t     flavors);
1443 
1444 #if CONFIG_EXCLAVES
1445 int task_add_conclave(task_t task, void *, int64_t, const char *task_conclave_id);
1446 kern_return_t task_inherit_conclave(task_t old_task, task_t new_task, void *vnode, int64_t off);
1447 kern_return_t task_launch_conclave(mach_port_name_t port);
1448 void task_clear_conclave(task_t task);
1449 void task_stop_conclave(task_t task, bool gather_crash_bt);
1450 void task_suspend_conclave(task_t task);
1451 void task_resume_conclave(task_t task);
1452 kern_return_t task_stop_conclave_upcall(void);
1453 kern_return_t task_stop_conclave_upcall_complete(void);
1454 kern_return_t task_suspend_conclave_upcall(uint64_t *, size_t);
1455 struct conclave_sharedbuffer_t;
1456 kern_return_t task_crash_info_conclave_upcall(task_t task,
1457     const struct conclave_sharedbuffer_t *shared_buf, uint32_t length);
1458 typedef struct exclaves_resource exclaves_resource_t;
1459 exclaves_resource_t *task_get_conclave(task_t task);
1460 void task_set_conclave_untaintable(task_t task);
1461 void task_add_conclave_crash_info(task_t task, void *crash_info_ptr);
/* Changing this also warrants a matching change in ConclaveSharedBuffer. */
#define CONCLAVE_CRASH_BUFFER_PAGECOUNT 2
1464 
1465 #endif /* CONFIG_EXCLAVES */
1466 
1467 #endif  /* XNU_KERNEL_PRIVATE */
1468 #ifdef  KERNEL_PRIVATE
1469 
1470 extern void     *get_bsdtask_info(task_t);
1471 extern void     *get_bsdthreadtask_info(thread_t);
1472 extern void task_bsdtask_kill(task_t);
1473 extern vm_map_t get_task_map(task_t);
1474 extern ledger_t get_task_ledger(task_t);
1475 
1476 extern boolean_t get_task_pidsuspended(task_t);
1477 extern boolean_t get_task_suspended(task_t);
1478 extern boolean_t get_task_frozen(task_t);
1479 
1480 /*
1481  * Flavors of convert_task_to_port. XNU callers get convert_task_to_port_kernel,
1482  * external callers get convert_task_to_port_external.
1483  */
1484 extern ipc_port_t convert_task_to_port(task_t);
1485 extern ipc_port_t convert_task_to_port_kernel(task_t);
1486 extern ipc_port_t convert_task_to_port_external(task_t);
1487 extern void       convert_task_array_to_ports(task_array_t, size_t, mach_task_flavor_t);
1488 
1489 extern ipc_port_t convert_task_read_to_port(task_t);
1490 extern ipc_port_t convert_task_read_to_port_kernel(task_read_t);
1491 extern ipc_port_t convert_task_read_to_port_external(task_t);
1492 
1493 extern ipc_port_t convert_task_inspect_to_port(task_inspect_t);
1494 extern ipc_port_t convert_task_name_to_port(task_name_t);
1495 
1496 extern ipc_port_t convert_corpse_to_port_and_nsrequest(task_t task);
1497 
1498 extern ipc_port_t convert_task_suspension_token_to_port(task_suspension_token_t task);
1499 /* Convert from a port (in this case, an SO right to a task's resume port) to a task. */
1500 extern task_suspension_token_t convert_port_to_task_suspension_token(ipc_port_t port);
1501 
1502 extern void task_suspension_send_once(ipc_port_t port);
1503 
/*
 * Flag bits for the `flags` argument of task_update_logical_writes(),
 * classifying the kind of logical I/O being accounted.
 */
#define TASK_WRITE_IMMEDIATE                 0x1
#define TASK_WRITE_DEFERRED                  0x2
#define TASK_WRITE_INVALIDATED               0x4
#define TASK_WRITE_METADATA                  0x8
/* Account `io_size` bytes of logical writes against `task` (vp: target vnode). */
extern void     task_update_logical_writes(task_t task, uint32_t io_size, int flags, void *vp);
1509 
/* Direction of a physical-write ledger adjustment: add or subtract. */
__enum_decl(task_balance_flags_t, uint8_t, {
	TASK_BALANCE_CREDIT                 = 0x1,
	TASK_BALANCE_DEBIT                  = 0x2,
});

/* Category of physical write being accounted (only metadata so far). */
__enum_decl(task_physical_write_flavor_t, uint8_t, {
	TASK_PHYSICAL_WRITE_METADATA        = 0x1,
});
/* Credit or debit `io_size` bytes of physical writes of `flavor` to `task`. */
extern void     task_update_physical_writes(task_t task, task_physical_write_flavor_t flavor,
    uint64_t io_size, task_balance_flags_t flags);
1520 
#if CONFIG_SECLUDED_MEMORY
/*
 * Secluded-memory eligibility controls. "can" / "could" / "could also"
 * represent distinct eligibility states — NOTE(review): the precise
 * difference between the three is not visible here; see the
 * implementations and vm_page.c for the policy.
 */
extern void task_set_can_use_secluded_mem(
	task_t task,
	boolean_t can_use_secluded_mem);
extern void task_set_could_use_secluded_mem(
	task_t task,
	boolean_t could_use_secluded_mem);
extern void task_set_could_also_use_secluded_mem(
	task_t task,
	boolean_t could_also_use_secluded_mem);
/* is_allocate: query is on behalf of an allocation (vs. a page-grab). */
extern boolean_t task_can_use_secluded_mem(
	task_t task,
	boolean_t is_allocate);
extern boolean_t task_could_use_secluded_mem(task_t task);
extern boolean_t task_could_also_use_secluded_mem(task_t task);
#endif /* CONFIG_SECLUDED_MEMORY */
1537 
/* Mark/query a task as running in darkwake (background wake) mode. */
extern void task_set_darkwake_mode(task_t, boolean_t);
extern boolean_t task_get_darkwake_mode(task_t);

#if __arm64__
/* Opt a task into alternate memory-footprint limit regimes (arm64 only). */
extern void task_set_legacy_footprint(task_t task);
extern void task_set_extra_footprint_limit(task_t task);
extern void task_set_ios13extended_footprint_limit(task_t task);
#endif /* __arm64__ */
1546 
#if CONFIG_MACF
/* Get/set the MAC framework label attached to a task's crash reporting. */
extern struct label *get_task_crash_label(task_t task);
extern void set_task_crash_label(task_t task, struct label *label);
#endif /* CONFIG_MACF */
1551 
/*
 * task_find_region_details(): look up details of the VM region containing
 * (or following — depends on options) `offset` in `task`'s map.
 */
__options_closed_decl(find_region_details_options_t, uint32_t, {
	FIND_REGION_DETAILS_OPTIONS_NONE        = 0x00000000,
	/* restrict the lookup to the region at exactly `offset` */
	FIND_REGION_DETAILS_AT_OFFSET           = 0x00000001,
	/* also return the backing vnode (caller must vnode_put() it) */
	FIND_REGION_DETAILS_GET_VNODE           = 0x00000002,
});
/* Mask of all valid option bits. */
#define FIND_REGION_DETAILS_OPTIONS_ALL (       \
	        FIND_REGION_DETAILS_AT_OFFSET | \
	        FIND_REGION_DETAILS_GET_VNODE   \
	        )
extern int task_find_region_details(
	task_t task,
	vm_map_offset_t offset,
	find_region_details_options_t options,
	uintptr_t *vp_p, /* caller must call vnode_put(vp) when done */
	uint32_t *vid_p,
	bool *is_mapped_shared_p,
	uint64_t *start_p,
	uint64_t *len_p);
1571 
1572 
1573 #endif  /* KERNEL_PRIVATE */
1574 
/* The kernel's own task; defined in task.c. */
extern task_t   kernel_task;

/*
 * MIG-generated-code deallocation hooks: release the reference held on
 * each task flavor when a MIG routine finishes with its argument.
 */
extern void             task_name_deallocate_mig(
	task_name_t             task_name);

extern void             task_policy_set_deallocate_mig(
	task_policy_set_t       task_policy_set);

extern void             task_policy_get_deallocate_mig(
	task_policy_get_t       task_policy_get);

extern void             task_inspect_deallocate_mig(
	task_inspect_t          task_inspect);

extern void             task_read_deallocate_mig(
	task_read_t          task_read);

/* Drop a reference on a task suspension (resume) token. */
extern void             task_suspension_token_deallocate(
	task_suspension_token_t token);
1594 
/* Get/set whether the current task's region queries report footprint info. */
extern boolean_t task_self_region_footprint(void);
extern void task_self_region_footprint_set(boolean_t newval);

/* VM_REGION_INFO_FLAGS defined in vm_region.h */
extern int task_self_region_info_flags(void);
extern kern_return_t task_self_region_info_flags_set(int newval);

/* Report resident and compressed footprint amounts from `ledger`. */
extern void task_ledgers_footprint(ledger_t ledger,
    ledger_amount_t *ledger_resident,
    ledger_amount_t *ledger_compressed);
/* Enable/disable memory-ownership transfer accounting for `task`. */
extern void task_set_memory_ownership_transfer(
	task_t task,
	boolean_t value);
1608 
#if DEVELOPMENT || DEBUG
/* Debug-only toggle to exempt a task from footprint accounting. */
extern void task_set_no_footprint_for_debug(
	task_t task,
	boolean_t value);
extern int task_get_no_footprint_for_debug(
	task_t task);
#endif /* DEVELOPMENT || DEBUG */
1616 
#ifdef KERNEL_PRIVATE
/*
 * Copy out a task's suspension statistics. The _kdp variant is presumably
 * for kernel-debugger/panic context (no blocking) — TODO confirm against
 * the implementation.
 */
extern kern_return_t task_get_suspend_stats(task_t task, task_suspend_stats_t stats);
extern kern_return_t task_get_suspend_stats_kdp(task_t task, task_suspend_stats_t stats);
#endif /* KERNEL_PRIVATE */

#ifdef XNU_KERNEL_PRIVATE
/* Copy out the array of recent suspension sources for `task`. */
extern kern_return_t task_get_suspend_sources(task_t task, task_suspend_source_array_t sources);
extern kern_return_t task_get_suspend_sources_kdp(task_t task, task_suspend_source_array_t sources);
#endif /* XNU_KERNEL_PRIVATE */

#if CONFIG_ROSETTA
/* True when `task` runs under Rosetta binary translation. */
extern bool task_is_translated(task_t task);
#endif
1630 
1631 
#ifdef MACH_KERNEL_PRIVATE

/* Copy (up to `size` bytes of) the task's process name into `buf`. */
void task_procname(task_t task, char *buf, int size);
/* Best available human-readable name for `task` (for logging/diagnostics). */
const char *task_best_name(task_t task);

#endif /* MACH_KERNEL_PRIVATE */

#if HAS_MTE
/* Must be callable from IOKit as it sometimes has need to asynchronously
 * terminate tasks. Takes the task lock.
 */
void task_set_ast_mte_synthesize_mach_exception(task_t task);
#endif /* HAS_MTE */


#ifdef KERNEL_PRIVATE
/*
 * Set / retrieve auxiliary code-signing info for a task; the _kdp getter
 * is presumably safe in debugger context — TODO confirm.
 */
kern_return_t task_set_cs_auxiliary_info(task_t task, uint64_t info);
uint64_t      task_get_cs_auxiliary_info_kdp(task_t task);
#endif /* KERNEL_PRIVATE */
1650 #endif /* KERNEL_PRIVATE */
1651 
1652 __END_DECLS
1653 
1654 #endif  /* _KERN_TASK_H_ */
1655