1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: task.h
60 * Author: Avadis Tevanian, Jr.
61 *
62 * This file contains the structure definitions for tasks.
63 *
64 */
65 /*
66 * Copyright (c) 1993 The University of Utah and
67 * the Computer Systems Laboratory (CSL). All rights reserved.
68 *
69 * Permission to use, copy, modify and distribute this software and its
70 * documentation is hereby granted, provided that both the copyright
71 * notice and this permission notice appear in all copies of the
72 * software, derivative works or modified versions, and any portions
73 * thereof, and that both notices appear in supporting documentation.
74 *
75 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
76 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
77 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
78 *
79 * CSL requests users of this software to return to [email protected] any
80 * improvements that they make and grant CSL redistribution rights.
81 *
82 */
83 /*
84 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
85 * support for mandatory and extensible security protections. This notice
86 * is included in support of clause 2.2 (b) of the Apple Public License,
87 * Version 2.0.
88 * Copyright (c) 2005 SPARTA, Inc.
89 */
90
91 #ifndef _KERN_TASK_H_
92 #define _KERN_TASK_H_
93
94 #include <kern/kern_types.h>
95 #include <kern/task_ref.h>
96 #include <mach/mach_types.h>
97 #include <sys/cdefs.h>
98
99 #ifdef XNU_KERNEL_PRIVATE
100 #include <kern/btlog.h>
101 #include <kern/kern_cdata.h>
102 #include <mach/sfi_class.h>
103 #include <kern/counter.h>
104 #include <kern/cs_blobs.h>
105 #include <kern/queue.h>
106 #include <kern/recount.h>
107 #include <sys/kern_sysctl.h>
108 #endif /* XNU_KERNEL_PRIVATE */
109
110 #ifdef MACH_KERNEL_PRIVATE
111 #include <mach/boolean.h>
112 #include <mach/port.h>
113 #include <mach/time_value.h>
114 #include <mach/message.h>
115 #include <mach/mach_param.h>
116 #include <mach/task_info.h>
117 #include <mach/exception_types.h>
118 #include <mach/vm_statistics.h>
119 #include <machine/task.h>
120
121 #include <kern/cpu_data.h>
122 #include <kern/queue.h>
123 #include <kern/exception.h>
124 #include <kern/locks.h>
125 #include <security/_label.h>
126 #include <ipc/ipc_port.h>
127
128 #include <kern/thread.h>
129 #include <mach/coalition.h>
130 #include <stdatomic.h>
131 #include <os/refcnt.h>
132
133 #if CONFIG_DEFERRED_RECLAIM
134 typedef struct vm_deferred_reclamation_metadata_s *vm_deferred_reclamation_metadata_t;
#endif /* CONFIG_DEFERRED_RECLAIM */
136
/*
 * Accumulated per-QoS-class CPU time for a task, one counter per QoS
 * tier. Instances live in struct task (cpu_time_eqos_stats /
 * cpu_time_rqos_stats) and are protected by the task lock.
 * NOTE(review): units are presumably nanoseconds of CPU time — confirm
 * against task_update_cpu_time_qos_stats().
 */
struct _cpu_time_qos_stats {
	uint64_t cpu_time_qos_default;          /* DEFAULT tier */
	uint64_t cpu_time_qos_maintenance;      /* MAINTENANCE tier */
	uint64_t cpu_time_qos_background;       /* BACKGROUND tier */
	uint64_t cpu_time_qos_utility;          /* UTILITY tier */
	uint64_t cpu_time_qos_legacy;           /* LEGACY tier */
	uint64_t cpu_time_qos_user_initiated;   /* USER_INITIATED tier */
	uint64_t cpu_time_qos_user_interactive; /* USER_INTERACTIVE tier */
};
146
/*
 * Per-task counters of issued writes, broken down by kind.  Kept in two
 * flavors in struct task: task_writes_counters_internal and
 * task_writes_counters_external.
 */
struct task_writes_counters {
	uint64_t task_immediate_writes;
	uint64_t task_deferred_writes;
	uint64_t task_invalidated_writes;
	uint64_t task_metadata_writes;
};
153
154 struct task_watchports;
155 #include <bank/bank_internal.h>
156
157 #ifdef MACH_BSD
158 struct proc;
159 struct proc_ro;
160 #endif
161
/*
 * struct task - kernel representation of a Mach task (address space,
 * thread container, IPC port namespace, accounting and policy state).
 * Most fields are protected by the task lock (task->lock); exceptions
 * are called out per field (e.g. t_procflags under the proc lock,
 * coalition fields under coalition->lock).  Field order is ABI/layout
 * sensitive — do not reorder.
 */
struct task {
	/* Synchronization/destruction information */
	decl_lck_mtx_data(, lock); /* Task's lock */
	os_refcnt_t ref_count; /* Number of references to me */

#if DEVELOPMENT || DEBUG
	struct os_refgrp *ref_group;
	lck_spin_t ref_group_lock;
#endif /* DEVELOPMENT || DEBUG */

	bool active; /* Task has not been terminated */
	bool ipc_active; /* IPC with the task ports is allowed */
	bool halting; /* Task is being halted */
	bool message_app_suspended; /* Let iokit know when pidsuspended */

	/* Virtual timers */
	uint32_t vtimers;
	uint32_t loadTag; /* dext ID used for logging identity */

	/* Globally unique id to identify tasks and corpses */
	uint64_t task_uniqueid;

	/* Miscellaneous */
	vm_map_t XNU_PTRAUTH_SIGNED_PTR("task.map") map; /* Address space description */
	queue_chain_t tasks; /* global list of tasks */
	struct task_watchports *watchports; /* watchports passed in spawn */
	turnstile_inheritor_t returnwait_inheritor; /* inheritor for task_wait */

#if defined(CONFIG_SCHED_MULTIQ)
	sched_group_t sched_group;
#endif /* defined(CONFIG_SCHED_MULTIQ) */

	/* Threads in this task */
	queue_head_t threads; /* queue of this task's threads */
	struct restartable_ranges *t_rr_ranges;

	processor_set_t pset_hint; /* NOTE(review): scheduling placement hint — see TF_USE_PSET_HINT_CLUSTER_TYPE below */
	struct affinity_space *affinity_space;

	int thread_count;
	uint32_t active_thread_count;
	int suspend_count; /* Internal scheduling only */
#ifdef CONFIG_TASK_SUSPEND_STATS
	struct task_suspend_stats_s t_suspend_stats; /* suspension statistics for this task */
	task_suspend_source_array_t t_suspend_sources; /* array of suspender debug info for this task */
#endif /* CONFIG_TASK_SUSPEND_STATS */

	/* User-visible scheduling information */
	integer_t user_stop_count; /* outstanding stops */
	integer_t legacy_stop_count; /* outstanding legacy stops */

	int16_t priority; /* base priority for threads */
	int16_t max_priority; /* maximum priority for threads */

	integer_t importance; /* priority offset (BSD 'nice' value) */

#define task_is_immovable(task) \
	!!(task_get_control_port_options(task) & TASK_CONTROL_PORT_IMMOVABLE)
#define task_is_pinned(task) \
	!!(task_get_control_port_options(task) & TASK_CONTROL_PORT_PINNED)

	/* Statistics */
	uint64_t total_runnable_time;

	struct recount_task tk_recount; /* CPU resource accounting (see kern/recount.h) */

	/* IPC structures */
	decl_lck_mtx_data(, itk_lock_data); /* protects the itk_* port fields below (itk_lock/itk_unlock) */
	/*
	 * Different flavors of task port.
	 * These flavors TASK_FLAVOR_* are defined in mach_types.h
	 */
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_task_ports") itk_task_ports[TASK_SELF_PORT_COUNT];
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_settable_self") itk_settable_self; /* a send right */
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_self") itk_self; /* immovable/pinned task port, does not hold right */
	struct exception_action exc_actions[EXC_TYPES_COUNT];
	/* a send right each valid element */
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_host") itk_host; /* a send right */
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_bootstrap") itk_bootstrap; /* a send right */
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_debug_control") itk_debug_control; /* send right for debugmode communications */
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_task_access") itk_task_access; /* and another send right */
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_resume") itk_resume; /* a receive right to resume this task */
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_registered") itk_registered[TASK_PORT_REGISTER_MAX];
	/* all send rights */
	ipc_port_t * XNU_PTRAUTH_SIGNED_PTR("task.itk_dyld_notify") itk_dyld_notify; /* lazy send rights array of size DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT */
#if CONFIG_PROC_RESOURCE_LIMITS
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_resource_notify") itk_resource_notify; /* a send right to the resource notify port */
#endif /* CONFIG_PROC_RESOURCE_LIMITS */
	struct ipc_space * XNU_PTRAUTH_SIGNED_PTR("task.itk_space") itk_space; /* this task's IPC port-name space */

	ledger_t ledger; /* resource accounting ledger (see init_task_ledgers) */
	/* Synchronizer ownership information */
	queue_head_t semaphore_list; /* list of owned semaphores */
	int semaphores_owned; /* number of semaphores owned */

	unsigned int priv_flags; /* privilege resource flags */
#define VM_BACKING_STORE_PRIV 0x1

	MACHINE_TASK

	counter_t faults; /* faults counter */
	counter_t pageins; /* pageins counter */
	counter_t cow_faults; /* copy on write fault counter */
	counter_t messages_sent; /* messages sent counter */
	counter_t messages_received; /* messages received counter */
	uint32_t decompressions; /* decompression counter */
	uint32_t syscalls_mach; /* mach system call counter */
	uint32_t syscalls_unix; /* unix system call counter */
	uint32_t c_switch; /* total context switches */
	uint32_t p_switch; /* total processor switches */
	uint32_t ps_switch; /* total pset switches */

#ifdef MACH_BSD
	struct proc_ro * bsd_info_ro; /* read-only proc data (see struct proc_ro) */
#endif
	kcdata_descriptor_t corpse_info; /* corpse crash-report data, when this task is a corpse */
	uint64_t crashed_thread_id;
	queue_chain_t corpse_tasks;
#ifdef CONFIG_MACF
	struct label * crash_label;
#endif
	volatile uint32_t t_flags; /* general-purpose task flags protected by task_lock (TL) */
#define TF_NONE 0
#define TF_64B_ADDR 0x00000001 /* task has 64-bit addressing */
#define TF_64B_DATA 0x00000002 /* task has 64-bit data registers */
#define TF_CPUMON_WARNING 0x00000004 /* task has at least one thread in CPU usage warning zone */
#define TF_WAKEMON_WARNING 0x00000008 /* task is in wakeups monitor warning zone */
#define TF_TELEMETRY (TF_CPUMON_WARNING | TF_WAKEMON_WARNING) /* task is a telemetry participant */
#define TF_GPU_DENIED 0x00000010 /* task is not allowed to access the GPU */
#define TF_PENDING_CORPSE 0x00000040 /* task corpse has not been reported yet */
#define TF_CORPSE_FORK 0x00000080 /* task is a forked corpse */
#define TF_CA_CLIENT_WI 0x00000800 /* task has CA_CLIENT work interval */
#define TF_DARKWAKE_MODE 0x00001000 /* task is in darkwake mode */
#define TF_NO_SMT 0x00002000 /* task threads must not be paired with SMT threads */
#define TF_SYS_VERSION_COMPAT 0x00008000 /* shim task accesses to OS version data (macOS - app compatibility) */
#define TF_TECS 0x00020000 /* task threads must enable CPU security */
#if defined(__x86_64__)
#define TF_INSN_COPY_OPTOUT 0x00040000 /* task threads opt out of unhandled-fault instruction stream collection */
#endif
#define TF_COALITION_MEMBER 0x00080000 /* task is a member of a coalition */
#define TF_NO_CORPSE_FORKING 0x00100000 /* do not fork a corpse for this task */
#define TF_USE_PSET_HINT_CLUSTER_TYPE 0x00200000 /* bind task to task->pset_hint->pset_cluster_type */
#define TF_DYLD_ALL_IMAGE_FINAL 0x00400000 /* all_image_info_addr can no longer be changed */
#define TF_HASPROC 0x00800000 /* task points to a proc */
#define TF_HAS_REPLY_PORT_TELEMETRY 0x10000000 /* Rate limit telemetry for reply port security semantics violations rdar://100244531 */
#define TF_HAS_EXCEPTION_TELEMETRY 0x20000000 /* Rate limit telemetry for exception identity violations rdar://100729339 */
#define TF_GAME_MODE 0x40000000 /* Set the game mode bit for CLPC */

	/*
	 * RO-protected flags (set/cleared via task_ro_flags_set/clear):
	 */
#define TFRO_CORPSE 0x00000020 /* task is a corpse */
#define TFRO_PLATFORM 0x00000400 /* task is a platform binary */
#define TFRO_FILTER_MSG 0x00004000 /* task calls into message filter callback before sending a message */
#define TFRO_PAC_EXC_FATAL 0x00010000 /* task is marked a corpse if a PAC exception occurs */
#define TFRO_PAC_ENFORCE_USER_STATE 0x01000000 /* Enforce user and kernel signed thread state */

	/*
	 * Task is running within a 64-bit address space.
	 */
#define task_has_64Bit_addr(task) \
	(((task)->t_flags & TF_64B_ADDR) != 0)
#define task_set_64Bit_addr(task) \
	((task)->t_flags |= TF_64B_ADDR)
#define task_clear_64Bit_addr(task) \
	((task)->t_flags &= ~TF_64B_ADDR)

	/*
	 * Task is using 64-bit machine state.
	 */
#define task_has_64Bit_data(task) \
	(((task)->t_flags & TF_64B_DATA) != 0)
#define task_set_64Bit_data(task) \
	((task)->t_flags |= TF_64B_DATA)
#define task_clear_64Bit_data(task) \
	((task)->t_flags &= ~TF_64B_DATA)

#define task_corpse_pending_report(task) \
	(((task)->t_flags & TF_PENDING_CORPSE) != 0)

#define task_set_corpse_pending_report(task) \
	((task)->t_flags |= TF_PENDING_CORPSE)

#define task_clear_corpse_pending_report(task) \
	((task)->t_flags &= ~TF_PENDING_CORPSE)

#define task_is_a_corpse_fork(task) \
	(((task)->t_flags & TF_CORPSE_FORK) != 0)

#define task_set_coalition_member(task) \
	((task)->t_flags |= TF_COALITION_MEMBER)

#define task_clear_coalition_member(task) \
	((task)->t_flags &= ~TF_COALITION_MEMBER)

#define task_is_coalition_member(task) \
	(((task)->t_flags & TF_COALITION_MEMBER) != 0)

#define task_has_proc(task) \
	(((task)->t_flags & TF_HASPROC) != 0)

#define task_set_has_proc(task) \
	((task)->t_flags |= TF_HASPROC)

#define task_clear_has_proc(task) \
	((task)->t_flags &= ~TF_HASPROC)

#define task_has_reply_port_telemetry(task) \
	(((task)->t_flags & TF_HAS_REPLY_PORT_TELEMETRY) != 0)

#define task_set_reply_port_telemetry(task) \
	((task)->t_flags |= TF_HAS_REPLY_PORT_TELEMETRY)

#define task_has_exception_telemetry(task) \
	(((task)->t_flags & TF_HAS_EXCEPTION_TELEMETRY) != 0)

#define task_set_exception_telemetry(task) \
	((task)->t_flags |= TF_HAS_EXCEPTION_TELEMETRY)

	uint32_t t_procflags; /* general-purpose task flags protected by proc_lock (PL) */
#define TPF_NONE 0
#define TPF_DID_EXEC 0x00000001 /* task has been execed to a new task */
#define TPF_EXEC_COPY 0x00000002 /* task is the new copy of an exec */

#define task_did_exec_internal(task) \
	(((task)->t_procflags & TPF_DID_EXEC) != 0)

#define task_is_exec_copy_internal(task) \
	(((task)->t_procflags & TPF_EXEC_COPY) != 0)

	mach_vm_address_t all_image_info_addr; /* dyld __all_image_info */
	mach_vm_size_t all_image_info_size; /* section location and size */

#if KPC
#define TASK_KPC_FORCED_ALL_CTRS 0x2 /* Bit in "t_kpc" signifying this task forced all counters */
	uint32_t t_kpc; /* kpc flags */
#endif /* KPC */

	bool pidsuspended; /* pid_suspend called; no threads can execute */
	bool frozen; /* frozen; private resident pages committed to swap */
	bool changing_freeze_state; /* in the process of freezing or thawing */
	bool is_large_corpse;
	uint16_t policy_ru_cpu :4,
	    policy_ru_cpu_ext :4,
	    applied_ru_cpu :4,
	    applied_ru_cpu_ext :4;
	uint8_t rusage_cpu_flags;
	uint8_t rusage_cpu_percentage; /* Task-wide CPU limit percentage */
	uint8_t rusage_cpu_perthr_percentage; /* Per-thread CPU limit percentage */
#if MACH_ASSERT
	int8_t suspends_outstanding; /* suspends this task performed in excess of resumes */
#endif
	uint8_t t_returnwaitflags; /* TRW_* flags below; cleared via the TCRW_CLEAR_* requests */
#define TWF_NONE 0
#define TRW_LRETURNWAIT 0x01 /* task is waiting for fork/posix_spawn/exec to complete */
#define TRW_LRETURNWAITER 0x02 /* task is waiting for TRW_LRETURNWAIT to get cleared */
#define TRW_LEXEC_COMPLETE 0x04 /* thread should call exec complete */

#if __has_feature(ptrauth_calls)
	bool shared_region_auth_remapped; /* authenticated sections ready for use */
	char *shared_region_id; /* determines which ptr auth key to use */
#endif /* __has_feature(ptrauth_calls) */
	struct vm_shared_region *shared_region;

	uint64_t rusage_cpu_interval; /* Task-wide CPU limit interval */
	uint64_t rusage_cpu_perthr_interval; /* Per-thread CPU limit interval */
	uint64_t rusage_cpu_deadline;
	thread_call_t rusage_cpu_callt;
#if CONFIG_TASKWATCH
	queue_head_t task_watchers; /* app state watcher threads */
	int num_taskwatchers;
	int watchapplying;
#endif /* CONFIG_TASKWATCH */

	struct bank_task *bank_context; /* pointer to per task bank structure */

#if IMPORTANCE_INHERITANCE
	struct ipc_importance_task *task_imp_base; /* Base of IPC importance chain */
#endif /* IMPORTANCE_INHERITANCE */

	vm_extmod_statistics_data_t extmod_statistics; /* external-modification (task_for_pid etc.) statistics */

	struct task_requested_policy requested_policy;
	struct task_effective_policy effective_policy;

	/*
	 * Can be merged with imp_donor bits, once the IMPORTANCE_INHERITANCE macro goes away.
	 */
	uint32_t low_mem_notified_warn :1, /* warning low memory notification is sent to the task */
	    low_mem_notified_critical :1, /* critical low memory notification is sent to the task */
	    purged_memory_warn :1, /* purgeable memory of the task is purged for warning level pressure */
	    purged_memory_critical :1, /* purgeable memory of the task is purged for critical level pressure */
	    low_mem_privileged_listener :1, /* if set, task would like to know about pressure changes before other tasks on the system */
	    mem_notify_reserved :27; /* reserved for future use */

	uint32_t memlimit_is_active :1, /* if set, use active attributes, otherwise use inactive attributes */
	    memlimit_is_fatal :1, /* if set, exceeding current memlimit will prove fatal to the task */
	    memlimit_active_exc_resource :1, /* if set, suppress exc_resource exception when task exceeds active memory limit */
	    memlimit_inactive_exc_resource :1, /* if set, suppress exc_resource exception when task exceeds inactive memory limit */
	    memlimit_attrs_reserved :28; /* reserved for future use */

	io_stat_info_t task_io_stats;

	struct task_writes_counters task_writes_counters_internal;
	struct task_writes_counters task_writes_counters_external;

	/*
	 * The cpu_time_qos_stats fields are protected by the task lock
	 */
	struct _cpu_time_qos_stats cpu_time_eqos_stats;
	struct _cpu_time_qos_stats cpu_time_rqos_stats;

	/* Statistics accumulated for terminated threads from this task */
	uint32_t task_timer_wakeups_bin_1;
	uint32_t task_timer_wakeups_bin_2;
	uint64_t task_gpu_ns;

	uint8_t task_can_transfer_memory_ownership;
#if DEVELOPMENT || DEBUG
	uint8_t task_no_footprint_for_debug;
#endif
	uint8_t task_objects_disowning;
	uint8_t task_objects_disowned;
	/* # of purgeable volatile VM objects owned by this task: */
	int task_volatile_objects;
	/* # of purgeable but not volatile VM objects owned by this task: */
	int task_nonvolatile_objects;
	int task_owned_objects;
	queue_head_t task_objq;
	decl_lck_mtx_data(, task_objq_lock); /* protects "task_objq" */

	unsigned int task_thread_limit:16;
#if __arm64__
	unsigned int task_legacy_footprint:1;
	unsigned int task_extra_footprint_limit:1;
	unsigned int task_ios13extended_footprint_limit:1;
#endif /* __arm64__ */
	unsigned int task_region_footprint:1;
	unsigned int task_has_crossed_thread_limit:1;
	unsigned int task_rr_in_flight:1; /* a t_rr_synchronzie() is in flight */
	/*
	 * A task's coalition set is "adopted" in task_create_internal
	 * and unset in task_deallocate_internal, so each array member
	 * can be referenced without the task lock.
	 * Note: these fields are protected by coalition->lock,
	 * not the task lock.
	 */
	coalition_t coalition[COALITION_NUM_TYPES];
	queue_chain_t task_coalition[COALITION_NUM_TYPES];
	uint64_t dispatchqueue_offset;

#if DEVELOPMENT || DEBUG
	boolean_t task_unnested;
	int task_disconnected_count;
#endif

#if HYPERVISOR
	void * XNU_PTRAUTH_SIGNED_PTR("task.hv_task_target") hv_task_target; /* hypervisor virtual machine object associated with this task */
#endif /* HYPERVISOR */

#if CONFIG_SECLUDED_MEMORY
	uint8_t task_can_use_secluded_mem;
	uint8_t task_could_use_secluded_mem;
	uint8_t task_could_also_use_secluded_mem;
	uint8_t task_suppressed_secluded;
#endif /* CONFIG_SECLUDED_MEMORY */

	task_exc_guard_behavior_t task_exc_guard; /* EXC_GUARD delivery behavior (default: task_exc_guard_default) */
	mach_vm_address_t mach_header_vm_address;

	queue_head_t io_user_clients;

#if CONFIG_FREEZE
	queue_head_t task_frozen_cseg_q; /* queue of csegs frozen to NAND */
#endif /* CONFIG_FREEZE */
	boolean_t donates_own_pages; /* pages land on the special Q (only swappable pages on iPadOS, early swap on macOS) */
	uint32_t task_shared_region_slide; /* cached here to avoid locking during telemetry */
#if CONFIG_PHYS_WRITE_ACCT
	uint64_t task_fs_metadata_writes;
#endif /* CONFIG_PHYS_WRITE_ACCT */
	uuid_t task_shared_region_uuid;
#if CONFIG_MEMORYSTATUS
	uint64_t memstat_dirty_start; /* last abstime transition into the dirty band or last call to task_ledger_settle_dirty_time while dirty */
#endif /* CONFIG_MEMORYSTATUS */
	vmobject_list_output_t corpse_vmobject_list;
	uint64_t corpse_vmobject_list_size;
#if CONFIG_DEFERRED_RECLAIM
	vm_deferred_reclamation_metadata_t deferred_reclamation_metadata; /* Protected by the task lock */
#endif /* CONFIG_DEFERRED_RECLAIM */
};
552
553 ZONE_DECLARE_ID(ZONE_ID_PROC_TASK, void *);
554 extern zone_t proc_task_zone;
555
556 extern task_control_port_options_t task_get_control_port_options(task_t task);
557 extern void task_set_control_port_options(task_t task, task_control_port_options_t opts);
558
559 /*
560 * EXC_GUARD default delivery behavior for optional Mach port and VM guards.
561 * Applied to new tasks at creation time.
562 */
563 extern task_exc_guard_behavior_t task_exc_guard_default;
564 extern size_t proc_and_task_size;
565 extern void *get_bsdtask_info(task_t t);
566 extern void *task_get_proc_raw(task_t task);
567 static inline void
task_require(struct task * task)568 task_require(struct task *task)
569 {
570 zone_id_require(ZONE_ID_PROC_TASK, proc_and_task_size, task_get_proc_raw(task));
571 }
572
573 /*
574 * task_lock() and task_unlock() need to be callable from the `bsd/` tree of
575 * XNU and are therefore promoted to full functions instead of macros so that
576 * they can be linked against.
577 *
578 * We provide `extern` declarations here for consumers of `task.h` in `osfmk/`,
579 * then separately provide `inline` definitions in `task.c`. Together with the
580 * `BUILD_LTO=1` build argument, this guarantees these functions are always
581 * inlined regardless of whether called from the `osfmk/` tree or `bsd/` tree.
582 */
583 extern void task_lock(task_t);
584 extern void task_unlock(task_t);
585
586 #define task_lock_assert_owned(task) LCK_MTX_ASSERT(&(task)->lock, LCK_MTX_ASSERT_OWNED)
587 #define task_lock_try(task) lck_mtx_try_lock(&(task)->lock)
588
589 #define task_objq_lock_init(task) lck_mtx_init(&(task)->task_objq_lock, &vm_object_lck_grp, &vm_object_lck_attr)
590 #define task_objq_lock_destroy(task) lck_mtx_destroy(&(task)->task_objq_lock, &vm_object_lck_grp)
591 #define task_objq_lock(task) lck_mtx_lock(&(task)->task_objq_lock)
592 #define task_objq_lock_assert_owned(task) LCK_MTX_ASSERT(&(task)->task_objq_lock, LCK_MTX_ASSERT_OWNED)
593 #define task_objq_lock_try(task) lck_mtx_try_lock(&(task)->task_objq_lock)
594 #define task_objq_unlock(task) lck_mtx_unlock(&(task)->task_objq_lock)
595
596 #define itk_lock_init(task) lck_mtx_init(&(task)->itk_lock_data, &ipc_lck_grp, &ipc_lck_attr)
597 #define itk_lock_destroy(task) lck_mtx_destroy(&(task)->itk_lock_data, &ipc_lck_grp)
598 #define itk_lock(task) lck_mtx_lock(&(task)->itk_lock_data)
599 #define itk_unlock(task) lck_mtx_unlock(&(task)->itk_lock_data)
600
601 /* task clear return wait flags */
602 #define TCRW_CLEAR_INITIAL_WAIT 0x1
603 #define TCRW_CLEAR_FINAL_WAIT 0x2
604 #define TCRW_CLEAR_EXEC_COMPLETE 0x4
605 #define TCRW_CLEAR_ALL_WAIT (TCRW_CLEAR_INITIAL_WAIT | TCRW_CLEAR_FINAL_WAIT)
606
607 /* Initialize task module */
608 extern void task_init(void);
609
610 /* coalition_init() calls this to initialize ledgers before task_init() */
611 extern void init_task_ledgers(void);
612
613 extern task_t current_task(void) __pure2;
614
615 extern bool task_is_driver(task_t task);
616 extern uint32_t task_ro_flags_get(task_t task);
617 extern void task_ro_flags_set(task_t task, uint32_t flags);
618 extern void task_ro_flags_clear(task_t task, uint32_t flags);
619
620 extern lck_attr_t task_lck_attr;
621 extern lck_grp_t task_lck_grp;
622
/*
 * One entry of a task's watchport set: the watched port plus the
 * port-destroyed notification request that was displaced when the
 * watchport was armed.  Lock protecting each field is noted inline.
 */
struct task_watchport_elem {
	task_t twe_task; /* owning task */
	ipc_port_t twe_port; /* (Space lock) */
	ipc_port_t XNU_PTRAUTH_SIGNED_PTR("twe_pdrequest") twe_pdrequest; /* saved port-destroyed request */
};
628
/*
 * Refcounted container for the watchports handed to a task at spawn
 * (task->watchports).  tw_elem is a C99 flexible array holding
 * tw_elem_array_count entries; retain/release via the
 * task_watchports_retain/release macros below.
 */
struct task_watchports {
	os_refcnt_t tw_refcount; /* (Space lock) */
	task_t tw_task; /* (Space lock) & tw_refcount == 0 */
	thread_t tw_thread; /* (Space lock) & tw_refcount == 0 */
	uint32_t tw_elem_array_count; /* (Space lock) */
	struct task_watchport_elem tw_elem[]; /* (Space lock) & (Portlock) & (mq lock) */
};
636
637 #define task_watchports_retain(x) (os_ref_retain(&(x)->tw_refcount))
638 #define task_watchports_release(x) (os_ref_release(&(x)->tw_refcount))
639
/*
 * Initialize a task_watchport_elem: plain field assignments only
 * (no port rights are taken here); twe_pdrequest starts out IP_NULL.
 */
#define task_watchport_elem_init(elem, task, port) \
do { \
	(elem)->twe_task = (task); \
	(elem)->twe_port = (port); \
	(elem)->twe_pdrequest = IP_NULL; \
} while(0)
646
647 #define task_watchport_elem_clear(elem) task_watchport_elem_init((elem), NULL, NULL)
648
649 extern void
650 task_add_turnstile_watchports(
651 task_t task,
652 thread_t thread,
653 ipc_port_t *portwatch_ports,
654 uint32_t portwatch_count);
655
656 extern void
657 task_watchport_elem_deallocate(
658 struct task_watchport_elem *watchport_elem);
659
660 extern boolean_t
661 task_has_watchports(task_t task);
662
663 void
664 task_dyld_process_info_update_helper(
665 task_t task,
666 size_t active_count,
667 vm_map_address_t magic_addr,
668 ipc_port_t *release_ports,
669 size_t release_count);
670
671 extern kern_return_t
672 task_suspend2_mig(
673 task_t task,
674 task_suspension_token_t *suspend_token);
675
676 extern kern_return_t
677 task_suspend2_external(
678 task_t task,
679 task_suspension_token_t *suspend_token);
680
681 extern kern_return_t
682 task_resume2_mig(
683 task_suspension_token_t suspend_token);
684
685 extern kern_return_t
686 task_resume2_external(
687 task_suspension_token_t suspend_token);
688
689 extern void
690 task_suspension_token_deallocate_grp(
691 task_suspension_token_t suspend_token,
692 task_grp_t grp);
693
694 extern ipc_port_t
695 convert_task_to_port_with_flavor(
696 task_t task,
697 mach_task_flavor_t flavor,
698 task_grp_t grp);
699
700 extern task_t current_task_early(void) __pure2;
701
702 #else /* MACH_KERNEL_PRIVATE */
703
704 __BEGIN_DECLS
705
706 extern task_t current_task(void) __pure2;
707
708 extern bool task_is_driver(task_t task);
709
710 #define TF_NONE 0
711
712 #define TWF_NONE 0
713 #define TRW_LRETURNWAIT 0x01 /* task is waiting for fork/posix_spawn/exec to complete */
714 #define TRW_LRETURNWAITER 0x02 /* task is waiting for TRW_LRETURNWAIT to get cleared */
715 #define TRW_LEXEC_COMPLETE 0x04 /* thread should call exec complete */
716
717 /* task clear return wait flags */
718 #define TCRW_CLEAR_INITIAL_WAIT 0x1
719 #define TCRW_CLEAR_FINAL_WAIT 0x2
720 #define TCRW_CLEAR_EXEC_COMPLETE 0x4
721 #define TCRW_CLEAR_ALL_WAIT (TCRW_CLEAR_INITIAL_WAIT | TCRW_CLEAR_FINAL_WAIT)
722
723
724 #define TPF_NONE 0
725 #define TPF_EXEC_COPY 0x00000002 /* task is the new copy of an exec */
726
727
728 __END_DECLS
729
730 #endif /* MACH_KERNEL_PRIVATE */
731
732 __BEGIN_DECLS
733
734 #ifdef KERNEL_PRIVATE
735 extern boolean_t task_is_app_suspended(task_t task);
736 extern bool task_is_exotic(task_t task);
737 extern bool task_is_alien(task_t task);
738 #endif /* KERNEL_PRIVATE */
739
740 #ifdef XNU_KERNEL_PRIVATE
741
742 /* Hold all threads in a task */
743 extern kern_return_t task_hold(
744 task_t task);
745
746 /* Wait for task to stop running, either just to get off CPU or to cease being runnable */
747 extern kern_return_t task_wait(
748 task_t task,
749 boolean_t until_not_runnable);
750
751 /* Release hold on all threads in a task */
752 extern kern_return_t task_release(
753 task_t task);
754
755 /* Suspend/resume a task where the kernel owns the suspend count */
756 extern kern_return_t task_suspend_internal_locked( task_t task);
757 extern kern_return_t task_suspend_internal( task_t task);
758 extern kern_return_t task_resume_internal_locked( task_t task);
759 extern kern_return_t task_resume_internal( task_t task);
760
761 /* Suspends a task by placing a hold on its threads */
762 extern kern_return_t task_pidsuspend(
763 task_t task);
764
765 /* Resumes a previously paused task */
766 extern kern_return_t task_pidresume(
767 task_t task);
768
769 extern kern_return_t task_send_trace_memory(
770 task_t task,
771 uint32_t pid,
772 uint64_t uniqueid);
773
774 extern void task_remove_turnstile_watchports(
775 task_t task);
776
777 extern void task_transfer_turnstile_watchports(
778 task_t old_task,
779 task_t new_task,
780 thread_t new_thread);
781
782 extern kern_return_t
783 task_violated_guard(mach_exception_code_t, mach_exception_subcode_t, void *, bool);
784
785 #if DEVELOPMENT || DEBUG
786
787 extern kern_return_t task_disconnect_page_mappings(
788 task_t task);
789 #endif
790
791 extern void tasks_system_suspend(boolean_t suspend);
792
793 #if CONFIG_FREEZE
794
795 /* Freeze a task's resident pages */
796 extern kern_return_t task_freeze(
797 task_t task,
798 uint32_t *purgeable_count,
799 uint32_t *wired_count,
800 uint32_t *clean_count,
801 uint32_t *dirty_count,
802 uint32_t dirty_budget,
803 uint32_t *shared_count,
804 int *freezer_error_code,
805 boolean_t eval_only);
806
807 /* Thaw a currently frozen task */
808 extern kern_return_t task_thaw(
809 task_t task);
810
/* Direction of a frozen-to-swap accounting adjustment (CONFIG_FREEZE only) */
typedef enum {
	CREDIT_TO_SWAP = 1,   /* add `amount` to the task's frozen-to-swap total */
	DEBIT_FROM_SWAP = 2   /* subtract `amount` from the task's frozen-to-swap total */
} freezer_acct_op_t;

/*
 * Adjust a task's frozen-to-swap accounting by `amount` in the direction
 * given by `op`.  NOTE(review): units of `amount` (bytes vs. pages) are not
 * visible in this header — confirm against the implementation.
 */
extern void task_update_frozen_to_swap_acct(
	task_t  task,
	int64_t amount,
	freezer_acct_op_t op);
820
821 #endif /* CONFIG_FREEZE */
822
823 /* Halt all other threads in the current task */
824 extern kern_return_t task_start_halt(
825 task_t task);
826
827 /* Wait for other threads to halt and free halting task resources */
828 extern void task_complete_halt(
829 task_t task);
830
831 extern kern_return_t task_terminate_internal(
832 task_t task);
833
834 struct proc_ro;
835 typedef struct proc_ro *proc_ro_t;
836
837 extern kern_return_t task_create_internal(
838 task_t parent_task,
839 proc_ro_t proc_ro,
840 coalition_t *parent_coalitions,
841 boolean_t inherit_memory,
842 boolean_t is_64bit,
843 boolean_t is_64bit_data,
844 uint32_t t_flags,
845 uint32_t t_flags_ro,
846 uint32_t procflags,
847 uint8_t t_returnwaitflags,
848 task_t child_task);
849
850 extern kern_return_t task_set_special_port_internal(
851 task_t task,
852 int which,
853 ipc_port_t port);
854
855 extern kern_return_t task_set_security_tokens(
856 task_t task,
857 security_token_t sec_token,
858 audit_token_t audit_token,
859 host_priv_t host_priv);
860
861 extern kern_return_t task_info(
862 task_t task,
863 task_flavor_t flavor,
864 task_info_t task_info_out,
865 mach_msg_type_number_t *task_info_count);
866
867 /*
868 * Additional fields that aren't exposed through `task_power_info` but needed
869 * by clients of `task_power_info_locked`.
870 */
871 struct task_power_info_extra {
872 uint64_t cycles;
873 uint64_t instructions;
874 uint64_t pcycles;
875 uint64_t pinstructions;
876 uint64_t user_ptime;
877 uint64_t system_ptime;
878 uint64_t runnable_time;
879 uint64_t energy;
880 uint64_t penergy;
881 };
882
883 void task_power_info_locked(
884 task_t task,
885 task_power_info_t info,
886 gpu_energy_data_t gpu_energy,
887 task_power_info_v2_t infov2,
888 struct task_power_info_extra *extra_info);
889
890 extern uint64_t task_gpu_utilisation(
891 task_t task);
892
893 extern void task_update_cpu_time_qos_stats(
894 task_t task,
895 uint64_t *eqos_stats,
896 uint64_t *rqos_stats);
897
898 extern void task_vtimer_set(
899 task_t task,
900 integer_t which);
901
902 extern void task_vtimer_clear(
903 task_t task,
904 integer_t which);
905
906 extern void task_vtimer_update(
907 task_t task,
908 integer_t which,
909 uint32_t *microsecs);
910
/*
 * Virtual-timer selectors for task_vtimer_set/clear/update().
 * NOTE(review): these appear to correspond to the BSD interval timers
 * (ITIMER_VIRTUAL / ITIMER_PROF / RLIMIT CPU accounting) — confirm.
 */
#define TASK_VTIMER_USER 0x01
#define TASK_VTIMER_PROF 0x02
#define TASK_VTIMER_RLIM 0x04
914
915 extern void task_set_64bit(
916 task_t task,
917 boolean_t is_64bit,
918 boolean_t is_64bit_data);
919
920 extern bool task_get_64bit_addr(
921 task_t task);
922
923 extern bool task_get_64bit_data(
924 task_t task);
925
926 extern void task_set_platform_binary(
927 task_t task,
928 boolean_t is_platform);
929
930 extern boolean_t task_get_platform_binary(
931 task_t task);
932
933 extern boolean_t task_is_a_corpse(
934 task_t task);
935
936 extern void task_set_corpse(
937 task_t task);
938
939 extern void task_set_exc_guard_ctrl_port_default(
940 task_t task,
941 thread_t main_thread,
942 const char *name,
943 unsigned int namelen,
944 boolean_t is_simulated,
945 uint32_t platform,
946 uint32_t sdk);
947
948 extern void task_set_immovable_pinned(task_t task);
949
950 extern bool task_set_ca_client_wi(
951 task_t task,
952 boolean_t ca_client_wi);
953
954 extern kern_return_t task_set_dyld_info(
955 task_t task,
956 mach_vm_address_t addr,
957 mach_vm_size_t size);
958
959 extern void task_set_mach_header_address(
960 task_t task,
961 mach_vm_address_t addr);
962
963 extern void task_set_uniqueid(task_t task);
964
965 /* Get number of activations in a task */
966 extern int get_task_numacts(
967 task_t task);
968
969 extern bool task_donates_own_pages(
970 task_t task);
971
972 struct label;
973 extern kern_return_t task_collect_crash_info(
974 task_t task,
975 #if CONFIG_MACF
976 struct label *crash_label,
977 #endif
978 int is_corpse_fork);
979 void task_wait_till_threads_terminate_locked(task_t task);
980
981 /* JMM - should just be temporary (implementation in bsd_kern still) */
982 extern void set_bsdtask_info(task_t, void *);
983 extern uint32_t set_task_loadTag(task_t task, uint32_t loadTag);
984 extern vm_map_t get_task_map_reference(task_t);
985 extern vm_map_t swap_task_map(task_t, thread_t, vm_map_t);
986 extern pmap_t get_task_pmap(task_t);
987 extern uint64_t get_task_resident_size(task_t);
988 extern uint64_t get_task_compressed(task_t);
989 extern uint64_t get_task_resident_max(task_t);
990 extern uint64_t get_task_phys_footprint(task_t);
#if CONFIG_LEDGER_INTERVAL_MAX
extern uint64_t get_task_phys_footprint_interval_max(task_t, int reset);
#endif /* CONFIG_LEDGER_INTERVAL_MAX */
994 extern uint64_t get_task_phys_footprint_lifetime_max(task_t);
995 extern uint64_t get_task_phys_footprint_limit(task_t);
996 extern uint64_t get_task_purgeable_size(task_t);
997 extern uint64_t get_task_cpu_time(task_t);
998 extern uint64_t get_task_dispatchqueue_offset(task_t);
999 extern uint64_t get_task_dispatchqueue_serialno_offset(task_t);
1000 extern uint64_t get_task_dispatchqueue_label_offset(task_t);
1001 extern uint64_t get_task_uniqueid(task_t task);
1002 extern int get_task_version(task_t task);
1003
1004 extern uint64_t get_task_internal(task_t);
1005 extern uint64_t get_task_internal_compressed(task_t);
1006 extern uint64_t get_task_purgeable_nonvolatile(task_t);
1007 extern uint64_t get_task_purgeable_nonvolatile_compressed(task_t);
1008 extern uint64_t get_task_iokit_mapped(task_t);
1009 extern uint64_t get_task_alternate_accounting(task_t);
1010 extern uint64_t get_task_alternate_accounting_compressed(task_t);
1011 extern uint64_t get_task_memory_region_count(task_t);
1012 extern uint64_t get_task_page_table(task_t);
1013 #if CONFIG_FREEZE
1014 extern uint64_t get_task_frozen_to_swap(task_t);
1015 #endif
1016 extern uint64_t get_task_network_nonvolatile(task_t);
1017 extern uint64_t get_task_network_nonvolatile_compressed(task_t);
1018 extern uint64_t get_task_wired_mem(task_t);
1019 extern uint32_t get_task_loadTag(task_t task);
1020
1021 extern uint64_t get_task_tagged_footprint(task_t task);
1022 extern uint64_t get_task_tagged_footprint_compressed(task_t task);
1023 extern uint64_t get_task_media_footprint(task_t task);
1024 extern uint64_t get_task_media_footprint_compressed(task_t task);
1025 extern uint64_t get_task_graphics_footprint(task_t task);
1026 extern uint64_t get_task_graphics_footprint_compressed(task_t task);
1027 extern uint64_t get_task_neural_footprint(task_t task);
1028 extern uint64_t get_task_neural_footprint_compressed(task_t task);
1029
1030 extern kern_return_t task_convert_phys_footprint_limit(int, int *);
1031 extern kern_return_t task_set_phys_footprint_limit_internal(task_t, int, int *, boolean_t, boolean_t);
1032 extern kern_return_t task_get_phys_footprint_limit(task_t task, int *limit_mb);
1033 #if DEBUG || DEVELOPMENT
1034 #if CONFIG_MEMORYSTATUS
1035 extern kern_return_t task_set_diag_footprint_limit_internal(task_t, uint64_t, uint64_t *);
1036 extern kern_return_t task_get_diag_footprint_limit_internal(task_t, uint64_t *, bool *);
1037 extern kern_return_t task_set_diag_footprint_limit(task_t task, uint64_t new_limit_mb, uint64_t *old_limit_mb);
1038 #endif /* CONFIG_MEMORYSTATUS */
1039 #endif /* DEBUG || DEVELOPMENT */
1040
1041 extern security_token_t *task_get_sec_token(task_t task);
1042 extern void task_set_sec_token(task_t task, security_token_t *token);
1043 extern audit_token_t *task_get_audit_token(task_t task);
1044 extern void task_set_audit_token(task_t task, audit_token_t *token);
1045 extern void task_set_tokens(task_t task, security_token_t *sec_token, audit_token_t *audit_token);
1046 extern boolean_t task_is_privileged(task_t task);
1047 extern uint8_t *task_get_mach_trap_filter_mask(task_t task);
1048 extern void task_set_mach_trap_filter_mask(task_t task, uint8_t *mask);
1049 extern uint8_t *task_get_mach_kobj_filter_mask(task_t task);
1050 extern void task_set_mach_kobj_filter_mask(task_t task, uint8_t *mask);
1051 extern mach_vm_address_t task_get_all_image_info_addr(task_t task);
1052
1053 /* Jetsam memlimit attributes */
1054 extern boolean_t task_get_memlimit_is_active(task_t task);
1055 extern boolean_t task_get_memlimit_is_fatal(task_t task);
1056 extern void task_set_memlimit_is_active(task_t task, boolean_t memlimit_is_active);
1057 extern void task_set_memlimit_is_fatal(task_t task, boolean_t memlimit_is_fatal);
1058 extern boolean_t task_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active);
1059 extern void task_mark_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active);
1060
1061 extern uint64_t task_get_dirty_start(task_t task);
1062 extern void task_set_dirty_start(task_t task, uint64_t start);
1063
1064 extern void task_set_thread_limit(task_t task, uint16_t thread_limit);
1065 #if CONFIG_PROC_RESOURCE_LIMITS
1066 extern kern_return_t task_set_port_space_limits(task_t task, uint32_t soft_limit, uint32_t hard_limit);
1067 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
1068 extern void task_port_space_ast(task_t task);
1069
1070 #if XNU_TARGET_OS_OSX
1071 extern boolean_t task_has_system_version_compat_enabled(task_t task);
1072 extern void task_set_system_version_compat_enabled(task_t task, boolean_t enable_system_version_compat);
1073 #endif
1074
1075 extern boolean_t is_kerneltask(task_t task);
1076 extern boolean_t is_corpsefork(task_t task);
1077
1078 extern kern_return_t check_actforsig(task_t task, thread_t thread, int setast);
1079
1080 extern kern_return_t machine_task_get_state(
1081 task_t task,
1082 int flavor,
1083 thread_state_t state,
1084 mach_msg_type_number_t *state_count);
1085
1086 extern kern_return_t machine_task_set_state(
1087 task_t task,
1088 int flavor,
1089 thread_state_t state,
1090 mach_msg_type_number_t state_count);
1091
1092 extern void machine_task_terminate(task_t task);
1093
1094 extern kern_return_t machine_task_process_signature(task_t task, uint32_t platform, uint32_t sdk, char const **error_msg);
1095
/*
 * Indices into the task ledger: each field holds the entry index for one
 * tracked resource.  NOTE(review): presumably populated once when the task
 * ledger template is initialized — confirm against the ledger setup code.
 */
struct _task_ledger_indices {
	int cpu_time;
	int tkm_private;
	int tkm_shared;
	/* memory accounting */
	int phys_mem;
	int wired_mem;
	int internal;
	int iokit_mapped;
	int external;
	int reusable;
	int alternate_accounting;
	int alternate_accounting_compressed;
	int page_table;
	int phys_footprint;
	int internal_compressed;
	/* purgeable memory accounting */
	int purgeable_volatile;
	int purgeable_nonvolatile;
	int purgeable_volatile_compressed;
	int purgeable_nonvolatile_compressed;
	/* tagged/network/media/graphics/neural memory, footprint vs. non-footprint */
	int tagged_nofootprint;
	int tagged_footprint;
	int tagged_nofootprint_compressed;
	int tagged_footprint_compressed;
	int network_volatile;
	int network_nonvolatile;
	int network_volatile_compressed;
	int network_nonvolatile_compressed;
	int media_nofootprint;
	int media_footprint;
	int media_nofootprint_compressed;
	int media_footprint_compressed;
	int graphics_nofootprint;
	int graphics_footprint;
	int graphics_nofootprint_compressed;
	int graphics_footprint_compressed;
	int neural_nofootprint;
	int neural_footprint;
	int neural_nofootprint_compressed;
	int neural_footprint_compressed;
	/* wakeup accounting */
	int platform_idle_wakeups;
	int interrupt_wakeups;
#if CONFIG_SCHED_SFI
	int sfi_wait_times[MAX_SFI_CLASS_ID];
#endif /* CONFIG_SCHED_SFI */
	/* resource billing (e.g. via voucher/bank) */
	int cpu_time_billed_to_me;
	int cpu_time_billed_to_others;
	/* I/O write accounting */
	int physical_writes;
	int logical_writes;
	int logical_writes_to_external;
	int energy_billed_to_me;
	int energy_billed_to_others;
#if CONFIG_MEMORYSTATUS
	int memorystatus_dirty_time;
#endif /* CONFIG_MEMORYSTATUS */
#if DEBUG || DEVELOPMENT
	/* page-grab diagnostics, debug/development kernels only */
	int pages_grabbed;
	int pages_grabbed_kern;
	int pages_grabbed_iopl;
	int pages_grabbed_upl;
#endif
#if CONFIG_FREEZE
	int frozen_to_swap;
#endif /* CONFIG_FREEZE */
#if CONFIG_PHYS_WRITE_ACCT
	int fs_metadata_writes;
#endif /* CONFIG_PHYS_WRITE_ACCT */
	int swapins;
};
1164
1165 /*
1166 * Many of the task ledger entries use a reduced feature set
1167 * (specifically they just use LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE)
1168 * and are stored in a smaller entry structure.
1169 * That structure is an implementation detail of the ledger.
1170 * But on PPL systems, the task ledger's memory is managed by the PPL
1171 * and it has to determine the size of the task ledger at compile time.
1172 * This define specifies the number of small entries so the PPL can
1173 * properly determine the ledger's size.
1174 *
1175 * If you add a new entry with only the
1176 * LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE | LEDGER_ENTRY_ALLOW_INACTIVE
1177 * flags, you need to increment this count.
1178 * Otherwise, PPL systems will panic at boot.
1179 */
1180 #if DEVELOPMENT || DEBUG
1181 #define TASK_LEDGER_NUM_SMALL_INDICES 33
1182 #else
1183 #define TASK_LEDGER_NUM_SMALL_INDICES 29
1184 #endif /* DEVELOPMENT || DEBUG */
1185 extern struct _task_ledger_indices task_ledgers;
1186
1187 /* requires task to be unlocked, returns a referenced thread */
1188 thread_t task_findtid(task_t task, uint64_t tid);
1189 int pid_from_task(task_t task);
1190
1191 extern kern_return_t task_wakeups_monitor_ctl(task_t task, uint32_t *rate_hz, int32_t *flags);
1192 extern kern_return_t task_cpu_usage_monitor_ctl(task_t task, uint32_t *flags);
1193 extern void task_rollup_accounting_info(task_t new_task, task_t parent_task);
1194 extern kern_return_t task_io_monitor_ctl(task_t task, uint32_t *flags);
1195 extern void task_set_did_exec_flag(task_t task);
1196 extern void task_clear_exec_copy_flag(task_t task);
1197 extern boolean_t task_is_exec_copy(task_t);
1198 extern boolean_t task_did_exec(task_t task);
1199 extern boolean_t task_is_active(task_t task);
1200 extern boolean_t task_is_halting(task_t task);
1201 extern void task_clear_return_wait(task_t task, uint32_t flags);
1202 extern void task_wait_to_return(void) __attribute__((noreturn));
1203 extern event_t task_get_return_wait_event(task_t task);
1204
1205 extern void task_bank_reset(task_t task);
1206 extern void task_bank_init(task_t task);
1207
1208 #if CONFIG_MEMORYSTATUS
1209 extern void task_ledger_settle_dirty_time(task_t t);
1210 #endif /* CONFIG_MEMORYSTATUS */
1211
1212 #if CONFIG_ARCADE
1213 extern void task_prep_arcade(task_t task, thread_t thread);
1214 #endif /* CONFIG_ARCADE */
1215
1216 extern int task_pid(task_t task);
1217
1218 #if __has_feature(ptrauth_calls)
1219 char *task_get_vm_shared_region_id_and_jop_pid(task_t task, uint64_t *);
1220 void task_set_shared_region_id(task_t task, char *id);
1221 #endif /* __has_feature(ptrauth_calls) */
1222
1223 extern boolean_t task_has_assertions(task_t task);
1224 /* End task_policy */
1225
1226 extern void task_set_gpu_denied(task_t task, boolean_t denied);
1227 extern boolean_t task_is_gpu_denied(task_t task);
1228
1229 extern void task_set_game_mode(task_t task, bool enabled);
1230 /* returns true if update must be pushed to coalition (Automatically handled by task_set_game_mode) */
1231 extern bool task_set_game_mode_locked(task_t task, bool enabled);
1232 extern bool task_get_game_mode(task_t task);
1233
1234 extern queue_head_t * task_io_user_clients(task_t task);
1235 extern void task_set_message_app_suspended(task_t task, boolean_t enable);
1236
1237 extern void task_copy_fields_for_exec(task_t dst_task, task_t src_task);
1238
1239 extern void task_copy_vmobjects(task_t task, vm_object_query_t query, size_t len, size_t *num);
1240 extern void task_get_owned_vmobjects(task_t task, size_t buffer_size, vmobject_list_output_t buffer, size_t* output_size, size_t* entries);
1241 extern void task_store_owned_vmobject_info(task_t to_task, task_t from_task);
1242
1243 extern void task_set_filter_msg_flag(task_t task, boolean_t flag);
1244 extern boolean_t task_get_filter_msg_flag(task_t task);
1245
1246 #if __has_feature(ptrauth_calls)
1247 extern bool task_is_pac_exception_fatal(task_t task);
1248 extern void task_set_pac_exception_fatal_flag(task_t task);
1249 #endif /*__has_feature(ptrauth_calls)*/
1250
1251 extern bool task_needs_user_signed_thread_state(task_t task);
1252 extern void task_set_tecs(task_t task);
1253 extern void task_get_corpse_vmobject_list(task_t task, vmobject_list_output_t* list, size_t* list_size);
1254
1255 extern boolean_t task_corpse_forking_disabled(task_t task);
1256
1257 void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_MACH_PORTS(task_t task,
1258 uint32_t current_size, uint32_t soft_limit, uint32_t hard_limit);
1259
1260 extern int get_task_cdhash(task_t task, char cdhash[CS_CDHASH_LEN]);
1261
1262 extern boolean_t kdp_task_is_locked(task_t task);
1263
1264 /* Kernel side prototypes for MIG routines */
1265 extern kern_return_t task_get_exception_ports(
1266 task_t task,
1267 exception_mask_t exception_mask,
1268 exception_mask_array_t masks,
1269 mach_msg_type_number_t *CountCnt,
1270 exception_port_array_t ports,
1271 exception_behavior_array_t behaviors,
1272 thread_state_flavor_array_t flavors);
1273
1274
1275 #endif /* XNU_KERNEL_PRIVATE */
1276 #ifdef KERNEL_PRIVATE
1277
1278 extern void *get_bsdtask_info(task_t);
1279 extern void *get_bsdthreadtask_info(thread_t);
1280 extern void task_bsdtask_kill(task_t);
1281 extern vm_map_t get_task_map(task_t);
1282 extern ledger_t get_task_ledger(task_t);
1283
1284 extern boolean_t get_task_pidsuspended(task_t);
1285 extern boolean_t get_task_suspended(task_t);
1286 extern boolean_t get_task_frozen(task_t);
1287
1288 /*
1289 * Flavors of convert_task_to_port. XNU callers get convert_task_to_port_kernel,
1290 * external callers get convert_task_to_port_external.
1291 */
1292 extern ipc_port_t convert_task_to_port(task_t);
1293 extern ipc_port_t convert_task_to_port_kernel(task_t);
1294 extern ipc_port_t convert_task_to_port_external(task_t);
1295 extern ipc_port_t convert_task_to_port_pinned(task_t);
1296
1297 extern ipc_port_t convert_task_read_to_port(task_t);
1298 extern ipc_port_t convert_task_read_to_port_kernel(task_read_t);
1299 extern ipc_port_t convert_task_read_to_port_external(task_t);
1300
1301 extern ipc_port_t convert_task_inspect_to_port(task_inspect_t);
1302 extern ipc_port_t convert_task_name_to_port(task_name_t);
1303
1304 extern ipc_port_t convert_corpse_to_port_and_nsrequest(task_t task);
1305
1306 extern ipc_port_t convert_task_suspension_token_to_port(task_suspension_token_t task);
1307 /* Convert from a port (in this case, an SO right to a task's resume port) to a task. */
1308 extern task_suspension_token_t convert_port_to_task_suspension_token(ipc_port_t port);
1309
1310 extern void task_suspension_send_once(ipc_port_t port);
1311
/*
 * Flags for task_update_logical_writes() classifying how a logical write
 * was issued.  NOTE(review): semantics inferred from the names — confirm
 * against the implementation.
 */
#define TASK_WRITE_IMMEDIATE 0x1
#define TASK_WRITE_DEFERRED 0x2
#define TASK_WRITE_INVALIDATED 0x4
#define TASK_WRITE_METADATA 0x8
/* Charge `io_size` bytes of logical writes of kind `flags` to `task`; `vp` is the vnode written */
extern void task_update_logical_writes(task_t task, uint32_t io_size, int flags, void *vp);
1317
/* Direction of a balance adjustment for task_update_physical_writes() */
__enum_decl(task_balance_flags_t, uint8_t, {
	TASK_BALANCE_CREDIT = 0x1,
	TASK_BALANCE_DEBIT = 0x2,
});

/* Flavor of physical write being accounted */
__enum_decl(task_physical_write_flavor_t, uint8_t, {
	TASK_PHYSICAL_WRITE_METADATA = 0x1,
});
/*
 * Adjust `task`'s physical-write accounting by `io_size` in the direction
 * given by `flags`.  NOTE(review): presumably feeds the physical_writes /
 * fs_metadata_writes ledger entries — confirm.
 */
extern void task_update_physical_writes(task_t task, task_physical_write_flavor_t flavor,
    uint64_t io_size, task_balance_flags_t flags);
1328
1329 #if CONFIG_SECLUDED_MEMORY
1330 extern void task_set_can_use_secluded_mem(
1331 task_t task,
1332 boolean_t can_use_secluded_mem);
1333 extern void task_set_could_use_secluded_mem(
1334 task_t task,
1335 boolean_t could_use_secluded_mem);
1336 extern void task_set_could_also_use_secluded_mem(
1337 task_t task,
1338 boolean_t could_also_use_secluded_mem);
1339 extern boolean_t task_can_use_secluded_mem(
1340 task_t task,
1341 boolean_t is_allocate);
1342 extern boolean_t task_could_use_secluded_mem(task_t task);
1343 extern boolean_t task_could_also_use_secluded_mem(task_t task);
1344 #endif /* CONFIG_SECLUDED_MEMORY */
1345
1346 extern void task_set_darkwake_mode(task_t, boolean_t);
1347 extern boolean_t task_get_darkwake_mode(task_t);
1348
1349 #if __arm64__
1350 extern void task_set_legacy_footprint(task_t task);
1351 extern void task_set_extra_footprint_limit(task_t task);
1352 extern void task_set_ios13extended_footprint_limit(task_t task);
1353 #endif /* __arm64__ */
1354
1355 #if CONFIG_MACF
1356 extern struct label *get_task_crash_label(task_t task);
1357 extern void set_task_crash_label(task_t task, struct label *label);
1358 #endif /* CONFIG_MACF */
1359
1360 #endif /* KERNEL_PRIVATE */
1361
1362 extern task_t kernel_task;
1363
1364 extern void task_name_deallocate_mig(
1365 task_name_t task_name);
1366
1367 extern void task_policy_set_deallocate_mig(
1368 task_policy_set_t task_policy_set);
1369
1370 extern void task_policy_get_deallocate_mig(
1371 task_policy_get_t task_policy_get);
1372
1373 extern void task_inspect_deallocate_mig(
1374 task_inspect_t task_inspect);
1375
1376 extern void task_read_deallocate_mig(
1377 task_read_t task_read);
1378
1379 extern void task_suspension_token_deallocate(
1380 task_suspension_token_t token);
1381
1382 extern boolean_t task_self_region_footprint(void);
1383 extern void task_self_region_footprint_set(boolean_t newval);
1384 extern void task_ledgers_footprint(ledger_t ledger,
1385 ledger_amount_t *ledger_resident,
1386 ledger_amount_t *ledger_compressed);
1387 extern void task_set_memory_ownership_transfer(
1388 task_t task,
1389 boolean_t value);
1390
1391 #if DEVELOPMENT || DEBUG
1392 extern void task_set_no_footprint_for_debug(
1393 task_t task,
1394 boolean_t value);
1395 extern int task_get_no_footprint_for_debug(
1396 task_t task);
1397 #endif /* DEVELOPMENT || DEBUG */
1398
#ifdef KERNEL_PRIVATE
extern kern_return_t task_get_suspend_stats(task_t task, task_suspend_stats_t stats);
/* _kdp variant: NOTE(review): presumably safe to call from the kernel debugger context — confirm */
extern kern_return_t task_get_suspend_stats_kdp(task_t task, task_suspend_stats_t stats);
#endif /* KERNEL_PRIVATE */
1403
1404 #ifdef XNU_KERNEL_PRIVATE
1405 extern kern_return_t task_get_suspend_sources(task_t task, task_suspend_source_array_t sources);
1406 extern kern_return_t task_get_suspend_sources_kdp(task_t task, task_suspend_source_array_t sources);
1407 #endif /* XNU_KERNEL_PRIVATE */
1408
1409 #if CONFIG_ROSETTA
1410 extern bool task_is_translated(task_t task);
1411 #endif
1412
1413
1414 #ifdef MACH_KERNEL_PRIVATE
1415 void task_procname(task_t task, char *buf, int size);
1416 void task_best_name(task_t task, char *buf, size_t size);
1417 #endif /* MACH_KERNEL_PRIVATE */
1418
1419 __END_DECLS
1420
1421 #endif /* _KERN_TASK_H_ */
1422