1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: task.h
60 * Author: Avadis Tevanian, Jr.
61 *
62 * This file contains the structure definitions for tasks.
63 *
64 */
65 /*
66 * Copyright (c) 1993 The University of Utah and
67 * the Computer Systems Laboratory (CSL). All rights reserved.
68 *
69 * Permission to use, copy, modify and distribute this software and its
70 * documentation is hereby granted, provided that both the copyright
71 * notice and this permission notice appear in all copies of the
72 * software, derivative works or modified versions, and any portions
73 * thereof, and that both notices appear in supporting documentation.
74 *
75 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
76 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
77 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
78 *
79 * CSL requests users of this software to return to [email protected] any
80 * improvements that they make and grant CSL redistribution rights.
81 *
82 */
83 /*
84 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
85 * support for mandatory and extensible security protections. This notice
86 * is included in support of clause 2.2 (b) of the Apple Public License,
87 * Version 2.0.
88 * Copyright (c) 2005 SPARTA, Inc.
89 */
90
91 #ifndef _KERN_TASK_H_
92 #define _KERN_TASK_H_
93
94 #include <kern/kern_types.h>
95 #include <kern/task_ref.h>
96 #include <mach/mach_types.h>
97 #include <sys/cdefs.h>
98
99 #ifdef XNU_KERNEL_PRIVATE
100 #include <kern/btlog.h>
101 #include <kern/kern_cdata.h>
102 #include <mach/sfi_class.h>
103 #include <kern/counter.h>
104 #include <kern/cs_blobs.h>
105 #include <kern/queue.h>
106 #include <sys/kern_sysctl.h>
107 #endif /* XNU_KERNEL_PRIVATE */
108
109 #ifdef MACH_KERNEL_PRIVATE
110
111 #include <mach/boolean.h>
112 #include <mach/port.h>
113 #include <mach/time_value.h>
114 #include <mach/message.h>
115 #include <mach/mach_param.h>
116 #include <mach/task_info.h>
117 #include <mach/exception_types.h>
118 #include <mach/vm_statistics.h>
119 #include <machine/task.h>
120
121 #if MONOTONIC
122 #include <machine/monotonic.h>
123 #endif /* MONOTONIC */
124
125 #include <kern/cpu_data.h>
126 #include <kern/queue.h>
127 #include <kern/exception.h>
128 #include <kern/locks.h>
129 #include <security/_label.h>
130 #include <ipc/ipc_port.h>
131
132 #include <kern/thread.h>
133 #include <mach/coalition.h>
134 #include <stdatomic.h>
135 #include <os/refcnt.h>
136
/*
 * Per-QoS-class CPU time accounting: one 64-bit accumulator for each
 * QoS tier a thread can run at.
 */
struct _cpu_time_qos_stats {
	uint64_t cpu_time_qos_default;          /* QOS_CLASS_DEFAULT */
	uint64_t cpu_time_qos_maintenance;      /* QOS_CLASS_MAINTENANCE */
	uint64_t cpu_time_qos_background;       /* QOS_CLASS_BACKGROUND */
	uint64_t cpu_time_qos_utility;          /* QOS_CLASS_UTILITY */
	uint64_t cpu_time_qos_legacy;           /* QOS_CLASS_LEGACY */
	uint64_t cpu_time_qos_user_initiated;   /* QOS_CLASS_USER_INITIATED */
	uint64_t cpu_time_qos_user_interactive; /* QOS_CLASS_USER_INTERACTIVE */
};
146
/*
 * Counters of writes issued by a task, bucketed by write disposition.
 * The task holds two instances: one for internal and one for external
 * (see task_writes_counters_internal/_external in struct task).
 */
struct task_writes_counters {
	uint64_t task_immediate_writes;
	uint64_t task_deferred_writes;
	uint64_t task_invalidated_writes;
	uint64_t task_metadata_writes;
};
153
154 struct task_watchports;
155 #include <bank/bank_internal.h>
156
157 #ifdef MACH_BSD
158 struct proc;
159 struct proc_ro;
160 #endif
161
/*
 * struct task — the Mach task: a resource container holding an address
 * space (map), a set of threads, an IPC port namespace, scheduling
 * attributes, and accounting/ledger state.  Unless a field's comment
 * says otherwise, fields are protected by the task lock ("lock").
 */
struct task {
	/* Synchronization/destruction information */
	decl_lck_mtx_data(, lock);      /* Task's lock */
	os_refcnt_t     ref_count;      /* Number of references to me */

#if DEVELOPMENT || DEBUG
	struct os_refgrp *ref_group;
	lck_spin_t      ref_group_lock;
#endif /* DEVELOPMENT || DEBUG */

	bool            active;         /* Task has not been terminated */
	bool            ipc_active;     /* IPC with the task ports is allowed */
	bool            halting;        /* Task is being halted */
	bool            message_app_suspended; /* Let iokit know when pidsuspended */

	/* Virtual timers */
	uint32_t        vtimers;

	/* Globally unique id to identify tasks and corpses */
	uint64_t        task_uniqueid;

	/* Miscellaneous */
	vm_map_t        XNU_PTRAUTH_SIGNED_PTR("task.map") map; /* Address space description */
	queue_chain_t   tasks;  /* global list of tasks */
	struct task_watchports *watchports; /* watchports passed in spawn */
	turnstile_inheritor_t returnwait_inheritor; /* inheritor for task_wait */

#if defined(CONFIG_SCHED_MULTIQ)
	sched_group_t sched_group;
#endif /* defined(CONFIG_SCHED_MULTIQ) */

	/* Threads in this task */
	queue_head_t            threads;
	struct restartable_ranges *restartable_ranges;

	processor_set_t         pset_hint;
	struct affinity_space   *affinity_space;

	int                     thread_count;
	uint32_t                active_thread_count;
	int                     suspend_count;  /* Internal scheduling only */

	/* User-visible scheduling information */
	integer_t               user_stop_count;        /* outstanding stops */
	integer_t               legacy_stop_count;      /* outstanding legacy stops */

	int16_t                 priority;               /* base priority for threads */
	int16_t                 max_priority;           /* maximum priority for threads */

	integer_t               importance;             /* priority offset (BSD 'nice' value) */

	/* Statistics */
	uint64_t                total_user_time;        /* terminated threads only */
	uint64_t                total_system_time;
	uint64_t                total_ptime;
	uint64_t                total_runnable_time;

	/* IPC structures */
	decl_lck_mtx_data(, itk_lock_data);
	/*
	 * Different flavors of task port.
	 * These flavors TASK_FLAVOR_* are defined in mach_types.h
	 */
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_task_ports") itk_task_ports[TASK_SELF_PORT_COUNT];
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_settable_self") itk_settable_self;   /* a send right */
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_self") itk_self;     /* immovable/pinned task port, does not hold right */
	struct exception_action exc_actions[EXC_TYPES_COUNT];
	/* a send right each valid element */
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_host") itk_host;     /* a send right */
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_bootstrap") itk_bootstrap;   /* a send right */
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_debug_control") itk_debug_control; /* send right for debugmode communications */
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_task_access") itk_task_access; /* and another send right */
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_resume") itk_resume; /* a receive right to resume this task */
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_registered") itk_registered[TASK_PORT_REGISTER_MAX];
	/* all send rights */
	ipc_port_t * XNU_PTRAUTH_SIGNED_PTR("task.itk_dyld_notify") itk_dyld_notify; /* lazy send rights array of size DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT */
#if CONFIG_PROC_RESOURCE_LIMITS
	struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_resource_notify") itk_resource_notify; /* a send right to the resource notify port */
#endif /* CONFIG_PROC_RESOURCE_LIMITS */
	struct ipc_space * XNU_PTRAUTH_SIGNED_PTR("task.itk_space") itk_space;

	ledger_t        ledger;
	/* Synchronizer ownership information */
	queue_head_t    semaphore_list;         /* list of owned semaphores */
	int             semaphores_owned;       /* number of semaphores owned */

	unsigned int    priv_flags;             /* privilege resource flags */
#define VM_BACKING_STORE_PRIV   0x1

	MACHINE_TASK

	counter_t faults;               /* faults counter */
	counter_t pageins;              /* pageins counter */
	counter_t cow_faults;           /* copy on write fault counter */
	counter_t messages_sent;        /* messages sent counter */
	counter_t messages_received;    /* messages received counter */
	uint32_t decompressions;        /* decompression counter */
	uint32_t syscalls_mach;         /* mach system call counter */
	uint32_t syscalls_unix;         /* unix system call counter */
	uint32_t c_switch;              /* total context switches */
	uint32_t p_switch;              /* total processor switches */
	uint32_t ps_switch;             /* total pset switches */

#ifdef MACH_BSD
	struct proc * XNU_PTRAUTH_SIGNED_PTR("task.bsd_info") bsd_info;
	struct proc_ro * bsd_info_ro;
#endif
	kcdata_descriptor_t corpse_info;
	uint64_t crashed_thread_id;
	queue_chain_t corpse_tasks;
#ifdef CONFIG_MACF
	struct label * crash_label;
#endif
	struct vm_shared_region *shared_region;
#if __has_feature(ptrauth_calls)
	char *shared_region_id;                 /* determines which ptr auth key to use */
	bool shared_region_auth_remapped;       /* authenticated sections ready for use */
#endif /* __has_feature(ptrauth_calls) */
	volatile uint32_t t_flags;              /* general-purpose task flags protected by task_lock (TL) */
#define TF_NONE                 0
#define TF_64B_ADDR             0x00000001 /* task has 64-bit addressing */
#define TF_64B_DATA             0x00000002 /* task has 64-bit data registers */
#define TF_CPUMON_WARNING       0x00000004 /* task has at least one thread in CPU usage warning zone */
#define TF_WAKEMON_WARNING      0x00000008 /* task is in wakeups monitor warning zone */
#define TF_TELEMETRY            (TF_CPUMON_WARNING | TF_WAKEMON_WARNING) /* task is a telemetry participant */
#define TF_GPU_DENIED           0x00000010 /* task is not allowed to access the GPU */
#define TF_CORPSE               0x00000020 /* task is a corpse */
#define TF_PENDING_CORPSE       0x00000040 /* task corpse has not been reported yet */
#define TF_CORPSE_FORK          0x00000080 /* task is a forked corpse */
#define TF_PLATFORM             0x00000400 /* task is a platform binary */
#define TF_CA_CLIENT_WI         0x00000800 /* task has CA_CLIENT work interval */
#define TF_DARKWAKE_MODE        0x00001000 /* task is in darkwake mode */
#define TF_NO_SMT               0x00002000 /* task threads must not be paired with SMT threads */
#define TF_FILTER_MSG           0x00004000 /* task calls into message filter callback before sending a message */
#define TF_SYS_VERSION_COMPAT   0x00008000 /* shim task accesses to OS version data (macOS - app compatibility) */
#define TF_PAC_EXC_FATAL        0x00010000 /* task is marked a corpse if a PAC exception occurs */
#define TF_TECS                 0x00020000 /* task threads must enable CPU security */
#if defined(__x86_64__)
#define TF_INSN_COPY_OPTOUT     0x00040000 /* task threads opt out of unhandled-fault instruction stream collection */
#endif
#define TF_COALITION_MEMBER     0x00080000 /* task is a member of a coalition */
#define TF_NO_CORPSE_FORKING    0x00100000 /* do not fork a corpse for this task */
#define TF_USE_PSET_HINT_CLUSTER_TYPE 0x00200000 /* bind task to task->pset_hint->pset_cluster_type */

/*
 * Task is running within a 64-bit address space.
 */
#define task_has_64Bit_addr(task)       \
	(((task)->t_flags & TF_64B_ADDR) != 0)
#define task_set_64Bit_addr(task)       \
	((task)->t_flags |= TF_64B_ADDR)
#define task_clear_64Bit_addr(task)     \
	((task)->t_flags &= ~TF_64B_ADDR)

/*
 * Task is using 64-bit machine state.
 */
#define task_has_64Bit_data(task)       \
	(((task)->t_flags & TF_64B_DATA) != 0)
#define task_set_64Bit_data(task)       \
	((task)->t_flags |= TF_64B_DATA)
#define task_clear_64Bit_data(task)     \
	((task)->t_flags &= ~TF_64B_DATA)

#define task_is_a_corpse(task)  \
	(((task)->t_flags & TF_CORPSE) != 0)

#define task_set_corpse(task)   \
	((task)->t_flags |= TF_CORPSE)

#define task_corpse_pending_report(task)        \
	(((task)->t_flags & TF_PENDING_CORPSE) != 0)

#define task_set_corpse_pending_report(task)    \
	((task)->t_flags |= TF_PENDING_CORPSE)

#define task_clear_corpse_pending_report(task)  \
	((task)->t_flags &= ~TF_PENDING_CORPSE)

#define task_is_a_corpse_fork(task)     \
	(((task)->t_flags & TF_CORPSE_FORK) != 0)

#define task_set_coalition_member(task) \
	((task)->t_flags |= TF_COALITION_MEMBER)

#define task_clear_coalition_member(task)       \
	((task)->t_flags &= ~TF_COALITION_MEMBER)

#define task_is_coalition_member(task)  \
	(((task)->t_flags & TF_COALITION_MEMBER) != 0)

	uint32_t t_procflags;   /* general-purpose task flags protected by proc_lock (PL) */
#define TPF_NONE                 0
#define TPF_DID_EXEC             0x00000001 /* task has been execed to a new task */
#define TPF_EXEC_COPY            0x00000002 /* task is the new copy of an exec */
#ifdef CONFIG_32BIT_TELEMETRY
#define TPF_LOG_32BIT_TELEMETRY  0x00000004 /* task should log identifying information */
#endif

#define task_did_exec_internal(task)    \
	(((task)->t_procflags & TPF_DID_EXEC) != 0)

#define task_is_exec_copy_internal(task)        \
	(((task)->t_procflags & TPF_EXEC_COPY) != 0)

	uint8_t t_returnwaitflags;
#define TWF_NONE                 0
#define TRW_LRETURNWAIT          0x01           /* task is waiting for fork/posix_spawn/exec to complete */
#define TRW_LRETURNWAITER        0x02           /* task is waiting for TRW_LRETURNWAIT to get cleared */

	mach_vm_address_t       all_image_info_addr; /* dyld __all_image_info     */
	mach_vm_size_t          all_image_info_size; /* section location and size */

#if KPC
#define TASK_KPC_FORCED_ALL_CTRS        0x2     /* Bit in "t_kpc" signifying this task forced all counters */
	uint32_t t_kpc; /* kpc flags */
#endif /* KPC */

	boolean_t pidsuspended; /* pid_suspend called; no threads can execute */
	boolean_t frozen;       /* frozen; private resident pages committed to swap */
	boolean_t changing_freeze_state;        /* in the process of freezing or thawing */
	uint16_t policy_ru_cpu          :4,
	    policy_ru_cpu_ext           :4,
	    applied_ru_cpu              :4,
	    applied_ru_cpu_ext          :4;
	uint8_t  rusage_cpu_flags;
	uint8_t  rusage_cpu_percentage;         /* Task-wide CPU limit percentage */
	uint8_t  rusage_cpu_perthr_percentage;  /* Per-thread CPU limit percentage */
#if MACH_ASSERT
	int8_t          suspends_outstanding;   /* suspends this task performed in excess of resumes */
#endif
	uint64_t rusage_cpu_interval;           /* Task-wide CPU limit interval */
	uint64_t rusage_cpu_perthr_interval;    /* Per-thread CPU limit interval */
	uint64_t rusage_cpu_deadline;
	thread_call_t rusage_cpu_callt;
#if CONFIG_TASKWATCH
	queue_head_t    task_watchers;          /* app state watcher threads */
	int     num_taskwatchers;
	int     watchapplying;
#endif /* CONFIG_TASKWATCH */

	struct bank_task *bank_context;         /* pointer to per task bank structure */

#if IMPORTANCE_INHERITANCE
	struct ipc_importance_task  *task_imp_base;     /* Base of IPC importance chain */
#endif /* IMPORTANCE_INHERITANCE */

	vm_extmod_statistics_data_t     extmod_statistics;

	struct task_requested_policy requested_policy;
	struct task_effective_policy effective_policy;

	/*
	 * Can be merged with imp_donor bits, once the IMPORTANCE_INHERITANCE macro goes away.
	 */
	uint32_t        low_mem_notified_warn           :1,     /* warning low memory notification is sent to the task */
	    low_mem_notified_critical                   :1,     /* critical low memory notification is sent to the task */
	    purged_memory_warn                          :1,     /* purgeable memory of the task is purged for warning level pressure */
	    purged_memory_critical                      :1,     /* purgeable memory of the task is purged for critical level pressure */
	    low_mem_privileged_listener                 :1,     /* if set, task would like to know about pressure changes before other tasks on the system */
	    mem_notify_reserved                         :27;    /* reserved for future use */

	uint32_t memlimit_is_active                 :1, /* if set, use active attributes, otherwise use inactive attributes */
	    memlimit_is_fatal                       :1, /* if set, exceeding current memlimit will prove fatal to the task */
	    memlimit_active_exc_resource            :1, /* if set, suppress exc_resource exception when task exceeds active memory limit */
	    memlimit_inactive_exc_resource          :1, /* if set, suppress exc_resource exception when task exceeds inactive memory limit */
	    memlimit_attrs_reserved                 :28; /* reserved for future use */

	io_stat_info_t          task_io_stats;

	struct task_writes_counters task_writes_counters_internal;
	struct task_writes_counters task_writes_counters_external;

	/*
	 * The cpu_time_qos_stats fields are protected by the task lock
	 */
	struct _cpu_time_qos_stats      cpu_time_eqos_stats;
	struct _cpu_time_qos_stats      cpu_time_rqos_stats;

	/* Statistics accumulated for terminated threads from this task */
	uint32_t        task_timer_wakeups_bin_1;
	uint32_t        task_timer_wakeups_bin_2;
	uint64_t        task_gpu_ns;
	uint64_t        task_energy;

#if MONOTONIC
	/* Read and written under task_lock */
	struct mt_task task_monotonic;
#endif /* MONOTONIC */

	uint8_t         task_can_transfer_memory_ownership;
#if DEVELOPMENT || DEBUG
	uint8_t         task_no_footprint_for_debug;
#endif
	uint8_t         task_objects_disowning;
	uint8_t         task_objects_disowned;
	/* # of purgeable volatile VM objects owned by this task: */
	int             task_volatile_objects;
	/* # of purgeable but not volatile VM objects owned by this task: */
	int             task_nonvolatile_objects;
	int             task_owned_objects;
	queue_head_t    task_objq;
	decl_lck_mtx_data(, task_objq_lock); /* protects "task_objq" */

	unsigned int    task_thread_limit:16;
#if __arm64__
	unsigned int    task_legacy_footprint:1;
	unsigned int    task_extra_footprint_limit:1;
	unsigned int    task_ios13extended_footprint_limit:1;
#endif /* __arm64__ */
	unsigned int    task_region_footprint:1;
	unsigned int    task_has_crossed_thread_limit:1;
	/* NOTE(review): appears to distinguish a task across exec transitions
	 * (see TPF_DID_EXEC / TPF_EXEC_COPY) — confirm against users. */
	uint32_t        exec_token;
	/*
	 * A task's coalition set is "adopted" in task_create_internal
	 * and unset in task_deallocate_internal, so each array member
	 * can be referenced without the task lock.
	 * Note: these fields are protected by coalition->lock,
	 * not the task lock.
	 */
	coalition_t     coalition[COALITION_NUM_TYPES];
	queue_chain_t   task_coalition[COALITION_NUM_TYPES];
	uint64_t        dispatchqueue_offset;

#if DEVELOPMENT || DEBUG
	boolean_t       task_unnested;
	int             task_disconnected_count;
#endif

#if HYPERVISOR
	void * XNU_PTRAUTH_SIGNED_PTR("task.hv_task_target") hv_task_target; /* hypervisor virtual machine object associated with this task */
#endif /* HYPERVISOR */

#if CONFIG_SECLUDED_MEMORY
	uint8_t task_can_use_secluded_mem;
	uint8_t task_could_use_secluded_mem;
	uint8_t task_could_also_use_secluded_mem;
	uint8_t task_suppressed_secluded;
#endif /* CONFIG_SECLUDED_MEMORY */

	task_exc_guard_behavior_t task_exc_guard;

#define task_is_immovable(task) \
	!!(task->task_control_port_options & TASK_CONTROL_PORT_IMMOVABLE)
#define task_is_pinned(task) \
	!!(task->task_control_port_options & TASK_CONTROL_PORT_PINNED)

	task_control_port_options_t task_control_port_options;

	queue_head_t    io_user_clients;

	mach_vm_address_t mach_header_vm_address;

	uint32_t loadTag; /* dext ID used for logging identity  */
#if CONFIG_FREEZE
	queue_head_t   task_frozen_cseg_q;  /* queue of csegs frozen to NAND */
#endif /* CONFIG_FREEZE */
#if CONFIG_PHYS_WRITE_ACCT
	uint64_t        task_fs_metadata_writes;
#endif /* CONFIG_PHYS_WRITE_ACCT */
	uint32_t        task_shared_region_slide;   /* cached here to avoid locking during telemetry */
	uuid_t          task_shared_region_uuid;
#if CONFIG_MEMORYSTATUS
	uint64_t        memstat_dirty_start;        /* last abstime transition into the dirty band or last call to task_ledger_settle_dirty_time while dirty */
#endif /* CONFIG_MEMORYSTATUS */
	vmobject_list_output_t corpse_vmobject_list;
	uint64_t        corpse_vmobject_list_size;
};
530
531 /*
532 * EXC_GUARD default delivery behavior for optional Mach port and VM guards.
533 * Applied to new tasks at creation time.
534 */
535 extern task_exc_guard_behavior_t task_exc_guard_default;
536
537 extern kern_return_t
538 task_violated_guard(mach_exception_code_t, mach_exception_subcode_t, void *);
539
540 static inline void
task_require(struct task * task)541 task_require(struct task *task)
542 {
543 zone_id_require(ZONE_ID_TASK, sizeof(struct task), task);
544 }
545
/* Task lock primitives: thin wrappers around the task's mutex ("lock"). */
#define task_lock(task)                 lck_mtx_lock(&(task)->lock)
#define task_lock_assert_owned(task)    LCK_MTX_ASSERT(&(task)->lock, LCK_MTX_ASSERT_OWNED)
#define task_lock_try(task)             lck_mtx_try_lock(&(task)->lock)
#define task_unlock(task)               lck_mtx_unlock(&(task)->lock)
550
/* Lock protecting the task's owned-VM-object queue ("task_objq"). */
#define task_objq_lock_init(task)               lck_mtx_init(&(task)->task_objq_lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define task_objq_lock_destroy(task)            lck_mtx_destroy(&(task)->task_objq_lock, &vm_object_lck_grp)
#define task_objq_lock(task)                    lck_mtx_lock(&(task)->task_objq_lock)
#define task_objq_lock_assert_owned(task)       LCK_MTX_ASSERT(&(task)->task_objq_lock, LCK_MTX_ASSERT_OWNED)
#define task_objq_lock_try(task)                lck_mtx_try_lock(&(task)->task_objq_lock)
#define task_objq_unlock(task)                  lck_mtx_unlock(&(task)->task_objq_lock)
557
/* IPC ("itk") lock: protects the task's port fields (itk_*). */
#define itk_lock_init(task)     lck_mtx_init(&(task)->itk_lock_data, &ipc_lck_grp, &ipc_lck_attr)
#define itk_lock_destroy(task)  lck_mtx_destroy(&(task)->itk_lock_data, &ipc_lck_grp)
#define itk_lock(task)          lck_mtx_lock(&(task)->itk_lock_data)
#define itk_unlock(task)        lck_mtx_unlock(&(task)->itk_lock_data)
562
/* task clear return wait flags (companions to TRW_LRETURNWAIT above) */
#define TCRW_CLEAR_INITIAL_WAIT   0x1   /* clear the initial return-wait */
#define TCRW_CLEAR_FINAL_WAIT     0x2   /* clear the final return-wait */
#define TCRW_CLEAR_ALL_WAIT       (TCRW_CLEAR_INITIAL_WAIT | TCRW_CLEAR_FINAL_WAIT)
567
568 extern kern_return_t kernel_task_create(
569 task_t task,
570 vm_offset_t map_base,
571 vm_size_t map_size,
572 task_t *child);
573
574 /* Initialize task module */
575 extern void task_init(void);
576
577 /* coalition_init() calls this to initialize ledgers before task_init() */
578 extern void init_task_ledgers(void);
579
580 extern task_t current_task(void) __pure2;
581
582 extern bool task_is_driver(task_t task);
583
584 extern lck_attr_t task_lck_attr;
585 extern lck_grp_t task_lck_grp;
586
/*
 * One watchport entry: a task/port pair plus the saved port-destroyed
 * notification request port (twe_pdrequest — presumably restored when
 * the element is torn down; confirm against task_watchport code).
 */
struct task_watchport_elem {
	task_t                          twe_task;
	ipc_port_t                      twe_port;     /* (Space lock) */
	ipc_port_t XNU_PTRAUTH_SIGNED_PTR("twe_pdrequest") twe_pdrequest;
};
592
/*
 * Refcounted set of watchports adopted at spawn, with a flexible array
 * of tw_elem_array_count entries.  The lock protecting each field is
 * annotated inline ("Space lock" = the owning IPC space's lock).
 */
struct task_watchports {
	os_refcnt_t                     tw_refcount;            /* (Space lock) */
	task_t                          tw_task;                /* (Space lock) & tw_refcount == 0 */
	thread_t                        tw_thread;              /* (Space lock) & tw_refcount == 0 */
	uint32_t                        tw_elem_array_count;    /* (Space lock) */
	struct task_watchport_elem      tw_elem[];              /* (Space lock) & (Portlock) & (mq lock) */
};
600
/* Reference counting for a task_watchports structure. */
#define task_watchports_retain(x)   (os_ref_retain(&(x)->tw_refcount))
#define task_watchports_release(x)  (os_ref_release(&(x)->tw_refcount))

/*
 * Initialize a watchport element with a task/port pair and no
 * outstanding port-destroyed request.
 */
#define task_watchport_elem_init(elem, task, port) \
do {                                               \
	(elem)->twe_task = (task);                 \
	(elem)->twe_port = (port);                 \
	(elem)->twe_pdrequest = IP_NULL;           \
} while(0)

/* Reset an element to the empty (no task, no port) state. */
#define task_watchport_elem_clear(elem) task_watchport_elem_init((elem), NULL, NULL)
612
613 extern void
614 task_add_turnstile_watchports(
615 task_t task,
616 thread_t thread,
617 ipc_port_t *portwatch_ports,
618 uint32_t portwatch_count);
619
620 extern void
621 task_watchport_elem_deallocate(
622 struct task_watchport_elem *watchport_elem);
623
624 extern boolean_t
625 task_has_watchports(task_t task);
626
627 void
628 task_dyld_process_info_update_helper(
629 task_t task,
630 size_t active_count,
631 vm_map_address_t magic_addr,
632 ipc_port_t *release_ports,
633 size_t release_count);
634
635 extern kern_return_t
636 task_suspend2_mig(
637 task_t task,
638 task_suspension_token_t *suspend_token);
639
640 extern kern_return_t
641 task_suspend2_external(
642 task_t task,
643 task_suspension_token_t *suspend_token);
644
645 extern kern_return_t
646 task_resume2_mig(
647 task_suspension_token_t suspend_token);
648
649 extern kern_return_t
650 task_resume2_external(
651 task_suspension_token_t suspend_token);
652
653 extern void
654 task_suspension_token_deallocate_grp(
655 task_suspension_token_t suspend_token,
656 task_grp_t grp);
657
658 extern ipc_port_t
659 convert_task_to_port_with_flavor(
660 task_t task,
661 mach_task_flavor_t flavor,
662 task_grp_t grp);
663
664 extern task_t current_task_early(void) __pure2;
665
666 #else /* MACH_KERNEL_PRIVATE */
667
668 __BEGIN_DECLS
669
670 extern task_t current_task(void) __pure2;
671
672 extern bool task_is_driver(task_t task);
673
674 #define TF_NONE 0
675
676 #define TWF_NONE 0
677 #define TRW_LRETURNWAIT 0x01 /* task is waiting for fork/posix_spawn/exec to complete */
678 #define TRW_LRETURNWAITER 0x02 /* task is waiting for TRW_LRETURNWAIT to get cleared */
679
680 /* task clear return wait flags */
681 #define TCRW_CLEAR_INITIAL_WAIT 0x1
682 #define TCRW_CLEAR_FINAL_WAIT 0x2
683 #define TCRW_CLEAR_ALL_WAIT (TCRW_CLEAR_INITIAL_WAIT | TCRW_CLEAR_FINAL_WAIT)
684
685
686 #define TPF_NONE 0
687 #define TPF_EXEC_COPY 0x00000002 /* task is the new copy of an exec */
688
689
690 __END_DECLS
691
692 #endif /* MACH_KERNEL_PRIVATE */
693
694 __BEGIN_DECLS
695
696 #ifdef KERNEL_PRIVATE
697 extern boolean_t task_is_app_suspended(task_t task);
698 extern bool task_is_exotic(task_t task);
699 extern bool task_is_alien(task_t task);
700 #endif
701
702 #ifdef XNU_KERNEL_PRIVATE
703
704 /* Hold all threads in a task */
705 extern kern_return_t task_hold(
706 task_t task);
707
708 /* Wait for task to stop running, either just to get off CPU or to cease being runnable */
709 extern kern_return_t task_wait(
710 task_t task,
711 boolean_t until_not_runnable);
712
713 /* Release hold on all threads in a task */
714 extern kern_return_t task_release(
715 task_t task);
716
717 /* Suspend/resume a task where the kernel owns the suspend count */
718 extern kern_return_t task_suspend_internal( task_t task);
719 extern kern_return_t task_resume_internal( task_t task);
720
721 /* Suspends a task by placing a hold on its threads */
722 extern kern_return_t task_pidsuspend(
723 task_t task);
724
725 /* Resumes a previously paused task */
726 extern kern_return_t task_pidresume(
727 task_t task);
728
729 extern kern_return_t task_send_trace_memory(
730 task_t task,
731 uint32_t pid,
732 uint64_t uniqueid);
733
734 extern void task_remove_turnstile_watchports(
735 task_t task);
736
737 extern void task_transfer_turnstile_watchports(
738 task_t old_task,
739 task_t new_task,
740 thread_t new_thread);
741
742 #if DEVELOPMENT || DEBUG
743
744 extern kern_return_t task_disconnect_page_mappings(
745 task_t task);
746 #endif
747
748 extern void tasks_system_suspend(boolean_t suspend);
749
750 #if CONFIG_FREEZE
751
752 /* Freeze a task's resident pages */
753 extern kern_return_t task_freeze(
754 task_t task,
755 uint32_t *purgeable_count,
756 uint32_t *wired_count,
757 uint32_t *clean_count,
758 uint32_t *dirty_count,
759 uint32_t dirty_budget,
760 uint32_t *shared_count,
761 int *freezer_error_code,
762 boolean_t eval_only);
763
764 /* Thaw a currently frozen task */
765 extern kern_return_t task_thaw(
766 task_t task);
767
/*
 * Direction of a frozen-to-swap accounting adjustment
 * (operand to task_update_frozen_to_swap_acct() below).
 */
typedef enum {
	CREDIT_TO_SWAP = 1,     /* add "amount" to the task's frozen-to-swap total */
	DEBIT_FROM_SWAP = 2     /* subtract "amount" from the task's frozen-to-swap total */
} freezer_acct_op_t;
772
773 extern void task_update_frozen_to_swap_acct(
774 task_t task,
775 int64_t amount,
776 freezer_acct_op_t op);
777
778 #endif /* CONFIG_FREEZE */
779
780 /* Halt all other threads in the current task */
781 extern kern_return_t task_start_halt(
782 task_t task);
783
784 /* Wait for other threads to halt and free halting task resources */
785 extern void task_complete_halt(
786 task_t task);
787
788 extern kern_return_t task_terminate_internal(
789 task_t task);
790
791 struct proc_ro;
792 typedef struct proc_ro *proc_ro_t;
793
794 extern kern_return_t task_create_internal(
795 task_t parent_task,
796 proc_ro_t proc_ro,
797 coalition_t *parent_coalitions,
798 boolean_t inherit_memory,
799 boolean_t is_64bit,
800 boolean_t is_64bit_data,
801 uint32_t flags,
802 uint32_t procflags,
803 uint8_t t_returnwaitflags,
804 task_t *child_task); /* OUT */
805
806 extern kern_return_t task_set_special_port_internal(
807 task_t task,
808 int which,
809 ipc_port_t port);
810
811 extern kern_return_t task_set_security_tokens(
812 task_t task,
813 security_token_t sec_token,
814 audit_token_t audit_token,
815 host_priv_t host_priv);
816
817 extern kern_return_t task_info(
818 task_t task,
819 task_flavor_t flavor,
820 task_info_t task_info_out,
821 mach_msg_type_number_t *task_info_count);
822
823 extern void task_power_info_locked(
824 task_t task,
825 task_power_info_t info,
826 gpu_energy_data_t gpu_energy,
827 task_power_info_v2_t infov2,
828 uint64_t *runnable_time);
829
830 extern uint64_t task_gpu_utilisation(
831 task_t task);
832
833 extern uint64_t task_energy(
834 task_t task);
835
836 extern uint64_t task_cpu_ptime(
837 task_t task);
838 extern void task_update_cpu_time_qos_stats(
839 task_t task,
840 uint64_t *eqos_stats,
841 uint64_t *rqos_stats);
842
843 extern void task_vtimer_set(
844 task_t task,
845 integer_t which);
846
847 extern void task_vtimer_clear(
848 task_t task,
849 integer_t which);
850
851 extern void task_vtimer_update(
852 task_t task,
853 integer_t which,
854 uint32_t *microsecs);
855
856 #define TASK_VTIMER_USER 0x01
857 #define TASK_VTIMER_PROF 0x02
858 #define TASK_VTIMER_RLIM 0x04
859
860 extern void task_set_64bit(
861 task_t task,
862 boolean_t is_64bit,
863 boolean_t is_64bit_data);
864
865 extern bool task_get_64bit_addr(
866 task_t task);
867
868 extern bool task_get_64bit_data(
869 task_t task);
870
871 extern void task_set_platform_binary(
872 task_t task,
873 boolean_t is_platform);
874
875 extern void task_set_exc_guard_ctrl_port_default(
876 task_t task,
877 thread_t main_thread,
878 const char *name,
879 unsigned int namelen,
880 boolean_t is_simulated,
881 uint32_t platform,
882 uint32_t sdk);
883
884 extern void task_set_immovable_pinned(task_t task);
885
886 extern bool task_set_ca_client_wi(
887 task_t task,
888 boolean_t ca_client_wi);
889
890 extern void task_set_dyld_info(
891 task_t task,
892 mach_vm_address_t addr,
893 mach_vm_size_t size);
894
895 extern void task_set_mach_header_address(
896 task_t task,
897 mach_vm_address_t addr);
898
899 extern void task_set_uniqueid(task_t task);
900
901 /* Get number of activations in a task */
902 extern int get_task_numacts(
903 task_t task);
904
905 struct label;
906 extern kern_return_t task_collect_crash_info(
907 task_t task,
908 #if CONFIG_MACF
909 struct label *crash_label,
910 #endif
911 int is_corpse_fork);
912
913 void task_wait_till_threads_terminate_locked(task_t task);
914
915 /* JMM - should just be temporary (implementation in bsd_kern still) */
916 extern void set_bsdtask_info(task_t, void *);
917 extern uint32_t set_task_loadTag(task_t task, uint32_t loadTag);
918 extern vm_map_t get_task_map_reference(task_t);
919 extern vm_map_t swap_task_map(task_t, thread_t, vm_map_t);
920 extern pmap_t get_task_pmap(task_t);
921 extern uint64_t get_task_resident_size(task_t);
922 extern uint64_t get_task_compressed(task_t);
923 extern uint64_t get_task_resident_max(task_t);
924 extern uint64_t get_task_phys_footprint(task_t);
#if CONFIG_LEDGER_INTERVAL_MAX
extern uint64_t get_task_phys_footprint_interval_max(task_t, int reset);
#endif /* CONFIG_LEDGER_INTERVAL_MAX */
928 extern uint64_t get_task_phys_footprint_lifetime_max(task_t);
929 extern uint64_t get_task_phys_footprint_limit(task_t);
930 extern uint64_t get_task_purgeable_size(task_t);
931 extern uint64_t get_task_cpu_time(task_t);
932 extern uint64_t get_task_dispatchqueue_offset(task_t);
933 extern uint64_t get_task_dispatchqueue_serialno_offset(task_t);
934 extern uint64_t get_task_dispatchqueue_label_offset(task_t);
935 extern uint64_t get_task_uniqueid(task_t task);
936 extern int get_task_version(task_t task);
937
938 extern uint64_t get_task_internal(task_t);
939 extern uint64_t get_task_internal_compressed(task_t);
940 extern uint64_t get_task_purgeable_nonvolatile(task_t);
941 extern uint64_t get_task_purgeable_nonvolatile_compressed(task_t);
942 extern uint64_t get_task_iokit_mapped(task_t);
943 extern uint64_t get_task_alternate_accounting(task_t);
944 extern uint64_t get_task_alternate_accounting_compressed(task_t);
945 extern uint64_t get_task_memory_region_count(task_t);
946 extern uint64_t get_task_page_table(task_t);
947 #if CONFIG_FREEZE
948 extern uint64_t get_task_frozen_to_swap(task_t);
949 #endif
950 extern uint64_t get_task_network_nonvolatile(task_t);
951 extern uint64_t get_task_network_nonvolatile_compressed(task_t);
952 extern uint64_t get_task_wired_mem(task_t);
953 extern uint32_t get_task_loadTag(task_t task);
954
955 extern uint64_t get_task_tagged_footprint(task_t task);
956 extern uint64_t get_task_tagged_footprint_compressed(task_t task);
957 extern uint64_t get_task_media_footprint(task_t task);
958 extern uint64_t get_task_media_footprint_compressed(task_t task);
959 extern uint64_t get_task_graphics_footprint(task_t task);
960 extern uint64_t get_task_graphics_footprint_compressed(task_t task);
961 extern uint64_t get_task_neural_footprint(task_t task);
962 extern uint64_t get_task_neural_footprint_compressed(task_t task);
963
964 extern kern_return_t task_convert_phys_footprint_limit(int, int *);
965 extern kern_return_t task_set_phys_footprint_limit_internal(task_t, int, int *, boolean_t, boolean_t);
966 extern kern_return_t task_get_phys_footprint_limit(task_t task, int *limit_mb);
967
968 extern security_token_t *task_get_sec_token(task_t task);
969 extern void task_set_sec_token(task_t task, security_token_t *token);
970 extern audit_token_t *task_get_audit_token(task_t task);
971 extern void task_set_audit_token(task_t task, audit_token_t *token);
972 extern void task_set_tokens(task_t task, security_token_t *sec_token, audit_token_t *audit_token);
973 extern boolean_t task_is_privileged(task_t task);
974 extern uint8_t *task_get_mach_trap_filter_mask(task_t task);
975 extern void task_set_mach_trap_filter_mask(task_t task, uint8_t *mask);
976 extern uint8_t *task_get_mach_kobj_filter_mask(task_t task);
977 extern void task_set_mach_kobj_filter_mask(task_t task, uint8_t *mask);
978 extern void task_copy_filter_masks(task_t new_task, task_t old_task);
979
980 /* Jetsam memlimit attributes */
981 extern boolean_t task_get_memlimit_is_active(task_t task);
982 extern boolean_t task_get_memlimit_is_fatal(task_t task);
983 extern void task_set_memlimit_is_active(task_t task, boolean_t memlimit_is_active);
984 extern void task_set_memlimit_is_fatal(task_t task, boolean_t memlimit_is_fatal);
985 extern boolean_t task_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active);
986 extern void task_mark_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active);
987
988 extern uint64_t task_get_dirty_start(task_t task);
989 extern void task_set_dirty_start(task_t task, uint64_t start);
990
991 extern void task_set_thread_limit(task_t task, uint16_t thread_limit);
992 #if CONFIG_PROC_RESOURCE_LIMITS
993 extern kern_return_t task_set_port_space_limits(task_t task, uint32_t soft_limit, uint32_t hard_limit);
994 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
995 extern void task_port_space_ast(task_t task);
996
997 #if XNU_TARGET_OS_OSX
998 extern boolean_t task_has_system_version_compat_enabled(task_t task);
999 extern void task_set_system_version_compat_enabled(task_t task, boolean_t enable_system_version_compat);
1000 #endif
1001
1002 extern boolean_t is_kerneltask(task_t task);
1003 extern boolean_t is_corpsetask(task_t task);
1004 extern boolean_t is_corpsefork(task_t task);
1005
1006 extern kern_return_t check_actforsig(task_t task, thread_t thread, int setast);
1007
1008 extern kern_return_t machine_task_get_state(
1009 task_t task,
1010 int flavor,
1011 thread_state_t state,
1012 mach_msg_type_number_t *state_count);
1013
1014 extern kern_return_t machine_task_set_state(
1015 task_t task,
1016 int flavor,
1017 thread_state_t state,
1018 mach_msg_type_number_t state_count);
1019
1020 extern void machine_task_terminate(task_t task);
1021
/*
 * Indices of the entries in a task's ledger.  Each field holds the
 * index of one tracked quantity in the task ledger template; the
 * shared index table is the `task_ledgers' global declared further
 * down in this header.
 */
struct _task_ledger_indices {
	int cpu_time;
	int tkm_private;
	int tkm_shared;
	int phys_mem;
	int wired_mem;
	int internal;
	int iokit_mapped;
	int external;
	int reusable;
	int alternate_accounting;
	int alternate_accounting_compressed;
	int page_table;
	int phys_footprint;
	int internal_compressed;
	int purgeable_volatile;
	int purgeable_nonvolatile;
	int purgeable_volatile_compressed;
	int purgeable_nonvolatile_compressed;
	/*
	 * The tagged/network/media/graphics/neural groups each track a
	 * footprint vs. no-footprint (or volatile vs. nonvolatile) split
	 * plus the corresponding compressed variants.
	 */
	int tagged_nofootprint;
	int tagged_footprint;
	int tagged_nofootprint_compressed;
	int tagged_footprint_compressed;
	int network_volatile;
	int network_nonvolatile;
	int network_volatile_compressed;
	int network_nonvolatile_compressed;
	int media_nofootprint;
	int media_footprint;
	int media_nofootprint_compressed;
	int media_footprint_compressed;
	int graphics_nofootprint;
	int graphics_footprint;
	int graphics_nofootprint_compressed;
	int graphics_footprint_compressed;
	int neural_nofootprint;
	int neural_footprint;
	int neural_nofootprint_compressed;
	int neural_footprint_compressed;
	int platform_idle_wakeups;
	int interrupt_wakeups;
#if CONFIG_SCHED_SFI
	/* one wait-time entry per SFI class */
	int sfi_wait_times[MAX_SFI_CLASS_ID];
#endif /* CONFIG_SCHED_SFI */
	int cpu_time_billed_to_me;
	int cpu_time_billed_to_others;
	int physical_writes;
	int logical_writes;
	int logical_writes_to_external;
	int energy_billed_to_me;
	int energy_billed_to_others;
#if CONFIG_MEMORYSTATUS
	int memorystatus_dirty_time;
#endif /* CONFIG_MEMORYSTATUS */
#if DEBUG || DEVELOPMENT
	int pages_grabbed;
	int pages_grabbed_kern;
	int pages_grabbed_iopl;
	int pages_grabbed_upl;
#endif
#if CONFIG_FREEZE
	int frozen_to_swap;
#endif /* CONFIG_FREEZE */
#if CONFIG_PHYS_WRITE_ACCT
	int fs_metadata_writes;
#endif /* CONFIG_PHYS_WRITE_ACCT */
	int swapins;
};
1090
1091 /*
1092 * Many of the task ledger entries use a reduced feature set
1093 * (specifically they just use LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE)
1094 * and are stored in a smaller entry structure.
1095 * That structure is an implementation detail of the ledger.
1096 * But on PPL systems, the task ledger's memory is managed by the PPL
1097 * and it has to determine the size of the task ledger at compile time.
1098 * This define specifies the number of small entries so the PPL can
1099 * properly determine the ledger's size.
1100 *
1101 * If you add a new entry with only the
1102 * LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE | LEDGER_ENTRY_ALLOW_INACTIVE
1103 * flags, you need to increment this count.
1104 * Otherwise, PPL systems will panic at boot.
1105 */
1106 #if DEVELOPMENT || DEBUG
1107 #define TASK_LEDGER_NUM_SMALL_INDICES 33
1108 #else
1109 #define TASK_LEDGER_NUM_SMALL_INDICES 29
1110 #endif /* DEVELOPMENT || DEBUG */
1111 extern struct _task_ledger_indices task_ledgers;
1112
1113 /* requires task to be unlocked, returns a referenced thread */
1114 thread_t task_findtid(task_t task, uint64_t tid);
1115 int pid_from_task(task_t task);
1116
1117 extern kern_return_t task_wakeups_monitor_ctl(task_t task, uint32_t *rate_hz, int32_t *flags);
1118 extern kern_return_t task_cpu_usage_monitor_ctl(task_t task, uint32_t *flags);
1119 extern void task_rollup_accounting_info(task_t new_task, task_t parent_task);
1120 extern kern_return_t task_io_monitor_ctl(task_t task, uint32_t *flags);
1121 extern void task_set_did_exec_flag(task_t task);
1122 extern void task_clear_exec_copy_flag(task_t task);
1123 extern boolean_t task_is_exec_copy(task_t);
1124 extern boolean_t task_did_exec(task_t task);
1125 #ifdef CONFIG_32BIT_TELEMETRY
1126 extern boolean_t task_consume_32bit_log_flag(task_t task);
1127 extern void task_set_32bit_log_flag(task_t task);
1128 #endif /* CONFIG_32BIT_TELEMETRY */
1129 extern boolean_t task_is_active(task_t task);
1130 extern boolean_t task_is_halting(task_t task);
1131 extern void task_clear_return_wait(task_t task, uint32_t flags);
1132 extern void task_wait_to_return(void) __attribute__((noreturn));
1133 extern event_t task_get_return_wait_event(task_t task);
1134
1135 extern void task_bank_reset(task_t task);
1136 extern void task_bank_init(task_t task);
1137
1138 #if CONFIG_MEMORYSTATUS
1139 extern void task_ledger_settle_dirty_time(task_t t);
1140 #endif /* CONFIG_MEMORYSTATUS */
1141
1142 #if CONFIG_ARCADE
1143 extern void task_prep_arcade(task_t task, thread_t thread);
1144 #endif /* CONFIG_ARCADE */
1145
1146 extern int task_pid(task_t task);
1147
1148 #if __has_feature(ptrauth_calls)
1149 char *task_get_vm_shared_region_id_and_jop_pid(task_t task, uint64_t *);
1150 void task_set_shared_region_id(task_t task, char *id);
1151 #endif /* __has_feature(ptrauth_calls) */
1152
1153 extern boolean_t task_has_assertions(task_t task);
1154 /* End task_policy */
1155
1156 extern void task_set_gpu_denied(task_t task, boolean_t denied);
1157 extern boolean_t task_is_gpu_denied(task_t task);
1158
1159 extern queue_head_t * task_io_user_clients(task_t task);
1160 extern void task_set_message_app_suspended(task_t task, boolean_t enable);
1161
1162 extern void task_copy_fields_for_exec(task_t dst_task, task_t src_task);
1163
1164 extern void task_copy_vmobjects(task_t task, vm_object_query_t query, size_t len, size_t *num);
1165 extern void task_get_owned_vmobjects(task_t task, size_t buffer_size, vmobject_list_output_t buffer, size_t* output_size, size_t* entries);
1166 extern void task_store_owned_vmobject_info(task_t to_task, task_t from_task);
1167
1168 extern void task_set_filter_msg_flag(task_t task, boolean_t flag);
1169 extern boolean_t task_get_filter_msg_flag(task_t task);
1170
1171 extern void task_transfer_mach_filter_bits(task_t new_task, task_t old_mask);
1172
1173 #if __has_feature(ptrauth_calls)
1174 extern bool task_is_pac_exception_fatal(task_t task);
1175 extern void task_set_pac_exception_fatal_flag(task_t task);
1176 #endif /*__has_feature(ptrauth_calls)*/
1177
1178 extern void task_set_tecs(task_t task);
1179 extern void task_get_corpse_vmobject_list(task_t task, vmobject_list_output_t* list, size_t* list_size);
1180
1181 extern boolean_t task_corpse_forking_disabled(task_t task);
1182
1183 void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_MACH_PORTS(task_t task,
1184 uint32_t current_size, uint32_t soft_limit, uint32_t hard_limit);
1185
1186 extern int get_task_cdhash(task_t task, char cdhash[CS_CDHASH_LEN]);
1187
1188 #endif /* XNU_KERNEL_PRIVATE */
1189 #ifdef KERNEL_PRIVATE
1190
1191 extern void *get_bsdtask_info(task_t);
1192 extern void *get_bsdthreadtask_info(thread_t);
1193 extern void task_bsdtask_kill(task_t);
1194 extern vm_map_t get_task_map(task_t);
1195 extern ledger_t get_task_ledger(task_t);
1196
1197 extern boolean_t get_task_pidsuspended(task_t);
1198 extern boolean_t get_task_suspended(task_t);
1199 extern boolean_t get_task_frozen(task_t);
1200
1201 /*
1202 * Flavors of convert_task_to_port. XNU callers get convert_task_to_port_kernel,
1203 * external callers get convert_task_to_port_external, the MIG layer calls
1204 * convert_task_to_port_mig.
1205 */
1206 extern ipc_port_t convert_task_to_port(task_t);
1207 extern ipc_port_t convert_corpse_to_port_and_nsrequest(task_t task);
1208 extern ipc_port_t convert_task_to_port_pinned(task_t);
1209
1210 extern ipc_port_t convert_task_to_port_mig(task_t);
1211 extern ipc_port_t convert_task_to_port_kernel(task_t);
1212 extern ipc_port_t convert_task_to_port_external(task_t);
1213
1214 extern ipc_port_t convert_task_name_to_port(task_name_t);
1215 extern ipc_port_t convert_task_inspect_to_port(task_inspect_t);
1216 extern ipc_port_t convert_task_read_to_port(task_read_t);
1217 extern ipc_port_t convert_task_suspension_token_to_port(task_suspension_token_t task);
1218
1219
1220 /* Convert from a port (in this case, an SO right to a task's resume port) to a task. */
1221 extern task_suspension_token_t convert_port_to_task_suspension_token(ipc_port_t port);
1222 extern void task_suspension_send_once(ipc_port_t port);
1223
/* Flag bits for the `flags' argument of task_update_logical_writes() below. */
#define TASK_WRITE_IMMEDIATE 0x1
#define TASK_WRITE_DEFERRED 0x2
#define TASK_WRITE_INVALIDATED 0x4
#define TASK_WRITE_METADATA 0x8
extern void task_update_logical_writes(task_t task, uint32_t io_size, int flags, void *vp);
1229
/* Direction of a balance update applied by task_update_physical_writes(). */
__enum_decl(task_balance_flags_t, uint8_t, {
	TASK_BALANCE_CREDIT = 0x1,
	TASK_BALANCE_DEBIT = 0x2,
});

/* Flavor of physical write being accounted by task_update_physical_writes(). */
__enum_decl(task_physical_write_flavor_t, uint8_t, {
	TASK_PHYSICAL_WRITE_METADATA = 0x1,
});
extern void task_update_physical_writes(task_t task, task_physical_write_flavor_t flavor,
    uint64_t io_size, task_balance_flags_t flags);
1240
1241 #if CONFIG_SECLUDED_MEMORY
1242 extern void task_set_can_use_secluded_mem(
1243 task_t task,
1244 boolean_t can_use_secluded_mem);
1245 extern void task_set_could_use_secluded_mem(
1246 task_t task,
1247 boolean_t could_use_secluded_mem);
1248 extern void task_set_could_also_use_secluded_mem(
1249 task_t task,
1250 boolean_t could_also_use_secluded_mem);
1251 extern boolean_t task_can_use_secluded_mem(
1252 task_t task,
1253 boolean_t is_allocate);
1254 extern boolean_t task_could_use_secluded_mem(task_t task);
1255 extern boolean_t task_could_also_use_secluded_mem(task_t task);
1256 #endif /* CONFIG_SECLUDED_MEMORY */
1257
1258 extern void task_set_darkwake_mode(task_t, boolean_t);
1259 extern boolean_t task_get_darkwake_mode(task_t);
1260
1261 #if __arm64__
1262 extern void task_set_legacy_footprint(task_t task);
1263 extern void task_set_extra_footprint_limit(task_t task);
1264 extern void task_set_ios13extended_footprint_limit(task_t task);
1265 #endif /* __arm64__ */
1266
1267 #if CONFIG_MACF
1268 extern struct label *get_task_crash_label(task_t task);
1269 extern void set_task_crash_label(task_t task, struct label *label);
1270 #endif /* CONFIG_MACF */
1271
1272 #endif /* KERNEL_PRIVATE */
1273
1274 extern task_t kernel_task;
1275
1276 extern void task_name_deallocate_mig(
1277 task_name_t task_name);
1278
1279 extern void task_policy_set_deallocate_mig(
1280 task_policy_set_t task_policy_set);
1281
1282 extern void task_policy_get_deallocate_mig(
1283 task_policy_get_t task_policy_get);
1284
1285 extern void task_inspect_deallocate_mig(
1286 task_inspect_t task_inspect);
1287
1288 extern void task_read_deallocate_mig(
1289 task_read_t task_read);
1290
1291 extern void task_suspension_token_deallocate(
1292 task_suspension_token_t token);
1293
1294 extern boolean_t task_self_region_footprint(void);
1295 extern void task_self_region_footprint_set(boolean_t newval);
1296 extern void task_ledgers_footprint(ledger_t ledger,
1297 ledger_amount_t *ledger_resident,
1298 ledger_amount_t *ledger_compressed);
1299 extern void task_set_memory_ownership_transfer(
1300 task_t task,
1301 boolean_t value);
1302
1303 #if DEVELOPMENT || DEBUG
1304 extern void task_set_no_footprint_for_debug(
1305 task_t task,
1306 boolean_t value);
1307 extern int task_get_no_footprint_for_debug(
1308 task_t task);
1309 #endif /* DEVELOPMENT || DEBUG */
1310
1311
1312 __END_DECLS
1313
1314 #endif /* _KERN_TASK_H_ */
1315