/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	thread.h
 *	Author:	Avadis Tevanian, Jr.
 *
 *	This file contains the structure definitions for threads.
 *
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to [email protected] any
 * improvements that they make and grant CSL redistribution rights.
 *
 */

#ifndef _KERN_THREAD_H_
#define _KERN_THREAD_H_

#include <mach/kern_return.h>
#include <mach/mach_types.h>
#include <mach/mach_param.h>
#include <mach/message.h>
#include <mach/boolean.h>
#include <mach/vm_param.h>
#include <mach/thread_info.h>
#include <mach/thread_status.h>
#include <mach/exception_types.h>

#include <kern/kern_types.h>
#include <vm/vm_kern.h>
#include <sys/cdefs.h>
#include <sys/_types/_size_t.h>

#ifdef MACH_KERNEL_PRIVATE
#include <mach_assert.h>
#include <mach_ldebug.h>

#include <ipc/ipc_types.h>

#include <mach/port.h>
#include <kern/cpu_number.h>
#include <kern/smp.h>
#include <kern/queue.h>

#include <kern/timer.h>
#include <kern/simple_lock.h>
#include <kern/locks.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <mach/sfi_class.h>
#include <kern/thread_call.h>
#include <kern/thread_group.h>
#include <kern/timer_call.h>
#include <kern/task.h>
#include <kern/exception.h>
#include <kern/affinity.h>
#include <kern/debug.h>
#include <kern/block_hint.h>
#include <kern/recount.h>
#include <kern/turnstile.h>
#include <kern/mpsc_queue.h>

#include <kern/waitq.h>
#include <san/kasan.h>
#include <san/kcov_data.h>
#include <os/refcnt.h>

#include <ipc/ipc_kmsg.h>

#include <machine/atomic.h>
#include <machine/cpu_data.h>
#include <machine/thread.h>

#endif  /* MACH_KERNEL_PRIVATE */
#ifdef XNU_KERNEL_PRIVATE
/* priority queue static asserts fail for __ARM64_ARCH_8_32__ kext builds */
#include <kern/priority_queue.h>
#endif /* XNU_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef XNU_KERNEL_PRIVATE
#if CONFIG_TASKWATCH
/* Taskwatch related. TODO: find this a better home */
typedef struct task_watcher task_watch_t;
#endif /* CONFIG_TASKWATCH */

/* Thread tags; for easy identification. */
__options_closed_decl(thread_tag_t, uint16_t, {
	THREAD_TAG_MAINTHREAD   = 0x01,
	THREAD_TAG_CALLOUT      = 0x02,
	THREAD_TAG_IOWORKLOOP   = 0x04,
	THREAD_TAG_PTHREAD      = 0x10,
	THREAD_TAG_WORKQUEUE    = 0x20,
	THREAD_TAG_USER_JOIN    = 0x40,
});

__options_closed_decl(thread_ro_flags_t, uint16_t, {
	TRO_NONE                = 0x0000,
	TRO_SETUID              = 0x0001,
});

typedef struct thread_ro *thread_ro_t;

/*!
 * @struct thread_ro
 *
 * @brief
 * A structure allocated in a read only zone that safely
 * represents the linkages of a thread to its cred, proc, task, ...
 *
 * @discussion
 * The lifetime of a @c thread_ro structure is 1:1 with that
 * of a @c thread_t or a @c uthread_t; holding a thread reference
 * always makes it safe to dereference this structure.
 */
struct thread_ro {
	struct thread              *tro_owner;
#if MACH_BSD
	struct ucred               *tro_cred;
	struct proc                *tro_proc;
	struct proc_ro             *tro_proc_ro;
#endif
	struct task                *tro_task;
	thread_ro_flags_t           tro_flags;

	struct ipc_port            *tro_self_port;
	struct ipc_port            *tro_settable_self_port;             /* send right */
	struct ipc_port            *tro_ports[THREAD_SELF_PORT_COUNT];  /* no right */

	struct exception_action    *tro_exc_actions;
};

/*
 * Flags for `thread set status`.
 */
__options_decl(thread_set_status_flags_t, uint32_t, {
	TSSF_FLAGS_NONE = 0,

	/* Translate the state to user. */
	TSSF_TRANSLATE_TO_USER = 0x01,

	/* Translate the state to user. Preserve flags */
	TSSF_PRESERVE_FLAGS = 0x02,

	/* Check kernel signed flag */
	TSSF_CHECK_USER_FLAGS = 0x04,

	/* Allow only user state PTRS */
	TSSF_ALLOW_ONLY_USER_PTRS = 0x08,

	/* Generate random diversifier and stash it */
	TSSF_RANDOM_USER_DIV = 0x10,

	/* Stash sigreturn token */
	TSSF_STASH_SIGRETURN_TOKEN = 0x20,

	/* Check sigreturn token */
	TSSF_CHECK_SIGRETURN_TOKEN = 0x40,

	/* Allow only matching sigreturn token */
	TSSF_ALLOW_ONLY_MATCHING_TOKEN = 0x80,

	/* Stash diversifier from thread */
	TSSF_THREAD_USER_DIV = 0x100,
});

/*
 * Size in bits of compact thread id (ctid).
 */
#define CTID_SIZE_BIT 20
typedef uint32_t ctid_t;
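/* A ctid therefore fits in CTID_SIZE_BIT bits: at most (1u << CTID_SIZE_BIT) - 1 = 0xFFFFF. */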

#endif /* XNU_KERNEL_PRIVATE */
#ifdef MACH_KERNEL_PRIVATE

extern zone_t thread_ro_zone;

__options_decl(thread_work_interval_flags_t, uint32_t, {
	TH_WORK_INTERVAL_FLAGS_NONE            = 0x0,
#if CONFIG_SCHED_AUTO_JOIN
	/* Flags to indicate status about the work interval the thread is currently part of */
	TH_WORK_INTERVAL_FLAGS_AUTO_JOIN_LEAK  = 0x1,
#endif /* CONFIG_SCHED_AUTO_JOIN */
	TH_WORK_INTERVAL_FLAGS_HAS_WORKLOAD_ID = 0x2,
	TH_WORK_INTERVAL_FLAGS_RT_ALLOWED      = 0x4,
	TH_WORK_INTERVAL_FLAGS_RT_CRITICAL     = 0x8,
});
typedef union thread_rr_state {
	uint32_t trr_value;
	struct {
#define TRR_FAULT_NONE     0
#define TRR_FAULT_PENDING  1
#define TRR_FAULT_OBSERVED 2
		/*
		 * Set to TRR_FAULT_PENDING with interrupts disabled
		 * by the thread when it is entering a user fault codepath.
		 *
		 * Moved from TRR_FAULT_PENDING to TRR_FAULT_OBSERVED:
		 * - by the thread itself at IPI time,
		 * - or by task_restartable_ranges_synchronize() if the thread
		 *   is interrupted (under the thread lock)
		 *
		 * Cleared by the thread when returning from a user fault
		 * codepath.
		 */
		uint8_t  trr_fault_state;

		/*
		 * Set by task_restartable_ranges_synchronize()
		 * if trr_fault_state is TRR_FAULT_OBSERVED
		 * and a rendezvous at the AST is required.
		 *
		 * Set atomically if trr_fault_state == TRR_FAULT_OBSERVED,
		 * and trr_ipi_ack_pending == 0
		 */
		uint8_t  trr_sync_waiting;

		/*
		 * Updated under the thread_lock(),
		 * set by task_restartable_ranges_synchronize()
		 * when the thread was IPIed and the caller is waiting
		 * for an ACK.
		 */
		uint16_t trr_ipi_ack_pending;
	};
} thread_rr_state_t;
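
/*
 * Summary of the trr_fault_state transitions described above (an
 * illustrative sketch of the comments, not additional machinery):
 *
 *	TRR_FAULT_NONE    --> TRR_FAULT_PENDING   thread enters a user fault
 *	                                          codepath (interrupts disabled)
 *	TRR_FAULT_PENDING --> TRR_FAULT_OBSERVED  by the thread at IPI time, or by
 *	                                          task_restartable_ranges_synchronize()
 *	                                          under the thread lock
 *	TRR_FAULT_*       --> TRR_FAULT_NONE      thread returns from the user
 *	                                          fault codepath
 */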

struct thread {
#if MACH_ASSERT
#define THREAD_MAGIC 0x1234ABCDDCBA4321ULL
	/* Ensure nothing uses &thread as a queue entry */
	uint64_t                thread_magic;
#endif /* MACH_ASSERT */

	/*
	 *	NOTE:	The runq field in the thread structure has an unusual
	 *	locking protocol.  If its value is PROCESSOR_NULL, then it is
	 *	locked by the thread_lock, but if its value is something else
	 *	then it is locked by the associated run queue lock. It is
	 *	set to PROCESSOR_NULL without holding the thread lock, but the
	 *	transition from PROCESSOR_NULL to non-null must be done
	 *	under the thread lock and the run queue lock.
	 *
	 *	New waitq APIs allow the 'links' and 'runq' fields to be
	 *	anywhere in the thread structure.
	 */
	union {
		queue_chain_t                   runq_links;             /* run queue links */
		queue_chain_t                   wait_links;             /* wait queue links */
		struct mpsc_queue_chain         mpsc_links;             /* thread daemon mpsc links */
		struct priority_queue_entry_sched wait_prioq_links;       /* priority ordered waitq links */
	};

	event64_t               wait_event;     /* wait queue event */
	processor_t             runq;           /* run queue assignment */
	waitq_t                 waitq;          /* wait queue this thread is enqueued on */
	struct turnstile       *turnstile;      /* thread's turnstile, protected by primitives interlock */
	void                   *inheritor;      /* inheritor of the primitive the thread will block on */
	struct priority_queue_sched_max sched_inheritor_queue; /* Inheritor queue for kernel promotion */
	struct priority_queue_sched_max base_inheritor_queue; /* Inheritor queue for user promotion */

#if CONFIG_SCHED_EDGE
	bool            th_bound_cluster_enqueued;
	bool            th_shared_rsrc_enqueued[CLUSTER_SHARED_RSRC_TYPE_COUNT];
	bool            th_shared_rsrc_heavy_user[CLUSTER_SHARED_RSRC_TYPE_COUNT];
	bool            th_shared_rsrc_heavy_perf_control[CLUSTER_SHARED_RSRC_TYPE_COUNT];
#endif /* CONFIG_SCHED_EDGE */

#if CONFIG_SCHED_CLUTCH
	/*
	 * In the clutch scheduler, the threads are maintained in runqs at the clutch_bucket
	 * level (clutch_bucket defines a unique thread group and scheduling bucket pair). The
	 * thread is linked via a couple of linkages in the clutch bucket:
	 *
	 * - A stable priority queue linkage which is the main runqueue (based on sched_pri) for the clutch bucket
	 * - A regular priority queue linkage which is based on thread's base/promoted pri (used for clutch bucket priority calculation)
	 * - A queue linkage used for timesharing operations of threads at the scheduler tick
	 */
	struct priority_queue_entry_stable      th_clutch_runq_link;
	struct priority_queue_entry_sched       th_clutch_pri_link;
	queue_chain_t                           th_clutch_timeshare_link;
#endif /* CONFIG_SCHED_CLUTCH */

	/* Data updated during assert_wait/thread_wakeup */
	decl_simple_lock_data(, sched_lock);     /* scheduling lock (thread_lock()) */
	decl_simple_lock_data(, wake_lock);      /* for thread stop / wait (wake_lock()) */
	uint16_t                options;        /* options set by thread itself */
#define TH_OPT_INTMASK          0x0003          /* interrupt / abort level */
#define TH_OPT_VMPRIV           0x0004          /* may allocate reserved memory */
#define TH_OPT_SYSTEM_CRITICAL  0x0010          /* Thread must always be allowed to run - even under heavy load */
#define TH_OPT_PROC_CPULIMIT    0x0020          /* Thread has a task-wide CPU limit applied to it */
#define TH_OPT_PRVT_CPULIMIT    0x0040          /* Thread has a thread-private CPU limit applied to it */
#define TH_OPT_IDLE_THREAD      0x0080          /* Thread is a per-processor idle thread */
#define TH_OPT_GLOBAL_FORCED_IDLE       0x0100  /* Thread performs forced idle for thermal control */
#define TH_OPT_SCHED_VM_GROUP   0x0200          /* Thread belongs to special scheduler VM group */
#define TH_OPT_HONOR_QLIMIT     0x0400          /* Thread will honor qlimit while sending mach_msg, regardless of MACH_SEND_ALWAYS */
#define TH_OPT_SEND_IMPORTANCE  0x0800          /* Thread will allow importance donation from kernel rpc */
#define TH_OPT_ZONE_PRIV        0x1000          /* Thread may use the zone replenish reserve */
#define TH_OPT_IPC_TG_BLOCKED   0x2000          /* Thread blocked in sync IPC and has made the thread group blocked callout */

	bool                    wake_active;    /* wake event on stop */
	bool                    at_safe_point;  /* thread_abort_safely allowed */
	uint8_t                 sched_saved_run_weight;
#if DEVELOPMENT || DEBUG
	bool                    pmap_footprint_suspended;
#endif /* DEVELOPMENT || DEBUG */


	ast_t                   reason;         /* why we blocked */
	uint32_t                quantum_remaining;
	wait_result_t           wait_result;    /* outcome of wait -
	                                        * may be examined by this thread
	                                        * WITHOUT locking */
	thread_rr_state_t       t_rr_state;     /* state for restartable ranges */
	thread_continue_t       continuation;   /* continue here next dispatch */
	void                   *parameter;      /* continuation parameter */

	/* Data updated/used in thread_invoke */
	vm_offset_t             kernel_stack;   /* current kernel stack */
	vm_offset_t             reserved_stack; /* reserved kernel stack */

	/*** Machine-dependent state ***/
	struct machine_thread   machine;

#if KASAN
	struct kasan_thread_data kasan_data;
#endif
#if CONFIG_KCOV
	kcov_thread_data_t       kcov_data;
#endif

	/* Thread state: */
	int                     state;
/*
 *	Thread states [bits or'ed]
 * All but TH_WAIT_REPORT are encoded in SS_TH_FLAGS
 * All are encoded in kcdata.py ('ths_state')
 */
#define TH_WAIT                 0x01            /* queued for waiting */
#define TH_SUSP                 0x02            /* stopped or requested to stop */
#define TH_RUN                  0x04            /* running or on runq */
#define TH_UNINT                0x08            /* waiting uninterruptibly */
#define TH_TERMINATE            0x10            /* halted at termination */
#define TH_TERMINATE2           0x20            /* added to termination queue */
#define TH_WAIT_REPORT          0x40            /* the wait is using the sched_call,
	                                        * only set if TH_WAIT is also set */
#define TH_IDLE                 0x80            /* idling processor */

	/* Scheduling information */
	sched_mode_t            sched_mode;     /* scheduling mode */
	sched_mode_t            saved_mode;     /* saved mode during forced mode demotion */

	/* This thread's contribution to global sched counters */
	sched_bucket_t          th_sched_bucket;

	sfi_class_id_t          sfi_class;      /* SFI class (XXX Updated on CSW/QE/AST) */
	sfi_class_id_t          sfi_wait_class; /* Currently in SFI wait for this class, protected by sfi_lock */

	uint32_t                sched_flags;            /* current flag bits */
#define TH_SFLAG_NO_SMT                 0x0001          /* On an SMT CPU, this thread must be scheduled alone */
#define TH_SFLAG_FAILSAFE               0x0002          /* fail-safe has tripped */
#define TH_SFLAG_THROTTLED              0x0004          /* throttled thread forced to timeshare mode (may be applied in addition to failsafe) */

#define TH_SFLAG_PROMOTED               0x0008          /* sched pri has been promoted by kernel mutex priority promotion */
#define TH_SFLAG_ABORT                  0x0010          /* abort interruptible waits */
#define TH_SFLAG_ABORTSAFELY            0x0020          /* ... but only those at safe point */
#define TH_SFLAG_ABORTED_MASK           (TH_SFLAG_ABORT | TH_SFLAG_ABORTSAFELY)
#define TH_SFLAG_DEPRESS                0x0040          /* normal depress yield */
#define TH_SFLAG_POLLDEPRESS            0x0080          /* polled depress yield */
#define TH_SFLAG_DEPRESSED_MASK         (TH_SFLAG_DEPRESS | TH_SFLAG_POLLDEPRESS)
/* unused TH_SFLAG_PRI_UPDATE           0x0100 */
#define TH_SFLAG_EAGERPREEMPT           0x0200          /* Any preemption of this thread should be treated as if AST_URGENT applied */
#define TH_SFLAG_RW_PROMOTED            0x0400          /* promote reason: blocking with RW lock held */
#define TH_SFLAG_BASE_PRI_FROZEN        0x0800          /* (effective) base_pri is frozen */
#define TH_SFLAG_WAITQ_PROMOTED         0x1000          /* promote reason: waitq wakeup (generally for IPC receive) */

#if __AMP__
#define TH_SFLAG_ECORE_ONLY             0x2000          /* (unused) Bind thread to E core processor set */
#define TH_SFLAG_PCORE_ONLY             0x4000          /* (unused) Bind thread to P core processor set */
#endif

#define TH_SFLAG_EXEC_PROMOTED          0x8000          /* promote reason: thread is in an exec */

#define TH_SFLAG_THREAD_GROUP_AUTO_JOIN 0x10000         /* thread has been auto-joined to thread group */
#if __AMP__
#define TH_SFLAG_BOUND_SOFT             0x20000         /* thread is soft bound to a cluster; can run anywhere if bound cluster unavailable */
#endif /* __AMP__ */

#if CONFIG_PREADOPT_TG
#define TH_SFLAG_REEVALUTE_TG_HIERARCHY_LATER 0x40000   /* thread needs to reevaluate its TG hierarchy */
#endif

#define TH_SFLAG_FLOOR_PROMOTED               0x80000   /* promote reason: boost requested */

/* 'promote reasons' that request a priority floor only, not a custom priority */
#define TH_SFLAG_PROMOTE_REASON_MASK    (TH_SFLAG_RW_PROMOTED | TH_SFLAG_WAITQ_PROMOTED | TH_SFLAG_EXEC_PROMOTED | TH_SFLAG_FLOOR_PROMOTED)

#define TH_SFLAG_RT_RESTRICTED         0x100000         /* thread wants RT but may not have joined a work interval that allows it */
#define TH_SFLAG_DEMOTED_MASK      (TH_SFLAG_THROTTLED | TH_SFLAG_FAILSAFE | TH_SFLAG_RT_RESTRICTED)     /* saved_mode contains previous sched_mode */


	int16_t                 sched_pri;              /* scheduled (current) priority */
	int16_t                 base_pri;               /* effective base priority (equal to req_base_pri unless TH_SFLAG_BASE_PRI_FROZEN) */
	int16_t                 req_base_pri;           /* requested base priority */
	int16_t                 max_priority;           /* copy of max base priority */
	int16_t                 task_priority;          /* copy of task base priority */
	int16_t                 promotion_priority;     /* priority thread is currently promoted to */
	uint16_t                priority_floor_count;   /* number of pushes boosting the floor priority */
	int16_t                 suspend_count;          /* Kernel holds on this thread  */

	int                     iotier_override;        /* atomic operations to set, cleared on ret to user */
	os_ref_atomic_t         ref_count;              /* number of references to me */

	uint32_t                rwlock_count;           /* Number of lck_rw_t locks held by thread */
#ifdef DEBUG_RW
	rw_lock_debug_t         rw_lock_held;           /* rw_locks currently held by the thread */
#endif /* DEBUG_RW */

	integer_t               importance;             /* task-relative importance */

	/* Priority depression expiration */
	integer_t               depress_timer_active;
	timer_call_t            depress_timer;

	/* real-time parameters */
	struct {                                        /* see mach/thread_policy.h */
		uint32_t            period;
		uint32_t            computation;
		uint32_t            constraint;
		bool                preemptible;
		uint8_t             priority_offset;   /* base_pri = BASEPRI_RTQUEUES + priority_offset */
		uint64_t            deadline;
	}                       realtime;

	uint64_t                last_run_time;          /* time when thread was switched away from */
	uint64_t                last_made_runnable_time;        /* time when thread was unblocked or preempted */
	uint64_t                last_basepri_change_time;       /* time when thread was last changed in basepri while runnable */
	uint64_t                same_pri_latency;
	/*
	 * workq_quantum_deadline is the workq thread's next runtime deadline. This
	 * value is set to 0 if the thread has no such deadline applicable to it.
	 *
	 * Synchronization of this field follows from how it is modified:
	 * 1) The field is always modified by the thread on itself, or on a thread
	 * that is not running/runnable.
	 * 2) A change of this field is immediately followed by a
	 * corresponding change to the AST_KEVENT to either set or clear the
	 * AST_KEVENT_WORKQ_QUANTUM_EXPIRED bit.
	 *
	 * workq_quantum_deadline can be modified by the thread on itself during
	 * interrupt context. However, due to (2), and because the change to the
	 * AST_KEVENT is volatile, the compiler is forced to preserve the order
	 * between the write to workq_quantum_deadline and the kevent field, which
	 * guarantees the correct synchronization.
	 */
	uint64_t                workq_quantum_deadline;

#if WORKQ_QUANTUM_HISTORY_DEBUG

#define WORKQ_QUANTUM_HISTORY_COUNT 16
	struct workq_quantum_history {
		uint64_t time;
		uint64_t deadline;
		bool arm;
	} workq_quantum_history[WORKQ_QUANTUM_HISTORY_COUNT];
	uint64_t workq_quantum_history_index;

#define WORKQ_QUANTUM_HISTORY_WRITE_ENTRY(thread, ...)  ({\
	        thread_t __th = (thread); \
	        uint64_t __index = os_atomic_inc_orig(&__th->workq_quantum_history_index, relaxed); \
	        struct workq_quantum_history _wq_quantum_history = { mach_approximate_time(), __VA_ARGS__}; \
	        __th->workq_quantum_history[__index % WORKQ_QUANTUM_HISTORY_COUNT] = \
	                        (struct workq_quantum_history) _wq_quantum_history; \
	})
#else /* WORKQ_QUANTUM_HISTORY_DEBUG */
#define WORKQ_QUANTUM_HISTORY_WRITE_ENTRY(thread, ...)
#endif /* WORKQ_QUANTUM_HISTORY_DEBUG */
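
/*
 * Illustrative use of the debug macro above (a sketch; `d` stands for a
 * deadline value a caller would supply): record that a workq quantum with
 * deadline `d` was armed on the current thread. The varargs initialize the
 * `deadline` and `arm` fields; `time` is stamped via mach_approximate_time().
 *
 *	WORKQ_QUANTUM_HISTORY_WRITE_ENTRY(current_thread(), d, true);
 */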

#define THREAD_NOT_RUNNABLE (~0ULL)

#if CONFIG_THREAD_GROUPS
	struct thread_group     *thread_group;
#endif

#if defined(CONFIG_SCHED_MULTIQ)
	sched_group_t           sched_group;
#endif /* defined(CONFIG_SCHED_MULTIQ) */

	/* Data used during setrun/dispatch */
	processor_t             bound_processor;        /* bound to a processor? */
	processor_t             last_processor;         /* processor last dispatched on */
	processor_t             chosen_processor;       /* Where we want to run this thread */

	/* Fail-safe computation since last unblock or qualifying yield */
	uint64_t                computation_metered;
	uint64_t                computation_epoch;
	uint64_t                safe_release;           /* when to release fail-safe */

	/* Call out from scheduler */
	void                  (*sched_call)(int type, thread_t thread);

#if defined(CONFIG_SCHED_PROTO)
	uint32_t                runqueue_generation;    /* last time runqueue was drained */
#endif

	/* Statistics and timesharing calculations */
#if defined(CONFIG_SCHED_TIMESHARE_CORE)
	natural_t               sched_stamp;            /* last scheduler tick */
	natural_t               sched_usage;            /* timesharing cpu usage [sched] */
	natural_t               pri_shift;              /* usage -> priority from pset */
	natural_t               cpu_usage;              /* instrumented cpu usage [%cpu] */
	natural_t               cpu_delta;              /* accumulated cpu_usage delta */
#endif /* CONFIG_SCHED_TIMESHARE_CORE */

	uint32_t                c_switch;               /* total context switches */
	uint32_t                p_switch;               /* total processor switches */
	uint32_t                ps_switch;              /* total pset switches */

	integer_t mutex_count;  /* total count of locks held */
	/* Timing data structures */
	uint64_t                sched_time_save;        /* saved time for scheduler tick */
	uint64_t                vtimer_user_save;       /* saved values for vtimers */
	uint64_t                vtimer_prof_save;
	uint64_t                vtimer_rlim_save;
	uint64_t                vtimer_qos_save;

	timer_data_t            runnable_timer;         /* time the thread is runnable (including running) */

	struct recount_thread   th_recount;             /* resource accounting */

#if CONFIG_SCHED_SFI
	/* Timing for wait state */
	uint64_t                wait_sfi_begin_time;    /* start time for thread waiting in SFI */
#endif

	/*
	 * Processor/cache affinity
	 * - affinity_threads links task threads with the same affinity set
	 */
	queue_chain_t           affinity_threads;
	affinity_set_t          affinity_set;

#if CONFIG_TASKWATCH
	task_watch_t           *taskwatch;              /* task watch */
#endif /* CONFIG_TASKWATCH */

	/* Various bits of state to stash across a continuation, exclusive to the current thread block point */
	union {
		struct {
			mach_msg_return_t       state;          /* receive state */
			mach_port_seqno_t       seqno;          /* seqno of recvd message */
			ipc_object_t            object;         /* object received on */
			mach_vm_address_t       msg_addr;       /* receive msg buffer pointer */
			mach_vm_address_t       aux_addr;       /* receive aux buffer pointer */
			mach_msg_size_t         max_msize;      /* max rcv size for msg */
			mach_msg_size_t         max_asize;      /* max rcv size for aux data */
			mach_msg_size_t         msize;          /* actual size for the msg */
			mach_msg_size_t         asize;          /* actual size for aux data */
			mach_msg_option64_t     option;         /* 64 bits options for receive */
			mach_port_name_t        receiver_name;  /* the receive port name */
			union {
				struct ipc_kmsg   *kmsg;        /* received message */
				struct ipc_mqueue *peekq;       /* mqueue to peek at */
			};
		} receive;
		struct {
			struct semaphore        *waitsemaphore;         /* semaphore ref */
			struct semaphore        *signalsemaphore;       /* semaphore ref */
			int                     options;                /* semaphore options */
			kern_return_t           result;                 /* primary result */
			mach_msg_continue_t continuation;
		} sema;
		struct {
#define THREAD_SAVE_IOKIT_TLS_COUNT     8
			void                    *tls[THREAD_SAVE_IOKIT_TLS_COUNT];
		} iokit;
	} saved;

	/* Only user threads can cause guard exceptions; only kernel threads can be thread call threads */
	union {
		/* Thread call thread's state structure, stored on its stack */
		struct thread_call_thread_state *thc_state;

		/* Structure to save information about guard exception */
		struct {
			mach_exception_code_t           code;
			mach_exception_subcode_t        subcode;
		} guard_exc_info;
	};

	/* User level suspensions */
	int32_t                 user_stop_count;

	/* IPC data structures */
#if IMPORTANCE_INHERITANCE
	natural_t ith_assertions;                       /* assertions pending drop */
#endif
	circle_queue_head_t     ith_messages;           /* messages to reap */
	mach_port_t             ith_kernel_reply_port;  /* reply port for kernel RPCs */

	/* Pending thread ast(s) */
	os_atomic(ast_t)        ast;

	/* Ast/Halt data structures */
	vm_offset_t             recover;                /* page fault recover(copyin/out) */

	queue_chain_t           threads;                /* global list of all threads */

	/* Activation */
	queue_chain_t           task_threads;

	/* Task membership */
#if __x86_64__ || __arm__
	struct task            *t_task;
#endif
	struct thread_ro       *t_tro;
	vm_map_t                map;
	thread_t                handoff_thread;

	/* Timed wait expiration */
	timer_call_t            wait_timer;
	uint16_t                wait_timer_active;
	bool                    wait_timer_is_set;

	/* Miscellaneous bits guarded by mutex */
	uint32_t
	    active:1,           /* Thread is active and has not been terminated */
	    ipc_active:1,       /* IPC with the thread ports is allowed */
	    started:1,          /* Thread has been started after creation */
	    static_param:1,     /* Disallow policy parameter changes */
	    inspection:1,       /* TRUE when task is being inspected by crash reporter */
	    policy_reset:1,     /* Disallow policy parameter changes on terminating threads */
	    suspend_parked:1,   /* thread parked in thread_suspended */
	    corpse_dup:1,       /* TRUE when thread is an inactive duplicate in a corpse */
	:0;

	decl_lck_mtx_data(, mutex);

	struct ipc_port         *ith_special_reply_port;   /* ref to special reply port */

#if CONFIG_DTRACE
	uint16_t                t_dtrace_flags;         /* DTrace thread states */
#define TH_DTRACE_EXECSUCCESS   0x01
	uint16_t                t_dtrace_inprobe;       /* Executing under dtrace_probe */
	uint32_t                t_dtrace_predcache;     /* DTrace per thread predicate value hint */
	int64_t                 t_dtrace_tracing;       /* Thread time under dtrace_probe() */
	int64_t                 t_dtrace_vtime;
#endif

	clock_sec_t             t_page_creation_time;
	uint32_t                t_page_creation_count;
	uint32_t                t_page_creation_throttled;
#if (DEVELOPMENT || DEBUG)
	uint64_t                t_page_creation_throttled_hard;
	uint64_t                t_page_creation_throttled_soft;
#endif /* DEVELOPMENT || DEBUG */
	int                     t_pagein_error;         /* for vm_fault(), holds error from vnop_pagein() */

#ifdef KPERF
/* The high 8 bits are the number of frames to sample of a user callstack. */
#define T_KPERF_CALLSTACK_DEPTH_OFFSET     (24)
#define T_KPERF_SET_CALLSTACK_DEPTH(DEPTH) (((uint32_t)(DEPTH)) << T_KPERF_CALLSTACK_DEPTH_OFFSET)
#define T_KPERF_GET_CALLSTACK_DEPTH(FLAGS) ((FLAGS) >> T_KPERF_CALLSTACK_DEPTH_OFFSET)
#define T_KPERF_ACTIONID_OFFSET            (18)
#define T_KPERF_SET_ACTIONID(AID)          (((uint32_t)(AID)) << T_KPERF_ACTIONID_OFFSET)
#define T_KPERF_GET_ACTIONID(FLAGS)        ((FLAGS) >> T_KPERF_ACTIONID_OFFSET)
#endif

#define T_KPERF_AST_CALLSTACK 0x1 /* dump a callstack on thread's next AST */
#define T_KPERF_AST_DISPATCH  0x2 /* dump a name on thread's next AST */
#define T_KPC_ALLOC           0x4 /* thread needs a kpc_buf allocated */

#define T_KPERF_AST_ALL \
    (T_KPERF_AST_CALLSTACK | T_KPERF_AST_DISPATCH | T_KPC_ALLOC)
/* only go up to T_KPERF_ACTIONID_OFFSET - 1 */
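
/*
 * Illustrative encoding with the macros above (a sketch; the depth and
 * action id values are hypothetical): to request a 16-frame user callstack
 * with action id 2 on this thread's next AST, a caller would OR together:
 *
 *	T_KPERF_SET_CALLSTACK_DEPTH(16) | T_KPERF_SET_ACTIONID(2) | T_KPERF_AST_CALLSTACK
 */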

#ifdef KPERF
	uint32_t                kperf_ast;
	uint32_t                kperf_pet_gen;  /* last generation of PET that sampled this thread */
	uint32_t                kperf_c_switch; /* last dispatch detection */
	uint32_t                kperf_pet_cnt;  /* how many times a thread has been sampled by PET */
#endif

#ifdef KPC
	/* accumulated performance counters for this thread */
	uint64_t               *kpc_buf;
#endif

#if HYPERVISOR
	/* hypervisor virtual CPU object associated with this thread */
	void                   *hv_thread_target;
#endif /* HYPERVISOR */

	/* Statistics accumulated per-thread and aggregated per-task */
	uint32_t                syscalls_unix;
	uint32_t                syscalls_mach;
	ledger_t                t_ledger;
	ledger_t                t_threadledger; /* per thread ledger */
	ledger_t                t_bankledger;                /* ledger to charge someone */
	uint64_t                t_deduct_bank_ledger_time;   /* cpu time to be deducted from bank ledger */
	uint64_t                t_deduct_bank_ledger_energy; /* energy to be deducted from bank ledger */

	uint64_t                thread_id;             /* system wide unique thread-id */
	uint32_t                ctid;                  /* system wide compact thread-id */
	uint32_t                ctsid;                 /* this thread's ts ID */

	/* policy is protected by the thread mutex */
	struct thread_requested_policy  requested_policy;
	struct thread_effective_policy  effective_policy;

	/* usynch override is protected by the task lock, eventually will be thread mutex */
	struct thread_qos_override {
		struct thread_qos_override      *override_next;
		uint32_t        override_contended_resource_count;
		int16_t         override_qos;
		int16_t         override_resource_type;
		user_addr_t     override_resource;
	} *overrides;

	uint32_t                kevent_overrides;
	uint8_t                 user_promotion_basepri;
	uint8_t                 kern_promotion_schedpri;
	_Atomic uint16_t        kevent_ast_bits;

	io_stat_info_t          thread_io_stats; /* per-thread I/O statistics */

	uint32_t                thread_callout_interrupt_wakeups;
	uint32_t                thread_callout_platform_idle_wakeups;
	uint32_t                thread_timer_wakeups_bin_1;
	uint32_t                thread_timer_wakeups_bin_2;
	thread_tag_t            thread_tag;

	/*
	 * callout_* fields are only set for thread call threads, whereas guard_exc_fatal is set
	 * by user threads on themselves while taking a guard exception. So it's okay for them to
	 * share this bitfield.
	 */
	uint16_t
	    callout_woken_from_icontext:1,
	    callout_woken_from_platform_idle:1,
	    callout_woke_thread:1,
	    guard_exc_fatal:1,
	    thread_bitfield_unused:12;

	mach_port_name_t        ith_voucher_name;
	ipc_voucher_t           ith_voucher;

#if CONFIG_THREAD_GROUPS
#if CONFIG_PREADOPT_TG
	/* The preadopt thread group is set on the thread
	 *
	 *   a) By another thread when it is a creator and it is scheduled with the
	 *   thread group on the TR
	 *   b) On itself when it binds a thread request and becomes a
	 *   servicer or when it rebinds to the thread request
	 *   c) On itself when it processes knotes and finds the first
	 *   EVFILT_MACHPORT event to deliver to userspace
	 *
	 * Note that this is a full reference owned by the thread_t and not a
	 * borrowed reference.
	 *
	 * This reference is cleared from the thread_t by the thread itself at the
	 * following times:
	 *   a) When it explicitly adopts a work interval or a bank voucher
	 *   b) If it still exists on the thread, after it has unbound and is about
	 *   to park
	 *   c) During thread termination if one still exists
	 *   d) When a different preadoption thread group is set on the thread
	 *
	 * It is modified under the thread lock.
	 */
	struct thread_group     *preadopt_thread_group;

	/* This field is present to make sure that t->thread_group
	 * is always pointing to a valid thread group and isn't a dangling pointer.
	 *
	 * Consider the following scenario:
	 *	a) t->thread_group points to the preadoption thread group
	 *	b) The preadoption thread group is modified on the thread but we are
	 *	unable to resolve the hierarchy immediately due to the current state of
	 *	the thread
	 *
	 *	In order to make sure that t->thread_group points to a valid thread
	 *	group until we can resolve the hierarchy again, we save the existing
	 *	thread_group it points to in old_preadopt_thread_group. The next time a
	 *	hierarchy resolution is done, we know that t->thread_group will not point
	 *	to this field anymore, so we can clear it.
	 *
	 *	 This field always takes over the reference that was previously in
	 *	 preadopt_thread_group, so it holds a full +1.
	 */
	struct thread_group     *old_preadopt_thread_group;
#endif /* CONFIG_PREADOPT_TG */

	/* This is a borrowed reference to the TG from the ith_voucher and is saved
	 * here since we may not always be in the right context to be able to do the
	 * lookups.
	 *
	 * It is always set on self under the thread lock */
	struct thread_group     *bank_thread_group;

	/*  Whether this is the autojoin thread group or the work interval thread
	 *  group depends on whether the thread's sched_flags has the
	 *  TH_SFLAG_THREAD_GROUP_AUTO_JOIN bit set */
	union {
		/* This is a borrowed reference to the auto join thread group from the
		 * work_interval. It is set with the thread lock held */
		struct thread_group             *auto_join_thread_group;
		/* This is a borrowed reference to the explicit work_interval thread group
		 * and is always set on self */
		struct thread_group             *work_interval_thread_group;
	};
#endif /* CONFIG_THREAD_GROUPS */

	/* work interval (if any) associated with the thread. Only modified by the
	 * current thread on itself, or by another thread when the target thread is
	 * held off the runq */
	struct work_interval            *th_work_interval;
	thread_work_interval_flags_t    th_work_interval_flags;

#define THREAD_BOUND_CLUSTER_NONE       (UINT32_MAX)
	uint32_t                 th_bound_cluster_id;

#if SCHED_TRACE_THREAD_WAKEUPS
	uintptr_t               thread_wakeup_bt[64];
#endif
	turnstile_update_flags_t inheritor_flags; /* inheritor flags for inheritor field */
	block_hint_t            pending_block_hint;
	block_hint_t            block_hint;      /* What type of primitive last caused us to block. */
	uint32_t                decompressions;  /* Per-thread decompressions counter to be added to per-task decompressions counter */
	int                     thread_region_page_shift; /* Page shift that this thread would like to use when */
	                                                  /* introspecting a task. This is currently being used */
	                                                  /* by footprint, which uses a thread for each task being inspected. */
#if CONFIG_IOSCHED
	void                   *decmp_upl;
#endif /* CONFIG_IOSCHED */
	struct knote            *ith_knote;         /* knote fired for rcv */
};

#define ith_state           saved.receive.state
#define ith_object          saved.receive.object
#define ith_msg_addr        saved.receive.msg_addr
#define ith_aux_addr        saved.receive.aux_addr
#define ith_max_msize       saved.receive.max_msize
#define ith_max_asize       saved.receive.max_asize
#define ith_msize           saved.receive.msize
#define ith_asize           saved.receive.asize
#define ith_option          saved.receive.option
#define ith_receiver_name   saved.receive.receiver_name
#define ith_kmsg            saved.receive.kmsg
#define ith_peekq           saved.receive.peekq
#define ith_seqno           saved.receive.seqno

#define sth_waitsemaphore   saved.sema.waitsemaphore
#define sth_signalsemaphore saved.sema.signalsemaphore
#define sth_options         saved.sema.options
#define sth_result          saved.sema.result
#define sth_continuation    saved.sema.continuation

#define ITH_KNOTE_NULL      ((void *)NULL)
#define ITH_KNOTE_PSEUDO    ((void *)0xdeadbeef)
/*
 * The ith_knote is used during message delivery and can safely be
 * interpreted only in those codepaths, which is what the test for the
 * msgt_name being RECEIVE or SEND_ONCE guards.
 */
#define ITH_KNOTE_VALID(kn, msgt_name) \
	        (((kn) != ITH_KNOTE_NULL && (kn) != ITH_KNOTE_PSEUDO) && \
	         ((msgt_name) == MACH_MSG_TYPE_PORT_RECEIVE || \
	         (msgt_name) == MACH_MSG_TYPE_PORT_SEND_ONCE))

#if MACH_ASSERT
#define assert_thread_magic(thread) assertf((thread)->thread_magic == THREAD_MAGIC, \
	                                    "bad thread magic 0x%llx for thread %p, expected 0x%llx", \
	                                    (thread)->thread_magic, (thread), THREAD_MAGIC)
#else
#define assert_thread_magic(thread) do { (void)(thread); } while (0)
#endif

extern thread_t                 thread_bootstrap(void);

extern void                     thread_machine_init_template(void);

extern void                     thread_init(void);

extern void                     thread_daemon_init(void);

extern void                     thread_reference(
	thread_t                thread);

extern void                     thread_deallocate(
	thread_t                thread);

extern void                     thread_inspect_deallocate(
	thread_inspect_t        thread);

extern void                     thread_read_deallocate(
	thread_read_t           thread);

extern void                     thread_terminate_self(void);

extern kern_return_t    thread_terminate_internal(
	thread_t                    thread);

extern void                     thread_start(
	thread_t                        thread) __attribute__ ((noinline));

extern void                     thread_start_in_assert_wait(
	thread_t                        thread,
	event_t             event,
	wait_interrupt_t    interruptible) __attribute__ ((noinline));

extern void                     thread_terminate_enqueue(
	thread_t                thread);

extern void                     thread_exception_enqueue(
	task_t          task,
	thread_t        thread,
	exception_type_t etype);

extern void thread_backtrace_enqueue(
	kcdata_object_t obj,
	exception_port_t     ports[static BT_EXC_PORTS_COUNT],
	exception_type_t etype);

extern void                     thread_copy_resource_info(
	thread_t dst_thread,
	thread_t src_thread);

extern void                     thread_terminate_crashed_threads(void);

extern void                     thread_stack_enqueue(
	thread_t                thread);

extern void                     thread_hold(
	thread_t        thread);

extern void                     thread_release(
	thread_t        thread);

extern void                     thread_corpse_continue(void) __dead2;

extern boolean_t                thread_is_active(thread_t thread);

extern lck_grp_t                thread_lck_grp;

/* Locking for scheduler state, always acquired with interrupts disabled (splsched()) */
#define thread_lock_init(th)    simple_lock_init(&(th)->sched_lock, 0)
#define thread_lock(th)                 simple_lock(&(th)->sched_lock, &thread_lck_grp)
#define thread_unlock(th)               simple_unlock(&(th)->sched_lock)

#define wake_lock_init(th)              simple_lock_init(&(th)->wake_lock, 0)
#define wake_lock(th)                   simple_lock(&(th)->wake_lock, &thread_lck_grp)
#define wake_unlock(th)                 simple_unlock(&(th)->wake_lock)
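
/*
 * Typical pattern (an illustrative sketch mirroring existing callers):
 * the thread lock is a spin lock taken at splsched(), so a critical
 * section over scheduler state looks like:
 *
 *	spl_t s = splsched();
 *	thread_lock(thread);
 *	...examine or update scheduling state...
 *	thread_unlock(thread);
 *	splx(s);
 */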

#define thread_should_halt_fast(thread)         (!(thread)->active)

extern void                             stack_alloc(
	thread_t                thread);

extern void                     stack_handoff(
	thread_t                from,
	thread_t                to);

extern void                             stack_free(
	thread_t                thread);

extern void                             stack_free_reserved(
	thread_t                thread);

extern boolean_t                stack_alloc_try(
	thread_t            thread);

extern void                             stack_collect(void);

extern kern_return_t    thread_info_internal(
	thread_t                                thread,
	thread_flavor_t                 flavor,
	thread_info_t                   thread_info_out,
	mach_msg_type_number_t  *thread_info_count);

extern kern_return_t    kernel_thread_create(
	thread_continue_t       continuation,
	void                            *parameter,
	integer_t                       priority,
	thread_t                        *new_thread);

extern kern_return_t    kernel_thread_start_priority(
	thread_continue_t       continuation,
	void                            *parameter,
	integer_t                       priority,
	thread_t                        *new_thread);

extern void                             machine_stack_attach(
	thread_t                thread,
	vm_offset_t             stack);

extern vm_offset_t              machine_stack_detach(
	thread_t                thread);

extern void                             machine_stack_handoff(
	thread_t                old,
	thread_t                new);

extern thread_t                 machine_switch_context(
	thread_t                        old_thread,
	thread_continue_t       continuation,
	thread_t                        new_thread);

extern void                             machine_load_context(
	thread_t                thread) __attribute__((noreturn));

extern void             machine_thread_state_initialize(
	thread_t                                thread);

extern kern_return_t    machine_thread_set_state(
	thread_t                                thread,
	thread_flavor_t                 flavor,
	thread_state_t                  state,
	mach_msg_type_number_t  count);

extern mach_vm_address_t machine_thread_pc(
	thread_t                thread);

extern void machine_thread_reset_pc(
	thread_t                thread,
	mach_vm_address_t       pc);

extern boolean_t        machine_thread_on_core(
	thread_t                thread);

extern boolean_t        machine_thread_on_core_allow_invalid(
	thread_t                thread);

extern kern_return_t    machine_thread_get_state(
	thread_t                                thread,
	thread_flavor_t                 flavor,
	thread_state_t                  state,
	mach_msg_type_number_t  *count);

extern kern_return_t    machine_thread_state_convert_from_user(
	thread_t                                thread,
	thread_flavor_t                 flavor,
	thread_state_t                  tstate,
	mach_msg_type_number_t  count,
	thread_state_t old_tstate,
	mach_msg_type_number_t old_count,
	thread_set_status_flags_t tssf_flags);

extern kern_return_t    machine_thread_state_convert_to_user(
	thread_t                                thread,
	thread_flavor_t                 flavor,
	thread_state_t                  tstate,
	mach_msg_type_number_t  *count,
	thread_set_status_flags_t tssf_flags);

extern kern_return_t    machine_thread_dup(
	thread_t                self,
	thread_t                target,
	boolean_t               is_corpse);

extern void             machine_thread_init(void);

extern void             machine_thread_template_init(thread_t thr_template);


extern void             machine_thread_create(
	thread_t                thread,
	task_t                  task,
	bool                    first_thread);

extern kern_return_t    machine_thread_process_signature(
	thread_t                thread,
	task_t                  task);

extern void             machine_thread_switch_addrmode(
	thread_t                 thread);

extern void                 machine_thread_destroy(
	thread_t                thread);

extern void                             machine_set_current_thread(
	thread_t                        thread);

extern kern_return_t    machine_thread_get_kern_state(
	thread_t                                thread,
	thread_flavor_t                 flavor,
	thread_state_t                  tstate,
	mach_msg_type_number_t  *count);

extern kern_return_t    machine_thread_inherit_taskwide(
	thread_t                thread,
	task_t                  parent_task);

extern kern_return_t    machine_thread_set_tsd_base(
	thread_t                                thread,
	mach_vm_offset_t                tsd_base);

#define thread_mtx_try(thread)                  lck_mtx_try_lock(&(thread)->mutex)
#define thread_mtx_held(thread)                 lck_mtx_assert(&(thread)->mutex, LCK_MTX_ASSERT_OWNED)

extern void thread_apc_ast(thread_t thread);

extern void thread_update_qos_cpu_time(thread_t thread);

void act_machine_sv_free(thread_t, int);

vm_offset_t                     min_valid_stack_address(void);
vm_offset_t                     max_valid_stack_address(void);

extern bool thread_no_smt(thread_t thread);
extern bool processor_active_thread_no_smt(processor_t processor);

extern void thread_set_options(uint32_t thopt);

#if CONFIG_THREAD_GROUPS
struct thread_group *thread_get_current_voucher_thread_group(thread_t thread);
#endif /* CONFIG_THREAD_GROUPS */

#endif  /* MACH_KERNEL_PRIVATE */
#if BSD_KERNEL_PRIVATE

/* Duplicated from osfmk/kern/ipc_tt.h */
__options_decl(port_intrans_options_t, uint32_t, {
	PORT_INTRANS_OPTIONS_NONE              = 0x0000,
	PORT_INTRANS_THREAD_IN_CURRENT_TASK    = 0x0001,
	PORT_INTRANS_THREAD_NOT_CURRENT_THREAD = 0x0002,

	PORT_INTRANS_SKIP_TASK_EVAL            = 0x0004,
	PORT_INTRANS_ALLOW_CORPSE_TASK         = 0x0008,
});

extern thread_t port_name_to_thread(
	mach_port_name_t            port_name,
	port_intrans_options_t    options);

#endif /* BSD_KERNEL_PRIVATE */
#ifdef XNU_KERNEL_PRIVATE

extern void                     thread_require(
	thread_t        thread);

extern void                     thread_deallocate_safe(
	thread_t                thread);

extern uint64_t                 thread_rettokern_addr(
	thread_t thread);

extern uint64_t                 thread_wqquantum_addr(
	thread_t thread);

extern integer_t        thread_kern_get_pri(thread_t thr) __pure2;

extern void             thread_kern_set_pri(thread_t thr, integer_t pri);

extern integer_t        thread_kern_get_kernel_maxpri(void) __pure2;

uint16_t        thread_set_tag(thread_t thread, uint16_t tag);
uint16_t        thread_get_tag(thread_t thread);

__options_decl(shared_rsrc_policy_agent_t, uint32_t, {
	SHARED_RSRC_POLICY_AGENT_DISPATCH = 0,
	SHARED_RSRC_POLICY_AGENT_SYSCTL = 1,
	SHARED_RSRC_POLICY_AGENT_PERFCTL_CSW = 2,
	SHARED_RSRC_POLICY_AGENT_PERFCTL_QUANTUM = 3,
});

boolean_t       thread_shared_rsrc_policy_get(thread_t thread, cluster_shared_rsrc_type_t type);
kern_return_t   thread_shared_rsrc_policy_set(thread_t thread, uint32_t index, cluster_shared_rsrc_type_t type, shared_rsrc_policy_agent_t agent);
kern_return_t   thread_shared_rsrc_policy_clear(thread_t thread, cluster_shared_rsrc_type_t type, shared_rsrc_policy_agent_t agent);

#ifdef MACH_KERNEL_PRIVATE
static inline thread_tag_t
thread_set_tag_internal(thread_t thread, thread_tag_t tag)
{
	return os_atomic_or_orig(&thread->thread_tag, tag, relaxed);
}

static inline thread_tag_t
thread_get_tag_internal(thread_t thread)
{
	return thread->thread_tag;
}
#endif /* MACH_KERNEL_PRIVATE */
1255 
1256 uint64_t        thread_last_run_time(thread_t thread);
1257 
1258 extern kern_return_t    thread_state_initialize(
1259 	thread_t                                thread);
1260 
1261 extern kern_return_t    thread_setstatus(
1262 	thread_t                                thread,
1263 	int                                             flavor,
1264 	thread_state_t                  tstate,
1265 	mach_msg_type_number_t  count);
1266 
1267 extern kern_return_t    thread_setstatus_from_user(
1268 	thread_t                                thread,
1269 	int                                             flavor,
1270 	thread_state_t                  tstate,
1271 	mach_msg_type_number_t  count,
1272 	thread_state_t                  old_tstate,
1273 	mach_msg_type_number_t  old_count,
1274 	thread_set_status_flags_t flags);
1275 
1276 extern kern_return_t    thread_getstatus(
1277 	thread_t                                thread,
1278 	int                                             flavor,
1279 	thread_state_t                  tstate,
1280 	mach_msg_type_number_t  *count);
1281 
1282 extern void main_thread_set_immovable_pinned(thread_t thread);
1283 
1284 extern kern_return_t    thread_getstatus_to_user(
1285 	thread_t                                thread,
1286 	int                                             flavor,
1287 	thread_state_t                  tstate,
1288 	mach_msg_type_number_t  *count,
1289 	thread_set_status_flags_t flags);
1290 
1291 extern kern_return_t    thread_create_with_continuation(
1292 	task_t task,
1293 	thread_t *new_thread,
1294 	thread_continue_t continuation);
1295 
1296 extern kern_return_t main_thread_create_waiting(task_t    task,
1297     thread_continue_t              continuation,
1298     event_t                        event,
1299     thread_t                       *new_thread);
1300 
1301 extern kern_return_t    thread_create_workq_waiting(
1302 	task_t                  task,
1303 	thread_continue_t       thread_return,
1304 	thread_t                *new_thread);
1305 
1306 extern  void    thread_yield_internal(
1307 	mach_msg_timeout_t      interval);
1308 
1309 extern void thread_yield_to_preemption(void);
1310 
1311 extern void thread_depress_timer_setup(thread_t self);
1312 
1313 /*
1314  * Thread-private CPU limits: apply a private CPU limit to this thread only. Available actions are:
1315  *
1316  * 1) Block. Prevent CPU consumption of the thread from exceeding the limit.
1317  * 2) Exception. Generate a resource consumption exception when the limit is exceeded.
1318  * 3) Disable. Remove any existing CPU limit.
1319  */
1320 #define THREAD_CPULIMIT_BLOCK           0x1
1321 #define THREAD_CPULIMIT_EXCEPTION       0x2
1322 #define THREAD_CPULIMIT_DISABLE         0x3
1323 
1324 struct _thread_ledger_indices {
1325 	int cpu_time;
1326 };
1327 
1328 extern struct _thread_ledger_indices thread_ledgers;
1329 
1330 extern int thread_get_cpulimit(int *action, uint8_t *percentage, uint64_t *interval_ns);
1331 extern int thread_set_cpulimit(int action, uint8_t percentage, uint64_t interval_ns);
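
/*
 * Example (illustrative sketch): limit the calling thread to 50% of the CPU
 * over a 100ms window, blocking it once the limit is hit, and later remove
 * the limit.
 *
 *	thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, 50, 100 * NSEC_PER_MSEC);
 *	...
 *	thread_set_cpulimit(THREAD_CPULIMIT_DISABLE, 0, 0);
 */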

extern void thread_read_times(
	thread_t         thread,
	time_value_t    *user_time,
	time_value_t    *system_time,
	time_value_t    *runnable_time);

extern void thread_read_times_unsafe(
	thread_t         thread,
	time_value_t    *user_time,
	time_value_t    *system_time,
	time_value_t    *runnable_time);

extern uint64_t         thread_get_runtime_self(void);

extern void                     thread_setuserstack(
	thread_t                thread,
	mach_vm_offset_t        user_stack);

extern user_addr_t         thread_adjuserstack(
	thread_t                thread,
	int                             adjust);


extern void                     thread_setentrypoint(
	thread_t                thread,
	mach_vm_offset_t        entry);

extern kern_return_t    thread_set_tsd_base(
	thread_t        thread,
	mach_vm_offset_t tsd_base);

extern kern_return_t    thread_setsinglestep(
	thread_t                thread,
	int                     on);

extern kern_return_t    thread_userstack(
	thread_t,
	int,
	thread_state_t,
	unsigned int,
	mach_vm_offset_t *,
	int *,
	boolean_t);

extern kern_return_t    thread_entrypoint(
	thread_t,
	int,
	thread_state_t,
	unsigned int,
	mach_vm_offset_t *);

extern kern_return_t    thread_userstackdefault(
	mach_vm_offset_t *,
	boolean_t);

extern kern_return_t    thread_wire_internal(
	host_priv_t             host_priv,
	thread_t                thread,
	boolean_t               wired,
	boolean_t               *prev_state);


extern kern_return_t    thread_dup(thread_t);

extern kern_return_t thread_dup2(thread_t, thread_t);

#if !defined(_SCHED_CALL_T_DEFINED)
#define _SCHED_CALL_T_DEFINED
typedef void    (*sched_call_t)(
	int                             type,
	thread_t                thread);
#endif

#define SCHED_CALL_BLOCK                0x1
#define SCHED_CALL_UNBLOCK              0x2

extern void             thread_sched_call(
	thread_t                thread,
	sched_call_t    call);
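
/*
 * Example (illustrative sketch; `my_sched_call` is a hypothetical callout):
 * a sched_call_t is invoked as the thread blocks and unblocks.
 *
 *	static void
 *	my_sched_call(int type, thread_t thread)
 *	{
 *		switch (type) {
 *		case SCHED_CALL_BLOCK:
 *			// the thread is about to block
 *			break;
 *		case SCHED_CALL_UNBLOCK:
 *			// the thread is becoming runnable again
 *			break;
 *		}
 *	}
 *
 *	thread_sched_call(thread, my_sched_call);
 */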

extern boolean_t        thread_is_static_param(
	thread_t                thread);

extern task_t   get_threadtask(thread_t) __pure2;

extern task_t   get_threadtask_early(thread_t) __pure2;

/*
 * Thread is running within a 64-bit address space.
 */
#define thread_is_64bit_addr(thd)       \
	task_has_64Bit_addr(get_threadtask(thd))

/*
 * Thread is using 64-bit machine state.
 */
#define thread_is_64bit_data(thd)       \
	task_has_64Bit_data(get_threadtask(thd))

struct uthread;

#if defined(__x86_64__)
extern int              thread_task_has_ldt(thread_t);
#endif
extern void             set_thread_pagein_error(thread_t, int);
extern event_t          workq_thread_init_and_wq_lock(task_t, thread_t); // bsd/pthread/

struct proc;
struct uthread;
struct image_params;
extern const size_t     uthread_size;
extern thread_ro_t      get_thread_ro_unchecked(thread_t) __pure2;
extern thread_ro_t      get_thread_ro(thread_t) __pure2;
extern thread_ro_t      current_thread_ro_unchecked(void) __pure2;
extern thread_ro_t      current_thread_ro(void) __pure2;
extern void             clear_thread_ro_proc(thread_t);
extern struct uthread  *get_bsdthread_info(thread_t) __pure2;
extern thread_t         get_machthread(struct uthread *) __pure2;
extern uint64_t         uthread_tid(struct uthread *) __pure2;
extern user_addr_t      thread_get_sigreturn_token(thread_t thread);
extern uint32_t         thread_get_sigreturn_diversifier(thread_t thread);
extern void             uthread_init(task_t, struct uthread *, thread_ro_t, int);
extern void             uthread_cleanup_name(struct uthread *uthread);
extern void             uthread_cleanup(struct uthread *, thread_ro_t);
extern void             uthread_cred_ref(struct ucred *);
extern void             uthread_cred_free(struct ucred *);
extern void             uthread_destroy(struct uthread *);
extern void             uthread_reset_proc_refcount(struct uthread *);
extern void             thread_ro_update_cred(thread_ro_t, struct ucred *);
extern void             thread_ro_update_flags(thread_ro_t, thread_ro_flags_t add, thread_ro_flags_t clr);
extern void             uthread_set_exec_data(struct uthread *uth, struct image_params *imgp);
extern bool             uthread_is64bit(struct uthread *uth) __pure2;
#if PROC_REF_DEBUG
extern void             uthread_init_proc_refcount(struct uthread *);
extern void             uthread_destroy_proc_refcount(struct uthread *);
extern void             uthread_assert_zero_proc_refcount(struct uthread *);
#else
#define                 uthread_init_proc_refcount(uth)        ((void)(uth))
#define                 uthread_destroy_proc_refcount(uth)     ((void)(uth))
#define                 uthread_assert_zero_proc_refcount(uth) ((void)(uth))
#endif
#if CONFIG_DEBUG_SYSCALL_REJECTION
extern uint64_t         uthread_get_syscall_rejection_flags(void *);
extern uint64_t         *uthread_get_syscall_rejection_mask(void *);
extern uint64_t         *uthread_get_syscall_rejection_once_mask(void *);
extern bool             uthread_syscall_rejection_is_enabled(void *);
#endif /* CONFIG_DEBUG_SYSCALL_REJECTION */
extern mach_port_name_t  uthread_joiner_port(struct uthread *);
extern user_addr_t       uthread_joiner_address(struct uthread *);
extern void              uthread_joiner_wake(task_t task, struct uthread *);

extern boolean_t        thread_should_halt(
	thread_t                thread);

extern boolean_t        thread_should_abort(
	thread_t);

extern int is_64signalregset(void);

extern void act_set_kperf(thread_t);
extern void act_set_astledger(thread_t thread);
extern void act_set_astledger_async(thread_t thread);
extern void act_set_io_telemetry_ast(thread_t);
extern void act_set_macf_telemetry_ast(thread_t);
extern void act_set_astproc_resource(thread_t);

extern vm_offset_t thread_get_kernel_stack(thread_t);

extern kern_return_t thread_process_signature(thread_t thread, task_t task);

extern uint32_t dtrace_get_thread_predcache(thread_t);
extern int64_t dtrace_get_thread_vtime(thread_t);
extern int64_t dtrace_get_thread_tracing(thread_t);
extern uint16_t dtrace_get_thread_inprobe(thread_t);
extern int dtrace_get_thread_last_cpu_id(thread_t);
extern vm_offset_t dtrace_get_kernel_stack(thread_t);
#define dtrace_get_kernel_stack thread_get_kernel_stack
extern void dtrace_set_thread_predcache(thread_t, uint32_t);
extern void dtrace_set_thread_vtime(thread_t, int64_t);
extern void dtrace_set_thread_tracing(thread_t, int64_t);
extern void dtrace_set_thread_inprobe(thread_t, uint16_t);
extern void dtrace_thread_bootstrap(void);
extern void dtrace_thread_didexec(thread_t);

extern int64_t dtrace_calc_thread_recent_vtime(thread_t);


extern kern_return_t    thread_set_wq_state32(
	thread_t          thread,
	thread_state_t    tstate);

extern kern_return_t    thread_set_wq_state64(
	thread_t          thread,
	thread_state_t    tstate);

extern vm_offset_t      kernel_stack_mask;
extern vm_offset_t      kernel_stack_size;
extern vm_offset_t      kernel_stack_depth_max;

extern void guard_ast(thread_t);
extern void fd_guard_ast(thread_t,
    mach_exception_code_t, mach_exception_subcode_t);
#if CONFIG_VNGUARD
extern void vn_guard_ast(thread_t,
    mach_exception_code_t, mach_exception_subcode_t);
#endif
extern void mach_port_guard_ast(thread_t,
    mach_exception_code_t, mach_exception_subcode_t);
extern void virt_memory_guard_ast(thread_t,
    mach_exception_code_t, mach_exception_subcode_t);
extern void thread_guard_violation(thread_t,
    mach_exception_code_t, mach_exception_subcode_t, boolean_t);
extern void thread_update_io_stats(thread_t, int size, int io_flags);

extern kern_return_t    thread_set_voucher_name(mach_port_name_t name);
extern kern_return_t thread_get_current_voucher_origin_pid(int32_t *pid);

extern void thread_enable_send_importance(thread_t thread, boolean_t enable);

/*
 * Translate signal context data pointer to userspace representation
 */

extern kern_return_t    machine_thread_siguctx_pointer_convert_to_user(
	thread_t thread,
	user_addr_t *uctxp);

extern void machine_tecs(thread_t thr);

typedef enum cpuvn {
	CPUVN_CI = 1
} cpuvn_e;

extern int machine_csv(cpuvn_e cve);
#if defined(__x86_64__)
extern void machine_thread_set_insn_copy_optout(thread_t thr);
#endif

/*
 * Translate array of function pointer syscall arguments from userspace representation
 */

extern kern_return_t    machine_thread_function_pointers_convert_from_user(
	thread_t thread,
	user_addr_t *fptrs,
	uint32_t count);

/*
 * Get the duration of the given thread's last wait.
 */
uint64_t thread_get_last_wait_duration(thread_t thread);

extern bool thread_get_no_smt(void);
#if defined(__x86_64__)
extern bool curtask_get_insn_copy_optout(void);
extern void curtask_set_insn_copy_optout(void);
#endif /* defined(__x86_64__) */

/*! @function ctid_get_thread
 *  @abstract Translates a ctid_t to a thread_t.
 *  @discussion ctids are system-wide compact thread IDs, associated with a
 *              thread_t at thread creation and recycled at thread
 *              termination. If a ctid is referenced past the corresponding
 *              thread's termination, it is considered stale and the behavior
 *              is undefined. Note that this call does not acquire a reference
 *              on the thread, so as soon as the matching thread terminates,
 *              the ctid becomes stale and may be re-used and associated with
 *              another thread. You must externally guarantee that the thread
 *              will not exit while you are using its ctid.
 *  @result   The thread_t corresponding to ctid.
 */
extern thread_t ctid_get_thread(ctid_t ctid);

/*! @function ctid_get_thread_unsafe
 *  @abstract Translates a ctid_t to a thread_t.
 *  @discussion Unsafe variant of ctid_get_thread() to be used when the
 *              caller cannot guarantee the liveness of this ctid_t.
 *              May return NULL or a freed thread_t.
 */
extern thread_t ctid_get_thread_unsafe(ctid_t ctid);

/*!
 *   @function thread_get_ctid
 *   @abstract Returns the ctid of a thread.
 *   @param thread The thread whose ctid is returned.
 *   @discussion The ctid provided becomes stale after the matching thread
 *               terminates.
 *   @result The uint32_t ctid.
 */
extern ctid_t thread_get_ctid(thread_t thread);
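
/*
 * Example (illustrative sketch): record a compact id for a thread whose
 * liveness the caller guarantees by other means, then resolve it later.
 *
 *	ctid_t ctid = thread_get_ctid(thread);
 *	...
 *	thread_t same_thread = ctid_get_thread(ctid);
 */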

#endif  /* XNU_KERNEL_PRIVATE */
#ifdef KERNEL_PRIVATE

typedef struct thread_pri_floor {
	thread_t thread;
} thread_pri_floor_t;

#ifdef MACH_KERNEL_PRIVATE
extern void thread_floor_boost_ast(thread_t thread);
extern void thread_floor_boost_set_promotion_locked(thread_t thread);
#endif /* MACH_KERNEL_PRIVATE */

/*!  @function thread_priority_floor_start
 *   @abstract Boosts the current thread's priority to a floor.
 *   @discussion Raises the priority of the current thread to at least
 *       MINPRI_FLOOR. The boost is maintained until a corresponding
 *       thread_priority_floor_end() is called. Every call to
 *       thread_priority_floor_start() must be paired with a call to
 *       thread_priority_floor_end() from the same thread.
 *       No thread can return to userspace before calling thread_priority_floor_end().
 *
 *       NOTE: Avoid using this function. Prefer gate_t or sleep_with_inheritor()
 *       instead.
 *   @result A token to be passed to the corresponding thread_priority_floor_end().
 */
extern thread_pri_floor_t thread_priority_floor_start(void);
/*!  @function thread_priority_floor_end
 *   @abstract Ends the floor boost.
 *   @param token The token obtained from thread_priority_floor_start().
 *   @discussion Ends the priority floor boost started with thread_priority_floor_start().
 */
extern void thread_priority_floor_end(thread_pri_floor_t *token);
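
/*
 * Example (illustrative sketch): hold a priority floor across a short,
 * bounded critical section.
 *
 *	thread_pri_floor_t token = thread_priority_floor_start();
 *	...critical section that must not run below MINPRI_FLOOR...
 *	thread_priority_floor_end(&token);
 */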

extern void thread_set_no_smt(bool set);

extern void thread_mtx_lock(thread_t thread);

extern void thread_mtx_unlock(thread_t thread);

extern uint64_t thread_dispatchqaddr(
	thread_t thread);

bool thread_is_eager_preempt(thread_t thread);
void thread_set_eager_preempt(thread_t thread);
void thread_clear_eager_preempt(thread_t thread);
void thread_set_honor_qlimit(thread_t thread);
void thread_clear_honor_qlimit(thread_t thread);
extern ipc_port_t convert_thread_to_port(thread_t);
extern ipc_port_t convert_thread_to_port_pinned(thread_t);
extern ipc_port_t convert_thread_inspect_to_port(thread_inspect_t);
extern ipc_port_t convert_thread_read_to_port(thread_read_t);
extern boolean_t is_external_pageout_thread(void);
extern boolean_t is_vm_privileged(void);
extern boolean_t set_vm_privilege(boolean_t);
extern kern_allocation_name_t thread_set_allocation_name(kern_allocation_name_t new_name);
extern void *thread_iokit_tls_get(uint32_t index);
extern void thread_iokit_tls_set(uint32_t index, void * data);
extern int thread_self_region_page_shift(void);
extern void thread_self_region_page_shift_set(int pgshift);
extern kern_return_t thread_create_immovable(task_t task, thread_t *new_thread);
extern kern_return_t thread_terminate_pinned(thread_t thread);

struct thread_attr_for_ipc_propagation;
extern kern_return_t thread_get_ipc_propagate_attr(thread_t thread, struct thread_attr_for_ipc_propagation *attr);
extern size_t thread_get_current_exec_path(char *path, size_t size);
#endif /* KERNEL_PRIVATE */
#ifdef XNU_KERNEL_PRIVATE

extern void
thread_get_thread_name(thread_t th, char* name);

extern bool thread_supports_cooperative_workqueue(thread_t thread);
extern void thread_arm_workqueue_quantum(thread_t thread);
extern void thread_disarm_workqueue_quantum(thread_t thread);

extern void thread_evaluate_workqueue_quantum_expiry(thread_t thread);
extern bool thread_has_expired_workqueue_quantum(thread_t thread, bool should_trace);

/* Kernel side prototypes for MIG routines */
extern kern_return_t thread_get_exception_ports(
	thread_t                        thread,
	exception_mask_t                exception_mask,
	exception_mask_array_t          masks,
	mach_msg_type_number_t          *CountCnt,
	exception_port_array_t          ports,
	exception_behavior_array_t      behaviors,
	thread_state_flavor_array_t     flavors);

extern kern_return_t thread_get_special_port(
	thread_inspect_t         thread,
	int                      which,
	ipc_port_t              *portp);

#endif /* XNU_KERNEL_PRIVATE */

/*! @function thread_has_thread_name
 *   @abstract Checks if a thread has a name.
 *   @discussion This function takes one input, a thread, and returns a
 *       boolean value indicating whether that thread already has a name
 *       associated with it.
 *   @param th The thread to inspect.
 *   @result TRUE if the thread has a name, FALSE otherwise.
 */
extern boolean_t thread_has_thread_name(thread_t th);

/*! @function thread_set_thread_name
 *   @abstract Set a thread's name.
 *   @discussion This function takes two input parameters: a thread to name,
 *       and the name to apply to the thread.  The name is copied to the
 *       thread in order to better identify it.  If the name is longer than
 *       MAXTHREADNAMESIZE - 1, it is truncated.
 *   @param th The thread to be named.
 *   @param name The name to apply to the thread.
 */
extern void thread_set_thread_name(thread_t th, const char* name);
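
/*
 * Example (illustrative sketch; the name string is arbitrary): name the
 * current thread unless it was already named.
 *
 *	if (!thread_has_thread_name(current_thread())) {
 *		thread_set_thread_name(current_thread(), "com.example.worker");
 *	}
 */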

#if !MACH_KERNEL_PRIVATE || !defined(current_thread)
extern thread_t current_thread(void) __pure2;
#endif

extern uint64_t thread_tid(thread_t thread) __pure2;

extern void thread_reference(
	thread_t        thread);

extern void thread_deallocate(
	thread_t        thread);
/*! @function kernel_thread_start
 *   @abstract Create a kernel thread.
 *   @discussion This function takes three input parameters: a reference to
 *       the function the thread should execute, caller-specified data, and
 *       a reference used to return the newly created kernel thread. It
 *       returns KERN_SUCCESS on success or an appropriate kernel return
 *       code on error. The caller is responsible for explicitly releasing
 *       the reference to the created thread when it is no longer needed,
 *       by calling thread_deallocate(new_thread).
 *   @param continuation A C-function pointer where the thread will begin execution.
 *   @param parameter Caller specified data to be passed to the new thread.
 *   @param new_thread Reference to the new thread is returned in this parameter.
 *   @result Returns KERN_SUCCESS on success or an appropriate kernel return code.
 */

extern kern_return_t    kernel_thread_start(
	thread_continue_t       continuation,
	void                    *parameter,
	thread_t                *new_thread);
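
/*
 * Example (illustrative sketch; `example_thread_main` is hypothetical):
 * start a kernel thread and immediately drop the creation reference.
 *
 *	static void
 *	example_thread_main(void *parameter, wait_result_t wresult)
 *	{
 *		...
 *	}
 *
 *	thread_t thread;
 *	if (kernel_thread_start(example_thread_main, NULL, &thread) == KERN_SUCCESS) {
 *		thread_deallocate(thread);
 *	}
 */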

__END_DECLS

#endif  /* _KERN_THREAD_H_ */