/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	thread.h
 *	Author:	Avadis Tevanian, Jr.
 *
 *	This file contains the structure definitions for threads.
 *
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to [email protected] any
 * improvements that they make and grant CSL redistribution rights.
 *
 */

#ifndef _KERN_THREAD_H_
#define _KERN_THREAD_H_

#include <mach/kern_return.h>
#include <mach/mach_types.h>
#include <mach/mach_param.h>
#include <mach/message.h>
#include <mach/boolean.h>
#include <mach/vm_param.h>
#include <mach/thread_info.h>
#include <mach/thread_status.h>
#include <mach/exception_types.h>

#include <kern/kern_types.h>
#include <vm/vm_kern.h>
#include <sys/cdefs.h>

#ifdef MACH_KERNEL_PRIVATE
#include <mach_assert.h>
#include <mach_ldebug.h>

#include <ipc/ipc_types.h>

#include <mach/port.h>
#include <kern/cpu_number.h>
#include <kern/smp.h>
#include <kern/queue.h>

#include <kern/timer.h>
#include <kern/simple_lock.h>
#include <kern/locks.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <mach/sfi_class.h>
#include <kern/thread_call.h>
#include <kern/thread_group.h>
#include <kern/timer_call.h>
#include <kern/task.h>
#include <kern/exception.h>
#include <kern/affinity.h>
#include <kern/debug.h>
#include <kern/block_hint.h>
#include <kern/turnstile.h>
#include <kern/mpsc_queue.h>

#include <kern/waitq.h>
#include <san/kasan.h>
#include <san/kcov_data.h>
#include <os/refcnt.h>

#include <ipc/ipc_kmsg.h>

#include <machine/atomic.h>
#include <machine/cpu_data.h>
#include <machine/thread.h>

#if MONOTONIC
#include <stdatomic.h>
#include <machine/monotonic.h>
#endif /* MONOTONIC */
#endif  /* MACH_KERNEL_PRIVATE */
#ifdef XNU_KERNEL_PRIVATE
/* priority queue static asserts fail for __ARM64_ARCH_8_32__ kext builds */
#include <kern/priority_queue.h>
#endif /* XNU_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef XNU_KERNEL_PRIVATE
#if CONFIG_TASKWATCH
/* Taskwatch related. TODO: find this a better home */
typedef struct task_watcher task_watch_t;
#endif /* CONFIG_TASKWATCH */

/* Thread tags; for easy identification. */
__options_closed_decl(thread_tag_t, uint16_t, {
	THREAD_TAG_MAINTHREAD   = 0x01,
	THREAD_TAG_CALLOUT      = 0x02,
	THREAD_TAG_IOWORKLOOP   = 0x04,
	THREAD_TAG_PTHREAD      = 0x10,
	THREAD_TAG_WORKQUEUE    = 0x20,
	THREAD_TAG_USER_JOIN    = 0x40,
});

__options_closed_decl(thread_ro_flags_t, uint16_t, {
	TRO_NONE                = 0x0000,
	TRO_SETUID              = 0x0001,
});

typedef struct thread_ro *thread_ro_t;

/*!
 * @struct thread_ro
 *
 * @brief
 * A structure allocated in a read-only zone that safely
 * represents the linkages of a thread to its cred, proc, task, ...
 *
 * @discussion
 * The lifetime of a @c thread_ro structure is 1:1 with that
 * of a @c thread_t or a @c uthread_t, and holding a thread reference
 * always makes it safe to dereference this structure.
 */
struct thread_ro {
	struct thread              *tro_owner;
#if MACH_BSD
	struct ucred               *tro_cred;
	struct proc                *tro_proc;
	struct proc_ro             *tro_proc_ro;
#endif
	struct task                *tro_task;
	thread_ro_flags_t           tro_flags;

	struct ipc_port            *tro_self_port;
	struct ipc_port            *tro_settable_self_port;             /* send right */
	struct ipc_port            *tro_ports[THREAD_SELF_PORT_COUNT];  /* no right */

	struct exception_action    *tro_exc_actions;
};
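
/*
 * Illustrative sketch (not part of this header's API surface): because the
 * thread_ro lifetime matches the thread's, a caller holding a thread
 * reference can follow the read-only linkages directly, e.g. via
 * get_thread_ro() declared later in this file:
 *
 *	task_t
 *	example_thread_task(thread_t thread)    // hypothetical helper
 *	{
 *		return get_thread_ro(thread)->tro_task;
 *	}
 */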

/*
 * Flags for `thread set status`.
 */
__options_decl(thread_set_status_flags_t, uint32_t, {
	TSSF_FLAGS_NONE = 0,

	/* Translate the state to user. */
	TSSF_TRANSLATE_TO_USER = 0x01,

	/* Translate the state to user, preserving flags. */
	TSSF_PRESERVE_FLAGS = 0x02,

	/* Check the kernel-signed flag. */
	TSSF_CHECK_USER_FLAGS = 0x04,

	/* Allow only user state pointers. */
	TSSF_ALLOW_ONLY_USER_PTRS = 0x08,

	/* Allow only user state. */
	TSSF_ALLOW_ONLY_USER_STATE = 0x10,

	/* Stash the sigreturn token. */
	TSSF_STASH_SIGRETURN_TOKEN = 0x20,

	/* Check the sigreturn token. */
	TSSF_CHECK_SIGRETURN_TOKEN = 0x40,

	/* Allow only a matching sigreturn token. */
	TSSF_ALLOW_ONLY_MATCHING_TOKEN = 0x80,
});

#endif /* XNU_KERNEL_PRIVATE */
#ifdef MACH_KERNEL_PRIVATE

extern zone_t thread_ro_zone;

__options_decl(thread_work_interval_flags_t, uint32_t, {
	TH_WORK_INTERVAL_FLAGS_NONE           = 0x0,
#if CONFIG_SCHED_AUTO_JOIN
	/* Flags indicating the status of the work interval the thread is currently part of */
	TH_WORK_INTERVAL_FLAGS_AUTO_JOIN_LEAK = 0x1,
#endif /* CONFIG_SCHED_AUTO_JOIN */
});

struct thread {
#if MACH_ASSERT
#define THREAD_MAGIC 0x1234ABCDDCBA4321ULL
	/* Ensure nothing uses &thread as a queue entry */
	uint64_t                thread_magic;
#endif /* MACH_ASSERT */

	/*
	 *	NOTE:	The runq field in the thread structure has an unusual
	 *	locking protocol.  If its value is PROCESSOR_NULL, then it is
	 *	locked by the thread_lock, but if its value is something else
	 *	then it is locked by the associated run queue lock. It is
	 *	set to PROCESSOR_NULL without holding the thread lock, but the
	 *	transition from PROCESSOR_NULL to non-null must be done
	 *	under the thread lock and the run queue lock.
	 *
	 *	New waitq APIs allow the 'links' and 'runq' fields to be
	 *	anywhere in the thread structure.
	 */
	union {
		queue_chain_t                   runq_links;             /* run queue links */
		queue_chain_t                   wait_links;             /* wait queue links */
		struct mpsc_queue_chain         mpsc_links;             /* thread daemon mpsc links */
		struct priority_queue_entry_sched wait_prioq_links;     /* priority ordered waitq links */
	};
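
	/*
	 * Illustrative sketch of the runq protocol described above (assumes
	 * the splsched()/thread_lock() helpers defined later in this file):
	 *
	 *	spl_t s = splsched();
	 *	thread_lock(thread);
	 *	if (thread->runq == PROCESSOR_NULL) {
	 *		// runq is stable under the thread lock; the transition
	 *		// to non-null also requires the run queue lock.
	 *	} else {
	 *		// runq is owned by that run queue's lock; take it
	 *		// before removing the thread from the queue.
	 *	}
	 *	thread_unlock(thread);
	 *	splx(s);
	 */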

	event64_t               wait_event;     /* wait queue event */
	processor_t             runq;           /* run queue assignment */
	waitq_t                 waitq;          /* wait queue this thread is enqueued on */
	struct turnstile       *turnstile;      /* thread's turnstile, protected by primitives interlock */
	void                   *inheritor;      /* inheritor of the primitive the thread will block on */
	struct priority_queue_sched_max sched_inheritor_queue; /* Inheritor queue for kernel promotion */
	struct priority_queue_sched_max base_inheritor_queue;  /* Inheritor queue for user promotion */

#if CONFIG_SCHED_EDGE
	bool            th_bound_cluster_enqueued;
	bool            th_shared_rsrc_enqueued[CLUSTER_SHARED_RSRC_TYPE_COUNT];
	bool            th_shared_rsrc_heavy_user[CLUSTER_SHARED_RSRC_TYPE_COUNT];
	bool            th_shared_rsrc_heavy_perf_control[CLUSTER_SHARED_RSRC_TYPE_COUNT];
#endif /* CONFIG_SCHED_EDGE */

#if CONFIG_SCHED_CLUTCH
	/*
	 * In the clutch scheduler, threads are maintained in runqs at the clutch_bucket
	 * level (a clutch_bucket defines a unique thread group and scheduling bucket pair).
	 * The thread is linked into the clutch bucket via three linkages:
	 *
	 * - A stable priority queue linkage which is the main runqueue (based on sched_pri) for the clutch bucket
	 * - A regular priority queue linkage which is based on the thread's base/promoted pri (used for clutch bucket priority calculation)
	 * - A queue linkage used for timesharing operations on threads at the scheduler tick
	 */
	struct priority_queue_entry_stable      th_clutch_runq_link;
	struct priority_queue_entry_sched       th_clutch_pri_link;
	queue_chain_t                           th_clutch_timeshare_link;
#endif /* CONFIG_SCHED_CLUTCH */

	/* Data updated during assert_wait/thread_wakeup */
	decl_simple_lock_data(, sched_lock);    /* scheduling lock (thread_lock()) */
	decl_simple_lock_data(, wake_lock);     /* for thread stop / wait (wake_lock()) */
	uint16_t                options;        /* options set by thread itself */
#define TH_OPT_INTMASK          0x0003          /* interrupt / abort level */
#define TH_OPT_VMPRIV           0x0004          /* may allocate reserved memory */
#define TH_OPT_SYSTEM_CRITICAL  0x0010          /* Thread must always be allowed to run - even under heavy load */
#define TH_OPT_PROC_CPULIMIT    0x0020          /* Thread has a task-wide CPU limit applied to it */
#define TH_OPT_PRVT_CPULIMIT    0x0040          /* Thread has a thread-private CPU limit applied to it */
#define TH_OPT_IDLE_THREAD      0x0080          /* Thread is a per-processor idle thread */
#define TH_OPT_GLOBAL_FORCED_IDLE       0x0100  /* Thread performs forced idle for thermal control */
#define TH_OPT_SCHED_VM_GROUP   0x0200          /* Thread belongs to special scheduler VM group */
#define TH_OPT_HONOR_QLIMIT     0x0400          /* Thread will honor qlimit while sending mach_msg, regardless of MACH_SEND_ALWAYS */
#define TH_OPT_SEND_IMPORTANCE  0x0800          /* Thread will allow importance donation from kernel rpc */
#define TH_OPT_ZONE_PRIV        0x1000          /* Thread may use the zone replenish reserve */
#define TH_OPT_IPC_TG_BLOCKED   0x2000          /* Thread blocked in sync IPC and has made the thread group blocked callout */
	bool                    wake_active;    /* wake event on stop */
	bool                    at_safe_point;  /* thread_abort_safely allowed */
	uint8_t                 sched_saved_run_weight;
#if DEVELOPMENT || DEBUG
	bool                    pmap_footprint_suspended;
#endif /* DEVELOPMENT || DEBUG */
	ast_t                   reason;         /* why we blocked */
	uint32_t                quantum_remaining;
	wait_result_t           wait_result;    /* outcome of wait -
	                                        * may be examined by this thread
	                                        * WITHOUT locking */
	thread_continue_t       continuation;   /* continue here next dispatch */
	void                   *parameter;      /* continuation parameter */

	/* Data updated/used in thread_invoke */
	vm_offset_t             kernel_stack;   /* current kernel stack */
	vm_offset_t             reserved_stack; /* reserved kernel stack */

	/*** Machine-dependent state ***/
	struct machine_thread   machine;

#if KASAN
	struct kasan_thread_data kasan_data;
#endif
#if CONFIG_KCOV
	kcov_thread_data_t       kcov_data;
#endif

	/* Thread state: */
	int                     state;
/*
 *	Thread states [bits or'ed]
 * All but TH_WAIT_REPORT are encoded in SS_TH_FLAGS
 * All are encoded in kcdata.py ('ths_state')
 */
#define TH_WAIT                 0x01            /* queued for waiting */
#define TH_SUSP                 0x02            /* stopped or requested to stop */
#define TH_RUN                  0x04            /* running or on runq */
#define TH_UNINT                0x08            /* waiting uninterruptibly */
#define TH_TERMINATE            0x10            /* halted at termination */
#define TH_TERMINATE2           0x20            /* added to termination queue */
#define TH_WAIT_REPORT          0x40            /* the wait is using the sched_call,
	                                        * only set if TH_WAIT is also set */
#define TH_IDLE                 0x80            /* idling processor */

	/* Scheduling information */
	sched_mode_t            sched_mode;     /* scheduling mode */
	sched_mode_t            saved_mode;     /* saved mode during forced mode demotion */

	/* This thread's contribution to global sched counters */
	sched_bucket_t          th_sched_bucket;

	sfi_class_id_t          sfi_class;      /* SFI class (XXX Updated on CSW/QE/AST) */
	sfi_class_id_t          sfi_wait_class; /* Currently in SFI wait for this class, protected by sfi_lock */

	uint32_t                sched_flags;            /* current flag bits */
#define TH_SFLAG_NO_SMT                 0x0001          /* On an SMT CPU, this thread must be scheduled alone */
#define TH_SFLAG_FAILSAFE               0x0002          /* fail-safe has tripped */
#define TH_SFLAG_THROTTLED              0x0004          /* throttled thread forced to timeshare mode (may be applied in addition to failsafe) */
#define TH_SFLAG_DEMOTED_MASK      (TH_SFLAG_THROTTLED | TH_SFLAG_FAILSAFE)     /* saved_mode contains previous sched_mode */

#define TH_SFLAG_PROMOTED               0x0008          /* sched pri has been promoted by kernel mutex priority promotion */
#define TH_SFLAG_ABORT                  0x0010          /* abort interruptible waits */
#define TH_SFLAG_ABORTSAFELY            0x0020          /* ... but only those at safe point */
#define TH_SFLAG_ABORTED_MASK           (TH_SFLAG_ABORT | TH_SFLAG_ABORTSAFELY)
#define TH_SFLAG_DEPRESS                0x0040          /* normal depress yield */
#define TH_SFLAG_POLLDEPRESS            0x0080          /* polled depress yield */
#define TH_SFLAG_DEPRESSED_MASK         (TH_SFLAG_DEPRESS | TH_SFLAG_POLLDEPRESS)
/* unused TH_SFLAG_PRI_UPDATE           0x0100 */
#define TH_SFLAG_EAGERPREEMPT           0x0200          /* Any preemption of this thread should be treated as if AST_URGENT applied */
#define TH_SFLAG_RW_PROMOTED            0x0400          /* promote reason: blocking with RW lock held */
#define TH_SFLAG_BASE_PRI_FROZEN        0x0800          /* (effective) base_pri is frozen */
#define TH_SFLAG_WAITQ_PROMOTED         0x1000          /* promote reason: waitq wakeup (generally for IPC receive) */

#if __AMP__
#define TH_SFLAG_ECORE_ONLY             0x2000          /* (unused) Bind thread to E core processor set */
#define TH_SFLAG_PCORE_ONLY             0x4000          /* (unused) Bind thread to P core processor set */
#endif

#define TH_SFLAG_EXEC_PROMOTED          0x8000          /* promote reason: thread is in an exec */

#define TH_SFLAG_THREAD_GROUP_AUTO_JOIN 0x10000         /* thread has been auto-joined to thread group */
#if __AMP__
#define TH_SFLAG_BOUND_SOFT             0x20000         /* thread is soft bound to a cluster; can run anywhere if bound cluster unavailable */
#endif /* __AMP__ */

#if CONFIG_PREADOPT_TG
#define TH_SFLAG_REEVALUTE_TG_HIERARCHY_LATER 0x40000   /* thread needs to reevaluate its TG hierarchy */
#endif

#define TH_SFLAG_FLOOR_PROMOTED               0x80000   /* promote reason: boost requested */

/* 'promote reasons' that request a priority floor only, not a custom priority */
#define TH_SFLAG_PROMOTE_REASON_MASK    (TH_SFLAG_RW_PROMOTED | TH_SFLAG_WAITQ_PROMOTED | TH_SFLAG_EXEC_PROMOTED | TH_SFLAG_FLOOR_PROMOTED)

	int16_t                 sched_pri;              /* scheduled (current) priority */
	int16_t                 base_pri;               /* effective base priority (equal to req_base_pri unless TH_SFLAG_BASE_PRI_FROZEN) */
	int16_t                 req_base_pri;           /* requested base priority */
	int16_t                 max_priority;           /* copy of max base priority */
	int16_t                 task_priority;          /* copy of task base priority */
	int16_t                 promotion_priority;     /* priority thread is currently promoted to */
	uint16_t                priority_floor_count;   /* number of pushes boosting the floor priority */
	int16_t                 suspend_count;          /* Kernel holds on this thread */

	int                     iotier_override;        /* atomic operations to set, cleared on ret to user */
	os_ref_atomic_t         ref_count;              /* number of references to me */

	uint32_t                rwlock_count;           /* Number of lck_rw_t locks held by thread */
#ifdef DEBUG_RW
	rw_lock_debug_t         rw_lock_held;           /* rw_locks currently held by the thread */
#endif /* DEBUG_RW */

	integer_t               importance;             /* task-relative importance */

	/* Priority depression expiration */
	integer_t               depress_timer_active;
	timer_call_t            depress_timer;

	/* real-time parameters */
	struct {                                        /* see mach/thread_policy.h */
		uint32_t            period;
		uint32_t            computation;
		uint32_t            constraint;
		bool                preemptible;
		uint8_t             priority_offset;   /* base_pri = BASEPRI_RTQUEUES + priority_offset */
		uint64_t            deadline;
	}                       realtime;

	uint64_t                last_run_time;            /* time when thread was switched away from */
	uint64_t                last_made_runnable_time;  /* time when thread was unblocked or preempted */
	uint64_t                last_basepri_change_time; /* time when thread was last changed in basepri while runnable */
	uint64_t                same_pri_latency;
	/*
	 * workq_quantum_deadline is the workq thread's next runtime deadline. This
	 * value is set to 0 if the thread has no such deadline applicable to it.
	 *
	 * Synchronization for this field follows from how it is modified:
	 * 1) The field is always modified by the thread on itself, or on a thread
	 * that is not running/runnable.
	 * 2) A change to this field is immediately followed by a
	 * corresponding change to the AST_KEVENT to either set or clear the
	 * AST_KEVENT_WORKQ_QUANTUM_EXPIRED bit.
	 *
	 * workq_quantum_deadline can be modified by the thread on itself during
	 * interrupt context. However, due to (2), and because the write to the
	 * AST_KEVENT is volatile, the compiler must preserve the order between
	 * the write to workq_quantum_deadline and the write to the kevent field,
	 * which guarantees the correct synchronization.
	 */
	uint64_t                workq_quantum_deadline;
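
	/*
	 * Illustrative sketch of the pairing described above (hypothetical
	 * arming path; assumes the act_set_astkevent()/act_clear_astkevent()
	 * helpers declared in osfmk/kern/ast.h):
	 *
	 *	thread->workq_quantum_deadline = deadline;
	 *	// the volatile kevent write may not be reordered above the
	 *	// deadline store by the compiler
	 *	act_clear_astkevent(thread, AST_KEVENT_WORKQ_QUANTUM_EXPIRED);
	 */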

#if WORKQ_QUANTUM_HISTORY_DEBUG

#define WORKQ_QUANTUM_HISTORY_COUNT 16
	struct workq_quantum_history {
		uint64_t time;
		uint64_t deadline;
		bool arm;
	} workq_quantum_history[WORKQ_QUANTUM_HISTORY_COUNT];
	uint64_t workq_quantum_history_index;

/* Log one ring-buffer entry; the thread expression is evaluated only once */
#define WORKQ_QUANTUM_HISTORY_WRITE_ENTRY(thread, ...)  ({\
	        thread_t __th = (thread); \
	        uint64_t __index = os_atomic_inc_orig(&__th->workq_quantum_history_index, relaxed); \
	        struct workq_quantum_history _wq_quantum_history = { mach_approximate_time(), __VA_ARGS__}; \
	        __th->workq_quantum_history[__index % WORKQ_QUANTUM_HISTORY_COUNT] = \
	                        (struct workq_quantum_history) _wq_quantum_history; \
	})
#else /* WORKQ_QUANTUM_HISTORY_DEBUG */
#define WORKQ_QUANTUM_HISTORY_WRITE_ENTRY(thread, ...)
#endif /* WORKQ_QUANTUM_HISTORY_DEBUG */

#define THREAD_NOT_RUNNABLE (~0ULL)

#if CONFIG_THREAD_GROUPS
	struct thread_group     *thread_group;
#endif

#if defined(CONFIG_SCHED_MULTIQ)
	sched_group_t           sched_group;
#endif /* defined(CONFIG_SCHED_MULTIQ) */

	/* Data used during setrun/dispatch */
	timer_data_t            system_timer;           /* system mode timer */
	processor_t             bound_processor;        /* bound to a processor? */
	processor_t             last_processor;         /* processor last dispatched on */
	processor_t             chosen_processor;       /* Where we want to run this thread */

	/* Fail-safe computation since last unblock or qualifying yield */
	uint64_t                computation_metered;
	uint64_t                computation_epoch;
	uint64_t                safe_release;           /* when to release fail-safe */

	/* Call out from scheduler */
	void                  (*sched_call)(int type, thread_t thread);

#if defined(CONFIG_SCHED_PROTO)
	uint32_t                runqueue_generation;    /* last time runqueue was drained */
#endif

	/* Statistics and timesharing calculations */
#if defined(CONFIG_SCHED_TIMESHARE_CORE)
	natural_t               sched_stamp;            /* last scheduler tick */
	natural_t               sched_usage;            /* timesharing cpu usage [sched] */
	natural_t               pri_shift;              /* usage -> priority from pset */
	natural_t               cpu_usage;              /* instrumented cpu usage [%cpu] */
	natural_t               cpu_delta;              /* accumulated cpu_usage delta */
#endif /* CONFIG_SCHED_TIMESHARE_CORE */

	uint32_t                c_switch;               /* total context switches */
	uint32_t                p_switch;               /* total processor switches */
	uint32_t                ps_switch;              /* total pset switches */

	integer_t               mutex_count;            /* total count of locks held */

	/* Timing data structures */
	int                     precise_user_kernel_time; /* precise user/kernel enabled for this thread */
	timer_data_t            user_timer;             /* user mode timer */
	uint64_t                user_timer_save;        /* saved user timer value */
	uint64_t                system_timer_save;      /* saved system timer value */
	uint64_t                vtimer_user_save;       /* saved values for vtimers */
	uint64_t                vtimer_prof_save;
	uint64_t                vtimer_rlim_save;
	uint64_t                vtimer_qos_save;

	timer_data_t            ptime;                  /* time executing in P mode */
	timer_data_t            runnable_timer;         /* time the thread is runnable (including running) */

#if CONFIG_SCHED_SFI
	/* Timing for wait state */
	uint64_t                wait_sfi_begin_time;    /* start time for thread waiting in SFI */
#endif

	/*
	 * Processor/cache affinity
	 * - affinity_threads links task threads with the same affinity set
	 */
	queue_chain_t           affinity_threads;
	affinity_set_t          affinity_set;

#if CONFIG_TASKWATCH
	task_watch_t           *taskwatch;              /* task watch */
#endif /* CONFIG_TASKWATCH */

	/* Various bits of state to stash across a continuation, exclusive to the current thread block point */
	union {
		struct {
			mach_msg_return_t       state;          /* receive state */
			mach_port_seqno_t       seqno;          /* seqno of recvd message */
			ipc_object_t            object;         /* object received on */
			vm_address_t            msg_addr;       /* receive buffer pointer */
			mach_msg_size_t         rsize;          /* max size for recvd msg */
			mach_msg_size_t         msize;          /* actual size for recvd msg */
			mach_msg_option_t       option;         /* options for receive */
			mach_port_name_t        receiver_name;  /* the receive port name */
			struct knote            *knote;         /* knote fired for rcv */
			union {
				struct ipc_kmsg   *kmsg;        /* received message */
				struct ipc_mqueue *peekq;       /* mqueue to peek at */
				struct {
					uint32_t       ppri;    /* received message pthread_priority_t */
					mach_msg_qos_t oqos;    /* override qos for message */
				} received_qos;
			};
			mach_msg_continue_t     continuation;
		} receive;
		struct {
			struct semaphore        *waitsemaphore;         /* semaphore ref */
			struct semaphore        *signalsemaphore;       /* semaphore ref */
			int                     options;                /* semaphore options */
			kern_return_t           result;                 /* primary result */
			mach_msg_continue_t     continuation;
		} sema;
		struct {
#define THREAD_SAVE_IOKIT_TLS_COUNT     8
			void                    *tls[THREAD_SAVE_IOKIT_TLS_COUNT];
		} iokit;
	} saved;

	/* Only user threads can cause guard exceptions, only kernel threads can be thread call threads */
	union {
		/* Thread call thread's state structure, stored on its stack */
		struct thread_call_thread_state *thc_state;

		/* Structure to save information about guard exception */
		struct {
			mach_exception_code_t           code;
			mach_exception_subcode_t        subcode;
		} guard_exc_info;
	};

	/* User level suspensions */
	int32_t                 user_stop_count;

	/* IPC data structures */
#if IMPORTANCE_INHERITANCE
	natural_t               ith_assertions;         /* assertions pending drop */
#endif
	struct ipc_kmsg_queue   ith_messages;           /* messages to reap */
	mach_port_t             ith_kernel_reply_port;  /* reply port for kernel RPCs */

	/* Pending thread ast(s) */
	ast_t                   ast;

	/* Ast/Halt data structures */
	vm_offset_t             recover;                /* page fault recover(copyin/out) */

	queue_chain_t           threads;                /* global list of all threads */

	/* Activation */
	queue_chain_t           task_threads;

	/* Task membership */
#if __x86_64__ || __arm__
	struct task            *t_task;
#endif
	struct thread_ro       *t_tro;
	vm_map_t                map;
	thread_t                handoff_thread;

	/* Timed wait expiration */
	timer_call_t            wait_timer;
	uint16_t                wait_timer_active;
	bool                    wait_timer_is_set;

	/* Miscellaneous bits guarded by mutex */
	uint32_t
	    active:1,           /* Thread is active and has not been terminated */
	    ipc_active:1,       /* IPC with the thread ports is allowed */
	    started:1,          /* Thread has been started after creation */
	    static_param:1,     /* Disallow policy parameter changes */
	    inspection:1,       /* TRUE when task is being inspected by crash reporter */
	    policy_reset:1,     /* Disallow policy parameter changes on terminating threads */
	    suspend_parked:1,   /* thread parked in thread_suspended */
	    corpse_dup:1,       /* TRUE when thread is an inactive duplicate in a corpse */
	:0;

	decl_lck_mtx_data(, mutex);

	struct ipc_port         *ith_special_reply_port; /* ref to special reply port */

#if CONFIG_DTRACE
	uint16_t                t_dtrace_flags;         /* DTrace thread states */
#define TH_DTRACE_EXECSUCCESS   0x01
	uint16_t                t_dtrace_inprobe;       /* Executing under dtrace_probe */
	uint32_t                t_dtrace_predcache;     /* DTrace per thread predicate value hint */
	int64_t                 t_dtrace_tracing;       /* Thread time under dtrace_probe() */
	int64_t                 t_dtrace_vtime;
#endif

	clock_sec_t             t_page_creation_time;
	uint32_t                t_page_creation_count;
	uint32_t                t_page_creation_throttled;
#if (DEVELOPMENT || DEBUG)
	uint64_t                t_page_creation_throttled_hard;
	uint64_t                t_page_creation_throttled_soft;
#endif /* DEVELOPMENT || DEBUG */
	int                     t_pagein_error;         /* for vm_fault(), holds error from vnop_pagein() */

#ifdef KPERF
/* The high 8 bits are the number of frames to sample of a user callstack. */
#define T_KPERF_CALLSTACK_DEPTH_OFFSET     (24)
#define T_KPERF_SET_CALLSTACK_DEPTH(DEPTH) (((uint32_t)(DEPTH)) << T_KPERF_CALLSTACK_DEPTH_OFFSET)
#define T_KPERF_GET_CALLSTACK_DEPTH(FLAGS) ((FLAGS) >> T_KPERF_CALLSTACK_DEPTH_OFFSET)
#define T_KPERF_ACTIONID_OFFSET            (18)
#define T_KPERF_SET_ACTIONID(AID)          (((uint32_t)(AID)) << T_KPERF_ACTIONID_OFFSET)
#define T_KPERF_GET_ACTIONID(FLAGS)        ((FLAGS) >> T_KPERF_ACTIONID_OFFSET)
#endif

#define T_KPERF_AST_CALLSTACK 0x1 /* dump a callstack on thread's next AST */
#define T_KPERF_AST_DISPATCH  0x2 /* dump a name on thread's next AST */
#define T_KPC_ALLOC           0x4 /* thread needs a kpc_buf allocated */

#define T_KPERF_AST_ALL \
    (T_KPERF_AST_CALLSTACK | T_KPERF_AST_DISPATCH | T_KPC_ALLOC)
/* only go up to T_KPERF_ACTIONID_OFFSET - 1 */

#ifdef KPERF
	uint32_t                kperf_ast;
	uint32_t                kperf_pet_gen;  /* last generation of PET that sampled this thread */
	uint32_t                kperf_c_switch; /* last dispatch detection */
	uint32_t                kperf_pet_cnt;  /* how many times a thread has been sampled by PET */
#endif

#ifdef KPC
	/* accumulated performance counters for this thread */
	uint64_t               *kpc_buf;
#endif

#if HYPERVISOR
	/* hypervisor virtual CPU object associated with this thread */
	void                   *hv_thread_target;
#endif /* HYPERVISOR */

	/* Statistics accumulated per-thread and aggregated per-task */
	uint32_t                syscalls_unix;
	uint32_t                syscalls_mach;
	ledger_t                t_ledger;
	ledger_t                t_threadledger;              /* per thread ledger */
	ledger_t                t_bankledger;                /* ledger to charge someone */
	uint64_t                t_deduct_bank_ledger_time;   /* cpu time to be deducted from bank ledger */
	uint64_t                t_deduct_bank_ledger_energy; /* energy to be deducted from bank ledger */

	uint64_t                thread_id;              /* system wide unique thread-id */

#if MONOTONIC
	struct mt_thread        t_monotonic;
#endif /* MONOTONIC */

	/* policy is protected by the thread mutex */
	struct thread_requested_policy  requested_policy;
	struct thread_effective_policy  effective_policy;

	/* usynch override is protected by the task lock, eventually will be thread mutex */
	struct thread_qos_override {
		struct thread_qos_override      *override_next;
		uint32_t        override_contended_resource_count;
		int16_t         override_qos;
		int16_t         override_resource_type;
		user_addr_t     override_resource;
	} *overrides;

	uint32_t                kevent_overrides;
	uint8_t                 user_promotion_basepri;
	uint8_t                 kern_promotion_schedpri;
	_Atomic uint16_t        kevent_ast_bits;

	io_stat_info_t          thread_io_stats;        /* per-thread I/O statistics */

	uint32_t                thread_callout_interrupt_wakeups;
	uint32_t                thread_callout_platform_idle_wakeups;
	uint32_t                thread_timer_wakeups_bin_1;
	uint32_t                thread_timer_wakeups_bin_2;
	thread_tag_t            thread_tag;

	/*
	 * callout_* fields are only set for thread call threads, whereas guard_exc_fatal is set
	 * by user threads on themselves while taking a guard exception. So it's okay for them to
	 * share this bitfield.
	 */
	uint16_t
	    callout_woken_from_icontext:1,
	    callout_woken_from_platform_idle:1,
	    callout_woke_thread:1,
	    guard_exc_fatal:1,
	    thread_bitfield_unused:12;

	mach_port_name_t        ith_voucher_name;
	ipc_voucher_t           ith_voucher;

#if CONFIG_THREAD_GROUPS
#if CONFIG_PREADOPT_TG
	/* The preadopt thread group is set on the thread
	 *
	 *   a) By another thread when it is a creator and it is scheduled with the
	 *   thread group on the TR
	 *   b) On itself when it binds a thread request and becomes a
	 *   servicer, or when it rebinds to the thread request
	 *   c) On itself when it processes knotes and finds the first
	 *   EVFILT_MACHPORT event to deliver to userspace
	 *
	 * Note that this is a full reference owned by the thread_t and not a
	 * borrowed reference.
	 *
	 * This reference is cleared from the thread_t by the thread itself at the
	 * following times:
	 *   a) When it explicitly adopts a work interval or a bank voucher
	 *   b) If it still exists on the thread, after it has unbound and is about
	 *   to park
	 *   c) During thread termination if one still exists
	 *   d) When a different preadoption thread group is set on the thread
	 *
	 * It is modified under the thread lock.
	 */
	struct thread_group     *preadopt_thread_group;

	/* This field exists to make sure that t->thread_group always points
	 * to a valid thread group and isn't a dangling pointer.
	 *
	 * Consider the following scenario:
	 *	a) t->thread_group points to the preadoption thread group
	 *	b) The preadoption thread group is modified on the thread, but we are
	 *	unable to resolve the hierarchy immediately due to the current state of
	 *	the thread
	 *
	 *	In order to make sure that t->thread_group points to a valid thread
	 *	group until we can resolve the hierarchy again, we save the existing
	 *	thread_group it points to in old_preadopt_thread_group. The next time a
	 *	hierarchy resolution is done, we know that t->thread_group will not point
	 *	to this field anymore, so we can clear it.
	 *
	 *	This field always takes the reference that was previously in
	 *	preadopt_thread_group, so it will have a full +1.
	 */
	struct thread_group     *old_preadopt_thread_group;
#endif /* CONFIG_PREADOPT_TG */

	/* This is a borrowed reference to the TG from the ith_voucher and is saved
	 * here since we may not always be in the right context to be able to do the
	 * lookups.
	 *
	 * It is always set on self under the thread lock. */
	struct thread_group     *bank_thread_group;

	/* Whether this is the autojoin thread group or the work interval thread
	 * group depends on whether the thread's sched_flags has the
	 * TH_SFLAG_THREAD_GROUP_AUTO_JOIN bit set */
	union {
		/* This is a borrowed reference to the auto join thread group from the
		 * work_interval. It is set with the thread lock held */
		struct thread_group             *auto_join_thread_group;
		/* This is a borrowed reference to the explicit work_interval thread group
		 * and is always set on self */
		struct thread_group             *work_interval_thread_group;
	};
#endif /* CONFIG_THREAD_GROUPS */

	/* Work interval (if any) associated with the thread. Only modified by the
	 * current thread on itself, or by another thread when the thread is held
	 * off of the runq */
	struct work_interval            *th_work_interval;
	thread_work_interval_flags_t    th_work_interval_flags;

#define THREAD_BOUND_CLUSTER_NONE       (UINT32_MAX)
	uint32_t                th_bound_cluster_id;

#if SCHED_TRACE_THREAD_WAKEUPS
	uintptr_t               thread_wakeup_bt[64];
#endif
	turnstile_update_flags_t inheritor_flags; /* inheritor flags for inheritor field */
	block_hint_t            pending_block_hint;
	block_hint_t            block_hint;      /* What type of primitive last caused us to block. */
	uint32_t                decompressions;  /* Per-thread decompressions counter to be added to per-task decompressions counter */
	int                     thread_region_page_shift; /* Page shift that this thread would like to use when */
	                                                  /* introspecting a task. This is currently being used */
	                                                  /* by footprint, which uses a thread for each task being inspected. */
#if CONFIG_IOSCHED
	void                   *decmp_upl;
#endif /* CONFIG_IOSCHED */
};

#define ith_state           saved.receive.state
#define ith_object          saved.receive.object
#define ith_msg_addr        saved.receive.msg_addr
#define ith_rsize           saved.receive.rsize
#define ith_msize           saved.receive.msize
#define ith_option          saved.receive.option
#define ith_receiver_name   saved.receive.receiver_name
#define ith_continuation    saved.receive.continuation
#define ith_kmsg            saved.receive.kmsg
#define ith_peekq           saved.receive.peekq
#define ith_knote           saved.receive.knote
#define ith_ppriority       saved.receive.received_qos.ppri
#define ith_qos_override    saved.receive.received_qos.oqos
#define ith_seqno           saved.receive.seqno

#define sth_waitsemaphore   saved.sema.waitsemaphore
#define sth_signalsemaphore saved.sema.signalsemaphore
#define sth_options         saved.sema.options
#define sth_result          saved.sema.result
#define sth_continuation    saved.sema.continuation

#define ITH_KNOTE_NULL      ((void *)NULL)
#define ITH_KNOTE_PSEUDO    ((void *)0xdeadbeef)
/*
 * The ith_knote is used during message delivery and can safely be interpreted
 * only on those codepaths, which is what the test for the msgt_name being
 * RECEIVE or SEND_ONCE is about.
 */
#define ITH_KNOTE_VALID(kn, msgt_name) \
	        (((kn) != ITH_KNOTE_NULL && (kn) != ITH_KNOTE_PSEUDO) && \
	         ((msgt_name) == MACH_MSG_TYPE_PORT_RECEIVE || \
	         (msgt_name) == MACH_MSG_TYPE_PORT_SEND_ONCE))

#if MACH_ASSERT
#define assert_thread_magic(thread) assertf((thread)->thread_magic == THREAD_MAGIC, \
	                                    "bad thread magic 0x%llx for thread %p, expected 0x%llx", \
	                                    (thread)->thread_magic, (thread), THREAD_MAGIC)
#else
#define assert_thread_magic(thread) do { (void)(thread); } while (0)
#endif

extern thread_t                 thread_bootstrap(void);

extern void                     thread_machine_init_template(void);

extern void                     thread_init(void);

extern void                     thread_daemon_init(void);

extern void                     thread_reference(
	thread_t                thread);

extern void                     thread_deallocate(
	thread_t                thread);

extern void                     thread_inspect_deallocate(
	thread_inspect_t        thread);

extern void                     thread_read_deallocate(
	thread_read_t           thread);

extern void                     thread_terminate_self(void);

extern kern_return_t    thread_terminate_internal(
	thread_t                    thread);

extern void                     thread_start(
	thread_t                        thread) __attribute__ ((noinline));

extern void                     thread_start_in_assert_wait(
	thread_t                        thread,
	event_t             event,
	wait_interrupt_t    interruptible) __attribute__ ((noinline));

extern void                     thread_terminate_enqueue(
	thread_t                thread);

extern void                     thread_exception_enqueue(
	task_t          task,
	thread_t        thread,
	exception_type_t etype);

extern void                     thread_copy_resource_info(
	thread_t dst_thread,
	thread_t src_thread);

extern void                     thread_terminate_crashed_threads(void);

extern void                     thread_stack_enqueue(
	thread_t                thread);

extern void                     thread_hold(
	thread_t        thread);

extern void                     thread_release(
	thread_t        thread);

extern void                     thread_corpse_continue(void) __dead2;

extern boolean_t                thread_is_active(thread_t thread);

extern lck_grp_t                thread_lck_grp;

/* Locking for scheduler state, always acquired with interrupts disabled (splsched()) */
#define thread_lock_init(th)    simple_lock_init(&(th)->sched_lock, 0)
#define thread_lock(th)         simple_lock(&(th)->sched_lock, &thread_lck_grp)
#define thread_unlock(th)       simple_unlock(&(th)->sched_lock)

#define wake_lock_init(th)      simple_lock_init(&(th)->wake_lock, 0)
#define wake_lock(th)           simple_lock(&(th)->wake_lock, &thread_lck_grp)
#define wake_unlock(th)         simple_unlock(&(th)->wake_lock)
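
/*
 * Illustrative usage sketch (a common pattern in osfmk, shown only as an
 * example): the thread lock must be taken at splsched.
 *
 *	spl_t s = splsched();
 *	thread_lock(thread);
 *	// ... examine or update scheduler state ...
 *	thread_unlock(thread);
 *	splx(s);
 */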

#define thread_should_halt_fast(thread)         (!(thread)->active)

extern void                             stack_alloc(
	thread_t                thread);

extern void                     stack_handoff(
	thread_t                from,
	thread_t                to);

extern void                             stack_free(
	thread_t                thread);

extern void                             stack_free_reserved(
	thread_t                thread);

extern boolean_t                stack_alloc_try(
	thread_t            thread);

extern void                             stack_collect(void);

extern kern_return_t    thread_info_internal(
	thread_t                                thread,
	thread_flavor_t                 flavor,
	thread_info_t                   thread_info_out,
	mach_msg_type_number_t  *thread_info_count);

extern kern_return_t    kernel_thread_create(
	thread_continue_t       continuation,
	void                            *parameter,
	integer_t                       priority,
	thread_t                        *new_thread);

extern kern_return_t    kernel_thread_start_priority(
	thread_continue_t       continuation,
	void                            *parameter,
	integer_t                       priority,
	thread_t                        *new_thread);
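
/*
 * Illustrative sketch (assumed usage, not a declaration from this file): a
 * started kernel thread is returned with a reference that the caller must
 * drop once it no longer needs the handle.
 *
 *	thread_t thread;
 *	if (kernel_thread_start_priority(my_continuation, NULL,
 *	        BASEPRI_KERNEL, &thread) == KERN_SUCCESS) {
 *		thread_deallocate(thread);
 *	}
 *
 * (my_continuation is a hypothetical thread_continue_t.)
 */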

extern void                             machine_stack_attach(
	thread_t                thread,
	vm_offset_t             stack);

extern vm_offset_t              machine_stack_detach(
	thread_t                thread);

extern void                             machine_stack_handoff(
	thread_t                old,
	thread_t                new);

extern thread_t                 machine_switch_context(
	thread_t                        old_thread,
	thread_continue_t       continuation,
	thread_t                        new_thread);

extern void                             machine_load_context(
	thread_t                thread) __attribute__((noreturn));

extern void             machine_thread_state_initialize(
	thread_t                                thread);

extern kern_return_t    machine_thread_set_state(
	thread_t                                thread,
	thread_flavor_t                 flavor,
	thread_state_t                  state,
	mach_msg_type_number_t  count);

extern mach_vm_address_t machine_thread_pc(
	thread_t                thread);

extern void machine_thread_reset_pc(
	thread_t                thread,
	mach_vm_address_t       pc);

extern boolean_t        machine_thread_on_core(
	thread_t                thread);

extern kern_return_t    machine_thread_get_state(
	thread_t                                thread,
	thread_flavor_t                 flavor,
	thread_state_t                  state,
	mach_msg_type_number_t  *count);

extern kern_return_t    machine_thread_state_convert_from_user(
	thread_t                                thread,
	thread_flavor_t                 flavor,
	thread_state_t                  tstate,
	mach_msg_type_number_t  count,
	thread_state_t old_tstate,
	mach_msg_type_number_t old_count,
	thread_set_status_flags_t tssf_flags);

extern kern_return_t    machine_thread_state_convert_to_user(
	thread_t                                thread,
	thread_flavor_t                 flavor,
	thread_state_t                  tstate,
	mach_msg_type_number_t  *count,
	thread_set_status_flags_t tssf_flags);

extern kern_return_t    machine_thread_dup(
	thread_t                self,
	thread_t                target,
	boolean_t               is_corpse);

extern void             machine_thread_init(void);

extern void             machine_thread_template_init(thread_t thr_template);

extern void             machine_thread_create(
	thread_t                thread,
	task_t                  task,
	bool                    first_thread);

extern void             machine_thread_switch_addrmode(
	thread_t                 thread);

extern void                 machine_thread_destroy(
	thread_t                thread);

extern void                             machine_set_current_thread(
	thread_t                        thread);

extern kern_return_t    machine_thread_get_kern_state(
	thread_t                                thread,
	thread_flavor_t                 flavor,
	thread_state_t                  tstate,
	mach_msg_type_number_t  *count);

extern kern_return_t    machine_thread_inherit_taskwide(
	thread_t                thread,
	task_t                  parent_task);

extern kern_return_t    machine_thread_set_tsd_base(
	thread_t                                thread,
	mach_vm_offset_t                tsd_base);

#define thread_mtx_try(thread)                  lck_mtx_try_lock(&(thread)->mutex)
#define thread_mtx_held(thread)                 lck_mtx_assert(&(thread)->mutex, LCK_MTX_ASSERT_OWNED)

extern void thread_apc_ast(thread_t thread);

extern void thread_update_qos_cpu_time(thread_t thread);

void act_machine_sv_free(thread_t, int);

vm_offset_t                     min_valid_stack_address(void);
vm_offset_t                     max_valid_stack_address(void);

extern bool thread_no_smt(thread_t thread);
extern bool processor_active_thread_no_smt(processor_t processor);

extern void thread_set_options(uint32_t thopt);

#if CONFIG_THREAD_GROUPS
struct thread_group *thread_get_current_voucher_thread_group(thread_t thread);
#endif /* CONFIG_THREAD_GROUPS */

#endif  /* MACH_KERNEL_PRIVATE */
#if BSD_KERNEL_PRIVATE

/* Duplicated from osfmk/kern/ipc_tt.h */
__options_decl(port_intrans_options_t, uint32_t, {
	PORT_INTRANS_OPTIONS_NONE              = 0x0000,
	PORT_INTRANS_THREAD_IN_CURRENT_TASK    = 0x0001,
	PORT_INTRANS_THREAD_NOT_CURRENT_THREAD = 0x0002,

	PORT_INTRANS_SKIP_TASK_EVAL            = 0x0004,
	PORT_INTRANS_ALLOW_CORPSE_TASK         = 0x0008,
});

extern thread_t port_name_to_thread(
	mach_port_name_t            port_name,
	port_intrans_options_t    options);
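
/*
 * Illustrative sketch (assumed usage): on success, port_name_to_thread
 * returns a thread with a reference that the caller is expected to drop
 * with thread_deallocate(); THREAD_NULL indicates a failed translation.
 *
 *	thread_t t = port_name_to_thread(name, PORT_INTRANS_OPTIONS_NONE);
 *	if (t != THREAD_NULL) {
 *		// ... use t ...
 *		thread_deallocate(t);
 *	}
 */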

#endif /* BSD_KERNEL_PRIVATE */
#ifdef XNU_KERNEL_PRIVATE

extern void                     thread_require(
	thread_t        thread);

extern void                     thread_deallocate_safe(
	thread_t                thread);

extern uint64_t                 thread_rettokern_addr(
	thread_t thread);

extern uint64_t                 thread_wqquantum_addr(
	thread_t thread);

extern integer_t        thread_kern_get_pri(thread_t thr) __pure2;

extern void             thread_kern_set_pri(thread_t thr, integer_t pri);

extern integer_t        thread_kern_get_kernel_maxpri(void) __pure2;

uint16_t        thread_set_tag(thread_t thread, uint16_t tag);
uint16_t        thread_get_tag(thread_t thread);

__options_decl(shared_rsrc_policy_agent_t, uint32_t, {
	SHARED_RSRC_POLICY_AGENT_DISPATCH = 0,
	SHARED_RSRC_POLICY_AGENT_SYSCTL = 1,
	SHARED_RSRC_POLICY_AGENT_PERFCTL_CSW = 2,
	SHARED_RSRC_POLICY_AGENT_PERFCTL_QUANTUM = 3,
});

boolean_t       thread_shared_rsrc_policy_get(thread_t thread, cluster_shared_rsrc_type_t type);
kern_return_t   thread_shared_rsrc_policy_set(thread_t thread, uint32_t index, cluster_shared_rsrc_type_t type, shared_rsrc_policy_agent_t agent);
kern_return_t   thread_shared_rsrc_policy_clear(thread_t thread, cluster_shared_rsrc_type_t type, shared_rsrc_policy_agent_t agent);

#ifdef MACH_KERNEL_PRIVATE
static inline thread_tag_t
thread_set_tag_internal(thread_t thread, thread_tag_t tag)
{
	return os_atomic_or_orig(&thread->thread_tag, tag, relaxed);
}

static inline thread_tag_t
thread_get_tag_internal(thread_t thread)
{
	return thread->thread_tag;
}
#endif /* MACH_KERNEL_PRIVATE */

uint64_t        thread_last_run_time(thread_t thread);

extern kern_return_t    thread_state_initialize(
	thread_t                                thread);

extern kern_return_t    thread_setstatus(
	thread_t                                thread,
	int                                             flavor,
	thread_state_t                  tstate,
	mach_msg_type_number_t  count);

extern kern_return_t    thread_setstatus_from_user(
	thread_t                                thread,
	int                                             flavor,
	thread_state_t                  tstate,
	mach_msg_type_number_t  count,
	thread_state_t                  old_tstate,
	mach_msg_type_number_t  old_count,
	thread_set_status_flags_t flags);

extern kern_return_t    thread_getstatus(
	thread_t                                thread,
	int                                             flavor,
	thread_state_t                  tstate,
	mach_msg_type_number_t  *count);

extern void main_thread_set_immovable_pinned(thread_t thread);

extern kern_return_t    thread_getstatus_to_user(
	thread_t                                thread,
	int                                             flavor,
	thread_state_t                  tstate,
	mach_msg_type_number_t  *count);

extern kern_return_t    thread_create_with_continuation(
	task_t task,
	thread_t *new_thread,
	thread_continue_t continuation);

extern kern_return_t main_thread_create_waiting(task_t    task,
    thread_continue_t              continuation,
    event_t                        event,
    thread_t                       *new_thread);

extern kern_return_t    thread_create_workq_waiting(
	task_t                  task,
	thread_continue_t       thread_return,
	thread_t                *new_thread);

extern  void    thread_yield_internal(
	mach_msg_timeout_t      interval);

extern void thread_yield_to_preemption(void);

extern void thread_depress_timer_setup(thread_t self);

/*
 * Thread-private CPU limits: apply a private CPU limit to this thread only. Available actions are:
 *
 * 1) Block. Prevent CPU consumption of the thread from exceeding the limit.
 * 2) Exception. Generate a resource consumption exception when the limit is exceeded.
 * 3) Disable. Remove any existing CPU limit.
 */
#define THREAD_CPULIMIT_BLOCK           0x1
#define THREAD_CPULIMIT_EXCEPTION       0x2
#define THREAD_CPULIMIT_DISABLE         0x3

struct _thread_ledger_indices {
	int cpu_time;
};

extern struct _thread_ledger_indices thread_ledgers;

extern int thread_get_cpulimit(int *action, uint8_t *percentage, uint64_t *interval_ns);
extern int thread_set_cpulimit(int action, uint8_t percentage, uint64_t interval_ns);
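
/*
 * Illustrative sketch (assumed usage): throttle the current thread to at
 * most 50% of a CPU over 1-second intervals, blocking when the limit is
 * reached (NSEC_PER_SEC is from <mach/clock_types.h>). Passing
 * THREAD_CPULIMIT_DISABLE removes any existing limit.
 *
 *	thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, 50, NSEC_PER_SEC);
 */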
1272 
1273 extern void                     thread_read_times(
1274 	thread_t                thread,
1275 	time_value_t    *user_time,
1276 	time_value_t    *system_time,
1277 	time_value_t    *runnable_time);
1278 
1279 extern uint64_t         thread_get_runtime_self(void);
1280 
1281 extern void                     thread_setuserstack(
1282 	thread_t                thread,
1283 	mach_vm_offset_t        user_stack);
1284 
1285 extern user_addr_t         thread_adjuserstack(
1286 	thread_t                thread,
1287 	int                             adjust);
1288 
1289 
1290 extern void                     thread_setentrypoint(
1291 	thread_t                thread,
1292 	mach_vm_offset_t        entry);
1293 
1294 extern kern_return_t    thread_set_tsd_base(
1295 	thread_t        thread,
1296 	mach_vm_offset_t tsd_base);
1297 
1298 extern kern_return_t    thread_setsinglestep(
1299 	thread_t                thread,
1300 	int                     on);
1301 
1302 extern kern_return_t    thread_userstack(
1303 	thread_t,
1304 	int,
1305 	thread_state_t,
1306 	unsigned int,
1307 	mach_vm_offset_t *,
1308 	int *,
1309 	boolean_t);
1310 
1311 extern kern_return_t    thread_entrypoint(
1312 	thread_t,
1313 	int,
1314 	thread_state_t,
1315 	unsigned int,
1316 	mach_vm_offset_t *);
1317 
1318 extern kern_return_t    thread_userstackdefault(
1319 	mach_vm_offset_t *,
1320 	boolean_t);
1321 
1322 extern kern_return_t    thread_wire_internal(
1323 	host_priv_t             host_priv,
1324 	thread_t                thread,
1325 	boolean_t               wired,
1326 	boolean_t               *prev_state);
1327 
1328 
1329 extern kern_return_t    thread_dup(thread_t);
1330 
1331 extern kern_return_t thread_dup2(thread_t, thread_t);
1332 
1333 #if !defined(_SCHED_CALL_T_DEFINED)
1334 #define _SCHED_CALL_T_DEFINED
1335 typedef void    (*sched_call_t)(
1336 	int                             type,
1337 	thread_t                thread);
1338 #endif
1339 
1340 #define SCHED_CALL_BLOCK                0x1
1341 #define SCHED_CALL_UNBLOCK              0x2
1342 
1343 extern void             thread_sched_call(
1344 	thread_t                thread,
1345 	sched_call_t    call);
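
/*
 * Example (hypothetical sketch): installing a scheduler callout that is
 * invoked as the thread blocks and unblocks. my_sched_call is a name
 * invented for this sketch, and passing NULL is assumed here to clear the
 * callout again.
 *
 *	static void
 *	my_sched_call(int type, thread_t thread)
 *	{
 *		switch (type) {
 *		case SCHED_CALL_BLOCK:
 *			// thread is about to block
 *			break;
 *		case SCHED_CALL_UNBLOCK:
 *			// thread is becoming runnable again
 *			break;
 *		}
 *	}
 *
 *	thread_sched_call(thread, my_sched_call);   // install
 *	thread_sched_call(thread, NULL);            // remove (assumed)
 */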
1346 
1347 extern boolean_t        thread_is_static_param(
1348 	thread_t                thread);
1349 
1350 extern task_t   get_threadtask(thread_t) __pure2;
1351 
1352 extern task_t   get_threadtask_early(thread_t) __pure2;
1353 
1354 /*
1355  * Thread is running within a 64-bit address space.
1356  */
1357 #define thread_is_64bit_addr(thd)       \
1358 	task_has_64Bit_addr(get_threadtask(thd))
1359 
1360 /*
1361  * Thread is using 64-bit machine state.
1362  */
1363 #define thread_is_64bit_data(thd)       \
1364 	task_has_64Bit_data(get_threadtask(thd))
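
/*
 * Example (sketch): both predicates resolve through the owning task, so a
 * thread in a 64-bit address space running 32-bit machine state (or vice
 * versa) can be told apart.
 *
 *	if (thread_is_64bit_addr(thread) && !thread_is_64bit_data(thread)) {
 *		// 64-bit address space, 32-bit register state
 *	}
 */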
1365 
1366 struct uthread;
1367 
1368 #if defined(__x86_64__)
1369 extern int              thread_task_has_ldt(thread_t);
1370 #endif
1371 extern void             set_thread_pagein_error(thread_t, int);
1372 extern event_t          workq_thread_init_and_wq_lock(task_t, thread_t); // bsd/pthread/
1373 
1374 struct proc;
1375 struct uthread;
1376 extern const size_t     uthread_size;
1377 extern thread_ro_t      get_thread_ro_unchecked(thread_t) __pure2;
1378 extern thread_ro_t      get_thread_ro(thread_t) __pure2;
1379 extern thread_ro_t      current_thread_ro_unchecked(void) __pure2;
1380 extern thread_ro_t      current_thread_ro(void) __pure2;
1381 extern void             clear_thread_ro_proc(thread_t);
1382 extern struct uthread  *get_bsdthread_info(thread_t) __pure2;
1383 extern thread_t         get_machthread(struct uthread *) __pure2;
1384 extern uint64_t         uthread_tid(struct uthread *) __pure2;
1385 extern user_addr_t      thread_get_sigreturn_token(thread_t thread);
1386 extern void             uthread_init(task_t, struct uthread *, thread_ro_t, int);
1387 extern void             uthread_cleanup_name(struct uthread *uthread);
1388 extern void             uthread_cleanup(struct uthread *, thread_ro_t);
1389 extern void             uthread_cred_ref(struct ucred *);
1390 extern void             uthread_cred_free(struct ucred *);
1391 extern void             uthread_destroy(struct uthread *);
1392 extern void             uthread_reset_proc_refcount(struct uthread *);
1393 extern void             thread_ro_update_cred(thread_ro_t, struct ucred *);
1394 extern void             thread_ro_update_flags(thread_ro_t, thread_ro_flags_t add, thread_ro_flags_t clr);
1395 extern bool             uthread_is64bit(struct uthread *uth) __pure2;
1396 #if PROC_REF_DEBUG
1397 extern void             uthread_init_proc_refcount(struct uthread *);
1398 extern void             uthread_destroy_proc_refcount(struct uthread *);
1399 extern void             uthread_assert_zero_proc_refcount(struct uthread *);
1400 #else
1401 #define                 uthread_init_proc_refcount(uth)        ((void)(uth))
1402 #define                 uthread_destroy_proc_refcount(uth)     ((void)(uth))
1403 #define                 uthread_assert_zero_proc_refcount(uth) ((void)(uth))
1404 #endif
1405 #if CONFIG_DEBUG_SYSCALL_REJECTION
1406 extern uint64_t         *uthread_get_syscall_rejection_mask(void *);
1407 #endif /* CONFIG_DEBUG_SYSCALL_REJECTION */
1408 extern mach_port_name_t  uthread_joiner_port(struct uthread *);
1409 extern user_addr_t       uthread_joiner_address(struct uthread *);
1410 extern void              uthread_joiner_wake(task_t task, struct uthread *);
1411 
1412 extern boolean_t        thread_should_halt(
1413 	thread_t                thread);
1414 
1415 extern boolean_t        thread_should_abort(
1416 	thread_t);
1417 
1418 extern int is_64signalregset(void);
1419 
1420 extern void act_set_kperf(thread_t);
1421 extern void act_set_astledger(thread_t thread);
1422 extern void act_set_astledger_async(thread_t thread);
1423 extern void act_set_io_telemetry_ast(thread_t);
1424 extern void act_set_macf_telemetry_ast(thread_t);
1425 extern void act_set_astproc_resource(thread_t);
1426 
1427 extern vm_offset_t thread_get_kernel_stack(thread_t);
1428 
1429 extern uint32_t dtrace_get_thread_predcache(thread_t);
1430 extern int64_t dtrace_get_thread_vtime(thread_t);
1431 extern int64_t dtrace_get_thread_tracing(thread_t);
1432 extern uint16_t dtrace_get_thread_inprobe(thread_t);
1433 extern int dtrace_get_thread_last_cpu_id(thread_t);
1434 extern vm_offset_t dtrace_get_kernel_stack(thread_t);
1435 #define dtrace_get_kernel_stack thread_get_kernel_stack
1436 extern void dtrace_set_thread_predcache(thread_t, uint32_t);
1437 extern void dtrace_set_thread_vtime(thread_t, int64_t);
1438 extern void dtrace_set_thread_tracing(thread_t, int64_t);
1439 extern void dtrace_set_thread_inprobe(thread_t, uint16_t);
1440 extern vm_offset_t dtrace_set_thread_recover(thread_t, vm_offset_t);
1441 extern vm_offset_t dtrace_sign_and_set_thread_recover(thread_t, vm_offset_t);
1442 extern void dtrace_thread_bootstrap(void);
1443 extern void dtrace_thread_didexec(thread_t);
1444 
1445 extern int64_t dtrace_calc_thread_recent_vtime(thread_t);
1446 
1447 
1448 extern kern_return_t    thread_set_wq_state32(
1449 	thread_t          thread,
1450 	thread_state_t    tstate);
1451 
1452 extern kern_return_t    thread_set_wq_state64(
1453 	thread_t          thread,
1454 	thread_state_t    tstate);
1455 
1456 extern vm_offset_t      kernel_stack_mask;
1457 extern vm_offset_t      kernel_stack_size;
1458 extern vm_offset_t      kernel_stack_depth_max;
1459 
1460 extern void guard_ast(thread_t);
1461 extern void fd_guard_ast(thread_t,
1462     mach_exception_code_t, mach_exception_subcode_t);
1463 #if CONFIG_VNGUARD
1464 extern void vn_guard_ast(thread_t,
1465     mach_exception_code_t, mach_exception_subcode_t);
1466 #endif
1467 extern void mach_port_guard_ast(thread_t,
1468     mach_exception_code_t, mach_exception_subcode_t);
1469 extern void virt_memory_guard_ast(thread_t,
1470     mach_exception_code_t, mach_exception_subcode_t);
1471 extern void thread_guard_violation(thread_t,
1472     mach_exception_code_t, mach_exception_subcode_t, boolean_t);
1473 extern void thread_update_io_stats(thread_t, int size, int io_flags);
1474 
1475 extern kern_return_t    thread_set_voucher_name(mach_port_name_t name);
1476 extern kern_return_t thread_get_current_voucher_origin_pid(int32_t *pid);
1477 
1478 extern void thread_enable_send_importance(thread_t thread, boolean_t enable);
1479 
1480 /*
1481  * Translate signal context data pointer to userspace representation
1482  */
1483 
1484 extern kern_return_t    machine_thread_siguctx_pointer_convert_to_user(
1485 	thread_t thread,
1486 	user_addr_t *uctxp);
1487 
1488 extern void machine_tecs(thread_t thr);
1489 
1490 typedef enum cpuvn {
1491 	CPUVN_CI = 1
1492 } cpuvn_e;
1493 
1494 extern int machine_csv(cpuvn_e cve);
1495 #if defined(__x86_64__)
1496 extern void machine_thread_set_insn_copy_optout(thread_t thr);
1497 #endif
1498 
1499 /*
1500  * Translate array of function pointer syscall arguments from userspace representation
1501  */
1502 
1503 extern kern_return_t    machine_thread_function_pointers_convert_from_user(
1504 	thread_t thread,
1505 	user_addr_t *fptrs,
1506 	uint32_t count);
1507 
1508 /*
1509  * Get the duration of the given thread's last wait.
1510  */
1511 uint64_t thread_get_last_wait_duration(thread_t thread);
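
/*
 * Example (sketch): the duration is assumed here to be in mach absolute
 * time units, converted to nanoseconds for reporting.
 *
 *	uint64_t abs = thread_get_last_wait_duration(current_thread());
 *	uint64_t ns;
 *	absolutetime_to_nanoseconds(abs, &ns);
 */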
1512 
1513 extern bool thread_get_no_smt(void);
1514 #if defined(__x86_64__)
1515 extern bool curtask_get_insn_copy_optout(void);
1516 extern void curtask_set_insn_copy_optout(void);
1517 #endif /* defined(__x86_64__) */
1518 
1519 #endif  /* XNU_KERNEL_PRIVATE */
1520 #ifdef KERNEL_PRIVATE
1521 
1522 typedef struct thread_pri_floor {
1523 	thread_t thread;
1524 } thread_pri_floor_t;
1525 
1526 #ifdef MACH_KERNEL_PRIVATE
1527 extern void thread_floor_boost_ast(thread_t thread);
1528 extern void thread_floor_boost_set_promotion_locked(thread_t thread);
1529 #endif /* MACH_KERNEL_PRIVATE */
1530 
1531 /*!  @function thread_priority_floor_start
1532  *   @abstract Boost the current thread's priority to the floor.
1533  *   @discussion Increase the priority of the current thread to at least MINPRI_FLOOR.
1534  *       The boost will be maintained until a corresponding thread_priority_floor_end()
1535  *       is called. Every call to thread_priority_floor_start() must be paired with a
1536  *       call to thread_priority_floor_end() from the same thread.
1537  *       No thread can return to userspace before calling thread_priority_floor_end().
1538  *
1539  *       NOTE: Avoid using this function. Prefer gate_t or sleep_with_inheritor()
1540  *       instead.
1541  *   @result A token to be passed to the corresponding thread_priority_floor_end().
1542  */
1543 extern thread_pri_floor_t thread_priority_floor_start(void);
1544 /*!  @function thread_priority_floor_end
1545  *   @abstract Ends the floor boost.
1546  *   @param token The token obtained from thread_priority_floor_start().
1547  *   @discussion Ends the priority floor boost started with thread_priority_floor_start().
1548  */
1549 extern void thread_priority_floor_end(thread_pri_floor_t *token);
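
/*
 * Example (sketch): bracketing a short critical section with a priority
 * floor so the thread keeps at least MINPRI_FLOOR until the token is
 * returned, per the pairing rule documented above.
 *
 *	thread_pri_floor_t token = thread_priority_floor_start();
 *	// ... short critical section ...
 *	thread_priority_floor_end(&token);
 */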
1550 
1551 extern void thread_set_no_smt(bool set);
1552 
1553 extern void thread_mtx_lock(thread_t thread);
1554 
1555 extern void thread_mtx_unlock(thread_t thread);
1556 
1557 extern uint64_t thread_dispatchqaddr(
1558 	thread_t thread);
1559 
1560 bool thread_is_eager_preempt(thread_t thread);
1561 void thread_set_eager_preempt(thread_t thread);
1562 void thread_clear_eager_preempt(thread_t thread);
1563 void thread_set_honor_qlimit(thread_t thread);
1564 void thread_clear_honor_qlimit(thread_t thread);
1565 extern ipc_port_t convert_thread_to_port(thread_t);
1566 extern ipc_port_t convert_thread_to_port_pinned(thread_t);
1567 extern ipc_port_t convert_thread_inspect_to_port(thread_inspect_t);
1568 extern ipc_port_t convert_thread_read_to_port(thread_read_t);
1569 extern boolean_t is_external_pageout_thread(void);
1570 extern boolean_t is_vm_privileged(void);
1571 extern boolean_t set_vm_privilege(boolean_t);
1572 extern kern_allocation_name_t thread_set_allocation_name(kern_allocation_name_t new_name);
1573 extern void *thread_iokit_tls_get(uint32_t index);
1574 extern void thread_iokit_tls_set(uint32_t index, void * data);
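
/*
 * Example (hypothetical sketch): stashing and retrieving a per-thread
 * pointer in an IOKit TLS slot. The slot index 0 is an assumption of this
 * sketch; valid indices are defined by the IOKit glue, not here.
 *
 *	thread_iokit_tls_set(0, state);                 // store
 *	void *state2 = thread_iokit_tls_get(0);         // retrieve
 */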
1575 extern int thread_self_region_page_shift(void);
1576 extern void thread_self_region_page_shift_set(int pgshift);
1577 extern kern_return_t thread_create_immovable(task_t task, thread_t *new_thread);
1578 extern kern_return_t thread_terminate_pinned(thread_t thread);
1579 
1580 struct thread_attr_for_ipc_propagation;
1581 extern kern_return_t thread_get_ipc_propagate_attr(thread_t thread, struct thread_attr_for_ipc_propagation *attr);
1582 
1583 #endif /* KERNEL_PRIVATE */
1584 #ifdef XNU_KERNEL_PRIVATE
1585 
1586 extern void
1587 thread_get_thread_name(thread_t th, char* name);
1588 
1589 extern bool thread_supports_cooperative_workqueue(thread_t thread);
1590 extern void thread_arm_workqueue_quantum(thread_t thread);
1591 extern void thread_disarm_workqueue_quantum(thread_t thread);
1592 
1593 extern void thread_evaluate_workqueue_quantum_expiry(thread_t thread);
1594 extern bool thread_has_expired_workqueue_quantum(thread_t thread, bool should_trace);
1595 
1596 #endif /* XNU_KERNEL_PRIVATE */
1597 
1598 /*! @function thread_has_thread_name
1599  *   @abstract Checks if a thread has a name.
1600  *   @discussion This function takes one input, a thread, and returns
1601  *       a boolean value indicating whether that thread already has a name
1602  *       associated with it.
1603  *   @param th The thread to inspect.
1604  *   @result TRUE if the thread has a name, FALSE otherwise.
1605  */
1606 extern boolean_t thread_has_thread_name(thread_t th);
1607 
1608 /*! @function thread_set_thread_name
1609  *   @abstract Set a thread's name.
1610  *   @discussion This function takes two input parameters: a thread to name,
1611  *       and the name to apply to the thread.  The name will be copied over to
1612  *       the thread in order to better identify the thread.  If the name is
1613  *       longer than MAXTHREADNAMESIZE - 1, it will be truncated.
1614  *   @param th The thread to be named.
1615  *   @param name The name to apply to the thread.
1616  */
1617 extern void thread_set_thread_name(thread_t th, const char* name);
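
/*
 * Example (sketch): naming a thread only when it is still anonymous;
 * names longer than MAXTHREADNAMESIZE - 1 are truncated by the callee,
 * as documented above.
 *
 *	if (!thread_has_thread_name(th)) {
 *		thread_set_thread_name(th, "com.example.worker");
 *	}
 */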
1618 
1619 extern thread_t current_thread(void) __pure2;
1620 
1621 extern uint64_t thread_tid(thread_t thread) __pure2;
1622 
1623 extern void thread_reference(
1624 	thread_t        thread);
1625 
1626 extern void thread_deallocate(
1627 	thread_t        thread);
1628 
1629 /*! @function kernel_thread_start
1630  *   @abstract Create a kernel thread.
1631  *   @discussion This function takes three input parameters: a reference
1632  *       to the function that the thread should execute, caller-specified data,
1633  *       and a reference through which the newly created kernel thread is
1634  *       returned. The function returns KERN_SUCCESS on success or an
1635  *       appropriate kern_return_t error code. Note that the caller is
1636  *       responsible for explicitly releasing the reference to the created
1637  *       thread when it is no longer needed, by calling
1638  *       thread_deallocate(new_thread).
1639  *   @param continuation A C function pointer where the thread will begin execution.
1640  *   @param parameter Caller-specified data to be passed to the new thread.
1641  *   @param new_thread A reference to the new thread is returned in this parameter.
1642  *   @result Returns KERN_SUCCESS on success or an appropriate kern_return_t error code.
1643  */
1644 
1645 extern kern_return_t    kernel_thread_start(
1646 	thread_continue_t       continuation,
1647 	void                    *parameter,
1648 	thread_t                *new_thread);
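
/*
 * Example (sketch): spawning a kernel thread and dropping the returned
 * reference once it is no longer needed. worker_main and ctx are names
 * invented for this sketch.
 *
 *	static void
 *	worker_main(void *parameter, wait_result_t wr)
 *	{
 *		// ... thread body ...
 *	}
 *
 *	thread_t thread;
 *	kern_return_t kr = kernel_thread_start(worker_main, ctx, &thread);
 *	if (kr == KERN_SUCCESS) {
 *		thread_deallocate(thread);      // release the creation reference
 *	}
 */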
1649 
1650 __END_DECLS
1651 
1652 #endif  /* _KERN_THREAD_H_ */
1653