1 /*
2  * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_FREE_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	thread.h
60  *	Author:	Avadis Tevanian, Jr.
61  *
62  *	This file contains the structure definitions for threads.
63  *
64  */
65 /*
66  * Copyright (c) 1993 The University of Utah and
67  * the Computer Systems Laboratory (CSL).  All rights reserved.
68  *
69  * Permission to use, copy, modify and distribute this software and its
70  * documentation is hereby granted, provided that both the copyright
71  * notice and this permission notice appear in all copies of the
72  * software, derivative works or modified versions, and any portions
73  * thereof, and that both notices appear in supporting documentation.
74  *
75  * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
76  * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
77  * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
78  *
79  * CSL requests users of this software to return to [email protected] any
80  * improvements that they make and grant CSL redistribution rights.
81  *
82  */
83 
84 #ifndef _KERN_THREAD_H_
85 #define _KERN_THREAD_H_
86 
87 #include <mach/kern_return.h>
88 #include <mach/mach_types.h>
89 #include <mach/mach_param.h>
90 #include <mach/message.h>
91 #include <mach/boolean.h>
92 #include <mach/vm_param.h>
93 #include <mach/thread_info.h>
94 #include <mach/thread_status.h>
95 #include <mach/exception_types.h>
96 
97 #include <kern/kern_types.h>
98 #include <vm/vm_kern.h>
99 #include <sys/cdefs.h>
100 
101 #ifdef MACH_KERNEL_PRIVATE
102 #include <mach_assert.h>
103 #include <mach_ldebug.h>
104 
105 #include <ipc/ipc_types.h>
106 
107 #include <mach/port.h>
108 #include <kern/cpu_number.h>
109 #include <kern/smp.h>
110 #include <kern/queue.h>
111 
112 #include <kern/timer.h>
113 #include <kern/simple_lock.h>
114 #include <kern/locks.h>
115 #include <kern/sched.h>
116 #include <kern/sched_prim.h>
117 #include <mach/sfi_class.h>
118 #include <kern/thread_call.h>
119 #include <kern/thread_group.h>
120 #include <kern/timer_call.h>
121 #include <kern/task.h>
122 #include <kern/exception.h>
123 #include <kern/affinity.h>
124 #include <kern/debug.h>
125 #include <kern/block_hint.h>
126 #include <kern/turnstile.h>
127 #include <kern/mpsc_queue.h>
128 
129 #include <kern/waitq.h>
130 #include <san/kasan.h>
131 #include <san/kcov_data.h>
132 #include <os/refcnt.h>
133 
134 #include <ipc/ipc_kmsg.h>
135 
136 #include <machine/atomic.h>
137 #include <machine/cpu_data.h>
138 #include <machine/thread.h>
139 
140 #if MONOTONIC
141 #include <stdatomic.h>
142 #include <machine/monotonic.h>
143 #endif /* MONOTONIC */
144 #endif  /* MACH_KERNEL_PRIVATE */
145 #ifdef XNU_KERNEL_PRIVATE
146 /* priority queue static asserts fail for __ARM64_ARCH_8_32__ kext builds */
147 #include <kern/priority_queue.h>
148 #endif /* XNU_KERNEL_PRIVATE */
149 
150 __BEGIN_DECLS
151 
152 #ifdef XNU_KERNEL_PRIVATE
153 #if CONFIG_TASKWATCH
154 /* Taskwatch related. TODO: find this a better home */
155 typedef struct task_watcher task_watch_t;
156 #endif /* CONFIG_TASKWATCH */
157 
158 /* Thread tags; for easy identification. */
159 __options_closed_decl(thread_tag_t, uint16_t, {
160 	THREAD_TAG_MAINTHREAD   = 0x01,
161 	THREAD_TAG_CALLOUT      = 0x02,
162 	THREAD_TAG_IOWORKLOOP   = 0x04,
163 	THREAD_TAG_PTHREAD      = 0x10,
164 	THREAD_TAG_WORKQUEUE    = 0x20,
165 });
166 
167 __options_closed_decl(thread_ro_flags_t, uint16_t, {
168 	TRO_NONE                = 0x0000,
169 	TRO_SETUID              = 0x0001,
170 });
171 
172 typedef struct thread_ro *thread_ro_t;
173 
174 /*!
175  * @struct thread_ro
176  *
177  * @brief
178  * A structure allocated in a read only zone that safely
179  * represents the linkages of a thread to its cred, proc, task, ...
180  *
181  * @discussion
182  * The lifetime of a @c thread_ro structure is 1:1 with that
183  * of a @c thread_t or a @c uthread_t, and holding a thread reference
184  * always allows this structure to be dereferenced safely.
185  */
186 struct thread_ro {
187 	struct thread              *tro_owner;
188 #if MACH_BSD
189 	struct ucred               *tro_cred;
190 	struct proc                *tro_proc;
191 	struct proc_ro             *tro_proc_ro;
192 #endif
193 	struct task                *tro_task;
194 	thread_ro_flags_t           tro_flags;
195 
196 	struct ipc_port            *tro_self_port;
197 	struct ipc_port            *tro_settable_self_port;             /* send right */
198 	struct ipc_port            *tro_ports[THREAD_SELF_PORT_COUNT];  /* no right */
199 
200 	struct exception_action    *tro_exc_actions;
201 };
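
/*
 * Illustrative sketch (assumed usage, not compiled into the header): because a
 * thread_ro structure lives exactly as long as its thread, a caller holding a
 * thread reference may read the linkages directly, e.g. via get_thread_ro()
 * declared later in this file. The helper name below is hypothetical.
 */
#if 0
static task_t
example_thread_ro_task(thread_t thread)
{
	thread_ro_t tro = get_thread_ro(thread); /* safe while the thread reference is held */
	return tro->tro_task;
}
#endif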
202 
203 #endif /* XNU_KERNEL_PRIVATE */
204 #ifdef MACH_KERNEL_PRIVATE
205 
206 extern zone_t thread_ro_zone;
207 
208 __options_decl(thread_work_interval_flags_t, uint32_t, {
209 	TH_WORK_INTERVAL_FLAGS_NONE           = 0x0,
210 #if CONFIG_SCHED_AUTO_JOIN
211 	/* Flags indicating the status of the work interval the thread is currently part of */
212 	TH_WORK_INTERVAL_FLAGS_AUTO_JOIN_LEAK = 0x1,
213 #endif /* CONFIG_SCHED_AUTO_JOIN */
214 });
215 
216 struct thread {
217 #if MACH_ASSERT
218 #define THREAD_MAGIC 0x1234ABCDDCBA4321ULL
219 	/* Ensure nothing uses &thread as a queue entry */
220 	uint64_t                thread_magic;
221 #endif /* MACH_ASSERT */
222 
223 	/*
224 	 *	NOTE:	The runq field in the thread structure has an unusual
225 	 *	locking protocol.  If its value is PROCESSOR_NULL, then it is
226 	 *	locked by the thread_lock, but if its value is something else
227 	 *	then it is locked by the associated run queue lock. It is
228 	 *	set to PROCESSOR_NULL without holding the thread lock, but the
229 	 *	transition from PROCESSOR_NULL to non-null must be done
230 	 *	under the thread lock and the run queue lock.
231 	 *
232 	 *	New waitq APIs allow the 'links' and 'runq' fields to be
233 	 *	anywhere in the thread structure.
234 	 */
235 	union {
236 		queue_chain_t                   runq_links;             /* run queue links */
237 		queue_chain_t                   wait_links;             /* wait queue links */
238 		struct mpsc_queue_chain         mpsc_links;             /* thread daemon mpsc links */
239 		struct priority_queue_entry_sched wait_prioq_links;       /* priority ordered waitq links */
240 	};
241 
242 	event64_t               wait_event;     /* wait queue event */
243 	processor_t             runq;           /* run queue assignment */
244 	struct waitq           *waitq;          /* wait queue this thread is enqueued on */
245 	struct turnstile       *turnstile;      /* thread's turnstile, protected by primitives interlock */
246 	void                   *inheritor;      /* inheritor of the primitive the thread will block on */
247 	struct priority_queue_sched_max sched_inheritor_queue; /* Inheritor queue for kernel promotion */
248 	struct priority_queue_sched_max base_inheritor_queue; /* Inheritor queue for user promotion */
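
	/*
	 * Illustrative sketch of the runq locking discipline described in the
	 * NOTE above (an assumed pattern, not a definitive recipe):
	 *
	 *	thread_lock(thread);
	 *	if (thread->runq == PROCESSOR_NULL) {
	 *		// not enqueued: the field is stable under the thread lock
	 *	} else {
	 *		// enqueued: the associated run queue lock must also be
	 *		// taken before removing the thread or clearing the field
	 *	}
	 *	thread_unlock(thread);
	 */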
249 
250 #if CONFIG_SCHED_EDGE
251 	bool            th_bound_cluster_enqueued;
252 	bool            th_shared_rsrc_enqueued[CLUSTER_SHARED_RSRC_TYPE_COUNT];
253 	bool            th_shared_rsrc_heavy_user[CLUSTER_SHARED_RSRC_TYPE_COUNT];
254 	bool            th_shared_rsrc_heavy_perf_control[CLUSTER_SHARED_RSRC_TYPE_COUNT];
255 #endif /* CONFIG_SCHED_EDGE */
256 
257 #if CONFIG_SCHED_CLUTCH
258 	/*
259 	 * In the clutch scheduler, the threads are maintained in runqs at the clutch_bucket
260 	 * level (clutch_bucket defines a unique thread group and scheduling bucket pair). The
261 	 * thread is linked via a couple of linkages in the clutch bucket:
262 	 *
263 	 * - A stable priority queue linkage which is the main runqueue (based on sched_pri) for the clutch bucket
264 	 * - A regular priority queue linkage which is based on thread's base/promoted pri (used for clutch bucket priority calculation)
265 	 * - A queue linkage used for timesharing operations of threads at the scheduler tick
266 	 */
267 	struct priority_queue_entry_stable      th_clutch_runq_link;
268 	struct priority_queue_entry_sched       th_clutch_pri_link;
269 	queue_chain_t                           th_clutch_timeshare_link;
270 #endif /* CONFIG_SCHED_CLUTCH */
271 
272 	/* Data updated during assert_wait/thread_wakeup */
273 	decl_simple_lock_data(, sched_lock);     /* scheduling lock (thread_lock()) */
274 	decl_simple_lock_data(, wake_lock);      /* for thread stop / wait (wake_lock()) */
275 	uint16_t                options;        /* options set by thread itself */
276 #define TH_OPT_INTMASK          0x0003          /* interrupt / abort level */
277 #define TH_OPT_VMPRIV           0x0004          /* may allocate reserved memory */
278 #define TH_OPT_SYSTEM_CRITICAL  0x0010          /* Thread must always be allowed to run - even under heavy load */
279 #define TH_OPT_PROC_CPULIMIT    0x0020          /* Thread has a task-wide CPU limit applied to it */
280 #define TH_OPT_PRVT_CPULIMIT    0x0040          /* Thread has a thread-private CPU limit applied to it */
281 #define TH_OPT_IDLE_THREAD      0x0080          /* Thread is a per-processor idle thread */
282 #define TH_OPT_GLOBAL_FORCED_IDLE       0x0100  /* Thread performs forced idle for thermal control */
283 #define TH_OPT_SCHED_VM_GROUP   0x0200          /* Thread belongs to special scheduler VM group */
284 #define TH_OPT_HONOR_QLIMIT     0x0400          /* Thread will honor qlimit while sending mach_msg, regardless of MACH_SEND_ALWAYS */
285 #define TH_OPT_SEND_IMPORTANCE  0x0800          /* Thread will allow importance donation from kernel rpc */
286 #define TH_OPT_ZONE_PRIV        0x1000          /* Thread may use the zone replenish reserve */
287 #define TH_OPT_IPC_TG_BLOCKED   0x2000          /* Thread blocked in sync IPC and has made the thread group blocked callout */
288 
289 	bool                    wake_active;    /* wake event on stop */
290 	bool                    at_safe_point;  /* thread_abort_safely allowed */
291 	uint8_t                 sched_saved_run_weight;
292 #if DEVELOPMENT || DEBUG
293 	bool                    pmap_footprint_suspended;
294 #endif /* DEVELOPMENT || DEBUG */
295 	ast_t                   reason;         /* why we blocked */
296 	uint32_t                quantum_remaining;
297 	wait_result_t           wait_result;    /* outcome of wait -
298 	                                        * may be examined by this thread
299 	                                        * WITHOUT locking */
300 	thread_continue_t       continuation;   /* continue here next dispatch */
301 	void                   *parameter;      /* continuation parameter */
302 
303 	/* Data updated/used in thread_invoke */
304 	vm_offset_t             kernel_stack;   /* current kernel stack */
305 	vm_offset_t             reserved_stack; /* reserved kernel stack */
306 
307 	/*** Machine-dependent state ***/
308 	struct machine_thread   machine;
309 
310 #if KASAN
311 	struct kasan_thread_data kasan_data;
312 #endif
313 #if CONFIG_KCOV
314 	kcov_thread_data_t       kcov_data;
315 #endif
316 
317 	/* Thread state: */
318 	int                     state;
319 /*
320  *	Thread states [bits or'ed]
321  * All but TH_WAIT_REPORT are encoded in SS_TH_FLAGS
322  * All are encoded in kcdata.py ('ths_state')
323  */
324 #define TH_WAIT                 0x01            /* queued for waiting */
325 #define TH_SUSP                 0x02            /* stopped or requested to stop */
326 #define TH_RUN                  0x04            /* running or on runq */
327 #define TH_UNINT                0x08            /* waiting uninterruptibly */
328 #define TH_TERMINATE            0x10            /* halted at termination */
329 #define TH_TERMINATE2           0x20            /* added to termination queue */
330 #define TH_WAIT_REPORT          0x40            /* the wait is using the sched_call,
331 	                                        * only set if TH_WAIT is also set */
332 #define TH_IDLE                 0x80            /* idling processor */
333 
334 	/* Scheduling information */
335 	sched_mode_t            sched_mode;     /* scheduling mode */
336 	sched_mode_t            saved_mode;     /* saved mode during forced mode demotion */
337 
338 	/* This thread's contribution to global sched counters */
339 	sched_bucket_t          th_sched_bucket;
340 
341 	sfi_class_id_t          sfi_class;      /* SFI class (XXX Updated on CSW/QE/AST) */
342 	sfi_class_id_t          sfi_wait_class; /* Currently in SFI wait for this class, protected by sfi_lock */
343 
344 	uint32_t                sched_flags;            /* current flag bits */
345 #define TH_SFLAG_NO_SMT                 0x0001          /* On an SMT CPU, this thread must be scheduled alone */
346 #define TH_SFLAG_FAILSAFE               0x0002          /* fail-safe has tripped */
347 #define TH_SFLAG_THROTTLED              0x0004          /* throttled thread forced to timeshare mode (may be applied in addition to failsafe) */
348 #define TH_SFLAG_DEMOTED_MASK      (TH_SFLAG_THROTTLED | TH_SFLAG_FAILSAFE)     /* saved_mode contains previous sched_mode */
349 
350 #define TH_SFLAG_PROMOTED               0x0008          /* sched pri has been promoted by kernel mutex priority promotion */
351 #define TH_SFLAG_ABORT                  0x0010          /* abort interruptible waits */
352 #define TH_SFLAG_ABORTSAFELY            0x0020          /* ... but only those at safe point */
353 #define TH_SFLAG_ABORTED_MASK           (TH_SFLAG_ABORT | TH_SFLAG_ABORTSAFELY)
354 #define TH_SFLAG_DEPRESS                0x0040          /* normal depress yield */
355 #define TH_SFLAG_POLLDEPRESS            0x0080          /* polled depress yield */
356 #define TH_SFLAG_DEPRESSED_MASK         (TH_SFLAG_DEPRESS | TH_SFLAG_POLLDEPRESS)
357 /* unused TH_SFLAG_PRI_UPDATE           0x0100 */
358 #define TH_SFLAG_EAGERPREEMPT           0x0200          /* Any preemption of this thread should be treated as if AST_URGENT applied */
359 #define TH_SFLAG_RW_PROMOTED            0x0400          /* promote reason: blocking with RW lock held */
360 #define TH_SFLAG_BASE_PRI_FROZEN        0x0800          /* (effective) base_pri is frozen */
361 #define TH_SFLAG_WAITQ_PROMOTED         0x1000          /* promote reason: waitq wakeup (generally for IPC receive) */
362 
363 #if __AMP__
364 #define TH_SFLAG_ECORE_ONLY             0x2000          /* (unused) Bind thread to E core processor set */
365 #define TH_SFLAG_PCORE_ONLY             0x4000          /* (unused) Bind thread to P core processor set */
366 #endif
367 
368 #define TH_SFLAG_EXEC_PROMOTED          0x8000          /* promote reason: thread is in an exec */
369 
370 #define TH_SFLAG_THREAD_GROUP_AUTO_JOIN 0x10000         /* thread has been auto-joined to thread group */
371 #if __AMP__
372 #define TH_SFLAG_BOUND_SOFT             0x20000         /* thread is soft bound to a cluster; can run anywhere if bound cluster unavailable */
373 #endif /* __AMP__ */
374 
375 #if CONFIG_PREADOPT_TG
376 #define TH_SFLAG_REEVALUTE_TG_HIERARCHY_LATER 0x40000   /* thread needs to reevaluate its TG hierarchy */
377 #endif
378 
379 #define TH_SFLAG_FLOOR_PROMOTED               0x80000   /* promote reason: boost requested */
380 
381 /* 'promote reasons' that request a priority floor only, not a custom priority */
382 #define TH_SFLAG_PROMOTE_REASON_MASK    (TH_SFLAG_RW_PROMOTED | TH_SFLAG_WAITQ_PROMOTED | TH_SFLAG_EXEC_PROMOTED | TH_SFLAG_FLOOR_PROMOTED)
383 
384 	int16_t                 sched_pri;              /* scheduled (current) priority */
385 	int16_t                 base_pri;               /* effective base priority (equal to req_base_pri unless TH_SFLAG_BASE_PRI_FROZEN) */
386 	int16_t                 req_base_pri;           /* requested base priority */
387 	int16_t                 max_priority;           /* copy of max base priority */
388 	int16_t                 task_priority;          /* copy of task base priority */
389 	int16_t                 promotion_priority;     /* priority thread is currently promoted to */
390 	uint16_t                priority_floor_count;   /* number of pushes boosting the floor priority */
391 	int16_t                 suspend_count;          /* Kernel holds on this thread  */
392 
393 	int                     iotier_override;        /* atomic operations to set, cleared on ret to user */
394 	os_ref_atomic_t         ref_count;              /* number of references to me */
395 
396 	uint32_t                rwlock_count;           /* Number of lck_rw_t locks held by thread */
397 #ifdef DEBUG_RW
398 	rw_lock_debug_t         rw_lock_held;           /* rw_locks currently held by the thread */
399 #endif /* DEBUG_RW */
400 
401 	integer_t               importance;             /* task-relative importance */
402 
403 	/* Priority depression expiration */
404 	integer_t               depress_timer_active;
405 	timer_call_t            depress_timer;
406 
407 	/* real-time parameters */
408 	struct {                                        /* see mach/thread_policy.h */
409 		uint32_t            period;
410 		uint32_t            computation;
411 		uint32_t            constraint;
412 		bool                preemptible;
413 		uint8_t             priority_offset;   /* base_pri = BASEPRI_RTQUEUES + priority_offset */
414 		uint64_t            deadline;
415 	}                       realtime;
416 
417 	uint64_t                last_run_time;          /* time when thread was switched away from */
418 	uint64_t                last_made_runnable_time;        /* time when thread was unblocked or preempted */
419 	uint64_t                last_basepri_change_time;       /* time when thread was last changed in basepri while runnable */
420 	uint64_t                same_pri_latency;
421 	/*
422 	 * workq_quantum_deadline is the workq thread's next runtime deadline. This
423 	 * value is set to 0 if the thread has no such deadline applicable to it.
424 	 *
425 	 * The synchronization for this field follows from how it is modified:
426 	 * 1) The field is modified either by the thread on itself, or by another
427 	 * thread while this thread is not running or runnable
428 	 * 2) Change of this field is immediately followed by a
429 	 * corresponding change to the AST_KEVENT to either set or clear the
430 	 * AST_KEVENT_WORKQ_QUANTUM_EXPIRED bit
431 	 *
432 	 * workq_quantum_deadline can be modified by the thread on itself during
433 	 * interrupt context. However, due to (2), and because the change to the
434 	 * AST_KEVENT field is volatile, the compiler must preserve the order
435 	 * between the write to workq_quantum_deadline and the write to the kevent
436 	 * field, which provides the required synchronization.
437 	 */
438 	uint64_t                workq_quantum_deadline;
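	/*
	 * Illustrative ordering sketch, assumed from the comment above (the exact
	 * kevent AST helper is not named in this header):
	 *
	 *	thread->workq_quantum_deadline = new_deadline;
	 *	// ... immediately followed by setting or clearing
	 *	// AST_KEVENT_WORKQ_QUANTUM_EXPIRED; the volatile kevent update keeps
	 *	// the compiler from reordering the store above past it
	 */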
439 
440 #if WORKQ_QUANTUM_HISTORY_DEBUG
441 
442 #define WORKQ_QUANTUM_HISTORY_COUNT 16
443 	struct workq_quantum_history {
444 		uint64_t time;
445 		uint64_t deadline;
446 		bool arm;
447 	} workq_quantum_history[WORKQ_QUANTUM_HISTORY_COUNT];
448 	uint64_t workq_quantum_history_index;
449 
450 #define WORKQ_QUANTUM_HISTORY_WRITE_ENTRY(thread, ...)  ({\
451 	        thread_t __th = (thread); \
452 	        uint64_t __index = os_atomic_inc_orig(&__th->workq_quantum_history_index, relaxed); \
453 	        struct workq_quantum_history _wq_quantum_history = { mach_approximate_time(), __VA_ARGS__}; \
454 	        __th->workq_quantum_history[__index % WORKQ_QUANTUM_HISTORY_COUNT] = \
455 	                        (struct workq_quantum_history) _wq_quantum_history; \
456 	})
457 #else /* WORKQ_QUANTUM_HISTORY_DEBUG */
458 #define WORKQ_QUANTUM_HISTORY_WRITE_ENTRY(thread, ...)
459 #endif /* WORKQ_QUANTUM_HISTORY_DEBUG */
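
/*
 * Illustrative sketch (assumed usage) when the debug history is compiled in:
 * record that a workq quantum was armed for `deadline` on the current thread.
 *
 *	WORKQ_QUANTUM_HISTORY_WRITE_ENTRY(current_thread(), deadline, true);
 */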
460 
461 #define THREAD_NOT_RUNNABLE (~0ULL)
462 
463 #if CONFIG_THREAD_GROUPS
464 	struct thread_group     *thread_group;
465 #endif
466 
467 #if defined(CONFIG_SCHED_MULTIQ)
468 	sched_group_t           sched_group;
469 #endif /* defined(CONFIG_SCHED_MULTIQ) */
470 
471 	/* Data used during setrun/dispatch */
472 	timer_data_t            system_timer;           /* system mode timer */
473 	processor_t             bound_processor;        /* bound to a processor? */
474 	processor_t             last_processor;         /* processor last dispatched on */
475 	processor_t             chosen_processor;       /* Where we want to run this thread */
476 
477 	/* Fail-safe computation since last unblock or qualifying yield */
478 	uint64_t                computation_metered;
479 	uint64_t                computation_epoch;
480 	uint64_t                safe_release;           /* when to release fail-safe */
481 
482 	/* Call out from scheduler */
483 	void                  (*sched_call)(int type, thread_t thread);
484 
485 #if defined(CONFIG_SCHED_PROTO)
486 	uint32_t                runqueue_generation;    /* last time runqueue was drained */
487 #endif
488 
489 	/* Statistics and timesharing calculations */
490 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
491 	natural_t               sched_stamp;            /* last scheduler tick */
492 	natural_t               sched_usage;            /* timesharing cpu usage [sched] */
493 	natural_t               pri_shift;              /* usage -> priority from pset */
494 	natural_t               cpu_usage;              /* instrumented cpu usage [%cpu] */
495 	natural_t               cpu_delta;              /* accumulated cpu_usage delta */
496 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
497 
498 	uint32_t                c_switch;               /* total context switches */
499 	uint32_t                p_switch;               /* total processor switches */
500 	uint32_t                ps_switch;              /* total pset switches */
501 
502 	integer_t mutex_count;  /* total count of locks held */
503 	/* Timing data structures */
504 	int                     precise_user_kernel_time; /* precise user/kernel enabled for this thread */
505 	timer_data_t            user_timer;             /* user mode timer */
506 	uint64_t                user_timer_save;        /* saved user timer value */
507 	uint64_t                system_timer_save;      /* saved system timer value */
508 	uint64_t                vtimer_user_save;       /* saved values for vtimers */
509 	uint64_t                vtimer_prof_save;
510 	uint64_t                vtimer_rlim_save;
511 	uint64_t                vtimer_qos_save;
512 
513 	timer_data_t            ptime;                  /* time executing in P mode */
514 	timer_data_t            runnable_timer;         /* time the thread is runnable (including running) */
515 
516 #if CONFIG_SCHED_SFI
517 	/* Timing for wait state */
518 	uint64_t                wait_sfi_begin_time;    /* start time for thread waiting in SFI */
519 #endif
520 
521 	/*
522 	 * Processor/cache affinity
523 	 * - affinity_threads links task threads with the same affinity set
524 	 */
525 	queue_chain_t           affinity_threads;
526 	affinity_set_t          affinity_set;
527 
528 #if CONFIG_TASKWATCH
529 	task_watch_t           *taskwatch;              /* task watch */
530 #endif /* CONFIG_TASKWATCH */
531 
532 	/* Various bits of state to stash across a continuation, exclusive to the current thread block point */
533 	union {
534 		struct {
535 			mach_msg_return_t       state;          /* receive state */
536 			mach_port_seqno_t       seqno;          /* seqno of recvd message */
537 			ipc_object_t            object;         /* object received on */
538 			vm_address_t            msg_addr;       /* receive buffer pointer */
539 			mach_msg_size_t         rsize;          /* max size for recvd msg */
540 			mach_msg_size_t         msize;          /* actual size for recvd msg */
541 			mach_msg_option_t       option;         /* options for receive */
542 			mach_port_name_t        receiver_name;  /* the receive port name */
543 			struct knote            *knote;         /* knote fired for rcv */
544 			union {
545 				struct ipc_kmsg   *kmsg;        /* received message */
546 				struct ipc_mqueue *peekq;       /* mqueue to peek at */
547 				struct {
548 					uint32_t       ppri;    /* received message pthread_priority_t */
549 					mach_msg_qos_t oqos;    /* override qos for message */
550 				} received_qos;
551 			};
552 			mach_msg_continue_t     continuation;
553 		} receive;
554 		struct {
555 			struct semaphore        *waitsemaphore;         /* semaphore ref */
556 			struct semaphore        *signalsemaphore;       /* semaphore ref */
557 			int                     options;                /* semaphore options */
558 			kern_return_t           result;                 /* primary result */
559 			mach_msg_continue_t continuation;
560 		} sema;
561 		struct {
562 #define THREAD_SAVE_IOKIT_TLS_COUNT     8
563 			void                    *tls[THREAD_SAVE_IOKIT_TLS_COUNT];
564 		} iokit;
565 	} saved;
566 
567 	/* Only user threads can cause guard exceptions; only kernel threads can be thread call threads */
568 	union {
569 		/* Thread call thread's state structure, stored on its stack */
570 		struct thread_call_thread_state *thc_state;
571 
572 		/* Structure to save information about guard exception */
573 		struct {
574 			mach_exception_code_t           code;
575 			mach_exception_subcode_t        subcode;
576 		} guard_exc_info;
577 	};
578 
579 	/* User level suspensions */
580 	int32_t                 user_stop_count;
581 
582 	/* IPC data structures */
583 #if IMPORTANCE_INHERITANCE
584 	natural_t ith_assertions;                       /* assertions pending drop */
585 #endif
586 	struct ipc_kmsg_queue ith_messages;             /* messages to reap */
587 	mach_port_t ith_kernel_reply_port;              /* reply port for kernel RPCs */
588 
589 	/* Pending thread ast(s) */
590 	ast_t                   ast;
591 
592 	/* Ast/Halt data structures */
593 	vm_offset_t             recover;                /* page fault recovery (copyin/out) */
594 
595 	queue_chain_t           threads;                /* global list of all threads */
596 
597 	/* Activation */
598 	queue_chain_t           task_threads;
599 
600 	/* Task membership */
601 #if __x86_64__ || __arm__
602 	struct task            *t_task;
603 #endif
604 	struct thread_ro       *t_tro;
605 	vm_map_t                map;
606 	thread_t                handoff_thread;
607 
608 	/* Timed wait expiration */
609 	timer_call_t            wait_timer;
610 	uint16_t                wait_timer_active;
611 	bool                    wait_timer_is_set;
612 
613 	/* Miscellaneous bits guarded by mutex */
614 	uint32_t
615 	    active:1,           /* Thread is active and has not been terminated */
616 	    ipc_active:1,       /* IPC with the thread ports is allowed */
617 	    started:1,          /* Thread has been started after creation */
618 	    static_param:1,     /* Disallow policy parameter changes */
619 	    inspection:1,       /* TRUE when task is being inspected by crash reporter */
620 	    policy_reset:1,     /* Disallow policy parameter changes on terminating threads */
621 	    suspend_parked:1,   /* thread parked in thread_suspended */
622 	    corpse_dup:1,       /* TRUE when thread is an inactive duplicate in a corpse */
623 	:0;
624 
625 	decl_lck_mtx_data(, mutex);
626 
627 	struct ipc_port         *ith_special_reply_port;   /* ref to special reply port */
628 
629 #if CONFIG_DTRACE
630 	uint16_t                t_dtrace_flags;         /* DTrace thread states */
631 #define TH_DTRACE_EXECSUCCESS   0x01
632 	uint16_t                t_dtrace_inprobe;       /* Executing under dtrace_probe */
633 	uint32_t                t_dtrace_predcache;     /* DTrace per thread predicate value hint */
634 	int64_t                 t_dtrace_tracing;       /* Thread time under dtrace_probe() */
635 	int64_t                 t_dtrace_vtime;
636 #endif
637 
638 	clock_sec_t             t_page_creation_time;
639 	uint32_t                t_page_creation_count;
640 	uint32_t                t_page_creation_throttled;
641 #if (DEVELOPMENT || DEBUG)
642 	uint64_t                t_page_creation_throttled_hard;
643 	uint64_t                t_page_creation_throttled_soft;
644 #endif /* DEVELOPMENT || DEBUG */
645 	int                     t_pagein_error;         /* for vm_fault(), holds error from vnop_pagein() */
646 
647 #ifdef KPERF
648 /* The high 8 bits are the number of frames to sample from a user callstack. */
649 #define T_KPERF_CALLSTACK_DEPTH_OFFSET     (24)
650 #define T_KPERF_SET_CALLSTACK_DEPTH(DEPTH) (((uint32_t)(DEPTH)) << T_KPERF_CALLSTACK_DEPTH_OFFSET)
651 #define T_KPERF_GET_CALLSTACK_DEPTH(FLAGS) ((FLAGS) >> T_KPERF_CALLSTACK_DEPTH_OFFSET)
652 #define T_KPERF_ACTIONID_OFFSET            (18)
653 #define T_KPERF_SET_ACTIONID(AID)          (((uint32_t)(AID)) << T_KPERF_ACTIONID_OFFSET)
654 #define T_KPERF_GET_ACTIONID(FLAGS)        ((FLAGS) >> T_KPERF_ACTIONID_OFFSET)
655 #endif
656 
657 #define T_KPERF_AST_CALLSTACK 0x1 /* dump a callstack on thread's next AST */
658 #define T_KPERF_AST_DISPATCH  0x2 /* dump a name on thread's next AST */
659 #define T_KPC_ALLOC           0x4 /* thread needs a kpc_buf allocated */
660 
661 #define T_KPERF_AST_ALL \
662     (T_KPERF_AST_CALLSTACK | T_KPERF_AST_DISPATCH | T_KPC_ALLOC)
663 /* only go up to T_KPERF_ACTIONID_OFFSET - 1 */
664 
665 #ifdef KPERF
666 	uint32_t                kperf_ast;
667 	uint32_t                kperf_pet_gen;  /* last generation of PET that sampled this thread */
668 	uint32_t                kperf_c_switch; /* last dispatch detection */
669 	uint32_t                kperf_pet_cnt;  /* how many times a thread has been sampled by PET */
670 #endif
671 
672 #ifdef KPC
673 	/* accumulated performance counters for this thread */
674 	uint64_t               *kpc_buf;
675 #endif
676 
677 #if HYPERVISOR
678 	/* hypervisor virtual CPU object associated with this thread */
679 	void                   *hv_thread_target;
680 #endif /* HYPERVISOR */
681 
682 	/* Statistics accumulated per-thread and aggregated per-task */
683 	uint32_t                syscalls_unix;
684 	uint32_t                syscalls_mach;
685 	ledger_t                t_ledger;
686 	ledger_t                t_threadledger; /* per thread ledger */
687 	ledger_t                t_bankledger;                /* ledger to charge someone */
688 	uint64_t                t_deduct_bank_ledger_time;   /* cpu time to be deducted from bank ledger */
689 	uint64_t                t_deduct_bank_ledger_energy; /* energy to be deducted from bank ledger */
690 
691 	uint64_t                thread_id;             /* system wide unique thread-id */
692 
693 #if MONOTONIC
694 	struct mt_thread        t_monotonic;
695 #endif /* MONOTONIC */
696 
697 	/* policy is protected by the thread mutex */
698 	struct thread_requested_policy  requested_policy;
699 	struct thread_effective_policy  effective_policy;
700 
701 	/* usynch override is protected by the task lock, eventually will be thread mutex */
702 	struct thread_qos_override {
703 		struct thread_qos_override      *override_next;
704 		uint32_t        override_contended_resource_count;
705 		int16_t         override_qos;
706 		int16_t         override_resource_type;
707 		user_addr_t     override_resource;
708 	} *overrides;
709 
710 	uint32_t                kevent_overrides;
711 	uint8_t                 user_promotion_basepri;
712 	uint8_t                 kern_promotion_schedpri;
713 	_Atomic uint16_t        kevent_ast_bits;
714 
715 	io_stat_info_t          thread_io_stats; /* per-thread I/O statistics */
716 
717 	uint32_t                thread_callout_interrupt_wakeups;
718 	uint32_t                thread_callout_platform_idle_wakeups;
719 	uint32_t                thread_timer_wakeups_bin_1;
720 	uint32_t                thread_timer_wakeups_bin_2;
721 	thread_tag_t            thread_tag;
722 
723 	/*
724 	 * callout_* fields are only set for thread call threads whereas guard_exc_fatal is set
725 	 * by user threads on themselves while taking a guard exception. So it's okay for them to
726 	 * share this bitfield.
727 	 */
728 	uint16_t
729 	    callout_woken_from_icontext:1,
730 	    callout_woken_from_platform_idle:1,
731 	    callout_woke_thread:1,
732 	    guard_exc_fatal:1,
733 	    thread_bitfield_unused:12;
734 
735 	mach_port_name_t        ith_voucher_name;
736 	ipc_voucher_t           ith_voucher;
737 
738 #if CONFIG_THREAD_GROUPS
739 #if CONFIG_PREADOPT_TG
740 	/* The preadopt thread group is set on the thread
741 	 *
742 	 *   a) By another thread when it is a creator and it is scheduled with the
743 	 *   thread group on the TR
744 	 *   b) On itself when it binds a thread request and becomes a
745 	 *   servicer or when it rebinds to the thread request
746 	 *   c) On itself when it processes knotes and finds the first
747 	 *   EVFILT_MACHPORT event to deliver to userspace
748 	 *
749 	 * Note that this is a full reference owned by the thread_t and not a
750 	 * borrowed reference.
751 	 *
752 	 * This reference is cleared from the thread_t by the thread itself at the
753 	 * following times:
754 	 *   a) When it explicitly adopts a work interval or a bank voucher
755 	 *   b) If it still exists on the thread, after it has unbound and is about
756 	 *   to park
757 	 *   c) During thread termination if one still exists
758 	 *   d) When a different preadoption thread group is set on the thread
759 	 *
760 	 * It is modified under the thread lock.
761 	 */
762 	struct thread_group     *preadopt_thread_group;
763 
764 	/* This field here is present in order to make sure that the t->thread_group
765 	 * is always pointing to a valid thread group and isn't a dangling pointer.
766 	 *
767 	 * Consider the following scenario:
768 	 *	a) t->thread_group points to the preadoption thread group
769 	 *	b) The preadoption thread group is modified on the thread but we are
770 	 *	unable to resolve the hierarchy immediately due to the current state of
771 	 *	the thread
772 	 *
773 	 *	In order to make sure that t->thread_group points to a valid thread
774 	 *	group until we can resolve the hierarchy again, we save the existing
775 	 *	thread_group it points to in old_preadopt_thread_group. The next time a
776 	 *	hierarchy resolution is done, we know that t->thread_group will not point
777 	 *	to this field anymore so we can clear it.
778 	 *
779 	 *	 This field is always going to take the reference that was previously in
780 	 *	 preadopt_thread_group so it will have a full +1
781 	 */
782 	struct thread_group     *old_preadopt_thread_group;
783 #endif /* CONFIG_PREADOPT_TG */
784 
785 	/* This is a borrowed reference to the TG from the ith_voucher and is saved
786 	 * here since we may not always be in the right context to be able to do
787 	 * the lookups.
788 	 *
789 	 * It is always set on self under the thread lock */
790 	struct thread_group     *bank_thread_group;
791 
792 	/*  Whether this is the autojoin thread group or the work interval thread
793 	 *  group depends on whether the thread's sched_flags has the
794 	 *  TH_SFLAG_THREAD_GROUP_AUTO_JOIN bit set */
795 	union {
796 		/* This is a borrowed reference to the auto join thread group from the
797 		 * work_interval. It is set with the thread lock held */
798 		struct thread_group             *auto_join_thread_group;
799 		/* This is a borrowed reference to the explicit work_interval thread group
800 		 * and is always set on self */
801 		struct thread_group             *work_interval_thread_group;
802 	};
803 #endif /* CONFIG_THREAD_GROUPS */
804 
805 	/* work interval (if any) associated with the thread. Only modified by
806 	 * the current thread on itself, or by another thread while this thread is
807 	 * held off the runq */
808 	struct work_interval            *th_work_interval;
809 	thread_work_interval_flags_t    th_work_interval_flags;
810 
811 #define THREAD_BOUND_CLUSTER_NONE       (UINT32_MAX)
812 	uint32_t                 th_bound_cluster_id;
813 
814 #if SCHED_TRACE_THREAD_WAKEUPS
815 	uintptr_t               thread_wakeup_bt[64];
816 #endif
817 	turnstile_update_flags_t inheritor_flags; /* inheritor flags for inheritor field */
818 	block_hint_t            pending_block_hint;
819 	block_hint_t            block_hint;      /* What type of primitive last caused us to block. */
820 	uint32_t                decompressions;  /* Per-thread decompressions counter to be added to per-task decompressions counter */
821 	int                     thread_region_page_shift; /* Page shift that this thread would like to use when */
822 	                                                  /* introspecting a task. This is currently being used */
823 	                                                  /* by footprint which uses a thread for each task being inspected. */
824 #if CONFIG_IOSCHED
825 	void                   *decmp_upl;
826 #endif /* CONFIG_IOSCHED */
827 };
828 
829 #define ith_state           saved.receive.state
830 #define ith_object          saved.receive.object
831 #define ith_msg_addr        saved.receive.msg_addr
832 #define ith_rsize           saved.receive.rsize
833 #define ith_msize           saved.receive.msize
834 #define ith_option          saved.receive.option
835 #define ith_receiver_name   saved.receive.receiver_name
836 #define ith_continuation    saved.receive.continuation
837 #define ith_kmsg            saved.receive.kmsg
838 #define ith_peekq           saved.receive.peekq
839 #define ith_knote           saved.receive.knote
840 #define ith_ppriority       saved.receive.received_qos.ppri
841 #define ith_qos_override    saved.receive.received_qos.oqos
842 #define ith_seqno           saved.receive.seqno
843 
844 #define sth_waitsemaphore   saved.sema.waitsemaphore
845 #define sth_signalsemaphore saved.sema.signalsemaphore
846 #define sth_options         saved.sema.options
847 #define sth_result          saved.sema.result
848 #define sth_continuation    saved.sema.continuation
849 
850 #define ITH_KNOTE_NULL      ((void *)NULL)
851 #define ITH_KNOTE_PSEUDO    ((void *)0xdeadbeef)
852 /*
853  * The ith_knote is used during message delivery, and can safely be interpreted
854  * only on one of those codepaths; that is what the check for the msgt_name
855  * being RECEIVE or SEND_ONCE is for.
856  */
857 #define ITH_KNOTE_VALID(kn, msgt_name) \
858 	        (((kn) != ITH_KNOTE_NULL && (kn) != ITH_KNOTE_PSEUDO) && \
859 	         ((msgt_name) == MACH_MSG_TYPE_PORT_RECEIVE || \
860 	         (msgt_name) == MACH_MSG_TYPE_PORT_SEND_ONCE))
861 
862 #if MACH_ASSERT
863 #define assert_thread_magic(thread) assertf((thread)->thread_magic == THREAD_MAGIC, \
864 	                                    "bad thread magic 0x%llx for thread %p, expected 0x%llx", \
865 	                                    (thread)->thread_magic, (thread), THREAD_MAGIC)
866 #else
867 #define assert_thread_magic(thread) do { (void)(thread); } while (0)
868 #endif
869 
870 extern thread_t                 thread_bootstrap(void);
871 
872 extern void                     thread_machine_init_template(void);
873 
874 extern void                     thread_init(void);
875 
876 extern void                     thread_daemon_init(void);
877 
878 extern void                     thread_reference(
879 	thread_t                thread);
880 
881 extern void                     thread_deallocate(
882 	thread_t                thread);
883 
884 extern void                     thread_inspect_deallocate(
885 	thread_inspect_t        thread);
886 
887 extern void                     thread_read_deallocate(
888 	thread_read_t           thread);
889 
890 extern void                     thread_terminate_self(void);
891 
892 extern kern_return_t    thread_terminate_internal(
893 	thread_t                    thread);
894 
895 extern void                     thread_start(
896 	thread_t                        thread) __attribute__ ((noinline));
897 
898 extern void                     thread_start_in_assert_wait(
899 	thread_t                        thread,
900 	event_t             event,
901 	wait_interrupt_t    interruptible) __attribute__ ((noinline));
902 
903 extern void                     thread_terminate_enqueue(
904 	thread_t                thread);
905 
906 extern void                     thread_exception_enqueue(
907 	task_t          task,
908 	thread_t        thread,
909 	exception_type_t etype);
910 
911 extern void                     thread_copy_resource_info(
912 	thread_t dst_thread,
913 	thread_t src_thread);
914 
915 extern void                     thread_terminate_crashed_threads(void);
916 
917 extern void                     thread_stack_enqueue(
918 	thread_t                thread);
919 
920 extern void                     thread_hold(
921 	thread_t        thread);
922 
923 extern void                     thread_release(
924 	thread_t        thread);
925 
926 extern void                     thread_corpse_continue(void) __dead2;
927 
928 extern boolean_t                thread_is_active(thread_t thread);
929 
930 extern lck_grp_t                thread_lck_grp;
931 
932 /* Locking for scheduler state, always acquired with interrupts disabled (splsched()) */
933 #define thread_lock_init(th)    simple_lock_init(&(th)->sched_lock, 0)
934 #define thread_lock(th)                 simple_lock(&(th)->sched_lock, &thread_lck_grp)
935 #define thread_unlock(th)               simple_unlock(&(th)->sched_lock)
936 
937 #define wake_lock_init(th)              simple_lock_init(&(th)->wake_lock, 0)
938 #define wake_lock(th)                   simple_lock(&(th)->wake_lock, &thread_lck_grp)
939 #define wake_unlock(th)                 simple_unlock(&(th)->wake_lock)
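
/*
 * Illustrative sketch (assumed canonical pattern, not compiled): the scheduler
 * state locks above are always taken with interrupts disabled.
 */
#if 0
	spl_t s = splsched();
	thread_lock(thread);
	/* ... examine or update the thread's scheduler state ... */
	thread_unlock(thread);
	splx(s);
#endif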
940 
941 #define thread_should_halt_fast(thread)         (!(thread)->active)
942 
943 extern void                             stack_alloc(
944 	thread_t                thread);
945 
946 extern void                     stack_handoff(
947 	thread_t                from,
948 	thread_t                to);
949 
950 extern void                             stack_free(
951 	thread_t                thread);
952 
953 extern void                             stack_free_reserved(
954 	thread_t                thread);
955 
956 extern boolean_t                stack_alloc_try(
957 	thread_t            thread);
958 
959 extern void                             stack_collect(void);
960 
961 extern kern_return_t    thread_info_internal(
962 	thread_t                                thread,
963 	thread_flavor_t                 flavor,
964 	thread_info_t                   thread_info_out,
965 	mach_msg_type_number_t  *thread_info_count);
966 
967 
968 
969 extern kern_return_t    kernel_thread_create(
970 	thread_continue_t       continuation,
971 	void                            *parameter,
972 	integer_t                       priority,
973 	thread_t                        *new_thread);
974 
975 extern kern_return_t    kernel_thread_start_priority(
976 	thread_continue_t       continuation,
977 	void                            *parameter,
978 	integer_t                       priority,
979 	thread_t                        *new_thread);
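
/*
 * Illustrative sketch (assumed usage, not compiled): start a kernel thread at
 * a given priority; my_continuation and my_parameter are hypothetical. The new
 * thread is returned with a reference that the caller drops when done with it.
 */
#if 0
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start_priority(my_continuation, my_parameter,
	    BASEPRI_KERNEL, &thread);
	if (kr == KERN_SUCCESS) {
		thread_deallocate(thread);
	}
#endif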
980 
981 extern void                             machine_stack_attach(
982 	thread_t                thread,
983 	vm_offset_t             stack);
984 
985 extern vm_offset_t              machine_stack_detach(
986 	thread_t                thread);
987 
988 extern void                             machine_stack_handoff(
989 	thread_t                old,
990 	thread_t                new);
991 
992 extern thread_t                 machine_switch_context(
993 	thread_t                        old_thread,
994 	thread_continue_t       continuation,
995 	thread_t                        new_thread);
996 
997 extern void                             machine_load_context(
998 	thread_t                thread) __attribute__((noreturn));
999 
1000 extern void             machine_thread_state_initialize(
1001 	thread_t                                thread);
1002 
1003 extern kern_return_t    machine_thread_set_state(
1004 	thread_t                                thread,
1005 	thread_flavor_t                 flavor,
1006 	thread_state_t                  state,
1007 	mach_msg_type_number_t  count);
1008 
1009 extern mach_vm_address_t machine_thread_pc(
1010 	thread_t                thread);
1011 
1012 extern void machine_thread_reset_pc(
1013 	thread_t                thread,
1014 	mach_vm_address_t       pc);
1015 
1016 extern boolean_t        machine_thread_on_core(
1017 	thread_t                thread);
1018 
1019 extern kern_return_t    machine_thread_get_state(
1020 	thread_t                                thread,
1021 	thread_flavor_t                 flavor,
1022 	thread_state_t                  state,
1023 	mach_msg_type_number_t  *count);
1024 
1025 extern kern_return_t    machine_thread_state_convert_from_user(
1026 	thread_t                                thread,
1027 	thread_flavor_t                 flavor,
1028 	thread_state_t                  tstate,
1029 	mach_msg_type_number_t  count);
1030 
1031 extern kern_return_t    machine_thread_state_convert_to_user(
1032 	thread_t                                thread,
1033 	thread_flavor_t                 flavor,
1034 	thread_state_t                  tstate,
1035 	mach_msg_type_number_t  *count);
1036 
1037 extern kern_return_t    machine_thread_dup(
1038 	thread_t                self,
1039 	thread_t                target,
1040 	boolean_t               is_corpse);
1041 
1042 extern void             machine_thread_init(void);
1043 
1044 extern void             machine_thread_template_init(thread_t thr_template);
1045 
1046 
1047 extern void             machine_thread_create(
1048 	thread_t                thread,
1049 	task_t                  task,
1050 	bool                    first_thread);
1051 extern void             machine_thread_switch_addrmode(
1052 	thread_t                 thread);
1053 
1054 extern void                 machine_thread_destroy(
1055 	thread_t                thread);
1056 
1057 extern void                             machine_set_current_thread(
1058 	thread_t                        thread);
1059 
1060 extern kern_return_t    machine_thread_get_kern_state(
1061 	thread_t                                thread,
1062 	thread_flavor_t                 flavor,
1063 	thread_state_t                  tstate,
1064 	mach_msg_type_number_t  *count);
1065 
1066 extern kern_return_t    machine_thread_inherit_taskwide(
1067 	thread_t                thread,
1068 	task_t                  parent_task);
1069 
1070 extern kern_return_t    machine_thread_set_tsd_base(
1071 	thread_t                                thread,
1072 	mach_vm_offset_t                tsd_base);
1073 
1074 #define thread_mtx_try(thread)                  lck_mtx_try_lock(&(thread)->mutex)
1075 #define thread_mtx_held(thread)                 lck_mtx_assert(&(thread)->mutex, LCK_MTX_ASSERT_OWNED)
1076 
1077 extern void thread_apc_ast(thread_t thread);
1078 
1079 extern void thread_update_qos_cpu_time(thread_t thread);
1080 
1081 void act_machine_sv_free(thread_t, int);
1082 
1083 vm_offset_t                     min_valid_stack_address(void);
1084 vm_offset_t                     max_valid_stack_address(void);
1085 
1086 extern bool thread_no_smt(thread_t thread);
1087 extern bool processor_active_thread_no_smt(processor_t processor);
1088 
1089 extern void thread_set_options(uint32_t thopt);
1090 
1091 #if CONFIG_THREAD_GROUPS
1092 struct thread_group *thread_get_current_voucher_thread_group(thread_t thread);
1093 #endif /* CONFIG_THREAD_GROUPS */
1094 
1095 #endif  /* MACH_KERNEL_PRIVATE */
1096 #if BSD_KERNEL_PRIVATE
1097 
1098 /* Duplicated from osfmk/kern/ipc_tt.h */
1099 __options_decl(port_intrans_options_t, uint32_t, {
1100 	PORT_INTRANS_OPTIONS_NONE              = 0x0000,
1101 	PORT_INTRANS_THREAD_IN_CURRENT_TASK    = 0x0001,
1102 	PORT_INTRANS_THREAD_NOT_CURRENT_THREAD = 0x0002,
1103 
1104 	PORT_INTRANS_SKIP_TASK_EVAL            = 0x0004,
1105 	PORT_INTRANS_ALLOW_CORPSE_TASK         = 0x0008,
1106 });
1107 
1108 extern thread_t port_name_to_thread(
1109 	mach_port_name_t            port_name,
1110 	port_intrans_options_t    options);
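
/*
 * Illustrative sketch (assumed usage): translate a port name in the current
 * space into a thread reference, which the caller must drop when done.
 *
 *	thread_t thread = port_name_to_thread(name,
 *	    PORT_INTRANS_THREAD_IN_CURRENT_TASK);
 *	if (thread != THREAD_NULL) {
 *		// ... use the thread ...
 *		thread_deallocate(thread);
 *	}
 */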
1111 
1112 #endif /* BSD_KERNEL_PRIVATE */
1113 #ifdef XNU_KERNEL_PRIVATE
1114 
1115 extern void                     thread_require(
1116 	thread_t        thread);
1117 
1118 extern void                     thread_deallocate_safe(
1119 	thread_t                thread);
1120 
1121 extern uint64_t                 thread_rettokern_addr(
1122 	thread_t thread);
1123 
1124 extern uint64_t                 thread_wqquantum_addr(
1125 	thread_t thread);
1126 
1127 extern integer_t        thread_kern_get_pri(thread_t thr) __pure2;
1128 
1129 extern void             thread_kern_set_pri(thread_t thr, integer_t pri);
1130 
1131 extern integer_t        thread_kern_get_kernel_maxpri(void) __pure2;
1132 
1133 uint16_t        thread_set_tag(thread_t thread, uint16_t tag);
1134 uint16_t        thread_get_tag(thread_t thread);
1135 
1136 __options_decl(shared_rsrc_policy_agent_t, uint32_t, {
1137 	SHARED_RSRC_POLICY_AGENT_DISPATCH = 0,
1138 	SHARED_RSRC_POLICY_AGENT_SYSCTL = 1,
1139 	SHARED_RSRC_POLICY_AGENT_PERFCTL_CSW = 2,
1140 	SHARED_RSRC_POLICY_AGENT_PERFCTL_QUANTUM = 3,
1141 });
1142 
1143 boolean_t       thread_shared_rsrc_policy_get(thread_t thread, cluster_shared_rsrc_type_t type);
1144 kern_return_t   thread_shared_rsrc_policy_set(thread_t thread, uint32_t index, cluster_shared_rsrc_type_t type, shared_rsrc_policy_agent_t agent);
1145 kern_return_t   thread_shared_rsrc_policy_clear(thread_t thread, cluster_shared_rsrc_type_t type, shared_rsrc_policy_agent_t agent);
1146 
1147 #ifdef MACH_KERNEL_PRIVATE
1148 static inline thread_tag_t
1149 thread_set_tag_internal(thread_t thread, thread_tag_t tag)
1150 {
1151 	return os_atomic_or_orig(&thread->thread_tag, tag, relaxed);
1152 }
1153 
1154 static inline thread_tag_t
1155 thread_get_tag_internal(thread_t thread)
1156 {
1157 	return thread->thread_tag;
1158 }
1159 #endif /* MACH_KERNEL_PRIVATE */
1160 
1161 uint64_t        thread_last_run_time(thread_t thread);
1162 
1163 extern kern_return_t    thread_state_initialize(
1164 	thread_t                                thread);
1165 
1166 extern kern_return_t    thread_setstatus(
1167 	thread_t                                thread,
1168 	int                                             flavor,
1169 	thread_state_t                  tstate,
1170 	mach_msg_type_number_t  count);
1171 
1172 extern kern_return_t    thread_setstatus_from_user(
1173 	thread_t                                thread,
1174 	int                                             flavor,
1175 	thread_state_t                  tstate,
1176 	mach_msg_type_number_t  count);
1177 
1178 extern kern_return_t    thread_getstatus(
1179 	thread_t                                thread,
1180 	int                                             flavor,
1181 	thread_state_t                  tstate,
1182 	mach_msg_type_number_t  *count);
1183 
1184 extern void main_thread_set_immovable_pinned(thread_t thread);
1185 
1186 extern kern_return_t    thread_getstatus_to_user(
1187 	thread_t                                thread,
1188 	int                                             flavor,
1189 	thread_state_t                  tstate,
1190 	mach_msg_type_number_t  *count);
1191 
1192 extern kern_return_t    thread_create_with_continuation(
1193 	task_t task,
1194 	thread_t *new_thread,
1195 	thread_continue_t continuation);
1196 
1197 /* thread_create_waiting options */
1198 __options_decl(th_create_waiting_options_t, uint32_t, {
1199 	TH_CREATE_WAITING_OPTION_NONE      = 0x00,
1200 	TH_CREATE_WAITING_OPTION_PINNED    = 0x10,
1201 	TH_CREATE_WAITING_OPTION_IMMOVABLE = 0x20,
1202 });
1203 #define TH_CREATE_WAITING_OPTION_MASK          0x30
1204 
1205 extern kern_return_t thread_create_waiting(task_t    task,
1206     thread_continue_t              continuation,
1207     event_t                        event,
1208     th_create_waiting_options_t    options,
1209     thread_t                       *new_thread);
1210 
1211 extern kern_return_t    thread_create_workq_waiting(
1212 	task_t                  task,
1213 	thread_continue_t       thread_return,
1214 	thread_t                *new_thread);
1215 
1216 extern  void    thread_yield_internal(
1217 	mach_msg_timeout_t      interval);
1218 
1219 extern void thread_yield_to_preemption(void);
1220 
1221 extern void thread_depress_timer_setup(thread_t self);
1222 
1223 /*
1224  * Thread-private CPU limits: apply a private CPU limit to this thread only. Available actions are:
1225  *
1226  * 1) Block. Prevent CPU consumption of the thread from exceeding the limit.
1227  * 2) Exception. Generate a resource consumption exception when the limit is exceeded.
1228  * 3) Disable. Remove any existing CPU limit.
1229  */
1230 #define THREAD_CPULIMIT_BLOCK           0x1
1231 #define THREAD_CPULIMIT_EXCEPTION       0x2
1232 #define THREAD_CPULIMIT_DISABLE         0x3
1233 
1234 struct _thread_ledger_indices {
1235 	int cpu_time;
1236 };
1237 
1238 extern struct _thread_ledger_indices thread_ledgers;
1239 
1240 extern int thread_get_cpulimit(int *action, uint8_t *percentage, uint64_t *interval_ns);
1241 extern int thread_set_cpulimit(int action, uint8_t percentage, uint64_t interval_ns);
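
/*
 * Illustrative sketch (assumed usage, acting on the current thread): block the
 * thread from consuming more than 50% of a CPU over a 10 second interval.
 *
 *	thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, 50, 10 * NSEC_PER_SEC);
 */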
1242 
1243 extern void                     thread_read_times(
1244 	thread_t                thread,
1245 	time_value_t    *user_time,
1246 	time_value_t    *system_time,
1247 	time_value_t    *runnable_time);
1248 
1249 extern uint64_t         thread_get_runtime_self(void);
1250 
1251 extern void                     thread_setuserstack(
1252 	thread_t                thread,
1253 	mach_vm_offset_t        user_stack);
1254 
1255 extern user_addr_t         thread_adjuserstack(
1256 	thread_t                thread,
1257 	int                             adjust);
1258 
1259 
1260 extern void                     thread_setentrypoint(
1261 	thread_t                thread,
1262 	mach_vm_offset_t        entry);
1263 
1264 extern kern_return_t    thread_set_tsd_base(
1265 	thread_t        thread,
1266 	mach_vm_offset_t tsd_base);
1267 
1268 extern kern_return_t    thread_setsinglestep(
1269 	thread_t                thread,
1270 	int                     on);
1271 
1272 extern kern_return_t    thread_userstack(
1273 	thread_t,
1274 	int,
1275 	thread_state_t,
1276 	unsigned int,
1277 	mach_vm_offset_t *,
1278 	int *,
1279 	boolean_t);
1280 
1281 extern kern_return_t    thread_entrypoint(
1282 	thread_t,
1283 	int,
1284 	thread_state_t,
1285 	unsigned int,
1286 	mach_vm_offset_t *);
1287 
1288 extern kern_return_t    thread_userstackdefault(
1289 	mach_vm_offset_t *,
1290 	boolean_t);
1291 
1292 extern kern_return_t    thread_wire_internal(
1293 	host_priv_t             host_priv,
1294 	thread_t                thread,
1295 	boolean_t               wired,
1296 	boolean_t               *prev_state);
1297 
1298 
1299 extern kern_return_t    thread_dup(thread_t);
1300 
1301 extern kern_return_t thread_dup2(thread_t, thread_t);
1302 
1303 #if !defined(_SCHED_CALL_T_DEFINED)
1304 #define _SCHED_CALL_T_DEFINED
1305 typedef void    (*sched_call_t)(
1306 	int                             type,
1307 	thread_t                thread);
1308 #endif
1309 
1310 #define SCHED_CALL_BLOCK                0x1
1311 #define SCHED_CALL_UNBLOCK              0x2
1312 
1313 extern void             thread_sched_call(
1314 	thread_t                thread,
1315 	sched_call_t    call);
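
/*
 * Minimal sketch of a scheduler-call hook; my_sched_call is a hypothetical
 * callback used only for illustration.  The callback is invoked with
 * SCHED_CALL_BLOCK when the thread blocks and SCHED_CALL_UNBLOCK when it
 * becomes runnable again:
 *
 *    static void
 *    my_sched_call(int type, thread_t thread)
 *    {
 *        switch (type) {
 *        case SCHED_CALL_BLOCK:
 *            // thread is blocking
 *            break;
 *        case SCHED_CALL_UNBLOCK:
 *            // thread is becoming runnable again
 *            break;
 *        }
 *    }
 *
 *    // Install the hook on the target thread.
 *    thread_sched_call(thread, my_sched_call);
 */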
1316 
1317 extern boolean_t        thread_is_static_param(
1318 	thread_t                thread);
1319 
1320 extern task_t   get_threadtask(thread_t) __pure2;
1321 
1322 extern task_t   get_threadtask_early(thread_t) __pure2;
1323 
1324 /*
1325  * Thread is running within a 64-bit address space.
1326  */
1327 #define thread_is_64bit_addr(thd)       \
1328 	task_has_64Bit_addr(get_threadtask(thd))
1329 
1330 /*
1331  * Thread is using 64-bit machine state.
1332  */
1333 #define thread_is_64bit_data(thd)       \
1334 	task_has_64Bit_data(get_threadtask(thd))
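
/*
 * Illustrative sketch: both predicates simply resolve through the thread's
 * owning task (thread_is_64bit_addr(thd) expands to
 * task_has_64Bit_addr(get_threadtask(thd))).  The two properties can differ
 * on some platforms, so they are queried separately:
 *
 *    if (thread_is_64bit_addr(thread)) {
 *        // the thread's task has a 64-bit address space
 *    }
 *    if (thread_is_64bit_data(thread)) {
 *        // the thread uses 64-bit machine state
 *    }
 */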
1335 
1336 #if defined(__x86_64__)
1337 extern int              thread_task_has_ldt(thread_t);
1338 #endif
1339 extern void             set_thread_pagein_error(thread_t, int);
1340 extern event_t          workq_thread_init_and_wq_lock(task_t, thread_t); // bsd/pthread/
1341 
1342 struct proc;
1343 struct uthread;
1344 extern const size_t     uthread_size;
1345 extern thread_ro_t      get_thread_ro_unchecked(thread_t) __pure2;
1346 extern thread_ro_t      get_thread_ro(thread_t) __pure2;
1347 extern thread_ro_t      current_thread_ro_unchecked(void) __pure2;
1348 extern thread_ro_t      current_thread_ro(void) __pure2;
1349 extern void             clear_thread_ro_proc(thread_t);
1350 extern struct uthread  *get_bsdthread_info(thread_t) __pure2;
1351 extern thread_t         get_machthread(struct uthread *) __pure2;
1352 extern uint64_t         uthread_tid(struct uthread *) __pure2;
1353 extern void             uthread_init(task_t, struct uthread *, thread_ro_t, int);
1354 extern void             uthread_cleanup_name(struct uthread *uthread);
1355 extern void             uthread_cleanup(struct uthread *, thread_ro_t);
1356 extern void             uthread_cred_ref(struct ucred *);
1357 extern void             uthread_cred_free(struct ucred *);
1358 extern void             uthread_destroy(struct uthread *);
1359 extern void             uthread_reset_proc_refcount(struct uthread *);
1360 extern void             thread_ro_update_cred(thread_ro_t, struct ucred *);
1361 extern void             thread_ro_update_flags(thread_ro_t, thread_ro_flags_t add, thread_ro_flags_t clr);
1362 extern bool             uthread_is64bit(struct uthread *uth) __pure2;
1363 #if PROC_REF_DEBUG
1364 extern void             uthread_init_proc_refcount(struct uthread *);
1365 extern void             uthread_destroy_proc_refcount(struct uthread *);
1366 extern void             uthread_assert_zero_proc_refcount(struct uthread *);
1367 #else
1368 #define                 uthread_init_proc_refcount(uth)        ((void)(uth))
1369 #define                 uthread_destroy_proc_refcount(uth)     ((void)(uth))
1370 #define                 uthread_assert_zero_proc_refcount(uth) ((void)(uth))
1371 #endif
1372 #if CONFIG_DEBUG_SYSCALL_REJECTION
1373 extern uint64_t         *uthread_get_syscall_rejection_mask(void *);
1374 #endif /* CONFIG_DEBUG_SYSCALL_REJECTION */
1375 
1376 extern boolean_t        thread_should_halt(
1377 	thread_t                thread);
1378 
1379 extern boolean_t        thread_should_abort(
1380 	thread_t);
1381 
1382 extern int is_64signalregset(void);
1383 
1384 extern void act_set_kperf(thread_t);
1385 extern void act_set_astledger(thread_t thread);
1386 extern void act_set_astledger_async(thread_t thread);
1387 extern void act_set_io_telemetry_ast(thread_t);
1388 extern void act_set_macf_telemetry_ast(thread_t);
1389 extern void act_set_astproc_resource(thread_t);
1390 
1391 extern vm_offset_t thread_get_kernel_stack(thread_t);
1392 
1393 extern uint32_t dtrace_get_thread_predcache(thread_t);
1394 extern int64_t dtrace_get_thread_vtime(thread_t);
1395 extern int64_t dtrace_get_thread_tracing(thread_t);
1396 extern uint16_t dtrace_get_thread_inprobe(thread_t);
1397 extern int dtrace_get_thread_last_cpu_id(thread_t);
1398 extern vm_offset_t dtrace_get_kernel_stack(thread_t);
1399 #define dtrace_get_kernel_stack thread_get_kernel_stack
1400 extern void dtrace_set_thread_predcache(thread_t, uint32_t);
1401 extern void dtrace_set_thread_vtime(thread_t, int64_t);
1402 extern void dtrace_set_thread_tracing(thread_t, int64_t);
1403 extern void dtrace_set_thread_inprobe(thread_t, uint16_t);
1404 extern vm_offset_t dtrace_set_thread_recover(thread_t, vm_offset_t);
1405 extern vm_offset_t dtrace_sign_and_set_thread_recover(thread_t, vm_offset_t);
1406 extern void dtrace_thread_bootstrap(void);
1407 extern void dtrace_thread_didexec(thread_t);
1408 
1409 extern int64_t dtrace_calc_thread_recent_vtime(thread_t);
1410 
1411 
1412 extern kern_return_t    thread_set_wq_state32(
1413 	thread_t          thread,
1414 	thread_state_t    tstate);
1415 
1416 extern kern_return_t    thread_set_wq_state64(
1417 	thread_t          thread,
1418 	thread_state_t    tstate);
1419 
1420 extern vm_offset_t      kernel_stack_mask;
1421 extern vm_offset_t      kernel_stack_size;
1422 extern vm_offset_t      kernel_stack_depth_max;
1423 
1424 extern void guard_ast(thread_t);
1425 extern void fd_guard_ast(thread_t,
1426     mach_exception_code_t, mach_exception_subcode_t);
1427 #if CONFIG_VNGUARD
1428 extern void vn_guard_ast(thread_t,
1429     mach_exception_code_t, mach_exception_subcode_t);
1430 #endif
1431 extern void mach_port_guard_ast(thread_t,
1432     mach_exception_code_t, mach_exception_subcode_t);
1433 extern void virt_memory_guard_ast(thread_t,
1434     mach_exception_code_t, mach_exception_subcode_t);
1435 extern void thread_guard_violation(thread_t,
1436     mach_exception_code_t, mach_exception_subcode_t, boolean_t);
1437 extern void thread_update_io_stats(thread_t, int size, int io_flags);
1438 
1439 extern kern_return_t    thread_set_voucher_name(mach_port_name_t name);
1440 extern kern_return_t thread_get_current_voucher_origin_pid(int32_t *pid);
1441 
1442 extern void thread_enable_send_importance(thread_t thread, boolean_t enable);
1443 
1444 /*
1445  * Translate signal context data pointer to userspace representation
1446  */
1447 
1448 extern kern_return_t    machine_thread_siguctx_pointer_convert_to_user(
1449 	thread_t thread,
1450 	user_addr_t *uctxp);
1451 
1452 extern void machine_tecs(thread_t thr);
1453 
1454 typedef enum cpuvn {
1455 	CPUVN_CI = 1
1456 } cpuvn_e;
1457 
1458 extern int machine_csv(cpuvn_e cve);
1459 #if defined(__x86_64__)
1460 extern void machine_thread_set_insn_copy_optout(thread_t thr);
1461 #endif
1462 
1463 /*
1464  * Translate array of function pointer syscall arguments from userspace representation
1465  */
1466 
1467 extern kern_return_t    machine_thread_function_pointers_convert_from_user(
1468 	thread_t thread,
1469 	user_addr_t *fptrs,
1470 	uint32_t count);
1471 
1472 /*
1473  * Get the duration of the given thread's last wait.
1474  */
1475 uint64_t thread_get_last_wait_duration(thread_t thread);
1476 
1477 extern bool thread_get_no_smt(void);
1478 #if defined(__x86_64__)
1479 extern bool curtask_get_insn_copy_optout(void);
1480 extern void curtask_set_insn_copy_optout(void);
1481 #endif /* defined(__x86_64__) */
1482 
1483 #endif  /* XNU_KERNEL_PRIVATE */
1484 #ifdef KERNEL_PRIVATE
1485 
1486 typedef struct thread_pri_floor {
1487 	thread_t thread;
1488 } thread_pri_floor_t;
1489 
1490 #ifdef MACH_KERNEL_PRIVATE
1491 extern void thread_floor_boost_ast(thread_t thread);
1492 extern void thread_floor_boost_set_promotion_locked(thread_t thread);
1493 #endif /* MACH_KERNEL_PRIVATE */
1494 
1495 /*!  @function thread_priority_floor_start
1496  *   @abstract Boost the current thread's priority to the priority floor.
1497  *   @discussion Increase the priority of the current thread to at least MINPRI_FLOOR.
1498  *       The boost will be maintained until a corresponding thread_priority_floor_end()
1499  *       is called. Every call of thread_priority_floor_start() needs to have a corresponding
1500  *       call to thread_priority_floor_end() from the same thread.
1501  *       No thread can return to userspace before calling thread_priority_floor_end().
1502  *
1503  *       NOTE: avoid using this function. Prefer gate_t or sleep_with_inheritor()
1504  *       instead.
1505  *   @result a token to be given to the corresponding thread_priority_floor_end()
1506  */
1507 extern thread_pri_floor_t thread_priority_floor_start(void);
1508 /*!  @function thread_priority_floor_end
1509  *   @abstract Ends the floor boost.
1510  *   @param token The token obtained from thread_priority_floor_start().
1511  *   @discussion Ends the priority floor boost started with thread_priority_floor_start().
1512  */
1513 extern void thread_priority_floor_end(thread_pri_floor_t *token);
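
/*
 * Usage sketch (illustrative only), following the start/end token protocol
 * described above:
 *
 *    thread_pri_floor_t token;
 *
 *    token = thread_priority_floor_start();
 *    // ... work that must not run below MINPRI_FLOOR ...
 *    thread_priority_floor_end(&token);
 */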
1514 
1515 extern void thread_set_no_smt(bool set);
1516 
1517 extern void thread_mtx_lock(thread_t thread);
1518 
1519 extern void thread_mtx_unlock(thread_t thread);
1520 
1521 extern uint64_t thread_dispatchqaddr(
1522 	thread_t thread);
1523 
1524 bool thread_is_eager_preempt(thread_t thread);
1525 void thread_set_eager_preempt(thread_t thread);
1526 void thread_clear_eager_preempt(thread_t thread);
1527 void thread_set_honor_qlimit(thread_t thread);
1528 void thread_clear_honor_qlimit(thread_t thread);
1529 extern ipc_port_t convert_thread_to_port(thread_t);
1530 extern ipc_port_t convert_thread_to_port_pinned(thread_t);
1531 extern ipc_port_t convert_thread_inspect_to_port(thread_inspect_t);
1532 extern ipc_port_t convert_thread_read_to_port(thread_read_t);
1533 extern boolean_t is_vm_privileged(void);
1534 extern boolean_t set_vm_privilege(boolean_t);
1535 extern kern_allocation_name_t thread_set_allocation_name(kern_allocation_name_t new_name);
1536 extern void *thread_iokit_tls_get(uint32_t index);
1537 extern void thread_iokit_tls_set(uint32_t index, void * data);
1538 extern int thread_self_region_page_shift(void);
1539 extern void thread_self_region_page_shift_set(int pgshift);
1540 extern kern_return_t thread_create_immovable(task_t task, thread_t *new_thread);
1541 extern kern_return_t thread_terminate_pinned(thread_t thread);
1542 
1543 #endif /* KERNEL_PRIVATE */
1544 #ifdef XNU_KERNEL_PRIVATE
1545 
1546 extern void
1547 thread_get_thread_name(thread_t th, char* name);
1548 
1549 extern bool thread_supports_cooperative_workqueue(thread_t thread);
1550 extern void thread_arm_workqueue_quantum(thread_t thread);
1551 extern void thread_disarm_workqueue_quantum(thread_t thread);
1552 
1553 extern void thread_evaluate_workqueue_quantum_expiry(thread_t thread);
1554 extern bool thread_has_expired_workqueue_quantum(thread_t thread, bool should_trace);
1555 
1556 #endif /* XNU_KERNEL_PRIVATE */
1557 
1558 /*! @function thread_has_thread_name
1559  *   @abstract Checks if a thread has a name.
1560  *   @discussion This function takes one input, a thread, and returns
1561  *       a boolean value indicating whether that thread already has a name associated
1562  *       with it.
1563  *   @param th The thread to inspect.
1564  *   @result TRUE if the thread has a name, FALSE otherwise.
1565  */
1566 extern boolean_t thread_has_thread_name(thread_t th);
1567 
1568 /*! @function thread_set_thread_name
1569  *   @abstract Set a thread's name.
1570  *   @discussion This function takes two input parameters: a thread to name,
1571  *       and the name to apply to the thread.  The name will be copied over to
1572  *       the thread in order to better identify the thread.  If the name is
1573  *       longer than MAXTHREADNAMESIZE - 1, it will be truncated.
1574  *   @param th The thread to be named.
1575  *   @param name The name to apply to the thread.
1576  */
1577 extern void thread_set_thread_name(thread_t th, const char* name);
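
/*
 * Usage sketch (illustrative only; the name string is a made-up example).
 * Names longer than MAXTHREADNAMESIZE - 1 are truncated, per the
 * description above:
 *
 *    if (!thread_has_thread_name(th)) {
 *        thread_set_thread_name(th, "com.example.worker");
 *    }
 */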
1578 
1579 extern thread_t current_thread(void) __pure2;
1580 
1581 extern uint64_t thread_tid(thread_t thread) __pure2;
1582 
1583 extern void thread_reference(
1584 	thread_t        thread);
1585 
1586 extern void thread_deallocate(
1587 	thread_t        thread);
1588 
1589 /*! @function kernel_thread_start
1590  *   @abstract Create a kernel thread.
1591  *   @discussion This function takes three input parameters: a reference
1592  *       to the function that the thread should execute, caller-specified data,
1593  *       and a reference used to return the newly created kernel thread.
1594  *       The function returns KERN_SUCCESS on success or an appropriate
1595  *       error code indicating the failure. The caller is responsible for
1596  *       explicitly releasing the reference to the created thread when it is
1597  *       no longer needed, by calling
1598  *       thread_deallocate(new_thread).
1599  *   @param continuation A C-function pointer where the thread will begin execution.
1600  *   @param parameter Caller-specified data to be passed to the new thread.
1601  *   @param new_thread Reference to the new thread is returned in this parameter.
1602  *   @result Returns KERN_SUCCESS on success or an appropriate error code.
1603  */
1604 
1605 extern kern_return_t    kernel_thread_start(
1606 	thread_continue_t       continuation,
1607 	void                    *parameter,
1608 	thread_t                *new_thread);
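
/*
 * Usage sketch (illustrative only; my_worker_func is a hypothetical
 * continuation).  The caller owns one reference on the new thread and is
 * expected to drop it with thread_deallocate() once it no longer needs it:
 *
 *    static void
 *    my_worker_func(void *parameter, wait_result_t wresult)
 *    {
 *        // ... do the work of the thread ...
 *        thread_terminate(current_thread());
 *    }
 *
 *    thread_t worker;
 *    kern_return_t kr = kernel_thread_start(my_worker_func, NULL, &worker);
 *    if (kr == KERN_SUCCESS) {
 *        thread_deallocate(worker);   // drop our reference; the thread keeps running
 *    }
 */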
1609 
1610 __END_DECLS
1611 
1612 #endif  /* _KERN_THREAD_H_ */
1613