xref: /xnu-8796.101.5/osfmk/kern/thread.c (revision aca3beaa3dfbd42498b42c5e5ce20a938e6554e5)
1 /*
2  * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_FREE_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	kern/thread.c
60  *	Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub
61  *	Date:	1986
62  *
63  *	Thread management primitives implementation.
64  */
65 /*
66  * Copyright (c) 1993 The University of Utah and
67  * the Computer Systems Laboratory (CSL).  All rights reserved.
68  *
69  * Permission to use, copy, modify and distribute this software and its
70  * documentation is hereby granted, provided that both the copyright
71  * notice and this permission notice appear in all copies of the
72  * software, derivative works or modified versions, and any portions
73  * thereof, and that both notices appear in supporting documentation.
74  *
75  * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
76  * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
77  * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
78  *
79  * CSL requests users of this software to return to [email protected] any
80  * improvements that they make and grant CSL redistribution rights.
81  *
82  */
83 
84 #include <mach/mach_types.h>
85 #include <mach/boolean.h>
86 #include <mach/policy.h>
87 #include <mach/thread_info.h>
88 #include <mach/thread_special_ports.h>
89 #include <mach/thread_act.h>
90 #include <mach/thread_status.h>
91 #include <mach/time_value.h>
92 #include <mach/vm_param.h>
93 
94 #include <machine/thread.h>
95 #include <machine/pal_routines.h>
96 #include <machine/limits.h>
97 
98 #include <kern/kern_types.h>
99 #include <kern/kalloc.h>
100 #include <kern/cpu_data.h>
101 #include <kern/extmod_statistics.h>
102 #include <kern/ipc_mig.h>
103 #include <kern/ipc_tt.h>
104 #include <kern/mach_param.h>
105 #include <kern/machine.h>
106 #include <kern/misc_protos.h>
107 #include <kern/processor.h>
108 #include <kern/queue.h>
109 #include <kern/restartable.h>
110 #include <kern/sched.h>
111 #include <kern/sched_prim.h>
112 #include <kern/syscall_subr.h>
113 #include <kern/task.h>
114 #include <kern/thread.h>
115 #include <kern/thread_group.h>
116 #include <kern/coalition.h>
117 #include <kern/host.h>
118 #include <kern/zalloc.h>
119 #include <kern/assert.h>
120 #include <kern/exc_resource.h>
121 #include <kern/exc_guard.h>
122 #include <kern/telemetry.h>
123 #include <kern/policy_internal.h>
124 #include <kern/turnstile.h>
125 #include <kern/sched_clutch.h>
126 #include <kern/recount.h>
127 #include <kern/smr.h>
128 #include <kern/ast.h>
129 #include <kern/compact_id.h>
130 
131 #include <corpses/task_corpse.h>
132 #if KPC
133 #include <kern/kpc.h>
134 #endif
135 
136 #if CONFIG_PERVASIVE_CPI
137 #include <kern/monotonic.h>
138 #include <machine/monotonic.h>
139 #endif /* CONFIG_PERVASIVE_CPI */
140 
141 #include <ipc/ipc_kmsg.h>
142 #include <ipc/ipc_port.h>
143 #include <bank/bank_types.h>
144 
145 #include <vm/vm_kern.h>
146 #include <vm/vm_pageout.h>
147 
148 #include <sys/kdebug.h>
149 #include <sys/bsdtask_info.h>
150 #include <mach/sdt.h>
151 #include <san/kasan.h>
152 #include <san/kcov_stksz.h>
153 
154 #include <stdatomic.h>
155 
156 #if defined(HAS_APPLE_PAC)
157 #include <ptrauth.h>
158 #include <arm64/proc_reg.h>
159 #endif /* defined(HAS_APPLE_PAC) */
160 
161 /*
162  * Exported interfaces
163  */
164 #include <mach/task_server.h>
165 #include <mach/thread_act_server.h>
166 #include <mach/mach_host_server.h>
167 #include <mach/host_priv_server.h>
168 #include <mach/mach_voucher_server.h>
169 #include <kern/policy_internal.h>
170 
171 #if CONFIG_MACF
172 #include <security/mac_mach_internal.h>
173 #endif
174 
175 #include <pthread/workqueue_trace.h>
176 
177 LCK_GRP_DECLARE(thread_lck_grp, "thread");
178 
179 static SECURITY_READ_ONLY_LATE(zone_t) thread_zone;
180 ZONE_DEFINE_ID(ZONE_ID_THREAD_RO, "threads_ro", struct thread_ro, ZC_READONLY);
181 
182 static void thread_port_with_flavor_no_senders(ipc_port_t, mach_port_mscount_t);
183 
184 IPC_KOBJECT_DEFINE(IKOT_THREAD_CONTROL);
185 IPC_KOBJECT_DEFINE(IKOT_THREAD_READ,
186     .iko_op_no_senders = thread_port_with_flavor_no_senders);
187 IPC_KOBJECT_DEFINE(IKOT_THREAD_INSPECT,
188     .iko_op_no_senders = thread_port_with_flavor_no_senders);
189 
190 static struct mpsc_daemon_queue thread_stack_queue;
191 static struct mpsc_daemon_queue thread_terminate_queue;
192 static struct mpsc_daemon_queue thread_deallocate_queue;
193 static struct mpsc_daemon_queue thread_exception_queue;
194 static struct mpsc_daemon_queue thread_backtrace_queue;
195 
196 decl_simple_lock_data(static, crashed_threads_lock);
197 static queue_head_t             crashed_threads_queue;
198 
199 struct thread_exception_elt {
200 	struct mpsc_queue_chain link;
201 	exception_type_t        exception_type;
202 	task_t                  exception_task;
203 	thread_t                exception_thread;
204 };
205 
206 struct thread_backtrace_elt {
207 	struct mpsc_queue_chain link;
208 	exception_type_t        exception_type;
209 	kcdata_object_t         obj;
210 	exception_port_t        exc_ports[BT_EXC_PORTS_COUNT]; /* send rights */
211 };
212 
213 static SECURITY_READ_ONLY_LATE(struct thread) thread_template = {
214 #if MACH_ASSERT
215 	.thread_magic               = THREAD_MAGIC,
216 #endif /* MACH_ASSERT */
217 	.wait_result                = THREAD_WAITING,
218 	.options                    = THREAD_ABORTSAFE,
219 	.state                      = TH_WAIT | TH_UNINT,
220 	.th_sched_bucket            = TH_BUCKET_RUN,
221 	.base_pri                   = BASEPRI_DEFAULT,
222 	.realtime.deadline          = UINT64_MAX,
223 	.last_made_runnable_time    = THREAD_NOT_RUNNABLE,
224 	.last_basepri_change_time   = THREAD_NOT_RUNNABLE,
225 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
226 	.pri_shift                  = INT8_MAX,
227 #endif
228 	/* timers are initialized in thread_bootstrap */
229 };
230 
231 #define CTID_SIZE_BIT           20
232 #define CTID_MASK               ((1u << CTID_SIZE_BIT) - 1)
233 #define CTID_MAX_THREAD_NUMBER  (CTID_MASK - 1)
234 static_assert(CTID_MAX_THREAD_NUMBER <= COMPACT_ID_MAX);
235 
236 #ifndef __LITTLE_ENDIAN__
237 #error "ctid relies on the least-significant bits of uint32_t being populated"
238 #endif
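/*
 * Concretely, with CTID_SIZE_BIT == 20:
 *
 *	CTID_MASK              == (1u << 20) - 1 == 0x000FFFFF
 *	CTID_MAX_THREAD_NUMBER == CTID_MASK - 1  == 0x000FFFFE
 *
 * so a ctid always fits in the low-order 20 bits of a uint32_t, which is
 * what the little-endian check above relies on.
 */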
239 
240 __startup_data
241 static struct thread init_thread;
242 static SECURITY_READ_ONLY_LATE(uint32_t) ctid_nonce;
243 COMPACT_ID_TABLE_DEFINE(static, ctid_table);
244 
245 __startup_func
246 static void
247 thread_zone_startup(void)
248 {
249 	size_t size = sizeof(struct thread);
250 
251 #ifdef MACH_BSD
252 	size += roundup(uthread_size, _Alignof(struct thread));
253 #endif
254 	thread_zone = zone_create_ext("threads", size,
255 	    ZC_SEQUESTER | ZC_ZFREE_CLEARMEM, ZONE_ID_THREAD, NULL);
256 }
257 STARTUP(ZALLOC, STARTUP_RANK_FOURTH, thread_zone_startup);
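/*
 * Layout sketch (assuming the usual co-allocation of the uthread behind the
 * Mach thread): a zone element is a struct thread followed by the BSD
 * uthread, rounded up so the uthread keeps struct thread's alignment:
 *
 *	+---------------+-------------------------------------------------+
 *	| struct thread | roundup(uthread_size, _Alignof(struct thread))  |
 *	+---------------+-------------------------------------------------+
 */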
258 
259 static void thread_deallocate_enqueue(thread_t thread);
260 static void thread_deallocate_complete(thread_t thread);
261 
262 static void ctid_table_remove(thread_t thread);
263 static void ctid_table_add(thread_t thread);
264 static void ctid_table_init(void);
265 
266 #ifdef MACH_BSD
267 extern void proc_exit(void *);
268 extern mach_exception_data_type_t proc_encode_exit_exception_code(void *);
269 extern uint64_t get_dispatchqueue_offset_from_proc(void *);
270 extern uint64_t get_return_to_kernel_offset_from_proc(void *p);
271 extern uint64_t get_wq_quantum_offset_from_proc(void *);
272 extern int      proc_selfpid(void);
273 extern void     proc_name(int, char*, int);
274 extern char *   proc_name_address(void *p);
275 exception_type_t get_exception_from_corpse_crashinfo(kcdata_descriptor_t corpse_info);
276 extern void kdebug_proc_name_args(struct proc *proc, long args[static 4]);
277 #endif /* MACH_BSD */
278 
279 extern bool bsdthread_part_of_cooperative_workqueue(struct uthread *uth);
280 extern int disable_exc_resource;
281 extern int audio_active;
282 extern int debug_task;
283 int thread_max = CONFIG_THREAD_MAX;     /* Max number of threads */
284 int task_threadmax = CONFIG_THREAD_MAX;
285 
286 static uint64_t         thread_unique_id = 100;
287 
288 struct _thread_ledger_indices thread_ledgers = { .cpu_time = -1 };
289 static ledger_template_t thread_ledger_template = NULL;
290 static void init_thread_ledgers(void);
291 
292 #if CONFIG_JETSAM
293 void jetsam_on_ledger_cpulimit_exceeded(void);
294 #endif
295 
296 extern int task_thread_soft_limit;
297 
298 #if DEVELOPMENT || DEBUG
299 extern int exc_resource_threads_enabled;
300 #endif /* DEVELOPMENT || DEBUG */
301 
302 /*
303  * Level (in terms of percentage of the limit) at which the CPU usage monitor triggers telemetry.
304  *
305  * (i.e., when any thread's CPU consumption exceeds 70% of the limit, we start taking user
306  *  stack traces, aka micro-stackshots)
307  */
308 #define CPUMON_USTACKSHOTS_TRIGGER_DEFAULT_PCT 70
309 
310 /* Percentage. Level at which we start gathering telemetry. */
311 static TUNABLE(uint8_t, cpumon_ustackshots_trigger_pct,
312     "cpumon_ustackshots_trigger_pct", CPUMON_USTACKSHOTS_TRIGGER_DEFAULT_PCT);
313 void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU(void);
314 #if DEVELOPMENT || DEBUG
315 void __attribute__((noinline)) SENDING_NOTIFICATION__TASK_HAS_TOO_MANY_THREADS(task_t, int);
316 #endif /* DEVELOPMENT || DEBUG */
317 
318 /*
319  * The smallest interval over which we support limiting CPU consumption is 1 ms.
320  */
321 #define MINIMUM_CPULIMIT_INTERVAL_MS 1
322 
323 os_refgrp_decl(static, thread_refgrp, "thread", NULL);
324 
325 static inline void
326 init_thread_from_template(thread_t thread)
327 {
328 	/*
329 	 * In general, struct thread isn't trivially-copyable, since it may
330 	 * contain pointers to thread-specific state.  This may be enforced at
331 	 * compile time on architectures that store authed + diversified
332 	 * pointers in machine_thread.
333 	 *
334 	 * In this specific case, where we're initializing a new thread from a
335 	 * thread_template, we know all diversified pointers are NULL; these are
336 	 * safe to bitwise copy.
337 	 */
338 #pragma clang diagnostic push
339 #pragma clang diagnostic ignored "-Wnontrivial-memaccess"
340 	memcpy(thread, &thread_template, sizeof(*thread));
341 #pragma clang diagnostic pop
342 }
343 
344 static void
345 thread_ro_create(task_t parent_task, thread_t th, thread_ro_t tro_tpl)
346 {
347 #if __x86_64__
348 	th->t_task = parent_task;
349 #endif
350 	tro_tpl->tro_owner = th;
351 	tro_tpl->tro_task  = parent_task;
352 	th->t_tro = zalloc_ro(ZONE_ID_THREAD_RO, Z_WAITOK | Z_ZERO | Z_NOFAIL);
353 	zalloc_ro_update_elem(ZONE_ID_THREAD_RO, th->t_tro, tro_tpl);
354 }
355 
356 static void
357 thread_ro_destroy(thread_t th)
358 {
359 	thread_ro_t tro = get_thread_ro(th);
360 #if MACH_BSD
361 	struct ucred *cred = tro->tro_cred;
362 #endif
363 
364 	zfree_ro(ZONE_ID_THREAD_RO, tro);
365 #if MACH_BSD
366 	if (cred) {
367 		uthread_cred_free(cred);
368 	}
369 #endif
370 }
371 
372 #if MACH_BSD
373 extern void kauth_cred_set(struct ucred **, struct ucred *);
374 
375 void
376 thread_ro_update_cred(thread_ro_t tro, struct ucred *ucred)
377 {
378 	struct ucred *my_cred = tro->tro_cred;
379 	if (my_cred != ucred) {
380 		kauth_cred_set(&my_cred, ucred);
381 		zalloc_ro_update_field(ZONE_ID_THREAD_RO, tro, tro_cred, &my_cred);
382 	}
383 }
384 
385 void
386 thread_ro_update_flags(thread_ro_t tro, thread_ro_flags_t add, thread_ro_flags_t clr)
387 {
388 	thread_ro_flags_t flags = (tro->tro_flags & ~clr) | add;
389 	zalloc_ro_update_field(ZONE_ID_THREAD_RO, tro, tro_flags, &flags);
390 }
391 #endif
392 
393 __startup_func
394 thread_t
395 thread_bootstrap(void)
396 {
397 	/*
398 	 *	Fill in a template thread for fast initialization.
399 	 */
400 	timer_init(&thread_template.runnable_timer);
401 
402 	init_thread_from_template(&init_thread);
403 	/* fiddle with init thread to skip asserts in set_sched_pri */
404 	init_thread.sched_pri = MAXPRI_KERNEL;
405 
406 	/*
407 	 * We can't quite use ctid yet: on ARM, thread_bootstrap() is called
408 	 * before we can call random() or much else,
409 	 * so we just make it barely work here; it gets fixed up
410 	 * when the first real thread is created.
411 	 */
412 	*compact_id_resolve(&ctid_table, 0) = &init_thread;
413 	init_thread.ctid = CTID_MASK;
414 
415 	return &init_thread;
416 }
417 
418 void
419 thread_machine_init_template(void)
420 {
421 	machine_thread_template_init(&thread_template);
422 }
423 
424 void
425 thread_init(void)
426 {
427 	/*
428 	 *	Initialize any machine-dependent
429 	 *	per-thread structures necessary.
430 	 */
431 	machine_thread_init();
432 
433 	init_thread_ledgers();
434 }
435 
436 boolean_t
437 thread_is_active(thread_t thread)
438 {
439 	return thread->active;
440 }
441 
442 void
443 thread_corpse_continue(void)
444 {
445 	thread_t thread = current_thread();
446 
447 	thread_terminate_internal(thread);
448 
449 	/*
450 	 * Handle the thread termination directly
451 	 * here instead of returning to userspace.
452 	 */
453 	assert(thread->active == FALSE);
454 	thread_ast_clear(thread, AST_APC);
455 	thread_apc_ast(thread);
456 
457 	panic("thread_corpse_continue");
458 	/*NOTREACHED*/
459 }
460 
461 __dead2
462 static void
463 thread_terminate_continue(void)
464 {
465 	panic("thread_terminate_continue");
466 	/*NOTREACHED*/
467 }
468 
469 /*
470  *	thread_terminate_self:
471  */
472 void
473 thread_terminate_self(void)
474 {
475 	thread_t    thread = current_thread();
476 	thread_ro_t tro    = get_thread_ro(thread);
477 	task_t      task   = tro->tro_task;
478 	void *bsd_info = get_bsdtask_info(task);
479 	int threadcnt;
480 
481 	pal_thread_terminate_self(thread);
482 
483 	DTRACE_PROC(lwp__exit);
484 
485 	thread_mtx_lock(thread);
486 
487 	ipc_thread_disable(thread);
488 
489 	thread_mtx_unlock(thread);
490 
491 	thread_sched_call(thread, NULL);
492 
493 	spl_t s = splsched();
494 	thread_lock(thread);
495 
496 	thread_depress_abort_locked(thread);
497 
498 	/*
499 	 * Before we took the thread_lock right above,
500 	 * act_set_ast_reset_pcs() might not yet have observed
501 	 * that the thread is inactive, and could have
502 	 * requested an IPI Ack.
503 	 *
504 	 * Once we unlock the thread, we know that
505 	 * act_set_ast_reset_pcs() can't fail to notice
506 	 * that thread->active is false,
507 	 * and won't request new IPI Acks.
508 	 */
509 	thread_reset_pcs_ack_IPI(thread);
510 
511 	thread_unlock(thread);
512 
513 	splx(s);
514 
515 #if CONFIG_TASKWATCH
516 	thead_remove_taskwatch(thread);
517 #endif /* CONFIG_TASKWATCH */
518 
519 	work_interval_thread_terminate(thread);
520 
521 	thread_mtx_lock(thread);
522 
523 	thread_policy_reset(thread);
524 
525 	thread_mtx_unlock(thread);
526 
527 	assert(thread->th_work_interval == NULL);
528 
529 	bank_swap_thread_bank_ledger(thread, NULL);
530 
531 	if (kdebug_enable && bsd_hasthreadname(get_bsdthread_info(thread))) {
532 		char threadname[MAXTHREADNAMESIZE];
533 		bsd_getthreadname(get_bsdthread_info(thread), threadname);
534 		kernel_debug_string_simple(TRACE_STRING_THREADNAME_PREV, threadname);
535 	}
536 
537 	uthread_cleanup(get_bsdthread_info(thread), tro);
538 
539 	if (kdebug_enable && bsd_info && !task_is_exec_copy(task)) {
540 		/* trace out pid before we sign off */
541 		long dbg_arg1 = 0;
542 		long dbg_arg2 = 0;
543 
544 		kdbg_trace_data(get_bsdtask_info(task), &dbg_arg1, &dbg_arg2);
545 #if CONFIG_PERVASIVE_CPI
546 		if (kdebug_debugid_enabled(DBG_MT_INSTRS_CYCLES_THR_EXIT)) {
547 			struct recount_usage usage = { 0 };
548 			struct recount_usage perf_only = { 0 };
549 			boolean_t intrs_end = ml_set_interrupts_enabled(FALSE);
550 			recount_current_thread_usage_perf_only(&usage, &perf_only);
551 			ml_set_interrupts_enabled(intrs_end);
552 			KDBG_RELEASE(DBG_MT_INSTRS_CYCLES_THR_EXIT,
553 			    usage.ru_instructions,
554 			    usage.ru_cycles,
555 			    usage.ru_system_time_mach,
556 			    usage.ru_user_time_mach);
557 #if __AMP__
558 			KDBG_RELEASE(DBG_MT_P_INSTRS_CYCLES_THR_EXIT,
559 			    perf_only.ru_instructions,
560 			    perf_only.ru_cycles,
561 			    perf_only.ru_system_time_mach,
562 			    perf_only.ru_user_time_mach);
563 
564 #endif // __AMP__
565 		}
566 #endif/* CONFIG_PERVASIVE_CPI */
567 		KDBG_RELEASE(TRACE_DATA_THREAD_TERMINATE_PID, dbg_arg1, dbg_arg2);
568 	}
569 
570 	/*
571 	 * After this subtraction, this thread should never access
572 	 * task->bsd_info unless it got 0 back from the os_atomic_dec.  It
573 	 * could be racing with other threads to be the last thread in the
574 	 * process, and the last thread in the process will tear down the proc
575 	 * structure and zero-out task->bsd_info.
576 	 */
577 	threadcnt = os_atomic_dec(&task->active_thread_count, relaxed);
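	/*
	 * Illustration of the rule above: os_atomic_dec() returns the new
	 * value, so if two exiting threads race with active_thread_count == 2,
	 * exactly one of them observes 0 and becomes responsible for the
	 * proc_exit() path below; the other must no longer touch
	 * task->bsd_info.
	 */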
578 
579 #if CONFIG_COALITIONS
580 	/*
581 	 * Leave the coalitions when the last thread of the task is exiting and the
582 	 * task is not a corpse.
583 	 */
584 	if (threadcnt == 0 && !task->corpse_info) {
585 		coalitions_remove_task(task);
586 	}
587 #endif
588 
589 	/*
590 	 * If we are the last thread to terminate and the task is
591 	 * associated with a BSD process, perform BSD process exit.
592 	 */
593 	if (threadcnt == 0 && bsd_info != NULL) {
594 		mach_exception_data_type_t subcode = 0;
595 		if (kdebug_enable) {
596 			/* since we're the last thread in this process, trace out the command name too */
597 			long args[4] = { 0 };
598 			kdebug_proc_name_args(bsd_info, args);
599 #if CONFIG_PERVASIVE_CPI
600 			if (kdebug_debugid_enabled(DBG_MT_INSTRS_CYCLES_PROC_EXIT)) {
601 				struct recount_usage usage = { 0 };
602 				struct recount_usage perf_only = { 0 };
603 				recount_current_task_usage_perf_only(&usage, &perf_only);
604 				KDBG_RELEASE(DBG_MT_INSTRS_CYCLES_PROC_EXIT,
605 				    usage.ru_instructions,
606 				    usage.ru_cycles,
607 				    usage.ru_system_time_mach,
608 				    usage.ru_user_time_mach);
609 #if __AMP__
610 				KDBG_RELEASE(DBG_MT_P_INSTRS_CYCLES_PROC_EXIT,
611 				    perf_only.ru_instructions,
612 				    perf_only.ru_cycles,
613 				    perf_only.ru_system_time_mach,
614 				    perf_only.ru_user_time_mach);
615 #endif // __AMP__
616 			}
617 #endif/* CONFIG_PERVASIVE_CPI */
618 			KDBG_RELEASE(TRACE_STRING_PROC_EXIT, args[0], args[1], args[2], args[3]);
619 		}
620 
621 		/* Get the exit reason before proc_exit */
622 		subcode = proc_encode_exit_exception_code(bsd_info);
623 		proc_exit(bsd_info);
624 		bsd_info = NULL;
625 		/*
626 		 * If there is crash info in the task,
627 		 * then perform the delivery action, since this is
628 		 * the last thread for this task.
629 		 */
630 		if (task->corpse_info) {
631 			/* reset all except task name port */
632 			ipc_task_reset(task);
633 			/* enable all task ports (name port unchanged) */
634 			ipc_task_enable(task);
635 			exception_type_t etype = get_exception_from_corpse_crashinfo(task->corpse_info);
636 			task_deliver_crash_notification(task, current_thread(), etype, subcode);
637 		}
638 	}
639 
640 	if (threadcnt == 0) {
641 		task_lock(task);
642 		if (task_is_a_corpse_fork(task)) {
643 			thread_wakeup((event_t)&task->active_thread_count);
644 		}
645 		task_unlock(task);
646 	}
647 
648 	s = splsched();
649 	thread_lock(thread);
650 
651 	/*
652 	 * Ensure that the depress timer is no longer enqueued,
653 	 * so the timer can be safely deallocated
654 	 *
655 	 * TODO: build timer_call_cancel_wait
656 	 */
657 
658 	assert((thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) == 0);
659 
660 	uint32_t delay_us = 1;
661 
662 	while (thread->depress_timer_active > 0) {
663 		thread_unlock(thread);
664 		splx(s);
665 
666 		delay(delay_us++);
667 
668 		if (delay_us > USEC_PER_SEC) {
669 			panic("depress timer failed to inactivate! "
670 			    "thread: %p depress_timer_active: %d",
671 			    thread, thread->depress_timer_active);
672 		}
673 
674 		s = splsched();
675 		thread_lock(thread);
676 	}
677 
678 	/*
679 	 *	Cancel wait timer, and wait for
680 	 *	concurrent expirations.
681 	 */
682 	if (thread->wait_timer_armed) {
683 		thread->wait_timer_armed = false;
684 
685 		if (timer_call_cancel(thread->wait_timer)) {
686 			thread->wait_timer_active--;
687 		}
688 	}
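	/*
	 * Sketch of the accounting above: timer_call_cancel() returning true
	 * means the armed call was stopped before firing, so the count the
	 * expiration handler would have dropped is dropped here instead; if
	 * it returns false, an expiration is already in flight and the loop
	 * below waits for it to drop wait_timer_active itself.
	 */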
689 
690 	delay_us = 1;
691 
692 	while (thread->wait_timer_active > 0) {
693 		thread_unlock(thread);
694 		splx(s);
695 
696 		delay(delay_us++);
697 
698 		if (delay_us > USEC_PER_SEC) {
699 			panic("wait timer failed to inactivate! "
700 			    "thread: %p, wait_timer_active: %d, "
701 			    "wait_timer_armed: %d",
702 			    thread, thread->wait_timer_active,
703 			    thread->wait_timer_armed);
704 		}
705 
706 		s = splsched();
707 		thread_lock(thread);
708 	}
709 
710 	/*
711 	 *	If there is a reserved stack, release it.
712 	 */
713 	if (thread->reserved_stack != 0) {
714 		stack_free_reserved(thread);
715 		thread->reserved_stack = 0;
716 	}
717 
718 	/*
719 	 *	Mark thread as terminating, and block.
720 	 */
721 	thread->state |= TH_TERMINATE;
722 	thread_mark_wait_locked(thread, THREAD_UNINT);
723 
724 	assert(thread->th_work_interval_flags == TH_WORK_INTERVAL_FLAGS_NONE);
725 	assert(thread->kern_promotion_schedpri == 0);
726 	if (thread->rwlock_count > 0) {
727 		panic("rwlock_count is %d for thread %p, possibly it still holds a rwlock", thread->rwlock_count, thread);
728 	}
729 	assert(thread->priority_floor_count == 0);
730 	assert(thread->handoff_thread == THREAD_NULL);
731 	assert(thread->th_work_interval == NULL);
732 	assert(thread->t_rr_state.trr_value == 0);
733 
734 	assert3u(0, ==, thread->sched_flags &
735 	    (TH_SFLAG_WAITQ_PROMOTED |
736 	    TH_SFLAG_RW_PROMOTED |
737 	    TH_SFLAG_EXEC_PROMOTED |
738 	    TH_SFLAG_FLOOR_PROMOTED |
739 	    TH_SFLAG_PROMOTED |
740 	    TH_SFLAG_DEPRESS));
741 
742 	thread_unlock(thread);
743 	/* splsched */
744 
745 	thread_block((thread_continue_t)thread_terminate_continue);
746 	/*NOTREACHED*/
747 }
748 
749 static bool
750 thread_ref_release(thread_t thread)
751 {
752 	if (thread == THREAD_NULL) {
753 		return false;
754 	}
755 
756 	assert_thread_magic(thread);
757 
758 	return os_ref_release_raw(&thread->ref_count, &thread_refgrp) == 0;
759 }
760 
761 /* Drop a thread refcount safely without triggering a zfree */
762 void
763 thread_deallocate_safe(thread_t thread)
764 {
765 	if (__improbable(thread_ref_release(thread))) {
766 		/* enqueue the thread for the thread deallocate daemon to call thread_deallocate_complete */
767 		thread_deallocate_enqueue(thread);
768 	}
769 }
770 
771 void
772 thread_deallocate(thread_t thread)
773 {
774 	if (__improbable(thread_ref_release(thread))) {
775 		thread_deallocate_complete(thread);
776 	}
777 }
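/*
 * Usage sketch (hypothetical caller, shown for clarity): contexts that may
 * hold scheduler locks take the _safe variant so the final free is deferred
 * to the deallocate daemon; everyone else frees inline on the last release.
 *
 *	thread_reference(thread);	// +1
 *	...use thread...
 *	thread_deallocate(thread);	// -1; runs thread_deallocate_complete()
 *					// if this was the last reference
 */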
778 
779 void
780 thread_deallocate_complete(
781 	thread_t                        thread)
782 {
783 	task_t                          task;
784 
785 	assert_thread_magic(thread);
786 
787 	assert(os_ref_get_count_raw(&thread->ref_count) == 0);
788 
789 	if (!(thread->state & TH_TERMINATE2)) {
790 		panic("thread_deallocate: thread not properly terminated");
791 	}
792 
793 	assert(thread->runq == PROCESSOR_NULL);
794 	assert(!(thread->state & TH_WAKING));
795 
796 #if KPC
797 	kpc_thread_destroy(thread);
798 #endif /* KPC */
799 
800 	ipc_thread_terminate(thread);
801 
802 	proc_thread_qos_deallocate(thread);
803 
804 	task = get_threadtask(thread);
805 
806 #ifdef MACH_BSD
807 	uthread_destroy(get_bsdthread_info(thread));
808 #endif /* MACH_BSD */
809 
810 	if (thread->t_ledger) {
811 		ledger_dereference(thread->t_ledger);
812 	}
813 	if (thread->t_threadledger) {
814 		ledger_dereference(thread->t_threadledger);
815 	}
816 
817 	assert(thread->turnstile != TURNSTILE_NULL);
818 	if (thread->turnstile) {
819 		turnstile_deallocate(thread->turnstile);
820 	}
821 	turnstile_compact_id_put(thread->ctsid);
822 
823 	if (IPC_VOUCHER_NULL != thread->ith_voucher) {
824 		ipc_voucher_release(thread->ith_voucher);
825 	}
826 
827 	kfree_data(thread->thread_io_stats, sizeof(struct io_stat_info));
828 #if CONFIG_PREADOPT_TG
829 	if (thread->old_preadopt_thread_group) {
830 		thread_group_release(thread->old_preadopt_thread_group);
831 	}
832 
833 	if (thread->preadopt_thread_group) {
834 		thread_group_release(thread->preadopt_thread_group);
835 	}
836 #endif /* CONFIG_PREADOPT_TG */
837 
838 	if (thread->kernel_stack != 0) {
839 		stack_free(thread);
840 	}
841 
842 	recount_thread_deinit(&thread->th_recount);
843 
844 	lck_mtx_destroy(&thread->mutex, &thread_lck_grp);
845 	machine_thread_destroy(thread);
846 
847 	task_deallocate_grp(task, TASK_GRP_INTERNAL);
848 
849 #if MACH_ASSERT
850 	assert_thread_magic(thread);
851 	thread->thread_magic = 0;
852 #endif /* MACH_ASSERT */
853 
854 	lck_mtx_lock(&tasks_threads_lock);
855 	assert(terminated_threads_count > 0);
856 	queue_remove(&terminated_threads, thread, thread_t, threads);
857 	terminated_threads_count--;
858 	lck_mtx_unlock(&tasks_threads_lock);
859 
860 	timer_call_free(thread->depress_timer);
861 	timer_call_free(thread->wait_timer);
862 
863 	ctid_table_remove(thread);
864 
865 	thread_ro_destroy(thread);
866 	zfree(thread_zone, thread);
867 }
868 
869 /*
870  *	thread_inspect_deallocate:
871  *
872  *	Drop a thread inspection reference.
873  */
874 void
875 thread_inspect_deallocate(
876 	thread_inspect_t                thread_inspect)
877 {
878 	return thread_deallocate((thread_t)thread_inspect);
879 }
880 
881 /*
882  *	thread_read_deallocate:
883  *
884  *	Drop a reference on a thread read port.
885  */
886 void
887 thread_read_deallocate(
888 	thread_read_t                thread_read)
889 {
890 	return thread_deallocate((thread_t)thread_read);
891 }
892 
893 
894 /*
895  *	thread_exception_queue_invoke:
896  *
897  *	Deliver an EXC_{RESOURCE,GUARD} exception.
898  */
899 static void
900 thread_exception_queue_invoke(mpsc_queue_chain_t elm,
901     __assert_only mpsc_daemon_queue_t dq)
902 {
903 	struct thread_exception_elt *elt;
904 	task_t task;
905 	thread_t thread;
906 	exception_type_t etype;
907 
908 	assert(dq == &thread_exception_queue);
909 	elt = mpsc_queue_element(elm, struct thread_exception_elt, link);
910 
911 	etype = elt->exception_type;
912 	task = elt->exception_task;
913 	thread = elt->exception_thread;
914 	assert_thread_magic(thread);
915 
916 	kfree_type(struct thread_exception_elt, elt);
917 
918 	/* wait for all the threads in the task to terminate */
919 	task_lock(task);
920 	task_wait_till_threads_terminate_locked(task);
921 	task_unlock(task);
922 
923 	/* Consumes the task ref returned by task_generate_corpse_internal */
924 	task_deallocate(task);
925 	/* Consumes the thread ref returned by task_generate_corpse_internal */
926 	thread_deallocate(thread);
927 
928 	/* Deliver the notification, also clears the corpse. */
929 	task_deliver_crash_notification(task, thread, etype, 0);
930 }
931 
932 static void
933 thread_backtrace_queue_invoke(mpsc_queue_chain_t elm,
934     __assert_only mpsc_daemon_queue_t dq)
935 {
936 	struct thread_backtrace_elt *elt;
937 	kcdata_object_t obj;
938 	exception_port_t exc_ports[BT_EXC_PORTS_COUNT]; /* send rights */
939 	exception_type_t etype;
940 
941 	assert(dq == &thread_backtrace_queue);
942 	elt = mpsc_queue_element(elm, struct thread_backtrace_elt, link);
943 
944 	obj = elt->obj;
945 	memcpy(exc_ports, elt->exc_ports, sizeof(ipc_port_t) * BT_EXC_PORTS_COUNT);
946 	etype = elt->exception_type;
947 
948 	kfree_type(struct thread_backtrace_elt, elt);
949 
950 	/* Deliver to backtrace exception ports */
951 	exception_deliver_backtrace(obj, exc_ports, etype);
952 
953 	/*
954 	 * Release the port rights and the kcdata object ref handed over by
955 	 * task_enqueue_exception_with_corpse().
956 	 */
957 
958 	for (unsigned int i = 0; i < BT_EXC_PORTS_COUNT; i++) {
959 		ipc_port_release_send(exc_ports[i]);
960 	}
961 
962 	kcdata_object_release(obj);
963 }
964 
965 /*
966  *	thread_exception_enqueue:
967  *
968  *	Enqueue a corpse port to be delivered an EXC_{RESOURCE,GUARD}.
969  */
970 void
971 thread_exception_enqueue(
972 	task_t          task,
973 	thread_t        thread,
974 	exception_type_t etype)
975 {
976 	assert(EXC_RESOURCE == etype || EXC_GUARD == etype);
977 	struct thread_exception_elt *elt = kalloc_type(struct thread_exception_elt, Z_WAITOK | Z_NOFAIL);
978 	elt->exception_type = etype;
979 	elt->exception_task = task;
980 	elt->exception_thread = thread;
981 
982 	mpsc_daemon_enqueue(&thread_exception_queue, &elt->link,
983 	    MPSC_QUEUE_DISABLE_PREEMPTION);
984 }
985 
986 void
987 thread_backtrace_enqueue(
988 	kcdata_object_t  obj,
989 	exception_port_t ports[static BT_EXC_PORTS_COUNT],
990 	exception_type_t etype)
991 {
992 	struct thread_backtrace_elt *elt = kalloc_type(struct thread_backtrace_elt, Z_WAITOK | Z_NOFAIL);
993 	elt->obj = obj;
994 	elt->exception_type = etype;
995 
996 	memcpy(elt->exc_ports, ports, sizeof(ipc_port_t) * BT_EXC_PORTS_COUNT);
997 
998 	mpsc_daemon_enqueue(&thread_backtrace_queue, &elt->link,
999 	    MPSC_QUEUE_DISABLE_PREEMPTION);
1000 }
1001 
1002 /*
1003  *	thread_copy_resource_info
1004  *
1005  *	Copy the resource info counters from source
1006  *	thread to destination thread.
1007  */
1008 void
1009 thread_copy_resource_info(
1010 	thread_t dst_thread,
1011 	thread_t src_thread)
1012 {
1013 	dst_thread->c_switch = src_thread->c_switch;
1014 	dst_thread->p_switch = src_thread->p_switch;
1015 	dst_thread->ps_switch = src_thread->ps_switch;
1016 	dst_thread->sched_time_save = src_thread->sched_time_save;
1017 	dst_thread->runnable_timer = src_thread->runnable_timer;
1018 	dst_thread->vtimer_user_save = src_thread->vtimer_user_save;
1019 	dst_thread->vtimer_prof_save = src_thread->vtimer_prof_save;
1020 	dst_thread->vtimer_rlim_save = src_thread->vtimer_rlim_save;
1021 	dst_thread->vtimer_qos_save = src_thread->vtimer_qos_save;
1022 	dst_thread->syscalls_unix = src_thread->syscalls_unix;
1023 	dst_thread->syscalls_mach = src_thread->syscalls_mach;
1024 	ledger_rollup(dst_thread->t_threadledger, src_thread->t_threadledger);
1025 	recount_thread_copy(&dst_thread->th_recount, &src_thread->th_recount);
1026 	*dst_thread->thread_io_stats = *src_thread->thread_io_stats;
1027 }
1028 
1029 static void
1030 thread_terminate_queue_invoke(mpsc_queue_chain_t e,
1031     __assert_only mpsc_daemon_queue_t dq)
1032 {
1033 	thread_t thread = mpsc_queue_element(e, struct thread, mpsc_links);
1034 	task_t task = get_threadtask(thread);
1035 
1036 	assert(dq == &thread_terminate_queue);
1037 
1038 	task_lock(task);
1039 
1040 	/*
1041 	 * If marked for crash reporting, skip reaping.
1042 	 * The corpse delivery thread will clear the bit and enqueue
1043 	 * the thread for reaping when done.
1044 	 *
1045 	 * Note: the inspection field is set under the task lock.
1046 	 *
1047 	 * FIXME[mad]: why enqueue for termination before `inspection` is false?
1048 	 */
1049 	if (__improbable(thread->inspection)) {
1050 		simple_lock(&crashed_threads_lock, &thread_lck_grp);
1051 		task_unlock(task);
1052 
1053 		enqueue_tail(&crashed_threads_queue, &thread->runq_links);
1054 		simple_unlock(&crashed_threads_lock);
1055 		return;
1056 	}
1057 
1058 	recount_task_rollup_thread(&task->tk_recount, &thread->th_recount);
1059 
1060 	task->total_runnable_time += timer_grab(&thread->runnable_timer);
1061 	task->c_switch += thread->c_switch;
1062 	task->p_switch += thread->p_switch;
1063 	task->ps_switch += thread->ps_switch;
1064 
1065 	task->syscalls_unix += thread->syscalls_unix;
1066 	task->syscalls_mach += thread->syscalls_mach;
1067 
1068 	task->task_timer_wakeups_bin_1 += thread->thread_timer_wakeups_bin_1;
1069 	task->task_timer_wakeups_bin_2 += thread->thread_timer_wakeups_bin_2;
1070 	task->task_gpu_ns += ml_gpu_stat(thread);
1071 	task->decompressions += thread->decompressions;
1072 
1073 	thread_update_qos_cpu_time(thread);
1074 
1075 	queue_remove(&task->threads, thread, thread_t, task_threads);
1076 	task->thread_count--;
1077 
1078 	/*
1079 	 * If the task is being halted, and there is only one thread
1080 	 * left in the task after this one, then wakeup that thread.
1081 	 */
1082 	if (task->thread_count == 1 && task->halting) {
1083 		thread_wakeup((event_t)&task->halting);
1084 	}
1085 
1086 	task_unlock(task);
1087 
1088 	lck_mtx_lock(&tasks_threads_lock);
1089 	queue_remove(&threads, thread, thread_t, threads);
1090 	threads_count--;
1091 	queue_enter(&terminated_threads, thread, thread_t, threads);
1092 	terminated_threads_count++;
1093 	lck_mtx_unlock(&tasks_threads_lock);
1094 
1095 #if MACH_BSD
1096 	/*
1097 	 * The thread no longer counts against the task's thread count,
1098 	 * so we can now wake up any pending joiner.
1099 	 *
1100 	 * Note that the inheritor will be set to `thread` which is
1101 	 * incorrect once it is on the termination queue, however
1102 	 * the termination queue runs at MINPRI_KERNEL which is higher
1103 	 * than any user thread, so this isn't a priority inversion.
1104 	 */
1105 	if (thread_get_tag(thread) & THREAD_TAG_USER_JOIN) {
1106 		struct uthread *uth = get_bsdthread_info(thread);
1107 		mach_port_name_t kport = uthread_joiner_port(uth);
1108 
1109 		/*
1110 		 * Clear the low two bits of the port name to tell pthread that the thread is gone.
1111 		 */
1112 #ifndef NO_PORT_GEN
1113 		kport &= ~MACH_PORT_MAKE(0, IE_BITS_GEN_MASK + IE_BITS_GEN_ONE);
1114 #else
1115 		kport |= MACH_PORT_MAKE(0, ~(IE_BITS_GEN_MASK + IE_BITS_GEN_ONE));
1116 #endif
1117 		(void)copyoutmap_atomic32(task->map, kport,
1118 		    uthread_joiner_address(uth));
1119 		uthread_joiner_wake(task, uth);
1120 	}
1121 #endif
1122 
1123 	thread_deallocate(thread);
1124 }
1125 
1126 static void
1127 thread_deallocate_queue_invoke(mpsc_queue_chain_t e,
1128     __assert_only mpsc_daemon_queue_t dq)
1129 {
1130 	thread_t thread = mpsc_queue_element(e, struct thread, mpsc_links);
1131 
1132 	assert(dq == &thread_deallocate_queue);
1133 
1134 	thread_deallocate_complete(thread);
1135 }
1136 
1137 /*
1138  *	thread_terminate_enqueue:
1139  *
1140  *	Enqueue a terminating thread for final disposition.
1141  *
1142  *	Called at splsched.
1143  */
1144 void
1145 thread_terminate_enqueue(
1146 	thread_t                thread)
1147 {
1148 	KDBG_RELEASE(TRACE_DATA_THREAD_TERMINATE, thread->thread_id);
1149 
1150 	mpsc_daemon_enqueue(&thread_terminate_queue, &thread->mpsc_links,
1151 	    MPSC_QUEUE_DISABLE_PREEMPTION);
1152 }
1153 
1154 /*
1155  *	thread_deallocate_enqueue:
1156  *
1157  *	Enqueue a thread for final deallocation.
1158  */
1159 static void
1160 thread_deallocate_enqueue(
1161 	thread_t                thread)
1162 {
1163 	mpsc_daemon_enqueue(&thread_deallocate_queue, &thread->mpsc_links,
1164 	    MPSC_QUEUE_DISABLE_PREEMPTION);
1165 }
1166 
1167 /*
1168  * thread_terminate_crashed_threads:
1169  * Walk the list of crashed threads and put back the
1170  * threads that are no longer being inspected.
1171  */
1172 void
1173 thread_terminate_crashed_threads(void)
1174 {
1175 	thread_t th_remove;
1176 
1177 	simple_lock(&crashed_threads_lock, &thread_lck_grp);
1178 	/*
1179 	 * Loop through the crashed threads queue and put back
1180 	 * any threads that are no longer being inspected.
1181 	 */
1182 
1183 	qe_foreach_element_safe(th_remove, &crashed_threads_queue, runq_links) {
1184 		/* make sure current_thread is never in crashed queue */
1185 		assert(th_remove != current_thread());
1186 
1187 		if (th_remove->inspection == FALSE) {
1188 			remqueue(&th_remove->runq_links);
1189 			mpsc_daemon_enqueue(&thread_terminate_queue, &th_remove->mpsc_links,
1190 			    MPSC_QUEUE_NONE);
1191 		}
1192 	}
1193 
1194 	simple_unlock(&crashed_threads_lock);
1195 }
1196 
1197 /*
1198  *	thread_stack_queue_invoke:
1199  *
1200  *	Perform stack allocation as required due to
1201  *	invoke failures.
1202  */
1203 static void
1204 thread_stack_queue_invoke(mpsc_queue_chain_t elm,
1205     __assert_only mpsc_daemon_queue_t dq)
1206 {
1207 	thread_t thread = mpsc_queue_element(elm, struct thread, mpsc_links);
1208 
1209 	assert(dq == &thread_stack_queue);
1210 
1211 	/* allocate stack with interrupts enabled so that we can call into VM */
1212 	stack_alloc(thread);
1213 
1214 	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_WAIT) | DBG_FUNC_END, thread_tid(thread), 0, 0, 0, 0);
1215 
1216 	spl_t s = splsched();
1217 	thread_lock(thread);
1218 	thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
1219 	thread_unlock(thread);
1220 	splx(s);
1221 }
1222 
1223 /*
1224  *	thread_stack_enqueue:
1225  *
1226  *	Enqueue a thread for stack allocation.
1227  *
1228  *	Called at splsched.
1229  */
1230 void
1231 thread_stack_enqueue(
1232 	thread_t                thread)
1233 {
1234 	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_WAIT) | DBG_FUNC_START, thread_tid(thread), 0, 0, 0, 0);
1235 	assert_thread_magic(thread);
1236 
1237 	mpsc_daemon_enqueue(&thread_stack_queue, &thread->mpsc_links,
1238 	    MPSC_QUEUE_DISABLE_PREEMPTION);
1239 }
1240 
1241 void
1242 thread_daemon_init(void)
1243 {
1244 	kern_return_t   result;
1245 
1246 	thread_deallocate_daemon_init();
1247 
1248 	thread_deallocate_daemon_register_queue(&thread_terminate_queue,
1249 	    thread_terminate_queue_invoke);
1250 
1251 	thread_deallocate_daemon_register_queue(&thread_deallocate_queue,
1252 	    thread_deallocate_queue_invoke);
1253 
1254 	smr_register_mpsc_queue();
1255 
1256 	ipc_object_deallocate_register_queue();
1257 
1258 	simple_lock_init(&crashed_threads_lock, 0);
1259 	queue_init(&crashed_threads_queue);
1260 
1261 	result = mpsc_daemon_queue_init_with_thread(&thread_stack_queue,
1262 	    thread_stack_queue_invoke, BASEPRI_PREEMPT_HIGH,
1263 	    "daemon.thread-stack", MPSC_DAEMON_INIT_NONE);
1264 	if (result != KERN_SUCCESS) {
1265 		panic("thread_daemon_init: thread_stack_daemon");
1266 	}
1267 
1268 	result = mpsc_daemon_queue_init_with_thread(&thread_exception_queue,
1269 	    thread_exception_queue_invoke, MINPRI_KERNEL,
1270 	    "daemon.thread-exception", MPSC_DAEMON_INIT_NONE);
1271 
1272 	if (result != KERN_SUCCESS) {
1273 		panic("thread_daemon_init: thread_exception_daemon");
1274 	}
1275 
1276 	result = mpsc_daemon_queue_init_with_thread(&thread_backtrace_queue,
1277 	    thread_backtrace_queue_invoke, MINPRI_KERNEL,
1278 	    "daemon.thread-backtrace", MPSC_DAEMON_INIT_NONE);
1279 
1280 	if (result != KERN_SUCCESS) {
1281 		panic("thread_daemon_init: thread_backtrace_daemon");
1282 	}
1283 }
1284 
1285 __options_decl(thread_create_internal_options_t, uint32_t, {
1286 	TH_OPTION_NONE          = 0x00,
1287 	TH_OPTION_NOSUSP        = 0x02,
1288 	TH_OPTION_WORKQ         = 0x04,
1289 	TH_OPTION_MAINTHREAD    = 0x08,
1290 });
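/*
 * As used later in this file: TH_OPTION_NOSUSP makes creation fail while the
 * parent task is suspended, TH_OPTION_WORKQ hands the new thread to
 * workq_thread_init_and_wq_lock(), and TH_OPTION_MAINTHREAD marks the
 * initial user thread (IPC_THREAD_INIT_MAINTHREAD, uninterruptible initial
 * wait).
 */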
1291 
1292 void
1293 main_thread_set_immovable_pinned(thread_t thread)
1294 {
1295 	ipc_main_thread_set_immovable_pinned(thread);
1296 }
1297 
1298 /*
1299  * Create a new thread.
1300  * Doesn't start the thread running.
1301  *
1302  * Task and tasks_threads_lock are returned locked on success.
1303  */
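/*
 * Caller-side sketch (mirroring the real callers further below): on
 * KERN_SUCCESS both locks are still held, so the caller finishes per-thread
 * setup and then drops them:
 *
 *	kern_return_t kr;
 *	thread_t thread;
 *
 *	kr = thread_create_internal(task, -1, continuation, NULL,
 *	    TH_OPTION_NONE, &thread);
 *	if (kr == KERN_SUCCESS) {
 *		// ...per-thread setup under both locks...
 *		task_unlock(task);
 *		lck_mtx_unlock(&tasks_threads_lock);
 *	}
 */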
1304 static kern_return_t
1305 thread_create_internal(
1306 	task_t                                  parent_task,
1307 	integer_t                               priority,
1308 	thread_continue_t                       continuation,
1309 	void                                    *parameter,
1310 	thread_create_internal_options_t        options,
1311 	thread_t                                *out_thread)
1312 {
1313 	thread_t                  new_thread;
1314 	ipc_thread_init_options_t init_options = IPC_THREAD_INIT_NONE;
1315 	struct thread_ro          tro_tpl = { };
1316 	bool first_thread = false;
1317 	kern_return_t kr = KERN_FAILURE;
1318 
1319 	/*
1320 	 *	Allocate a thread and initialize static fields
1321 	 */
1322 	new_thread = zalloc_flags(thread_zone, Z_WAITOK | Z_NOFAIL);
1323 
1324 	if (__improbable(current_thread() == &init_thread)) {
1325 		/*
1326 		 * The first thread ever is a global, but because we want to be
1327 		 * able to zone_id_require() threads, we have to stop using the
1328 		 * global piece of memory we used to bootstrap the kernel and
1329 		 * jump to a proper thread from a zone.
1330 		 *
1331 		 * This is why that one thread will inherit its original
1332 		 * state differently.
1333 		 *
1334 		 * Also remember this thread in `vm_pageout_scan_thread`
1335 		 * as this is what the first thread ever becomes.
1336 		 *
1337 		 * Also pre-warm the depress timer since the VM pageout scan
1338 		 * daemon might need to use it.
1339 		 */
1340 		assert(vm_pageout_scan_thread == THREAD_NULL);
1341 		vm_pageout_scan_thread = new_thread;
1342 
1343 		first_thread = true;
1344 #pragma clang diagnostic push
1345 #pragma clang diagnostic ignored "-Wnontrivial-memaccess"
1346 		/* work around 74481146 */
1347 		memcpy(new_thread, &init_thread, sizeof(*new_thread));
1348 #pragma clang diagnostic pop
1349 
1350 		/*
1351 		 * Make the ctid table functional
1352 		 */
1353 		ctid_table_init();
1354 		new_thread->ctid = 0;
1355 	} else {
1356 		init_thread_from_template(new_thread);
1357 	}
1358 
1359 	if (options & TH_OPTION_MAINTHREAD) {
1360 		init_options |= IPC_THREAD_INIT_MAINTHREAD;
1361 	}
1362 
1363 	os_ref_init_count_raw(&new_thread->ref_count, &thread_refgrp, 2);
1364 	machine_thread_create(new_thread, parent_task, first_thread);
1365 
1366 	machine_thread_process_signature(new_thread, parent_task);
1367 
1368 #ifdef MACH_BSD
1369 	uthread_init(parent_task, get_bsdthread_info(new_thread),
1370 	    &tro_tpl, (options & TH_OPTION_WORKQ) != 0);
1371 	if (!task_is_a_corpse(parent_task)) {
1372 		/*
1373 		 * uthread_init will set tro_cred (with a +1)
1374 		 * and tro_proc for live tasks.
1375 		 */
1376 		assert(tro_tpl.tro_cred && tro_tpl.tro_proc);
1377 	}
1378 #endif  /* MACH_BSD */
1379 
1380 	thread_lock_init(new_thread);
1381 	wake_lock_init(new_thread);
1382 
1383 	lck_mtx_init(&new_thread->mutex, &thread_lck_grp, LCK_ATTR_NULL);
1384 
1385 	ipc_thread_init(parent_task, new_thread, &tro_tpl, init_options);
1386 
1387 	thread_ro_create(parent_task, new_thread, &tro_tpl);
1388 
1389 	new_thread->continuation = continuation;
1390 	new_thread->parameter = parameter;
1391 	new_thread->inheritor_flags = TURNSTILE_UPDATE_FLAGS_NONE;
1392 	new_thread->requested_policy = default_thread_requested_policy;
1393 	priority_queue_init(&new_thread->sched_inheritor_queue);
1394 	priority_queue_init(&new_thread->base_inheritor_queue);
1395 #if CONFIG_SCHED_CLUTCH
1396 	priority_queue_entry_init(&new_thread->th_clutch_runq_link);
1397 	priority_queue_entry_init(&new_thread->th_clutch_pri_link);
1398 #endif /* CONFIG_SCHED_CLUTCH */
1399 
1400 #if CONFIG_SCHED_EDGE
1401 	new_thread->th_bound_cluster_enqueued = false;
1402 	for (cluster_shared_rsrc_type_t shared_rsrc_type = CLUSTER_SHARED_RSRC_TYPE_MIN; shared_rsrc_type < CLUSTER_SHARED_RSRC_TYPE_COUNT; shared_rsrc_type++) {
1403 		new_thread->th_shared_rsrc_enqueued[shared_rsrc_type] = false;
1404 		new_thread->th_shared_rsrc_heavy_user[shared_rsrc_type] = false;
1405 		new_thread->th_shared_rsrc_heavy_perf_control[shared_rsrc_type] = false;
1406 	}
1407 #endif /* CONFIG_SCHED_EDGE */
1408 	new_thread->th_bound_cluster_id = THREAD_BOUND_CLUSTER_NONE;
1409 
1410 	/* Allocate I/O Statistics structure */
1411 	new_thread->thread_io_stats = kalloc_data(sizeof(struct io_stat_info),
1412 	    Z_WAITOK | Z_ZERO | Z_NOFAIL);
1413 
1414 #if KASAN_CLASSIC
1415 	kasan_init_thread(&new_thread->kasan_data);
1416 #endif /* KASAN_CLASSIC */
1417 
1418 #if CONFIG_KCOV
1419 	kcov_init_thread(&new_thread->kcov_data);
1420 #endif
1421 
1422 #if CONFIG_IOSCHED
1423 	/* Clear out the I/O Scheduling info for AppleFSCompression */
1424 	new_thread->decmp_upl = NULL;
1425 #endif /* CONFIG_IOSCHED */
1426 
1427 	new_thread->thread_region_page_shift = 0;
1428 
1429 #if DEVELOPMENT || DEBUG
1430 	task_lock(parent_task);
1431 	uint16_t thread_limit = parent_task->task_thread_limit;
1432 	if (exc_resource_threads_enabled &&
1433 	    thread_limit > 0 &&
1434 	    parent_task->thread_count >= thread_limit &&
1435 	    !parent_task->task_has_crossed_thread_limit &&
1436 	    !(task_is_a_corpse(parent_task))) {
1437 		int thread_count = parent_task->thread_count;
1438 		parent_task->task_has_crossed_thread_limit = TRUE;
1439 		task_unlock(parent_task);
1440 		SENDING_NOTIFICATION__TASK_HAS_TOO_MANY_THREADS(parent_task, thread_count);
1441 	} else {
1442 		task_unlock(parent_task);
1443 	}
1444 #endif
1445 
1446 	lck_mtx_lock(&tasks_threads_lock);
1447 	task_lock(parent_task);
1448 
1449 	/*
1450 	 * Fail thread creation if the parent task is being torn down or has too many threads.
1451 	 * If the caller asked for TH_OPTION_NOSUSP, also fail if the parent task is suspended.
1452 	 */
1453 	if (parent_task->active == 0 || parent_task->halting ||
1454 	    (parent_task->suspend_count > 0 && (options & TH_OPTION_NOSUSP) != 0) ||
1455 	    (parent_task->thread_count >= task_threadmax && parent_task != kernel_task)) {
1456 		task_unlock(parent_task);
1457 		lck_mtx_unlock(&tasks_threads_lock);
1458 
1459 		ipc_thread_disable(new_thread);
1460 		ipc_thread_terminate(new_thread);
1461 		kfree_data(new_thread->thread_io_stats,
1462 		    sizeof(struct io_stat_info));
1463 		lck_mtx_destroy(&new_thread->mutex, &thread_lck_grp);
1464 		kr = KERN_FAILURE;
1465 		goto out_thread_cleanup;
1466 	}
1467 
1468 	/* Protected by the tasks_threads_lock */
1469 	new_thread->thread_id = ++thread_unique_id;
1470 
1471 	ctid_table_add(new_thread);
1472 
1473 	/* New threads inherit any default state on the task */
1474 	machine_thread_inherit_taskwide(new_thread, parent_task);
1475 
1476 	task_reference_grp(parent_task, TASK_GRP_INTERNAL);
1477 
1478 	if (parent_task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
1479 		/*
1480 		 * This task has a per-thread CPU limit; make sure this new thread
1481 		 * gets its limit set too, before it gets out of the kernel.
1482 		 */
1483 		act_set_astledger(new_thread);
1484 	}
1485 
1486 	/* Instantiate a thread ledger. Do not fail thread creation if ledger creation fails. */
1487 	if ((new_thread->t_threadledger = ledger_instantiate(thread_ledger_template,
1488 	    LEDGER_CREATE_INACTIVE_ENTRIES)) != LEDGER_NULL) {
1489 		ledger_entry_setactive(new_thread->t_threadledger, thread_ledgers.cpu_time);
1490 	}
1491 
1492 	new_thread->t_bankledger = LEDGER_NULL;
1493 	new_thread->t_deduct_bank_ledger_time = 0;
1494 	new_thread->t_deduct_bank_ledger_energy = 0;
1495 
1496 	new_thread->t_ledger = parent_task->ledger;
1497 	if (new_thread->t_ledger) {
1498 		ledger_reference(new_thread->t_ledger);
1499 	}
1500 
1501 	recount_thread_init(&new_thread->th_recount);
1502 
1503 #if defined(CONFIG_SCHED_MULTIQ)
1504 	/* Cache the task's sched_group */
1505 	new_thread->sched_group = parent_task->sched_group;
1506 #endif /* defined(CONFIG_SCHED_MULTIQ) */
1507 
1508 	/* Cache the task's map */
1509 	new_thread->map = parent_task->map;
1510 
1511 	new_thread->depress_timer = timer_call_alloc(thread_depress_expire, new_thread);
1512 	new_thread->wait_timer = timer_call_alloc(thread_timer_expire, new_thread);
1513 
1514 #if KPC
1515 	kpc_thread_create(new_thread);
1516 #endif
1517 
1518 	/* Set the thread's scheduling parameters */
1519 	new_thread->sched_mode = SCHED(initial_thread_sched_mode)(parent_task);
1520 	new_thread->max_priority = parent_task->max_priority;
1521 	new_thread->task_priority = parent_task->priority;
1522 
1523 #if CONFIG_THREAD_GROUPS
1524 	thread_group_init_thread(new_thread, parent_task);
1525 #endif /* CONFIG_THREAD_GROUPS */
1526 
1527 	int new_priority = (priority < 0) ? parent_task->priority : priority;
1529 	if (new_priority > new_thread->max_priority) {
1530 		new_priority = new_thread->max_priority;
1531 	}
1532 #if !defined(XNU_TARGET_OS_OSX)
1533 	if (new_priority < MAXPRI_THROTTLE) {
1534 		new_priority = MAXPRI_THROTTLE;
1535 	}
1536 #endif /* !defined(XNU_TARGET_OS_OSX) */
1537 
1538 	new_thread->importance = new_priority - new_thread->task_priority;
1539 
1540 	sched_set_thread_base_priority(new_thread, new_priority);
1541 
1542 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
1543 	new_thread->sched_stamp = sched_tick;
1544 #if CONFIG_SCHED_CLUTCH
1545 	new_thread->pri_shift = sched_clutch_thread_pri_shift(new_thread, new_thread->th_sched_bucket);
1546 #else /* CONFIG_SCHED_CLUTCH */
1547 	new_thread->pri_shift = sched_pri_shifts[new_thread->th_sched_bucket];
1548 #endif /* CONFIG_SCHED_CLUTCH */
1549 #endif /* defined(CONFIG_SCHED_TIMESHARE_CORE) */
1550 
1551 	if (parent_task->max_priority <= MAXPRI_THROTTLE) {
1552 		sched_thread_mode_demote(new_thread, TH_SFLAG_THROTTLED);
1553 	}
1554 
1555 	thread_policy_create(new_thread);
1556 
1557 	/* Chain the thread onto the task's list */
1558 	queue_enter(&parent_task->threads, new_thread, thread_t, task_threads);
1559 	parent_task->thread_count++;
1560 
1561 	/* So terminating threads don't need to take the task lock to decrement */
1562 	os_atomic_inc(&parent_task->active_thread_count, relaxed);
1563 
1564 	queue_enter(&threads, new_thread, thread_t, threads);
1565 	threads_count++;
1566 
1567 	new_thread->active = TRUE;
1568 	if (task_is_a_corpse_fork(parent_task)) {
1569 		/* Set the inspection bit if the task is a corpse fork */
1570 		new_thread->inspection = TRUE;
1571 	} else {
1572 		new_thread->inspection = FALSE;
1573 	}
1574 	new_thread->corpse_dup = FALSE;
1575 	new_thread->turnstile = turnstile_alloc();
1576 	new_thread->ctsid = turnstile_compact_id_get();
1577 
1578 
1579 	*out_thread = new_thread;
1580 
1581 	if (kdebug_enable) {
1582 		long args[4] = {};
1583 
1584 		kdbg_trace_data(get_bsdtask_info(parent_task), &args[1], &args[3]);
1585 
1586 		/*
1587 		 * Starting with 26604425, exec'ing creates a new task/thread.
1588 		 *
1589 		 * NEWTHREAD in the current process has two possible meanings:
1590 		 *
1591 		 * 1) Create a new thread for this process.
1592 		 * 2) Create a new thread for the future process this will become in an
1593 		 * exec.
1594 		 *
1595 		 * To disambiguate these, arg3 will be set to TRUE for case #2.
1596 		 *
1597 		 * The value we need to find (TPF_EXEC_COPY) is stable in the case of a
1598 		 * task exec'ing. The read of t_procflags does not take the proc_lock.
1599 		 */
1600 		args[2] = task_is_exec_copy(parent_task) ? 1 : 0;
1601 
1602 		KDBG_RELEASE(TRACE_DATA_NEWTHREAD, (uintptr_t)thread_tid(new_thread),
1603 		    args[1], args[2], args[3]);
1604 
1605 		kdebug_proc_name_args(get_bsdtask_info(parent_task), args);
1606 		KDBG_RELEASE(TRACE_STRING_NEWTHREAD, args[0], args[1], args[2],
1607 		    args[3]);
1608 	}
1609 
1610 	DTRACE_PROC1(lwp__create, thread_t, *out_thread);
1611 
1612 	kr = KERN_SUCCESS;
1613 	goto done;
1614 
1615 out_thread_cleanup:
1616 #ifdef MACH_BSD
1617 	{
1618 		struct uthread *ut = get_bsdthread_info(new_thread);
1619 
1620 		uthread_cleanup(ut, &tro_tpl);
1621 		uthread_destroy(ut);
1622 	}
1623 #endif  /* MACH_BSD */
1624 
1625 	machine_thread_destroy(new_thread);
1626 
1627 	thread_ro_destroy(new_thread);
1628 	zfree(thread_zone, new_thread);
1629 
1630 done:
1631 	return kr;
1632 }
1633 
1634 static kern_return_t
1635 thread_create_with_options_internal(
1636 	task_t                            task,
1637 	thread_t                          *new_thread,
1638 	boolean_t                         from_user,
1639 	thread_create_internal_options_t  options,
1640 	thread_continue_t                 continuation)
1641 {
1642 	kern_return_t           result;
1643 	thread_t                thread;
1644 
1645 	if (task == TASK_NULL || task == kernel_task) {
1646 		return KERN_INVALID_ARGUMENT;
1647 	}
1648 
1649 #if CONFIG_MACF
1650 	if (from_user && current_task() != task &&
1651 	    mac_proc_check_remote_thread_create(task, -1, NULL, 0) != 0) {
1652 		return KERN_DENIED;
1653 	}
1654 #endif
1655 
1656 	result = thread_create_internal(task, -1, continuation, NULL, options, &thread);
1657 	if (result != KERN_SUCCESS) {
1658 		return result;
1659 	}
1660 
1661 	thread->user_stop_count = 1;
1662 	thread_hold(thread);
1663 	if (task->suspend_count > 0) {
1664 		thread_hold(thread);
1665 	}
1666 
1667 	if (from_user) {
1668 		extmod_statistics_incr_thread_create(task);
1669 	}
1670 
1671 	task_unlock(task);
1672 	lck_mtx_unlock(&tasks_threads_lock);
1673 
1674 	*new_thread = thread;
1675 
1676 	return KERN_SUCCESS;
1677 }
1678 
1679 kern_return_t
1680 thread_create_immovable(
1681 	task_t                          task,
1682 	thread_t                        *new_thread)
1683 {
1684 	return thread_create_with_options_internal(task, new_thread, FALSE,
1685 	           TH_OPTION_NONE, (thread_continue_t)thread_bootstrap_return);
1686 }
1687 
1688 kern_return_t
1689 thread_create_from_user(
1690 	task_t                          task,
1691 	thread_t                        *new_thread)
1692 {
1693 	/* All thread ports are created immovable by default */
1694 	return thread_create_with_options_internal(task, new_thread, TRUE, TH_OPTION_NONE,
1695 	           (thread_continue_t)thread_bootstrap_return);
1696 }
1697 
1698 kern_return_t
1699 thread_create_with_continuation(
1700 	task_t                          task,
1701 	thread_t                        *new_thread,
1702 	thread_continue_t               continuation)
1703 {
1704 	return thread_create_with_options_internal(task, new_thread, FALSE, TH_OPTION_NONE, continuation);
1705 }
1706 
1707 /*
1708  * Create a thread that is already started, but is waiting on an event
1709  */
1710 static kern_return_t
1711 thread_create_waiting_internal(
1712 	task_t                  task,
1713 	thread_continue_t       continuation,
1714 	event_t                 event,
1715 	block_hint_t            block_hint,
1716 	thread_create_internal_options_t options,
1717 	thread_t                *new_thread)
1718 {
1719 	kern_return_t result;
1720 	thread_t thread;
1721 	wait_interrupt_t wait_interrupt = THREAD_INTERRUPTIBLE;
1722 
1723 	if (task == TASK_NULL || task == kernel_task) {
1724 		return KERN_INVALID_ARGUMENT;
1725 	}
1726 
1727 	result = thread_create_internal(task, -1, continuation, NULL,
1728 	    options, &thread);
1729 	if (result != KERN_SUCCESS) {
1730 		return result;
1731 	}
1732 
1733 	/* note no user_stop_count or thread_hold here */
1734 
1735 	if (task->suspend_count > 0) {
1736 		thread_hold(thread);
1737 	}
1738 
1739 	thread_mtx_lock(thread);
1740 	thread_set_pending_block_hint(thread, block_hint);
1741 	if (options & TH_OPTION_WORKQ) {
1742 		thread->static_param = true;
1743 		event = workq_thread_init_and_wq_lock(task, thread);
1744 	} else if (options & TH_OPTION_MAINTHREAD) {
1745 		wait_interrupt = THREAD_UNINT;
1746 	}
1747 	thread_start_in_assert_wait(thread, event, wait_interrupt);
1748 	thread_mtx_unlock(thread);
1749 
1750 	task_unlock(task);
1751 	lck_mtx_unlock(&tasks_threads_lock);
1752 
1753 	*new_thread = thread;
1754 
1755 	return KERN_SUCCESS;
1756 }
1757 
1758 kern_return_t
1759 main_thread_create_waiting(
1760 	task_t                          task,
1761 	thread_continue_t               continuation,
1762 	event_t                         event,
1763 	thread_t                        *new_thread)
1764 {
1765 	return thread_create_waiting_internal(task, continuation, event,
1766 	           kThreadWaitNone, TH_OPTION_MAINTHREAD, new_thread);
1767 }
1768 
1769 
1770 static kern_return_t
1771 thread_create_running_internal2(
1772 	task_t         task,
1773 	int                     flavor,
1774 	thread_state_t          new_state,
1775 	mach_msg_type_number_t  new_state_count,
1776 	thread_t                                *new_thread,
1777 	boolean_t                               from_user)
1778 {
1779 	kern_return_t  result;
1780 	thread_t                                thread;
1781 
1782 	if (task == TASK_NULL || task == kernel_task) {
1783 		return KERN_INVALID_ARGUMENT;
1784 	}
1785 
1786 #if CONFIG_MACF
1787 	if (from_user && current_task() != task &&
1788 	    mac_proc_check_remote_thread_create(task, flavor, new_state, new_state_count) != 0) {
1789 		return KERN_DENIED;
1790 	}
1791 #endif
1792 
1793 	result = thread_create_internal(task, -1,
1794 	    (thread_continue_t)thread_bootstrap_return, NULL,
1795 	    TH_OPTION_NONE, &thread);
1796 	if (result != KERN_SUCCESS) {
1797 		return result;
1798 	}
1799 
1800 	if (task->suspend_count > 0) {
1801 		thread_hold(thread);
1802 	}
1803 
1804 	if (from_user) {
1805 		result = machine_thread_state_convert_from_user(thread, flavor,
1806 		    new_state, new_state_count, NULL, 0, TSSF_FLAGS_NONE);
1807 	}
1808 	if (result == KERN_SUCCESS) {
1809 		result = machine_thread_set_state(thread, flavor, new_state,
1810 		    new_state_count);
1811 	}
1812 	if (result != KERN_SUCCESS) {
1813 		task_unlock(task);
1814 		lck_mtx_unlock(&tasks_threads_lock);
1815 
1816 		thread_terminate(thread);
1817 		thread_deallocate(thread);
1818 		return result;
1819 	}
1820 
1821 	thread_mtx_lock(thread);
1822 	thread_start(thread);
1823 	thread_mtx_unlock(thread);
1824 
1825 	if (from_user) {
1826 		extmod_statistics_incr_thread_create(task);
1827 	}
1828 
1829 	task_unlock(task);
1830 	lck_mtx_unlock(&tasks_threads_lock);
1831 
1832 	*new_thread = thread;
1833 
1834 	return result;
1835 }
1836 
1837 /* Prototype, see justification above */
1838 kern_return_t
1839 thread_create_running(
1840 	task_t         task,
1841 	int                     flavor,
1842 	thread_state_t          new_state,
1843 	mach_msg_type_number_t  new_state_count,
1844 	thread_t                                *new_thread);
1845 
1846 kern_return_t
1847 thread_create_running(
1848 	task_t         task,
1849 	int                     flavor,
1850 	thread_state_t          new_state,
1851 	mach_msg_type_number_t  new_state_count,
1852 	thread_t                                *new_thread)
1853 {
1854 	return thread_create_running_internal2(
1855 		task, flavor, new_state, new_state_count,
1856 		new_thread, FALSE);
1857 }
1858 
1859 kern_return_t
1860 thread_create_running_from_user(
1861 	task_t         task,
1862 	int                     flavor,
1863 	thread_state_t          new_state,
1864 	mach_msg_type_number_t  new_state_count,
1865 	thread_t                                *new_thread)
1866 {
1867 	return thread_create_running_internal2(
1868 		task, flavor, new_state, new_state_count,
1869 		new_thread, TRUE);
1870 }
1871 
1872 kern_return_t
1873 thread_create_workq_waiting(
1874 	task_t              task,
1875 	thread_continue_t   continuation,
1876 	thread_t            *new_thread)
1877 {
1878 	/*
1879 	 * Create thread, but don't pin control port just yet, in case someone calls
1880 	 * task_threads() and deallocates pinned port before kernel copyout happens,
1881 	 * which will result in pinned port guard exception. Instead, pin and copyout
1882 	 * atomically during workq_setup_and_run().
1883 	 */
1884 	int options = TH_OPTION_NOSUSP | TH_OPTION_WORKQ;
1885 	return thread_create_waiting_internal(task, continuation, NULL,
1886 	           kThreadWaitParkedWorkQueue, options, new_thread);
1887 }
1888 
1889 /*
1890  *	kernel_thread_create:
1891  *
1892  *	Create a thread in the kernel task
1893  *	to execute in kernel context.
1894  */
1895 kern_return_t
1896 kernel_thread_create(
1897 	thread_continue_t       continuation,
1898 	void                            *parameter,
1899 	integer_t                       priority,
1900 	thread_t                        *new_thread)
1901 {
1902 	kern_return_t           result;
1903 	thread_t                        thread;
1904 	task_t                          task = kernel_task;
1905 
1906 	result = thread_create_internal(task, priority, continuation, parameter,
1907 	    TH_OPTION_NONE, &thread);
1908 	if (result != KERN_SUCCESS) {
1909 		return result;
1910 	}
1911 
1912 	task_unlock(task);
1913 	lck_mtx_unlock(&tasks_threads_lock);
1914 
1915 	stack_alloc(thread);
1916 	assert(thread->kernel_stack != 0);
1917 #if !defined(XNU_TARGET_OS_OSX)
1918 	if (priority > BASEPRI_KERNEL)
1919 #endif
1920 	thread->reserved_stack = thread->kernel_stack;
1921 
1922 	if (debug_task & 1) {
1923 		kprintf("kernel_thread_create: thread = %p continuation = %p\n", thread, continuation);
1924 	}
1925 	*new_thread = thread;
1926 
1927 	return result;
1928 }
1929 
1930 kern_return_t
1931 kernel_thread_start_priority(
1932 	thread_continue_t       continuation,
1933 	void                            *parameter,
1934 	integer_t                       priority,
1935 	thread_t                        *new_thread)
1936 {
1937 	kern_return_t   result;
1938 	thread_t                thread;
1939 
1940 	result = kernel_thread_create(continuation, parameter, priority, &thread);
1941 	if (result != KERN_SUCCESS) {
1942 		return result;
1943 	}
1944 
1945 	*new_thread = thread;
1946 
1947 	thread_mtx_lock(thread);
1948 	thread_start(thread);
1949 	thread_mtx_unlock(thread);
1950 
1951 	return result;
1952 }
1953 
1954 kern_return_t
1955 kernel_thread_start(
1956 	thread_continue_t       continuation,
1957 	void                            *parameter,
1958 	thread_t                        *new_thread)
1959 {
1960 	return kernel_thread_start_priority(continuation, parameter, -1, new_thread);
1961 }
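/*
 * Usage sketch (illustrative, not part of the original source):
 * kernel_thread_start() hands back a reference on the new thread, so a
 * caller that does not keep the handle around is expected to drop it once
 * the thread is running.  The worker name below is hypothetical.
 *
 *	static void
 *	example_worker(void *parameter, wait_result_t wr)
 *	{
 *		... do work; a continuation must not return, so finish with:
 *		thread_terminate(current_thread());
 *	}
 *
 *	thread_t thread;
 *	if (kernel_thread_start(example_worker, NULL, &thread) == KERN_SUCCESS) {
 *		thread_deallocate(thread);
 *	}
 */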
1962 
1963 /* Separated into helper function so it can be used by THREAD_BASIC_INFO and THREAD_EXTENDED_INFO */
1964 /* it is assumed that the thread is locked by the caller */
1965 static void
1966 retrieve_thread_basic_info(thread_t thread, thread_basic_info_t basic_info)
1967 {
1968 	int     state, flags;
1969 
1970 	/* fill in info */
1971 
1972 	thread_read_times(thread, &basic_info->user_time,
1973 	    &basic_info->system_time, NULL);
1974 
1975 	/*
1976 	 *	Update lazy-evaluated scheduler info because someone wants it.
1977 	 */
1978 	if (SCHED(can_update_priority)(thread)) {
1979 		SCHED(update_priority)(thread);
1980 	}
1981 
1982 	basic_info->sleep_time = 0;
1983 
1984 	/*
1985 	 *	To calculate cpu_usage, first correct for timer rate,
1986 	 *	then for 5/8 ageing.  The correction factor [3/5] is
1987 	 *	(1/(5/8) - 1).
1988 	 */
1989 	basic_info->cpu_usage = 0;
1990 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
1991 	if (sched_tick_interval) {
1992 		basic_info->cpu_usage = (integer_t)(((uint64_t)thread->cpu_usage
1993 		    * TH_USAGE_SCALE) / sched_tick_interval);
1994 		basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
1995 	}
1996 #endif
1997 
1998 	if (basic_info->cpu_usage > TH_USAGE_SCALE) {
1999 		basic_info->cpu_usage = TH_USAGE_SCALE;
2000 	}
2001 
2002 	basic_info->policy = ((thread->sched_mode == TH_MODE_TIMESHARE)?
2003 	    POLICY_TIMESHARE: POLICY_RR);
2004 
2005 	flags = 0;
2006 	if (thread->options & TH_OPT_IDLE_THREAD) {
2007 		flags |= TH_FLAGS_IDLE;
2008 	}
2009 
2010 	if (thread->options & TH_OPT_GLOBAL_FORCED_IDLE) {
2011 		flags |= TH_FLAGS_GLOBAL_FORCED_IDLE;
2012 	}
2013 
2014 	if (!thread->kernel_stack) {
2015 		flags |= TH_FLAGS_SWAPPED;
2016 	}
2017 
2018 	state = 0;
2019 	if (thread->state & TH_TERMINATE) {
2020 		state = TH_STATE_HALTED;
2021 	} else if (thread->state & TH_RUN) {
2022 		state = TH_STATE_RUNNING;
2023 	} else if (thread->state & TH_UNINT) {
2024 		state = TH_STATE_UNINTERRUPTIBLE;
2025 	} else if (thread->state & TH_SUSP) {
2026 		state = TH_STATE_STOPPED;
2027 	} else if (thread->state & TH_WAIT) {
2028 		state = TH_STATE_WAITING;
2029 	}
2030 
2031 	basic_info->run_state = state;
2032 	basic_info->flags = flags;
2033 
2034 	basic_info->suspend_count = thread->user_stop_count;
2035 
2036 	return;
2037 }
2038 
2039 kern_return_t
2040 thread_info_internal(
2041 	thread_t                thread,
2042 	thread_flavor_t                 flavor,
2043 	thread_info_t                   thread_info_out,        /* ptr to OUT array */
2044 	mach_msg_type_number_t  *thread_info_count)     /*IN/OUT*/
2045 {
2046 	spl_t   s;
2047 
2048 	if (thread == THREAD_NULL) {
2049 		return KERN_INVALID_ARGUMENT;
2050 	}
2051 
2052 	if (flavor == THREAD_BASIC_INFO) {
2053 		if (*thread_info_count < THREAD_BASIC_INFO_COUNT) {
2054 			return KERN_INVALID_ARGUMENT;
2055 		}
2056 
2057 		s = splsched();
2058 		thread_lock(thread);
2059 
2060 		retrieve_thread_basic_info(thread, (thread_basic_info_t) thread_info_out);
2061 
2062 		thread_unlock(thread);
2063 		splx(s);
2064 
2065 		*thread_info_count = THREAD_BASIC_INFO_COUNT;
2066 
2067 		return KERN_SUCCESS;
2068 	} else if (flavor == THREAD_IDENTIFIER_INFO) {
2069 		thread_identifier_info_t        identifier_info;
2070 
2071 		if (*thread_info_count < THREAD_IDENTIFIER_INFO_COUNT) {
2072 			return KERN_INVALID_ARGUMENT;
2073 		}
2074 
2075 		identifier_info = __IGNORE_WCASTALIGN((thread_identifier_info_t)thread_info_out);
2076 
2077 		s = splsched();
2078 		thread_lock(thread);
2079 
2080 		identifier_info->thread_id = thread->thread_id;
2081 		identifier_info->thread_handle = thread->machine.cthread_self;
2082 		identifier_info->dispatch_qaddr = thread_dispatchqaddr(thread);
2083 
2084 		thread_unlock(thread);
2085 		splx(s);
2086 		return KERN_SUCCESS;
2087 	} else if (flavor == THREAD_SCHED_TIMESHARE_INFO) {
2088 		policy_timeshare_info_t         ts_info;
2089 
2090 		if (*thread_info_count < POLICY_TIMESHARE_INFO_COUNT) {
2091 			return KERN_INVALID_ARGUMENT;
2092 		}
2093 
2094 		ts_info = (policy_timeshare_info_t)thread_info_out;
2095 
2096 		s = splsched();
2097 		thread_lock(thread);
2098 
2099 		if (thread->sched_mode != TH_MODE_TIMESHARE) {
2100 			thread_unlock(thread);
2101 			splx(s);
2102 			return KERN_INVALID_POLICY;
2103 		}
2104 
2105 		ts_info->depressed = (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) != 0;
2106 		if (ts_info->depressed) {
2107 			ts_info->base_priority = DEPRESSPRI;
2108 			ts_info->depress_priority = thread->base_pri;
2109 		} else {
2110 			ts_info->base_priority = thread->base_pri;
2111 			ts_info->depress_priority = -1;
2112 		}
2113 
2114 		ts_info->cur_priority = thread->sched_pri;
2115 		ts_info->max_priority = thread->max_priority;
2116 
2117 		thread_unlock(thread);
2118 		splx(s);
2119 
2120 		*thread_info_count = POLICY_TIMESHARE_INFO_COUNT;
2121 
2122 		return KERN_SUCCESS;
2123 	} else if (flavor == THREAD_SCHED_FIFO_INFO) {
2124 		if (*thread_info_count < POLICY_FIFO_INFO_COUNT) {
2125 			return KERN_INVALID_ARGUMENT;
2126 		}
2127 
2128 		return KERN_INVALID_POLICY;
2129 	} else if (flavor == THREAD_SCHED_RR_INFO) {
2130 		policy_rr_info_t                        rr_info;
2131 		uint32_t quantum_time;
2132 		uint64_t quantum_ns;
2133 
2134 		if (*thread_info_count < POLICY_RR_INFO_COUNT) {
2135 			return KERN_INVALID_ARGUMENT;
2136 		}
2137 
2138 		rr_info = (policy_rr_info_t) thread_info_out;
2139 
2140 		s = splsched();
2141 		thread_lock(thread);
2142 
2143 		if (thread->sched_mode == TH_MODE_TIMESHARE) {
2144 			thread_unlock(thread);
2145 			splx(s);
2146 
2147 			return KERN_INVALID_POLICY;
2148 		}
2149 
2150 		rr_info->depressed = (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) != 0;
2151 		if (rr_info->depressed) {
2152 			rr_info->base_priority = DEPRESSPRI;
2153 			rr_info->depress_priority = thread->base_pri;
2154 		} else {
2155 			rr_info->base_priority = thread->base_pri;
2156 			rr_info->depress_priority = -1;
2157 		}
2158 
2159 		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
2160 		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);
2161 
2162 		rr_info->max_priority = thread->max_priority;
2163 		rr_info->quantum = (uint32_t)(quantum_ns / 1000 / 1000);
2164 
2165 		thread_unlock(thread);
2166 		splx(s);
2167 
2168 		*thread_info_count = POLICY_RR_INFO_COUNT;
2169 
2170 		return KERN_SUCCESS;
2171 	} else if (flavor == THREAD_EXTENDED_INFO) {
2172 		thread_basic_info_data_t        basic_info;
2173 		thread_extended_info_t          extended_info = __IGNORE_WCASTALIGN((thread_extended_info_t)thread_info_out);
2174 
2175 		if (*thread_info_count < THREAD_EXTENDED_INFO_COUNT) {
2176 			return KERN_INVALID_ARGUMENT;
2177 		}
2178 
2179 		s = splsched();
2180 		thread_lock(thread);
2181 
2182 		/* NOTE: This mimics fill_taskthreadinfo(), which is the function used by proc_pidinfo() for
2183 		 * the PROC_PIDTHREADINFO flavor (which can't be used on corpses)
2184 		 */
2185 		retrieve_thread_basic_info(thread, &basic_info);
2186 		extended_info->pth_user_time = (((uint64_t)basic_info.user_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.user_time.microseconds * NSEC_PER_USEC));
2187 		extended_info->pth_system_time = (((uint64_t)basic_info.system_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.system_time.microseconds * NSEC_PER_USEC));
2188 
2189 		extended_info->pth_cpu_usage = basic_info.cpu_usage;
2190 		extended_info->pth_policy = basic_info.policy;
2191 		extended_info->pth_run_state = basic_info.run_state;
2192 		extended_info->pth_flags = basic_info.flags;
2193 		extended_info->pth_sleep_time = basic_info.sleep_time;
2194 		extended_info->pth_curpri = thread->sched_pri;
2195 		extended_info->pth_priority = thread->base_pri;
2196 		extended_info->pth_maxpriority = thread->max_priority;
2197 
2198 		bsd_getthreadname(get_bsdthread_info(thread), extended_info->pth_name);
2199 
2200 		thread_unlock(thread);
2201 		splx(s);
2202 
2203 		*thread_info_count = THREAD_EXTENDED_INFO_COUNT;
2204 
2205 		return KERN_SUCCESS;
2206 	} else if (flavor == THREAD_DEBUG_INFO_INTERNAL) {
2207 #if DEVELOPMENT || DEBUG
2208 		thread_debug_info_internal_t dbg_info;
2209 		if (*thread_info_count < THREAD_DEBUG_INFO_INTERNAL_COUNT) {
2210 			return KERN_NOT_SUPPORTED;
2211 		}
2212 
2213 		if (thread_info_out == NULL) {
2214 			return KERN_INVALID_ARGUMENT;
2215 		}
2216 
2217 		dbg_info = __IGNORE_WCASTALIGN((thread_debug_info_internal_t)thread_info_out);
2218 		dbg_info->page_creation_count = thread->t_page_creation_count;
2219 
2220 		*thread_info_count = THREAD_DEBUG_INFO_INTERNAL_COUNT;
2221 		return KERN_SUCCESS;
2222 #endif /* DEVELOPMENT || DEBUG */
2223 		return KERN_NOT_SUPPORTED;
2224 	}
2225 
2226 	return KERN_INVALID_ARGUMENT;
2227 }
2228 
2229 static void
2230 _convert_mach_to_time_value(uint64_t time_mach, time_value_t *time)
2231 {
2232 	clock_sec_t  secs;
2233 	clock_usec_t usecs;
2234 	absolutetime_to_microtime(time_mach, &secs, &usecs);
2235 	time->seconds = (typeof(time->seconds))secs;
2236 	time->microseconds = usecs;
2237 }
2238 
2239 void
2240 thread_read_times(
2241 	thread_t      thread,
2242 	time_value_t *user_time,
2243 	time_value_t *system_time,
2244 	time_value_t *runnable_time)
2245 {
2246 	if (user_time && system_time) {
2247 		struct recount_times_mach times = recount_thread_times(thread);
2248 		_convert_mach_to_time_value(times.rtm_user, user_time);
2249 		_convert_mach_to_time_value(times.rtm_system, system_time);
2250 	}
2251 
2252 	if (runnable_time) {
2253 		uint64_t runnable_time_mach = timer_grab(&thread->runnable_timer);
2254 		_convert_mach_to_time_value(runnable_time_mach, runnable_time);
2255 	}
2256 }
2257 
2258 uint64_t
2259 thread_get_runtime_self(void)
2260 {
2261 	/*
2262 	 * Must be guaranteed to stay on the same CPU and not be updated by the
2263 	 * scheduler.
2264 	 */
2265 	boolean_t interrupt_state = ml_set_interrupts_enabled(FALSE);
2266 	uint64_t time_mach = recount_current_thread_time_mach();
2267 	ml_set_interrupts_enabled(interrupt_state);
2268 	return time_mach;
2269 }
2270 
2271 /*
2272  *	thread_wire_internal:
2273  *
2274  *	Specify that the target thread must always be able
2275  *	to run and to allocate memory.
2276  */
2277 kern_return_t
2278 thread_wire_internal(
2279 	host_priv_t             host_priv,
2280 	thread_t                thread,
2281 	boolean_t               wired,
2282 	boolean_t               *prev_state)
2283 {
2284 	if (host_priv == NULL || thread != current_thread()) {
2285 		return KERN_INVALID_ARGUMENT;
2286 	}
2287 
2288 	if (prev_state) {
2289 		*prev_state = (thread->options & TH_OPT_VMPRIV) != 0;
2290 	}
2291 
2292 	if (wired) {
2293 		if (!(thread->options & TH_OPT_VMPRIV)) {
2294 			vm_page_free_reserve(1); /* XXX */
2295 		}
2296 		thread->options |= TH_OPT_VMPRIV;
2297 	} else {
2298 		if (thread->options & TH_OPT_VMPRIV) {
2299 			vm_page_free_reserve(-1); /* XXX */
2300 		}
2301 		thread->options &= ~TH_OPT_VMPRIV;
2302 	}
2303 
2304 	return KERN_SUCCESS;
2305 }
2306 
2307 
2308 /*
2309  *	thread_wire:
2310  *
2311  *	User-api wrapper for thread_wire_internal()
2312  */
2313 kern_return_t
2314 thread_wire(
2315 	host_priv_t     host_priv,
2316 	thread_t        thread,
2317 	boolean_t       wired)
2318 {
2319 	return thread_wire_internal(host_priv, thread, wired, NULL);
2320 }
2321 
2322 boolean_t
2323 is_external_pageout_thread(void)
2324 {
2325 	return current_thread() == pgo_iothread_external_state.pgo_iothread;
2326 }
2327 
2328 boolean_t
2329 is_vm_privileged(void)
2330 {
2331 	return current_thread()->options & TH_OPT_VMPRIV ? TRUE : FALSE;
2332 }
2333 
2334 boolean_t
2335 set_vm_privilege(boolean_t privileged)
2336 {
2337 	boolean_t       was_vmpriv;
2338 
2339 	if (current_thread()->options & TH_OPT_VMPRIV) {
2340 		was_vmpriv = TRUE;
2341 	} else {
2342 		was_vmpriv = FALSE;
2343 	}
2344 
2345 	if (privileged != FALSE) {
2346 		current_thread()->options |= TH_OPT_VMPRIV;
2347 	} else {
2348 		current_thread()->options &= ~TH_OPT_VMPRIV;
2349 	}
2350 
2351 	return was_vmpriv;
2352 }
2353 
2354 void
2355 thread_floor_boost_set_promotion_locked(thread_t thread)
2356 {
2357 	assert(thread->priority_floor_count > 0);
2358 
2359 	if (!(thread->sched_flags & TH_SFLAG_FLOOR_PROMOTED)) {
2360 		sched_thread_promote_reason(thread, TH_SFLAG_FLOOR_PROMOTED, 0);
2361 	}
2362 }
2363 
2364 /*!  @function thread_priority_floor_start
2365  *   @abstract boost the current thread priority to floor.
2366  *   @discussion Increase the priority of the current thread to at least MINPRI_FLOOR.
2367  *       The boost will be maintained until a corresponding thread_priority_floor_end()
2368  *       is called. Every call to thread_priority_floor_start() needs to have a corresponding
2369  *       call to thread_priority_floor_end() from the same thread.
2370  *       No thread can return to userspace before calling thread_priority_floor_end().
2371  *
2372  *       NOTE: avoid using this function. Prefer gate_t or sleep_with_inheritor()
2373  *       instead.
2374  *   @result a token to be given to the corresponding thread_priority_floor_end()
2375  */
2376 thread_pri_floor_t
2377 thread_priority_floor_start(void)
2378 {
2379 	thread_pri_floor_t ret;
2380 	thread_t thread = current_thread();
2381 	__assert_only uint16_t prev_priority_floor_count;
2382 
2383 	assert(thread->priority_floor_count < UINT16_MAX);
2384 	prev_priority_floor_count = thread->priority_floor_count++;
2385 #if MACH_ASSERT
2386 	/*
2387 	 * Set the AST to verify that the
2388 	 * priority_floor_count has dropped back to zero by the time
2389 	 * the thread returns to userspace.
2390 	 * Set it only once, on the first increment.
2391 	 */
2392 	if (prev_priority_floor_count == 0) {
2393 		act_set_debug_assert();
2394 	}
2395 #endif
2396 
2397 	ret.thread = thread;
2398 	return ret;
2399 }
2400 
2401 /*!  @function thread_priority_floor_end
2402  *   @abstract ends the floor boost.
2403  *   @param token the token obtained from thread_priority_floor_start()
2404  *   @discussion ends the priority floor boost started with thread_priority_floor_start()
2405  */
2406 void
2407 thread_priority_floor_end(thread_pri_floor_t *token)
2408 {
2409 	thread_t thread = current_thread();
2410 
2411 	assert(thread->priority_floor_count > 0);
2412 	assertf(token->thread == thread, "thread_priority_floor_end called from a different thread from thread_priority_floor_start %p %p", thread, token->thread);
2413 
2414 	if ((thread->priority_floor_count-- == 1) && (thread->sched_flags & TH_SFLAG_FLOOR_PROMOTED)) {
2415 		spl_t s = splsched();
2416 		thread_lock(thread);
2417 
2418 		if (thread->sched_flags & TH_SFLAG_FLOOR_PROMOTED) {
2419 			sched_thread_unpromote_reason(thread, TH_SFLAG_FLOOR_PROMOTED, 0);
2420 		}
2421 
2422 		thread_unlock(thread);
2423 		splx(s);
2424 	}
2425 
2426 	token->thread = NULL;
2427 }
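/*
 * Usage sketch (illustrative): both calls must be made by the same thread,
 * with the token from thread_priority_floor_start() handed to the matching
 * thread_priority_floor_end().
 *
 *	thread_pri_floor_t token = thread_priority_floor_start();
 *	... code that must keep running at or above MINPRI_FLOOR ...
 *	thread_priority_floor_end(&token);
 */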
2428 
2429 /*
2430  * XXX assuming current thread only, for now...
2431  */
2432 void
2433 thread_guard_violation(thread_t thread,
2434     mach_exception_data_type_t code, mach_exception_data_type_t subcode, boolean_t fatal)
2435 {
2436 	assert(thread == current_thread());
2437 
2438 	/* Don't set up the AST for kernel threads; this check is needed to ensure
2439 	 * that the guard_exc_* fields in the thread structure are set only by the
2440 	 * current thread and therefore don't require a lock.
2441 	 */
2442 	if (get_threadtask(thread) == kernel_task) {
2443 		return;
2444 	}
2445 
2446 	assert(EXC_GUARD_DECODE_GUARD_TYPE(code));
2447 
2448 	/*
2449 	 * Use the saved state area of the thread structure
2450 	 * to store all info required to handle the AST when
2451 	 * returning to userspace. It's possible that there is
2452 	 * already a pending guard exception. If it's non-fatal,
2453 	 * it can only be over-written by a fatal exception code.
2454 	 */
2455 	if (thread->guard_exc_info.code && (thread->guard_exc_fatal || !fatal)) {
2456 		return;
2457 	}
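	/*
	 * The check above reduces to (illustrative summary): no pending
	 * exception -> record the new one; pending non-fatal -> replaced
	 * only by a fatal one; pending fatal -> never replaced.
	 */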
2458 
2459 	thread->guard_exc_info.code = code;
2460 	thread->guard_exc_info.subcode = subcode;
2461 	thread->guard_exc_fatal = fatal ? 1 : 0;
2462 
2463 	spl_t s = splsched();
2464 	thread_ast_set(thread, AST_GUARD);
2465 	ast_propagate(thread);
2466 	splx(s);
2467 }
2468 
2469 #if CONFIG_DEBUG_SYSCALL_REJECTION
2470 extern void rejected_syscall_guard_ast(thread_t __unused t, mach_exception_data_type_t code, mach_exception_data_type_t subcode);
2471 #endif /* CONFIG_DEBUG_SYSCALL_REJECTION */
2472 
2473 /*
2474  *	guard_ast:
2475  *
2476  *	Handle AST_GUARD for a thread. This routine looks at the
2477  *	state saved in the thread structure to determine the cause
2478  *	of this exception. Based on this value, it invokes the
2479  *	appropriate routine which determines other exception related
2480  *	info and raises the exception.
2481  */
2482 void
2483 guard_ast(thread_t t)
2484 {
2485 	const mach_exception_data_type_t
2486 	    code = t->guard_exc_info.code,
2487 	    subcode = t->guard_exc_info.subcode;
2488 
2489 	t->guard_exc_info.code = 0;
2490 	t->guard_exc_info.subcode = 0;
2491 	t->guard_exc_fatal = 0;
2492 
2493 	switch (EXC_GUARD_DECODE_GUARD_TYPE(code)) {
2494 	case GUARD_TYPE_NONE:
2495 		/* lingering AST_GUARD on the processor? */
2496 		break;
2497 	case GUARD_TYPE_MACH_PORT:
2498 		mach_port_guard_ast(t, code, subcode);
2499 		break;
2500 	case GUARD_TYPE_FD:
2501 		fd_guard_ast(t, code, subcode);
2502 		break;
2503 #if CONFIG_VNGUARD
2504 	case GUARD_TYPE_VN:
2505 		vn_guard_ast(t, code, subcode);
2506 		break;
2507 #endif
2508 	case GUARD_TYPE_VIRT_MEMORY:
2509 		virt_memory_guard_ast(t, code, subcode);
2510 		break;
2511 #if CONFIG_DEBUG_SYSCALL_REJECTION
2512 	case GUARD_TYPE_REJECTED_SC:
2513 		rejected_syscall_guard_ast(t, code, subcode);
2514 		break;
2515 #endif /* CONFIG_DEBUG_SYSCALL_REJECTION */
2516 	default:
2517 		panic("guard_exc_info %llx %llx", code, subcode);
2518 	}
2519 }
2520 
2521 static void
2522 thread_cputime_callback(int warning, __unused const void *arg0, __unused const void *arg1)
2523 {
2524 	if (warning == LEDGER_WARNING_ROSE_ABOVE) {
2525 #if CONFIG_TELEMETRY
2526 		/*
2527 		 * This thread is in danger of violating the CPU usage monitor. Enable telemetry
2528 		 * on the entire task so there are micro-stackshots available if and when
2529 		 * EXC_RESOURCE is triggered. We could have chosen to enable micro-stackshots
2530 		 * for this thread only; but now that this task is suspect, knowing what all of
2531 		 * its threads are up to will be useful.
2532 		 */
2533 		telemetry_task_ctl(current_task(), TF_CPUMON_WARNING, 1);
2534 #endif
2535 		return;
2536 	}
2537 
2538 #if CONFIG_TELEMETRY
2539 	/*
2540 	 * If the balance has dipped below the warning level (LEDGER_WARNING_DIPPED_BELOW) or
2541 	 * exceeded the limit, turn telemetry off for the task.
2542 	 */
2543 	telemetry_task_ctl(current_task(), TF_CPUMON_WARNING, 0);
2544 #endif
2545 
2546 	if (warning == 0) {
2547 		SENDING_NOTIFICATION__THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU();
2548 	}
2549 }
2550 
2551 void __attribute__((noinline))
2552 SENDING_NOTIFICATION__THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU(void)
2553 {
2554 	int          pid                = 0;
2555 	task_t           task                           = current_task();
2556 	thread_t     thread             = current_thread();
2557 	uint64_t     tid                = thread->thread_id;
2558 	const char       *procname          = "unknown";
2559 	time_value_t thread_total_time  = {0, 0};
2560 	time_value_t thread_system_time;
2561 	time_value_t thread_user_time;
2562 	int          action;
2563 	uint8_t      percentage;
2564 	uint32_t     usage_percent = 0;
2565 	uint32_t     interval_sec;
2566 	uint64_t     interval_ns;
2567 	uint64_t     balance_ns;
2568 	boolean_t        fatal = FALSE;
2569 	boolean_t        send_exc_resource = TRUE; /* in addition to RESOURCE_NOTIFY */
2570 	kern_return_t   kr;
2571 
2572 #ifdef EXC_RESOURCE_MONITORS
2573 	mach_exception_data_type_t      code[EXCEPTION_CODE_MAX];
2574 #endif /* EXC_RESOURCE_MONITORS */
2575 	struct ledger_entry_info        lei;
2576 
2577 	assert(thread->t_threadledger != LEDGER_NULL);
2578 
2579 	/*
2580 	 * Extract the fatal bit and suspend the monitor (which clears the bit).
2581 	 */
2582 	task_lock(task);
2583 	if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_CPUMON) {
2584 		fatal = TRUE;
2585 		send_exc_resource = TRUE;
2586 	}
2587 	/* Only one thread can be here at a time.  Whichever makes it through
2588 	 *  first will successfully suspend the monitor and proceed to send the
2589 	 *  notification.  Other threads will get an error trying to suspend the
2590 	 *  monitor and give up on sending the notification.  In the first release,
2591 	 *  the monitor won't be resumed for a number of seconds, but we may
2592 	 *  eventually need to handle low-latency resume.
2593 	 */
2594 	kr = task_suspend_cpumon(task);
2595 	task_unlock(task);
2596 	if (kr == KERN_INVALID_ARGUMENT) {
2597 		return;
2598 	}
2599 
2600 #ifdef MACH_BSD
2601 	pid = proc_selfpid();
2602 	void *bsd_info = get_bsdtask_info(task);
2603 	if (bsd_info != NULL) {
2604 		procname = proc_name_address(bsd_info);
2605 	}
2606 #endif
2607 
2608 	thread_get_cpulimit(&action, &percentage, &interval_ns);
2609 
2610 	interval_sec = (uint32_t)(interval_ns / NSEC_PER_SEC);
2611 
2612 	thread_read_times(thread, &thread_user_time, &thread_system_time, NULL);
2613 	time_value_add(&thread_total_time, &thread_user_time);
2614 	time_value_add(&thread_total_time, &thread_system_time);
2615 	ledger_get_entry_info(thread->t_threadledger, thread_ledgers.cpu_time, &lei);
2616 
2617 	/* credit/debit/balance/limit are in absolute time units;
2618 	 *  the refill info is in nanoseconds. */
2619 	absolutetime_to_nanoseconds(lei.lei_balance, &balance_ns);
2620 	if (lei.lei_last_refill > 0) {
2621 		usage_percent = (uint32_t)((balance_ns * 100ULL) / lei.lei_last_refill);
2622 	}
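	/*
	 * Worked example (illustrative): if the thread burned 1.5s of CPU
	 * (balance_ns = 1,500,000,000) against a last refill period of 2s,
	 * usage_percent comes out to (1.5e9 * 100) / 2e9 = 75.
	 */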
2623 
2624 	/* TODO: show task total runtime (via TASK_ABSOLUTETIME_INFO)? */
2625 	printf("process %s[%d] thread %llu caught burning CPU! It used more than %d%% CPU over %u seconds\n",
2626 	    procname, pid, tid, percentage, interval_sec);
2627 	printf("  (actual recent usage: %d%% over ~%llu seconds)\n",
2628 	    usage_percent, (lei.lei_last_refill + NSEC_PER_SEC / 2) / NSEC_PER_SEC);
2629 	printf("  Thread lifetime cpu usage %d.%06ds, (%d.%06d user, %d.%06d sys)\n",
2630 	    thread_total_time.seconds, thread_total_time.microseconds,
2631 	    thread_user_time.seconds, thread_user_time.microseconds,
2632 	    thread_system_time.seconds, thread_system_time.microseconds);
2633 	printf("  Ledger balance: %lld; mabs credit: %lld; mabs debit: %lld\n",
2634 	    lei.lei_balance, lei.lei_credit, lei.lei_debit);
2635 	printf("  mabs limit: %llu; mabs period: %llu ns; last refill: %llu ns%s.\n",
2636 	    lei.lei_limit, lei.lei_refill_period, lei.lei_last_refill,
2637 	    (fatal ? " [fatal violation]" : ""));
2638 
2639 	/*
2640 	 *  For now, send RESOURCE_NOTIFY in parallel with EXC_RESOURCE.  Once
2641 	 *  we have logging parity, we will stop sending EXC_RESOURCE (24508922).
2642 	 */
2643 
2644 	/* RESOURCE_NOTIFY MIG specifies nanoseconds of CPU time */
2645 	lei.lei_balance = balance_ns;
2646 	absolutetime_to_nanoseconds(lei.lei_limit, &lei.lei_limit);
2647 	trace_resource_violation(RMON_CPUUSAGE_VIOLATED, &lei);
2648 	kr = send_resource_violation(send_cpu_usage_violation, task, &lei,
2649 	    fatal ? kRNFatalLimitFlag : 0);
2650 	if (kr) {
2651 		printf("send_resource_violation(CPU usage, ...): error %#x\n", kr);
2652 	}
2653 
2654 #ifdef EXC_RESOURCE_MONITORS
2655 	if (send_exc_resource) {
2656 		if (disable_exc_resource) {
2657 			printf("process %s[%d] thread %llu caught burning CPU! "
2658 			    "EXC_RESOURCE%s supressed by a boot-arg\n",
2659 			    procname, pid, tid, fatal ? " (and termination)" : "");
2660 			return;
2661 		}
2662 
2663 		if (audio_active) {
2664 			printf("process %s[%d] thread %llu caught burning CPU! "
2665 			    "EXC_RESOURCE & termination supressed due to audio playback\n",
2666 			    procname, pid, tid);
2667 			return;
2668 		}
2669 	}
2670 
2671 
2672 	if (send_exc_resource) {
2673 		code[0] = code[1] = 0;
2674 		EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_CPU);
2675 		if (fatal) {
2676 			EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_CPU_MONITOR_FATAL);
2677 		} else {
2678 			EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_CPU_MONITOR);
2679 		}
2680 		EXC_RESOURCE_CPUMONITOR_ENCODE_INTERVAL(code[0], interval_sec);
2681 		EXC_RESOURCE_CPUMONITOR_ENCODE_PERCENTAGE(code[0], percentage);
2682 		EXC_RESOURCE_CPUMONITOR_ENCODE_PERCENTAGE(code[1], usage_percent);
2683 		exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
2684 	}
2685 #endif /* EXC_RESOURCE_MONITORS */
2686 
2687 	if (fatal) {
2688 #if CONFIG_JETSAM
2689 		jetsam_on_ledger_cpulimit_exceeded();
2690 #else
2691 		task_terminate_internal(task);
2692 #endif
2693 	}
2694 }
2695 
2696 bool os_variant_has_internal_diagnostics(const char *subsystem);
2697 
2698 #if DEVELOPMENT || DEBUG
2699 void __attribute__((noinline))
2700 SENDING_NOTIFICATION__TASK_HAS_TOO_MANY_THREADS(task_t task, int thread_count)
2701 {
2702 	mach_exception_data_type_t code[EXCEPTION_CODE_MAX] = {0};
2703 	int pid = task_pid(task);
2704 	char procname[MAXCOMLEN + 1] = "unknown";
2705 
2706 	if (pid == 1) {
2707 		/*
2708 		 * Cannot suspend launchd
2709 		 */
2710 		return;
2711 	}
2712 
2713 	proc_name(pid, procname, sizeof(procname));
2714 
2715 	if (disable_exc_resource) {
2716 		printf("process %s[%d] crossed thread count high watermark (%d), EXC_RESOURCE "
2717 		    "supressed by a boot-arg.\n", procname, pid, thread_count);
2718 		return;
2719 	}
2720 
2721 	if (!os_variant_has_internal_diagnostics("com.apple.xnu")) {
2722 		printf("process %s[%d] crossed thread count high watermark (%d), EXC_RESOURCE "
2723 		    "supressed, internal diagnostics disabled.\n", procname, pid, thread_count);
2724 		return;
2725 	}
2726 
2727 	if (audio_active) {
2728 		printf("process %s[%d] crossed thread count high watermark (%d), EXC_RESOURCE "
2729 		    "supressed due to audio playback.\n", procname, pid, thread_count);
2730 		return;
2731 	}
2732 
2733 	if (!exc_via_corpse_forking) {
2734 		printf("process %s[%d] crossed thread count high watermark (%d), EXC_RESOURCE "
2735 		    "supressed due to corpse forking being disabled.\n", procname, pid,
2736 		    thread_count);
2737 		return;
2738 	}
2739 
2740 	printf("process %s[%d] crossed thread count high watermark (%d), sending "
2741 	    "EXC_RESOURCE\n", procname, pid, thread_count);
2742 
2743 	EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_THREADS);
2744 	EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_THREADS_HIGH_WATERMARK);
2745 	EXC_RESOURCE_THREADS_ENCODE_THREADS(code[0], thread_count);
2746 
2747 	task_enqueue_exception_with_corpse(task, EXC_RESOURCE, code, EXCEPTION_CODE_MAX, NULL, FALSE);
2748 }
2749 #endif /* DEVELOPMENT || DEBUG */
2750 
2751 void
2752 thread_update_io_stats(thread_t thread, int size, int io_flags)
2753 {
2754 	task_t task = get_threadtask(thread);
2755 	int io_tier;
2756 
2757 	if (thread->thread_io_stats == NULL || task->task_io_stats == NULL) {
2758 		return;
2759 	}
2760 
2761 	if (io_flags & DKIO_READ) {
2762 		UPDATE_IO_STATS(thread->thread_io_stats->disk_reads, size);
2763 		UPDATE_IO_STATS_ATOMIC(task->task_io_stats->disk_reads, size);
2764 	}
2765 
2766 	if (io_flags & DKIO_META) {
2767 		UPDATE_IO_STATS(thread->thread_io_stats->metadata, size);
2768 		UPDATE_IO_STATS_ATOMIC(task->task_io_stats->metadata, size);
2769 	}
2770 
2771 	if (io_flags & DKIO_PAGING) {
2772 		UPDATE_IO_STATS(thread->thread_io_stats->paging, size);
2773 		UPDATE_IO_STATS_ATOMIC(task->task_io_stats->paging, size);
2774 	}
2775 
2776 	io_tier = ((io_flags & DKIO_TIER_MASK) >> DKIO_TIER_SHIFT);
2777 	assert(io_tier < IO_NUM_PRIORITIES);
2778 
2779 	UPDATE_IO_STATS(thread->thread_io_stats->io_priority[io_tier], size);
2780 	UPDATE_IO_STATS_ATOMIC(task->task_io_stats->io_priority[io_tier], size);
2781 
2782 	/* Update Total I/O Counts */
2783 	UPDATE_IO_STATS(thread->thread_io_stats->total_io, size);
2784 	UPDATE_IO_STATS_ATOMIC(task->task_io_stats->total_io, size);
2785 
2786 	if (!(io_flags & DKIO_READ)) {
2787 		DTRACE_IO3(physical_writes, struct task *, task, uint32_t, size, int, io_flags);
2788 		ledger_credit(task->ledger, task_ledgers.physical_writes, size);
2789 	}
2790 }
2791 
2792 static void
2793 init_thread_ledgers(void)
2794 {
2795 	ledger_template_t t;
2796 	int idx;
2797 
2798 	assert(thread_ledger_template == NULL);
2799 
2800 	if ((t = ledger_template_create("Per-thread ledger")) == NULL) {
2801 		panic("couldn't create thread ledger template");
2802 	}
2803 
2804 	if ((idx = ledger_entry_add(t, "cpu_time", "sched", "ns")) < 0) {
2805 		panic("couldn't create cpu_time entry for thread ledger template");
2806 	}
2807 
2808 	if (ledger_set_callback(t, idx, thread_cputime_callback, NULL, NULL) < 0) {
2809 		panic("couldn't set thread ledger callback for cpu_time entry");
2810 	}
2811 
2812 	thread_ledgers.cpu_time = idx;
2813 
2814 	ledger_template_complete(t);
2815 	thread_ledger_template = t;
2816 }
2817 
2818 /*
2819  * Returns the amount of (abs) CPU time that remains before the limit would be
2820  * hit or the amount of time left in the current interval, whichever is smaller.
2821  * This value changes as CPU time is consumed and the ledgers refilled.
2822  * Used to limit the quantum of a thread.
2823  */
2824 uint64_t
2825 thread_cpulimit_remaining(uint64_t now)
2826 {
2827 	thread_t thread = current_thread();
2828 
2829 	if ((thread->options &
2830 	    (TH_OPT_PROC_CPULIMIT | TH_OPT_PRVT_CPULIMIT)) == 0) {
2831 		return UINT64_MAX;
2832 	}
2833 
2834 	/* Amount of time left in the current interval. */
2835 	const uint64_t interval_remaining =
2836 	    ledger_get_interval_remaining(thread->t_threadledger, thread_ledgers.cpu_time, now);
2837 
2838 	/* Amount that can be spent until the limit is hit. */
2839 	const uint64_t remaining =
2840 	    ledger_get_remaining(thread->t_threadledger, thread_ledgers.cpu_time);
2841 
2842 	return MIN(interval_remaining, remaining);
2843 }
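/*
 * Worked example (illustrative): with a 25ms limit refilled every 100ms,
 * a thread that has consumed 10ms of CPU at 40ms into the interval has
 * 15ms left before hitting the limit and 60ms left in the interval, so
 * the smaller value, 15ms (in abs time units), is returned.
 */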
2844 
2845 /*
2846  * Returns true if a new interval should be started.
2847  */
2848 bool
2849 thread_cpulimit_interval_has_expired(uint64_t now)
2850 {
2851 	thread_t thread = current_thread();
2852 
2853 	if ((thread->options &
2854 	    (TH_OPT_PROC_CPULIMIT | TH_OPT_PRVT_CPULIMIT)) == 0) {
2855 		return false;
2856 	}
2857 
2858 	return ledger_get_interval_remaining(thread->t_threadledger,
2859 	           thread_ledgers.cpu_time, now) == 0;
2860 }
2861 
2862 /*
2863  * Balances the ledger and sets the last refill time to `now`.
2864  */
2865 void
2866 thread_cpulimit_restart(uint64_t now)
2867 {
2868 	thread_t thread = current_thread();
2869 
2870 	assert3u(thread->options & (TH_OPT_PROC_CPULIMIT | TH_OPT_PRVT_CPULIMIT), !=, 0);
2871 
2872 	ledger_restart(thread->t_threadledger, thread_ledgers.cpu_time, now);
2873 }
2874 
2875 /*
2876  * Returns currently applied CPU usage limit, or 0/0 if none is applied.
2877  */
2878 int
2879 thread_get_cpulimit(int *action, uint8_t *percentage, uint64_t *interval_ns)
2880 {
2881 	int64_t         abstime = 0;
2882 	uint64_t        limittime = 0;
2883 	thread_t        thread = current_thread();
2884 
2885 	*percentage  = 0;
2886 	*interval_ns = 0;
2887 	*action      = 0;
2888 
2889 	if (thread->t_threadledger == LEDGER_NULL) {
2890 		/*
2891 		 * This thread has no per-thread ledger, so it can't possibly
2892 		 * have a CPU limit applied.
2893 		 */
2894 		return KERN_SUCCESS;
2895 	}
2896 
2897 	ledger_get_period(thread->t_threadledger, thread_ledgers.cpu_time, interval_ns);
2898 	ledger_get_limit(thread->t_threadledger, thread_ledgers.cpu_time, &abstime);
2899 
2900 	if ((abstime == LEDGER_LIMIT_INFINITY) || (*interval_ns == 0)) {
2901 		/*
2902 		 * This thread's CPU time ledger has no period or limit, so it
2903 		 * doesn't have a CPU limit applied.
2904 		 */
2905 		return KERN_SUCCESS;
2906 	}
2907 
2908 	/*
2909 	 * This calculation is the converse to the one in thread_set_cpulimit().
2910 	 */
2911 	absolutetime_to_nanoseconds(abstime, &limittime);
2912 	*percentage = (uint8_t)((limittime * 100ULL) / *interval_ns);
2913 	assert(*percentage <= 100);
2914 
2915 	if (thread->options & TH_OPT_PROC_CPULIMIT) {
2916 		assert((thread->options & TH_OPT_PRVT_CPULIMIT) == 0);
2917 
2918 		*action = THREAD_CPULIMIT_BLOCK;
2919 	} else if (thread->options & TH_OPT_PRVT_CPULIMIT) {
2920 		assert((thread->options & TH_OPT_PROC_CPULIMIT) == 0);
2921 
2922 		*action = THREAD_CPULIMIT_EXCEPTION;
2923 	} else {
2924 		*action = THREAD_CPULIMIT_DISABLE;
2925 	}
2926 
2927 	return KERN_SUCCESS;
2928 }
2929 
2930 /*
2931  * Set CPU usage limit on a thread.
2932  */
2933 int
2934 thread_set_cpulimit(int action, uint8_t percentage, uint64_t interval_ns)
2935 {
2936 	thread_t        thread = current_thread();
2937 	ledger_t        l;
2938 	uint64_t        limittime = 0;
2939 	uint64_t        abstime = 0;
2940 
2941 	assert(percentage <= 100);
2942 	assert(percentage > 0 || action == THREAD_CPULIMIT_DISABLE);
2943 
2944 	/*
2945 	 * Disallow any change to the CPU limit if the TH_OPT_FORCED_LEDGER
2946 	 * flag is set.
2947 	 */
2948 	if ((thread->options & TH_OPT_FORCED_LEDGER) != 0) {
2949 		return KERN_FAILURE;
2950 	}
2951 
2952 	if (action == THREAD_CPULIMIT_DISABLE) {
2953 		/*
2954 		 * Remove CPU limit, if any exists.
2955 		 */
2956 		if (thread->t_threadledger != LEDGER_NULL) {
2957 			l = thread->t_threadledger;
2958 			ledger_set_limit(l, thread_ledgers.cpu_time, LEDGER_LIMIT_INFINITY, 0);
2959 			ledger_set_action(l, thread_ledgers.cpu_time, LEDGER_ACTION_IGNORE);
2960 			thread->options &= ~(TH_OPT_PROC_CPULIMIT | TH_OPT_PRVT_CPULIMIT);
2961 		}
2962 
2963 		return 0;
2964 	}
2965 
2966 	if (interval_ns < MINIMUM_CPULIMIT_INTERVAL_MS * NSEC_PER_MSEC) {
2967 		return KERN_INVALID_ARGUMENT;
2968 	}
2969 
2970 	l = thread->t_threadledger;
2971 	if (l == LEDGER_NULL) {
2972 		/*
2973 		 * This thread doesn't yet have a per-thread ledger, so create one with the CPU time entry active.
2974 		 */
2975 		if ((l = ledger_instantiate(thread_ledger_template, LEDGER_CREATE_INACTIVE_ENTRIES)) == LEDGER_NULL) {
2976 			return KERN_RESOURCE_SHORTAGE;
2977 		}
2978 
2979 		/*
2980 		 * We are the first to create this thread's ledger, so only activate our entry.
2981 		 */
2982 		ledger_entry_setactive(l, thread_ledgers.cpu_time);
2983 		thread->t_threadledger = l;
2984 	}
2985 
2986 	/*
2987 	 * The limit is specified as a percentage of CPU over an interval in nanoseconds.
2988 	 * Calculate the amount of CPU time that the thread needs to consume in order to hit the limit.
2989 	 */
2990 	limittime = (interval_ns * percentage) / 100;
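	/* e.g. percentage = 25 with interval_ns = 100ms yields limittime = 25ms */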
2991 	nanoseconds_to_absolutetime(limittime, &abstime);
2992 	ledger_set_limit(l, thread_ledgers.cpu_time, abstime, cpumon_ustackshots_trigger_pct);
2993 	/*
2994 	 * Refill the thread's allotted CPU time every interval_ns nanoseconds.
2995 	 */
2996 	ledger_set_period(l, thread_ledgers.cpu_time, interval_ns);
2997 
2998 	if (action == THREAD_CPULIMIT_EXCEPTION) {
2999 		/*
3000 		 * We don't support programming the CPU usage monitor on a task if any of its
3001 		 * threads have a per-thread blocking CPU limit configured.
3002 		 */
3003 		if (thread->options & TH_OPT_PRVT_CPULIMIT) {
3004 			panic("CPU usage monitor activated, but blocking thread limit exists");
3005 		}
3006 
3007 		/*
3008 		 * Make a note that this thread's CPU limit is being used for the task-wide CPU
3009 		 * usage monitor. We don't have to arm the callback which will trigger the
3010 		 * exception, because that was done for us in ledger_instantiate (because the
3011 		 * ledger template used has a default callback).
3012 		 */
3013 		thread->options |= TH_OPT_PROC_CPULIMIT;
3014 	} else {
3015 		/*
3016 		 * We deliberately override any CPU limit imposed by a task-wide limit (e.g.
3017 		 * CPU usage monitor).
3018 		 */
3019 		thread->options &= ~TH_OPT_PROC_CPULIMIT;
3020 
3021 		thread->options |= TH_OPT_PRVT_CPULIMIT;
3022 		/* The per-thread ledger template by default has a callback for CPU time */
3023 		ledger_disable_callback(l, thread_ledgers.cpu_time);
3024 		ledger_set_action(l, thread_ledgers.cpu_time, LEDGER_ACTION_BLOCK);
3025 	}
3026 
3027 	return 0;
3028 }
3029 
3030 void
3031 thread_sched_call(
3032 	thread_t                thread,
3033 	sched_call_t    call)
3034 {
3035 	assert((thread->state & TH_WAIT_REPORT) == 0);
3036 	thread->sched_call = call;
3037 }
3038 
3039 uint64_t
3040 thread_tid(
3041 	thread_t        thread)
3042 {
3043 	return thread != THREAD_NULL? thread->thread_id: 0;
3044 }
3045 
3046 uint64_t
3047 uthread_tid(
3048 	struct uthread *uth)
3049 {
3050 	if (uth) {
3051 		return thread_tid(get_machthread(uth));
3052 	}
3053 	return 0;
3054 }
3055 
3056 uint16_t
3057 thread_set_tag(thread_t th, uint16_t tag)
3058 {
3059 	return thread_set_tag_internal(th, tag);
3060 }
3061 
3062 uint16_t
3063 thread_get_tag(thread_t th)
3064 {
3065 	return thread_get_tag_internal(th);
3066 }
3067 
3068 uint64_t
3069 thread_last_run_time(thread_t th)
3070 {
3071 	return th->last_run_time;
3072 }
3073 
3074 /*
3075  * Shared resource contention management
3076  *
3077  * The scheduler attempts to load balance the shared resource intensive
3078  * workloads across clusters to ensure that the resource is not heavily
3079  * contended. The kernel relies on external agents (userspace or
3080  * performance controller) to identify shared resource heavy threads.
3081  * The load balancing is achieved based on the scheduler configuration
3082  * enabled on the platform.
3083  */
3084 
3085 
3086 #if CONFIG_SCHED_EDGE
3087 
3088 /*
3089  * On the Edge scheduler, the load balancing is achieved by looking
3090  * at cluster level shared resource loads and migrating resource heavy
3091  * threads dynamically to an underutilized cluster. Therefore, when a
3092  * thread is indicated as a resource heavy thread, the policy set
3093  * routine simply adds a flag to the thread which is looked at by
3094  * the scheduler on thread migration decisions.
3095  */
3096 
3097 boolean_t
3098 thread_shared_rsrc_policy_get(thread_t thread, cluster_shared_rsrc_type_t type)
3099 {
3100 	return thread->th_shared_rsrc_heavy_user[type] || thread->th_shared_rsrc_heavy_perf_control[type];
3101 }
3102 
3103 __options_decl(sched_edge_rsrc_heavy_thread_state, uint32_t, {
3104 	SCHED_EDGE_RSRC_HEAVY_THREAD_SET = 1,
3105 	SCHED_EDGE_RSRC_HEAVY_THREAD_CLR = 2,
3106 });
3107 
3108 kern_return_t
3109 thread_shared_rsrc_policy_set(thread_t thread, __unused uint32_t index, cluster_shared_rsrc_type_t type, shared_rsrc_policy_agent_t agent)
3110 {
3111 	spl_t s = splsched();
3112 	thread_lock(thread);
3113 
3114 	bool user = (agent == SHARED_RSRC_POLICY_AGENT_DISPATCH) || (agent == SHARED_RSRC_POLICY_AGENT_SYSCTL);
3115 	bool *thread_flags = (user) ? thread->th_shared_rsrc_heavy_user : thread->th_shared_rsrc_heavy_perf_control;
3116 	if (thread_flags[type]) {
3117 		thread_unlock(thread);
3118 		splx(s);
3119 		return KERN_FAILURE;
3120 	}
3121 
3122 	thread_flags[type] = true;
3123 	thread_unlock(thread);
3124 	splx(s);
3125 
3126 	KDBG(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_RSRC_HEAVY_THREAD) | DBG_FUNC_NONE, SCHED_EDGE_RSRC_HEAVY_THREAD_SET, thread_tid(thread), type, agent);
3127 	if (thread == current_thread()) {
3128 		if (agent == SHARED_RSRC_POLICY_AGENT_PERFCTL_QUANTUM) {
3129 			ast_on(AST_PREEMPT);
3130 		} else {
3131 			assert(agent != SHARED_RSRC_POLICY_AGENT_PERFCTL_CSW);
3132 			thread_block(THREAD_CONTINUE_NULL);
3133 		}
3134 	}
3135 	return KERN_SUCCESS;
3136 }
3137 
3138 kern_return_t
3139 thread_shared_rsrc_policy_clear(thread_t thread, cluster_shared_rsrc_type_t type, shared_rsrc_policy_agent_t agent)
3140 {
3141 	spl_t s = splsched();
3142 	thread_lock(thread);
3143 
3144 	bool user = (agent == SHARED_RSRC_POLICY_AGENT_DISPATCH) || (agent == SHARED_RSRC_POLICY_AGENT_SYSCTL);
3145 	bool *thread_flags = (user) ? thread->th_shared_rsrc_heavy_user : thread->th_shared_rsrc_heavy_perf_control;
3146 	if (!thread_flags[type]) {
3147 		thread_unlock(thread);
3148 		splx(s);
3149 		return KERN_FAILURE;
3150 	}
3151 
3152 	thread_flags[type] = false;
3153 	thread_unlock(thread);
3154 	splx(s);
3155 
3156 	KDBG(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_RSRC_HEAVY_THREAD) | DBG_FUNC_NONE, SCHED_EDGE_RSRC_HEAVY_THREAD_CLR, thread_tid(thread), type, agent);
3157 	if (thread == current_thread()) {
3158 		if (agent == SHARED_RSRC_POLICY_AGENT_PERFCTL_QUANTUM) {
3159 			ast_on(AST_PREEMPT);
3160 		} else {
3161 			assert(agent != SHARED_RSRC_POLICY_AGENT_PERFCTL_CSW);
3162 			thread_block(THREAD_CONTINUE_NULL);
3163 		}
3164 	}
3165 	return KERN_SUCCESS;
3166 }
3167 
3168 #else /* CONFIG_SCHED_EDGE */
3169 
3170 /*
3171  * On non-Edge schedulers, the shared resource contention
3172  * is managed by simply binding threads to specific clusters
3173  * based on the worker index passed by the agents marking
3174  * a thread as resource heavy. The thread binding
3175  * approach does not provide any rebalancing opportunities;
3176  * it can also suffer from scheduling delays if the cluster
3177  * where the thread is bound is contended.
3178  */
3179 
3180 boolean_t
3181 thread_shared_rsrc_policy_get(__unused thread_t thread, __unused cluster_shared_rsrc_type_t type)
3182 {
3183 	return false;
3184 }
3185 
3186 kern_return_t
3187 thread_shared_rsrc_policy_set(thread_t thread, uint32_t index, __unused cluster_shared_rsrc_type_t type, __unused shared_rsrc_policy_agent_t agent)
3188 {
3189 	return thread_bind_cluster_id(thread, index, THREAD_BIND_SOFT | THREAD_BIND_ELIGIBLE_ONLY);
3190 }
3191 
3192 kern_return_t
3193 thread_shared_rsrc_policy_clear(thread_t thread, __unused cluster_shared_rsrc_type_t type, __unused shared_rsrc_policy_agent_t agent)
3194 {
3195 	return thread_bind_cluster_id(thread, 0, THREAD_UNBIND);
3196 }
3197 
3198 #endif /* CONFIG_SCHED_EDGE */
3199 
3200 uint64_t
3201 thread_dispatchqaddr(
3202 	thread_t                thread)
3203 {
3204 	uint64_t        dispatchqueue_addr;
3205 	uint64_t        thread_handle;
3206 	task_t          task;
3207 
3208 	if (thread == THREAD_NULL) {
3209 		return 0;
3210 	}
3211 
3212 	thread_handle = thread->machine.cthread_self;
3213 	if (thread_handle == 0) {
3214 		return 0;
3215 	}
3216 
3217 	task = get_threadtask(thread);
3218 	void *bsd_info = get_bsdtask_info(task);
3219 	if (thread->inspection == TRUE) {
3220 		dispatchqueue_addr = thread_handle + get_task_dispatchqueue_offset(task);
3221 	} else if (bsd_info) {
3222 		dispatchqueue_addr = thread_handle + get_dispatchqueue_offset_from_proc(bsd_info);
3223 	} else {
3224 		dispatchqueue_addr = 0;
3225 	}
3226 
3227 	return dispatchqueue_addr;
3228 }
3229 
3230 
3231 uint64_t
3232 thread_wqquantum_addr(thread_t thread)
3233 {
3234 	uint64_t thread_handle;
3235 	task_t   task;
3236 
3237 	if (thread == THREAD_NULL) {
3238 		return 0;
3239 	}
3240 
3241 	thread_handle = thread->machine.cthread_self;
3242 	if (thread_handle == 0) {
3243 		return 0;
3244 	}
3245 	task = get_threadtask(thread);
3246 
3247 	uint64_t wq_quantum_expiry_offset = get_wq_quantum_offset_from_proc(get_bsdtask_info(task));
3248 	if (wq_quantum_expiry_offset == 0) {
3249 		return 0;
3250 	}
3251 
3252 	return wq_quantum_expiry_offset + thread_handle;
3253 }
3254 
3255 uint64_t
3256 thread_rettokern_addr(
3257 	thread_t                thread)
3258 {
3259 	uint64_t        rettokern_addr;
3260 	uint64_t        rettokern_offset;
3261 	uint64_t        thread_handle;
3262 	task_t          task;
3263 	void            *bsd_info;
3264 
3265 	if (thread == THREAD_NULL) {
3266 		return 0;
3267 	}
3268 
3269 	thread_handle = thread->machine.cthread_self;
3270 	if (thread_handle == 0) {
3271 		return 0;
3272 	}
3273 	task = get_threadtask(thread);
3274 	bsd_info = get_bsdtask_info(task);
3275 
3276 	if (bsd_info) {
3277 		rettokern_offset = get_return_to_kernel_offset_from_proc(bsd_info);
3278 
3279 		/* Return 0 if return to kernel offset is not initialized. */
3280 		if (rettokern_offset == 0) {
3281 			rettokern_addr = 0;
3282 		} else {
3283 			rettokern_addr = thread_handle + rettokern_offset;
3284 		}
3285 	} else {
3286 		rettokern_addr = 0;
3287 	}
3288 
3289 	return rettokern_addr;
3290 }
3291 
3292 /*
3293  * Export routines to other components for things that are done as macros
3294  * within the osfmk component.
3295  */
3296 
3297 void
3298 thread_mtx_lock(thread_t thread)
3299 {
3300 	lck_mtx_lock(&thread->mutex);
3301 }
3302 
3303 void
3304 thread_mtx_unlock(thread_t thread)
3305 {
3306 	lck_mtx_unlock(&thread->mutex);
3307 }
3308 
3309 void
3310 thread_reference(
3311 	thread_t        thread)
3312 {
3313 	if (thread != THREAD_NULL) {
3314 		zone_id_require(ZONE_ID_THREAD, sizeof(struct thread), thread);
3315 		os_ref_retain_raw(&thread->ref_count, &thread_refgrp);
3316 	}
3317 }
3318 
3319 void
3320 thread_require(thread_t thread)
3321 {
3322 	zone_id_require(ZONE_ID_THREAD, sizeof(struct thread), thread);
3323 }
3324 
3325 #undef thread_should_halt
3326 
3327 boolean_t
3328 thread_should_halt(
3329 	thread_t                th)
3330 {
3331 	return thread_should_halt_fast(th);
3332 }
3333 
3334 /*
3335  * thread_set_voucher_name - reset the voucher port name bound to this thread
3336  *
3337  * Conditions:  nothing locked
3338  */
3339 
3340 kern_return_t
3341 thread_set_voucher_name(mach_port_name_t voucher_name)
3342 {
3343 	thread_t thread = current_thread();
3344 	ipc_voucher_t new_voucher = IPC_VOUCHER_NULL;
3345 	ipc_voucher_t voucher;
3346 	ledger_t bankledger = NULL;
3347 	struct thread_group *banktg = NULL;
3348 	uint32_t persona_id = 0;
3349 
3350 	if (MACH_PORT_DEAD == voucher_name) {
3351 		return KERN_INVALID_RIGHT;
3352 	}
3353 
3354 	/*
3355 	 * aggressively convert to voucher reference
3356 	 */
3357 	if (MACH_PORT_VALID(voucher_name)) {
3358 		new_voucher = convert_port_name_to_voucher(voucher_name);
3359 		if (IPC_VOUCHER_NULL == new_voucher) {
3360 			return KERN_INVALID_ARGUMENT;
3361 		}
3362 	}
3363 	bank_get_bank_ledger_thread_group_and_persona(new_voucher, &bankledger, &banktg, &persona_id);
3364 
3365 	thread_mtx_lock(thread);
3366 	voucher = thread->ith_voucher;
3367 	thread->ith_voucher_name = voucher_name;
3368 	thread->ith_voucher = new_voucher;
3369 	thread_mtx_unlock(thread);
3370 
3371 	bank_swap_thread_bank_ledger(thread, bankledger);
3372 #if CONFIG_THREAD_GROUPS
3373 	thread_group_set_bank(thread, banktg);
3374 #endif /* CONFIG_THREAD_GROUPS */
3375 
3376 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3377 	    MACHDBG_CODE(DBG_MACH_IPC, MACH_THREAD_SET_VOUCHER) | DBG_FUNC_NONE,
3378 	    (uintptr_t)thread_tid(thread),
3379 	    (uintptr_t)voucher_name,
3380 	    VM_KERNEL_ADDRPERM((uintptr_t)new_voucher),
3381 	    persona_id, 0);
3382 
3383 	if (IPC_VOUCHER_NULL != voucher) {
3384 		ipc_voucher_release(voucher);
3385 	}
3386 
3387 	return KERN_SUCCESS;
3388 }
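
/*
 * Minimal usage sketch (the received port name is assumed): adopt a voucher
 * on the current thread by port name, then drop it. MACH_PORT_NULL is not
 * MACH_PORT_VALID, so passing it releases the previous voucher without
 * taking a new one.
 *
 *	kern_return_t kr;
 *	kr = thread_set_voucher_name(msg_voucher_name);	// adopt
 *	...
 *	kr = thread_set_voucher_name(MACH_PORT_NULL);	// clear
 */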
3389 
3390 /*
3391  *  thread_get_mach_voucher - return a voucher reference for the specified thread voucher
3392  *
3393  *  Conditions:  nothing locked
3394  *
3395  *  NOTE:       At the moment, there is no distinction between the current and effective
3396  *		vouchers because we only set them at the thread level.
3397  */
3398 kern_return_t
3399 thread_get_mach_voucher(
3400 	thread_act_t            thread,
3401 	mach_voucher_selector_t __unused which,
3402 	ipc_voucher_t           *voucherp)
3403 {
3404 	ipc_voucher_t           voucher;
3405 
3406 	if (THREAD_NULL == thread) {
3407 		return KERN_INVALID_ARGUMENT;
3408 	}
3409 
3410 	thread_mtx_lock(thread);
3411 	voucher = thread->ith_voucher;
3412 
3413 	if (IPC_VOUCHER_NULL != voucher) {
3414 		ipc_voucher_reference(voucher);
3415 		thread_mtx_unlock(thread);
3416 		*voucherp = voucher;
3417 		return KERN_SUCCESS;
3418 	}
3419 
3420 	thread_mtx_unlock(thread);
3421 
3422 	*voucherp = IPC_VOUCHER_NULL;
3423 	return KERN_SUCCESS;
3424 }
3425 
3426 /*
3427  *  thread_set_mach_voucher - set a voucher reference for the specified thread voucher
3428  *
3429  *  Conditions: caller holds a reference on the voucher.
3430  *		nothing locked.
3431  *
3432  *  We grab another reference to the voucher and bind it to the thread.
3433  *  The old voucher reference associated with the thread is
3434  *  discarded.
3435  */
3436 kern_return_t
3437 thread_set_mach_voucher(
3438 	thread_t                thread,
3439 	ipc_voucher_t           voucher)
3440 {
3441 	ipc_voucher_t old_voucher;
3442 	ledger_t bankledger = NULL;
3443 	struct thread_group *banktg = NULL;
3444 	uint32_t persona_id = 0;
3445 
3446 	if (THREAD_NULL == thread) {
3447 		return KERN_INVALID_ARGUMENT;
3448 	}
3449 
3450 	bank_get_bank_ledger_thread_group_and_persona(voucher, &bankledger, &banktg, &persona_id);
3451 
3452 	thread_mtx_lock(thread);
3453 	/*
3454 	 * Once the thread is started, we will look at `ith_voucher` without
3455 	 * holding any lock.
3456 	 *
3457 	 * Setting the voucher hence can only be done by current_thread() or
3458 	 * before it started. "started" flips under the thread mutex and must be
3459 	 * tested under it too.
3460 	 */
3461 	if (thread != current_thread() && thread->started) {
3462 		thread_mtx_unlock(thread);
3463 		return KERN_INVALID_ARGUMENT;
3464 	}
3465 
3466 	ipc_voucher_reference(voucher);
3467 	old_voucher = thread->ith_voucher;
3468 	thread->ith_voucher = voucher;
3469 	thread->ith_voucher_name = MACH_PORT_NULL;
3470 	thread_mtx_unlock(thread);
3471 
3472 	bank_swap_thread_bank_ledger(thread, bankledger);
3473 #if CONFIG_THREAD_GROUPS
3474 	thread_group_set_bank(thread, banktg);
3475 #endif /* CONFIG_THREAD_GROUPS */
3476 
3477 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3478 	    MACHDBG_CODE(DBG_MACH_IPC, MACH_THREAD_SET_VOUCHER) | DBG_FUNC_NONE,
3479 	    (uintptr_t)thread_tid(thread),
3480 	    (uintptr_t)MACH_PORT_NULL,
3481 	    VM_KERNEL_ADDRPERM((uintptr_t)voucher),
3482 	    persona_id, 0);
3483 
3484 	ipc_voucher_release(old_voucher);
3485 
3486 	return KERN_SUCCESS;
3487 }
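
/*
 * A sketch of the started-thread rule enforced above, assuming `voucher` is
 * held by the caller and `new_thread` was created with thread_create() but
 * not yet started:
 *
 *	kr = thread_set_mach_voucher(new_thread, voucher);	// KERN_SUCCESS
 *	thread_start(new_thread);
 *	kr = thread_set_mach_voucher(new_thread, voucher);	// now fails with
 *								// KERN_INVALID_ARGUMENT
 */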
3488 
3489 /*
3490  *  thread_swap_mach_voucher - swap a voucher reference for the specified thread voucher
3491  *
3492  *  Conditions: caller holds a reference on the new and presumed old voucher(s).
3493  *		nothing locked.
3494  *
3495  *  This function is no longer supported.
3496  */
3497 kern_return_t
3498 thread_swap_mach_voucher(
3499 	__unused thread_t               thread,
3500 	__unused ipc_voucher_t          new_voucher,
3501 	ipc_voucher_t                   *in_out_old_voucher)
3502 {
3503 	/*
3504 	 * Currently this function is only called from a MIG generated
3505 	 * routine which doesn't release the reference on the voucher
3506 	 * addressed by in_out_old_voucher. To avoid leaking this reference,
3507 	 * a call to release it has been added here.
3508 	 */
3509 	ipc_voucher_release(*in_out_old_voucher);
3510 	OS_ANALYZER_SUPPRESS("81787115") return KERN_NOT_SUPPORTED;
3511 }
3512 
3513 /*
3514  *  thread_get_current_voucher_origin_pid - get the pid of the originator of the current voucher.
3515  */
3516 kern_return_t
3517 thread_get_current_voucher_origin_pid(
3518 	int32_t      *pid)
3519 {
3520 	return thread_get_voucher_origin_pid(current_thread(), pid);
3521 }
3522 
3523 /*
3524  *  thread_get_voucher_origin_pid - get the pid of the originator of the given thread's voucher.
3525  */
3526 kern_return_t
3527 thread_get_voucher_origin_pid(thread_t thread, int32_t *pid)
3528 {
3529 	uint32_t buf_size = sizeof(*pid);
3530 	return mach_voucher_attr_command(thread->ith_voucher,
3531 	           MACH_VOUCHER_ATTR_KEY_BANK,
3532 	           BANK_ORIGINATOR_PID,
3533 	           NULL,
3534 	           0,
3535 	           (mach_voucher_attr_content_t)pid,
3536 	           &buf_size);
3537 }
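
/*
 * Illustrative call on the current thread, assuming it carries a bank
 * voucher:
 *
 *	int32_t origin_pid;
 *	if (thread_get_current_voucher_origin_pid(&origin_pid) == KERN_SUCCESS) {
 *		// origin_pid identifies the process that created the voucher
 *	}
 */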
3538 
3539 #if CONFIG_THREAD_GROUPS
3540 /*
3541  * Returns the current thread's voucher-carried thread group
3542  *
3543  * Reference is borrowed from this being the current voucher, so it does NOT
3544  * return a reference to the group.
3545  */
3546 struct thread_group *
3547 thread_get_current_voucher_thread_group(thread_t thread)
3548 {
3549 	assert(thread == current_thread());
3550 
3551 	if (thread->ith_voucher == NULL) {
3552 		return NULL;
3553 	}
3554 
3555 	ledger_t bankledger = NULL;
3556 	struct thread_group *banktg = NULL;
3557 
3558 	bank_get_bank_ledger_thread_group_and_persona(thread->ith_voucher, &bankledger, &banktg, NULL);
3559 
3560 	return banktg;
3561 }
3562 
3563 #endif /* CONFIG_THREAD_GROUPS */
3564 
3565 #if CONFIG_COALITIONS
3566 
3567 uint64_t
3568 thread_get_current_voucher_resource_coalition_id(thread_t thread)
3569 {
3570 	uint64_t id = 0;
3571 	assert(thread == current_thread());
3572 	if (thread->ith_voucher != NULL) {
3573 		id = bank_get_bank_ledger_resource_coalition_id(thread->ith_voucher);
3574 	}
3575 	return id;
3576 }
3577 
3578 #endif /* CONFIG_COALITIONS */
3579 
3580 extern struct workqueue *
3581 proc_get_wqptr(void *proc);
3582 
3583 static bool
3584 task_supports_cooperative_workqueue(task_t task)
3585 {
3586 	void *bsd_info = get_bsdtask_info(task);
3587 
3588 	assert(task == current_task());
3589 	if (bsd_info == NULL) {
3590 		return false;
3591 	}
3592 
3593 	uint64_t wq_quantum_expiry_offset = get_wq_quantum_offset_from_proc(bsd_info);
3594 	/* userspace may not have called workq_open yet */
3595 	struct workqueue *wq = proc_get_wqptr(bsd_info);
3596 
3597 	return (wq != NULL) && (wq_quantum_expiry_offset != 0);
3598 }
3599 
3600 /* Not safe to call from scheduler paths - should only be called on self */
3601 bool
3602 thread_supports_cooperative_workqueue(thread_t thread)
3603 {
3604 	struct uthread *uth = get_bsdthread_info(thread);
3605 	task_t task = get_threadtask(thread);
3606 
3607 	assert(thread == current_thread());
3608 
3609 	return task_supports_cooperative_workqueue(task) &&
3610 	       bsdthread_part_of_cooperative_workqueue(uth);
3611 }
3612 
3613 static inline bool
3614 thread_has_armed_workqueue_quantum(thread_t thread)
3615 {
3616 	return thread->workq_quantum_deadline != 0;
3617 }
3618 
3619 /*
3620  * The workq quantum is a lazy timer that is evaluated at 2 specific times in
3621  * the scheduler:
3622  *
3623  * - context switch time
3624  * - scheduler quantum expiry time.
3625  *
3626  * We're currently expressing the workq quantum with a 0.5 scale factor of the
3627  * scheduler quantum. It is possible that if the workq quantum is rearmed
3628  * shortly after the scheduler quantum begins, we could have a large delay
3629  * between when the workq quantum next expires and when it actually is noticed.
3630  *
3631  * A potential future improvement for the wq quantum expiry logic is to compare
3632  * it to the next actual scheduler quantum deadline and expire it if it is
3633  * within a certain leeway.
3634  */
3635 static inline uint64_t
3636 thread_workq_quantum_size(thread_t thread)
3637 {
3638 	return (uint64_t) (SCHED(initial_quantum_size)(thread) / 2);
3639 }
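
/*
 * Worked example of the 0.5 scale factor: with the typical 10 ms scheduler
 * quantum, thread_workq_quantum_size() is 5 ms of runtime (in mach time
 * units), so a quantum armed at runtime R expires once the thread has
 * accumulated R + 5 ms of CPU time.
 */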
3640 
3641 /*
3642  * Always called by thread on itself - either at AST boundary after processing
3643  * an existing quantum expiry, or when a new quantum is armed before the thread
3644  * goes out to userspace to handle a thread request
3645  */
3646 void
3647 thread_arm_workqueue_quantum(thread_t thread)
3648 {
3649 	/*
3650 	 * If the task is not opted into wq quantum notification, or if the thread
3651 	 * is not part of the cooperative workqueue, don't even bother with tracking
3652 	 * the quantum or calculating expiry
3653 	 */
3654 	if (!thread_supports_cooperative_workqueue(thread)) {
3655 		assert(thread->workq_quantum_deadline == 0);
3656 		return;
3657 	}
3658 
3659 	assert(current_thread() == thread);
3660 	assert(thread_get_tag(thread) & THREAD_TAG_WORKQUEUE);
3661 
3662 	uint64_t current_runtime = thread_get_runtime_self();
3663 	uint64_t deadline = thread_workq_quantum_size(thread) + current_runtime;
3664 
3665 	/*
3666 	 * The update of a workqueue quantum should always be followed by the update
3667 	 * of the AST - see explanation in kern/thread.h for synchronization of this
3668 	 * field
3669 	 */
3670 	thread->workq_quantum_deadline = deadline;
3671 
3672 	/* We're arming a new quantum, clear any previous expiry notification */
3673 	act_clear_astkevent(thread, AST_KEVENT_WORKQ_QUANTUM_EXPIRED);
3674 
3675 	WQ_TRACE(TRACE_wq_quantum_arm, current_runtime, deadline, 0, 0);
3676 
3677 	WORKQ_QUANTUM_HISTORY_WRITE_ENTRY(thread, thread->workq_quantum_deadline, true);
3678 }
3679 
3680 /* Called by a thread on itself when it is about to park */
3681 void
3682 thread_disarm_workqueue_quantum(thread_t thread)
3683 {
3684 	/* The update of a workqueue quantum should always be followed by the update
3685 	 * of the AST - see explanation in kern/thread.h for synchronization of this
3686 	 * field */
3687 	thread->workq_quantum_deadline = 0;
3688 	act_clear_astkevent(thread, AST_KEVENT_WORKQ_QUANTUM_EXPIRED);
3689 
3690 	WQ_TRACE(TRACE_wq_quantum_disarm, 0, 0, 0, 0);
3691 
3692 	WORKQ_QUANTUM_HISTORY_WRITE_ENTRY(thread, thread->workq_quantum_deadline, false);
3693 }
3694 
3695 /* This is called at context switch time on a thread that may not be self,
3696  * and at AST time
3697  */
3698 bool
3699 thread_has_expired_workqueue_quantum(thread_t thread, bool should_trace)
3700 {
3701 	if (!thread_has_armed_workqueue_quantum(thread)) {
3702 		return false;
3703 	}
3704 	/* We do not do a thread_get_runtime_self() here since this function is
3705 	 * called from context switch time or during scheduler quantum expiry and
3706 	 * therefore, we may not be evaluating it on the current thread/self.
3707 	 *
3708 	 * In addition, the timers on the thread have just been updated recently so
3709 	 * we don't need to update them again.
3710 	 */
3711 	uint64_t runtime = recount_thread_time_mach(thread);
3712 	bool expired = runtime > thread->workq_quantum_deadline;
3713 
3714 	if (expired && should_trace) {
3715 		WQ_TRACE(TRACE_wq_quantum_expired, runtime, thread->workq_quantum_deadline, 0, 0);
3716 	}
3717 
3718 	return expired;
3719 }
3720 
3721 /*
3722  * Called on a thread that is being context switched out or during quantum
3723  * expiry on self. Only called from scheduler paths.
3724  */
3725 void
3726 thread_evaluate_workqueue_quantum_expiry(thread_t thread)
3727 {
3728 	if (thread_has_expired_workqueue_quantum(thread, true)) {
3729 		act_set_astkevent(thread, AST_KEVENT_WORKQ_QUANTUM_EXPIRED);
3730 	}
3731 }
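
/*
 * Putting the pieces together, the workq quantum lifecycle is (sketch):
 *
 *	thread_arm_workqueue_quantum(self);	// armed before heading to userspace
 *	// ... thread runs; at context switch or scheduler quantum expiry the
 *	// scheduler calls thread_evaluate_workqueue_quantum_expiry(), which
 *	// sets AST_KEVENT_WORKQ_QUANTUM_EXPIRED once the deadline has passed
 *	thread_arm_workqueue_quantum(self);	// re-armed at the AST boundary, or
 *	thread_disarm_workqueue_quantum(self);	// disarmed when the thread parks
 */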
3732 
3733 boolean_t
3734 thread_has_thread_name(thread_t th)
3735 {
3736 	if (th) {
3737 		return bsd_hasthreadname(get_bsdthread_info(th));
3738 	}
3739 
3740 	/*
3741 	 * This is an odd case; clients may set the thread name based on the lack of
3742 	 * a name, but in this context there is no uthread to attach the name to.
3743 	 */
3744 	return FALSE;
3745 }
3746 
3747 void
3748 thread_set_thread_name(thread_t th, const char* name)
3749 {
3750 	if (th && name) {
3751 		bsd_setthreadname(get_bsdthread_info(th), name);
3752 	}
3753 }
3754 
3755 void
3756 thread_get_thread_name(thread_t th, char* name)
3757 {
3758 	if (!name) {
3759 		return;
3760 	}
3761 	if (th) {
3762 		bsd_getthreadname(get_bsdthread_info(th), name);
3763 	} else {
3764 		name[0] = '\0';
3765 	}
3766 }
3767 
3768 void
3769 thread_set_honor_qlimit(thread_t thread)
3770 {
3771 	thread->options |= TH_OPT_HONOR_QLIMIT;
3772 }
3773 
3774 void
3775 thread_clear_honor_qlimit(thread_t thread)
3776 {
3777 	thread->options &= (~TH_OPT_HONOR_QLIMIT);
3778 }
3779 
3780 /*
3781  * thread_enable_send_importance - set/clear the SEND_IMPORTANCE thread option bit.
3782  */
3783 void
3784 thread_enable_send_importance(thread_t thread, boolean_t enable)
3785 {
3786 	if (enable == TRUE) {
3787 		thread->options |= TH_OPT_SEND_IMPORTANCE;
3788 	} else {
3789 		thread->options &= ~TH_OPT_SEND_IMPORTANCE;
3790 	}
3791 }
3792 
3793 kern_return_t
3794 thread_get_ipc_propagate_attr(thread_t thread, struct thread_attr_for_ipc_propagation *attr)
3795 {
3796 	int iotier;
3797 	int qos;
3798 
3799 	if (thread == NULL || attr == NULL) {
3800 		return KERN_INVALID_ARGUMENT;
3801 	}
3802 
3803 	iotier = proc_get_effective_thread_policy(thread, TASK_POLICY_IO);
3804 	qos = proc_get_effective_thread_policy(thread, TASK_POLICY_QOS);
3805 
3806 	attr->tafip_iotier = iotier;
3807 	attr->tafip_qos = qos;
3808 
3809 	return KERN_SUCCESS;
3810 }
3811 
3812 /*
3813  * thread_set_allocation_name - set the current thread's allocation name, returning the prior one.
3814  */
3815 
3816 kern_allocation_name_t
3817 thread_set_allocation_name(kern_allocation_name_t new_name)
3818 {
3819 	kern_allocation_name_t ret;
3820 	thread_kernel_state_t kstate = thread_get_kernel_state(current_thread());
3821 	ret = kstate->allocation_name;
3822 	// FIFO: only install new_name when clearing or when no name is currently set
3823 	if (!new_name || !kstate->allocation_name) {
3824 		kstate->allocation_name = new_name;
3825 	}
3826 	return ret;
3827 }
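
/*
 * Typical save/restore pattern (sketch): because of the FIFO rule above,
 * nested callers cannot displace the outermost name, and restoring the
 * returned value on the way out keeps the pairing balanced.
 *
 *	kern_allocation_name_t prior = thread_set_allocation_name(name);
 *	// ... allocations here are attributed to the outermost name ...
 *	thread_set_allocation_name(prior);
 */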
3828 
3829 void *
3830 thread_iokit_tls_get(uint32_t index)
3831 {
3832 	assert(index < THREAD_SAVE_IOKIT_TLS_COUNT);
3833 	return current_thread()->saved.iokit.tls[index];
3834 }
3835 
3836 void
3837 thread_iokit_tls_set(uint32_t index, void * data)
3838 {
3839 	assert(index < THREAD_SAVE_IOKIT_TLS_COUNT);
3840 	current_thread()->saved.iokit.tls[index] = data;
3841 }
3842 
3843 uint64_t
3844 thread_get_last_wait_duration(thread_t thread)
3845 {
3846 	return thread->last_made_runnable_time - thread->last_run_time;
3847 }
3848 
3849 integer_t
3850 thread_kern_get_pri(thread_t thr)
3851 {
3852 	return thr->base_pri;
3853 }
3854 
3855 void
3856 thread_kern_set_pri(thread_t thr, integer_t pri)
3857 {
3858 	sched_set_kernel_thread_priority(thr, pri);
3859 }
3860 
3861 integer_t
3862 thread_kern_get_kernel_maxpri(void)
3863 {
3864 	return MAXPRI_KERNEL;
3865 }
3866 /*
3867  *	thread_port_with_flavor_no_senders
3868  *
3869  *	Called whenever the Mach port system detects no-senders on
3870  *	the thread inspect or read port. These ports are allocated lazily and
3871  *	should be deallocated here when there are no senders remaining.
3872  */
3873 static void
3874 thread_port_with_flavor_no_senders(
3875 	ipc_port_t          port,
3876 	mach_port_mscount_t mscount __unused)
3877 {
3878 	thread_ro_t tro;
3879 	thread_t thread;
3880 	mach_thread_flavor_t flavor;
3881 	ipc_kobject_type_t kotype;
3882 
3883 	ip_mq_lock(port);
3884 	if (port->ip_srights > 0) {
3885 		ip_mq_unlock(port);
3886 		return;
3887 	}
3888 	kotype = ip_kotype(port);
3889 	assert((IKOT_THREAD_READ == kotype) || (IKOT_THREAD_INSPECT == kotype));
3890 	thread = ipc_kobject_get_locked(port, kotype);
3891 	if (thread != THREAD_NULL) {
3892 		thread_reference(thread);
3893 	}
3894 	ip_mq_unlock(port);
3895 
3896 	if (thread == THREAD_NULL) {
3897 		/* The thread is exiting or disabled; it will eventually deallocate the port */
3898 		return;
3899 	}
3900 
3901 	if (kotype == IKOT_THREAD_READ) {
3902 		flavor = THREAD_FLAVOR_READ;
3903 	} else {
3904 		flavor = THREAD_FLAVOR_INSPECT;
3905 	}
3906 
3907 	thread_mtx_lock(thread);
3908 	ip_mq_lock(port);
3909 
3910 	/*
3911 	 * If the port is no longer active, then ipc_thread_terminate() ran
3912 	 * and destroyed the kobject already. Just deallocate the thread
3913 	 * ref we took and go away.
3914 	 *
3915 	 * It is also possible that several nsrequests are in flight,
3916 	 * only one shall NULL-out the port entry, and this is the one
3917 	 * that gets to dealloc the port.
3918 	 *
3919 	 * Check for a stale no-senders notification. A call to any function
3920 	 * that vends out send rights to this port could resurrect it between
3921 	 * this notification being generated and actually being handled here.
3922 	 */
3923 	tro = get_thread_ro(thread);
3924 	if (!ip_active(port) ||
3925 	    tro->tro_ports[flavor] != port ||
3926 	    port->ip_srights > 0) {
3927 		ip_mq_unlock(port);
3928 		thread_mtx_unlock(thread);
3929 		thread_deallocate(thread);
3930 		return;
3931 	}
3932 
3933 	assert(tro->tro_ports[flavor] == port);
3934 	zalloc_ro_clear_field(ZONE_ID_THREAD_RO, tro, tro_ports[flavor]);
3935 	thread_mtx_unlock(thread);
3936 
3937 	ipc_kobject_dealloc_port_and_unlock(port, 0, kotype);
3938 
3939 	thread_deallocate(thread);
3940 }
3941 
3942 /*
3943  * The 'thread_region_page_shift' is used by footprint
3944  * to specify the page size that it will use to
3945  * accomplish its accounting work on the task being
3946  * inspected. Since footprint uses a thread for each
3947  * task that it works on, we need to keep the page_shift
3948  * on a per-thread basis.
3949  */
3950 
3951 int
3952 thread_self_region_page_shift(void)
3953 {
3954 	/*
3955 	 * Return the page shift that this thread
3956 	 * would like to use for its accounting work.
3957 	 */
3958 	return current_thread()->thread_region_page_shift;
3959 }
3960 
3961 void
3962 thread_self_region_page_shift_set(
3963 	int pgshift)
3964 {
3965 	/*
3966 	 * Set the page shift that this thread
3967 	 * would like to use for its accounting work
3968 	 * when dealing with a task.
3969 	 */
3970 	current_thread()->thread_region_page_shift = pgshift;
3971 }
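
/*
 * Usage sketch with assumed shift values: a footprint worker inspecting a
 * 16 KiB-page task could account in that task's page size, then restore its
 * previous setting.
 *
 *	int prev = thread_self_region_page_shift();
 *	thread_self_region_page_shift_set(14);	// 16 KiB pages
 *	// ... accounting work against the inspected task ...
 *	thread_self_region_page_shift_set(prev);
 */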
3972 
3973 __startup_func
3974 static void
3975 ctid_table_init(void)
3976 {
3977 	/*
3978 	 * Pretend the early boot setup didn't exist,
3979 	 * and pick a mangling nonce.
3980 	 */
3981 	*compact_id_resolve(&ctid_table, 0) = THREAD_NULL;
3982 	ctid_nonce = (uint32_t)early_random() & CTID_MASK;
3983 }
3984 
3985 
3986 /*
3987  * This maps the [0, CTID_MAX_THREAD_NUMBER] range
3988  * to [1, CTID_MAX_THREAD_NUMBER + 1 == CTID_MASK]
3989  * so that in mangled form, '0' is an invalid CTID.
3990  */
3991 static ctid_t
3992 ctid_mangle(compact_id_t cid)
3993 {
3994 	return (cid == ctid_nonce ? CTID_MASK : cid) ^ ctid_nonce;
3995 }
3996 
3997 static compact_id_t
3998 ctid_unmangle(ctid_t ctid)
3999 {
4000 	ctid ^= ctid_nonce;
4001 	return ctid == CTID_MASK ? ctid_nonce : ctid;
4002 }
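
/*
 * Round trip with an assumed nonce of 0x5: ctid_mangle(3) == 3 ^ 0x5 == 0x6
 * and ctid_unmangle(0x6) == 3; the colliding input cid == 0x5 maps to
 * CTID_MASK ^ 0x5 instead, which unmangles back to 0x5. No compact id
 * therefore mangles to 0, keeping a zero ctid reserved as invalid.
 */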
4003 
4004 void
4005 ctid_table_add(thread_t thread)
4006 {
4007 	compact_id_t cid;
4008 
4009 	cid = compact_id_get(&ctid_table, CTID_MAX_THREAD_NUMBER, thread);
4010 	thread->ctid = ctid_mangle(cid);
4011 }
4012 
4013 void
4014 ctid_table_remove(thread_t thread)
4015 {
4016 	__assert_only thread_t value;
4017 
4018 	value = compact_id_put(&ctid_table, ctid_unmangle(thread->ctid));
4019 	assert3p(value, ==, thread);
4020 	thread->ctid = 0;
4021 }
4022 
4023 thread_t
4024 ctid_get_thread_unsafe(ctid_t ctid)
4025 {
4026 	if (ctid) {
4027 		return *compact_id_resolve(&ctid_table, ctid_unmangle(ctid));
4028 	}
4029 	return THREAD_NULL;
4030 }
4031 
4032 thread_t
4033 ctid_get_thread(ctid_t ctid)
4034 {
4035 	thread_t thread = THREAD_NULL;
4036 
4037 	if (ctid) {
4038 		thread = *compact_id_resolve(&ctid_table, ctid_unmangle(ctid));
4039 		assert(thread && thread->ctid == ctid);
4040 	}
4041 	return thread;
4042 }
4043 
4044 ctid_t
4045 thread_get_ctid(thread_t thread)
4046 {
4047 	return thread->ctid;
4048 }
4049 
4050 /*
4051  * Adjust code signature dependent thread state.
4052  *
4053  * Called to allow code signature dependent adjustments to the thread
4054  * state. Note that this is usually called twice for the main thread:
4055  * Once at thread creation by thread_create, when the signature is
4056  * potentially not attached yet (which is usually the case for the
4057  * first/main thread of a task), and once after the task's signature
4058  * has actually been attached.
4059  *
4060  */
4061 kern_return_t
4062 thread_process_signature(thread_t thread, task_t task)
4063 {
4064 	return machine_thread_process_signature(thread, task);
4065 }
4066 
4067 
4068 #if CONFIG_DTRACE
4069 uint32_t
4070 dtrace_get_thread_predcache(thread_t thread)
4071 {
4072 	if (thread != THREAD_NULL) {
4073 		return thread->t_dtrace_predcache;
4074 	} else {
4075 		return 0;
4076 	}
4077 }
4078 
4079 int64_t
4080 dtrace_get_thread_vtime(thread_t thread)
4081 {
4082 	if (thread != THREAD_NULL) {
4083 		return thread->t_dtrace_vtime;
4084 	} else {
4085 		return 0;
4086 	}
4087 }
4088 
4089 int
4090 dtrace_get_thread_last_cpu_id(thread_t thread)
4091 {
4092 	if ((thread != THREAD_NULL) && (thread->last_processor != PROCESSOR_NULL)) {
4093 		return thread->last_processor->cpu_id;
4094 	} else {
4095 		return -1;
4096 	}
4097 }
4098 
4099 int64_t
4100 dtrace_get_thread_tracing(thread_t thread)
4101 {
4102 	if (thread != THREAD_NULL) {
4103 		return thread->t_dtrace_tracing;
4104 	} else {
4105 		return 0;
4106 	}
4107 }
4108 
4109 uint16_t
4110 dtrace_get_thread_inprobe(thread_t thread)
4111 {
4112 	if (thread != THREAD_NULL) {
4113 		return thread->t_dtrace_inprobe;
4114 	} else {
4115 		return 0;
4116 	}
4117 }
4118 
4119 vm_offset_t
4120 thread_get_kernel_stack(thread_t thread)
4121 {
4122 	if (thread != THREAD_NULL) {
4123 		return thread->kernel_stack;
4124 	} else {
4125 		return 0;
4126 	}
4127 }
4128 
4129 #if KASAN
4130 struct kasan_thread_data *
4131 kasan_get_thread_data(thread_t thread)
4132 {
4133 	return &thread->kasan_data;
4134 }
4135 #endif
4136 
4137 #if CONFIG_KCOV
4138 kcov_thread_data_t *
4139 kcov_get_thread_data(thread_t thread)
4140 {
4141 	return &thread->kcov_data;
4142 }
4143 #endif
4144 
4145 #if CONFIG_STKSZ
4146 /*
4147  * Returns base of a thread's kernel stack.
4148  *
4149  * Coverage sanitizer instruments every function, including those that participate in stack handoff between threads.
4150  * There is a window in which the CPU still holds the old values but the stack has been handed over to another thread already.
4151  * In this window kernel_stack is 0 but the CPU still uses the original stack (until a context switch occurs). The original
4152  * kernel_stack value is preserved in ksancov_stack during this window.
4153  */
4154 vm_offset_t
4155 kcov_stksz_get_thread_stkbase(thread_t thread)
4156 {
4157 	if (thread != THREAD_NULL) {
4158 		kcov_thread_data_t *data = kcov_get_thread_data(thread);
4159 		if (data->ktd_stksz.kst_stack) {
4160 			return data->ktd_stksz.kst_stack;
4161 		} else {
4162 			return thread->kernel_stack;
4163 		}
4164 	} else {
4165 		return 0;
4166 	}
4167 }
4168 
4169 vm_offset_t
4170 kcov_stksz_get_thread_stksize(thread_t thread)
4171 {
4172 	if (thread != THREAD_NULL) {
4173 		return kernel_stack_size;
4174 	} else {
4175 		return 0;
4176 	}
4177 }
4178 
4179 void
4180 kcov_stksz_set_thread_stack(thread_t thread, vm_offset_t stack)
4181 {
4182 	kcov_thread_data_t *data = kcov_get_thread_data(thread);
4183 	data->ktd_stksz.kst_stack = stack;
4184 }
4185 #endif /* CONFIG_STKSZ */
4186 
4187 int64_t
4188 dtrace_calc_thread_recent_vtime(thread_t thread)
4189 {
4190 	if (thread == THREAD_NULL) {
4191 		return 0;
4192 	}
4193 
4194 	struct recount_usage usage = { 0 };
4195 	recount_current_thread_usage(&usage);
4196 	return (int64_t)(usage.ru_system_time_mach + usage.ru_user_time_mach);
4197 }
4198 
4199 void
4200 dtrace_set_thread_predcache(thread_t thread, uint32_t predcache)
4201 {
4202 	if (thread != THREAD_NULL) {
4203 		thread->t_dtrace_predcache = predcache;
4204 	}
4205 }
4206 
4207 void
4208 dtrace_set_thread_vtime(thread_t thread, int64_t vtime)
4209 {
4210 	if (thread != THREAD_NULL) {
4211 		thread->t_dtrace_vtime = vtime;
4212 	}
4213 }
4214 
4215 void
4216 dtrace_set_thread_tracing(thread_t thread, int64_t accum)
4217 {
4218 	if (thread != THREAD_NULL) {
4219 		thread->t_dtrace_tracing = accum;
4220 	}
4221 }
4222 
4223 void
4224 dtrace_set_thread_inprobe(thread_t thread, uint16_t inprobe)
4225 {
4226 	if (thread != THREAD_NULL) {
4227 		thread->t_dtrace_inprobe = inprobe;
4228 	}
4229 }
4230 
4231 void
4232 dtrace_thread_bootstrap(void)
4233 {
4234 	task_t task = current_task();
4235 
4236 	if (task->thread_count == 1) {
4237 		thread_t thread = current_thread();
4238 		if (thread->t_dtrace_flags & TH_DTRACE_EXECSUCCESS) {
4239 			thread->t_dtrace_flags &= ~TH_DTRACE_EXECSUCCESS;
4240 			DTRACE_PROC(exec__success);
4241 			KDBG(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXEC),
4242 			    task_pid(task));
4243 		}
4244 		DTRACE_PROC(start);
4245 	}
4246 	DTRACE_PROC(lwp__start);
4247 }
4248 
4249 void
4250 dtrace_thread_didexec(thread_t thread)
4251 {
4252 	thread->t_dtrace_flags |= TH_DTRACE_EXECSUCCESS;
4253 }
4254 #endif /* CONFIG_DTRACE */
4255