xref: /xnu-10002.81.5/osfmk/kern/thread.c (revision 5e3eaea39dcf651e66cb99ba7d70e32cc4a99587)
1 /*
2  * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_FREE_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	kern/thread.c
60  *	Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub
61  *	Date:	1986
62  *
63  *	Thread management primitives implementation.
64  */
65 /*
66  * Copyright (c) 1993 The University of Utah and
67  * the Computer Systems Laboratory (CSL).  All rights reserved.
68  *
69  * Permission to use, copy, modify and distribute this software and its
70  * documentation is hereby granted, provided that both the copyright
71  * notice and this permission notice appear in all copies of the
72  * software, derivative works or modified versions, and any portions
73  * thereof, and that both notices appear in supporting documentation.
74  *
75  * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
76  * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
77  * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
78  *
79  * CSL requests users of this software to return to csl-dist@cs.utah.edu any
80  * improvements that they make and grant CSL redistribution rights.
81  *
82  */
83 
84 #include <mach/mach_types.h>
85 #include <mach/boolean.h>
86 #include <mach/policy.h>
87 #include <mach/thread_info.h>
88 #include <mach/thread_special_ports.h>
89 #include <mach/thread_act.h>
90 #include <mach/thread_status.h>
91 #include <mach/time_value.h>
92 #include <mach/vm_param.h>
93 
94 #include <machine/thread.h>
95 #include <machine/pal_routines.h>
96 #include <machine/limits.h>
97 
98 #include <kern/kern_types.h>
99 #include <kern/kalloc.h>
100 #include <kern/cpu_data.h>
101 #include <kern/extmod_statistics.h>
102 #include <kern/ipc_mig.h>
103 #include <kern/ipc_tt.h>
104 #include <kern/mach_param.h>
105 #include <kern/machine.h>
106 #include <kern/misc_protos.h>
107 #include <kern/processor.h>
108 #include <kern/queue.h>
109 #include <kern/restartable.h>
110 #include <kern/sched.h>
111 #include <kern/sched_prim.h>
112 #include <kern/syscall_subr.h>
113 #include <kern/task.h>
114 #include <kern/thread.h>
115 #include <kern/thread_group.h>
116 #include <kern/coalition.h>
117 #include <kern/host.h>
118 #include <kern/zalloc.h>
119 #include <kern/assert.h>
120 #include <kern/exc_resource.h>
121 #include <kern/exc_guard.h>
122 #include <kern/telemetry.h>
123 #include <kern/policy_internal.h>
124 #include <kern/turnstile.h>
125 #include <kern/sched_clutch.h>
126 #include <kern/recount.h>
127 #include <kern/smr.h>
128 #include <kern/ast.h>
129 #include <kern/compact_id.h>
130 
131 #include <corpses/task_corpse.h>
132 #if KPC
133 #include <kern/kpc.h>
134 #endif
135 
136 #if CONFIG_PERVASIVE_CPI
137 #include <kern/monotonic.h>
138 #include <machine/monotonic.h>
139 #endif /* CONFIG_PERVASIVE_CPI */
140 
141 #include <ipc/ipc_kmsg.h>
142 #include <ipc/ipc_port.h>
143 #include <bank/bank_types.h>
144 
145 #include <vm/vm_kern.h>
146 #include <vm/vm_pageout.h>
147 
148 #include <sys/kdebug.h>
149 #include <sys/bsdtask_info.h>
150 #include <mach/sdt.h>
151 #include <san/kasan.h>
152 #include <san/kcov_stksz.h>
153 
154 #include <stdatomic.h>
155 
156 #if defined(HAS_APPLE_PAC)
157 #include <ptrauth.h>
158 #include <arm64/proc_reg.h>
159 #endif /* defined(HAS_APPLE_PAC) */
160 
161 /*
162  * Exported interfaces
163  */
164 #include <mach/task_server.h>
165 #include <mach/thread_act_server.h>
166 #include <mach/mach_host_server.h>
167 #include <mach/host_priv_server.h>
168 #include <mach/mach_voucher_server.h>
169 #include <kern/policy_internal.h>
170 
171 #if CONFIG_MACF
172 #include <security/mac_mach_internal.h>
173 #endif
174 
175 #include <pthread/workqueue_trace.h>
176 
177 
178 LCK_GRP_DECLARE(thread_lck_grp, "thread");
179 
180 static SECURITY_READ_ONLY_LATE(zone_t) thread_zone;
181 ZONE_DEFINE_ID(ZONE_ID_THREAD_RO, "threads_ro", struct thread_ro, ZC_READONLY);
182 
183 static void thread_port_with_flavor_no_senders(ipc_port_t, mach_port_mscount_t);
184 
185 IPC_KOBJECT_DEFINE(IKOT_THREAD_CONTROL);
186 IPC_KOBJECT_DEFINE(IKOT_THREAD_READ,
187     .iko_op_no_senders = thread_port_with_flavor_no_senders);
188 IPC_KOBJECT_DEFINE(IKOT_THREAD_INSPECT,
189     .iko_op_no_senders = thread_port_with_flavor_no_senders);
190 
191 static struct mpsc_daemon_queue thread_stack_queue;
192 static struct mpsc_daemon_queue thread_terminate_queue;
193 static struct mpsc_daemon_queue thread_deallocate_queue;
194 static struct mpsc_daemon_queue thread_exception_queue;
195 static struct mpsc_daemon_queue thread_backtrace_queue;
196 
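/*
 * Threads of a corpse that is still being inspected are parked on
 * crashed_threads_queue (see thread_terminate_queue_invoke()) until
 * thread_terminate_crashed_threads() hands them back for reaping.
 */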
197 decl_simple_lock_data(static, crashed_threads_lock);
198 static queue_head_t             crashed_threads_queue;
199 
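/*
 * Allocated by thread_exception_enqueue(); freed, and the task/thread
 * references it carries consumed, by thread_exception_queue_invoke() below.
 */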
200 struct thread_exception_elt {
201 	struct mpsc_queue_chain link;
202 	exception_type_t        exception_type;
203 	task_t                  exception_task;
204 	thread_t                exception_thread;
205 };
206 
207 struct thread_backtrace_elt {
208 	struct mpsc_queue_chain link;
209 	exception_type_t        exception_type;
210 	kcdata_object_t         obj;
211 	exception_port_t        exc_ports[BT_EXC_PORTS_COUNT]; /* send rights */
212 };
213 
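/*
 * Every new thread starts life as a bitwise copy of this template;
 * see init_thread_from_template() below.
 */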
214 static SECURITY_READ_ONLY_LATE(struct thread) thread_template = {
215 #if MACH_ASSERT
216 	.thread_magic               = THREAD_MAGIC,
217 #endif /* MACH_ASSERT */
218 	.wait_result                = THREAD_WAITING,
219 	.options                    = THREAD_ABORTSAFE,
220 	.state                      = TH_WAIT | TH_UNINT,
221 	.th_sched_bucket            = TH_BUCKET_RUN,
222 	.base_pri                   = BASEPRI_DEFAULT,
223 	.realtime.deadline          = UINT64_MAX,
224 	.last_made_runnable_time    = THREAD_NOT_RUNNABLE,
225 	.last_basepri_change_time   = THREAD_NOT_RUNNABLE,
226 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
227 	.pri_shift                  = INT8_MAX,
228 #endif
229 	/* timers are initialized in thread_bootstrap */
230 };
231 
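/*
 * A ctid is a 20-bit compact thread identifier backed by the compact_id
 * table declared below. The boot-time random ctid_nonce is presumably
 * used to mangle raw table indices before they are handed out (see
 * ctid_table_add()/ctid_table_remove() later in this file); CTID_MASK
 * is reserved as a sentinel, cf. thread_bootstrap().
 */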
232 #define CTID_SIZE_BIT           20
233 #define CTID_MASK               ((1u << CTID_SIZE_BIT) - 1)
234 #define CTID_MAX_THREAD_NUMBER  (CTID_MASK - 1)
235 static_assert(CTID_MAX_THREAD_NUMBER <= COMPACT_ID_MAX);
236 
237 #ifndef __LITTLE_ENDIAN__
238 #error "ctid relies on the ls bits of uint32_t to be populated"
239 #endif
240 
241 __startup_data
242 static struct thread init_thread;
243 static SECURITY_READ_ONLY_LATE(uint32_t) ctid_nonce;
244 COMPACT_ID_TABLE_DEFINE(static, ctid_table);
245 
246 __startup_func
247 static void
248 thread_zone_startup(void)
249 {
250 	size_t size = sizeof(struct thread);
251 
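	/*
	 * On MACH_BSD kernels, the BSD `uthread` is co-allocated in the
	 * same zone element as struct thread, so pad the element size up
	 * to the thread alignment.
	 */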
252 #ifdef MACH_BSD
253 	size += roundup(uthread_size, _Alignof(struct thread));
254 #endif
255 	thread_zone = zone_create_ext("threads", size,
256 	    ZC_SEQUESTER | ZC_ZFREE_CLEARMEM, ZONE_ID_THREAD, NULL);
257 }
258 STARTUP(ZALLOC, STARTUP_RANK_FOURTH, thread_zone_startup);
259 
260 static void thread_deallocate_enqueue(thread_t thread);
261 static void thread_deallocate_complete(thread_t thread);
262 
263 static void ctid_table_remove(thread_t thread);
264 static void ctid_table_add(thread_t thread);
265 static void ctid_table_init(void);
266 
267 #ifdef MACH_BSD
268 extern void proc_exit(void *);
269 extern mach_exception_data_type_t proc_encode_exit_exception_code(void *);
270 extern uint64_t get_dispatchqueue_offset_from_proc(void *);
271 extern uint64_t get_return_to_kernel_offset_from_proc(void *p);
272 extern uint64_t get_wq_quantum_offset_from_proc(void *);
273 extern int      proc_selfpid(void);
274 extern void     proc_name(int, char*, int);
275 extern char *   proc_name_address(void *p);
276 exception_type_t get_exception_from_corpse_crashinfo(kcdata_descriptor_t corpse_info);
277 extern void kdebug_proc_name_args(struct proc *proc, long args[static 4]);
278 #endif /* MACH_BSD */
279 
280 extern bool bsdthread_part_of_cooperative_workqueue(struct uthread *uth);
281 extern bool disable_exc_resource;
282 extern bool disable_exc_resource_during_audio;
283 extern int audio_active;
284 extern int debug_task;
285 int thread_max = CONFIG_THREAD_MAX;     /* Max number of threads */
286 int task_threadmax = CONFIG_THREAD_MAX;
287 
288 static uint64_t         thread_unique_id = 100;
289 
290 struct _thread_ledger_indices thread_ledgers = { .cpu_time = -1 };
291 static ledger_template_t thread_ledger_template = NULL;
292 static void init_thread_ledgers(void);
293 
294 #if CONFIG_JETSAM
295 void jetsam_on_ledger_cpulimit_exceeded(void);
296 #endif
297 
298 extern int task_thread_soft_limit;
299 
300 
301 /*
302  * Level (in terms of percentage of the limit) at which the CPU usage monitor triggers telemetry.
303  *
304  * (i.e., when any thread's CPU consumption exceeds 70% of the limit, start taking
305  *  user stack traces, a.k.a. micro-stackshots)
306  */
307 #define CPUMON_USTACKSHOTS_TRIGGER_DEFAULT_PCT 70
308 
309 /* Percentage. Level at which we start gathering telemetry. */
310 static TUNABLE(uint8_t, cpumon_ustackshots_trigger_pct,
311     "cpumon_ustackshots_trigger_pct", CPUMON_USTACKSHOTS_TRIGGER_DEFAULT_PCT);
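/*
 * The deliberately loud, noinline name below is intended to be
 * self-describing when it shows up in user-visible backtraces.
 */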
312 void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU(void);
313 
314 #if DEVELOPMENT || DEBUG
315 TUNABLE_WRITEABLE(int, exc_resource_threads_enabled, "exc_resource_threads_enabled", 1);
316 
317 void __attribute__((noinline)) SENDING_NOTIFICATION__TASK_HAS_TOO_MANY_THREADS(task_t, int);
318 #endif /* DEVELOPMENT || DEBUG */
319 
320 /*
321  * The smallest interval over which we support limiting CPU consumption is 1ms
322  */
323 #define MINIMUM_CPULIMIT_INTERVAL_MS 1
324 
325 os_refgrp_decl(static, thread_refgrp, "thread", NULL);
326 
327 static inline void
328 init_thread_from_template(thread_t thread)
329 {
330 	/*
331 	 * In general, struct thread isn't trivially-copyable, since it may
332 	 * contain pointers to thread-specific state.  This may be enforced at
333 	 * compile time on architectures that store authed + diversified
334 	 * pointers in machine_thread.
335 	 *
336 	 * In this specific case, where we're initializing a new thread from a
337 	 * thread_template, we know all diversified pointers are NULL; these are
338 	 * safe to bitwise copy.
339 	 */
340 #pragma clang diagnostic push
341 #pragma clang diagnostic ignored "-Wnontrivial-memaccess"
342 	memcpy(thread, &thread_template, sizeof(*thread));
343 #pragma clang diagnostic pop
344 }
345 
346 static void
347 thread_ro_create(task_t parent_task, thread_t th, thread_ro_t tro_tpl)
348 {
349 #if __x86_64__
350 	th->t_task = parent_task;
351 #endif
352 	tro_tpl->tro_owner = th;
353 	tro_tpl->tro_task  = parent_task;
354 	th->t_tro = zalloc_ro(ZONE_ID_THREAD_RO, Z_WAITOK | Z_ZERO | Z_NOFAIL);
355 	zalloc_ro_update_elem(ZONE_ID_THREAD_RO, th->t_tro, tro_tpl);
356 }
357 
358 static void
359 thread_ro_destroy(thread_t th)
360 {
361 	thread_ro_t tro = get_thread_ro(th);
362 #if MACH_BSD
363 	struct ucred *cred = tro->tro_cred;
364 #endif
365 
366 	zfree_ro(ZONE_ID_THREAD_RO, tro);
367 #if MACH_BSD
368 	if (cred) {
369 		uthread_cred_free(cred);
370 	}
371 #endif
372 }
373 
374 #if MACH_BSD
375 extern void kauth_cred_set(struct ucred **, struct ucred *);
376 
377 void
378 thread_ro_update_cred(thread_ro_t tro, struct ucred *ucred)
379 {
380 	struct ucred *my_cred = tro->tro_cred;
381 	if (my_cred != ucred) {
382 		kauth_cred_set(&my_cred, ucred);
383 		zalloc_ro_update_field(ZONE_ID_THREAD_RO, tro, tro_cred, &my_cred);
384 	}
385 }
386 
387 void
388 thread_ro_update_flags(thread_ro_t tro, thread_ro_flags_t add, thread_ro_flags_t clr)
389 {
390 	thread_ro_flags_t flags = (tro->tro_flags & ~clr) | add;
391 	zalloc_ro_update_field(ZONE_ID_THREAD_RO, tro, tro_flags, &flags);
392 }
393 #endif
394 
395 __startup_func
396 thread_t
397 thread_bootstrap(void)
398 {
399 	/*
400 	 *	Fill in a template thread for fast initialization.
401 	 */
402 	timer_init(&thread_template.runnable_timer);
403 
404 	init_thread_from_template(&init_thread);
405 	/* fiddle with init thread to skip asserts in set_sched_pri */
406 	init_thread.sched_pri = MAXPRI_KERNEL;
407 
408 	/*
409 	 * We can't quite use ctid yet: on ARM, thread_bootstrap() is called
410 	 * before random numbers are available, so we just make it
411 	 * barely work here; it gets fixed up when the first real
412 	 * thread is actually made.
413 	 */
414 	*compact_id_resolve(&ctid_table, 0) = &init_thread;
415 	init_thread.ctid = CTID_MASK;
416 
417 	return &init_thread;
418 }
419 
420 void
421 thread_machine_init_template(void)
422 {
423 	machine_thread_template_init(&thread_template);
424 }
425 
426 void
427 thread_init(void)
428 {
429 	/*
430 	 *	Initialize any machine-dependent
431 	 *	per-thread structures necessary.
432 	 */
433 	machine_thread_init();
434 
435 	init_thread_ledgers();
436 }
437 
438 boolean_t
439 thread_is_active(thread_t thread)
440 {
441 	return thread->active;
442 }
443 
444 void
445 thread_corpse_continue(void)
446 {
447 	thread_t thread = current_thread();
448 
449 	thread_terminate_internal(thread);
450 
451 	/*
452 	 * Handle the thread termination directly
453 	 * here instead of returning to userspace.
454 	 */
455 	assert(thread->active == FALSE);
456 	thread_ast_clear(thread, AST_APC);
457 	thread_apc_ast(thread);
458 
459 	panic("thread_corpse_continue");
460 	/*NOTREACHED*/
461 }
462 
463 __dead2
464 static void
465 thread_terminate_continue(void)
466 {
467 	panic("thread_terminate_continue");
468 	/*NOTREACHED*/
469 }
470 
471 /*
472  *	thread_terminate_self:
473  */
474 void
475 thread_terminate_self(void)
476 {
477 	thread_t    thread = current_thread();
478 	thread_ro_t tro    = get_thread_ro(thread);
479 	task_t      task   = tro->tro_task;
480 	void *bsd_info = get_bsdtask_info(task);
481 	int threadcnt;
482 
483 	pal_thread_terminate_self(thread);
484 
485 	DTRACE_PROC(lwp__exit);
486 
487 	thread_mtx_lock(thread);
488 
489 	ipc_thread_disable(thread);
490 
491 	thread_mtx_unlock(thread);
492 
493 	thread_sched_call(thread, NULL);
494 
495 	spl_t s = splsched();
496 	thread_lock(thread);
497 
498 	thread_depress_abort_locked(thread);
499 
500 	/*
501 	 * Before we took the thread_lock right above,
502 	 * act_set_ast_reset_pcs() might not yet observe
503 	 * that the thread is inactive, and could have
504 	 * requested an IPI Ack.
505 	 *
506 	 * Once we unlock the thread, we know that
507 	 * act_set_ast_reset_pcs() can't fail to notice
508 	 * that thread->active is false,
509 	 * and won't set new ones.
510 	 */
511 	thread_reset_pcs_ack_IPI(thread);
512 
513 	thread_unlock(thread);
514 
515 	splx(s);
516 
517 #if CONFIG_TASKWATCH
518 	thead_remove_taskwatch(thread);
519 #endif /* CONFIG_TASKWATCH */
520 
521 	work_interval_thread_terminate(thread);
522 
523 	thread_mtx_lock(thread);
524 
525 	thread_policy_reset(thread);
526 
527 	thread_mtx_unlock(thread);
528 
529 	assert(thread->th_work_interval == NULL);
530 
531 	bank_swap_thread_bank_ledger(thread, NULL);
532 
533 	if (kdebug_enable && bsd_hasthreadname(get_bsdthread_info(thread))) {
534 		char threadname[MAXTHREADNAMESIZE];
535 		bsd_getthreadname(get_bsdthread_info(thread), threadname);
536 		kernel_debug_string_simple(TRACE_STRING_THREADNAME_PREV, threadname);
537 	}
538 
539 	uthread_cleanup(get_bsdthread_info(thread), tro);
540 
541 	if (kdebug_enable && bsd_info && !task_is_exec_copy(task)) {
542 		/* trace out pid before we sign off */
543 		long dbg_arg1 = 0;
544 		long dbg_arg2 = 0;
545 
546 		kdbg_trace_data(get_bsdtask_info(task), &dbg_arg1, &dbg_arg2);
547 #if CONFIG_PERVASIVE_CPI
548 		if (kdebug_debugid_enabled(DBG_MT_INSTRS_CYCLES_THR_EXIT)) {
549 			struct recount_usage usage = { 0 };
550 			struct recount_usage perf_only = { 0 };
551 			boolean_t intrs_end = ml_set_interrupts_enabled(FALSE);
552 			recount_current_thread_usage_perf_only(&usage, &perf_only);
553 			ml_set_interrupts_enabled(intrs_end);
554 			KDBG_RELEASE(DBG_MT_INSTRS_CYCLES_THR_EXIT,
555 			    usage.ru_instructions,
556 			    usage.ru_cycles,
557 			    usage.ru_system_time_mach,
558 			    usage.ru_user_time_mach);
559 #if __AMP__
560 			KDBG_RELEASE(DBG_MT_P_INSTRS_CYCLES_THR_EXIT,
561 			    perf_only.ru_instructions,
562 			    perf_only.ru_cycles,
563 			    perf_only.ru_system_time_mach,
564 			    perf_only.ru_user_time_mach);
565 
566 #endif // __AMP__
567 		}
568 #endif/* CONFIG_PERVASIVE_CPI */
569 		KDBG_RELEASE(TRACE_DATA_THREAD_TERMINATE_PID, dbg_arg1, dbg_arg2);
570 	}
571 
572 	/*
573 	 * After this subtraction, this thread should never access
574 	 * task->bsd_info unless it got 0 back from the os_atomic_dec.  It
575 	 * could be racing with other threads to be the last thread in the
576 	 * process, and the last thread in the process will tear down the proc
577 	 * structure and zero-out task->bsd_info.
578 	 */
579 	threadcnt = os_atomic_dec(&task->active_thread_count, relaxed);
580 
581 #if CONFIG_COALITIONS
582 	/*
583 	 * Leave the coalitions when last thread of task is exiting and the
584 	 * task is not a corpse.
585 	 */
586 	if (threadcnt == 0 && !task->corpse_info) {
587 		coalitions_remove_task(task);
588 	}
589 #endif
590 
591 	/*
592 	 * If we are the last thread to terminate and the task is
593 	 * associated with a BSD process, perform BSD process exit.
594 	 */
595 	if (threadcnt == 0 && bsd_info != NULL) {
596 		mach_exception_data_type_t subcode = 0;
597 		if (kdebug_enable) {
598 			/* since we're the last thread in this process, trace out the command name too */
599 			long args[4] = { 0 };
600 			kdebug_proc_name_args(bsd_info, args);
601 #if CONFIG_PERVASIVE_CPI
602 			if (kdebug_debugid_enabled(DBG_MT_INSTRS_CYCLES_PROC_EXIT)) {
603 				struct recount_usage usage = { 0 };
604 				struct recount_usage perf_only = { 0 };
605 				recount_current_task_usage_perf_only(&usage, &perf_only);
606 				KDBG_RELEASE(DBG_MT_INSTRS_CYCLES_PROC_EXIT,
607 				    usage.ru_instructions,
608 				    usage.ru_cycles,
609 				    usage.ru_system_time_mach,
610 				    usage.ru_user_time_mach);
611 #if __AMP__
612 				KDBG_RELEASE(DBG_MT_P_INSTRS_CYCLES_PROC_EXIT,
613 				    perf_only.ru_instructions,
614 				    perf_only.ru_cycles,
615 				    perf_only.ru_system_time_mach,
616 				    perf_only.ru_user_time_mach);
617 #endif // __AMP__
618 			}
619 #endif/* CONFIG_PERVASIVE_CPI */
620 			KDBG_RELEASE(TRACE_STRING_PROC_EXIT, args[0], args[1], args[2], args[3]);
621 		}
622 
623 		/* Get the exit reason before proc_exit */
624 		subcode = proc_encode_exit_exception_code(bsd_info);
625 		proc_exit(bsd_info);
626 		bsd_info = NULL;
627 		/*
628 		 * if there is crash info in task
629 		 * then do the deliver action since this is
630 		 * last thread for this task.
631 		 */
632 		if (task->corpse_info) {
633 			/* reset all except task name port */
634 			ipc_task_reset(task);
635 			/* enable all task ports (name port unchanged) */
636 			ipc_task_enable(task);
637 			exception_type_t etype = get_exception_from_corpse_crashinfo(task->corpse_info);
638 			task_deliver_crash_notification(task, current_thread(), etype, subcode);
639 		}
640 	}
641 
642 	if (threadcnt == 0) {
643 		task_lock(task);
644 		if (task_is_a_corpse_fork(task)) {
645 			thread_wakeup((event_t)&task->active_thread_count);
646 		}
647 		task_unlock(task);
648 	}
649 
650 
651 	s = splsched();
652 	thread_lock(thread);
653 
654 	/*
655 	 * Ensure that the depress timer is no longer enqueued,
656 	 * so the timer can be safely deallocated
657 	 *
658 	 * TODO: build timer_call_cancel_wait
659 	 */
660 
661 	assert((thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) == 0);
662 
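	/*
	 * Spin with a linearly increasing delay until any in-flight
	 * depress timer callout drains; panic rather than leak a timer
	 * that never becomes inactive.
	 */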
663 	uint32_t delay_us = 1;
664 
665 	while (thread->depress_timer_active > 0) {
666 		thread_unlock(thread);
667 		splx(s);
668 
669 		delay(delay_us++);
670 
671 		if (delay_us > USEC_PER_SEC) {
672 			panic("depress timer failed to inactivate! "
673 			    "thread: %p depress_timer_active: %d",
674 			    thread, thread->depress_timer_active);
675 		}
676 
677 		s = splsched();
678 		thread_lock(thread);
679 	}
680 
681 	/*
682 	 *	Cancel wait timer, and wait for
683 	 *	concurrent expirations.
684 	 */
685 	if (thread->wait_timer_armed) {
686 		thread->wait_timer_armed = false;
687 
688 		if (timer_call_cancel(thread->wait_timer)) {
689 			thread->wait_timer_active--;
690 		}
691 	}
692 
693 	delay_us = 1;
694 
695 	while (thread->wait_timer_active > 0) {
696 		thread_unlock(thread);
697 		splx(s);
698 
699 		delay(delay_us++);
700 
701 		if (delay_us > USEC_PER_SEC) {
702 			panic("wait timer failed to inactivate! "
703 			    "thread: %p, wait_timer_active: %d, "
704 			    "wait_timer_armed: %d",
705 			    thread, thread->wait_timer_active,
706 			    thread->wait_timer_armed);
707 		}
708 
709 		s = splsched();
710 		thread_lock(thread);
711 	}
712 
713 	/*
714 	 *	If there is a reserved stack, release it.
715 	 */
716 	if (thread->reserved_stack != 0) {
717 		stack_free_reserved(thread);
718 		thread->reserved_stack = 0;
719 	}
720 
721 	/*
722 	 *	Mark thread as terminating, and block.
723 	 */
724 	thread->state |= TH_TERMINATE;
725 	thread_mark_wait_locked(thread, THREAD_UNINT);
726 
727 	assert(thread->th_work_interval_flags == TH_WORK_INTERVAL_FLAGS_NONE);
728 	assert(thread->kern_promotion_schedpri == 0);
729 	if (thread->rwlock_count > 0) {
730 		panic("rwlock_count is %d for thread %p, possibly it still holds a rwlock", thread->rwlock_count, thread);
731 	}
732 	assert(thread->priority_floor_count == 0);
733 	assert(thread->handoff_thread == THREAD_NULL);
734 	assert(thread->th_work_interval == NULL);
735 	assert(thread->t_rr_state.trr_value == 0);
736 
737 	assert3u(0, ==, thread->sched_flags &
738 	    (TH_SFLAG_WAITQ_PROMOTED |
739 	    TH_SFLAG_RW_PROMOTED |
740 	    TH_SFLAG_EXEC_PROMOTED |
741 	    TH_SFLAG_FLOOR_PROMOTED |
742 	    TH_SFLAG_PROMOTED |
743 	    TH_SFLAG_DEPRESS));
744 
745 	thread_unlock(thread);
746 	/* splsched */
747 
748 	thread_block((thread_continue_t)thread_terminate_continue);
749 	/*NOTREACHED*/
750 }
751 
752 static bool
753 thread_ref_release(thread_t thread)
754 {
755 	if (thread == THREAD_NULL) {
756 		return false;
757 	}
758 
759 	assert_thread_magic(thread);
760 
761 	return os_ref_release_raw(&thread->ref_count, &thread_refgrp) == 0;
762 }
763 
764 /* Drop a thread refcount safely, without triggering a zfree: callers that cannot block defer the final free to the thread deallocate daemon */
765 void
766 thread_deallocate_safe(thread_t thread)
767 {
768 	if (__improbable(thread_ref_release(thread))) {
769 		/* enqueue the thread for the thread deallocate daemon to call thread_deallocate_complete */
770 		thread_deallocate_enqueue(thread);
771 	}
772 }
773 
774 void
775 thread_deallocate(thread_t thread)
776 {
777 	if (__improbable(thread_ref_release(thread))) {
778 		thread_deallocate_complete(thread);
779 	}
780 }
781 
782 void
783 thread_deallocate_complete(
784 	thread_t                        thread)
785 {
786 	task_t                          task;
787 
788 	assert_thread_magic(thread);
789 
790 	assert(os_ref_get_count_raw(&thread->ref_count) == 0);
791 
792 	if (!(thread->state & TH_TERMINATE2)) {
793 		panic("thread_deallocate: thread not properly terminated");
794 	}
795 
796 	assert(thread->runq == PROCESSOR_NULL);
797 	assert(!(thread->state & TH_WAKING));
798 
799 #if KPC
800 	kpc_thread_destroy(thread);
801 #endif /* KPC */
802 
803 	ipc_thread_terminate(thread);
804 
805 	proc_thread_qos_deallocate(thread);
806 
807 	task = get_threadtask(thread);
808 
809 #ifdef MACH_BSD
810 	uthread_destroy(get_bsdthread_info(thread));
811 #endif /* MACH_BSD */
812 
813 	if (thread->t_ledger) {
814 		ledger_dereference(thread->t_ledger);
815 	}
816 	if (thread->t_threadledger) {
817 		ledger_dereference(thread->t_threadledger);
818 	}
819 
820 	assert(thread->turnstile != TURNSTILE_NULL);
821 	if (thread->turnstile) {
822 		turnstile_deallocate(thread->turnstile);
823 	}
824 	turnstile_compact_id_put(thread->ctsid);
825 
826 	if (IPC_VOUCHER_NULL != thread->ith_voucher) {
827 		ipc_voucher_release(thread->ith_voucher);
828 	}
829 
830 	kfree_data(thread->thread_io_stats, sizeof(struct io_stat_info));
831 #if CONFIG_PREADOPT_TG
832 	if (thread->old_preadopt_thread_group) {
833 		thread_group_release(thread->old_preadopt_thread_group);
834 	}
835 
836 	if (thread->preadopt_thread_group) {
837 		thread_group_release(thread->preadopt_thread_group);
838 	}
839 #endif /* CONFIG_PREADOPT_TG */
840 
841 	if (thread->kernel_stack != 0) {
842 		stack_free(thread);
843 	}
844 
845 	recount_thread_deinit(&thread->th_recount);
846 
847 	lck_mtx_destroy(&thread->mutex, &thread_lck_grp);
848 	machine_thread_destroy(thread);
849 
850 	task_deallocate_grp(task, TASK_GRP_INTERNAL);
851 
852 #if MACH_ASSERT
853 	assert_thread_magic(thread);
854 	thread->thread_magic = 0;
855 #endif /* MACH_ASSERT */
856 
857 	lck_mtx_lock(&tasks_threads_lock);
858 	assert(terminated_threads_count > 0);
859 	queue_remove(&terminated_threads, thread, thread_t, threads);
860 	terminated_threads_count--;
861 	lck_mtx_unlock(&tasks_threads_lock);
862 
863 	timer_call_free(thread->depress_timer);
864 	timer_call_free(thread->wait_timer);
865 
866 	ctid_table_remove(thread);
867 
868 	thread_ro_destroy(thread);
869 	zfree(thread_zone, thread);
870 }
871 
872 /*
873  *	thread_inspect_deallocate:
874  *
875  *	Drop a thread inspection reference.
876  */
877 void
878 thread_inspect_deallocate(
879 	thread_inspect_t                thread_inspect)
880 {
881 	return thread_deallocate((thread_t)thread_inspect);
882 }
883 
884 /*
885  *	thread_read_deallocate:
886  *
887  *	Drop a reference on thread read port.
888  */
889 void
890 thread_read_deallocate(
891 	thread_read_t                thread_read)
892 {
893 	return thread_deallocate((thread_t)thread_read);
894 }
895 
896 
897 /*
898  *	thread_exception_queue_invoke:
899  *
900  *	Deliver EXC_{RESOURCE,GUARD} exception
901  */
902 static void
903 thread_exception_queue_invoke(mpsc_queue_chain_t elm,
904     __assert_only mpsc_daemon_queue_t dq)
905 {
906 	struct thread_exception_elt *elt;
907 	task_t task;
908 	thread_t thread;
909 	exception_type_t etype;
910 
911 	assert(dq == &thread_exception_queue);
912 	elt = mpsc_queue_element(elm, struct thread_exception_elt, link);
913 
914 	etype = elt->exception_type;
915 	task = elt->exception_task;
916 	thread = elt->exception_thread;
917 	assert_thread_magic(thread);
918 
919 	kfree_type(struct thread_exception_elt, elt);
920 
921 	/* wait for all the threads in the task to terminate */
922 	task_lock(task);
923 	task_wait_till_threads_terminate_locked(task);
924 	task_unlock(task);
925 
926 	/* Deliver the notification (also clears the corpse) before dropping the refs */
927 	task_deliver_crash_notification(task, thread, etype, 0);
928 
929 	/* Consumes the task ref returned by task_generate_corpse_internal */
930 	task_deallocate(task);
931 	/* Consumes the thread ref returned by task_generate_corpse_internal */
932 	thread_deallocate(thread);
933 }
934 
935 static void
936 thread_backtrace_queue_invoke(mpsc_queue_chain_t elm,
937     __assert_only mpsc_daemon_queue_t dq)
938 {
939 	struct thread_backtrace_elt *elt;
940 	kcdata_object_t obj;
941 	exception_port_t exc_ports[BT_EXC_PORTS_COUNT]; /* send rights */
942 	exception_type_t etype;
943 
944 	assert(dq == &thread_backtrace_queue);
945 	elt = mpsc_queue_element(elm, struct thread_backtrace_elt, link);
946 
947 	obj = elt->obj;
948 	memcpy(exc_ports, elt->exc_ports, sizeof(ipc_port_t) * BT_EXC_PORTS_COUNT);
949 	etype = elt->exception_type;
950 
951 	kfree_type(struct thread_backtrace_elt, elt);
952 
953 	/* Deliver to backtrace exception ports */
954 	exception_deliver_backtrace(obj, exc_ports, etype);
955 
956 	/*
957 	 * Release port right and kcdata object refs given by
958 	 * task_enqueue_exception_with_corpse()
959 	 */
960 
961 	for (unsigned int i = 0; i < BT_EXC_PORTS_COUNT; i++) {
962 		ipc_port_release_send(exc_ports[i]);
963 	}
964 
965 	kcdata_object_release(obj);
966 }
967 
968 /*
969  *	thread_exception_enqueue:
970  *
971  *	Enqueue a corpse port to be delivered an EXC_{RESOURCE,GUARD}.
972  */
973 void
974 thread_exception_enqueue(
975 	task_t          task,
976 	thread_t        thread,
977 	exception_type_t etype)
978 {
979 	assert(EXC_RESOURCE == etype || EXC_GUARD == etype);
980 	struct thread_exception_elt *elt = kalloc_type(struct thread_exception_elt, Z_WAITOK | Z_NOFAIL);
981 	elt->exception_type = etype;
982 	elt->exception_task = task;
983 	elt->exception_thread = thread;
984 
985 	mpsc_daemon_enqueue(&thread_exception_queue, &elt->link,
986 	    MPSC_QUEUE_DISABLE_PREEMPTION);
987 }
988 
989 void
990 thread_backtrace_enqueue(
991 	kcdata_object_t  obj,
992 	exception_port_t ports[static BT_EXC_PORTS_COUNT],
993 	exception_type_t etype)
994 {
995 	struct thread_backtrace_elt *elt = kalloc_type(struct thread_backtrace_elt, Z_WAITOK | Z_NOFAIL);
996 	elt->obj = obj;
997 	elt->exception_type = etype;
998 
999 	memcpy(elt->exc_ports, ports, sizeof(ipc_port_t) * BT_EXC_PORTS_COUNT);
1000 
1001 	mpsc_daemon_enqueue(&thread_backtrace_queue, &elt->link,
1002 	    MPSC_QUEUE_DISABLE_PREEMPTION);
1003 }
1004 
1005 /*
1006  *	thread_copy_resource_info
1007  *
1008  *	Copy the resource info counters from source
1009  *	thread to destination thread.
1010  */
1011 void
1012 thread_copy_resource_info(
1013 	thread_t dst_thread,
1014 	thread_t src_thread)
1015 {
1016 	dst_thread->c_switch = src_thread->c_switch;
1017 	dst_thread->p_switch = src_thread->p_switch;
1018 	dst_thread->ps_switch = src_thread->ps_switch;
1019 	dst_thread->sched_time_save = src_thread->sched_time_save;
1020 	dst_thread->runnable_timer = src_thread->runnable_timer;
1021 	dst_thread->vtimer_user_save = src_thread->vtimer_user_save;
1022 	dst_thread->vtimer_prof_save = src_thread->vtimer_prof_save;
1023 	dst_thread->vtimer_rlim_save = src_thread->vtimer_rlim_save;
1024 	dst_thread->vtimer_qos_save = src_thread->vtimer_qos_save;
1025 	dst_thread->syscalls_unix = src_thread->syscalls_unix;
1026 	dst_thread->syscalls_mach = src_thread->syscalls_mach;
1027 	ledger_rollup(dst_thread->t_threadledger, src_thread->t_threadledger);
1028 	recount_thread_copy(&dst_thread->th_recount, &src_thread->th_recount);
1029 	*dst_thread->thread_io_stats = *src_thread->thread_io_stats;
1030 }
1031 
1032 static void
1033 thread_terminate_queue_invoke(mpsc_queue_chain_t e,
1034     __assert_only mpsc_daemon_queue_t dq)
1035 {
1036 	thread_t thread = mpsc_queue_element(e, struct thread, mpsc_links);
1037 	task_t task = get_threadtask(thread);
1038 
1039 	assert(dq == &thread_terminate_queue);
1040 
1041 	task_lock(task);
1042 
1043 	/*
1044 	 * If marked for crash reporting, skip reaping.
1045 	 * The corpse delivery thread will clear the bit and re-enqueue
1046 	 * the thread for reaping when done.
1047 	 *
1048 	 * Note: the inspection field is set under the task lock
1049 	 *
1050 	 * FIXME[mad]: why enqueue for termination before `inspection` is false?
1051 	 */
1052 	if (__improbable(thread->inspection)) {
1053 		simple_lock(&crashed_threads_lock, &thread_lck_grp);
1054 		task_unlock(task);
1055 
1056 		enqueue_tail(&crashed_threads_queue, &thread->runq_links);
1057 		simple_unlock(&crashed_threads_lock);
1058 		return;
1059 	}
1060 
1061 	recount_task_rollup_thread(&task->tk_recount, &thread->th_recount);
1062 
1063 	task->total_runnable_time += timer_grab(&thread->runnable_timer);
1064 	task->c_switch += thread->c_switch;
1065 	task->p_switch += thread->p_switch;
1066 	task->ps_switch += thread->ps_switch;
1067 
1068 	task->syscalls_unix += thread->syscalls_unix;
1069 	task->syscalls_mach += thread->syscalls_mach;
1070 
1071 	task->task_timer_wakeups_bin_1 += thread->thread_timer_wakeups_bin_1;
1072 	task->task_timer_wakeups_bin_2 += thread->thread_timer_wakeups_bin_2;
1073 	task->task_gpu_ns += ml_gpu_stat(thread);
1074 	task->decompressions += thread->decompressions;
1075 
1076 	thread_update_qos_cpu_time(thread);
1077 
1078 	queue_remove(&task->threads, thread, thread_t, task_threads);
1079 	task->thread_count--;
1080 
1081 	/*
1082 	 * If the task is being halted, and there is only one thread
1083 	 * left in the task after this one, then wakeup that thread.
1084 	 */
1085 	if (task->thread_count == 1 && task->halting) {
1086 		thread_wakeup((event_t)&task->halting);
1087 	}
1088 
1089 	task_unlock(task);
1090 
1091 	lck_mtx_lock(&tasks_threads_lock);
1092 	queue_remove(&threads, thread, thread_t, threads);
1093 	threads_count--;
1094 	queue_enter(&terminated_threads, thread, thread_t, threads);
1095 	terminated_threads_count++;
1096 	lck_mtx_unlock(&tasks_threads_lock);
1097 
1098 #if MACH_BSD
1099 	/*
1100 	 * The thread no longer counts against the task's thread count,
1101 	 * so we can now wake up any pending joiner.
1102 	 *
1103 	 * Note that the inheritor will be set to `thread`, which is
1104 	 * incorrect once it is on the termination queue. However,
1105 	 * the termination queue runs at MINPRI_KERNEL, which is higher
1106 	 * than any user thread, so this isn't a priority inversion.
1107 	 */
1108 	if (thread_get_tag(thread) & THREAD_TAG_USER_JOIN) {
1109 		struct uthread *uth = get_bsdthread_info(thread);
1110 		mach_port_name_t kport = uthread_joiner_port(uth);
1111 
1112 		/*
1113 		 * Clear the port low two bits to tell pthread that thread is gone.
1114 		 */
1115 #ifndef NO_PORT_GEN
1116 		kport &= ~MACH_PORT_MAKE(0, IE_BITS_GEN_MASK + IE_BITS_GEN_ONE);
1117 #else
1118 		kport |= MACH_PORT_MAKE(0, ~(IE_BITS_GEN_MASK + IE_BITS_GEN_ONE));
1119 #endif
1120 		(void)copyoutmap_atomic32(task->map, kport,
1121 		    uthread_joiner_address(uth));
1122 		uthread_joiner_wake(task, uth);
1123 	}
1124 #endif
1125 
1126 	thread_deallocate(thread);
1127 }
1128 
1129 static void
1130 thread_deallocate_queue_invoke(mpsc_queue_chain_t e,
1131     __assert_only mpsc_daemon_queue_t dq)
1132 {
1133 	thread_t thread = mpsc_queue_element(e, struct thread, mpsc_links);
1134 
1135 	assert(dq == &thread_deallocate_queue);
1136 
1137 	thread_deallocate_complete(thread);
1138 }
1139 
1140 /*
1141  *	thread_terminate_enqueue:
1142  *
1143  *	Enqueue a terminating thread for final disposition.
1144  *
1145  *	Called at splsched.
1146  */
1147 void
1148 thread_terminate_enqueue(
1149 	thread_t                thread)
1150 {
1151 	KDBG_RELEASE(TRACE_DATA_THREAD_TERMINATE, thread->thread_id);
1152 
1153 	mpsc_daemon_enqueue(&thread_terminate_queue, &thread->mpsc_links,
1154 	    MPSC_QUEUE_DISABLE_PREEMPTION);
1155 }
1156 
1157 /*
1158  *	thread_deallocate_enqueue:
1159  *
1160  *	Enqueue a thread for final deallocation.
1161  */
1162 static void
1163 thread_deallocate_enqueue(
1164 	thread_t                thread)
1165 {
1166 	mpsc_daemon_enqueue(&thread_deallocate_queue, &thread->mpsc_links,
1167 	    MPSC_QUEUE_DISABLE_PREEMPTION);
1168 }
1169 
1170 /*
1171  * thread_terminate_crashed_threads:
1172  * Walk the list of crashed threads and re-enqueue for termination
1173  * those that are no longer being inspected.
1174  */
1175 void
1176 thread_terminate_crashed_threads(void)
1177 {
1178 	thread_t th_remove;
1179 
1180 	simple_lock(&crashed_threads_lock, &thread_lck_grp);
1181 	/*
1182 	 * Loop through the crashed threads queue and hand back for
1183 	 * termination any threads that are no longer being inspected.
1184 	 */
1185 
1186 	qe_foreach_element_safe(th_remove, &crashed_threads_queue, runq_links) {
1187 		/* make sure current_thread is never in crashed queue */
1188 		assert(th_remove != current_thread());
1189 
1190 		if (th_remove->inspection == FALSE) {
1191 			remqueue(&th_remove->runq_links);
1192 			mpsc_daemon_enqueue(&thread_terminate_queue, &th_remove->mpsc_links,
1193 			    MPSC_QUEUE_NONE);
1194 		}
1195 	}
1196 
1197 	simple_unlock(&crashed_threads_lock);
1198 }
1199 
1200 /*
1201  *	thread_stack_queue_invoke:
1202  *
1203  *	Perform stack allocation as required due to
1204  *	invoke failures.
1205  */
1206 static void
1207 thread_stack_queue_invoke(mpsc_queue_chain_t elm,
1208     __assert_only mpsc_daemon_queue_t dq)
1209 {
1210 	thread_t thread = mpsc_queue_element(elm, struct thread, mpsc_links);
1211 
1212 	assert(dq == &thread_stack_queue);
1213 
1214 	/* allocate stack with interrupts enabled so that we can call into VM */
1215 	stack_alloc(thread);
1216 
1217 	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_WAIT) | DBG_FUNC_END, thread_tid(thread), 0, 0, 0, 0);
1218 
1219 	spl_t s = splsched();
1220 	thread_lock(thread);
1221 	thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
1222 	thread_unlock(thread);
1223 	splx(s);
1224 }
1225 
1226 /*
1227  *	thread_stack_enqueue:
1228  *
1229  *	Enqueue a thread for stack allocation.
1230  *
1231  *	Called at splsched.
1232  */
1233 void
1234 thread_stack_enqueue(
1235 	thread_t                thread)
1236 {
1237 	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_WAIT) | DBG_FUNC_START, thread_tid(thread), 0, 0, 0, 0);
1238 	assert_thread_magic(thread);
1239 
1240 	mpsc_daemon_enqueue(&thread_stack_queue, &thread->mpsc_links,
1241 	    MPSC_QUEUE_DISABLE_PREEMPTION);
1242 }
1243 
1244 void
1245 thread_daemon_init(void)
1246 {
1247 	kern_return_t   result;
1248 
1249 	thread_deallocate_daemon_init();
1250 
1251 	thread_deallocate_daemon_register_queue(&thread_terminate_queue,
1252 	    thread_terminate_queue_invoke);
1253 
1254 	thread_deallocate_daemon_register_queue(&thread_deallocate_queue,
1255 	    thread_deallocate_queue_invoke);
1256 
1257 	ipc_object_deallocate_register_queue();
1258 
1259 	simple_lock_init(&crashed_threads_lock, 0);
1260 	queue_init(&crashed_threads_queue);
1261 
1262 	result = mpsc_daemon_queue_init_with_thread(&thread_stack_queue,
1263 	    thread_stack_queue_invoke, BASEPRI_PREEMPT_HIGH,
1264 	    "daemon.thread-stack", MPSC_DAEMON_INIT_NONE);
1265 	if (result != KERN_SUCCESS) {
1266 		panic("thread_daemon_init: thread_stack_daemon");
1267 	}
1268 
1269 	result = mpsc_daemon_queue_init_with_thread(&thread_exception_queue,
1270 	    thread_exception_queue_invoke, MINPRI_KERNEL,
1271 	    "daemon.thread-exception", MPSC_DAEMON_INIT_NONE);
1272 
1273 	if (result != KERN_SUCCESS) {
1274 		panic("thread_daemon_init: thread_exception_daemon");
1275 	}
1276 
1277 	result = mpsc_daemon_queue_init_with_thread(&thread_backtrace_queue,
1278 	    thread_backtrace_queue_invoke, MINPRI_KERNEL,
1279 	    "daemon.thread-backtrace", MPSC_DAEMON_INIT_NONE);
1280 
1281 	if (result != KERN_SUCCESS) {
1282 		panic("thread_daemon_init: thread_backtrace_daemon");
1283 	}
1284 }
1285 
1286 __options_decl(thread_create_internal_options_t, uint32_t, {
1287 	TH_OPTION_NONE          = 0x00,
1288 	TH_OPTION_NOSUSP        = 0x02,
1289 	TH_OPTION_WORKQ         = 0x04,
1290 	TH_OPTION_MAINTHREAD    = 0x08,
1291 });
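/*
 * TH_OPTION_NOSUSP:     fail creation if the parent task is suspended
 * TH_OPTION_WORKQ:      thread is a pthread workqueue thread
 * TH_OPTION_MAINTHREAD: first thread of a task; its control port is made
 *                       pinned/immovable and its initial wait is
 *                       uninterruptible (see the uses below)
 */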
1292 
1293 void
1294 main_thread_set_immovable_pinned(thread_t thread)
1295 {
1296 	ipc_main_thread_set_immovable_pinned(thread);
1297 }
1298 
1299 /*
1300  * Create a new thread.
1301  * Doesn't start the thread running.
1302  *
1303  * Task and tasks_threads_lock are returned locked on success.
1304  */
1305 static kern_return_t
1306 thread_create_internal(
1307 	task_t                                  parent_task,
1308 	integer_t                               priority,
1309 	thread_continue_t                       continuation,
1310 	void                                    *parameter,
1311 	thread_create_internal_options_t        options,
1312 	thread_t                                *out_thread)
1313 {
1314 	thread_t                  new_thread;
1315 	ipc_thread_init_options_t init_options = IPC_THREAD_INIT_NONE;
1316 	struct thread_ro          tro_tpl = { };
1317 	bool first_thread = false;
1318 	kern_return_t kr = KERN_FAILURE;
1319 
1320 	/*
1321 	 *	Allocate a thread and initialize static fields
1322 	 */
1323 	new_thread = zalloc_flags(thread_zone, Z_WAITOK | Z_NOFAIL);
1324 
1325 	if (__improbable(current_thread() == &init_thread)) {
1326 		/*
1327 		 * The first thread ever is a global, but because we want to be
1328 		 * able to zone_id_require() threads, we have to stop using the
1329 		 * global piece of memory we used to bootstrap the kernel and
1330 		 * jump to a proper thread from a zone.
1331 		 *
1332 		 * This is why that one thread will inherit its original
1333 		 * state differently.
1334 		 *
1335 		 * Also remember this thread in `vm_pageout_scan_thread`
1336 		 * as this is what the first thread ever becomes.
1337 		 *
1338 		 * Also pre-warm the depress timer since the VM pageout scan
1339 		 * daemon might need to use it.
1340 		 */
1341 		assert(vm_pageout_scan_thread == THREAD_NULL);
1342 		vm_pageout_scan_thread = new_thread;
1343 
1344 		first_thread = true;
1345 #pragma clang diagnostic push
1346 #pragma clang diagnostic ignored "-Wnontrivial-memaccess"
1347 		/* work around 74481146 */
1348 		memcpy(new_thread, &init_thread, sizeof(*new_thread));
1349 #pragma clang diagnostic pop
1350 
1351 		/*
1352 		 * Make the ctid table functional
1353 		 */
1354 		ctid_table_init();
1355 		new_thread->ctid = 0;
1356 	} else {
1357 		init_thread_from_template(new_thread);
1358 	}
1359 
1360 	if (options & TH_OPTION_MAINTHREAD) {
1361 		init_options |= IPC_THREAD_INIT_MAINTHREAD;
1362 	}
1363 
1364 	os_ref_init_count_raw(&new_thread->ref_count, &thread_refgrp, 2);
1365 	machine_thread_create(new_thread, parent_task, first_thread);
1366 
1367 	machine_thread_process_signature(new_thread, parent_task);
1368 
1369 #ifdef MACH_BSD
1370 	uthread_init(parent_task, get_bsdthread_info(new_thread),
1371 	    &tro_tpl, (options & TH_OPTION_WORKQ) != 0);
1372 	if (!task_is_a_corpse(parent_task)) {
1373 		/*
1374 		 * uthread_init will set tro_cred (with a +1)
1375 		 * and tro_proc for live tasks.
1376 		 */
1377 		assert(tro_tpl.tro_cred && tro_tpl.tro_proc);
1378 	}
1379 #endif  /* MACH_BSD */
1380 
1381 	thread_lock_init(new_thread);
1382 	wake_lock_init(new_thread);
1383 
1384 	lck_mtx_init(&new_thread->mutex, &thread_lck_grp, LCK_ATTR_NULL);
1385 
1386 	ipc_thread_init(parent_task, new_thread, &tro_tpl, init_options);
1387 
1388 	thread_ro_create(parent_task, new_thread, &tro_tpl);
1389 
1390 	new_thread->continuation = continuation;
1391 	new_thread->parameter = parameter;
1392 	new_thread->inheritor_flags = TURNSTILE_UPDATE_FLAGS_NONE;
1393 	new_thread->requested_policy = default_thread_requested_policy;
1394 	priority_queue_init(&new_thread->sched_inheritor_queue);
1395 	priority_queue_init(&new_thread->base_inheritor_queue);
1396 #if CONFIG_SCHED_CLUTCH
1397 	priority_queue_entry_init(&new_thread->th_clutch_runq_link);
1398 	priority_queue_entry_init(&new_thread->th_clutch_pri_link);
1399 #endif /* CONFIG_SCHED_CLUTCH */
1400 
1401 #if CONFIG_SCHED_EDGE
1402 	new_thread->th_bound_cluster_enqueued = false;
1403 	for (cluster_shared_rsrc_type_t shared_rsrc_type = CLUSTER_SHARED_RSRC_TYPE_MIN; shared_rsrc_type < CLUSTER_SHARED_RSRC_TYPE_COUNT; shared_rsrc_type++) {
1404 		new_thread->th_shared_rsrc_enqueued[shared_rsrc_type] = false;
1405 		new_thread->th_shared_rsrc_heavy_user[shared_rsrc_type] = false;
1406 		new_thread->th_shared_rsrc_heavy_perf_control[shared_rsrc_type] = false;
1407 	}
1408 #endif /* CONFIG_SCHED_EDGE */
1409 	new_thread->th_bound_cluster_id = THREAD_BOUND_CLUSTER_NONE;
1410 
1411 	/* Allocate I/O Statistics structure */
1412 	new_thread->thread_io_stats = kalloc_data(sizeof(struct io_stat_info),
1413 	    Z_WAITOK | Z_ZERO | Z_NOFAIL);
1414 
1415 #if KASAN_CLASSIC
1416 	kasan_init_thread(&new_thread->kasan_data);
1417 #endif /* KASAN_CLASSIC */
1418 
1419 #if CONFIG_KCOV
1420 	kcov_init_thread(&new_thread->kcov_data);
1421 #endif
1422 
1423 #if CONFIG_IOSCHED
1424 	/* Clear out the I/O Scheduling info for AppleFSCompression */
1425 	new_thread->decmp_upl = NULL;
1426 #endif /* CONFIG_IOSCHED */
1427 
1428 	new_thread->thread_region_page_shift = 0;
1429 
1430 #if DEVELOPMENT || DEBUG
1431 	task_lock(parent_task);
1432 	uint16_t thread_limit = parent_task->task_thread_limit;
1433 	if (exc_resource_threads_enabled &&
1434 	    thread_limit > 0 &&
1435 	    parent_task->thread_count >= thread_limit &&
1436 	    !parent_task->task_has_crossed_thread_limit &&
1437 	    !(task_is_a_corpse(parent_task))) {
1438 		int thread_count = parent_task->thread_count;
1439 		parent_task->task_has_crossed_thread_limit = TRUE;
1440 		task_unlock(parent_task);
1441 		SENDING_NOTIFICATION__TASK_HAS_TOO_MANY_THREADS(parent_task, thread_count);
1442 	} else {
1443 		task_unlock(parent_task);
1444 	}
1445 #endif
1446 
1447 	lck_mtx_lock(&tasks_threads_lock);
1448 	task_lock(parent_task);
1449 
1450 	/*
1451 	 * Fail thread creation if parent task is being torn down or has too many threads
1452 	 * If the caller asked for TH_OPTION_NOSUSP, also fail if the parent task is suspended
1453 	 */
1454 	if (parent_task->active == 0 || parent_task->halting ||
1455 	    (parent_task->suspend_count > 0 && (options & TH_OPTION_NOSUSP) != 0) ||
1456 	    (parent_task->thread_count >= task_threadmax && parent_task != kernel_task)) {
1457 		task_unlock(parent_task);
1458 		lck_mtx_unlock(&tasks_threads_lock);
1459 
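		/*
		 * Unwind only what has been set up so far; the remaining
		 * teardown (uthread, machine state, thread_ro, zone element)
		 * happens at out_thread_cleanup below.
		 */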
1460 		ipc_thread_disable(new_thread);
1461 		ipc_thread_terminate(new_thread);
1462 		kfree_data(new_thread->thread_io_stats,
1463 		    sizeof(struct io_stat_info));
1464 		lck_mtx_destroy(&new_thread->mutex, &thread_lck_grp);
1465 		kr = KERN_FAILURE;
1466 		goto out_thread_cleanup;
1467 	}
1468 
1469 	/* Protected by the tasks_threads_lock */
1470 	new_thread->thread_id = ++thread_unique_id;
1471 
1472 	ctid_table_add(new_thread);
1473 
1474 	/* New threads inherit any default state on the task */
1475 	machine_thread_inherit_taskwide(new_thread, parent_task);
1476 
1477 	task_reference_grp(parent_task, TASK_GRP_INTERNAL);
1478 
1479 	if (parent_task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
1480 		/*
1481 		 * This task has a per-thread CPU limit; make sure this new thread
1482 		 * gets its limit set too, before it gets out of the kernel.
1483 		 */
1484 		act_set_astledger(new_thread);
1485 	}
1486 
1487 	/* Instantiate a thread ledger. Do not fail thread creation if ledger creation fails. */
1488 	if ((new_thread->t_threadledger = ledger_instantiate(thread_ledger_template,
1489 	    LEDGER_CREATE_INACTIVE_ENTRIES)) != LEDGER_NULL) {
1490 		ledger_entry_setactive(new_thread->t_threadledger, thread_ledgers.cpu_time);
1491 	}
1492 
1493 	new_thread->t_bankledger = LEDGER_NULL;
1494 	new_thread->t_deduct_bank_ledger_time = 0;
1495 	new_thread->t_deduct_bank_ledger_energy = 0;
1496 
1497 	new_thread->t_ledger = parent_task->ledger;
1498 	if (new_thread->t_ledger) {
1499 		ledger_reference(new_thread->t_ledger);
1500 	}
1501 
1502 	recount_thread_init(&new_thread->th_recount);
1503 
1504 #if defined(CONFIG_SCHED_MULTIQ)
1505 	/* Cache the task's sched_group */
1506 	new_thread->sched_group = parent_task->sched_group;
1507 #endif /* defined(CONFIG_SCHED_MULTIQ) */
1508 
1509 	/* Cache the task's map */
1510 	new_thread->map = parent_task->map;
1511 
1512 	new_thread->depress_timer = timer_call_alloc(thread_depress_expire, new_thread);
1513 	new_thread->wait_timer = timer_call_alloc(thread_timer_expire, new_thread);
1514 
1515 #if KPC
1516 	kpc_thread_create(new_thread);
1517 #endif
1518 
1519 	/* Set the thread's scheduling parameters */
1520 	new_thread->sched_mode = SCHED(initial_thread_sched_mode)(parent_task);
1521 	new_thread->max_priority = parent_task->max_priority;
1522 	new_thread->task_priority = parent_task->priority;
1523 
1524 #if CONFIG_THREAD_GROUPS
1525 	thread_group_init_thread(new_thread, parent_task);
1526 #endif /* CONFIG_THREAD_GROUPS */
1527 
1528 	int new_priority = (priority < 0) ? parent_task->priority : priority;
1530 	if (new_priority > new_thread->max_priority) {
1531 		new_priority = new_thread->max_priority;
1532 	}
1533 #if !defined(XNU_TARGET_OS_OSX)
1534 	if (new_priority < MAXPRI_THROTTLE) {
1535 		new_priority = MAXPRI_THROTTLE;
1536 	}
1537 #endif /* !defined(XNU_TARGET_OS_OSX) */
1538 
1539 	new_thread->importance = new_priority - new_thread->task_priority;
1540 
1541 	sched_set_thread_base_priority(new_thread, new_priority);
1542 
1543 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
1544 	new_thread->sched_stamp = sched_tick;
1545 #if CONFIG_SCHED_CLUTCH
1546 	new_thread->pri_shift = sched_clutch_thread_pri_shift(new_thread, new_thread->th_sched_bucket);
1547 #else /* CONFIG_SCHED_CLUTCH */
1548 	new_thread->pri_shift = sched_pri_shifts[new_thread->th_sched_bucket];
1549 #endif /* CONFIG_SCHED_CLUTCH */
1550 #endif /* defined(CONFIG_SCHED_TIMESHARE_CORE) */
1551 
1552 	if (parent_task->max_priority <= MAXPRI_THROTTLE) {
1553 		sched_thread_mode_demote(new_thread, TH_SFLAG_THROTTLED);
1554 	}
1555 
1556 	thread_policy_create(new_thread);
1557 
1558 	/* Chain the thread onto the task's list */
1559 	queue_enter(&parent_task->threads, new_thread, thread_t, task_threads);
1560 	parent_task->thread_count++;
1561 
1562 	/* So terminating threads don't need to take the task lock to decrement */
1563 	os_atomic_inc(&parent_task->active_thread_count, relaxed);
1564 
1565 	queue_enter(&threads, new_thread, thread_t, threads);
1566 	threads_count++;
1567 
1568 	new_thread->active = TRUE;
1569 	if (task_is_a_corpse_fork(parent_task)) {
1570 		/* Set the inspection bit if the task is a corpse fork */
1571 		new_thread->inspection = TRUE;
1572 	} else {
1573 		new_thread->inspection = FALSE;
1574 	}
1575 	new_thread->corpse_dup = FALSE;
1576 	new_thread->turnstile = turnstile_alloc();
1577 	new_thread->ctsid = turnstile_compact_id_get();
1578 
1579 
1580 	*out_thread = new_thread;
1581 
1582 	if (kdebug_enable) {
1583 		long args[4] = {};
1584 
1585 		kdbg_trace_data(get_bsdtask_info(parent_task), &args[1], &args[3]);
1586 
1587 		/*
1588 		 * Starting with 26604425, exec'ing creates a new task/thread.
1589 		 *
1590 		 * NEWTHREAD in the current process has two possible meanings:
1591 		 *
1592 		 * 1) Create a new thread for this process.
1593 		 * 2) Create a new thread for the future process this will become in an
1594 		 * exec.
1595 		 *
1596 		 * To disambiguate these, arg3 will be set to TRUE for case #2.
1597 		 *
1598 		 * The value we need to find (TPF_EXEC_COPY) is stable in the case of a
1599 		 * task exec'ing. The read of t_procflags does not take the proc_lock.
1600 		 */
1601 		args[2] = task_is_exec_copy(parent_task) ? 1 : 0;
1602 
1603 		KDBG_RELEASE(TRACE_DATA_NEWTHREAD, (uintptr_t)thread_tid(new_thread),
1604 		    args[1], args[2], args[3]);
1605 
1606 		kdebug_proc_name_args(get_bsdtask_info(parent_task), args);
1607 		KDBG_RELEASE(TRACE_STRING_NEWTHREAD, args[0], args[1], args[2],
1608 		    args[3]);
1609 	}
1610 
1611 	DTRACE_PROC1(lwp__create, thread_t, *out_thread);
1612 
1613 	kr = KERN_SUCCESS;
1614 	goto done;
1615 
1616 out_thread_cleanup:
1617 #ifdef MACH_BSD
1618 	{
1619 		struct uthread *ut = get_bsdthread_info(new_thread);
1620 
1621 		uthread_cleanup(ut, &tro_tpl);
1622 		uthread_destroy(ut);
1623 	}
1624 #endif  /* MACH_BSD */
1625 
1626 	machine_thread_destroy(new_thread);
1627 
1628 	thread_ro_destroy(new_thread);
1629 	zfree(thread_zone, new_thread);
1630 
1631 done:
1632 	return kr;
1633 }
1634 
1635 static kern_return_t
1636 thread_create_with_options_internal(
1637 	task_t                            task,
1638 	thread_t                          *new_thread,
1639 	boolean_t                         from_user,
1640 	thread_create_internal_options_t  options,
1641 	thread_continue_t                 continuation)
1642 {
1643 	kern_return_t           result;
1644 	thread_t                thread;
1645 
1646 	if (task == TASK_NULL || task == kernel_task) {
1647 		return KERN_INVALID_ARGUMENT;
1648 	}
1649 
1650 #if CONFIG_MACF
1651 	if (from_user && current_task() != task &&
1652 	    mac_proc_check_remote_thread_create(task, -1, NULL, 0) != 0) {
1653 		return KERN_DENIED;
1654 	}
1655 #endif
1656 
1657 	result = thread_create_internal(task, -1, continuation, NULL, options, &thread);
1658 	if (result != KERN_SUCCESS) {
1659 		return result;
1660 	}
1661 
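	/*
	 * Per Mach thread_create() semantics, the new thread starts
	 * suspended (one user-level stop), plus an additional hold if
	 * the owning task is itself currently suspended.
	 */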
1662 	thread->user_stop_count = 1;
1663 	thread_hold(thread);
1664 	if (task->suspend_count > 0) {
1665 		thread_hold(thread);
1666 	}
1667 
1668 	if (from_user) {
1669 		extmod_statistics_incr_thread_create(task);
1670 	}
1671 
1672 	task_unlock(task);
1673 	lck_mtx_unlock(&tasks_threads_lock);
1674 
1675 	*new_thread = thread;
1676 
1677 	return KERN_SUCCESS;
1678 }
1679 
1680 kern_return_t
1681 thread_create_immovable(
1682 	task_t                          task,
1683 	thread_t                        *new_thread)
1684 {
1685 	return thread_create_with_options_internal(task, new_thread, FALSE,
1686 	           TH_OPTION_NONE, (thread_continue_t)thread_bootstrap_return);
1687 }
1688 
1689 kern_return_t
1690 thread_create_from_user(
1691 	task_t                          task,
1692 	thread_t                        *new_thread)
1693 {
1694 	/* All thread ports are created immovable by default */
1695 	return thread_create_with_options_internal(task, new_thread, TRUE, TH_OPTION_NONE,
1696 	           (thread_continue_t)thread_bootstrap_return);
1697 }
1698 
1699 kern_return_t
1700 thread_create_with_continuation(
1701 	task_t                          task,
1702 	thread_t                        *new_thread,
1703 	thread_continue_t               continuation)
1704 {
1705 	return thread_create_with_options_internal(task, new_thread, FALSE, TH_OPTION_NONE, continuation);
1706 }
1707 
1708 /*
1709  * Create a thread that is already started, but is waiting on an event
1710  */
1711 static kern_return_t
1712 thread_create_waiting_internal(
1713 	task_t                  task,
1714 	thread_continue_t       continuation,
1715 	event_t                 event,
1716 	block_hint_t            block_hint,
1717 	thread_create_internal_options_t options,
1718 	thread_t                *new_thread)
1719 {
1720 	kern_return_t result;
1721 	thread_t thread;
1722 	wait_interrupt_t wait_interrupt = THREAD_INTERRUPTIBLE;
1723 
1724 	if (task == TASK_NULL || task == kernel_task) {
1725 		return KERN_INVALID_ARGUMENT;
1726 	}
1727 
1728 	result = thread_create_internal(task, -1, continuation, NULL,
1729 	    options, &thread);
1730 	if (result != KERN_SUCCESS) {
1731 		return result;
1732 	}
1733 
1734 	/* note no user_stop_count or thread_hold here */
1735 
1736 	if (task->suspend_count > 0) {
1737 		thread_hold(thread);
1738 	}
1739 
1740 	thread_mtx_lock(thread);
1741 	thread_set_pending_block_hint(thread, block_hint);
1742 	if (options & TH_OPTION_WORKQ) {
1743 		thread->static_param = true;
1744 		event = workq_thread_init_and_wq_lock(task, thread);
1745 	} else if (options & TH_OPTION_MAINTHREAD) {
1746 		wait_interrupt = THREAD_UNINT;
1747 	}
1748 	thread_start_in_assert_wait(thread,
1749 	    assert_wait_queue(event), CAST_EVENT64_T(event),
1750 	    wait_interrupt);
1751 	thread_mtx_unlock(thread);
1752 
1753 	task_unlock(task);
1754 	lck_mtx_unlock(&tasks_threads_lock);
1755 
1756 	*new_thread = thread;
1757 
1758 	return KERN_SUCCESS;
1759 }
1760 
1761 kern_return_t
1762 main_thread_create_waiting(
1763 	task_t                          task,
1764 	thread_continue_t               continuation,
1765 	event_t                         event,
1766 	thread_t                        *new_thread)
1767 {
1768 	return thread_create_waiting_internal(task, continuation, event,
1769 	           kThreadWaitNone, TH_OPTION_MAINTHREAD, new_thread);
1770 }
1771 
1772 
1773 static kern_return_t
1774 thread_create_running_internal2(
1775 	task_t         task,
1776 	int                     flavor,
1777 	thread_state_t          new_state,
1778 	mach_msg_type_number_t  new_state_count,
1779 	thread_t                                *new_thread,
1780 	boolean_t                               from_user)
1781 {
1782 	kern_return_t  result;
1783 	thread_t                                thread;
1784 
1785 	if (task == TASK_NULL || task == kernel_task) {
1786 		return KERN_INVALID_ARGUMENT;
1787 	}
1788 
1789 #if CONFIG_MACF
1790 	if (from_user && current_task() != task &&
1791 	    mac_proc_check_remote_thread_create(task, flavor, new_state, new_state_count) != 0) {
1792 		return KERN_DENIED;
1793 	}
1794 #endif
1795 
1796 	result = thread_create_internal(task, -1,
1797 	    (thread_continue_t)thread_bootstrap_return, NULL,
1798 	    TH_OPTION_NONE, &thread);
1799 	if (result != KERN_SUCCESS) {
1800 		return result;
1801 	}
1802 
1803 	if (task->suspend_count > 0) {
1804 		thread_hold(thread);
1805 	}
1806 
1807 	if (from_user) {
1808 		result = machine_thread_state_convert_from_user(thread, flavor,
1809 		    new_state, new_state_count, NULL, 0, TSSF_FLAGS_NONE);
1810 	}
1811 	if (result == KERN_SUCCESS) {
1812 		result = machine_thread_set_state(thread, flavor, new_state,
1813 		    new_state_count);
1814 	}
1815 	if (result != KERN_SUCCESS) {
1816 		task_unlock(task);
1817 		lck_mtx_unlock(&tasks_threads_lock);
1818 
1819 		thread_terminate(thread);
1820 		thread_deallocate(thread);
1821 		return result;
1822 	}
1823 
1824 	thread_mtx_lock(thread);
1825 	thread_start(thread);
1826 	thread_mtx_unlock(thread);
1827 
1828 	if (from_user) {
1829 		extmod_statistics_incr_thread_create(task);
1830 	}
1831 
1832 	task_unlock(task);
1833 	lck_mtx_unlock(&tasks_threads_lock);
1834 
1835 	*new_thread = thread;
1836 
1837 	return result;
1838 }
1839 
1840 /* Prototype, see justification above */
1841 kern_return_t
1842 thread_create_running(
1843 	task_t         task,
1844 	int                     flavor,
1845 	thread_state_t          new_state,
1846 	mach_msg_type_number_t  new_state_count,
1847 	thread_t                                *new_thread);
1848 
1849 kern_return_t
1850 thread_create_running(
1851 	task_t         task,
1852 	int                     flavor,
1853 	thread_state_t          new_state,
1854 	mach_msg_type_number_t  new_state_count,
1855 	thread_t                                *new_thread)
1856 {
1857 	return thread_create_running_internal2(
1858 		task, flavor, new_state, new_state_count,
1859 		new_thread, FALSE);
1860 }
1861 
1862 kern_return_t
1863 thread_create_running_from_user(
1864 	task_t         task,
1865 	int                     flavor,
1866 	thread_state_t          new_state,
1867 	mach_msg_type_number_t  new_state_count,
1868 	thread_t                                *new_thread)
1869 {
1870 	return thread_create_running_internal2(
1871 		task, flavor, new_state, new_state_count,
1872 		new_thread, TRUE);
1873 }
1874 
1875 kern_return_t
1876 thread_create_workq_waiting(
1877 	task_t              task,
1878 	thread_continue_t   continuation,
1879 	thread_t            *new_thread)
1880 {
1881 	/*
1882 	 * Create the thread, but don't pin its control port just yet, in case someone
1883 	 * calls task_threads() and deallocates the pinned port before the kernel
1884 	 * copyout happens, which would result in a pinned-port guard exception.
1885 	 * Instead, pin and copy out atomically during workq_setup_and_run().
1886 	 */
1887 	int options = TH_OPTION_NOSUSP | TH_OPTION_WORKQ;
1888 	return thread_create_waiting_internal(task, continuation, NULL,
1889 	           kThreadWaitParkedWorkQueue, options, new_thread);
1890 }
1891 
1892 /*
1893  *	kernel_thread_create:
1894  *
1895  *	Create a thread in the kernel task
1896  *	to execute in kernel context.
1897  */
1898 kern_return_t
1899 kernel_thread_create(
1900 	thread_continue_t       continuation,
1901 	void                            *parameter,
1902 	integer_t                       priority,
1903 	thread_t                        *new_thread)
1904 {
1905 	kern_return_t           result;
1906 	thread_t                        thread;
1907 	task_t                          task = kernel_task;
1908 
1909 	result = thread_create_internal(task, priority, continuation, parameter,
1910 	    TH_OPTION_NONE, &thread);
1911 	if (result != KERN_SUCCESS) {
1912 		return result;
1913 	}
1914 
1915 	task_unlock(task);
1916 	lck_mtx_unlock(&tasks_threads_lock);
1917 
1918 	stack_alloc(thread);
1919 	assert(thread->kernel_stack != 0);
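	/*
	 * On macOS the assignment below is unconditional, so every kernel
	 * thread keeps its initial stack reserved; on other targets only
	 * threads created above BASEPRI_KERNEL retain a reserved stack.
	 */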
1920 #if !defined(XNU_TARGET_OS_OSX)
1921 	if (priority > BASEPRI_KERNEL)
1922 #endif
1923 	thread->reserved_stack = thread->kernel_stack;
1924 
1925 	if (debug_task & 1) {
1926 		kprintf("kernel_thread_create: thread = %p continuation = %p\n", thread, continuation);
1927 	}
1928 	*new_thread = thread;
1929 
1930 	return result;
1931 }
1932 
1933 kern_return_t
1934 kernel_thread_start_priority(
1935 	thread_continue_t       continuation,
1936 	void                            *parameter,
1937 	integer_t                       priority,
1938 	thread_t                        *new_thread)
1939 {
1940 	kern_return_t   result;
1941 	thread_t                thread;
1942 
1943 	result = kernel_thread_create(continuation, parameter, priority, &thread);
1944 	if (result != KERN_SUCCESS) {
1945 		return result;
1946 	}
1947 
1948 	*new_thread = thread;
1949 
1950 	thread_mtx_lock(thread);
1951 	thread_start(thread);
1952 	thread_mtx_unlock(thread);
1953 
1954 	return result;
1955 }
1956 
1957 kern_return_t
1958 kernel_thread_start(
1959 	thread_continue_t       continuation,
1960 	void                            *parameter,
1961 	thread_t                        *new_thread)
1962 {
1963 	return kernel_thread_start_priority(continuation, parameter, -1, new_thread);
1964 }
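
/*
 * Example (illustrative sketch, not part of this file): a kernel subsystem
 * typically spawns a worker with kernel_thread_start() and drops the
 * returned reference once it no longer needs the handle. `my_worker` and
 * `my_context` are placeholder names for this sketch.
 *
 *	static void
 *	my_worker(void *parameter, wait_result_t wresult)
 *	{
 *		// ... perform work, then terminate the thread ...
 *	}
 *
 *	thread_t worker;
 *	if (kernel_thread_start(my_worker, my_context, &worker) == KERN_SUCCESS) {
 *		thread_deallocate(worker);
 *	}
 */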
1965 
1966 /* Separated into a helper function so it can be used by both THREAD_BASIC_INFO and THREAD_EXTENDED_INFO. */
1967 /* It is assumed that the thread is locked by the caller. */
1968 static void
1969 retrieve_thread_basic_info(thread_t thread, thread_basic_info_t basic_info)
1970 {
1971 	int     state, flags;
1972 
1973 	/* fill in info */
1974 
1975 	thread_read_times(thread, &basic_info->user_time,
1976 	    &basic_info->system_time, NULL);
1977 
1978 	/*
1979 	 *	Update lazy-evaluated scheduler info because someone wants it.
1980 	 */
1981 	if (SCHED(can_update_priority)(thread)) {
1982 		SCHED(update_priority)(thread);
1983 	}
1984 
1985 	basic_info->sleep_time = 0;
1986 
1987 	/*
1988 	 *	To calculate cpu_usage, first correct for timer rate,
1989 	 *	then for 5/8 ageing.  The correction factor [3/5] is
1990 	 *	(1/(5/8) - 1).
1991 	 */
1992 	basic_info->cpu_usage = 0;
1993 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
1994 	if (sched_tick_interval) {
1995 		basic_info->cpu_usage = (integer_t)(((uint64_t)thread->cpu_usage
1996 		    * TH_USAGE_SCALE) / sched_tick_interval);
1997 		basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
1998 	}
1999 #endif
2000 
2001 	if (basic_info->cpu_usage > TH_USAGE_SCALE) {
2002 		basic_info->cpu_usage = TH_USAGE_SCALE;
2003 	}
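
	/*
	 * Worked example (assuming TH_USAGE_SCALE is 1000): a thread whose
	 * averaged cpu_usage equals half of sched_tick_interval scales to
	 * 500 above, and the 3/5 ageing correction then yields 300, i.e.
	 * 30% in the fixed-point scale reported to callers.
	 */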
2004 
2005 	basic_info->policy = ((thread->sched_mode == TH_MODE_TIMESHARE)?
2006 	    POLICY_TIMESHARE: POLICY_RR);
2007 
2008 	flags = 0;
2009 	if (thread->options & TH_OPT_IDLE_THREAD) {
2010 		flags |= TH_FLAGS_IDLE;
2011 	}
2012 
2013 	if (thread->options & TH_OPT_GLOBAL_FORCED_IDLE) {
2014 		flags |= TH_FLAGS_GLOBAL_FORCED_IDLE;
2015 	}
2016 
2017 	if (!thread->kernel_stack) {
2018 		flags |= TH_FLAGS_SWAPPED;
2019 	}
2020 
2021 	state = 0;
2022 	if (thread->state & TH_TERMINATE) {
2023 		state = TH_STATE_HALTED;
2024 	} else if (thread->state & TH_RUN) {
2025 		state = TH_STATE_RUNNING;
2026 	} else if (thread->state & TH_UNINT) {
2027 		state = TH_STATE_UNINTERRUPTIBLE;
2028 	} else if (thread->state & TH_SUSP) {
2029 		state = TH_STATE_STOPPED;
2030 	} else if (thread->state & TH_WAIT) {
2031 		state = TH_STATE_WAITING;
2032 	}
2033 
2034 	basic_info->run_state = state;
2035 	basic_info->flags = flags;
2036 
2037 	basic_info->suspend_count = thread->user_stop_count;
2038 
2039 	return;
2040 }
2041 
2042 kern_return_t
2043 thread_info_internal(
2044 	thread_t                thread,
2045 	thread_flavor_t                 flavor,
2046 	thread_info_t                   thread_info_out,        /* ptr to OUT array */
2047 	mach_msg_type_number_t  *thread_info_count)     /*IN/OUT*/
2048 {
2049 	spl_t   s;
2050 
2051 	if (thread == THREAD_NULL) {
2052 		return KERN_INVALID_ARGUMENT;
2053 	}
2054 
2055 	if (flavor == THREAD_BASIC_INFO) {
2056 		if (*thread_info_count < THREAD_BASIC_INFO_COUNT) {
2057 			return KERN_INVALID_ARGUMENT;
2058 		}
2059 
2060 		s = splsched();
2061 		thread_lock(thread);
2062 
2063 		retrieve_thread_basic_info(thread, (thread_basic_info_t) thread_info_out);
2064 
2065 		thread_unlock(thread);
2066 		splx(s);
2067 
2068 		*thread_info_count = THREAD_BASIC_INFO_COUNT;
2069 
2070 		return KERN_SUCCESS;
2071 	} else if (flavor == THREAD_IDENTIFIER_INFO) {
2072 		thread_identifier_info_t        identifier_info;
2073 
2074 		if (*thread_info_count < THREAD_IDENTIFIER_INFO_COUNT) {
2075 			return KERN_INVALID_ARGUMENT;
2076 		}
2077 
2078 		identifier_info = __IGNORE_WCASTALIGN((thread_identifier_info_t)thread_info_out);
2079 
2080 		s = splsched();
2081 		thread_lock(thread);
2082 
2083 		identifier_info->thread_id = thread->thread_id;
2084 		identifier_info->thread_handle = thread->machine.cthread_self;
2085 		identifier_info->dispatch_qaddr = thread_dispatchqaddr(thread);
2086 
2087 		thread_unlock(thread);
2088 		splx(s);
2089 		return KERN_SUCCESS;
2090 	} else if (flavor == THREAD_SCHED_TIMESHARE_INFO) {
2091 		policy_timeshare_info_t         ts_info;
2092 
2093 		if (*thread_info_count < POLICY_TIMESHARE_INFO_COUNT) {
2094 			return KERN_INVALID_ARGUMENT;
2095 		}
2096 
2097 		ts_info = (policy_timeshare_info_t)thread_info_out;
2098 
2099 		s = splsched();
2100 		thread_lock(thread);
2101 
2102 		if (thread->sched_mode != TH_MODE_TIMESHARE) {
2103 			thread_unlock(thread);
2104 			splx(s);
2105 			return KERN_INVALID_POLICY;
2106 		}
2107 
2108 		ts_info->depressed = (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) != 0;
2109 		if (ts_info->depressed) {
2110 			ts_info->base_priority = DEPRESSPRI;
2111 			ts_info->depress_priority = thread->base_pri;
2112 		} else {
2113 			ts_info->base_priority = thread->base_pri;
2114 			ts_info->depress_priority = -1;
2115 		}
2116 
2117 		ts_info->cur_priority = thread->sched_pri;
2118 		ts_info->max_priority = thread->max_priority;
2119 
2120 		thread_unlock(thread);
2121 		splx(s);
2122 
2123 		*thread_info_count = POLICY_TIMESHARE_INFO_COUNT;
2124 
2125 		return KERN_SUCCESS;
2126 	} else if (flavor == THREAD_SCHED_FIFO_INFO) {
2127 		if (*thread_info_count < POLICY_FIFO_INFO_COUNT) {
2128 			return KERN_INVALID_ARGUMENT;
2129 		}
2130 
2131 		return KERN_INVALID_POLICY;
2132 	} else if (flavor == THREAD_SCHED_RR_INFO) {
2133 		policy_rr_info_t                        rr_info;
2134 		uint32_t quantum_time;
2135 		uint64_t quantum_ns;
2136 
2137 		if (*thread_info_count < POLICY_RR_INFO_COUNT) {
2138 			return KERN_INVALID_ARGUMENT;
2139 		}
2140 
2141 		rr_info = (policy_rr_info_t) thread_info_out;
2142 
2143 		s = splsched();
2144 		thread_lock(thread);
2145 
2146 		if (thread->sched_mode == TH_MODE_TIMESHARE) {
2147 			thread_unlock(thread);
2148 			splx(s);
2149 
2150 			return KERN_INVALID_POLICY;
2151 		}
2152 
2153 		rr_info->depressed = (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) != 0;
2154 		if (rr_info->depressed) {
2155 			rr_info->base_priority = DEPRESSPRI;
2156 			rr_info->depress_priority = thread->base_pri;
2157 		} else {
2158 			rr_info->base_priority = thread->base_pri;
2159 			rr_info->depress_priority = -1;
2160 		}
2161 
2162 		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
2163 		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);
2164 
2165 		rr_info->max_priority = thread->max_priority;
2166 		rr_info->quantum = (uint32_t)(quantum_ns / 1000 / 1000);
2167 
2168 		thread_unlock(thread);
2169 		splx(s);
2170 
2171 		*thread_info_count = POLICY_RR_INFO_COUNT;
2172 
2173 		return KERN_SUCCESS;
2174 	} else if (flavor == THREAD_EXTENDED_INFO) {
2175 		thread_basic_info_data_t        basic_info;
2176 		thread_extended_info_t          extended_info = __IGNORE_WCASTALIGN((thread_extended_info_t)thread_info_out);
2177 
2178 		if (*thread_info_count < THREAD_EXTENDED_INFO_COUNT) {
2179 			return KERN_INVALID_ARGUMENT;
2180 		}
2181 
2182 		s = splsched();
2183 		thread_lock(thread);
2184 
2185 		/* NOTE: This mimics fill_taskthreadinfo(), which is the function used by proc_pidinfo() for
2186 		 * the PROC_PIDTHREADINFO flavor (which can't be used on corpses)
2187 		 */
2188 		retrieve_thread_basic_info(thread, &basic_info);
2189 		extended_info->pth_user_time = (((uint64_t)basic_info.user_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.user_time.microseconds * NSEC_PER_USEC));
2190 		extended_info->pth_system_time = (((uint64_t)basic_info.system_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.system_time.microseconds * NSEC_PER_USEC));
2191 
2192 		extended_info->pth_cpu_usage = basic_info.cpu_usage;
2193 		extended_info->pth_policy = basic_info.policy;
2194 		extended_info->pth_run_state = basic_info.run_state;
2195 		extended_info->pth_flags = basic_info.flags;
2196 		extended_info->pth_sleep_time = basic_info.sleep_time;
2197 		extended_info->pth_curpri = thread->sched_pri;
2198 		extended_info->pth_priority = thread->base_pri;
2199 		extended_info->pth_maxpriority = thread->max_priority;
2200 
2201 		bsd_getthreadname(get_bsdthread_info(thread), extended_info->pth_name);
2202 
2203 		thread_unlock(thread);
2204 		splx(s);
2205 
2206 		*thread_info_count = THREAD_EXTENDED_INFO_COUNT;
2207 
2208 		return KERN_SUCCESS;
2209 	} else if (flavor == THREAD_DEBUG_INFO_INTERNAL) {
2210 #if DEVELOPMENT || DEBUG
2211 		thread_debug_info_internal_t dbg_info;
2212 		if (*thread_info_count < THREAD_DEBUG_INFO_INTERNAL_COUNT) {
2213 			return KERN_NOT_SUPPORTED;
2214 		}
2215 
2216 		if (thread_info_out == NULL) {
2217 			return KERN_INVALID_ARGUMENT;
2218 		}
2219 
2220 		dbg_info = __IGNORE_WCASTALIGN((thread_debug_info_internal_t)thread_info_out);
2221 		dbg_info->page_creation_count = thread->t_page_creation_count;
2222 
2223 		*thread_info_count = THREAD_DEBUG_INFO_INTERNAL_COUNT;
2224 		return KERN_SUCCESS;
2225 #endif /* DEVELOPMENT || DEBUG */
2226 		return KERN_NOT_SUPPORTED;
2227 	}
2228 
2229 	return KERN_INVALID_ARGUMENT;
2230 }
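
/*
 * Example (userspace sketch, for context): this routine is reached through
 * the thread_info() MIG call, e.g. for the basic-info flavor:
 *
 *	thread_basic_info_data_t info;
 *	mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;
 *	kern_return_t kr = thread_info(mach_thread_self(), THREAD_BASIC_INFO,
 *	    (thread_info_t)&info, &count);
 *
 * On success, count reflects the amount of data actually returned.
 */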
2231 
2232 static void
2233 _convert_mach_to_time_value(uint64_t time_mach, time_value_t *time)
2234 {
2235 	clock_sec_t  secs;
2236 	clock_usec_t usecs;
2237 	absolutetime_to_microtime(time_mach, &secs, &usecs);
2238 	time->seconds = (typeof(time->seconds))secs;
2239 	time->microseconds = usecs;
2240 }
2241 
2242 void
2243 thread_read_times(
2244 	thread_t      thread,
2245 	time_value_t *user_time,
2246 	time_value_t *system_time,
2247 	time_value_t *runnable_time)
2248 {
2249 	if (user_time && system_time) {
2250 		struct recount_times_mach times = recount_thread_times(thread);
2251 		_convert_mach_to_time_value(times.rtm_user, user_time);
2252 		_convert_mach_to_time_value(times.rtm_system, system_time);
2253 	}
2254 
2255 	if (runnable_time) {
2256 		uint64_t runnable_time_mach = timer_grab(&thread->runnable_timer);
2257 		_convert_mach_to_time_value(runnable_time_mach, runnable_time);
2258 	}
2259 }
2260 
2261 uint64_t
2262 thread_get_runtime_self(void)
2263 {
2264 	/*
2265 	 * Must be guaranteed to stay on the same CPU and not be updated by the
2266 	 * scheduler.
2267 	 */
2268 	boolean_t interrupt_state = ml_set_interrupts_enabled(FALSE);
2269 	uint64_t time_mach = recount_current_thread_time_mach();
2270 	ml_set_interrupts_enabled(interrupt_state);
2271 	return time_mach;
2272 }
2273 
2274 /*
2275  *	thread_wire_internal:
2276  *
2277  *	Specify that the target thread must always be able
2278  *	to run and to allocate memory.
2279  */
2280 kern_return_t
2281 thread_wire_internal(
2282 	host_priv_t             host_priv,
2283 	thread_t                thread,
2284 	boolean_t               wired,
2285 	boolean_t               *prev_state)
2286 {
2287 	if (host_priv == NULL || thread != current_thread()) {
2288 		return KERN_INVALID_ARGUMENT;
2289 	}
2290 
2291 	if (prev_state) {
2292 		*prev_state = (thread->options & TH_OPT_VMPRIV) != 0;
2293 	}
2294 
2295 	if (wired) {
2296 		if (!(thread->options & TH_OPT_VMPRIV)) {
2297 			vm_page_free_reserve(1); /* XXX */
2298 		}
2299 		thread->options |= TH_OPT_VMPRIV;
2300 	} else {
2301 		if (thread->options & TH_OPT_VMPRIV) {
2302 			vm_page_free_reserve(-1); /* XXX */
2303 		}
2304 		thread->options &= ~TH_OPT_VMPRIV;
2305 	}
2306 
2307 	return KERN_SUCCESS;
2308 }
2309 
2310 
2311 /*
2312  *	thread_wire:
2313  *
2314  *	User-api wrapper for thread_wire_internal()
2315  */
2316 kern_return_t
2317 thread_wire(
2318 	host_priv_t     host_priv,
2319 	thread_t        thread,
2320 	boolean_t       wired)
2321 {
2322 	return thread_wire_internal(host_priv, thread, wired, NULL);
2323 }
2324 
2325 boolean_t
2326 is_external_pageout_thread(void)
2327 {
2328 	return current_thread() == pgo_iothread_external_state.pgo_iothread;
2329 }
2330 
2331 boolean_t
2332 is_vm_privileged(void)
2333 {
2334 	return current_thread()->options & TH_OPT_VMPRIV ? TRUE : FALSE;
2335 }
2336 
2337 boolean_t
2338 set_vm_privilege(boolean_t privileged)
2339 {
2340 	boolean_t       was_vmpriv;
2341 
2342 	if (current_thread()->options & TH_OPT_VMPRIV) {
2343 		was_vmpriv = TRUE;
2344 	} else {
2345 		was_vmpriv = FALSE;
2346 	}
2347 
2348 	if (privileged != FALSE) {
2349 		current_thread()->options |= TH_OPT_VMPRIV;
2350 	} else {
2351 		current_thread()->options &= ~TH_OPT_VMPRIV;
2352 	}
2353 
2354 	return was_vmpriv;
2355 }
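
/*
 * Example (illustrative sketch): callers generally save the previous state
 * and restore it afterwards, so nested uses compose correctly:
 *
 *	boolean_t was_privileged = set_vm_privilege(TRUE);
 *	// ... allocations that must not block behind the pageout path ...
 *	set_vm_privilege(was_privileged);
 */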
2356 
2357 void
2358 thread_floor_boost_set_promotion_locked(thread_t thread)
2359 {
2360 	assert(thread->priority_floor_count > 0);
2361 
2362 	if (!(thread->sched_flags & TH_SFLAG_FLOOR_PROMOTED)) {
2363 		sched_thread_promote_reason(thread, TH_SFLAG_FLOOR_PROMOTED, 0);
2364 	}
2365 }
2366 
2367 /*!  @function thread_priority_floor_start
2368  *   @abstract boost the current thread priority to floor.
2369  *   @discussion Increase the priority of the current thread to at least MINPRI_FLOOR.
2370  *       The boost will be maintained until a corresponding thread_priority_floor_end()
2371  *       is called. Every call to thread_priority_floor_start() needs to have a corresponding
2372  *       call to thread_priority_floor_end() from the same thread.
2373  *       No thread can return to userspace before calling thread_priority_floor_end().
2374  *
2375  *       NOTE: avoid using this function. Prefer gate_t or sleep_with_inheritor()
2376  *       instead.
2377  *   @result a token to be given to the corresponding thread_priority_floor_end()
2378  */
2379 thread_pri_floor_t
2380 thread_priority_floor_start(void)
2381 {
2382 	thread_pri_floor_t ret;
2383 	thread_t thread = current_thread();
2384 	__assert_only uint16_t prev_priority_floor_count;
2385 
2386 	assert(thread->priority_floor_count < UINT16_MAX);
2387 	prev_priority_floor_count = thread->priority_floor_count++;
2388 #if MACH_ASSERT
2389 	/*
2390 	 * Set the AST to check that priority_floor_count
2391 	 * drops back to zero before the thread
2392 	 * returns to userspace.
2393 	 * Set it only once, on the first increment.
2394 	 */
2395 	if (prev_priority_floor_count == 0) {
2396 		act_set_debug_assert();
2397 	}
2398 #endif
2399 
2400 	ret.thread = thread;
2401 	return ret;
2402 }
2403 
2404 /*!  @function thread_priority_floor_end
2405  *   @abstract ends the floor boost.
2406  *   @param token the token obtained from thread_priority_floor_start()
2407  *   @discussion ends the priority floor boost started with thread_priority_floor_start()
2408  */
2409 void
2410 thread_priority_floor_end(thread_pri_floor_t *token)
2411 {
2412 	thread_t thread = current_thread();
2413 
2414 	assert(thread->priority_floor_count > 0);
2415 	assertf(token->thread == thread, "thread_priority_floor_end called from a different thread from thread_priority_floor_start %p %p", thread, token->thread);
2416 
2417 	if ((thread->priority_floor_count-- == 1) && (thread->sched_flags & TH_SFLAG_FLOOR_PROMOTED)) {
2418 		spl_t s = splsched();
2419 		thread_lock(thread);
2420 
2421 		if (thread->sched_flags & TH_SFLAG_FLOOR_PROMOTED) {
2422 			sched_thread_unpromote_reason(thread, TH_SFLAG_FLOOR_PROMOTED, 0);
2423 		}
2424 
2425 		thread_unlock(thread);
2426 		splx(s);
2427 	}
2428 
2429 	token->thread = NULL;
2430 }
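
/*
 * Example (illustrative sketch): the token from thread_priority_floor_start()
 * must be returned to thread_priority_floor_end() on the same thread:
 *
 *	thread_pri_floor_t token = thread_priority_floor_start();
 *	// ... section that must run at or above MINPRI_FLOOR ...
 *	thread_priority_floor_end(&token);
 */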
2431 
2432 /*
2433  * XXX assuming current thread only, for now...
2434  */
2435 void
2436 thread_guard_violation(thread_t thread,
2437     mach_exception_data_type_t code, mach_exception_data_type_t subcode, boolean_t fatal)
2438 {
2439 	assert(thread == current_thread());
2440 
2441 	/* Don't set up the AST for kernel threads; this check is needed to ensure
2442 	 * that the guard_exc_* fields in the thread structure are set only by the
2443 	 * current thread and therefore don't require a lock.
2444 	 */
2445 	if (get_threadtask(thread) == kernel_task) {
2446 		return;
2447 	}
2448 
2449 	assert(EXC_GUARD_DECODE_GUARD_TYPE(code));
2450 
2451 	/*
2452 	 * Use the saved state area of the thread structure
2453 	 * to store all info required to handle the AST when
2454 	 * returning to userspace. It's possible that there is
2455 	 * already a pending guard exception. If it's non-fatal,
2456 	 * it can only be over-written by a fatal exception code.
2457 	 */
2458 	if (thread->guard_exc_info.code && (thread->guard_exc_fatal || !fatal)) {
2459 		return;
2460 	}
2461 
2462 	thread->guard_exc_info.code = code;
2463 	thread->guard_exc_info.subcode = subcode;
2464 	thread->guard_exc_fatal = fatal ? 1 : 0;
2465 
2466 	spl_t s = splsched();
2467 	thread_ast_set(thread, AST_GUARD);
2468 	ast_propagate(thread);
2469 	splx(s);
2470 }
2471 
2472 #if CONFIG_DEBUG_SYSCALL_REJECTION
2473 extern void rejected_syscall_guard_ast(thread_t __unused t, mach_exception_data_type_t code, mach_exception_data_type_t subcode);
2474 #endif /* CONFIG_DEBUG_SYSCALL_REJECTION */
2475 
2476 /*
2477  *	guard_ast:
2478  *
2479  *	Handle AST_GUARD for a thread. This routine looks at the
2480  *	state saved in the thread structure to determine the cause
2481  *	of this exception. Based on this value, it invokes the
2482  *	appropriate routine which determines other exception related
2483  *	info and raises the exception.
2484  */
2485 void
2486 guard_ast(thread_t t)
2487 {
2488 	const mach_exception_data_type_t
2489 	    code = t->guard_exc_info.code,
2490 	    subcode = t->guard_exc_info.subcode;
2491 
2492 	t->guard_exc_info.code = 0;
2493 	t->guard_exc_info.subcode = 0;
2494 	t->guard_exc_fatal = 0;
2495 
2496 	switch (EXC_GUARD_DECODE_GUARD_TYPE(code)) {
2497 	case GUARD_TYPE_NONE:
2498 		/* lingering AST_GUARD on the processor? */
2499 		break;
2500 	case GUARD_TYPE_MACH_PORT:
2501 		mach_port_guard_ast(t, code, subcode);
2502 		break;
2503 	case GUARD_TYPE_FD:
2504 		fd_guard_ast(t, code, subcode);
2505 		break;
2506 #if CONFIG_VNGUARD
2507 	case GUARD_TYPE_VN:
2508 		vn_guard_ast(t, code, subcode);
2509 		break;
2510 #endif
2511 	case GUARD_TYPE_VIRT_MEMORY:
2512 		virt_memory_guard_ast(t, code, subcode);
2513 		break;
2514 #if CONFIG_DEBUG_SYSCALL_REJECTION
2515 	case GUARD_TYPE_REJECTED_SC:
2516 		rejected_syscall_guard_ast(t, code, subcode);
2517 		break;
2518 #endif /* CONFIG_DEBUG_SYSCALL_REJECTION */
2519 	default:
2520 		panic("guard_exc_info %llx %llx", code, subcode);
2521 	}
2522 }
2523 
2524 static void
2525 thread_cputime_callback(int warning, __unused const void *arg0, __unused const void *arg1)
2526 {
2527 	if (warning == LEDGER_WARNING_ROSE_ABOVE) {
2528 #if CONFIG_TELEMETRY
2529 		/*
2530 		 * This thread is in danger of violating the CPU usage monitor. Enable telemetry
2531 		 * on the entire task so there are micro-stackshots available if and when
2532 		 * EXC_RESOURCE is triggered. We could have chosen to enable micro-stackshots
2533 		 * for this thread only; but now that this task is suspect, knowing what all of
2534 		 * its threads are up to will be useful.
2535 		 */
2536 		telemetry_task_ctl(current_task(), TF_CPUMON_WARNING, 1);
2537 #endif
2538 		return;
2539 	}
2540 
2541 #if CONFIG_TELEMETRY
2542 	/*
2543 	 * If the balance has dipped below the warning level (LEDGER_WARNING_DIPPED_BELOW) or
2544 	 * exceeded the limit, turn telemetry off for the task.
2545 	 */
2546 	telemetry_task_ctl(current_task(), TF_CPUMON_WARNING, 0);
2547 #endif
2548 
2549 	if (warning == 0) {
2550 		SENDING_NOTIFICATION__THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU();
2551 	}
2552 }
2553 
2554 void __attribute__((noinline))
2555 SENDING_NOTIFICATION__THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU(void)
2556 {
2557 	int          pid                = 0;
2558 	task_t           task                           = current_task();
2559 	thread_t     thread             = current_thread();
2560 	uint64_t     tid                = thread->thread_id;
2561 	const char       *procname          = "unknown";
2562 	time_value_t thread_total_time  = {0, 0};
2563 	time_value_t thread_system_time;
2564 	time_value_t thread_user_time;
2565 	int          action;
2566 	uint8_t      percentage;
2567 	uint32_t     usage_percent = 0;
2568 	uint32_t     interval_sec;
2569 	uint64_t     interval_ns;
2570 	uint64_t     balance_ns;
2571 	boolean_t        fatal = FALSE;
2572 	boolean_t        send_exc_resource = TRUE; /* in addition to RESOURCE_NOTIFY */
2573 	kern_return_t   kr;
2574 
2575 #ifdef EXC_RESOURCE_MONITORS
2576 	mach_exception_data_type_t      code[EXCEPTION_CODE_MAX];
2577 #endif /* EXC_RESOURCE_MONITORS */
2578 	struct ledger_entry_info        lei;
2579 
2580 	assert(thread->t_threadledger != LEDGER_NULL);
2581 
2582 	/*
2583 	 * Extract the fatal bit and suspend the monitor (which clears the bit).
2584 	 */
2585 	task_lock(task);
2586 	if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_CPUMON) {
2587 		fatal = TRUE;
2588 		send_exc_resource = TRUE;
2589 	}
2590 	/* Only one thread can be here at a time.  Whichever makes it through
2591 	 *  first will successfully suspend the monitor and proceed to send the
2592 	 *  notification.  Other threads will get an error trying to suspend the
2593 	 *  monitor and give up on sending the notification.  In the first release,
2594 	 *  the monitor won't be resumed for a number of seconds, but we may
2595 	 *  eventually need to handle low-latency resume.
2596 	 */
2597 	kr = task_suspend_cpumon(task);
2598 	task_unlock(task);
2599 	if (kr == KERN_INVALID_ARGUMENT) {
2600 		return;
2601 	}
2602 
2603 #ifdef MACH_BSD
2604 	pid = proc_selfpid();
2605 	void *bsd_info = get_bsdtask_info(task);
2606 	if (bsd_info != NULL) {
2607 		procname = proc_name_address(bsd_info);
2608 	}
2609 #endif
2610 
2611 	thread_get_cpulimit(&action, &percentage, &interval_ns);
2612 
2613 	interval_sec = (uint32_t)(interval_ns / NSEC_PER_SEC);
2614 
2615 	thread_read_times(thread, &thread_user_time, &thread_system_time, NULL);
2616 	time_value_add(&thread_total_time, &thread_user_time);
2617 	time_value_add(&thread_total_time, &thread_system_time);
2618 	ledger_get_entry_info(thread->t_threadledger, thread_ledgers.cpu_time, &lei);
2619 
2620 	/* credit/debit/balance/limit are in absolute time units;
2621 	 *  the refill info is in nanoseconds. */
2622 	absolutetime_to_nanoseconds(lei.lei_balance, &balance_ns);
2623 	if (lei.lei_last_refill > 0) {
2624 		usage_percent = (uint32_t)((balance_ns * 100ULL) / lei.lei_last_refill);
2625 	}
2626 
2627 	/* TODO: show task total runtime (via TASK_ABSOLUTETIME_INFO)? */
2628 	printf("process %s[%d] thread %llu caught burning CPU! It used more than %d%% CPU over %u seconds\n",
2629 	    procname, pid, tid, percentage, interval_sec);
2630 	printf("  (actual recent usage: %d%% over ~%llu seconds)\n",
2631 	    usage_percent, (lei.lei_last_refill + NSEC_PER_SEC / 2) / NSEC_PER_SEC);
2632 	printf("  Thread lifetime cpu usage %d.%06ds, (%d.%06d user, %d.%06d sys)\n",
2633 	    thread_total_time.seconds, thread_total_time.microseconds,
2634 	    thread_user_time.seconds, thread_user_time.microseconds,
2635 	    thread_system_time.seconds, thread_system_time.microseconds);
2636 	printf("  Ledger balance: %lld; mabs credit: %lld; mabs debit: %lld\n",
2637 	    lei.lei_balance, lei.lei_credit, lei.lei_debit);
2638 	printf("  mabs limit: %llu; mabs period: %llu ns; last refill: %llu ns%s.\n",
2639 	    lei.lei_limit, lei.lei_refill_period, lei.lei_last_refill,
2640 	    (fatal ? " [fatal violation]" : ""));
2641 
2642 	/*
2643 	 *  For now, send RESOURCE_NOTIFY in parallel with EXC_RESOURCE.  Once
2644 	 *  we have logging parity, we will stop sending EXC_RESOURCE (24508922).
2645 	 */
2646 
2647 	/* RESOURCE_NOTIFY MIG specifies nanoseconds of CPU time */
2648 	lei.lei_balance = balance_ns;
2649 	absolutetime_to_nanoseconds(lei.lei_limit, &lei.lei_limit);
2650 	trace_resource_violation(RMON_CPUUSAGE_VIOLATED, &lei);
2651 	kr = send_resource_violation(send_cpu_usage_violation, task, &lei,
2652 	    fatal ? kRNFatalLimitFlag : 0);
2653 	if (kr) {
2654 		printf("send_resource_violation(CPU usage, ...): error %#x\n", kr);
2655 	}
2656 
2657 #ifdef EXC_RESOURCE_MONITORS
2658 	if (send_exc_resource) {
2659 		if (disable_exc_resource) {
2660 			printf("process %s[%d] thread %llu caught burning CPU! "
2661 			    "EXC_RESOURCE%s suppressed by a boot-arg\n",
2662 			    procname, pid, tid, fatal ? " (and termination)" : "");
2663 			return;
2664 		}
2665 
2666 		if (disable_exc_resource_during_audio && audio_active) {
2667 			printf("process %s[%d] thread %llu caught burning CPU! "
2668 			    "EXC_RESOURCE & termination suppressed due to audio playback\n",
2669 			    procname, pid, tid);
2670 			return;
2671 		}
2672 	}
2673 
2674 
2675 	if (send_exc_resource) {
2676 		code[0] = code[1] = 0;
2677 		EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_CPU);
2678 		if (fatal) {
2679 			EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_CPU_MONITOR_FATAL);
2680 		} else {
2681 			EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_CPU_MONITOR);
2682 		}
2683 		EXC_RESOURCE_CPUMONITOR_ENCODE_INTERVAL(code[0], interval_sec);
2684 		EXC_RESOURCE_CPUMONITOR_ENCODE_PERCENTAGE(code[0], percentage);
2685 		EXC_RESOURCE_CPUMONITOR_ENCODE_PERCENTAGE(code[1], usage_percent);
2686 		exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
2687 	}
2688 #endif /* EXC_RESOURCE_MONITORS */
2689 
2690 	if (fatal) {
2691 #if CONFIG_JETSAM
2692 		jetsam_on_ledger_cpulimit_exceeded();
2693 #else
2694 		task_terminate_internal(task);
2695 #endif
2696 	}
2697 }
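
/*
 * Worked example for the report above: with a 50% limit over a 1 s interval,
 * the ledger limit is 500 ms of CPU time. If the thread's balance reaches
 * 600 ms within a 1 s refill period (lei_last_refill), then
 * usage_percent = (600,000,000 ns * 100) / 1,000,000,000 ns = 60, reported
 * alongside the configured 50% in the EXC_RESOURCE code.
 */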
2698 
2699 bool os_variant_has_internal_diagnostics(const char *subsystem);
2700 
2701 #if DEVELOPMENT || DEBUG
2702 
2703 void __attribute__((noinline))
2704 SENDING_NOTIFICATION__TASK_HAS_TOO_MANY_THREADS(task_t task, int thread_count)
2705 {
2706 	mach_exception_data_type_t code[EXCEPTION_CODE_MAX] = {0};
2707 	int pid = task_pid(task);
2708 	char procname[MAXCOMLEN + 1] = "unknown";
2709 
2710 	if (pid == 1) {
2711 		/*
2712 		 * Cannot suspend launchd
2713 		 */
2714 		return;
2715 	}
2716 
2717 	proc_name(pid, procname, sizeof(procname));
2718 
2719 	/*
2720 	 * Skip all checks for testing when exc_resource_threads_enabled is overridden.
2721 	 */
2722 	if (exc_resource_threads_enabled == 2) {
2723 		goto skip_checks;
2724 	}
2725 
2726 	if (disable_exc_resource) {
2727 		printf("process %s[%d] crossed thread count high watermark (%d), EXC_RESOURCE "
2728 		    "suppressed by a boot-arg.\n", procname, pid, thread_count);
2729 		return;
2730 	}
2731 
2732 	if (!os_variant_has_internal_diagnostics("com.apple.xnu")) {
2733 		printf("process %s[%d] crossed thread count high watermark (%d), EXC_RESOURCE "
2734 		    "suppressed, internal diagnostics disabled.\n", procname, pid, thread_count);
2735 		return;
2736 	}
2737 
2738 	if (disable_exc_resource_during_audio && audio_active) {
2739 		printf("process %s[%d] crossed thread count high watermark (%d), EXC_RESOURCE "
2740 		    "suppressed due to audio playback.\n", procname, pid, thread_count);
2741 		return;
2742 	}
2743 
2744 	if (!exc_via_corpse_forking) {
2745 		printf("process %s[%d] crossed thread count high watermark (%d), EXC_RESOURCE "
2746 		    "suppressed due to corpse forking being disabled.\n", procname, pid,
2747 		    thread_count);
2748 		return;
2749 	}
2750 
2751 skip_checks:
2752 	printf("process %s[%d] crossed thread count high watermark (%d), sending "
2753 	    "EXC_RESOURCE\n", procname, pid, thread_count);
2754 
2755 	EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_THREADS);
2756 	EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_THREADS_HIGH_WATERMARK);
2757 	EXC_RESOURCE_THREADS_ENCODE_THREADS(code[0], thread_count);
2758 
2759 	task_enqueue_exception_with_corpse(task, EXC_RESOURCE, code, EXCEPTION_CODE_MAX, NULL, FALSE);
2760 }
2761 #endif /* DEVELOPMENT || DEBUG */
2762 
2763 void
2764 thread_update_io_stats(thread_t thread, int size, int io_flags)
2765 {
2766 	task_t task = get_threadtask(thread);
2767 	int io_tier;
2768 
2769 	if (thread->thread_io_stats == NULL || task->task_io_stats == NULL) {
2770 		return;
2771 	}
2772 
2773 	if (io_flags & DKIO_READ) {
2774 		UPDATE_IO_STATS(thread->thread_io_stats->disk_reads, size);
2775 		UPDATE_IO_STATS_ATOMIC(task->task_io_stats->disk_reads, size);
2776 	}
2777 
2778 	if (io_flags & DKIO_META) {
2779 		UPDATE_IO_STATS(thread->thread_io_stats->metadata, size);
2780 		UPDATE_IO_STATS_ATOMIC(task->task_io_stats->metadata, size);
2781 	}
2782 
2783 	if (io_flags & DKIO_PAGING) {
2784 		UPDATE_IO_STATS(thread->thread_io_stats->paging, size);
2785 		UPDATE_IO_STATS_ATOMIC(task->task_io_stats->paging, size);
2786 	}
2787 
2788 	io_tier = ((io_flags & DKIO_TIER_MASK) >> DKIO_TIER_SHIFT);
2789 	assert(io_tier < IO_NUM_PRIORITIES);
2790 
2791 	UPDATE_IO_STATS(thread->thread_io_stats->io_priority[io_tier], size);
2792 	UPDATE_IO_STATS_ATOMIC(task->task_io_stats->io_priority[io_tier], size);
2793 
2794 	/* Update Total I/O Counts */
2795 	UPDATE_IO_STATS(thread->thread_io_stats->total_io, size);
2796 	UPDATE_IO_STATS_ATOMIC(task->task_io_stats->total_io, size);
2797 
2798 	if (!(io_flags & DKIO_READ)) {
2799 		DTRACE_IO3(physical_writes, struct task *, task, uint32_t, size, int, io_flags);
2800 		ledger_credit(task->ledger, task_ledgers.physical_writes, size);
2801 	}
2802 }
2803 
2804 static void
2805 init_thread_ledgers(void)
2806 {
2807 	ledger_template_t t;
2808 	int idx;
2809 
2810 	assert(thread_ledger_template == NULL);
2811 
2812 	if ((t = ledger_template_create("Per-thread ledger")) == NULL) {
2813 		panic("couldn't create thread ledger template");
2814 	}
2815 
2816 	if ((idx = ledger_entry_add(t, "cpu_time", "sched", "ns")) < 0) {
2817 		panic("couldn't create cpu_time entry for thread ledger template");
2818 	}
2819 
2820 	if (ledger_set_callback(t, idx, thread_cputime_callback, NULL, NULL) < 0) {
2821 		panic("couldn't set thread ledger callback for cpu_time entry");
2822 	}
2823 
2824 	thread_ledgers.cpu_time = idx;
2825 
2826 	ledger_template_complete(t);
2827 	thread_ledger_template = t;
2828 }
2829 
2830 /*
2831  * Returns the amount of (abs) CPU time that remains before the limit would be
2832  * hit or the amount of time left in the current interval, whichever is smaller.
2833  * This value changes as CPU time is consumed and the ledgers refilled.
2834  * Used to limit the quantum of a thread.
2835  */
2836 uint64_t
2837 thread_cpulimit_remaining(uint64_t now)
2838 {
2839 	thread_t thread = current_thread();
2840 
2841 	if ((thread->options &
2842 	    (TH_OPT_PROC_CPULIMIT | TH_OPT_PRVT_CPULIMIT)) == 0) {
2843 		return UINT64_MAX;
2844 	}
2845 
2846 	/* Amount of time left in the current interval. */
2847 	const uint64_t interval_remaining =
2848 	    ledger_get_interval_remaining(thread->t_threadledger, thread_ledgers.cpu_time, now);
2849 
2850 	/* Amount that can be spent until the limit is hit. */
2851 	const uint64_t remaining =
2852 	    ledger_get_remaining(thread->t_threadledger, thread_ledgers.cpu_time);
2853 
2854 	return MIN(interval_remaining, remaining);
2855 }
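
/*
 * Example: with a 25% limit over a 100 ms interval, a thread that has
 * consumed 20 ms of CPU 40 ms into the interval has 5 ms of budget left
 * but 60 ms of interval left, so its quantum is capped at
 * MIN(60 ms, 5 ms) = 5 ms (all values in mach absolute time units).
 */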
2856 
2857 /*
2858  * Returns true if a new interval should be started.
2859  */
2860 bool
2861 thread_cpulimit_interval_has_expired(uint64_t now)
2862 {
2863 	thread_t thread = current_thread();
2864 
2865 	if ((thread->options &
2866 	    (TH_OPT_PROC_CPULIMIT | TH_OPT_PRVT_CPULIMIT)) == 0) {
2867 		return false;
2868 	}
2869 
2870 	return ledger_get_interval_remaining(thread->t_threadledger,
2871 	           thread_ledgers.cpu_time, now) == 0;
2872 }
2873 
2874 /*
2875  * Balances the ledger and sets the last refill time to `now`.
2876  */
2877 void
2878 thread_cpulimit_restart(uint64_t now)
2879 {
2880 	thread_t thread = current_thread();
2881 
2882 	assert3u(thread->options & (TH_OPT_PROC_CPULIMIT | TH_OPT_PRVT_CPULIMIT), !=, 0);
2883 
2884 	ledger_restart(thread->t_threadledger, thread_ledgers.cpu_time, now);
2885 }
2886 
2887 /*
2888  * Returns currently applied CPU usage limit, or 0/0 if none is applied.
2889  */
2890 int
2891 thread_get_cpulimit(int *action, uint8_t *percentage, uint64_t *interval_ns)
2892 {
2893 	int64_t         abstime = 0;
2894 	uint64_t        limittime = 0;
2895 	thread_t        thread = current_thread();
2896 
2897 	*percentage  = 0;
2898 	*interval_ns = 0;
2899 	*action      = 0;
2900 
2901 	if (thread->t_threadledger == LEDGER_NULL) {
2902 		/*
2903 		 * This thread has no per-thread ledger, so it can't possibly
2904 		 * have a CPU limit applied.
2905 		 */
2906 		return KERN_SUCCESS;
2907 	}
2908 
2909 	ledger_get_period(thread->t_threadledger, thread_ledgers.cpu_time, interval_ns);
2910 	ledger_get_limit(thread->t_threadledger, thread_ledgers.cpu_time, &abstime);
2911 
2912 	if ((abstime == LEDGER_LIMIT_INFINITY) || (*interval_ns == 0)) {
2913 		/*
2914 		 * This thread's CPU time ledger has no period or limit; so it
2915 		 * doesn't have a CPU limit applied.
2916 		 */
2917 		return KERN_SUCCESS;
2918 	}
2919 
2920 	/*
2921 	 * This calculation is the converse to the one in thread_set_cpulimit().
2922 	 */
2923 	absolutetime_to_nanoseconds(abstime, &limittime);
2924 	*percentage = (uint8_t)((limittime * 100ULL) / *interval_ns);
2925 	assert(*percentage <= 100);
2926 
2927 	if (thread->options & TH_OPT_PROC_CPULIMIT) {
2928 		assert((thread->options & TH_OPT_PRVT_CPULIMIT) == 0);
2929 
2930 		*action = THREAD_CPULIMIT_BLOCK;
2931 	} else if (thread->options & TH_OPT_PRVT_CPULIMIT) {
2932 		assert((thread->options & TH_OPT_PROC_CPULIMIT) == 0);
2933 
2934 		*action = THREAD_CPULIMIT_EXCEPTION;
2935 	} else {
2936 		*action = THREAD_CPULIMIT_DISABLE;
2937 	}
2938 
2939 	return KERN_SUCCESS;
2940 }
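
/*
 * Worked example of the converse calculation: a ledger limit equivalent to
 * 500 ms over a 1 s period yields
 * *percentage = (500,000,000 * 100) / 1,000,000,000 = 50.
 */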
2941 
2942 /*
2943  * Set CPU usage limit on a thread.
2944  */
2945 int
2946 thread_set_cpulimit(int action, uint8_t percentage, uint64_t interval_ns)
2947 {
2948 	thread_t        thread = current_thread();
2949 	ledger_t        l;
2950 	uint64_t        limittime = 0;
2951 	uint64_t        abstime = 0;
2952 
2953 	assert(percentage <= 100);
2954 	assert(percentage > 0 || action == THREAD_CPULIMIT_DISABLE);
2955 
2956 	/*
2957 	 * Disallow any change to the CPU limit if the TH_OPT_FORCED_LEDGER
2958 	 * flag is set.
2959 	 */
2960 	if ((thread->options & TH_OPT_FORCED_LEDGER) != 0) {
2961 		return KERN_FAILURE;
2962 	}
2963 
2964 	if (action == THREAD_CPULIMIT_DISABLE) {
2965 		/*
2966 		 * Remove CPU limit, if any exists.
2967 		 */
2968 		if (thread->t_threadledger != LEDGER_NULL) {
2969 			l = thread->t_threadledger;
2970 			ledger_set_limit(l, thread_ledgers.cpu_time, LEDGER_LIMIT_INFINITY, 0);
2971 			ledger_set_action(l, thread_ledgers.cpu_time, LEDGER_ACTION_IGNORE);
2972 			thread->options &= ~(TH_OPT_PROC_CPULIMIT | TH_OPT_PRVT_CPULIMIT);
2973 		}
2974 
2975 		return 0;
2976 	}
2977 
2978 	if (interval_ns < MINIMUM_CPULIMIT_INTERVAL_MS * NSEC_PER_MSEC) {
2979 		return KERN_INVALID_ARGUMENT;
2980 	}
2981 
2982 	l = thread->t_threadledger;
2983 	if (l == LEDGER_NULL) {
2984 		/*
2985 		 * This thread doesn't yet have a per-thread ledger, so create one with the CPU time entry active.
2986 		 */
2987 		if ((l = ledger_instantiate(thread_ledger_template, LEDGER_CREATE_INACTIVE_ENTRIES)) == LEDGER_NULL) {
2988 			return KERN_RESOURCE_SHORTAGE;
2989 		}
2990 
2991 		/*
2992 		 * We are the first to create this thread's ledger, so only activate our entry.
2993 		 */
2994 		ledger_entry_setactive(l, thread_ledgers.cpu_time);
2995 		thread->t_threadledger = l;
2996 	}
2997 
2998 	/*
2999 	 * The limit is specified as a percentage of CPU over an interval in nanoseconds.
3000 	 * Calculate the amount of CPU time that the thread needs to consume in order to hit the limit.
3001 	 */
3002 	limittime = (interval_ns * percentage) / 100;
3003 	nanoseconds_to_absolutetime(limittime, &abstime);
3004 	ledger_set_limit(l, thread_ledgers.cpu_time, abstime, cpumon_ustackshots_trigger_pct);
3005 	/*
3006 	 * Refill the thread's allotted CPU time every interval_ns nanoseconds.
3007 	 */
3008 	ledger_set_period(l, thread_ledgers.cpu_time, interval_ns);
3009 
3010 	if (action == THREAD_CPULIMIT_EXCEPTION) {
3011 		/*
3012 		 * We don't support programming the CPU usage monitor on a task if any of its
3013 		 * threads have a per-thread blocking CPU limit configured.
3014 		 */
3015 		if (thread->options & TH_OPT_PRVT_CPULIMIT) {
3016 			panic("CPU usage monitor activated, but blocking thread limit exists");
3017 		}
3018 
3019 		/*
3020 		 * Make a note that this thread's CPU limit is being used for the task-wide CPU
3021 		 * usage monitor. We don't have to arm the callback which will trigger the
3022 		 * exception, because that was done for us in ledger_instantiate (because the
3023 		 * ledger template used has a default callback).
3024 		 */
3025 		thread->options |= TH_OPT_PROC_CPULIMIT;
3026 	} else {
3027 		/*
3028 		 * We deliberately override any CPU limit imposed by a task-wide limit (e.g.
3029 		 * CPU usage monitor).
3030 		 */
3031 		thread->options &= ~TH_OPT_PROC_CPULIMIT;
3032 
3033 		thread->options |= TH_OPT_PRVT_CPULIMIT;
3034 		/* The per-thread ledger template by default has a callback for CPU time */
3035 		ledger_disable_callback(l, thread_ledgers.cpu_time);
3036 		ledger_set_action(l, thread_ledgers.cpu_time, LEDGER_ACTION_BLOCK);
3037 	}
3038 
3039 	return 0;
3040 }
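
/*
 * Example (illustrative sketch): limit the calling thread to 50% of a CPU
 * over one-second intervals, blocking it once the budget is exhausted:
 *
 *	thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, 50, 1 * NSEC_PER_SEC);
 *
 * This programs the ledger with a 500 ms CPU-time limit refilled every
 * second and sets LEDGER_ACTION_BLOCK on the entry.
 */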
3041 
3042 void
3043 thread_sched_call(
3044 	thread_t                thread,
3045 	sched_call_t    call)
3046 {
3047 	assert((thread->state & TH_WAIT_REPORT) == 0);
3048 	thread->sched_call = call;
3049 }
3050 
3051 uint64_t
3052 thread_tid(
3053 	thread_t        thread)
3054 {
3055 	return thread != THREAD_NULL? thread->thread_id: 0;
3056 }
3057 
3058 uint64_t
3059 uthread_tid(
3060 	struct uthread *uth)
3061 {
3062 	if (uth) {
3063 		return thread_tid(get_machthread(uth));
3064 	}
3065 	return 0;
3066 }
3067 
3068 uint16_t
3069 thread_set_tag(thread_t th, uint16_t tag)
3070 {
3071 	return thread_set_tag_internal(th, tag);
3072 }
3073 
3074 uint16_t
3075 thread_get_tag(thread_t th)
3076 {
3077 	return thread_get_tag_internal(th);
3078 }
3079 
3080 uint64_t
3081 thread_last_run_time(thread_t th)
3082 {
3083 	return th->last_run_time;
3084 }
3085 
3086 /*
3087  * Shared resource contention management
3088  *
3089  * The scheduler attempts to load balance the shared resource intensive
3090  * workloads across clusters to ensure that the resource is not heavily
3091  * contended. The kernel relies on external agents (userspace or
3092  * performance controller) to identify shared resource heavy threads.
3093  * The load balancing is achieved based on the scheduler configuration
3094  * enabled on the platform.
3095  */
3096 
3097 
3098 #if CONFIG_SCHED_EDGE
3099 
3100 /*
3101  * On the Edge scheduler, the load balancing is achieved by looking
3102  * at cluster level shared resource loads and migrating resource heavy
3103  * threads dynamically to under utilized cluster. Therefore, when a
3104  * thread is indicated as a resource heavy thread, the policy set
3105  * routine simply adds a flag to the thread which is looked at by
3106  * the scheduler on thread migration decisions.
3107  */
3108 
3109 boolean_t
3110 thread_shared_rsrc_policy_get(thread_t thread, cluster_shared_rsrc_type_t type)
3111 {
3112 	return thread->th_shared_rsrc_heavy_user[type] || thread->th_shared_rsrc_heavy_perf_control[type];
3113 }
3114 
3115 __options_decl(sched_edge_rsrc_heavy_thread_state, uint32_t, {
3116 	SCHED_EDGE_RSRC_HEAVY_THREAD_SET = 1,
3117 	SCHED_EDGE_RSRC_HEAVY_THREAD_CLR = 2,
3118 });
3119 
3120 kern_return_t
3121 thread_shared_rsrc_policy_set(thread_t thread, __unused uint32_t index, cluster_shared_rsrc_type_t type, shared_rsrc_policy_agent_t agent)
3122 {
3123 	spl_t s = splsched();
3124 	thread_lock(thread);
3125 
3126 	bool user = (agent == SHARED_RSRC_POLICY_AGENT_DISPATCH) || (agent == SHARED_RSRC_POLICY_AGENT_SYSCTL);
3127 	bool *thread_flags = (user) ? thread->th_shared_rsrc_heavy_user : thread->th_shared_rsrc_heavy_perf_control;
3128 	if (thread_flags[type]) {
3129 		thread_unlock(thread);
3130 		splx(s);
3131 		return KERN_FAILURE;
3132 	}
3133 
3134 	thread_flags[type] = true;
3135 	thread_unlock(thread);
3136 	splx(s);
3137 
3138 	KDBG(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_RSRC_HEAVY_THREAD) | DBG_FUNC_NONE, SCHED_EDGE_RSRC_HEAVY_THREAD_SET, thread_tid(thread), type, agent);
3139 	if (thread == current_thread()) {
3140 		if (agent == SHARED_RSRC_POLICY_AGENT_PERFCTL_QUANTUM) {
3141 			ast_on(AST_PREEMPT);
3142 		} else {
3143 			assert(agent != SHARED_RSRC_POLICY_AGENT_PERFCTL_CSW);
3144 			thread_block(THREAD_CONTINUE_NULL);
3145 		}
3146 	}
3147 	return KERN_SUCCESS;
3148 }
3149 
3150 kern_return_t
3151 thread_shared_rsrc_policy_clear(thread_t thread, cluster_shared_rsrc_type_t type, shared_rsrc_policy_agent_t agent)
3152 {
3153 	spl_t s = splsched();
3154 	thread_lock(thread);
3155 
3156 	bool user = (agent == SHARED_RSRC_POLICY_AGENT_DISPATCH) || (agent == SHARED_RSRC_POLICY_AGENT_SYSCTL);
3157 	bool *thread_flags = (user) ? thread->th_shared_rsrc_heavy_user : thread->th_shared_rsrc_heavy_perf_control;
3158 	if (!thread_flags[type]) {
3159 		thread_unlock(thread);
3160 		splx(s);
3161 		return KERN_FAILURE;
3162 	}
3163 
3164 	thread_flags[type] = false;
3165 	thread_unlock(thread);
3166 	splx(s);
3167 
3168 	KDBG(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_RSRC_HEAVY_THREAD) | DBG_FUNC_NONE, SCHED_EDGE_RSRC_HEAVY_THREAD_CLR, thread_tid(thread), type, agent);
3169 	if (thread == current_thread()) {
3170 		if (agent == SHARED_RSRC_POLICY_AGENT_PERFCTL_QUANTUM) {
3171 			ast_on(AST_PREEMPT);
3172 		} else {
3173 			assert(agent != SHARED_RSRC_POLICY_AGENT_PERFCTL_CSW);
3174 			thread_block(THREAD_CONTINUE_NULL);
3175 		}
3176 	}
3177 	return KERN_SUCCESS;
3178 }
3179 
3180 #else /* CONFIG_SCHED_EDGE */
3181 
3182 /*
3183  * On non-Edge schedulers, shared resource contention
3184  * is managed by simply binding threads to specific clusters
3185  * based on the worker index passed by the agents that mark
3186  * threads as resource heavy. The thread binding
3187  * approach does not provide any rebalancing opportunities;
3188  * it can also suffer from scheduling delays if the cluster
3189  * where the thread is bound is contended.
3190  */
3191 
3192 boolean_t
3193 thread_shared_rsrc_policy_get(__unused thread_t thread, __unused cluster_shared_rsrc_type_t type)
3194 {
3195 	return false;
3196 }
3197 
3198 kern_return_t
3199 thread_shared_rsrc_policy_set(thread_t thread, uint32_t index, __unused cluster_shared_rsrc_type_t type, __unused shared_rsrc_policy_agent_t agent)
3200 {
3201 	return thread_bind_cluster_id(thread, index, THREAD_BIND_SOFT | THREAD_BIND_ELIGIBLE_ONLY);
3202 }
3203 
3204 kern_return_t
3205 thread_shared_rsrc_policy_clear(thread_t thread, __unused cluster_shared_rsrc_type_t type, __unused shared_rsrc_policy_agent_t agent)
3206 {
3207 	return thread_bind_cluster_id(thread, 0, THREAD_UNBIND);
3208 }
3209 
3210 #endif /* CONFIG_SCHED_EDGE */
3211 
3212 uint64_t
3213 thread_dispatchqaddr(
3214 	thread_t                thread)
3215 {
3216 	uint64_t        dispatchqueue_addr;
3217 	uint64_t        thread_handle;
3218 	task_t          task;
3219 
3220 	if (thread == THREAD_NULL) {
3221 		return 0;
3222 	}
3223 
3224 	thread_handle = thread->machine.cthread_self;
3225 	if (thread_handle == 0) {
3226 		return 0;
3227 	}
3228 
3229 	task = get_threadtask(thread);
3230 	void *bsd_info = get_bsdtask_info(task);
3231 	if (thread->inspection == TRUE) {
3232 		dispatchqueue_addr = thread_handle + get_task_dispatchqueue_offset(task);
3233 	} else if (bsd_info) {
3234 		dispatchqueue_addr = thread_handle + get_dispatchqueue_offset_from_proc(bsd_info);
3235 	} else {
3236 		dispatchqueue_addr = 0;
3237 	}
3238 
3239 	return dispatchqueue_addr;
3240 }
3241 
3242 
3243 uint64_t
3244 thread_wqquantum_addr(thread_t thread)
3245 {
3246 	uint64_t thread_handle;
3247 	task_t   task;
3248 
3249 	if (thread == THREAD_NULL) {
3250 		return 0;
3251 	}
3252 
3253 	thread_handle = thread->machine.cthread_self;
3254 	if (thread_handle == 0) {
3255 		return 0;
3256 	}
3257 	task = get_threadtask(thread);
3258 
3259 	uint64_t wq_quantum_expiry_offset = get_wq_quantum_offset_from_proc(get_bsdtask_info(task));
3260 	if (wq_quantum_expiry_offset == 0) {
3261 		return 0;
3262 	}
3263 
3264 	return wq_quantum_expiry_offset + thread_handle;
3265 }
3266 
3267 uint64_t
3268 thread_rettokern_addr(
3269 	thread_t                thread)
3270 {
3271 	uint64_t        rettokern_addr;
3272 	uint64_t        rettokern_offset;
3273 	uint64_t        thread_handle;
3274 	task_t          task;
3275 	void            *bsd_info;
3276 
3277 	if (thread == THREAD_NULL) {
3278 		return 0;
3279 	}
3280 
3281 	thread_handle = thread->machine.cthread_self;
3282 	if (thread_handle == 0) {
3283 		return 0;
3284 	}
3285 	task = get_threadtask(thread);
3286 	bsd_info = get_bsdtask_info(task);
3287 
3288 	if (bsd_info) {
3289 		rettokern_offset = get_return_to_kernel_offset_from_proc(bsd_info);
3290 
3291 		/* Return 0 if return to kernel offset is not initialized. */
3292 		if (rettokern_offset == 0) {
3293 			rettokern_addr = 0;
3294 		} else {
3295 			rettokern_addr = thread_handle + rettokern_offset;
3296 		}
3297 	} else {
3298 		rettokern_addr = 0;
3299 	}
3300 
3301 	return rettokern_addr;
3302 }
3303 
3304 /*
3305  * Export routines to other components for things that are done as macros
3306  * within the osfmk component.
3307  */
3308 
3309 void
3310 thread_mtx_lock(thread_t thread)
3311 {
3312 	lck_mtx_lock(&thread->mutex);
3313 }
3314 
3315 void
3316 thread_mtx_unlock(thread_t thread)
3317 {
3318 	lck_mtx_unlock(&thread->mutex);
3319 }
3320 
3321 void
3322 thread_reference(
3323 	thread_t        thread)
3324 {
3325 	if (thread != THREAD_NULL) {
3326 		zone_id_require(ZONE_ID_THREAD, sizeof(struct thread), thread);
3327 		os_ref_retain_raw(&thread->ref_count, &thread_refgrp);
3328 	}
3329 }
3330 
3331 void
3332 thread_require(thread_t thread)
3333 {
3334 	zone_id_require(ZONE_ID_THREAD, sizeof(struct thread), thread);
3335 }
3336 
3337 #undef thread_should_halt
3338 
3339 boolean_t
3340 thread_should_halt(
3341 	thread_t                th)
3342 {
3343 	return thread_should_halt_fast(th);
3344 }
3345 
3346 /*
3347  * thread_set_voucher_name - reset the voucher port name bound to this thread
3348  *
3349  * Conditions:  nothing locked
3350  */
3351 
3352 kern_return_t
3353 thread_set_voucher_name(mach_port_name_t voucher_name)
3354 {
3355 	thread_t thread = current_thread();
3356 	ipc_voucher_t new_voucher = IPC_VOUCHER_NULL;
3357 	ipc_voucher_t voucher;
3358 	ledger_t bankledger = NULL;
3359 	struct thread_group *banktg = NULL;
3360 	uint32_t persona_id = 0;
3361 
3362 	if (MACH_PORT_DEAD == voucher_name) {
3363 		return KERN_INVALID_RIGHT;
3364 	}
3365 
3366 	/*
3367 	 * aggressively convert to voucher reference
3368 	 */
3369 	if (MACH_PORT_VALID(voucher_name)) {
3370 		new_voucher = convert_port_name_to_voucher(voucher_name);
3371 		if (IPC_VOUCHER_NULL == new_voucher) {
3372 			return KERN_INVALID_ARGUMENT;
3373 		}
3374 	}
3375 	bank_get_bank_ledger_thread_group_and_persona(new_voucher, &bankledger, &banktg, &persona_id);
3376 
3377 	thread_mtx_lock(thread);
3378 	voucher = thread->ith_voucher;
3379 	thread->ith_voucher_name = voucher_name;
3380 	thread->ith_voucher = new_voucher;
3381 	thread_mtx_unlock(thread);
3382 
3383 	bank_swap_thread_bank_ledger(thread, bankledger);
3384 #if CONFIG_THREAD_GROUPS
3385 	thread_group_set_bank(thread, banktg);
3386 #endif /* CONFIG_THREAD_GROUPS */
3387 
3388 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3389 	    MACHDBG_CODE(DBG_MACH_IPC, MACH_THREAD_SET_VOUCHER) | DBG_FUNC_NONE,
3390 	    (uintptr_t)thread_tid(thread),
3391 	    (uintptr_t)voucher_name,
3392 	    VM_KERNEL_ADDRPERM((uintptr_t)new_voucher),
3393 	    persona_id, 0);
3394 
3395 	if (IPC_VOUCHER_NULL != voucher) {
3396 		ipc_voucher_release(voucher);
3397 	}
3398 
3399 	return KERN_SUCCESS;
3400 }
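/*
 * Illustrative usage (hypothetical port name): a thread adopting the
 * voucher it received in a message might call, on itself,
 *
 *	kr = thread_set_voucher_name(msgh_voucher_port_name);
 *
 * Passing a name that is not MACH_PORT_VALID (e.g. MACH_PORT_NULL) clears
 * the adopted voucher; MACH_PORT_DEAD is rejected with KERN_INVALID_RIGHT.
 */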
3401 
3402 /*
3403  *  thread_get_mach_voucher - return a voucher reference for the specified thread voucher
3404  *
3405  *  Conditions:  nothing locked
3406  *
3407  *  NOTE:       There is currently no distinction between the current and effective
3408  *		vouchers because we only set them at the thread level.
3409  */
3410 kern_return_t
3411 thread_get_mach_voucher(
3412 	thread_act_t            thread,
3413 	mach_voucher_selector_t __unused which,
3414 	ipc_voucher_t           *voucherp)
3415 {
3416 	ipc_voucher_t           voucher;
3417 
3418 	if (THREAD_NULL == thread) {
3419 		return KERN_INVALID_ARGUMENT;
3420 	}
3421 
3422 	thread_mtx_lock(thread);
3423 	voucher = thread->ith_voucher;
3424 
3425 	if (IPC_VOUCHER_NULL != voucher) {
3426 		ipc_voucher_reference(voucher);
3427 		thread_mtx_unlock(thread);
3428 		*voucherp = voucher;
3429 		return KERN_SUCCESS;
3430 	}
3431 
3432 	thread_mtx_unlock(thread);
3433 
3434 	*voucherp = IPC_VOUCHER_NULL;
3435 	return KERN_SUCCESS;
3436 }
3437 
3438 /*
3439  *  thread_set_mach_voucher - set a voucher reference for the specified thread voucher
3440  *
3441  *  Conditions: caller holds a reference on the voucher.
3442  *		nothing locked.
3443  *
3444  *  We grab another reference to the voucher and bind it to the thread.
3445  *  The old voucher reference associated with the thread is
3446  *  discarded.
3447  */
3448 kern_return_t
3449 thread_set_mach_voucher(
3450 	thread_t                thread,
3451 	ipc_voucher_t           voucher)
3452 {
3453 	ipc_voucher_t old_voucher;
3454 	ledger_t bankledger = NULL;
3455 	struct thread_group *banktg = NULL;
3456 	uint32_t persona_id = 0;
3457 
3458 	if (THREAD_NULL == thread) {
3459 		return KERN_INVALID_ARGUMENT;
3460 	}
3461 
3462 	bank_get_bank_ledger_thread_group_and_persona(voucher, &bankledger, &banktg, &persona_id);
3463 
3464 	thread_mtx_lock(thread);
3465 	/*
3466 	 * Once the thread is started, we will look at `ith_voucher` without
3467 	 * holding any lock.
3468 	 *
3469 	 * Setting the voucher hence can only be done by current_thread() or
3470 	 * before it started. "started" flips under the thread mutex and must be
3471 	 * tested under it too.
3472 	 */
3473 	if (thread != current_thread() && thread->started) {
3474 		thread_mtx_unlock(thread);
3475 		return KERN_INVALID_ARGUMENT;
3476 	}
3477 
3478 	ipc_voucher_reference(voucher);
3479 	old_voucher = thread->ith_voucher;
3480 	thread->ith_voucher = voucher;
3481 	thread->ith_voucher_name = MACH_PORT_NULL;
3482 	thread_mtx_unlock(thread);
3483 
3484 	bank_swap_thread_bank_ledger(thread, bankledger);
3485 #if CONFIG_THREAD_GROUPS
3486 	thread_group_set_bank(thread, banktg);
3487 #endif /* CONFIG_THREAD_GROUPS */
3488 
3489 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3490 	    MACHDBG_CODE(DBG_MACH_IPC, MACH_THREAD_SET_VOUCHER) | DBG_FUNC_NONE,
3491 	    (uintptr_t)thread_tid(thread),
3492 	    (uintptr_t)MACH_PORT_NULL,
3493 	    VM_KERNEL_ADDRPERM((uintptr_t)voucher),
3494 	    persona_id, 0);
3495 
3496 	ipc_voucher_release(old_voucher);
3497 
3498 	return KERN_SUCCESS;
3499 }
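/*
 * Illustrative sequence (a sketch, not the only valid caller): a voucher
 * can be bound to a newly created thread before it starts,
 *
 *	thread_create(task, &new_thread);
 *	thread_set_mach_voucher(new_thread, voucher);
 *	thread_resume(new_thread);
 *
 * whereas once the thread has started, only the thread itself may change
 * its voucher.
 */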
3500 
3501 /*
3502  *  thread_swap_mach_voucher - swap a voucher reference for the specified thread voucher
3503  *
3504  *  Conditions: caller holds a reference on the new and presumed old voucher(s).
3505  *		nothing locked.
3506  *
3507  *  This function is no longer supported.
3508  */
3509 kern_return_t
3510 thread_swap_mach_voucher(
3511 	__unused thread_t               thread,
3512 	__unused ipc_voucher_t          new_voucher,
3513 	ipc_voucher_t                   *in_out_old_voucher)
3514 {
3515 	/*
3516 	 * Currently this function is only called from a MIG generated
3517 	 * routine which doesn't release the reference on the voucher
3518 	 * addressed by in_out_old_voucher. To avoid leaking this reference,
3519 	 * a call to release it has been added here.
3520 	 */
3521 	ipc_voucher_release(*in_out_old_voucher);
3522 	OS_ANALYZER_SUPPRESS("81787115") return KERN_NOT_SUPPORTED;
3523 }
3524 
3525 /*
3526  *  thread_get_current_voucher_origin_pid - get the pid of the originator of the current voucher.
3527  */
3528 kern_return_t
3529 thread_get_current_voucher_origin_pid(
3530 	int32_t      *pid)
3531 {
3532 	return thread_get_voucher_origin_pid(current_thread(), pid);
3533 }
3534 
3535 /*
3536  *  thread_get_voucher_origin_pid - get the pid of the originator of the specified thread's voucher.
3537  */
3538 kern_return_t
3539 thread_get_voucher_origin_pid(thread_t thread, int32_t *pid)
3540 {
3541 	uint32_t buf_size = sizeof(*pid);
3542 	return mach_voucher_attr_command(thread->ith_voucher,
3543 	           MACH_VOUCHER_ATTR_KEY_BANK,
3544 	           BANK_ORIGINATOR_PID,
3545 	           NULL,
3546 	           0,
3547 	           (mach_voucher_attr_content_t)pid,
3548 	           &buf_size);
3549 }
3550 
3551 #if CONFIG_THREAD_GROUPS
3552 /*
3553  * Returns the current thread's voucher-carried thread group
3554  *
3555  * The reference is borrowed from the current voucher, so this does NOT
3556  * return a reference to the group.
3557  */
3558 struct thread_group *
3559 thread_get_current_voucher_thread_group(thread_t thread)
3560 {
3561 	assert(thread == current_thread());
3562 
3563 	if (thread->ith_voucher == NULL) {
3564 		return NULL;
3565 	}
3566 
3567 	ledger_t bankledger = NULL;
3568 	struct thread_group *banktg = NULL;
3569 
3570 	bank_get_bank_ledger_thread_group_and_persona(thread->ith_voucher, &bankledger, &banktg, NULL);
3571 
3572 	return banktg;
3573 }
3574 
3575 #endif /* CONFIG_THREAD_GROUPS */
3576 
3577 #if CONFIG_COALITIONS
3578 
3579 uint64_t
3580 thread_get_current_voucher_resource_coalition_id(thread_t thread)
3581 {
3582 	uint64_t id = 0;
3583 	assert(thread == current_thread());
3584 	if (thread->ith_voucher != NULL) {
3585 		id = bank_get_bank_ledger_resource_coalition_id(thread->ith_voucher);
3586 	}
3587 	return id;
3588 }
3589 
3590 #endif /* CONFIG_COALITIONS */
3591 
3592 extern struct workqueue *
3593 proc_get_wqptr(void *proc);
3594 
3595 static bool
3596 task_supports_cooperative_workqueue(task_t task)
3597 {
3598 	void *bsd_info = get_bsdtask_info(task);
3599 
3600 	assert(task == current_task());
3601 	if (bsd_info == NULL) {
3602 		return false;
3603 	}
3604 
3605 	uint64_t wq_quantum_expiry_offset = get_wq_quantum_offset_from_proc(bsd_info);
3606 	/* userspace may not have called workq_open yet */
3607 	struct workqueue *wq = proc_get_wqptr(bsd_info);
3608 
3609 	return (wq != NULL) && (wq_quantum_expiry_offset != 0);
3610 }
3611 
3612 /* Not safe to call from scheduler paths - should only be called on self */
3613 bool
3614 thread_supports_cooperative_workqueue(thread_t thread)
3615 {
3616 	struct uthread *uth = get_bsdthread_info(thread);
3617 	task_t task = get_threadtask(thread);
3618 
3619 	assert(thread == current_thread());
3620 
3621 	return task_supports_cooperative_workqueue(task) &&
3622 	       bsdthread_part_of_cooperative_workqueue(uth);
3623 }
3624 
3625 static inline bool
3626 thread_has_armed_workqueue_quantum(thread_t thread)
3627 {
3628 	return thread->workq_quantum_deadline != 0;
3629 }
3630 
3631 /*
3632  * The workq quantum is a lazy timer that is evaluated at two specific times in
3633  * the scheduler:
3634  *
3635  * - context switch time
3636  * - scheduler quantum expiry time.
3637  *
3638  * We're currently expressing the workq quantum as a 0.5 scale factor of the
3639  * scheduler quantum. If the workq quantum is rearmed shortly after the
3640  * scheduler quantum begins, there can be a large delay between when the workq
3641  * quantum next expires and when that expiry is actually noticed.
3642  *
3643  * A potential future improvement for the wq quantum expiry logic is to compare
3644  * it to the next actual scheduler quantum deadline and expire it if it is
3645  * within a certain leeway.
3646  */
3647 static inline uint64_t
3648 thread_workq_quantum_size(thread_t thread)
3649 {
3650 	return (uint64_t) (SCHED(initial_quantum_size)(thread) / 2);
3651 }
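/*
 * For illustration, assuming a nominal 10ms initial scheduler quantum, the
 * workq quantum would be 5ms of thread runtime: a quantum armed at runtime
 * R expires once the thread has accumulated more than R + 5ms of CPU time,
 * and that expiry is only noticed at the next context switch or scheduler
 * quantum expiry.
 */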
3652 
3653 /*
3654  * Always called by thread on itself - either at AST boundary after processing
3655  * an existing quantum expiry, or when a new quantum is armed before the thread
3656  * goes out to userspace to handle a thread request
3657  */
3658 void
3659 thread_arm_workqueue_quantum(thread_t thread)
3660 {
3661 	/*
3662 	 * If the task is not opted into wq quantum notification, or if the thread
3663 	 * is not part of the cooperative workqueue, don't even bother with tracking
3664 	 * the quantum or calculating expiry
3665 	 */
3666 	if (!thread_supports_cooperative_workqueue(thread)) {
3667 		assert(thread->workq_quantum_deadline == 0);
3668 		return;
3669 	}
3670 
3671 	assert(current_thread() == thread);
3672 	assert(thread_get_tag(thread) & THREAD_TAG_WORKQUEUE);
3673 
3674 	uint64_t current_runtime = thread_get_runtime_self();
3675 	uint64_t deadline = thread_workq_quantum_size(thread) + current_runtime;
3676 
3677 	/*
3678 	 * The update of a workqueue quantum should always be followed by the update
3679 	 * of the AST - see explanation in kern/thread.h for synchronization of this
3680 	 * field
3681 	 */
3682 	thread->workq_quantum_deadline = deadline;
3683 
3684 	/* We're arming a new quantum, clear any previous expiry notification */
3685 	act_clear_astkevent(thread, AST_KEVENT_WORKQ_QUANTUM_EXPIRED);
3686 
3687 	WQ_TRACE(TRACE_wq_quantum_arm, current_runtime, deadline, 0, 0);
3688 
3689 	WORKQ_QUANTUM_HISTORY_WRITE_ENTRY(thread, thread->workq_quantum_deadline, true);
3690 }
3691 
3692 /* Called by a thread on itself when it is about to park */
3693 void
3694 thread_disarm_workqueue_quantum(thread_t thread)
3695 {
3696 	/* The update of a workqueue quantum should always be followed by the update
3697 	 * of the AST - see explanation in kern/thread.h for synchronization of this
3698 	 * field */
3699 	thread->workq_quantum_deadline = 0;
3700 	act_clear_astkevent(thread, AST_KEVENT_WORKQ_QUANTUM_EXPIRED);
3701 
3702 	WQ_TRACE(TRACE_wq_quantum_disarm, 0, 0, 0, 0);
3703 
3704 	WORKQ_QUANTUM_HISTORY_WRITE_ENTRY(thread, thread->workq_quantum_deadline, false);
3705 }
3706 
3707 /* This is called at context switch time on a thread that may not be self,
3708  * and at AST time
3709  */
3710 bool
3711 thread_has_expired_workqueue_quantum(thread_t thread, bool should_trace)
3712 {
3713 	if (!thread_has_armed_workqueue_quantum(thread)) {
3714 		return false;
3715 	}
3716 	/* We do not do a thread_get_runtime_self() here since this function is
3717 	 * called from context switch time or during scheduler quantum expiry and
3718 	 * therefore, we may not be evaluating it on the current thread/self.
3719 	 *
3720 	 * In addition, the timers on the thread have just been updated recently so
3721 	 * we don't need to update them again.
3722 	 */
3723 	uint64_t runtime = recount_thread_time_mach(thread);
3724 	bool expired = runtime > thread->workq_quantum_deadline;
3725 
3726 	if (expired && should_trace) {
3727 		WQ_TRACE(TRACE_wq_quantum_expired, runtime, thread->workq_quantum_deadline, 0, 0);
3728 	}
3729 
3730 	return expired;
3731 }
3732 
3733 /*
3734  * Called on a thread that is being context switched out or during quantum
3735  * expiry on self. Only called from scheduler paths.
3736  */
3737 void
3738 thread_evaluate_workqueue_quantum_expiry(thread_t thread)
3739 {
3740 	if (thread_has_expired_workqueue_quantum(thread, true)) {
3741 		act_set_astkevent(thread, AST_KEVENT_WORKQ_QUANTUM_EXPIRED);
3742 	}
3743 }
3744 
3745 boolean_t
3746 thread_has_thread_name(thread_t th)
3747 {
3748 	if (th) {
3749 		return bsd_hasthreadname(get_bsdthread_info(th));
3750 	}
3751 
3752 	/*
3753 	 * This is an odd case; clients may set the thread name based on the lack of
3754 	 * a name, but in this context there is no uthread to attach the name to.
3755 	 */
3756 	return FALSE;
3757 }
3758 
3759 void
3760 thread_set_thread_name(thread_t th, const char* name)
3761 {
3762 	if (th && name) {
3763 		bsd_setthreadname(get_bsdthread_info(th), thread_tid(th), name);
3764 	}
3765 }
3766 
3767 void
3768 thread_get_thread_name(thread_t th, char* name)
3769 {
3770 	if (!name) {
3771 		return;
3772 	}
3773 	if (th) {
3774 		bsd_getthreadname(get_bsdthread_info(th), name);
3775 	} else {
3776 		name[0] = '\0';
3777 	}
3778 }
3779 
3780 void
3781 thread_set_honor_qlimit(thread_t thread)
3782 {
3783 	thread->options |= TH_OPT_HONOR_QLIMIT;
3784 }
3785 
3786 void
3787 thread_clear_honor_qlimit(thread_t thread)
3788 {
3789 	thread->options &= (~TH_OPT_HONOR_QLIMIT);
3790 }
3791 
3792 /*
3793  * thread_enable_send_importance - set/clear the SEND_IMPORTANCE thread option bit.
3794  */
3795 void
3796 thread_enable_send_importance(thread_t thread, boolean_t enable)
3797 {
3798 	if (enable == TRUE) {
3799 		thread->options |= TH_OPT_SEND_IMPORTANCE;
3800 	} else {
3801 		thread->options &= ~TH_OPT_SEND_IMPORTANCE;
3802 	}
3803 }
3804 
3805 kern_return_t
3806 thread_get_ipc_propagate_attr(thread_t thread, struct thread_attr_for_ipc_propagation *attr)
3807 {
3808 	int iotier;
3809 	int qos;
3810 
3811 	if (thread == NULL || attr == NULL) {
3812 		return KERN_INVALID_ARGUMENT;
3813 	}
3814 
3815 	iotier = proc_get_effective_thread_policy(thread, TASK_POLICY_IO);
3816 	qos = proc_get_effective_thread_policy(thread, TASK_POLICY_QOS);
3817 
3818 	if (!qos) {
3819 		qos = thread_user_promotion_qos_for_pri(thread->base_pri);
3820 	}
3821 
3822 	attr->tafip_iotier = iotier;
3823 	attr->tafip_qos = qos;
3824 
3825 	return KERN_SUCCESS;
3826 }
3827 
3828 /*
3829  * thread_set_allocation_name - set the current thread's allocation name, returning the previous one.
3830  */
3831 
3832 kern_allocation_name_t
3833 thread_set_allocation_name(kern_allocation_name_t new_name)
3834 {
3835 	kern_allocation_name_t ret;
3836 	thread_kernel_state_t kstate = thread_get_kernel_state(current_thread());
3837 	ret = kstate->allocation_name;
3838 	// fifo: only install new_name when clearing, or when no name is currently set
3839 	if (!new_name || !kstate->allocation_name) {
3840 		kstate->allocation_name = new_name;
3841 	}
3842 	return ret;
3843 }
3844 
3845 void *
3846 thread_iokit_tls_get(uint32_t index)
3847 {
3848 	assert(index < THREAD_SAVE_IOKIT_TLS_COUNT);
3849 	return current_thread()->saved.iokit.tls[index];
3850 }
3851 
3852 void
3853 thread_iokit_tls_set(uint32_t index, void * data)
3854 {
3855 	assert(index < THREAD_SAVE_IOKIT_TLS_COUNT);
3856 	current_thread()->saved.iokit.tls[index] = data;
3857 }
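/*
 * Illustrative pairing (hypothetical index and value): a driver stashing
 * per-thread state for the duration of an upcall would do
 *
 *	thread_iokit_tls_set(0, ref);
 *	... upcall ...
 *	ref = thread_iokit_tls_get(0);
 *
 * Indices must be below THREAD_SAVE_IOKIT_TLS_COUNT, and the slots always
 * belong to current_thread().
 */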
3858 
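/*
 * Return how long the thread last spent off-core before it was made
 * runnable again: last_made_runnable_time - last_run_time, expressed in
 * mach absolute time units.
 */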
3859 uint64_t
3860 thread_get_last_wait_duration(thread_t thread)
3861 {
3862 	return thread->last_made_runnable_time - thread->last_run_time;
3863 }
3864 
3865 integer_t
3866 thread_kern_get_pri(thread_t thr)
3867 {
3868 	return thr->base_pri;
3869 }
3870 
3871 void
3872 thread_kern_set_pri(thread_t thr, integer_t pri)
3873 {
3874 	sched_set_kernel_thread_priority(thr, pri);
3875 }
3876 
3877 integer_t
3878 thread_kern_get_kernel_maxpri(void)
3879 {
3880 	return MAXPRI_KERNEL;
3881 }
3882 /*
3883  *	thread_port_with_flavor_no_senders
3884  *
3885  *	Called whenever the Mach port system detects no-senders on
3886  *	the thread inspect or read port. These ports are allocated lazily and
3887  *	should be deallocated here when there are no senders remaining.
3888  */
3889 static void
3890 thread_port_with_flavor_no_senders(
3891 	ipc_port_t          port,
3892 	mach_port_mscount_t mscount __unused)
3893 {
3894 	thread_ro_t tro;
3895 	thread_t thread;
3896 	mach_thread_flavor_t flavor;
3897 	ipc_kobject_type_t kotype;
3898 
3899 	ip_mq_lock(port);
3900 	if (port->ip_srights > 0) {
3901 		ip_mq_unlock(port);
3902 		return;
3903 	}
3904 	kotype = ip_kotype(port);
3905 	assert((IKOT_THREAD_READ == kotype) || (IKOT_THREAD_INSPECT == kotype));
3906 	thread = ipc_kobject_get_locked(port, kotype);
3907 	if (thread != THREAD_NULL) {
3908 		thread_reference(thread);
3909 	}
3910 	ip_mq_unlock(port);
3911 
3912 	if (thread == THREAD_NULL) {
3913 		/* The thread is exiting or disabled; it will eventually deallocate the port */
3914 		return;
3915 	}
3916 
3917 	if (kotype == IKOT_THREAD_READ) {
3918 		flavor = THREAD_FLAVOR_READ;
3919 	} else {
3920 		flavor = THREAD_FLAVOR_INSPECT;
3921 	}
3922 
3923 	thread_mtx_lock(thread);
3924 	ip_mq_lock(port);
3925 
3926 	/*
3927 	 * If the port is no longer active, then ipc_thread_terminate() ran
3928 	 * and destroyed the kobject already. Just deallocate the thread
3929 	 * ref we took and go away.
3930 	 *
3931 	 * It is also possible that several nsrequests are in flight, in
3932 	 * which case only one shall NULL-out the port entry, and that one
3933 	 * gets to dealloc the port.
3934 	 *
3935 	 * Check for a stale no-senders notification. A call to any function
3936 	 * that vends out send rights to this port could resurrect it between
3937 	 * this notification being generated and actually being handled here.
3938 	 */
3939 	tro = get_thread_ro(thread);
3940 	if (!ip_active(port) ||
3941 	    tro->tro_ports[flavor] != port ||
3942 	    port->ip_srights > 0) {
3943 		ip_mq_unlock(port);
3944 		thread_mtx_unlock(thread);
3945 		thread_deallocate(thread);
3946 		return;
3947 	}
3948 
3949 	assert(tro->tro_ports[flavor] == port);
3950 	zalloc_ro_clear_field(ZONE_ID_THREAD_RO, tro, tro_ports[flavor]);
3951 	thread_mtx_unlock(thread);
3952 
3953 	ipc_kobject_dealloc_port_and_unlock(port, 0, kotype);
3954 
3955 	thread_deallocate(thread);
3956 }
3957 
3958 /*
3959  * The 'thread_region_page_shift' is used by footprint
3960  * to specify the page size that it will use to
3961  * accomplish its accounting work on the task being
3962  * inspected. Since footprint uses a thread for each
3963  * task that it works on, we need to keep the page_shift
3964  * on a per-thread basis.
3965  */
3966 
3967 int
3968 thread_self_region_page_shift(void)
3969 {
3970 	/*
3971 	 * Return the page shift that this thread
3972 	 * would like to use for its accounting work.
3973 	 */
3974 	return current_thread()->thread_region_page_shift;
3975 }
3976 
3977 void
3978 thread_self_region_page_shift_set(
3979 	int pgshift)
3980 {
3981 	/*
3982 	 * Set the page shift that this thread
3983 	 * would like to use for its accounting work
3984 	 * when dealing with a task.
3985 	 */
3986 	current_thread()->thread_region_page_shift = pgshift;
3987 }
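/*
 * Illustrative usage (hypothetical values): a footprint thread about to
 * account a task that uses 16KB pages might bracket its work with
 *
 *	thread_self_region_page_shift_set(14);	... 1 << 14 == 16KB
 *	... inspect the target task ...
 *	thread_self_region_page_shift_set(0);
 *
 * where 0 restores the zero-initialized default.
 */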
3988 
3989 __startup_func
3990 static void
3991 ctid_table_init(void)
3992 {
3993 	/*
3994 	 * Pretend the early boot setup didn't exist,
3995 	 * and pick a mangling nonce.
3996 	 */
3997 	*compact_id_resolve(&ctid_table, 0) = THREAD_NULL;
3998 	ctid_nonce = (uint32_t)early_random() & CTID_MASK;
3999 }
4000 
4001 
4002 /*
4003  * This maps the [0, CTID_MAX_THREAD_NUMBER] range
4004  * to [1, CTID_MAX_THREAD_NUMBER + 1 == CTID_MASK]
4005  * so that in mangled form, '0' is an invalid CTID.
4006  */
4007 static ctid_t
4008 ctid_mangle(compact_id_t cid)
4009 {
4010 	return (cid == ctid_nonce ? CTID_MASK : cid) ^ ctid_nonce;
4011 }
4012 
4013 static compact_id_t
4014 ctid_unmangle(ctid_t ctid)
4015 {
4016 	ctid ^= ctid_nonce;
4017 	return ctid == CTID_MASK ? ctid_nonce : ctid;
4018 }
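/*
 * Worked example (hypothetical nonce): with ctid_nonce == 0x5a, compact id
 * 0x5a would otherwise mangle to 0, so it maps to CTID_MASK ^ 0x5a instead;
 * every other cid maps to cid ^ 0x5a. Both cases round-trip, i.e.
 * ctid_unmangle(ctid_mangle(cid)) == cid, and no valid thread ever has a
 * ctid of 0.
 */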
4019 
4020 void
4021 ctid_table_add(thread_t thread)
4022 {
4023 	compact_id_t cid;
4024 
4025 	cid = compact_id_get(&ctid_table, CTID_MAX_THREAD_NUMBER, thread);
4026 	thread->ctid = ctid_mangle(cid);
4027 }
4028 
4029 void
4030 ctid_table_remove(thread_t thread)
4031 {
4032 	__assert_only thread_t value;
4033 
4034 	value = compact_id_put(&ctid_table, ctid_unmangle(thread->ctid));
4035 	assert3p(value, ==, thread);
4036 	thread->ctid = 0;
4037 }
4038 
4039 thread_t
4040 ctid_get_thread_unsafe(ctid_t ctid)
4041 {
4042 	if (ctid) {
4043 		return *compact_id_resolve(&ctid_table, ctid_unmangle(ctid));
4044 	}
4045 	return THREAD_NULL;
4046 }
4047 
4048 thread_t
4049 ctid_get_thread(ctid_t ctid)
4050 {
4051 	thread_t thread = THREAD_NULL;
4052 
4053 	if (ctid) {
4054 		thread = *compact_id_resolve(&ctid_table, ctid_unmangle(ctid));
4055 		assert(thread && thread->ctid == ctid);
4056 	}
4057 	return thread;
4058 }
4059 
4060 ctid_t
4061 thread_get_ctid(thread_t thread)
4062 {
4063 	return thread->ctid;
4064 }
4065 
4066 /*
4067  * Adjust code signature dependent thread state.
4068  *
4069  * Called to allow code signature dependent adjustments to the thread
4070  * state. Note that this is usually called twice for the main thread:
4071  * Once at thread creation by thread_create, when the signature is
4072  * potentially not attached yet (which is usually the case for the
4073  * first/main thread of a task), and once after the task's signature
4074  * has actually been attached.
4075  *
4076  */
4077 kern_return_t
4078 thread_process_signature(thread_t thread, task_t task)
4079 {
4080 	return machine_thread_process_signature(thread, task);
4081 }
4082 
4083 
4084 #if CONFIG_DTRACE
4085 uint32_t
4086 dtrace_get_thread_predcache(thread_t thread)
4087 {
4088 	if (thread != THREAD_NULL) {
4089 		return thread->t_dtrace_predcache;
4090 	} else {
4091 		return 0;
4092 	}
4093 }
4094 
4095 int64_t
4096 dtrace_get_thread_vtime(thread_t thread)
4097 {
4098 	if (thread != THREAD_NULL) {
4099 		return thread->t_dtrace_vtime;
4100 	} else {
4101 		return 0;
4102 	}
4103 }
4104 
4105 int
4106 dtrace_get_thread_last_cpu_id(thread_t thread)
4107 {
4108 	if ((thread != THREAD_NULL) && (thread->last_processor != PROCESSOR_NULL)) {
4109 		return thread->last_processor->cpu_id;
4110 	} else {
4111 		return -1;
4112 	}
4113 }
4114 
4115 int64_t
4116 dtrace_get_thread_tracing(thread_t thread)
4117 {
4118 	if (thread != THREAD_NULL) {
4119 		return thread->t_dtrace_tracing;
4120 	} else {
4121 		return 0;
4122 	}
4123 }
4124 
4125 uint16_t
4126 dtrace_get_thread_inprobe(thread_t thread)
4127 {
4128 	if (thread != THREAD_NULL) {
4129 		return thread->t_dtrace_inprobe;
4130 	} else {
4131 		return 0;
4132 	}
4133 }
4134 
4135 vm_offset_t
4136 thread_get_kernel_stack(thread_t thread)
4137 {
4138 	if (thread != THREAD_NULL) {
4139 		return thread->kernel_stack;
4140 	} else {
4141 		return 0;
4142 	}
4143 }
4144 
4145 #if KASAN
4146 struct kasan_thread_data *
4147 kasan_get_thread_data(thread_t thread)
4148 {
4149 	return &thread->kasan_data;
4150 }
4151 #endif
4152 
4153 #if CONFIG_KCOV
4154 kcov_thread_data_t *
4155 kcov_get_thread_data(thread_t thread)
4156 {
4157 	return &thread->kcov_data;
4158 }
4159 #endif
4160 
4161 #if CONFIG_STKSZ
4162 /*
4163  * Returns the base of a thread's kernel stack.
4164  *
4165  * The coverage sanitizer instruments every function, including those that participate in stack handoff
4166  * between threads. There is a window in which the CPU still holds old values but the stack has already been
4167  * handed over to another thread. In this window kernel_stack is 0 but the CPU still uses the original stack
4168  * (until a context switch occurs). The original kernel_stack value is preserved in ksancov_stack during this window.
4169  */
4170 vm_offset_t
4171 kcov_stksz_get_thread_stkbase(thread_t thread)
4172 {
4173 	if (thread != THREAD_NULL) {
4174 		kcov_thread_data_t *data = kcov_get_thread_data(thread);
4175 		if (data->ktd_stksz.kst_stack) {
4176 			return data->ktd_stksz.kst_stack;
4177 		} else {
4178 			return thread->kernel_stack;
4179 		}
4180 	} else {
4181 		return 0;
4182 	}
4183 }
4184 
4185 vm_offset_t
4186 kcov_stksz_get_thread_stksize(thread_t thread)
4187 {
4188 	if (thread != THREAD_NULL) {
4189 		return kernel_stack_size;
4190 	} else {
4191 		return 0;
4192 	}
4193 }
4194 
4195 void
4196 kcov_stksz_set_thread_stack(thread_t thread, vm_offset_t stack)
4197 {
4198 	kcov_thread_data_t *data = kcov_get_thread_data(thread);
4199 	data->ktd_stksz.kst_stack = stack;
4200 }
4201 #endif /* CONFIG_STKSZ */
4202 
4203 int64_t
4204 dtrace_calc_thread_recent_vtime(thread_t thread)
4205 {
4206 	if (thread == THREAD_NULL) {
4207 		return 0;
4208 	}
4209 
4210 	struct recount_usage usage = { 0 };
4211 	recount_current_thread_usage(&usage);
4212 	return (int64_t)(usage.ru_system_time_mach + usage.ru_user_time_mach);
4213 }
4214 
4215 void
4216 dtrace_set_thread_predcache(thread_t thread, uint32_t predcache)
4217 {
4218 	if (thread != THREAD_NULL) {
4219 		thread->t_dtrace_predcache = predcache;
4220 	}
4221 }
4222 
4223 void
4224 dtrace_set_thread_vtime(thread_t thread, int64_t vtime)
4225 {
4226 	if (thread != THREAD_NULL) {
4227 		thread->t_dtrace_vtime = vtime;
4228 	}
4229 }
4230 
4231 void
4232 dtrace_set_thread_tracing(thread_t thread, int64_t accum)
4233 {
4234 	if (thread != THREAD_NULL) {
4235 		thread->t_dtrace_tracing = accum;
4236 	}
4237 }
4238 
4239 void
4240 dtrace_set_thread_inprobe(thread_t thread, uint16_t inprobe)
4241 {
4242 	if (thread != THREAD_NULL) {
4243 		thread->t_dtrace_inprobe = inprobe;
4244 	}
4245 }
4246 
4247 void
4248 dtrace_thread_bootstrap(void)
4249 {
4250 	task_t task = current_task();
4251 
4252 	if (task->thread_count == 1) {
4253 		thread_t thread = current_thread();
4254 		if (thread->t_dtrace_flags & TH_DTRACE_EXECSUCCESS) {
4255 			thread->t_dtrace_flags &= ~TH_DTRACE_EXECSUCCESS;
4256 			DTRACE_PROC(exec__success);
4257 			KDBG(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXEC),
4258 			    task_pid(task));
4259 		}
4260 		DTRACE_PROC(start);
4261 	}
4262 	DTRACE_PROC(lwp__start);
4263 }
4264 
4265 void
4266 dtrace_thread_didexec(thread_t thread)
4267 {
4268 	thread->t_dtrace_flags |= TH_DTRACE_EXECSUCCESS;
4269 }
4270 #endif /* CONFIG_DTRACE */
4271