/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	kern/thread.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub
 *	Date:	1986
 *
 *	Thread management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL). All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to [email protected] any
 * improvements that they make and grant CSL redistribution rights.
 *
 */

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/policy.h>
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <mach/thread_act.h>
#include <mach/thread_status.h>
#include <mach/time_value.h>
#include <mach/vm_param.h>

#include <machine/thread.h>
#include <machine/pal_routines.h>
#include <machine/limits.h>

#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <kern/cpu_data.h>
#include <kern/extmod_statistics.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/mach_param.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_group.h>
#include <kern/coalition.h>
#include <kern/host.h>
#include <kern/zalloc.h>
#include <kern/assert.h>
#include <kern/exc_resource.h>
#include <kern/exc_guard.h>
#include <kern/telemetry.h>
#include <kern/policy_internal.h>
#include <kern/turnstile.h>
#include <kern/sched_clutch.h>
#include <kern/hazard.h>
#include <kern/ast.h>

#include <corpses/task_corpse.h>
#if KPC
#include <kern/kpc.h>
#endif

#if MONOTONIC
#include <kern/monotonic.h>
#include <machine/monotonic.h>
#endif /* MONOTONIC */

#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_port.h>
#include <bank/bank_types.h>

#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>

#include <sys/kdebug.h>
#include <sys/bsdtask_info.h>
#include <mach/sdt.h>
#include <san/kasan.h>
#include <san/kcov_stksz.h>

#include <stdatomic.h>

#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
#include <arm64/proc_reg.h>
#endif /* defined(HAS_APPLE_PAC) */

/*
 * Exported interfaces
 */
#include <mach/task_server.h>
#include <mach/thread_act_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
#include <mach/mach_voucher_server.h>
#include <kern/policy_internal.h>

#if CONFIG_MACF
#include <security/mac_mach_internal.h>
#endif

#include <pthread/workqueue_trace.h>

LCK_GRP_DECLARE(thread_lck_grp, "thread");

static SECURITY_READ_ONLY_LATE(zone_t) thread_zone;
ZONE_DEFINE_ID(ZONE_ID_THREAD_RO, "threads_ro", struct thread_ro, ZC_READONLY);

static void thread_port_with_flavor_no_senders(ipc_port_t, mach_port_mscount_t);

IPC_KOBJECT_DEFINE(IKOT_THREAD_CONTROL);
IPC_KOBJECT_DEFINE(IKOT_THREAD_READ,
    .iko_op_no_senders = thread_port_with_flavor_no_senders);
IPC_KOBJECT_DEFINE(IKOT_THREAD_INSPECT,
    .iko_op_no_senders = thread_port_with_flavor_no_senders);

static struct mpsc_daemon_queue thread_stack_queue;
static struct mpsc_daemon_queue thread_terminate_queue;
static struct mpsc_daemon_queue thread_deallocate_queue;
static struct mpsc_daemon_queue thread_exception_queue;

decl_simple_lock_data(static, crashed_threads_lock);
static queue_head_t crashed_threads_queue;

struct thread_exception_elt {
	struct mpsc_queue_chain link;
	exception_type_t exception_type;
	task_t exception_task;
	thread_t exception_thread;
};

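/*
 * Static prototype for newly created threads: init_thread_from_template()
 * below copies it wholesale into each freshly allocated struct thread.
 */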
static SECURITY_READ_ONLY_LATE(struct thread) thread_template = {
#if MACH_ASSERT
	.thread_magic = THREAD_MAGIC,
#endif /* MACH_ASSERT */
	.wait_result = THREAD_WAITING,
	.options = THREAD_ABORTSAFE,
	.state = TH_WAIT | TH_UNINT,
	.th_sched_bucket = TH_BUCKET_RUN,
	.base_pri = BASEPRI_DEFAULT,
	.realtime.deadline = UINT64_MAX,
	.last_made_runnable_time = THREAD_NOT_RUNNABLE,
	.last_basepri_change_time = THREAD_NOT_RUNNABLE,
#if defined(CONFIG_SCHED_TIMESHARE_CORE)
	.pri_shift = INT8_MAX,
#endif
	/* timers are initialized in thread_bootstrap */
};

__startup_func
static void
thread_zone_startup(void)
{
	size_t size = sizeof(struct thread);

#ifdef MACH_BSD
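	/* co-locate the BSD uthread storage directly after struct thread */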
	size += roundup(uthread_size, _Alignof(struct thread));
#endif
	thread_zone = zone_create_ext("threads", size,
	    ZC_ZFREE_CLEARMEM, ZONE_ID_THREAD, NULL);
}
STARTUP(ZALLOC, STARTUP_RANK_MIDDLE, thread_zone_startup);

__startup_data
static struct thread init_thread;
static void thread_deallocate_enqueue(thread_t thread);
static void thread_deallocate_complete(thread_t thread);

#ifdef MACH_BSD
extern void proc_exit(void *);
extern mach_exception_data_type_t proc_encode_exit_exception_code(void *);
extern uint64_t get_dispatchqueue_offset_from_proc(void *);
extern uint64_t get_return_to_kernel_offset_from_proc(void *p);
extern uint64_t get_wq_quantum_offset_from_proc(void *);
extern int proc_selfpid(void);
extern void proc_name(int, char*, int);
extern char * proc_name_address(void *p);
exception_type_t get_exception_from_corpse_crashinfo(kcdata_descriptor_t corpse_info);
#endif /* MACH_BSD */

extern bool bsdthread_part_of_cooperative_workqueue(struct uthread *uth);
extern int disable_exc_resource;
extern int audio_active;
extern int debug_task;
int thread_max = CONFIG_THREAD_MAX;     /* Max number of threads */
int task_threadmax = CONFIG_THREAD_MAX;

static uint64_t thread_unique_id = 100;

struct _thread_ledger_indices thread_ledgers = { .cpu_time = -1 };
static ledger_template_t thread_ledger_template = NULL;
static void init_thread_ledgers(void);

#if CONFIG_JETSAM
void jetsam_on_ledger_cpulimit_exceeded(void);
#endif

extern int task_thread_soft_limit;

#if DEVELOPMENT || DEBUG
extern int exc_resource_threads_enabled;
#endif /* DEVELOPMENT || DEBUG */

/*
 * Level (in terms of percentage of the limit) at which the CPU usage monitor triggers telemetry.
 *
 * (i.e., when any thread's CPU consumption exceeds 70% of the limit, start taking user
 * stacktraces, aka micro-stackshots)
 */
#define CPUMON_USTACKSHOTS_TRIGGER_DEFAULT_PCT 70

/* Percentage. Level at which we start gathering telemetry. */
static TUNABLE(uint8_t, cpumon_ustackshots_trigger_pct,
    "cpumon_ustackshots_trigger_pct", CPUMON_USTACKSHOTS_TRIGGER_DEFAULT_PCT);
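/*
 * Illustrative arithmetic: with a per-thread CPU limit of, say, 50% over
 * its monitoring interval, the default 70% trigger means micro-stackshots
 * begin once the thread has consumed 0.70 * 50% = 35% CPU, i.e. before
 * the limit itself is breached.
 */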
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU(void);
#if DEVELOPMENT || DEBUG
void __attribute__((noinline)) SENDING_NOTIFICATION__TASK_HAS_TOO_MANY_THREADS(task_t, int);
#endif /* DEVELOPMENT || DEBUG */

/*
 * The smallest interval over which we support limiting CPU consumption is 1ms
 */
#define MINIMUM_CPULIMIT_INTERVAL_MS 1

os_refgrp_decl(static, thread_refgrp, "thread", NULL);

static inline void
init_thread_from_template(thread_t thread)
{
	/*
	 * In general, struct thread isn't trivially-copyable, since it may
	 * contain pointers to thread-specific state. This may be enforced at
	 * compile time on architectures that store authed + diversified
	 * pointers in machine_thread.
	 *
	 * In this specific case, where we're initializing a new thread from a
	 * thread_template, we know all diversified pointers are NULL; these are
	 * safe to bitwise copy.
	 */
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wnontrivial-memaccess"
	memcpy(thread, &thread_template, sizeof(*thread));
#pragma clang diagnostic pop
}

static void
thread_ro_create(task_t parent_task, thread_t th, thread_ro_t tro_tpl)
{
#if __x86_64__ || __arm__
	th->t_task = parent_task;
#endif
	tro_tpl->tro_owner = th;
	tro_tpl->tro_task = parent_task;
	th->t_tro = zalloc_ro(ZONE_ID_THREAD_RO, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	zalloc_ro_update_elem(ZONE_ID_THREAD_RO, th->t_tro, tro_tpl);
}

static void
thread_ro_destroy(thread_t th)
{
	thread_ro_t tro = get_thread_ro(th);
#if MACH_BSD
	struct ucred *cred = tro->tro_cred;
#endif

	zfree_ro(ZONE_ID_THREAD_RO, tro);
#if MACH_BSD
	if (cred) {
		uthread_cred_free(cred);
	}
#endif
}

#if MACH_BSD
extern void kauth_cred_set(struct ucred **, struct ucred *);

void
thread_ro_update_cred(thread_ro_t tro, struct ucred *ucred)
{
	struct ucred *my_cred = tro->tro_cred;
	if (my_cred != ucred) {
		kauth_cred_set(&my_cred, ucred);
		zalloc_ro_update_field(ZONE_ID_THREAD_RO, tro, tro_cred, &my_cred);
	}
}

void
thread_ro_update_flags(thread_ro_t tro, thread_ro_flags_t add, thread_ro_flags_t clr)
{
	thread_ro_flags_t flags = (tro->tro_flags & ~clr) | add;
	zalloc_ro_update_field(ZONE_ID_THREAD_RO, tro, tro_flags, &flags);
}
#endif

thread_t
thread_bootstrap(void)
{
	/*
	 * Fill in a template thread for fast initialization.
	 */
	timer_init(&thread_template.user_timer);
	timer_init(&thread_template.system_timer);
	timer_init(&thread_template.ptime);
	timer_init(&thread_template.runnable_timer);

	init_thread_from_template(&init_thread);
	/* fiddle with init thread to skip asserts in set_sched_pri */
	init_thread.sched_pri = MAXPRI_KERNEL;

	return &init_thread;
}

void
thread_machine_init_template(void)
{
	machine_thread_template_init(&thread_template);
}

void
thread_init(void)
{
	/*
	 * Initialize any machine-dependent
	 * per-thread structures necessary.
	 */
	machine_thread_init();

	init_thread_ledgers();
}

boolean_t
thread_is_active(thread_t thread)
{
	return thread->active;
}

void
thread_corpse_continue(void)
{
	thread_t thread = current_thread();

	thread_terminate_internal(thread);

	/*
	 * Handle the thread termination directly
	 * here instead of returning to userspace.
	 */
	assert(thread->active == FALSE);
	thread_ast_clear(thread, AST_APC);
	thread_apc_ast(thread);

	panic("thread_corpse_continue");
	/*NOTREACHED*/
}

__dead2
static void
thread_terminate_continue(void)
{
	panic("thread_terminate_continue");
	/*NOTREACHED*/
}

/*
 *	thread_terminate_self:
 */
void
thread_terminate_self(void)
{
	thread_t thread = current_thread();
	thread_ro_t tro = get_thread_ro(thread);
	task_t task = tro->tro_task;
	int threadcnt;

	pal_thread_terminate_self(thread);

	DTRACE_PROC(lwp__exit);

	thread_mtx_lock(thread);

	ipc_thread_disable(thread);

	thread_mtx_unlock(thread);

	thread_sched_call(thread, NULL);

	spl_t s = splsched();
	thread_lock(thread);

	thread_depress_abort_locked(thread);

	thread_unlock(thread);
	splx(s);

#if CONFIG_TASKWATCH
	thead_remove_taskwatch(thread);
#endif /* CONFIG_TASKWATCH */

	work_interval_thread_terminate(thread);

	thread_mtx_lock(thread);

	thread_policy_reset(thread);

	thread_mtx_unlock(thread);

	assert(thread->th_work_interval == NULL);

	bank_swap_thread_bank_ledger(thread, NULL);

	if (kdebug_enable && bsd_hasthreadname(get_bsdthread_info(thread))) {
		char threadname[MAXTHREADNAMESIZE];
		bsd_getthreadname(get_bsdthread_info(thread), threadname);
		kernel_debug_string_simple(TRACE_STRING_THREADNAME_PREV, threadname);
	}

	uthread_cleanup(get_bsdthread_info(thread), tro);

	if (kdebug_enable && task->bsd_info && !task_is_exec_copy(task)) {
		/* trace out pid before we sign off */
		long dbg_arg1 = 0;
		long dbg_arg2 = 0;

		kdbg_trace_data(task->bsd_info, &dbg_arg1, &dbg_arg2);
#if MONOTONIC
		if (kdebug_debugid_enabled(DBG_MT_INSTRS_CYCLES_THR_EXIT)) {
			uint64_t counts[MT_CORE_NFIXED];
			uint64_t thread_user_time;
			uint64_t thread_system_time;
			thread_user_time = timer_grab(&thread->user_timer);
			thread_system_time = timer_grab(&thread->system_timer);
			mt_fixed_thread_counts(thread, counts);
			KDBG_RELEASE(DBG_MT_INSTRS_CYCLES_THR_EXIT,
#ifdef MT_CORE_INSTRS
			    counts[MT_CORE_INSTRS],
#else /* !defined(MT_CORE_INSTRS) */
			    0,
#endif /* defined(MT_CORE_INSTRS) */
			    counts[MT_CORE_CYCLES],
			    thread_system_time, thread_user_time);
		}
#endif /* MONOTONIC */
		KDBG_RELEASE(TRACE_DATA_THREAD_TERMINATE_PID, dbg_arg1, dbg_arg2);
	}

	/*
	 * After this subtraction, this thread should never access
	 * task->bsd_info unless it got 0 back from the os_atomic_dec. It
	 * could be racing with other threads to be the last thread in the
	 * process, and the last thread in the process will tear down the proc
	 * structure and zero-out task->bsd_info.
	 */
	threadcnt = os_atomic_dec(&task->active_thread_count, relaxed);

#if CONFIG_COALITIONS
	/*
	 * Leave the coalitions when the last thread of the task is exiting
	 * and the task is not a corpse.
	 */
	if (threadcnt == 0 && !task->corpse_info) {
		coalitions_remove_task(task);
	}
#endif

	/*
	 * If we are the last thread to terminate and the task is
	 * associated with a BSD process, perform BSD process exit.
	 */
	if (threadcnt == 0 && task->bsd_info != NULL && !task_is_exec_copy(task)) {
		mach_exception_data_type_t subcode = 0;
		if (kdebug_enable) {
			/* since we're the last thread in this process, trace out the command name too */
			long args[4] = {};
			kdbg_trace_string(task->bsd_info, &args[0], &args[1], &args[2], &args[3]);
#if MONOTONIC
			if (kdebug_debugid_enabled(DBG_MT_INSTRS_CYCLES_PROC_EXIT)) {
				uint64_t counts[MT_CORE_NFIXED];
				uint64_t task_user_time;
				uint64_t task_system_time;
				mt_fixed_task_counts(task, counts);
				/* since the thread time is not yet added to the task */
				task_user_time = task->total_user_time + timer_grab(&thread->user_timer);
				task_system_time = task->total_system_time + timer_grab(&thread->system_timer);
				KDBG_RELEASE((DBG_MT_INSTRS_CYCLES_PROC_EXIT),
#ifdef MT_CORE_INSTRS
				    counts[MT_CORE_INSTRS],
#else /* !defined(MT_CORE_INSTRS) */
				    0,
#endif /* defined(MT_CORE_INSTRS) */
				    counts[MT_CORE_CYCLES],
				    task_system_time, task_user_time);
			}
#endif /* MONOTONIC */
			KDBG_RELEASE(TRACE_STRING_PROC_EXIT, args[0], args[1], args[2], args[3]);
		}

		/* Get the exit reason before proc_exit */
		subcode = proc_encode_exit_exception_code(task->bsd_info);
		proc_exit(task->bsd_info);
		/*
		 * If there is crash info in the task, deliver the crash
		 * notification now, since this is the last thread for
		 * this task.
		 */
		if (task->corpse_info) {
			/* reset all except task name port */
			ipc_task_reset(task);
			/* enable all task ports (name port unchanged) */
			ipc_task_enable(task);
			exception_type_t etype = get_exception_from_corpse_crashinfo(task->corpse_info);
			task_deliver_crash_notification(task, current_thread(), etype, subcode);
		}
	}

	if (threadcnt == 0) {
		task_lock(task);
		if (task_is_a_corpse_fork(task)) {
			thread_wakeup((event_t)&task->active_thread_count);
		}
		task_unlock(task);
	}

	s = splsched();
	thread_lock(thread);

	/*
	 * Ensure that the depress timer is no longer enqueued,
	 * so the timer can be safely deallocated
	 *
	 * TODO: build timer_call_cancel_wait
	 */

	assert((thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) == 0);

	uint32_t delay_us = 1;

	while (thread->depress_timer_active > 0) {
		thread_unlock(thread);
		splx(s);

		delay(delay_us++);

		if (delay_us > USEC_PER_SEC) {
			panic("depress timer failed to inactivate! "
			    "thread: %p depress_timer_active: %d",
			    thread, thread->depress_timer_active);
		}

		s = splsched();
		thread_lock(thread);
	}

	/*
	 * Cancel wait timer, and wait for
	 * concurrent expirations.
	 */
	if (thread->wait_timer_is_set) {
		thread->wait_timer_is_set = FALSE;

		if (timer_call_cancel(thread->wait_timer)) {
			thread->wait_timer_active--;
		}
	}

	delay_us = 1;

	while (thread->wait_timer_active > 0) {
		thread_unlock(thread);
		splx(s);

		delay(delay_us++);

		if (delay_us > USEC_PER_SEC) {
			panic("wait timer failed to inactivate! "
			    "thread: %p wait_timer_active: %d",
			    thread, thread->wait_timer_active);
		}

		s = splsched();
		thread_lock(thread);
	}

	/*
	 * If there is a reserved stack, release it.
	 */
	if (thread->reserved_stack != 0) {
		stack_free_reserved(thread);
		thread->reserved_stack = 0;
	}

	/*
	 * Mark thread as terminating, and block.
	 */
	thread->state |= TH_TERMINATE;
	thread_mark_wait_locked(thread, THREAD_UNINT);

	assert(thread->th_work_interval_flags == TH_WORK_INTERVAL_FLAGS_NONE);
	assert(thread->kern_promotion_schedpri == 0);
	assert(thread->rwlock_count == 0);
	assert(thread->priority_floor_count == 0);
	assert(thread->handoff_thread == THREAD_NULL);
	assert(thread->th_work_interval == NULL);

	assert((thread->sched_flags & TH_SFLAG_WAITQ_PROMOTED) == 0);
	assert((thread->sched_flags & TH_SFLAG_RW_PROMOTED) == 0);
	assert((thread->sched_flags & TH_SFLAG_FLOOR_PROMOTED) == 0);
	assert((thread->sched_flags & TH_SFLAG_EXEC_PROMOTED) == 0);
	assert((thread->sched_flags & TH_SFLAG_PROMOTED) == 0);
	assert((thread->sched_flags & TH_SFLAG_THREAD_GROUP_AUTO_JOIN) == 0);
	thread_unlock(thread);
	/* splsched */

	thread_block((thread_continue_t)thread_terminate_continue);
	/*NOTREACHED*/
}

static bool
thread_ref_release(thread_t thread)
{
	if (thread == THREAD_NULL) {
		return false;
	}

	assert_thread_magic(thread);

	return os_ref_release_raw(&thread->ref_count, &thread_refgrp) == 0;
}

/* Drop a thread refcount safely without triggering a zfree */
void
thread_deallocate_safe(thread_t thread)
{
	if (__improbable(thread_ref_release(thread))) {
		/* enqueue the thread for the thread deallocate daemon to call thread_deallocate_complete */
		thread_deallocate_enqueue(thread);
	}
}
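
/*
 * Illustrative note: thread_deallocate_safe() is for contexts (e.g. while
 * holding scheduler locks) where dropping the final reference inline, and
 * thus potentially calling zfree(), would be unsafe; the final teardown is
 * deferred to the deallocate daemon instead.
 */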

void
thread_deallocate(thread_t thread)
{
	if (__improbable(thread_ref_release(thread))) {
		thread_deallocate_complete(thread);
	}
}

void
thread_deallocate_complete(
	thread_t thread)
{
	task_t task;

	assert_thread_magic(thread);

	assert(os_ref_get_count_raw(&thread->ref_count) == 0);

	if (!(thread->state & TH_TERMINATE2)) {
		panic("thread_deallocate: thread not properly terminated");
	}

	assert(thread->runq == PROCESSOR_NULL);

#if KPC
	kpc_thread_destroy(thread);
#endif

	ipc_thread_terminate(thread);

	proc_thread_qos_deallocate(thread);

	task = get_threadtask(thread);

#ifdef MACH_BSD
	uthread_destroy(get_bsdthread_info(thread));
#endif /* MACH_BSD */

	if (thread->t_ledger) {
		ledger_dereference(thread->t_ledger);
	}
	if (thread->t_threadledger) {
		ledger_dereference(thread->t_threadledger);
	}

	assert(thread->turnstile != TURNSTILE_NULL);
	if (thread->turnstile) {
		turnstile_deallocate(thread->turnstile);
	}

	if (IPC_VOUCHER_NULL != thread->ith_voucher) {
		ipc_voucher_release(thread->ith_voucher);
	}

	kfree_data(thread->thread_io_stats, sizeof(struct io_stat_info));
#if CONFIG_PREADOPT_TG
	if (thread->old_preadopt_thread_group) {
		thread_group_release(thread->old_preadopt_thread_group);
	}

	if (thread->preadopt_thread_group) {
		thread_group_release(thread->preadopt_thread_group);
	}
#endif

	if (thread->kernel_stack != 0) {
		stack_free(thread);
	}

	lck_mtx_destroy(&thread->mutex, &thread_lck_grp);
	machine_thread_destroy(thread);

	task_deallocate_grp(task, TASK_GRP_INTERNAL);

#if MACH_ASSERT
	assert_thread_magic(thread);
	thread->thread_magic = 0;
#endif /* MACH_ASSERT */

	lck_mtx_lock(&tasks_threads_lock);
	assert(terminated_threads_count > 0);
	queue_remove(&terminated_threads, thread, thread_t, threads);
	terminated_threads_count--;
	lck_mtx_unlock(&tasks_threads_lock);

	timer_call_free(thread->depress_timer);
	timer_call_free(thread->wait_timer);

	thread_ro_destroy(thread);
	zfree(thread_zone, thread);
}

/*
 *	thread_inspect_deallocate:
 *
 *	Drop a thread inspection reference.
 */
void
thread_inspect_deallocate(
	thread_inspect_t thread_inspect)
{
	return thread_deallocate((thread_t)thread_inspect);
}

/*
 *	thread_read_deallocate:
 *
 *	Drop a reference on a thread read port.
 */
void
thread_read_deallocate(
	thread_read_t thread_read)
{
	return thread_deallocate((thread_t)thread_read);
}


/*
 *	thread_exception_queue_invoke:
 *
 *	Deliver EXC_{RESOURCE,GUARD} exception
 */
static void
thread_exception_queue_invoke(mpsc_queue_chain_t elm,
    __assert_only mpsc_daemon_queue_t dq)
{
	struct thread_exception_elt *elt;
	task_t task;
	thread_t thread;
	exception_type_t etype;

	assert(dq == &thread_exception_queue);
	elt = mpsc_queue_element(elm, struct thread_exception_elt, link);

	etype = elt->exception_type;
	task = elt->exception_task;
	thread = elt->exception_thread;
	assert_thread_magic(thread);

	kfree_type(struct thread_exception_elt, elt);

	/* wait for all the threads in the task to terminate */
	task_lock(task);
	task_wait_till_threads_terminate_locked(task);
	task_unlock(task);

	/* Consumes the task ref returned by task_generate_corpse_internal */
	task_deallocate(task);
	/* Consumes the thread ref returned by task_generate_corpse_internal */
	thread_deallocate(thread);

	/* Deliver the notification, also clears the corpse. */
	task_deliver_crash_notification(task, thread, etype, 0);
}

/*
 *	thread_exception_enqueue:
 *
 *	Enqueue a corpse port to be delivered an EXC_{RESOURCE,GUARD}.
 */
void
thread_exception_enqueue(
	task_t task,
	thread_t thread,
	exception_type_t etype)
{
	assert(EXC_RESOURCE == etype || EXC_GUARD == etype);
	struct thread_exception_elt *elt = kalloc_type(struct thread_exception_elt, Z_WAITOK);
	elt->exception_type = etype;
	elt->exception_task = task;
	elt->exception_thread = thread;

	mpsc_daemon_enqueue(&thread_exception_queue, &elt->link,
	    MPSC_QUEUE_DISABLE_PREEMPTION);
}

/*
 *	thread_copy_resource_info
 *
 *	Copy the resource info counters from source
 *	thread to destination thread.
 */
void
thread_copy_resource_info(
	thread_t dst_thread,
	thread_t src_thread)
{
	dst_thread->c_switch = src_thread->c_switch;
	dst_thread->p_switch = src_thread->p_switch;
	dst_thread->ps_switch = src_thread->ps_switch;
	dst_thread->precise_user_kernel_time = src_thread->precise_user_kernel_time;
	dst_thread->user_timer = src_thread->user_timer;
	dst_thread->user_timer_save = src_thread->user_timer_save;
	dst_thread->system_timer = src_thread->system_timer;
	dst_thread->system_timer_save = src_thread->system_timer_save;
	dst_thread->runnable_timer = src_thread->runnable_timer;
	dst_thread->vtimer_user_save = src_thread->vtimer_user_save;
	dst_thread->vtimer_prof_save = src_thread->vtimer_prof_save;
	dst_thread->vtimer_rlim_save = src_thread->vtimer_rlim_save;
	dst_thread->vtimer_qos_save = src_thread->vtimer_qos_save;
	dst_thread->syscalls_unix = src_thread->syscalls_unix;
	dst_thread->syscalls_mach = src_thread->syscalls_mach;
	ledger_rollup(dst_thread->t_threadledger, src_thread->t_threadledger);
	*dst_thread->thread_io_stats = *src_thread->thread_io_stats;
}

static void
thread_terminate_queue_invoke(mpsc_queue_chain_t e,
    __assert_only mpsc_daemon_queue_t dq)
{
	thread_t thread = mpsc_queue_element(e, struct thread, mpsc_links);
	task_t task = get_threadtask(thread);

	assert(dq == &thread_terminate_queue);

	task_lock(task);

	/*
	 * If marked for crash reporting, skip reaping.
	 * The corpse delivery thread will clear the bit and enqueue
	 * the thread for reaping when done.
	 *
	 * Note: the inspection field is set under the task lock
	 *
	 * FIXME[mad]: why enqueue for termination before `inspection` is false?
	 */
	if (__improbable(thread->inspection)) {
		simple_lock(&crashed_threads_lock, &thread_lck_grp);
		task_unlock(task);

		enqueue_tail(&crashed_threads_queue, &thread->runq_links);
		simple_unlock(&crashed_threads_lock);
		return;
	}


	task->total_user_time += timer_grab(&thread->user_timer);
	task->total_ptime += timer_grab(&thread->ptime);
	task->total_runnable_time += timer_grab(&thread->runnable_timer);
	if (thread->precise_user_kernel_time) {
		task->total_system_time += timer_grab(&thread->system_timer);
	} else {
		task->total_user_time += timer_grab(&thread->system_timer);
	}

	task->c_switch += thread->c_switch;
	task->p_switch += thread->p_switch;
	task->ps_switch += thread->ps_switch;

	task->syscalls_unix += thread->syscalls_unix;
	task->syscalls_mach += thread->syscalls_mach;

	task->task_timer_wakeups_bin_1 += thread->thread_timer_wakeups_bin_1;
	task->task_timer_wakeups_bin_2 += thread->thread_timer_wakeups_bin_2;
	task->task_gpu_ns += ml_gpu_stat(thread);
	task->task_energy += ml_energy_stat(thread);
	task->decompressions += thread->decompressions;

#if MONOTONIC
	mt_terminate_update(task, thread);
#endif /* MONOTONIC */

	thread_update_qos_cpu_time(thread);

	queue_remove(&task->threads, thread, thread_t, task_threads);
	task->thread_count--;

	/*
	 * If the task is being halted, and there is only one thread
	 * left in the task after this one, then wakeup that thread.
	 */
	if (task->thread_count == 1 && task->halting) {
		thread_wakeup((event_t)&task->halting);
	}

	task_unlock(task);

	lck_mtx_lock(&tasks_threads_lock);
	queue_remove(&threads, thread, thread_t, threads);
	threads_count--;
	queue_enter(&terminated_threads, thread, thread_t, threads);
	terminated_threads_count++;
	lck_mtx_unlock(&tasks_threads_lock);

#if MACH_BSD
	/*
	 * The thread no longer counts against the task's thread count,
	 * we can now wake up any pending joiner.
	 *
	 * Note that the inheritor will be set to `thread` which is
	 * incorrect once it is on the termination queue, however
	 * the termination queue runs at MINPRI_KERNEL which is higher
	 * than any user thread, so this isn't a priority inversion.
	 */
	if (thread_get_tag(thread) & THREAD_TAG_USER_JOIN) {
		struct uthread *uth = get_bsdthread_info(thread);
		mach_port_name_t kport = uthread_joiner_port(uth);

		/*
		 * Clear the port's low two bits to tell pthread that the thread is gone.
		 */
#ifndef NO_PORT_GEN
		kport &= ~MACH_PORT_MAKE(0, IE_BITS_GEN_MASK + IE_BITS_GEN_ONE);
#else
		kport |= MACH_PORT_MAKE(0, ~(IE_BITS_GEN_MASK + IE_BITS_GEN_ONE));
#endif
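		/*
		 * Publish the mangled name back to the joiner's known
		 * address; pthread reads it to observe that this thread
		 * is gone.
		 */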
		(void)copyoutmap_atomic32(task->map, kport,
		    uthread_joiner_address(uth));
		uthread_joiner_wake(task, uth);
	}
#endif

	thread_deallocate(thread);
}

static void
thread_deallocate_queue_invoke(mpsc_queue_chain_t e,
    __assert_only mpsc_daemon_queue_t dq)
{
	thread_t thread = mpsc_queue_element(e, struct thread, mpsc_links);

	assert(dq == &thread_deallocate_queue);

	thread_deallocate_complete(thread);
}

/*
 *	thread_terminate_enqueue:
 *
 *	Enqueue a terminating thread for final disposition.
 *
 *	Called at splsched.
 */
void
thread_terminate_enqueue(
	thread_t thread)
{
	KDBG_RELEASE(TRACE_DATA_THREAD_TERMINATE, thread->thread_id);

	mpsc_daemon_enqueue(&thread_terminate_queue, &thread->mpsc_links,
	    MPSC_QUEUE_DISABLE_PREEMPTION);
}

/*
 *	thread_deallocate_enqueue:
 *
 *	Enqueue a thread for final deallocation.
 */
static void
thread_deallocate_enqueue(
	thread_t thread)
{
	mpsc_daemon_enqueue(&thread_deallocate_queue, &thread->mpsc_links,
	    MPSC_QUEUE_DISABLE_PREEMPTION);
}

/*
 *	thread_terminate_crashed_threads:
 *	Walk the list of crashed threads and re-enqueue for termination
 *	any that are no longer being inspected.
 */
void
thread_terminate_crashed_threads(void)
{
	thread_t th_remove;

	simple_lock(&crashed_threads_lock, &thread_lck_grp);
	/*
	 * Loop through the crashed threads queue to put back any threads
	 * that are not being inspected anymore.
	 */

	qe_foreach_element_safe(th_remove, &crashed_threads_queue, runq_links) {
		/* make sure current_thread is never in crashed queue */
		assert(th_remove != current_thread());

		if (th_remove->inspection == FALSE) {
			remqueue(&th_remove->runq_links);
			mpsc_daemon_enqueue(&thread_terminate_queue, &th_remove->mpsc_links,
			    MPSC_QUEUE_NONE);
		}
	}

	simple_unlock(&crashed_threads_lock);
}

/*
 *	thread_stack_queue_invoke:
 *
 *	Perform stack allocation as required due to
 *	invoke failures.
 */
static void
thread_stack_queue_invoke(mpsc_queue_chain_t elm,
    __assert_only mpsc_daemon_queue_t dq)
{
	thread_t thread = mpsc_queue_element(elm, struct thread, mpsc_links);

	assert(dq == &thread_stack_queue);

	/* allocate stack with interrupts enabled so that we can call into VM */
	stack_alloc(thread);

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_WAIT) | DBG_FUNC_END, thread_tid(thread), 0, 0, 0, 0);

	spl_t s = splsched();
	thread_lock(thread);
	thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
	thread_unlock(thread);
	splx(s);
}

/*
 *	thread_stack_enqueue:
 *
 *	Enqueue a thread for stack allocation.
 *
 *	Called at splsched.
 */
void
thread_stack_enqueue(
	thread_t thread)
{
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_WAIT) | DBG_FUNC_START, thread_tid(thread), 0, 0, 0, 0);
	assert_thread_magic(thread);

	mpsc_daemon_enqueue(&thread_stack_queue, &thread->mpsc_links,
	    MPSC_QUEUE_DISABLE_PREEMPTION);
}

void
thread_daemon_init(void)
{
	kern_return_t result;

	thread_deallocate_daemon_init();

	thread_deallocate_daemon_register_queue(&thread_terminate_queue,
	    thread_terminate_queue_invoke);

	thread_deallocate_daemon_register_queue(&thread_deallocate_queue,
	    thread_deallocate_queue_invoke);

	hazard_register_mpsc_queue();

	ipc_object_deallocate_register_queue();

	simple_lock_init(&crashed_threads_lock, 0);
	queue_init(&crashed_threads_queue);

	result = mpsc_daemon_queue_init_with_thread(&thread_stack_queue,
	    thread_stack_queue_invoke, BASEPRI_PREEMPT_HIGH,
	    "daemon.thread-stack", MPSC_DAEMON_INIT_NONE);
	if (result != KERN_SUCCESS) {
		panic("thread_daemon_init: thread_stack_daemon");
	}

	result = mpsc_daemon_queue_init_with_thread(&thread_exception_queue,
	    thread_exception_queue_invoke, MINPRI_KERNEL,
	    "daemon.thread-exception", MPSC_DAEMON_INIT_NONE);
	if (result != KERN_SUCCESS) {
		panic("thread_daemon_init: thread_exception_daemon");
	}
}

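/*
 * Options for thread_create_internal(), as used below: TH_OPTION_NOSUSP
 * fails creation while the target task is suspended, TH_OPTION_WORKQ marks
 * a workqueue thread parked at creation, and TH_OPTION_MAINTHREAD denotes
 * a task's main thread (its control port is made pinned and immovable).
 */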
__options_decl(thread_create_internal_options_t, uint32_t, {
	TH_OPTION_NONE = 0x00,
	TH_OPTION_NOSUSP = 0x02,
	TH_OPTION_WORKQ = 0x04,
	TH_OPTION_MAINTHREAD = 0x08,
});

void
main_thread_set_immovable_pinned(thread_t thread)
{
	ipc_main_thread_set_immovable_pinned(thread);
}

/*
 * Create a new thread.
 * Doesn't start the thread running.
 *
 * Task and tasks_threads_lock are returned locked on success.
 */
static kern_return_t
thread_create_internal(
	task_t parent_task,
	integer_t priority,
	thread_continue_t continuation,
	void *parameter,
	thread_create_internal_options_t options,
	thread_t *out_thread)
{
	thread_t new_thread;
	ipc_thread_init_options_t init_options = IPC_THREAD_INIT_NONE;
	struct thread_ro tro_tpl = { };
	bool first_thread = false;

	/*
	 * Allocate a thread and initialize static fields
	 */
	new_thread = zalloc_flags(thread_zone, Z_WAITOK | Z_NOFAIL);

	if (__improbable(current_thread() == &init_thread)) {
		/*
		 * The first thread ever is a global, but because we want to be
		 * able to zone_id_require() threads, we have to stop using the
		 * global piece of memory we used to bootstrap the kernel and
		 * jump to a proper thread from a zone.
		 *
		 * This is why that one thread will inherit its original
		 * state differently.
		 *
		 * Also remember this thread in `vm_pageout_scan_thread`
		 * as this is what the first thread ever becomes.
		 *
		 * Also pre-warm the depress timer since the VM pageout scan
		 * daemon might need to use it.
		 */
		assert(vm_pageout_scan_thread == THREAD_NULL);
		vm_pageout_scan_thread = new_thread;

		first_thread = true;
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wnontrivial-memaccess"
		/* work around 74481146 */
		memcpy(new_thread, &init_thread, sizeof(*new_thread));
#pragma clang diagnostic pop
	} else {
		init_thread_from_template(new_thread);
	}

	if (options & TH_OPTION_MAINTHREAD) {
		init_options |= IPC_THREAD_INIT_MAINTHREAD;
	}

	os_ref_init_count_raw(&new_thread->ref_count, &thread_refgrp, 2);
	machine_thread_create(new_thread, parent_task, first_thread);

#ifdef MACH_BSD
	uthread_init(parent_task, get_bsdthread_info(new_thread),
	    &tro_tpl, (options & TH_OPTION_WORKQ) != 0);
	if (!is_corpsetask(parent_task)) {
		/*
		 * uthread_init will set tro_cred (with a +1)
		 * and tro_proc for live tasks.
		 */
		assert(tro_tpl.tro_cred && tro_tpl.tro_proc);
	}
#endif /* MACH_BSD */

	thread_lock_init(new_thread);
	wake_lock_init(new_thread);

	lck_mtx_init(&new_thread->mutex, &thread_lck_grp, LCK_ATTR_NULL);

	ipc_thread_init(parent_task, new_thread, &tro_tpl, init_options);

	thread_ro_create(parent_task, new_thread, &tro_tpl);

	new_thread->continuation = continuation;
	new_thread->parameter = parameter;
	new_thread->inheritor_flags = TURNSTILE_UPDATE_FLAGS_NONE;
	new_thread->requested_policy = default_thread_requested_policy;
	priority_queue_init(&new_thread->sched_inheritor_queue);
	priority_queue_init(&new_thread->base_inheritor_queue);
#if CONFIG_SCHED_CLUTCH
	priority_queue_entry_init(&new_thread->th_clutch_runq_link);
	priority_queue_entry_init(&new_thread->th_clutch_pri_link);
#endif /* CONFIG_SCHED_CLUTCH */

#if CONFIG_SCHED_EDGE
	new_thread->th_bound_cluster_enqueued = false;
	for (cluster_shared_rsrc_type_t shared_rsrc_type = CLUSTER_SHARED_RSRC_TYPE_MIN; shared_rsrc_type < CLUSTER_SHARED_RSRC_TYPE_COUNT; shared_rsrc_type++) {
		new_thread->th_shared_rsrc_enqueued[shared_rsrc_type] = false;
		new_thread->th_shared_rsrc_heavy_user[shared_rsrc_type] = false;
		new_thread->th_shared_rsrc_heavy_perf_control[shared_rsrc_type] = false;
	}
#endif /* CONFIG_SCHED_EDGE */
	new_thread->th_bound_cluster_id = THREAD_BOUND_CLUSTER_NONE;

	/* Allocate I/O Statistics structure */
	new_thread->thread_io_stats = kalloc_data(sizeof(struct io_stat_info),
	    Z_WAITOK | Z_ZERO | Z_NOFAIL);

#if KASAN
	kasan_init_thread(&new_thread->kasan_data);
#endif

#if CONFIG_KCOV
	kcov_init_thread(&new_thread->kcov_data);
#endif

#if CONFIG_IOSCHED
	/* Clear out the I/O Scheduling info for AppleFSCompression */
	new_thread->decmp_upl = NULL;
#endif /* CONFIG_IOSCHED */

	new_thread->thread_region_page_shift = 0;

#if DEVELOPMENT || DEBUG
	task_lock(parent_task);
	uint16_t thread_limit = parent_task->task_thread_limit;
	if (exc_resource_threads_enabled &&
	    thread_limit > 0 &&
	    parent_task->thread_count >= thread_limit &&
	    !parent_task->task_has_crossed_thread_limit &&
	    !(parent_task->t_flags & TF_CORPSE)) {
		int thread_count = parent_task->thread_count;
		parent_task->task_has_crossed_thread_limit = TRUE;
		task_unlock(parent_task);
		SENDING_NOTIFICATION__TASK_HAS_TOO_MANY_THREADS(parent_task, thread_count);
	} else {
		task_unlock(parent_task);
	}
#endif

	lck_mtx_lock(&tasks_threads_lock);
	task_lock(parent_task);

	/*
	 * Fail thread creation if the parent task is being torn down or has
	 * too many threads. If the caller asked for TH_OPTION_NOSUSP, also
	 * fail if the parent task is suspended.
	 */
	if (parent_task->active == 0 || parent_task->halting ||
	    (parent_task->suspend_count > 0 && (options & TH_OPTION_NOSUSP) != 0) ||
	    (parent_task->thread_count >= task_threadmax && parent_task != kernel_task)) {
		task_unlock(parent_task);
		lck_mtx_unlock(&tasks_threads_lock);

#ifdef MACH_BSD
		{
			struct uthread *ut = get_bsdthread_info(new_thread);

			uthread_cleanup(ut, &tro_tpl);
			uthread_destroy(ut);
		}
#endif /* MACH_BSD */
		ipc_thread_disable(new_thread);
		ipc_thread_terminate(new_thread);
		kfree_data(new_thread->thread_io_stats,
		    sizeof(struct io_stat_info));
		lck_mtx_destroy(&new_thread->mutex, &thread_lck_grp);
		machine_thread_destroy(new_thread);
		thread_ro_destroy(new_thread);
		zfree(thread_zone, new_thread);
		return KERN_FAILURE;
	}

	/* Protected by the tasks_threads_lock */
	new_thread->thread_id = ++thread_unique_id;

	/* New threads inherit any default state on the task */
	machine_thread_inherit_taskwide(new_thread, parent_task);

	task_reference_grp(parent_task, TASK_GRP_INTERNAL);

	if (parent_task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
		/*
		 * This task has a per-thread CPU limit; make sure this new thread
		 * gets its limit set too, before it gets out of the kernel.
		 */
		act_set_astledger(new_thread);
	}

	/* Instantiate a thread ledger. Do not fail thread creation if ledger creation fails. */
	if ((new_thread->t_threadledger = ledger_instantiate(thread_ledger_template,
	    LEDGER_CREATE_INACTIVE_ENTRIES)) != LEDGER_NULL) {
		ledger_entry_setactive(new_thread->t_threadledger, thread_ledgers.cpu_time);
	}

	new_thread->t_bankledger = LEDGER_NULL;
	new_thread->t_deduct_bank_ledger_time = 0;
	new_thread->t_deduct_bank_ledger_energy = 0;

	new_thread->t_ledger = parent_task->ledger;
	if (new_thread->t_ledger) {
		ledger_reference(new_thread->t_ledger);
	}

#if defined(CONFIG_SCHED_MULTIQ)
	/* Cache the task's sched_group */
	new_thread->sched_group = parent_task->sched_group;
#endif /* defined(CONFIG_SCHED_MULTIQ) */

	/* Cache the task's map */
	new_thread->map = parent_task->map;

	new_thread->depress_timer = timer_call_alloc(thread_depress_expire, new_thread);
	new_thread->wait_timer = timer_call_alloc(thread_timer_expire, new_thread);

#if KPC
	kpc_thread_create(new_thread);
#endif

	/* Set the thread's scheduling parameters */
	new_thread->sched_mode = SCHED(initial_thread_sched_mode)(parent_task);
	new_thread->max_priority = parent_task->max_priority;
	new_thread->task_priority = parent_task->priority;

#if CONFIG_THREAD_GROUPS
	thread_group_init_thread(new_thread, parent_task);
#endif /* CONFIG_THREAD_GROUPS */

	int new_priority = (priority < 0) ? parent_task->priority : priority;
	if (new_priority > new_thread->max_priority) {
		new_priority = new_thread->max_priority;
	}
#if !defined(XNU_TARGET_OS_OSX)
	if (new_priority < MAXPRI_THROTTLE) {
		new_priority = MAXPRI_THROTTLE;
	}
#endif /* !defined(XNU_TARGET_OS_OSX) */

	new_thread->importance = new_priority - new_thread->task_priority;

	sched_set_thread_base_priority(new_thread, new_priority);

#if defined(CONFIG_SCHED_TIMESHARE_CORE)
	new_thread->sched_stamp = sched_tick;
#if CONFIG_SCHED_CLUTCH
	new_thread->pri_shift = sched_clutch_thread_pri_shift(new_thread, new_thread->th_sched_bucket);
#else /* CONFIG_SCHED_CLUTCH */
	new_thread->pri_shift = sched_pri_shifts[new_thread->th_sched_bucket];
#endif /* CONFIG_SCHED_CLUTCH */
#endif /* defined(CONFIG_SCHED_TIMESHARE_CORE) */

	if (parent_task->max_priority <= MAXPRI_THROTTLE) {
		sched_thread_mode_demote(new_thread, TH_SFLAG_THROTTLED);
	}

	thread_policy_create(new_thread);

	/* Chain the thread onto the task's list */
	queue_enter(&parent_task->threads, new_thread, thread_t, task_threads);
	parent_task->thread_count++;

	/* So terminating threads don't need to take the task lock to decrement */
	os_atomic_inc(&parent_task->active_thread_count, relaxed);

	queue_enter(&threads, new_thread, thread_t, threads);
	threads_count++;

	new_thread->active = TRUE;
	if (task_is_a_corpse_fork(parent_task)) {
		/* Set the inspection bit if the task is a corpse fork */
		new_thread->inspection = TRUE;
	} else {
		new_thread->inspection = FALSE;
	}
	new_thread->corpse_dup = FALSE;
	new_thread->turnstile = turnstile_alloc();


	*out_thread = new_thread;

	if (kdebug_enable) {
		long args[4] = {};

		kdbg_trace_data(parent_task->bsd_info, &args[1], &args[3]);

		/*
		 * Starting with 26604425, exec'ing creates a new task/thread.
		 *
		 * NEWTHREAD in the current process has two possible meanings:
		 *
		 * 1) Create a new thread for this process.
		 * 2) Create a new thread for the future process this will become in an
		 *    exec.
		 *
		 * To disambiguate these, arg3 will be set to TRUE for case #2.
		 *
		 * The value we need to find (TPF_EXEC_COPY) is stable in the case of a
		 * task exec'ing. The read of t_procflags does not take the proc_lock.
		 */
		args[2] = task_is_exec_copy(parent_task) ? 1 : 0;

		KDBG_RELEASE(TRACE_DATA_NEWTHREAD, (uintptr_t)thread_tid(new_thread),
		    args[1], args[2], args[3]);

		kdbg_trace_string(parent_task->bsd_info, &args[0], &args[1],
		    &args[2], &args[3]);
		KDBG_RELEASE(TRACE_STRING_NEWTHREAD, args[0], args[1], args[2],
		    args[3]);
	}

	DTRACE_PROC1(lwp__create, thread_t, *out_thread);

	return KERN_SUCCESS;
}

static kern_return_t
thread_create_with_options_internal(
	task_t task,
	thread_t *new_thread,
	boolean_t from_user,
	thread_create_internal_options_t options,
	thread_continue_t continuation)
{
	kern_return_t result;
	thread_t thread;

	if (task == TASK_NULL || task == kernel_task) {
		return KERN_INVALID_ARGUMENT;
	}

#if CONFIG_MACF
	if (from_user && current_task() != task &&
	    mac_proc_check_remote_thread_create(task, -1, NULL, 0) != 0) {
		return KERN_DENIED;
	}
#endif

	result = thread_create_internal(task, -1, continuation, NULL, options, &thread);
	if (result != KERN_SUCCESS) {
		return result;
	}

	thread->user_stop_count = 1;
	thread_hold(thread);
	if (task->suspend_count > 0) {
		thread_hold(thread);
	}

	if (from_user) {
		extmod_statistics_incr_thread_create(task);
	}

	task_unlock(task);
	lck_mtx_unlock(&tasks_threads_lock);

	*new_thread = thread;

	return KERN_SUCCESS;
}

kern_return_t
thread_create_immovable(
	task_t task,
	thread_t *new_thread)
{
	return thread_create_with_options_internal(task, new_thread, FALSE,
	           TH_OPTION_NONE, (thread_continue_t)thread_bootstrap_return);
}

kern_return_t
thread_create_from_user(
	task_t task,
	thread_t *new_thread)
{
	/* All thread ports are created immovable by default */
	return thread_create_with_options_internal(task, new_thread, TRUE, TH_OPTION_NONE,
	           (thread_continue_t)thread_bootstrap_return);
}

kern_return_t
thread_create_with_continuation(
	task_t task,
	thread_t *new_thread,
	thread_continue_t continuation)
{
	return thread_create_with_options_internal(task, new_thread, FALSE, TH_OPTION_NONE, continuation);
}

/*
 * Create a thread that is already started, but is waiting on an event
 */
static kern_return_t
thread_create_waiting_internal(
	task_t task,
	thread_continue_t continuation,
	event_t event,
	block_hint_t block_hint,
	thread_create_internal_options_t options,
	thread_t *new_thread)
{
	kern_return_t result;
	thread_t thread;

	if (task == TASK_NULL || task == kernel_task) {
		return KERN_INVALID_ARGUMENT;
	}

	result = thread_create_internal(task, -1, continuation, NULL,
	    options, &thread);
	if (result != KERN_SUCCESS) {
		return result;
	}

	/* note no user_stop_count or thread_hold here */

	if (task->suspend_count > 0) {
		thread_hold(thread);
	}

	thread_mtx_lock(thread);
	thread_set_pending_block_hint(thread, block_hint);
	if (options & TH_OPTION_WORKQ) {
		thread->static_param = true;
		event = workq_thread_init_and_wq_lock(task, thread);
	}
	thread_start_in_assert_wait(thread, event, THREAD_INTERRUPTIBLE);
	thread_mtx_unlock(thread);

	task_unlock(task);
	lck_mtx_unlock(&tasks_threads_lock);

	*new_thread = thread;

	return KERN_SUCCESS;
}

kern_return_t
main_thread_create_waiting(
	task_t task,
	thread_continue_t continuation,
	event_t event,
	thread_t *new_thread)
{
	return thread_create_waiting_internal(task, continuation, event,
	           kThreadWaitNone, TH_OPTION_MAINTHREAD, new_thread);
}


static kern_return_t
thread_create_running_internal2(
	task_t task,
	int flavor,
	thread_state_t new_state,
	mach_msg_type_number_t new_state_count,
	thread_t *new_thread,
	boolean_t from_user)
{
	kern_return_t result;
	thread_t thread;

	if (task == TASK_NULL || task == kernel_task) {
		return KERN_INVALID_ARGUMENT;
	}

#if CONFIG_MACF
	if (from_user && current_task() != task &&
	    mac_proc_check_remote_thread_create(task, flavor, new_state, new_state_count) != 0) {
		return KERN_DENIED;
	}
#endif

	result = thread_create_internal(task, -1,
	    (thread_continue_t)thread_bootstrap_return, NULL,
	    TH_OPTION_NONE, &thread);
	if (result != KERN_SUCCESS) {
		return result;
	}

	if (task->suspend_count > 0) {
		thread_hold(thread);
	}

	if (from_user) {
		result = machine_thread_state_convert_from_user(thread, flavor,
		    new_state, new_state_count, NULL, 0, TSSF_FLAGS_NONE);
	}
	if (result == KERN_SUCCESS) {
		result = machine_thread_set_state(thread, flavor, new_state,
		    new_state_count);
	}
	if (result != KERN_SUCCESS) {
		task_unlock(task);
		lck_mtx_unlock(&tasks_threads_lock);

		thread_terminate(thread);
		thread_deallocate(thread);
		return result;
	}

	thread_mtx_lock(thread);
	thread_start(thread);
	thread_mtx_unlock(thread);

	if (from_user) {
		extmod_statistics_incr_thread_create(task);
	}

	task_unlock(task);
	lck_mtx_unlock(&tasks_threads_lock);

	*new_thread = thread;

	return result;
}

/* Prototype, see justification above */
kern_return_t
thread_create_running(
	task_t task,
	int flavor,
	thread_state_t new_state,
	mach_msg_type_number_t new_state_count,
	thread_t *new_thread);

kern_return_t
thread_create_running(
	task_t task,
	int flavor,
	thread_state_t new_state,
	mach_msg_type_number_t new_state_count,
	thread_t *new_thread)
{
	return thread_create_running_internal2(
		task, flavor, new_state, new_state_count,
		new_thread, FALSE);
}

kern_return_t
thread_create_running_from_user(
	task_t task,
	int flavor,
	thread_state_t new_state,
	mach_msg_type_number_t new_state_count,
	thread_t *new_thread)
{
	return thread_create_running_internal2(
		task, flavor, new_state, new_state_count,
		new_thread, TRUE);
}

kern_return_t
thread_create_workq_waiting(
	task_t task,
	thread_continue_t continuation,
	thread_t *new_thread)
{
	/*
	 * Create thread, but don't pin control port just yet, in case someone calls
	 * task_threads() and deallocates pinned port before kernel copyout happens,
	 * which will result in pinned port guard exception. Instead, pin and copyout
	 * atomically during workq_setup_and_run().
	 */
	int options = TH_OPTION_NOSUSP | TH_OPTION_WORKQ;
	return thread_create_waiting_internal(task, continuation, NULL,
	           kThreadWaitParkedWorkQueue, options, new_thread);
}

1753 /*
1754 * kernel_thread_create:
1755 *
1756 * Create a thread in the kernel task
1757 * to execute in kernel context.
1758 */
1759 kern_return_t
kernel_thread_create(thread_continue_t continuation,void * parameter,integer_t priority,thread_t * new_thread)1760 kernel_thread_create(
1761 thread_continue_t continuation,
1762 void *parameter,
1763 integer_t priority,
1764 thread_t *new_thread)
1765 {
1766 kern_return_t result;
1767 thread_t thread;
1768 task_t task = kernel_task;
1769
1770 result = thread_create_internal(task, priority, continuation, parameter,
1771 TH_OPTION_NONE, &thread);
1772 if (result != KERN_SUCCESS) {
1773 return result;
1774 }
1775
1776 task_unlock(task);
1777 lck_mtx_unlock(&tasks_threads_lock);
1778
1779 stack_alloc(thread);
1780 assert(thread->kernel_stack != 0);
1781 #if !defined(XNU_TARGET_OS_OSX)
1782 if (priority > BASEPRI_KERNEL)
1783 #endif
1784 thread->reserved_stack = thread->kernel_stack;
1785
1786 if (debug_task & 1) {
1787 kprintf("kernel_thread_create: thread = %p continuation = %p\n", thread, continuation);
1788 }
1789 *new_thread = thread;
1790
1791 return result;
1792 }
1793
1794 kern_return_t
1795 kernel_thread_start_priority(
1796 thread_continue_t continuation,
1797 void *parameter,
1798 integer_t priority,
1799 thread_t *new_thread)
1800 {
1801 kern_return_t result;
1802 thread_t thread;
1803
1804 result = kernel_thread_create(continuation, parameter, priority, &thread);
1805 if (result != KERN_SUCCESS) {
1806 return result;
1807 }
1808
1809 *new_thread = thread;
1810
1811 thread_mtx_lock(thread);
1812 thread_start(thread);
1813 thread_mtx_unlock(thread);
1814
1815 return result;
1816 }
1817
1818 kern_return_t
1819 kernel_thread_start(
1820 thread_continue_t continuation,
1821 void *parameter,
1822 thread_t *new_thread)
1823 {
1824 return kernel_thread_start_priority(continuation, parameter, -1, new_thread);
1825 }
1826
1827 /* Separated into helper function so it can be used by THREAD_BASIC_INFO and THREAD_EXTENDED_INFO */
1828 /* it is assumed that the thread is locked by the caller */
1829 static void
1830 retrieve_thread_basic_info(thread_t thread, thread_basic_info_t basic_info)
1831 {
1832 int state, flags;
1833
1834 /* fill in info */
1835
1836 thread_read_times(thread, &basic_info->user_time,
1837 &basic_info->system_time, NULL);
1838
1839 /*
1840 * Update lazy-evaluated scheduler info because someone wants it.
1841 */
1842 if (SCHED(can_update_priority)(thread)) {
1843 SCHED(update_priority)(thread);
1844 }
1845
1846 basic_info->sleep_time = 0;
1847
1848 /*
1849 * To calculate cpu_usage, first correct for timer rate,
1850 * then for 5/8 ageing. The correction factor [3/5] is
1851 * (1/(5/8) - 1).
1852 */
1853 basic_info->cpu_usage = 0;
1854 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
1855 if (sched_tick_interval) {
1856 basic_info->cpu_usage = (integer_t)(((uint64_t)thread->cpu_usage
1857 * TH_USAGE_SCALE) / sched_tick_interval);
1858 basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
1859 }
1860 #endif
1861
1862 if (basic_info->cpu_usage > TH_USAGE_SCALE) {
1863 basic_info->cpu_usage = TH_USAGE_SCALE;
1864 }
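/*
 * Worked sketch of the steady state behind the 3/5 correction above: with
 * 5/8 ageing, a continuously busy thread's aged usage converges to u where
 * u = (u + tick) * 5/8, i.e. u = (5/3) * tick. After scaling by
 * TH_USAGE_SCALE, multiplying by 3/5 maps that steady-state value back to
 * exactly TH_USAGE_SCALE, i.e. 100% reported usage for a fully busy thread.
 */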
1865
1866 basic_info->policy = ((thread->sched_mode == TH_MODE_TIMESHARE)?
1867 POLICY_TIMESHARE: POLICY_RR);
1868
1869 flags = 0;
1870 if (thread->options & TH_OPT_IDLE_THREAD) {
1871 flags |= TH_FLAGS_IDLE;
1872 }
1873
1874 if (thread->options & TH_OPT_GLOBAL_FORCED_IDLE) {
1875 flags |= TH_FLAGS_GLOBAL_FORCED_IDLE;
1876 }
1877
1878 if (!thread->kernel_stack) {
1879 flags |= TH_FLAGS_SWAPPED;
1880 }
1881
1882 state = 0;
1883 if (thread->state & TH_TERMINATE) {
1884 state = TH_STATE_HALTED;
1885 } else if (thread->state & TH_RUN) {
1886 state = TH_STATE_RUNNING;
1887 } else if (thread->state & TH_UNINT) {
1888 state = TH_STATE_UNINTERRUPTIBLE;
1889 } else if (thread->state & TH_SUSP) {
1890 state = TH_STATE_STOPPED;
1891 } else if (thread->state & TH_WAIT) {
1892 state = TH_STATE_WAITING;
1893 }
1894
1895 basic_info->run_state = state;
1896 basic_info->flags = flags;
1897
1898 basic_info->suspend_count = thread->user_stop_count;
1899
1900 return;
1901 }
1902
1903 kern_return_t
1904 thread_info_internal(
1905 thread_t thread,
1906 thread_flavor_t flavor,
1907 thread_info_t thread_info_out, /* ptr to OUT array */
1908 mach_msg_type_number_t *thread_info_count) /*IN/OUT*/
1909 {
1910 spl_t s;
1911
1912 if (thread == THREAD_NULL) {
1913 return KERN_INVALID_ARGUMENT;
1914 }
1915
1916 if (flavor == THREAD_BASIC_INFO) {
1917 if (*thread_info_count < THREAD_BASIC_INFO_COUNT) {
1918 return KERN_INVALID_ARGUMENT;
1919 }
1920
1921 s = splsched();
1922 thread_lock(thread);
1923
1924 retrieve_thread_basic_info(thread, (thread_basic_info_t) thread_info_out);
1925
1926 thread_unlock(thread);
1927 splx(s);
1928
1929 *thread_info_count = THREAD_BASIC_INFO_COUNT;
1930
1931 return KERN_SUCCESS;
1932 } else if (flavor == THREAD_IDENTIFIER_INFO) {
1933 thread_identifier_info_t identifier_info;
1934
1935 if (*thread_info_count < THREAD_IDENTIFIER_INFO_COUNT) {
1936 return KERN_INVALID_ARGUMENT;
1937 }
1938
1939 identifier_info = __IGNORE_WCASTALIGN((thread_identifier_info_t)thread_info_out);
1940
1941 s = splsched();
1942 thread_lock(thread);
1943
1944 identifier_info->thread_id = thread->thread_id;
1945 identifier_info->thread_handle = thread->machine.cthread_self;
1946 identifier_info->dispatch_qaddr = thread_dispatchqaddr(thread);
1947
1948 thread_unlock(thread);
1949 splx(s);
1950 return KERN_SUCCESS;
1951 } else if (flavor == THREAD_SCHED_TIMESHARE_INFO) {
1952 policy_timeshare_info_t ts_info;
1953
1954 if (*thread_info_count < POLICY_TIMESHARE_INFO_COUNT) {
1955 return KERN_INVALID_ARGUMENT;
1956 }
1957
1958 ts_info = (policy_timeshare_info_t)thread_info_out;
1959
1960 s = splsched();
1961 thread_lock(thread);
1962
1963 if (thread->sched_mode != TH_MODE_TIMESHARE) {
1964 thread_unlock(thread);
1965 splx(s);
1966 return KERN_INVALID_POLICY;
1967 }
1968
1969 ts_info->depressed = (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) != 0;
1970 if (ts_info->depressed) {
1971 ts_info->base_priority = DEPRESSPRI;
1972 ts_info->depress_priority = thread->base_pri;
1973 } else {
1974 ts_info->base_priority = thread->base_pri;
1975 ts_info->depress_priority = -1;
1976 }
1977
1978 ts_info->cur_priority = thread->sched_pri;
1979 ts_info->max_priority = thread->max_priority;
1980
1981 thread_unlock(thread);
1982 splx(s);
1983
1984 *thread_info_count = POLICY_TIMESHARE_INFO_COUNT;
1985
1986 return KERN_SUCCESS;
1987 } else if (flavor == THREAD_SCHED_FIFO_INFO) {
1988 if (*thread_info_count < POLICY_FIFO_INFO_COUNT) {
1989 return KERN_INVALID_ARGUMENT;
1990 }
1991
1992 return KERN_INVALID_POLICY;
1993 } else if (flavor == THREAD_SCHED_RR_INFO) {
1994 policy_rr_info_t rr_info;
1995 uint32_t quantum_time;
1996 uint64_t quantum_ns;
1997
1998 if (*thread_info_count < POLICY_RR_INFO_COUNT) {
1999 return KERN_INVALID_ARGUMENT;
2000 }
2001
2002 rr_info = (policy_rr_info_t) thread_info_out;
2003
2004 s = splsched();
2005 thread_lock(thread);
2006
2007 if (thread->sched_mode == TH_MODE_TIMESHARE) {
2008 thread_unlock(thread);
2009 splx(s);
2010
2011 return KERN_INVALID_POLICY;
2012 }
2013
2014 rr_info->depressed = (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) != 0;
2015 if (rr_info->depressed) {
2016 rr_info->base_priority = DEPRESSPRI;
2017 rr_info->depress_priority = thread->base_pri;
2018 } else {
2019 rr_info->base_priority = thread->base_pri;
2020 rr_info->depress_priority = -1;
2021 }
2022
2023 quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
2024 absolutetime_to_nanoseconds(quantum_time, &quantum_ns);
2025
2026 rr_info->max_priority = thread->max_priority;
2027 rr_info->quantum = (uint32_t)(quantum_ns / 1000 / 1000);
2028
2029 thread_unlock(thread);
2030 splx(s);
2031
2032 *thread_info_count = POLICY_RR_INFO_COUNT;
2033
2034 return KERN_SUCCESS;
2035 } else if (flavor == THREAD_EXTENDED_INFO) {
2036 thread_basic_info_data_t basic_info;
2037 thread_extended_info_t extended_info = __IGNORE_WCASTALIGN((thread_extended_info_t)thread_info_out);
2038
2039 if (*thread_info_count < THREAD_EXTENDED_INFO_COUNT) {
2040 return KERN_INVALID_ARGUMENT;
2041 }
2042
2043 s = splsched();
2044 thread_lock(thread);
2045
2046 /* NOTE: This mimics fill_taskthreadinfo(), which is the function used by proc_pidinfo() for
2047 * the PROC_PIDTHREADINFO flavor (which can't be used on corpses)
2048 */
2049 retrieve_thread_basic_info(thread, &basic_info);
2050 extended_info->pth_user_time = (((uint64_t)basic_info.user_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.user_time.microseconds * NSEC_PER_USEC));
2051 extended_info->pth_system_time = (((uint64_t)basic_info.system_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.system_time.microseconds * NSEC_PER_USEC));
2052
2053 extended_info->pth_cpu_usage = basic_info.cpu_usage;
2054 extended_info->pth_policy = basic_info.policy;
2055 extended_info->pth_run_state = basic_info.run_state;
2056 extended_info->pth_flags = basic_info.flags;
2057 extended_info->pth_sleep_time = basic_info.sleep_time;
2058 extended_info->pth_curpri = thread->sched_pri;
2059 extended_info->pth_priority = thread->base_pri;
2060 extended_info->pth_maxpriority = thread->max_priority;
2061
2062 bsd_getthreadname(get_bsdthread_info(thread), extended_info->pth_name);
2063
2064 thread_unlock(thread);
2065 splx(s);
2066
2067 *thread_info_count = THREAD_EXTENDED_INFO_COUNT;
2068
2069 return KERN_SUCCESS;
2070 } else if (flavor == THREAD_DEBUG_INFO_INTERNAL) {
2071 #if DEVELOPMENT || DEBUG
2072 thread_debug_info_internal_t dbg_info;
2073 if (*thread_info_count < THREAD_DEBUG_INFO_INTERNAL_COUNT) {
2074 return KERN_NOT_SUPPORTED;
2075 }
2076
2077 if (thread_info_out == NULL) {
2078 return KERN_INVALID_ARGUMENT;
2079 }
2080
2081 dbg_info = __IGNORE_WCASTALIGN((thread_debug_info_internal_t)thread_info_out);
2082 dbg_info->page_creation_count = thread->t_page_creation_count;
2083
2084 *thread_info_count = THREAD_DEBUG_INFO_INTERNAL_COUNT;
2085 return KERN_SUCCESS;
2086 #endif /* DEVELOPMENT || DEBUG */
2087 return KERN_NOT_SUPPORTED;
2088 }
2089
2090 return KERN_INVALID_ARGUMENT;
2091 }
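/*
 * A minimal user-level sketch of driving the THREAD_BASIC_INFO path above
 * via the Mach thread_info() call (illustrative; assumes <mach/mach.h>):
 *
 *	thread_basic_info_data_t info;
 *	mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;
 *	kern_return_t kr = thread_info(mach_thread_self(), THREAD_BASIC_INFO,
 *	    (thread_info_t)&info, &count);
 *	// on success, info.user_time, info.system_time and info.cpu_usage
 *	// (scaled by TH_USAGE_SCALE) were filled in by the code above
 */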
2092
2093 void
2094 thread_read_times(
2095 thread_t thread,
2096 time_value_t *user_time,
2097 time_value_t *system_time,
2098 time_value_t *runnable_time)
2099 {
2100 clock_sec_t secs;
2101 clock_usec_t usecs;
2102 uint64_t tval_user, tval_system;
2103
2104 tval_user = timer_grab(&thread->user_timer);
2105 tval_system = timer_grab(&thread->system_timer);
2106
2107 if (thread->precise_user_kernel_time) {
2108 absolutetime_to_microtime(tval_user, &secs, &usecs);
2109 user_time->seconds = (typeof(user_time->seconds))secs;
2110 user_time->microseconds = usecs;
2111
2112 absolutetime_to_microtime(tval_system, &secs, &usecs);
2113 system_time->seconds = (typeof(system_time->seconds))secs;
2114 system_time->microseconds = usecs;
2115 } else {
2116 /* system_timer may represent either sys or user */
2117 tval_user += tval_system;
2118 absolutetime_to_microtime(tval_user, &secs, &usecs);
2119 user_time->seconds = (typeof(user_time->seconds))secs;
2120 user_time->microseconds = usecs;
2121
2122 system_time->seconds = 0;
2123 system_time->microseconds = 0;
2124 }
2125
2126 if (runnable_time) {
2127 uint64_t tval_runnable = timer_grab(&thread->runnable_timer);
2128 absolutetime_to_microtime(tval_runnable, &secs, &usecs);
2129 runnable_time->seconds = (typeof(runnable_time->seconds))secs;
2130 runnable_time->microseconds = usecs;
2131 }
2132 }
2133
2134 uint64_t
2135 thread_get_runtime_self(void)
2136 {
2137 boolean_t interrupt_state;
2138 uint64_t runtime;
2139 thread_t thread = NULL;
2140 processor_t processor = NULL;
2141
2142 thread = current_thread();
2143
2144 /* Not interrupt safe, as the scheduler may otherwise update timer values underneath us */
2145 interrupt_state = ml_set_interrupts_enabled(FALSE);
2146 processor = current_processor();
2147 timer_update(processor->thread_timer, mach_absolute_time());
2148 runtime = (timer_grab(&thread->user_timer) + timer_grab(&thread->system_timer));
2149 ml_set_interrupts_enabled(interrupt_state);
2150
2151 return runtime;
2152 }
2153
2154 kern_return_t
2155 thread_assign(
2156 __unused thread_t thread,
2157 __unused processor_set_t new_pset)
2158 {
2159 return KERN_FAILURE;
2160 }
2161
2162 /*
2163 * thread_assign_default:
2164 *
2165 * Special version of thread_assign for assigning threads to the default
2166 * processor set.
2167 */
2168 kern_return_t
2169 thread_assign_default(
2170 thread_t thread)
2171 {
2172 return thread_assign(thread, &pset0);
2173 }
2174
2175 /*
2176 * thread_get_assignment
2177 *
2178 * Return current assignment for this thread.
2179 */
2180 kern_return_t
2181 thread_get_assignment(
2182 thread_t thread,
2183 processor_set_t *pset)
2184 {
2185 if (thread == NULL) {
2186 return KERN_INVALID_ARGUMENT;
2187 }
2188
2189 *pset = &pset0;
2190
2191 return KERN_SUCCESS;
2192 }
2193
2194 /*
2195 * thread_wire_internal:
2196 *
2197 * Specify that the target thread must always be able
2198 * to run and to allocate memory.
2199 */
2200 kern_return_t
2201 thread_wire_internal(
2202 host_priv_t host_priv,
2203 thread_t thread,
2204 boolean_t wired,
2205 boolean_t *prev_state)
2206 {
2207 if (host_priv == NULL || thread != current_thread()) {
2208 return KERN_INVALID_ARGUMENT;
2209 }
2210
2211 if (prev_state) {
2212 *prev_state = (thread->options & TH_OPT_VMPRIV) != 0;
2213 }
2214
2215 if (wired) {
2216 if (!(thread->options & TH_OPT_VMPRIV)) {
2217 vm_page_free_reserve(1); /* XXX */
2218 }
2219 thread->options |= TH_OPT_VMPRIV;
2220 } else {
2221 if (thread->options & TH_OPT_VMPRIV) {
2222 vm_page_free_reserve(-1); /* XXX */
2223 }
2224 thread->options &= ~TH_OPT_VMPRIV;
2225 }
2226
2227 return KERN_SUCCESS;
2228 }
2229
2230
2231 /*
2232 * thread_wire:
2233 *
2234 * User-api wrapper for thread_wire_internal()
2235 */
2236 kern_return_t
2237 thread_wire(
2238 host_priv_t host_priv,
2239 thread_t thread,
2240 boolean_t wired)
2241 {
2242 return thread_wire_internal(host_priv, thread, wired, NULL);
2243 }
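/*
 * Illustrative call pattern (a sketch, not a caller in this file): a thread
 * doing work the VM system depends on can wire itself for the duration,
 * since thread_wire_internal() only accepts current_thread():
 *
 *	thread_wire(host_priv_self(), current_thread(), TRUE);
 *	// ... work that must not block waiting for free pages ...
 *	thread_wire(host_priv_self(), current_thread(), FALSE);
 */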
2244
2245 boolean_t
2246 is_external_pageout_thread(void)
2247 {
2248 return current_thread() == vm_pageout_state.vm_pageout_external_iothread;
2249 }
2250
2251 boolean_t
2252 is_vm_privileged(void)
2253 {
2254 return current_thread()->options & TH_OPT_VMPRIV ? TRUE : FALSE;
2255 }
2256
2257 boolean_t
2258 set_vm_privilege(boolean_t privileged)
2259 {
2260 boolean_t was_vmpriv;
2261
2262 if (current_thread()->options & TH_OPT_VMPRIV) {
2263 was_vmpriv = TRUE;
2264 } else {
2265 was_vmpriv = FALSE;
2266 }
2267
2268 if (privileged != FALSE) {
2269 current_thread()->options |= TH_OPT_VMPRIV;
2270 } else {
2271 current_thread()->options &= ~TH_OPT_VMPRIV;
2272 }
2273
2274 return was_vmpriv;
2275 }
2276
2277 void
2278 thread_floor_boost_set_promotion_locked(thread_t thread)
2279 {
2280 assert(thread->priority_floor_count > 0);
2281
2282 if (!(thread->sched_flags & TH_SFLAG_FLOOR_PROMOTED)) {
2283 sched_thread_promote_reason(thread, TH_SFLAG_FLOOR_PROMOTED, 0);
2284 }
2285 }
2286
2287 /*! @function thread_priority_floor_start
2288 * @abstract boost the current thread priority to floor.
2289 * @discussion Increase the priority of the current thread to at least MINPRI_FLOOR.
2290 * The boost will be maintained until a corresponding thread_priority_floor_end()
2291 * is called. Every call of thread_priority_floor_start() needs to have a corresponding
2292 * call to thread_priority_floor_end() from the same thread.
2293 * No thread can return to userspace before calling thread_priority_floor_end().
2294 *
2295 * NOTE: avoid using this function. Try to use gate_t or sleep_with_inheritor()
2296 * instead.
2297 * @result a token to be given to the corresponding thread_priority_floor_end()
2298 */
2299 thread_pri_floor_t
2300 thread_priority_floor_start(void)
2301 {
2302 thread_pri_floor_t ret;
2303 thread_t thread = current_thread();
2304 __assert_only uint16_t prev_priority_floor_count;
2305
2306 assert(thread->priority_floor_count < UINT16_MAX);
2307 prev_priority_floor_count = thread->priority_floor_count++;
2308 #if MACH_ASSERT
2309 /*
2310 * Set the ast to check that the
2311 * priority_floor_count is going to be set to zero when
2312 * going back to userspace.
2313 * Set it only once when we increment it for the first time.
2314 */
2315 if (prev_priority_floor_count == 0) {
2316 act_set_debug_assert();
2317 }
2318 #endif
2319
2320 ret.thread = thread;
2321 return ret;
2322 }
2323
2324 /*! @function thread_priority_floor_end
2325 * @abstract ends the floor boost.
2326 * @param token the token obtained from thread_priority_floor_start()
2327 * @discussion ends the priority floor boost started with thread_priority_floor_start()
2328 */
2329 void
2330 thread_priority_floor_end(thread_pri_floor_t *token)
2331 {
2332 thread_t thread = current_thread();
2333
2334 assert(thread->priority_floor_count > 0);
2335 assertf(token->thread == thread, "thread_priority_floor_end called from a different thread from thread_priority_floor_start %p %p", thread, token->thread);
2336
2337 if ((thread->priority_floor_count-- == 1) && (thread->sched_flags & TH_SFLAG_FLOOR_PROMOTED)) {
2338 spl_t s = splsched();
2339 thread_lock(thread);
2340
2341 if (thread->sched_flags & TH_SFLAG_FLOOR_PROMOTED) {
2342 sched_thread_unpromote_reason(thread, TH_SFLAG_FLOOR_PROMOTED, 0);
2343 }
2344
2345 thread_unlock(thread);
2346 splx(s);
2347 }
2348
2349 token->thread = NULL;
2350 }
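/*
 * A minimal sketch of the intended start/end pairing documented above:
 *
 *	thread_pri_floor_t token = thread_priority_floor_start();
 *	// ... section that must run at or above MINPRI_FLOOR ...
 *	thread_priority_floor_end(&token);
 *
 * The token must be passed back by the same thread that took it, and every
 * start must be balanced by an end before returning to userspace.
 */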
2351
2352 /*
2353 * XXX assuming current thread only, for now...
2354 */
2355 void
2356 thread_guard_violation(thread_t thread,
2357 mach_exception_data_type_t code, mach_exception_data_type_t subcode, boolean_t fatal)
2358 {
2359 assert(thread == current_thread());
2360
2361 /* Don't set up the AST for kernel threads; this check is needed to ensure
2362 * that the guard_exc_* fields in the thread structure are set only by the
2363 * current thread and therefore don't require a lock.
2364 */
2365 if (get_threadtask(thread) == kernel_task) {
2366 return;
2367 }
2368
2369 assert(EXC_GUARD_DECODE_GUARD_TYPE(code));
2370
2371 /*
2372 * Use the saved state area of the thread structure
2373 * to store all info required to handle the AST when
2374 * returning to userspace. It's possible that there is
2375 * already a pending guard exception. If it's non-fatal,
2376 * it can only be over-written by a fatal exception code.
2377 */
2378 if (thread->guard_exc_info.code && (thread->guard_exc_fatal || !fatal)) {
2379 return;
2380 }
2381
2382 thread->guard_exc_info.code = code;
2383 thread->guard_exc_info.subcode = subcode;
2384 thread->guard_exc_fatal = fatal ? 1 : 0;
2385
2386 spl_t s = splsched();
2387 thread_ast_set(thread, AST_GUARD);
2388 ast_propagate(thread);
2389 splx(s);
2390 }
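/*
 * A hypothetical raise-site sketch (the flavor and target values are
 * illustrative; the EXC_GUARD_ENCODE_* macros come from <mach/exc_guard.h>):
 * a subsystem reporting a non-fatal mach port guard violation on the
 * current thread would build a code and call:
 *
 *	mach_exception_data_type_t code = 0;
 *	EXC_GUARD_ENCODE_TYPE(code, GUARD_TYPE_MACH_PORT);
 *	EXC_GUARD_ENCODE_FLAVOR(code, flavor);
 *	EXC_GUARD_ENCODE_TARGET(code, port_name);
 *	thread_guard_violation(current_thread(), code, subcode, FALSE);
 *
 * The AST_GUARD set here is consumed by guard_ast() below.
 */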
2391
2392 #if CONFIG_DEBUG_SYSCALL_REJECTION
2393 extern void rejected_syscall_guard_ast(thread_t __unused t, mach_exception_data_type_t code, mach_exception_data_type_t subcode);
2394 #endif /* CONFIG_DEBUG_SYSCALL_REJECTION */
2395
2396 /*
2397 * guard_ast:
2398 *
2399 * Handle AST_GUARD for a thread. This routine looks at the
2400 * state saved in the thread structure to determine the cause
2401 * of this exception. Based on this value, it invokes the
2402 * appropriate routine which determines other exception related
2403 * info and raises the exception.
2404 */
2405 void
2406 guard_ast(thread_t t)
2407 {
2408 const mach_exception_data_type_t
2409 code = t->guard_exc_info.code,
2410 subcode = t->guard_exc_info.subcode;
2411
2412 t->guard_exc_info.code = 0;
2413 t->guard_exc_info.subcode = 0;
2414 t->guard_exc_fatal = 0;
2415
2416 switch (EXC_GUARD_DECODE_GUARD_TYPE(code)) {
2417 case GUARD_TYPE_NONE:
2418 /* lingering AST_GUARD on the processor? */
2419 break;
2420 case GUARD_TYPE_MACH_PORT:
2421 mach_port_guard_ast(t, code, subcode);
2422 break;
2423 case GUARD_TYPE_FD:
2424 fd_guard_ast(t, code, subcode);
2425 break;
2426 #if CONFIG_VNGUARD
2427 case GUARD_TYPE_VN:
2428 vn_guard_ast(t, code, subcode);
2429 break;
2430 #endif
2431 case GUARD_TYPE_VIRT_MEMORY:
2432 virt_memory_guard_ast(t, code, subcode);
2433 break;
2434 #if CONFIG_DEBUG_SYSCALL_REJECTION
2435 case GUARD_TYPE_REJECTED_SC:
2436 rejected_syscall_guard_ast(t, code, subcode);
2437 break;
2438 #endif /* CONFIG_DEBUG_SYSCALL_REJECTION */
2439 default:
2440 panic("guard_exc_info %llx %llx", code, subcode);
2441 }
2442 }
2443
2444 static void
2445 thread_cputime_callback(int warning, __unused const void *arg0, __unused const void *arg1)
2446 {
2447 if (warning == LEDGER_WARNING_ROSE_ABOVE) {
2448 #if CONFIG_TELEMETRY
2449 /*
2450 * This thread is in danger of violating the CPU usage monitor. Enable telemetry
2451 * on the entire task so there are micro-stackshots available if and when
2452 * EXC_RESOURCE is triggered. We could have chosen to enable micro-stackshots
2453 * for this thread only; but now that this task is suspect, knowing what all of
2454 * its threads are up to will be useful.
2455 */
2456 telemetry_task_ctl(current_task(), TF_CPUMON_WARNING, 1);
2457 #endif
2458 return;
2459 }
2460
2461 #if CONFIG_TELEMETRY
2462 /*
2463 * If the balance has dipped below the warning level (LEDGER_WARNING_DIPPED_BELOW) or
2464 * exceeded the limit, turn telemetry off for the task.
2465 */
2466 telemetry_task_ctl(current_task(), TF_CPUMON_WARNING, 0);
2467 #endif
2468
2469 if (warning == 0) {
2470 SENDING_NOTIFICATION__THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU();
2471 }
2472 }
2473
2474 void __attribute__((noinline))
2475 SENDING_NOTIFICATION__THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU(void)
2476 {
2477 int pid = 0;
2478 task_t task = current_task();
2479 thread_t thread = current_thread();
2480 uint64_t tid = thread->thread_id;
2481 const char *procname = "unknown";
2482 time_value_t thread_total_time = {0, 0};
2483 time_value_t thread_system_time;
2484 time_value_t thread_user_time;
2485 int action;
2486 uint8_t percentage;
2487 uint32_t usage_percent = 0;
2488 uint32_t interval_sec;
2489 uint64_t interval_ns;
2490 uint64_t balance_ns;
2491 boolean_t fatal = FALSE;
2492 boolean_t send_exc_resource = TRUE; /* in addition to RESOURCE_NOTIFY */
2493 kern_return_t kr;
2494
2495 #ifdef EXC_RESOURCE_MONITORS
2496 mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
2497 #endif /* EXC_RESOURCE_MONITORS */
2498 struct ledger_entry_info lei;
2499
2500 assert(thread->t_threadledger != LEDGER_NULL);
2501
2502 /*
2503 * Extract the fatal bit and suspend the monitor (which clears the bit).
2504 */
2505 task_lock(task);
2506 if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_CPUMON) {
2507 fatal = TRUE;
2508 send_exc_resource = TRUE;
2509 }
2510 /* Only one thread can be here at a time. Whichever makes it through
2511 * first will successfully suspend the monitor and proceed to send the
2512 * notification. Other threads will get an error trying to suspend the
2513 * monitor and give up on sending the notification. In the first release,
2514 * the monitor won't be resumed for a number of seconds, but we may
2515 * eventually need to handle low-latency resume.
2516 */
2517 kr = task_suspend_cpumon(task);
2518 task_unlock(task);
2519 if (kr == KERN_INVALID_ARGUMENT) {
2520 return;
2521 }
2522
2523 #ifdef MACH_BSD
2524 pid = proc_selfpid();
2525 if (task->bsd_info != NULL) {
2526 procname = proc_name_address(task->bsd_info);
2527 }
2528 #endif
2529
2530 thread_get_cpulimit(&action, &percentage, &interval_ns);
2531
2532 interval_sec = (uint32_t)(interval_ns / NSEC_PER_SEC);
2533
2534 thread_read_times(thread, &thread_user_time, &thread_system_time, NULL);
2535 time_value_add(&thread_total_time, &thread_user_time);
2536 time_value_add(&thread_total_time, &thread_system_time);
2537 ledger_get_entry_info(thread->t_threadledger, thread_ledgers.cpu_time, &lei);
2538
2539 /* credit/debit/balance/limit are in absolute time units;
2540 * the refill info is in nanoseconds. */
2541 absolutetime_to_nanoseconds(lei.lei_balance, &balance_ns);
2542 if (lei.lei_last_refill > 0) {
2543 usage_percent = (uint32_t)((balance_ns * 100ULL) / lei.lei_last_refill);
2544 }
2545
2546 /* TODO: show task total runtime (via TASK_ABSOLUTETIME_INFO)? */
2547 printf("process %s[%d] thread %llu caught burning CPU! It used more than %d%% CPU over %u seconds\n",
2548 procname, pid, tid, percentage, interval_sec);
2549 printf(" (actual recent usage: %d%% over ~%llu seconds)\n",
2550 usage_percent, (lei.lei_last_refill + NSEC_PER_SEC / 2) / NSEC_PER_SEC);
2551 printf(" Thread lifetime cpu usage %d.%06ds, (%d.%06d user, %d.%06d sys)\n",
2552 thread_total_time.seconds, thread_total_time.microseconds,
2553 thread_user_time.seconds, thread_user_time.microseconds,
2554 thread_system_time.seconds, thread_system_time.microseconds);
2555 printf(" Ledger balance: %lld; mabs credit: %lld; mabs debit: %lld\n",
2556 lei.lei_balance, lei.lei_credit, lei.lei_debit);
2557 printf(" mabs limit: %llu; mabs period: %llu ns; last refill: %llu ns%s.\n",
2558 lei.lei_limit, lei.lei_refill_period, lei.lei_last_refill,
2559 (fatal ? " [fatal violation]" : ""));
2560
2561 /*
2562 * For now, send RESOURCE_NOTIFY in parallel with EXC_RESOURCE. Once
2563 * we have logging parity, we will stop sending EXC_RESOURCE (24508922).
2564 */
2565
2566 /* RESOURCE_NOTIFY MIG specifies nanoseconds of CPU time */
2567 lei.lei_balance = balance_ns;
2568 absolutetime_to_nanoseconds(lei.lei_limit, &lei.lei_limit);
2569 trace_resource_violation(RMON_CPUUSAGE_VIOLATED, &lei);
2570 kr = send_resource_violation(send_cpu_usage_violation, task, &lei,
2571 fatal ? kRNFatalLimitFlag : 0);
2572 if (kr) {
2573 printf("send_resource_violation(CPU usage, ...): error %#x\n", kr);
2574 }
2575
2576 #ifdef EXC_RESOURCE_MONITORS
2577 if (send_exc_resource) {
2578 if (disable_exc_resource) {
2579 printf("process %s[%d] thread %llu caught burning CPU! "
2580 "EXC_RESOURCE%s supressed by a boot-arg\n",
2581 procname, pid, tid, fatal ? " (and termination)" : "");
2582 return;
2583 }
2584
2585 if (audio_active) {
2586 printf("process %s[%d] thread %llu caught burning CPU! "
2587 "EXC_RESOURCE & termination supressed due to audio playback\n",
2588 procname, pid, tid);
2589 return;
2590 }
2591 }
2592
2593
2594 if (send_exc_resource) {
2595 code[0] = code[1] = 0;
2596 EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_CPU);
2597 if (fatal) {
2598 EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_CPU_MONITOR_FATAL);
2599 } else {
2600 EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_CPU_MONITOR);
2601 }
2602 EXC_RESOURCE_CPUMONITOR_ENCODE_INTERVAL(code[0], interval_sec);
2603 EXC_RESOURCE_CPUMONITOR_ENCODE_PERCENTAGE(code[0], percentage);
2604 EXC_RESOURCE_CPUMONITOR_ENCODE_PERCENTAGE(code[1], usage_percent);
2605 exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
2606 }
2607 #endif /* EXC_RESOURCE_MONITORS */
2608
2609 if (fatal) {
2610 #if CONFIG_JETSAM
2611 jetsam_on_ledger_cpulimit_exceeded();
2612 #else
2613 task_terminate_internal(task);
2614 #endif
2615 }
2616 }
2617
2618 #if DEVELOPMENT || DEBUG
2619 void __attribute__((noinline))
2620 SENDING_NOTIFICATION__TASK_HAS_TOO_MANY_THREADS(task_t task, int thread_count)
2621 {
2622 mach_exception_data_type_t code[EXCEPTION_CODE_MAX] = {0};
2623 int pid = task_pid(task);
2624 char procname[MAXCOMLEN + 1] = "unknown";
2625
2626 if (pid == 1) {
2627 /*
2628 * Cannot suspend launchd
2629 */
2630 return;
2631 }
2632
2633 proc_name(pid, procname, sizeof(procname));
2634
2635 if (disable_exc_resource) {
2636 printf("process %s[%d] crossed thread count high watermark (%d), EXC_RESOURCE "
2637 "supressed by a boot-arg. \n", procname, pid, thread_count);
2638 return;
2639 }
2640
2641 if (audio_active) {
2642 printf("process %s[%d] crossed thread count high watermark (%d), EXC_RESOURCE "
2643 "supressed due to audio playback.\n", procname, pid, thread_count);
2644 return;
2645 }
2646
2647 if (!exc_via_corpse_forking) {
2648 printf("process %s[%d] crossed thread count high watermark (%d), EXC_RESOURCE "
2649 "supressed due to corpse forking being disabled.\n", procname, pid,
2650 thread_count);
2651 return;
2652 }
2653
2654 printf("process %s[%d] crossed thread count high watermark (%d), sending "
2655 "EXC_RESOURCE\n", procname, pid, thread_count);
2656
2657 EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_THREADS);
2658 EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_THREADS_HIGH_WATERMARK);
2659 EXC_RESOURCE_THREADS_ENCODE_THREADS(code[0], thread_count);
2660
2661 task_enqueue_exception_with_corpse(task, EXC_RESOURCE, code, EXCEPTION_CODE_MAX, NULL);
2662 }
2663 #endif /* DEVELOPMENT || DEBUG */
2664
2665 void
2666 thread_update_io_stats(thread_t thread, int size, int io_flags)
2667 {
2668 task_t task = get_threadtask(thread);
2669 int io_tier;
2670
2671 if (thread->thread_io_stats == NULL || task->task_io_stats == NULL) {
2672 return;
2673 }
2674
2675 if (io_flags & DKIO_READ) {
2676 UPDATE_IO_STATS(thread->thread_io_stats->disk_reads, size);
2677 UPDATE_IO_STATS_ATOMIC(task->task_io_stats->disk_reads, size);
2678 }
2679
2680 if (io_flags & DKIO_META) {
2681 UPDATE_IO_STATS(thread->thread_io_stats->metadata, size);
2682 UPDATE_IO_STATS_ATOMIC(task->task_io_stats->metadata, size);
2683 }
2684
2685 if (io_flags & DKIO_PAGING) {
2686 UPDATE_IO_STATS(thread->thread_io_stats->paging, size);
2687 UPDATE_IO_STATS_ATOMIC(task->task_io_stats->paging, size);
2688 }
2689
2690 io_tier = ((io_flags & DKIO_TIER_MASK) >> DKIO_TIER_SHIFT);
2691 assert(io_tier < IO_NUM_PRIORITIES);
2692
2693 UPDATE_IO_STATS(thread->thread_io_stats->io_priority[io_tier], size);
2694 UPDATE_IO_STATS_ATOMIC(task->task_io_stats->io_priority[io_tier], size);
2695
2696 /* Update Total I/O Counts */
2697 UPDATE_IO_STATS(thread->thread_io_stats->total_io, size);
2698 UPDATE_IO_STATS_ATOMIC(task->task_io_stats->total_io, size);
2699
2700 if (!(io_flags & DKIO_READ)) {
2701 DTRACE_IO3(physical_writes, struct task *, task, uint32_t, size, int, io_flags);
2702 ledger_credit(task->ledger, task_ledgers.physical_writes, size);
2703 }
2704 }
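/*
 * A hypothetical caller sketch (names illustrative): an I/O completion path
 * accounting a tiered metadata read would encode the tier into the flag word
 * and pass the transfer size:
 *
 *	int io_flags = DKIO_READ | DKIO_META | (io_tier << DKIO_TIER_SHIFT);
 *	thread_update_io_stats(current_thread(), io_size, io_flags);
 *
 * Writes (no DKIO_READ bit) are additionally credited to the task's
 * physical_writes ledger, as above.
 */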
2705
2706 static void
2707 init_thread_ledgers(void)
2708 {
2709 ledger_template_t t;
2710 int idx;
2711
2712 assert(thread_ledger_template == NULL);
2713
2714 if ((t = ledger_template_create("Per-thread ledger")) == NULL) {
2715 panic("couldn't create thread ledger template");
2716 }
2717
2718 if ((idx = ledger_entry_add(t, "cpu_time", "sched", "ns")) < 0) {
2719 panic("couldn't create cpu_time entry for thread ledger template");
2720 }
2721
2722 if (ledger_set_callback(t, idx, thread_cputime_callback, NULL, NULL) < 0) {
2723 panic("couldn't set thread ledger callback for cpu_time entry");
2724 }
2725
2726 thread_ledgers.cpu_time = idx;
2727
2728 ledger_template_complete(t);
2729 thread_ledger_template = t;
2730 }
2731
2732 /*
2733 * Returns currently applied CPU usage limit, or 0/0 if none is applied.
2734 */
2735 int
2736 thread_get_cpulimit(int *action, uint8_t *percentage, uint64_t *interval_ns)
2737 {
2738 int64_t abstime = 0;
2739 uint64_t limittime = 0;
2740 thread_t thread = current_thread();
2741
2742 *percentage = 0;
2743 *interval_ns = 0;
2744 *action = 0;
2745
2746 if (thread->t_threadledger == LEDGER_NULL) {
2747 /*
2748 * This thread has no per-thread ledger, so it can't possibly
2749 * have a CPU limit applied.
2750 */
2751 return KERN_SUCCESS;
2752 }
2753
2754 ledger_get_period(thread->t_threadledger, thread_ledgers.cpu_time, interval_ns);
2755 ledger_get_limit(thread->t_threadledger, thread_ledgers.cpu_time, &abstime);
2756
2757 if ((abstime == LEDGER_LIMIT_INFINITY) || (*interval_ns == 0)) {
2758 /*
2759 * This thread's CPU time ledger has no period or limit; so it
2760 * doesn't have a CPU limit applied.
2761 */
2762 return KERN_SUCCESS;
2763 }
2764
2765 /*
2766 * This calculation is the converse to the one in thread_set_cpulimit().
2767 */
2768 absolutetime_to_nanoseconds(abstime, &limittime);
2769 *percentage = (uint8_t)((limittime * 100ULL) / *interval_ns);
2770 assert(*percentage <= 100);
2771
2772 if (thread->options & TH_OPT_PROC_CPULIMIT) {
2773 assert((thread->options & TH_OPT_PRVT_CPULIMIT) == 0);
2774
2775 *action = THREAD_CPULIMIT_BLOCK;
2776 } else if (thread->options & TH_OPT_PRVT_CPULIMIT) {
2777 assert((thread->options & TH_OPT_PROC_CPULIMIT) == 0);
2778
2779 *action = THREAD_CPULIMIT_EXCEPTION;
2780 } else {
2781 *action = THREAD_CPULIMIT_DISABLE;
2782 }
2783
2784 return KERN_SUCCESS;
2785 }
2786
2787 /*
2788 * Set CPU usage limit on a thread.
2789 *
2790 * Calling with percentage of 0 will unset the limit for this thread.
2791 */
2792 int
2793 thread_set_cpulimit(int action, uint8_t percentage, uint64_t interval_ns)
2794 {
2795 thread_t thread = current_thread();
2796 ledger_t l;
2797 uint64_t limittime = 0;
2798 uint64_t abstime = 0;
2799
2800 assert(percentage <= 100);
2801
2802 if (action == THREAD_CPULIMIT_DISABLE) {
2803 /*
2804 * Remove CPU limit, if any exists.
2805 */
2806 if (thread->t_threadledger != LEDGER_NULL) {
2807 l = thread->t_threadledger;
2808 ledger_set_limit(l, thread_ledgers.cpu_time, LEDGER_LIMIT_INFINITY, 0);
2809 ledger_set_action(l, thread_ledgers.cpu_time, LEDGER_ACTION_IGNORE);
2810 thread->options &= ~(TH_OPT_PROC_CPULIMIT | TH_OPT_PRVT_CPULIMIT);
2811 }
2812
2813 return 0;
2814 }
2815
2816 if (interval_ns < MINIMUM_CPULIMIT_INTERVAL_MS * NSEC_PER_MSEC) {
2817 return KERN_INVALID_ARGUMENT;
2818 }
2819
2820 l = thread->t_threadledger;
2821 if (l == LEDGER_NULL) {
2822 /*
2823 * This thread doesn't yet have a per-thread ledger; so create one with the CPU time entry active.
2824 */
2825 if ((l = ledger_instantiate(thread_ledger_template, LEDGER_CREATE_INACTIVE_ENTRIES)) == LEDGER_NULL) {
2826 return KERN_RESOURCE_SHORTAGE;
2827 }
2828
2829 /*
2830 * We are the first to create this thread's ledger, so only activate our entry.
2831 */
2832 ledger_entry_setactive(l, thread_ledgers.cpu_time);
2833 thread->t_threadledger = l;
2834 }
2835
2836 /*
2837 * The limit is specified as a percentage of CPU over an interval in nanoseconds.
2838 * Calculate the amount of CPU time that the thread needs to consume in order to hit the limit.
2839 */
2840 limittime = (interval_ns * percentage) / 100;
2841 nanoseconds_to_absolutetime(limittime, &abstime);
2842 ledger_set_limit(l, thread_ledgers.cpu_time, abstime, cpumon_ustackshots_trigger_pct);
2843 /*
2844 * Refill the thread's allotted CPU time every interval_ns nanoseconds.
2845 */
2846 ledger_set_period(l, thread_ledgers.cpu_time, interval_ns);
2847
2848 if (action == THREAD_CPULIMIT_EXCEPTION) {
2849 /*
2850 * We don't support programming the CPU usage monitor on a task if any of its
2851 * threads have a per-thread blocking CPU limit configured.
2852 */
2853 if (thread->options & TH_OPT_PRVT_CPULIMIT) {
2854 panic("CPU usage monitor activated, but blocking thread limit exists");
2855 }
2856
2857 /*
2858 * Make a note that this thread's CPU limit is being used for the task-wide CPU
2859 * usage monitor. We don't have to arm the callback which will trigger the
2860 * exception, because that was done for us in ledger_instantiate (because the
2861 * ledger template used has a default callback).
2862 */
2863 thread->options |= TH_OPT_PROC_CPULIMIT;
2864 } else {
2865 /*
2866 * We deliberately override any CPU limit imposed by a task-wide limit (e.g.
2867 * the CPU usage monitor).
2868 */
2869 thread->options &= ~TH_OPT_PROC_CPULIMIT;
2870
2871 thread->options |= TH_OPT_PRVT_CPULIMIT;
2872 /* The per-thread ledger template by default has a callback for CPU time */
2873 ledger_disable_callback(l, thread_ledgers.cpu_time);
2874 ledger_set_action(l, thread_ledgers.cpu_time, LEDGER_ACTION_BLOCK);
2875 }
2876
2877 return 0;
2878 }
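/*
 * Example (a sketch; the values are illustrative): limit the calling thread
 * to 50% of a CPU over a 1-second window, blocking it once the ledger is
 * exhausted until the next refill:
 *
 *	thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, 50, 1 * NSEC_PER_SEC);
 *
 * Here limittime = (1s * 50) / 100 = 500ms of CPU per interval; passing
 * THREAD_CPULIMIT_DISABLE (or a percentage of 0, per the comment above)
 * removes the limit again.
 */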
2879
2880 void
2881 thread_sched_call(
2882 thread_t thread,
2883 sched_call_t call)
2884 {
2885 assert((thread->state & TH_WAIT_REPORT) == 0);
2886 thread->sched_call = call;
2887 }
2888
2889 uint64_t
2890 thread_tid(
2891 thread_t thread)
2892 {
2893 return thread != THREAD_NULL? thread->thread_id: 0;
2894 }
2895
2896 uint64_t
2897 uthread_tid(
2898 struct uthread *uth)
2899 {
2900 if (uth) {
2901 return thread_tid(get_machthread(uth));
2902 }
2903 return 0;
2904 }
2905
2906 uint16_t
2907 thread_set_tag(thread_t th, uint16_t tag)
2908 {
2909 return thread_set_tag_internal(th, tag);
2910 }
2911
2912 uint16_t
2913 thread_get_tag(thread_t th)
2914 {
2915 return thread_get_tag_internal(th);
2916 }
2917
2918 uint64_t
2919 thread_last_run_time(thread_t th)
2920 {
2921 return th->last_run_time;
2922 }
2923
2924 /*
2925 * Shared resource contention management
2926 *
2927 * The scheduler attempts to load balance the shared resource intensive
2928 * workloads across clusters to ensure that the resource is not heavily
2929 * contended. The kernel relies on external agents (userspace or
2930 * performance controller) to identify shared resource heavy threads.
2931 * The load balancing is achieved based on the scheduler configuration
2932 * enabled on the platform.
2933 */
2934
2935
2936 #if CONFIG_SCHED_EDGE
2937
2938 /*
2939 * On the Edge scheduler, the load balancing is achieved by looking
2940 * at cluster level shared resource loads and migrating resource heavy
2941 * threads dynamically to under utilized cluster. Therefore, when a
2942 * thread is indicated as a resource heavy thread, the policy set
2943 * routine simply adds a flag to the thread which is looked at by
2944 * the scheduler on thread migration decisions.
2945 */
2946
2947 boolean_t
2948 thread_shared_rsrc_policy_get(thread_t thread, cluster_shared_rsrc_type_t type)
2949 {
2950 return thread->th_shared_rsrc_heavy_user[type] || thread->th_shared_rsrc_heavy_perf_control[type];
2951 }
2952
2953 __options_decl(sched_edge_rsrc_heavy_thread_state, uint32_t, {
2954 SCHED_EDGE_RSRC_HEAVY_THREAD_SET = 1,
2955 SCHED_EDGE_RSRC_HEAVY_THREAD_CLR = 2,
2956 });
2957
2958 kern_return_t
2959 thread_shared_rsrc_policy_set(thread_t thread, __unused uint32_t index, cluster_shared_rsrc_type_t type, shared_rsrc_policy_agent_t agent)
2960 {
2961 spl_t s = splsched();
2962 thread_lock(thread);
2963
2964 bool user = (agent == SHARED_RSRC_POLICY_AGENT_DISPATCH) || (agent == SHARED_RSRC_POLICY_AGENT_SYSCTL);
2965 bool *thread_flags = (user) ? thread->th_shared_rsrc_heavy_user : thread->th_shared_rsrc_heavy_perf_control;
2966 if (thread_flags[type]) {
2967 thread_unlock(thread);
2968 splx(s);
2969 return KERN_FAILURE;
2970 }
2971
2972 thread_flags[type] = true;
2973 thread_unlock(thread);
2974 splx(s);
2975
2976 KDBG(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_RSRC_HEAVY_THREAD) | DBG_FUNC_NONE, SCHED_EDGE_RSRC_HEAVY_THREAD_SET, thread_tid(thread), type, agent);
2977 if (thread == current_thread()) {
2978 if (agent == SHARED_RSRC_POLICY_AGENT_PERFCTL_QUANTUM) {
2979 ast_on(AST_PREEMPT);
2980 } else {
2981 assert(agent != SHARED_RSRC_POLICY_AGENT_PERFCTL_CSW);
2982 thread_block(THREAD_CONTINUE_NULL);
2983 }
2984 }
2985 return KERN_SUCCESS;
2986 }
2987
2988 kern_return_t
2989 thread_shared_rsrc_policy_clear(thread_t thread, cluster_shared_rsrc_type_t type, shared_rsrc_policy_agent_t agent)
2990 {
2991 spl_t s = splsched();
2992 thread_lock(thread);
2993
2994 bool user = (agent == SHARED_RSRC_POLICY_AGENT_DISPATCH) || (agent == SHARED_RSRC_POLICY_AGENT_SYSCTL);
2995 bool *thread_flags = (user) ? thread->th_shared_rsrc_heavy_user : thread->th_shared_rsrc_heavy_perf_control;
2996 if (!thread_flags[type]) {
2997 thread_unlock(thread);
2998 splx(s);
2999 return KERN_FAILURE;
3000 }
3001
3002 thread_flags[type] = false;
3003 thread_unlock(thread);
3004 splx(s);
3005
3006 KDBG(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_RSRC_HEAVY_THREAD) | DBG_FUNC_NONE, SCHED_EDGE_RSRC_HEAVY_THREAD_CLR, thread_tid(thread), type, agent);
3007 if (thread == current_thread()) {
3008 if (agent == SHARED_RSRC_POLICY_AGENT_PERFCTL_QUANTUM) {
3009 ast_on(AST_PREEMPT);
3010 } else {
3011 assert(agent != SHARED_RSRC_POLICY_AGENT_PERFCTL_CSW);
3012 thread_block(THREAD_CONTINUE_NULL);
3013 }
3014 }
3015 return KERN_SUCCESS;
3016 }
3017
3018 #else /* CONFIG_SCHED_EDGE */
3019
3020 /*
3021 * On non-Edge schedulers, the shared resource contention
3022 * is managed by simply binding threads to specific clusters
3023 * based on the worker index passed by the agents marking
3024 * this thread as resource heavy threads. The thread binding
3025 * approach does not provide any rebalancing opportunities;
3026 * it can also suffer from scheduling delays if the cluster
3027 * where the thread is bound is contended.
3028 */
3029
3030 boolean_t
3031 thread_shared_rsrc_policy_get(__unused thread_t thread, __unused cluster_shared_rsrc_type_t type)
3032 {
3033 return false;
3034 }
3035
3036 kern_return_t
3037 thread_shared_rsrc_policy_set(thread_t thread, uint32_t index, __unused cluster_shared_rsrc_type_t type, __unused shared_rsrc_policy_agent_t agent)
3038 {
3039 return thread_bind_cluster_id(thread, index, THREAD_BIND_SOFT | THREAD_BIND_ELIGIBLE_ONLY);
3040 }
3041
3042 kern_return_t
3043 thread_shared_rsrc_policy_clear(thread_t thread, __unused cluster_shared_rsrc_type_t type, __unused shared_rsrc_policy_agent_t agent)
3044 {
3045 return thread_bind_cluster_id(thread, 0, THREAD_UNBIND);
3046 }
3047
3048 #endif /* CONFIG_SCHED_EDGE */
3049
3050 uint64_t
3051 thread_dispatchqaddr(
3052 thread_t thread)
3053 {
3054 uint64_t dispatchqueue_addr;
3055 uint64_t thread_handle;
3056 task_t task;
3057
3058 if (thread == THREAD_NULL) {
3059 return 0;
3060 }
3061
3062 thread_handle = thread->machine.cthread_self;
3063 if (thread_handle == 0) {
3064 return 0;
3065 }
3066
3067 task = get_threadtask(thread);
3068 if (thread->inspection == TRUE) {
3069 dispatchqueue_addr = thread_handle + get_task_dispatchqueue_offset(task);
3070 } else if (task->bsd_info) {
3071 dispatchqueue_addr = thread_handle + get_dispatchqueue_offset_from_proc(task->bsd_info);
3072 } else {
3073 dispatchqueue_addr = 0;
3074 }
3075
3076 return dispatchqueue_addr;
3077 }
3078
3079
3080 uint64_t
3081 thread_wqquantum_addr(thread_t thread)
3082 {
3083 uint64_t thread_handle;
3084 task_t task;
3085
3086 if (thread == THREAD_NULL) {
3087 return 0;
3088 }
3089
3090 thread_handle = thread->machine.cthread_self;
3091 if (thread_handle == 0) {
3092 return 0;
3093 }
3094 task = get_threadtask(thread);
3095
3096 uint64_t wq_quantum_expiry_offset = get_wq_quantum_offset_from_proc(task->bsd_info);
3097 if (wq_quantum_expiry_offset == 0) {
3098 return 0;
3099 }
3100
3101 return wq_quantum_expiry_offset + thread_handle;
3102 }
3103
3104 uint64_t
3105 thread_rettokern_addr(
3106 thread_t thread)
3107 {
3108 uint64_t rettokern_addr;
3109 uint64_t rettokern_offset;
3110 uint64_t thread_handle;
3111 task_t task;
3112
3113 if (thread == THREAD_NULL) {
3114 return 0;
3115 }
3116
3117 thread_handle = thread->machine.cthread_self;
3118 if (thread_handle == 0) {
3119 return 0;
3120 }
3121 task = get_threadtask(thread);
3122
3123 if (task->bsd_info) {
3124 rettokern_offset = get_return_to_kernel_offset_from_proc(task->bsd_info);
3125
3126 /* Return 0 if return to kernel offset is not initialized. */
3127 if (rettokern_offset == 0) {
3128 rettokern_addr = 0;
3129 } else {
3130 rettokern_addr = thread_handle + rettokern_offset;
3131 }
3132 } else {
3133 rettokern_addr = 0;
3134 }
3135
3136 return rettokern_addr;
3137 }
3138
3139 /*
3140 * Export routines to other components for things that are done as macros
3141 * within the osfmk component.
3142 */
3143
3144 void
3145 thread_mtx_lock(thread_t thread)
3146 {
3147 lck_mtx_lock(&thread->mutex);
3148 }
3149
3150 void
3151 thread_mtx_unlock(thread_t thread)
3152 {
3153 lck_mtx_unlock(&thread->mutex);
3154 }
3155
3156 void
3157 thread_reference(
3158 thread_t thread)
3159 {
3160 if (thread != THREAD_NULL) {
3161 zone_id_require(ZONE_ID_THREAD, sizeof(struct thread), thread);
3162 os_ref_retain_raw(&thread->ref_count, &thread_refgrp);
3163 }
3164 }
3165
3166 void
3167 thread_require(thread_t thread)
3168 {
3169 zone_id_require(ZONE_ID_THREAD, sizeof(struct thread), thread);
3170 }
3171
3172 #undef thread_should_halt
3173
3174 boolean_t
3175 thread_should_halt(
3176 thread_t th)
3177 {
3178 return thread_should_halt_fast(th);
3179 }
3180
3181 /*
3182 * thread_set_voucher_name - reset the voucher port name bound to this thread
3183 *
3184 * Conditions: nothing locked
3185 */
3186
3187 kern_return_t
3188 thread_set_voucher_name(mach_port_name_t voucher_name)
3189 {
3190 thread_t thread = current_thread();
3191 ipc_voucher_t new_voucher = IPC_VOUCHER_NULL;
3192 ipc_voucher_t voucher;
3193 ledger_t bankledger = NULL;
3194 struct thread_group *banktg = NULL;
3195 uint32_t persona_id = 0;
3196
3197 if (MACH_PORT_DEAD == voucher_name) {
3198 return KERN_INVALID_RIGHT;
3199 }
3200
3201 /*
3202 * aggressively convert to a voucher reference
3203 */
3204 if (MACH_PORT_VALID(voucher_name)) {
3205 new_voucher = convert_port_name_to_voucher(voucher_name);
3206 if (IPC_VOUCHER_NULL == new_voucher) {
3207 return KERN_INVALID_ARGUMENT;
3208 }
3209 }
3210 bank_get_bank_ledger_thread_group_and_persona(new_voucher, &bankledger, &banktg, &persona_id);
3211
3212 thread_mtx_lock(thread);
3213 voucher = thread->ith_voucher;
3214 thread->ith_voucher_name = voucher_name;
3215 thread->ith_voucher = new_voucher;
3216 thread_mtx_unlock(thread);
3217
3218 bank_swap_thread_bank_ledger(thread, bankledger);
3219 #if CONFIG_THREAD_GROUPS
3220 thread_group_set_bank(thread, banktg);
3221 #endif /* CONFIG_THREAD_GROUPS */
3222
3223 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3224 MACHDBG_CODE(DBG_MACH_IPC, MACH_THREAD_SET_VOUCHER) | DBG_FUNC_NONE,
3225 (uintptr_t)thread_tid(thread),
3226 (uintptr_t)voucher_name,
3227 VM_KERNEL_ADDRPERM((uintptr_t)new_voucher),
3228 persona_id, 0);
3229
3230 if (IPC_VOUCHER_NULL != voucher) {
3231 ipc_voucher_release(voucher);
3232 }
3233
3234 return KERN_SUCCESS;
3235 }
3236
3237 /*
3238 * thread_get_mach_voucher - return a voucher reference for the specified thread voucher
3239 *
3240 * Conditions: nothing locked
3241 *
3242 * NOTE: At the moment, there is no distinction between the current and effective
3243 * vouchers because we only set them at the thread level currently.
3244 */
3245 kern_return_t
3246 thread_get_mach_voucher(
3247 thread_act_t thread,
3248 mach_voucher_selector_t __unused which,
3249 ipc_voucher_t *voucherp)
3250 {
3251 ipc_voucher_t voucher;
3252
3253 if (THREAD_NULL == thread) {
3254 return KERN_INVALID_ARGUMENT;
3255 }
3256
3257 thread_mtx_lock(thread);
3258 voucher = thread->ith_voucher;
3259
3260 if (IPC_VOUCHER_NULL != voucher) {
3261 ipc_voucher_reference(voucher);
3262 thread_mtx_unlock(thread);
3263 *voucherp = voucher;
3264 return KERN_SUCCESS;
3265 }
3266
3267 thread_mtx_unlock(thread);
3268
3269 *voucherp = IPC_VOUCHER_NULL;
3270 return KERN_SUCCESS;
3271 }
3272
3273 /*
3274 * thread_set_mach_voucher - set a voucher reference for the specified thread voucher
3275 *
3276 * Conditions: caller holds a reference on the voucher.
3277 * nothing locked.
3278 *
3279 * We grab another reference to the voucher and bind it to the thread.
3280 * The old voucher reference associated with the thread is
3281 * discarded.
3282 */
3283 kern_return_t
3284 thread_set_mach_voucher(
3285 thread_t thread,
3286 ipc_voucher_t voucher)
3287 {
3288 ipc_voucher_t old_voucher;
3289 ledger_t bankledger = NULL;
3290 struct thread_group *banktg = NULL;
3291 uint32_t persona_id = 0;
3292
3293 if (THREAD_NULL == thread) {
3294 return KERN_INVALID_ARGUMENT;
3295 }
3296
3297 bank_get_bank_ledger_thread_group_and_persona(voucher, &bankledger, &banktg, &persona_id);
3298
3299 thread_mtx_lock(thread);
3300 /*
3301 * Once the thread is started, we will look at `ith_voucher` without
3302 * holding any lock.
3303 *
3304 * Setting the voucher hence can only be done by current_thread() or
3305 * before it started. "started" flips under the thread mutex and must be
3306 * tested under it too.
3307 */
3308 if (thread != current_thread() && thread->started) {
3309 thread_mtx_unlock(thread);
3310 return KERN_INVALID_ARGUMENT;
3311 }
3312
3313 ipc_voucher_reference(voucher);
3314 old_voucher = thread->ith_voucher;
3315 thread->ith_voucher = voucher;
3316 thread->ith_voucher_name = MACH_PORT_NULL;
3317 thread_mtx_unlock(thread);
3318
3319 bank_swap_thread_bank_ledger(thread, bankledger);
3320 #if CONFIG_THREAD_GROUPS
3321 thread_group_set_bank(thread, banktg);
3322 #endif /* CONFIG_THREAD_GROUPS */
3323
3324 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3325 MACHDBG_CODE(DBG_MACH_IPC, MACH_THREAD_SET_VOUCHER) | DBG_FUNC_NONE,
3326 (uintptr_t)thread_tid(thread),
3327 (uintptr_t)MACH_PORT_NULL,
3328 VM_KERNEL_ADDRPERM((uintptr_t)voucher),
3329 persona_id, 0);
3330
3331 ipc_voucher_release(old_voucher);
3332
3333 return KERN_SUCCESS;
3334 }
3335
3336 /*
3337 * thread_swap_mach_voucher - swap a voucher reference for the specified thread voucher
3338 *
3339 * Conditions: caller holds a reference on the new and presumed old voucher(s).
3340 * nothing locked.
3341 *
3342 * This function is no longer supported.
3343 */
3344 kern_return_t
3345 thread_swap_mach_voucher(
3346 __unused thread_t thread,
3347 __unused ipc_voucher_t new_voucher,
3348 ipc_voucher_t *in_out_old_voucher)
3349 {
3350 /*
3351 * Currently this function is only called from a MIG generated
3352 * routine which doesn't release the reference on the voucher
3353 * addressed by in_out_old_voucher. To avoid leaking this reference,
3354 * a call to release it has been added here.
3355 */
3356 ipc_voucher_release(*in_out_old_voucher);
3357 OS_ANALYZER_SUPPRESS("81787115") return KERN_NOT_SUPPORTED;
3358 }
3359
3360 /*
3361 * thread_get_current_voucher_origin_pid - get the pid of the originator of the current voucher.
3362 */
3363 kern_return_t
3364 thread_get_current_voucher_origin_pid(
3365 int32_t *pid)
3366 {
3367 uint32_t buf_size;
3368 kern_return_t kr;
3369 thread_t thread = current_thread();
3370
3371 buf_size = sizeof(*pid);
3372 kr = mach_voucher_attr_command(thread->ith_voucher,
3373 MACH_VOUCHER_ATTR_KEY_BANK,
3374 BANK_ORIGINATOR_PID,
3375 NULL,
3376 0,
3377 (mach_voucher_attr_content_t)pid,
3378 &buf_size);
3379
3380 return kr;
3381 }
3382
3383 #if CONFIG_THREAD_GROUPS
3384 /*
3385 * Returns the current thread's voucher-carried thread group
3386 *
3387 * Reference is borrowed from this being the current voucher, so it does NOT
3388 * return a reference to the group.
3389 */
3390 struct thread_group *
3391 thread_get_current_voucher_thread_group(thread_t thread)
3392 {
3393 assert(thread == current_thread());
3394
3395 if (thread->ith_voucher == NULL) {
3396 return NULL;
3397 }
3398
3399 ledger_t bankledger = NULL;
3400 struct thread_group *banktg = NULL;
3401
3402 bank_get_bank_ledger_thread_group_and_persona(thread->ith_voucher, &bankledger, &banktg, NULL);
3403
3404 return banktg;
3405 }
3406
3407 #endif /* CONFIG_THREAD_GROUPS */
3408
3409 extern struct workqueue *
3410 proc_get_wqptr(void *proc);
3411
3412 static bool
3413 task_supports_cooperative_workqueue(task_t task)
3414 {
3415 assert(task == current_task());
3416 if (task->bsd_info == NULL) {
3417 return false;
3418 }
3419
3420 uint64_t wq_quantum_expiry_offset = get_wq_quantum_offset_from_proc(task->bsd_info);
3421 /* userspace may not have called workq_open yet */
3422 struct workqueue *wq = proc_get_wqptr(task->bsd_info);
3423
3424 return (wq != NULL) && (wq_quantum_expiry_offset != 0);
3425 }
3426
3427 /* Not safe to call from scheduler paths - should only be called on self */
3428 bool
3429 thread_supports_cooperative_workqueue(thread_t thread)
3430 {
3431 struct uthread *uth = get_bsdthread_info(thread);
3432 task_t task = get_threadtask(thread);
3433
3434 assert(thread == current_thread());
3435
3436 return task_supports_cooperative_workqueue(task) &&
3437 bsdthread_part_of_cooperative_workqueue(uth);
3438 }
3439
3440 static inline bool
3441 thread_has_armed_workqueue_quantum(thread_t thread)
3442 {
3443 return thread->workq_quantum_deadline != 0;
3444 }
3445
3446 /*
3447 * The workq quantum is a lazy timer that is evaluated at 2 specific times in
3448 * the scheduler:
3449 *
3450 * - context switch time
3451 * - scheduler quantum expiry time.
3452 *
3453 * We're currently expressing the workq quantum with a 0.5 scale factor of the
3454 * scheduler quantum. It is possible that if the workq quantum is rearmed
3455 * shortly after the scheduler quantum begins, we could have a large delay
3456 * between when the workq quantum next expires and when it actually is noticed.
3457 *
3458 * A potential future improvement for the wq quantum expiry logic is to compare
3459 * it to the next actual scheduler quantum deadline and expire it if it is
3460 * within a certain leeway.
3461 */
3462 static inline uint64_t
3463 thread_workq_quantum_size(thread_t thread)
3464 {
3465 return (uint64_t) (SCHED(initial_quantum_size)(thread) / 2);
3466 }
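/*
 * Numeric sketch of the arming arithmetic (values illustrative): with a
 * 10ms scheduler quantum, thread_workq_quantum_size() yields 5ms, so
 * thread_arm_workqueue_quantum() below sets
 *
 *	workq_quantum_deadline = thread_get_runtime_self() + 5ms;
 *
 * in mach time units, and expiry is detected when the thread's accumulated
 * user + system runtime crosses that deadline at context switch or
 * scheduler quantum expiry.
 */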
3467
3468 /*
3469 * Always called by thread on itself - either at AST boundary after processing
3470 * an existing quantum expiry, or when a new quantum is armed before the thread
3471 * goes out to userspace to handle a thread request
3472 */
3473 void
3474 thread_arm_workqueue_quantum(thread_t thread)
3475 {
3476 /*
3477 * If the task is not opted into wq quantum notification, or if the thread
3478 * is not part of the cooperative workqueue, don't even bother with tracking
3479 * the quantum or calculating expiry
3480 */
3481 if (!thread_supports_cooperative_workqueue(thread)) {
3482 assert(thread->workq_quantum_deadline == 0);
3483 return;
3484 }
3485
3486 assert(current_thread() == thread);
3487 assert(thread_get_tag(thread) & THREAD_TAG_WORKQUEUE);
3488
3489 uint64_t current_runtime = thread_get_runtime_self();
3490 uint64_t deadline = thread_workq_quantum_size(thread) + current_runtime;
3491
3492 /*
3493 * The update of a workqueue quantum should always be followed by the update
3494 * of the AST - see explanation in kern/thread.h for synchronization of this
3495 * field
3496 */
3497 thread->workq_quantum_deadline = deadline;
3498
3499 /* We're arming a new quantum, clear any previous expiry notification */
3500 act_clear_astkevent(thread, AST_KEVENT_WORKQ_QUANTUM_EXPIRED);
3501
3502 WQ_TRACE(TRACE_wq_quantum_arm, current_runtime, deadline, 0, 0);
3503
3504 WORKQ_QUANTUM_HISTORY_WRITE_ENTRY(thread, thread->workq_quantum_deadline, true);
3505 }
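
/*
 * Illustrative call pattern (a sketch, not a verbatim call site): a
 * cooperative workqueue thread arms its quantum on itself just before
 * returning to userspace to service a thread request:
 *
 *	thread_arm_workqueue_quantum(current_thread());
 *	// returns early if the thread is not a cooperative workqueue thread;
 *	// otherwise expiry is noticed lazily at context switch or scheduler
 *	// quantum expiry
 */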

/* Called by a thread on itself when it is about to park */
void
thread_disarm_workqueue_quantum(thread_t thread)
{
	/*
	 * The update of a workqueue quantum should always be followed by the update
	 * of the AST - see explanation in kern/thread.h for synchronization of this
	 * field.
	 */
	thread->workq_quantum_deadline = 0;
	act_clear_astkevent(thread, AST_KEVENT_WORKQ_QUANTUM_EXPIRED);

	WQ_TRACE(TRACE_wq_quantum_disarm, 0, 0, 0, 0);

	WORKQ_QUANTUM_HISTORY_WRITE_ENTRY(thread, thread->workq_quantum_deadline, false);
}

/*
 * This is called at context switch time on a thread that may not be self,
 * and at AST time.
 */
bool
thread_has_expired_workqueue_quantum(thread_t thread, bool should_trace)
{
	if (!thread_has_armed_workqueue_quantum(thread)) {
		return false;
	}
	/*
	 * We do not do a thread_get_runtime_self() here since this function is
	 * called at context switch time or during scheduler quantum expiry and
	 * therefore we may not be evaluating it on the current thread/self.
	 *
	 * In addition, the timers on the thread have just been updated recently so
	 * we don't need to update them again.
	 */
	uint64_t runtime = (timer_grab(&thread->user_timer) + timer_grab(&thread->system_timer));
	bool expired = runtime > thread->workq_quantum_deadline;

	if (expired && should_trace) {
		WQ_TRACE(TRACE_wq_quantum_expired, runtime, thread->workq_quantum_deadline, 0, 0);
	}

	return expired;
}

/*
 * Called on a thread that is being context switched out or during quantum
 * expiry on self. Only called from scheduler paths.
 */
void
thread_evaluate_workqueue_quantum_expiry(thread_t thread)
{
	if (thread_has_expired_workqueue_quantum(thread, true)) {
		act_set_astkevent(thread, AST_KEVENT_WORKQ_QUANTUM_EXPIRED);
	}
}

boolean_t
thread_has_thread_name(thread_t th)
{
	if (th) {
		return bsd_hasthreadname(get_bsdthread_info(th));
	}

	/*
	 * This is an odd case; clients may set the thread name based on the lack of
	 * a name, but in this context there is no uthread to attach the name to.
	 */
	return FALSE;
}

void
thread_set_thread_name(thread_t th, const char* name)
{
	if (th && name) {
		bsd_setthreadname(get_bsdthread_info(th), name);
	}
}

void
thread_get_thread_name(thread_t th, char* name)
{
	if (!name) {
		return;
	}
	if (th) {
		bsd_getthreadname(get_bsdthread_info(th), name);
	} else {
		name[0] = '\0';
	}
}
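
/*
 * Illustrative usage (a sketch; MAXTHREADNAMESIZE is the BSD-layer thread-name
 * buffer size, assumed here rather than defined in this file):
 *
 *	char name[MAXTHREADNAMESIZE];
 *	thread_get_thread_name(current_thread(), name);
 *	if (name[0] == '\0') {
 *		thread_set_thread_name(current_thread(), "com.example.worker");
 *	}
 */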

void
thread_set_honor_qlimit(thread_t thread)
{
	thread->options |= TH_OPT_HONOR_QLIMIT;
}

void
thread_clear_honor_qlimit(thread_t thread)
{
	thread->options &= (~TH_OPT_HONOR_QLIMIT);
}

/*
 * thread_enable_send_importance - set/clear the SEND_IMPORTANCE thread option bit.
 */
void
thread_enable_send_importance(thread_t thread, boolean_t enable)
{
	if (enable == TRUE) {
		thread->options |= TH_OPT_SEND_IMPORTANCE;
	} else {
		thread->options &= ~TH_OPT_SEND_IMPORTANCE;
	}
}

kern_return_t
thread_get_ipc_propagate_attr(thread_t thread, struct thread_attr_for_ipc_propagation *attr)
{
	int iotier;
	int qos;

	if (thread == NULL || attr == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	iotier = proc_get_effective_thread_policy(thread, TASK_POLICY_IO);
	qos = proc_get_effective_thread_policy(thread, TASK_POLICY_QOS);

	attr->tafip_iotier = iotier;
	attr->tafip_qos = qos;

	return KERN_SUCCESS;
}

/*
 * thread_set_allocation_name - install an allocation name on the current
 * thread and return the previous one.
 */
kern_allocation_name_t
thread_set_allocation_name(kern_allocation_name_t new_name)
{
	kern_allocation_name_t ret;
	thread_kernel_state_t kstate = thread_get_kernel_state(current_thread());
	ret = kstate->allocation_name;
	/* FIFO: the first name installed wins; it is only replaced when clearing */
	if (!new_name || !kstate->allocation_name) {
		kstate->allocation_name = new_name;
	}
	return ret;
}
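
/*
 * Illustrative save/restore pattern (a sketch; "name" stands for a
 * kern_allocation_name_t obtained elsewhere). Because of the FIFO rule above,
 * a nested set is a no-op and the outermost restore clears the field:
 *
 *	kern_allocation_name_t prev = thread_set_allocation_name(name);
 *	// ... allocations here are attributed to the installed name ...
 *	thread_set_allocation_name(prev);
 */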

void *
thread_iokit_tls_get(uint32_t index)
{
	assert(index < THREAD_SAVE_IOKIT_TLS_COUNT);
	return current_thread()->saved.iokit.tls[index];
}

void
thread_iokit_tls_set(uint32_t index, void * data)
{
	assert(index < THREAD_SAVE_IOKIT_TLS_COUNT);
	current_thread()->saved.iokit.tls[index] = data;
}
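
/*
 * Illustrative usage (a sketch; slot 0 and "my_context" are invented for the
 * example - real slot assignments belong to IOKit):
 *
 *	void *prev = thread_iokit_tls_get(0);
 *	thread_iokit_tls_set(0, my_context);
 *	// ... nested code retrieves my_context via thread_iokit_tls_get(0) ...
 *	thread_iokit_tls_set(0, prev);
 */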

uint64_t
thread_get_last_wait_duration(thread_t thread)
{
	return thread->last_made_runnable_time - thread->last_run_time;
}

integer_t
thread_kern_get_pri(thread_t thr)
{
	return thr->base_pri;
}

void
thread_kern_set_pri(thread_t thr, integer_t pri)
{
	sched_set_kernel_thread_priority(thr, pri);
}

integer_t
thread_kern_get_kernel_maxpri(void)
{
	return MAXPRI_KERNEL;
}
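
/*
 * Illustrative usage (a sketch): raising a kernel thread to just below the
 * maximum kernel priority, then restoring it:
 *
 *	integer_t prev = thread_kern_get_pri(thr);
 *	thread_kern_set_pri(thr, thread_kern_get_kernel_maxpri() - 1);
 *	// ... time-critical work ...
 *	thread_kern_set_pri(thr, prev);
 */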

/*
 * thread_port_with_flavor_no_senders
 *
 * Called whenever the Mach port system detects no-senders on
 * the thread inspect or read port. These ports are allocated lazily and
 * should be deallocated here when there are no senders remaining.
 */
static void
thread_port_with_flavor_no_senders(
	ipc_port_t            port,
	mach_port_mscount_t   mscount __unused)
{
	thread_ro_t tro;
	thread_t thread;
	mach_thread_flavor_t flavor;
	ipc_kobject_type_t kotype;

	ip_mq_lock(port);
	if (port->ip_srights > 0) {
		ip_mq_unlock(port);
		return;
	}
	kotype = ip_kotype(port);
	assert((IKOT_THREAD_READ == kotype) || (IKOT_THREAD_INSPECT == kotype));
	thread = ipc_kobject_get_locked(port, kotype);
	if (thread != THREAD_NULL) {
		thread_reference(thread);
	}
	ip_mq_unlock(port);

	if (thread == THREAD_NULL) {
		/* The thread is exiting or disabled; it will eventually deallocate the port */
		return;
	}

	if (kotype == IKOT_THREAD_READ) {
		flavor = THREAD_FLAVOR_READ;
	} else {
		flavor = THREAD_FLAVOR_INSPECT;
	}

	thread_mtx_lock(thread);
	ip_mq_lock(port);

	/*
	 * If the port is no longer active, then ipc_thread_terminate() ran
	 * and destroyed the kobject already. Just deallocate the thread
	 * ref we took and go away.
	 *
	 * It is also possible that several nsrequests are in flight,
	 * only one shall NULL-out the port entry, and this is the one
	 * that gets to dealloc the port.
	 *
	 * Check for a stale no-senders notification. A call to any function
	 * that vends out send rights to this port could resurrect it between
	 * this notification being generated and actually being handled here.
	 */
	tro = get_thread_ro(thread);
	if (!ip_active(port) ||
	    tro->tro_ports[flavor] != port ||
	    port->ip_srights > 0) {
		ip_mq_unlock(port);
		thread_mtx_unlock(thread);
		thread_deallocate(thread);
		return;
	}

	assert(tro->tro_ports[flavor] == port);
	zalloc_ro_clear_field(ZONE_ID_THREAD_RO, tro, tro_ports[flavor]);
	thread_mtx_unlock(thread);

	ipc_kobject_dealloc_port_and_unlock(port, 0, kotype);

	thread_deallocate(thread);
}

/*
 * The 'thread_region_page_shift' is used by footprint
 * to specify the page size that it will use to
 * accomplish its accounting work on the task being
 * inspected. Since footprint uses a thread for each
 * task that it works on, we need to keep the page_shift
 * on a per-thread basis.
 */

int
thread_self_region_page_shift(void)
{
	/*
	 * Return the page shift that this thread
	 * would like to use for its accounting work.
	 */
	return current_thread()->thread_region_page_shift;
}

void
thread_self_region_page_shift_set(
	int pgshift)
{
	/*
	 * Set the page shift that this thread
	 * would like to use for its accounting work
	 * when dealing with a task.
	 */
	current_thread()->thread_region_page_shift = pgshift;
}
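
/*
 * Illustrative usage (a sketch; a page shift of 14, i.e. 16KB pages, is an
 * assumed example value):
 *
 *	int prev = thread_self_region_page_shift();
 *	thread_self_region_page_shift_set(14);
 *	// ... accounting work against the inspected task ...
 *	thread_self_region_page_shift_set(prev);
 */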

#if CONFIG_DTRACE
uint32_t
dtrace_get_thread_predcache(thread_t thread)
{
	if (thread != THREAD_NULL) {
		return thread->t_dtrace_predcache;
	} else {
		return 0;
	}
}

int64_t
dtrace_get_thread_vtime(thread_t thread)
{
	if (thread != THREAD_NULL) {
		return thread->t_dtrace_vtime;
	} else {
		return 0;
	}
}

int
dtrace_get_thread_last_cpu_id(thread_t thread)
{
	if ((thread != THREAD_NULL) && (thread->last_processor != PROCESSOR_NULL)) {
		return thread->last_processor->cpu_id;
	} else {
		return -1;
	}
}

int64_t
dtrace_get_thread_tracing(thread_t thread)
{
	if (thread != THREAD_NULL) {
		return thread->t_dtrace_tracing;
	} else {
		return 0;
	}
}

uint16_t
dtrace_get_thread_inprobe(thread_t thread)
{
	if (thread != THREAD_NULL) {
		return thread->t_dtrace_inprobe;
	} else {
		return 0;
	}
}

vm_offset_t
thread_get_kernel_stack(thread_t thread)
{
	if (thread != THREAD_NULL) {
		return thread->kernel_stack;
	} else {
		return 0;
	}
}

#if KASAN
struct kasan_thread_data *
kasan_get_thread_data(thread_t thread)
{
	return &thread->kasan_data;
}
#endif

#if CONFIG_KCOV
kcov_thread_data_t *
kcov_get_thread_data(thread_t thread)
{
	return &thread->kcov_data;
}
#endif

#if CONFIG_STKSZ
/*
 * Returns the base of a thread's kernel stack.
 *
 * The coverage sanitizer instruments every function, including those that
 * participate in stack handoff between threads. There is a window in which the
 * CPU still holds old values but the stack has already been handed over to
 * another thread. In this window kernel_stack is 0 but the CPU still uses the
 * original stack (until a context switch occurs). The original kernel_stack
 * value is preserved in ksancov_stack during this window.
 */
vm_offset_t
kcov_stksz_get_thread_stkbase(thread_t thread)
{
	if (thread != THREAD_NULL) {
		kcov_thread_data_t *data = kcov_get_thread_data(thread);
		if (data->ktd_stksz.kst_stack) {
			return data->ktd_stksz.kst_stack;
		} else {
			return thread->kernel_stack;
		}
	} else {
		return 0;
	}
}

vm_offset_t
kcov_stksz_get_thread_stksize(thread_t thread)
{
	if (thread != THREAD_NULL) {
		return kernel_stack_size;
	} else {
		return 0;
	}
}

void
kcov_stksz_set_thread_stack(thread_t thread, vm_offset_t stack)
{
	kcov_thread_data_t *data = kcov_get_thread_data(thread);
	data->ktd_stksz.kst_stack = stack;
}
#endif /* CONFIG_STKSZ */

int64_t
dtrace_calc_thread_recent_vtime(thread_t thread)
{
	if (thread != THREAD_NULL) {
		processor_t processor = current_processor();
		uint64_t abstime = mach_absolute_time();
		timer_t timer;

		timer = processor->thread_timer;

		return timer_grab(&(thread->system_timer)) + timer_grab(&(thread->user_timer)) +
		       (abstime - timer->tstamp); /* XXX need interrupts off to prevent missed time? */
	} else {
		return 0;
	}
}

void
dtrace_set_thread_predcache(thread_t thread, uint32_t predcache)
{
	if (thread != THREAD_NULL) {
		thread->t_dtrace_predcache = predcache;
	}
}

void
dtrace_set_thread_vtime(thread_t thread, int64_t vtime)
{
	if (thread != THREAD_NULL) {
		thread->t_dtrace_vtime = vtime;
	}
}

void
dtrace_set_thread_tracing(thread_t thread, int64_t accum)
{
	if (thread != THREAD_NULL) {
		thread->t_dtrace_tracing = accum;
	}
}

void
dtrace_set_thread_inprobe(thread_t thread, uint16_t inprobe)
{
	if (thread != THREAD_NULL) {
		thread->t_dtrace_inprobe = inprobe;
	}
}

vm_offset_t
dtrace_set_thread_recover(thread_t thread, vm_offset_t recover)
{
	vm_offset_t prev = 0;

	if (thread != THREAD_NULL) {
		prev = thread->recover;
		thread->recover = recover;
	}
	return prev;
}
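
/*
 * Illustrative save/restore pattern (a sketch; "recover_pc" is a made-up
 * stand-in for a probe's fault-recovery address):
 *
 *	vm_offset_t prev = dtrace_set_thread_recover(thread, recover_pc);
 *	// ... probe work that may fault; the trap handler resumes at recover_pc ...
 *	dtrace_set_thread_recover(thread, prev);
 */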

vm_offset_t
dtrace_sign_and_set_thread_recover(thread_t thread, vm_offset_t recover)
{
#if defined(HAS_APPLE_PAC)
	return dtrace_set_thread_recover(thread,
	    (vm_address_t)ptrauth_sign_unauthenticated((void *)recover,
	    ptrauth_key_function_pointer,
	    ptrauth_blend_discriminator(&thread->recover, PAC_DISCRIMINATOR_RECOVER)));
#else /* defined(HAS_APPLE_PAC) */
	return dtrace_set_thread_recover(thread, recover);
#endif /* defined(HAS_APPLE_PAC) */
}

void
dtrace_thread_bootstrap(void)
{
	task_t task = current_task();

	if (task->thread_count == 1) {
		thread_t thread = current_thread();
		if (thread->t_dtrace_flags & TH_DTRACE_EXECSUCCESS) {
			thread->t_dtrace_flags &= ~TH_DTRACE_EXECSUCCESS;
			DTRACE_PROC(exec__success);
			KDBG(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXEC),
			    task_pid(task));
		}
		DTRACE_PROC(start);
	}
	DTRACE_PROC(lwp__start);
}

void
dtrace_thread_didexec(thread_t thread)
{
	thread->t_dtrace_flags |= TH_DTRACE_EXECSUCCESS;
}
#endif /* CONFIG_DTRACE */