/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	kern/thread.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub
 *	Date:	1986
 *
 *	Thread management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to [email protected] any
 * improvements that they make and grant CSL redistribution rights.
 *
 */

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/policy.h>
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <mach/thread_act.h>
#include <mach/thread_status.h>
#include <mach/time_value.h>
#include <mach/vm_param.h>

#include <machine/thread.h>
#include <machine/pal_routines.h>
#include <machine/limits.h>

#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <kern/cpu_data.h>
#include <kern/extmod_statistics.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/mach_param.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/restartable.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_group.h>
#include <kern/coalition.h>
#include <kern/host.h>
#include <kern/zalloc.h>
#include <kern/assert.h>
#include <kern/exc_resource.h>
#include <kern/exc_guard.h>
#include <kern/telemetry.h>
#include <kern/policy_internal.h>
#include <kern/turnstile.h>
#include <kern/sched_clutch.h>
#include <kern/hazard.h>
#include <kern/ast.h>

#include <corpses/task_corpse.h>
#if KPC
#include <kern/kpc.h>
#endif

#if MONOTONIC
#include <kern/monotonic.h>
#include <machine/monotonic.h>
#endif /* MONOTONIC */

#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_port.h>
#include <bank/bank_types.h>

#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>

#include <sys/kdebug.h>
#include <sys/bsdtask_info.h>
#include <mach/sdt.h>
#include <san/kasan.h>
#include <san/kcov_stksz.h>

#include <stdatomic.h>

#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
#include <arm64/proc_reg.h>
#endif /* defined(HAS_APPLE_PAC) */

/*
 * Exported interfaces
 */
#include <mach/task_server.h>
#include <mach/thread_act_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
#include <mach/mach_voucher_server.h>
#include <kern/policy_internal.h>

#if CONFIG_MACF
#include <security/mac_mach_internal.h>
#endif

#include <pthread/workqueue_trace.h>

LCK_GRP_DECLARE(thread_lck_grp, "thread");

static SECURITY_READ_ONLY_LATE(zone_t) thread_zone;
ZONE_DEFINE_ID(ZONE_ID_THREAD_RO, "threads_ro", struct thread_ro, ZC_READONLY);

static void thread_port_with_flavor_no_senders(ipc_port_t, mach_port_mscount_t);

IPC_KOBJECT_DEFINE(IKOT_THREAD_CONTROL);
IPC_KOBJECT_DEFINE(IKOT_THREAD_READ,
    .iko_op_no_senders = thread_port_with_flavor_no_senders);
IPC_KOBJECT_DEFINE(IKOT_THREAD_INSPECT,
    .iko_op_no_senders = thread_port_with_flavor_no_senders);

static struct mpsc_daemon_queue thread_stack_queue;
static struct mpsc_daemon_queue thread_terminate_queue;
static struct mpsc_daemon_queue thread_deallocate_queue;
static struct mpsc_daemon_queue thread_exception_queue;

decl_simple_lock_data(static, crashed_threads_lock);
static queue_head_t crashed_threads_queue;

struct thread_exception_elt {
	struct mpsc_queue_chain link;
	exception_type_t        exception_type;
	task_t                  exception_task;
	thread_t                exception_thread;
};

static SECURITY_READ_ONLY_LATE(struct thread) thread_template = {
#if MACH_ASSERT
	.thread_magic             = THREAD_MAGIC,
#endif /* MACH_ASSERT */
	.wait_result              = THREAD_WAITING,
	.options                  = THREAD_ABORTSAFE,
	.state                    = TH_WAIT | TH_UNINT,
	.th_sched_bucket          = TH_BUCKET_RUN,
	.base_pri                 = BASEPRI_DEFAULT,
	.realtime.deadline        = UINT64_MAX,
	.last_made_runnable_time  = THREAD_NOT_RUNNABLE,
	.last_basepri_change_time = THREAD_NOT_RUNNABLE,
#if defined(CONFIG_SCHED_TIMESHARE_CORE)
	.pri_shift                = INT8_MAX,
#endif
	/* timers are initialized in thread_bootstrap */
};

__startup_func
static void
thread_zone_startup(void)
{
	size_t size = sizeof(struct thread);

#ifdef MACH_BSD
	size += roundup(uthread_size, _Alignof(struct thread));
#endif
	thread_zone = zone_create_ext("threads", size,
	    ZC_ZFREE_CLEARMEM, ZONE_ID_THREAD, NULL);
}
STARTUP(ZALLOC, STARTUP_RANK_MIDDLE, thread_zone_startup);
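
/*
 * Editor's sketch of the resulting zone-element layout under MACH_BSD
 * (illustrative only; the exact padding comes from the roundup above):
 *
 *	+---------------+------------------------------+
 *	| struct thread | BSD uthread (uthread_size)   |
 *	+---------------+------------------------------+
 *
 * get_bsdthread_info() returns a pointer into the trailing uthread region,
 * so both objects share one zone element and one lifetime.
 */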

__startup_data
static struct thread init_thread;
static void thread_deallocate_enqueue(thread_t thread);
static void thread_deallocate_complete(thread_t thread);

#ifdef MACH_BSD
extern void proc_exit(void *);
extern mach_exception_data_type_t proc_encode_exit_exception_code(void *);
extern uint64_t get_dispatchqueue_offset_from_proc(void *);
extern uint64_t get_return_to_kernel_offset_from_proc(void *p);
extern uint64_t get_wq_quantum_offset_from_proc(void *);
extern int      proc_selfpid(void);
extern void     proc_name(int, char*, int);
extern char     *proc_name_address(void *p);
exception_type_t get_exception_from_corpse_crashinfo(kcdata_descriptor_t corpse_info);
#endif /* MACH_BSD */

extern bool bsdthread_part_of_cooperative_workqueue(struct uthread *uth);
extern int disable_exc_resource;
extern int audio_active;
extern int debug_task;
int thread_max = CONFIG_THREAD_MAX;     /* Max number of threads */
int task_threadmax = CONFIG_THREAD_MAX;

static uint64_t thread_unique_id = 100;

struct _thread_ledger_indices thread_ledgers = { .cpu_time = -1 };
static ledger_template_t thread_ledger_template = NULL;
static void init_thread_ledgers(void);

#if CONFIG_JETSAM
void jetsam_on_ledger_cpulimit_exceeded(void);
#endif

extern int task_thread_soft_limit;

#if DEVELOPMENT || DEBUG
extern int exc_resource_threads_enabled;
#endif /* DEVELOPMENT || DEBUG */

/*
 * Level (as a percentage of the limit) at which the CPU usage monitor
 * triggers telemetry, i.e. when any thread's CPU consumption exceeds 70%
 * of the limit, start taking user stack traces (micro-stackshots).
 */
#define CPUMON_USTACKSHOTS_TRIGGER_DEFAULT_PCT 70

/* Percentage. Level at which we start gathering telemetry. */
static TUNABLE(uint8_t, cpumon_ustackshots_trigger_pct,
    "cpumon_ustackshots_trigger_pct", CPUMON_USTACKSHOTS_TRIGGER_DEFAULT_PCT);
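
/*
 * Editor's note: TUNABLE registers a boot-arg of the same name, so the
 * trigger level can be overridden at boot, e.g. (hypothetical value):
 *
 *	nvram boot-args="cpumon_ustackshots_trigger_pct=90"
 */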
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU(void);
#if DEVELOPMENT || DEBUG
void __attribute__((noinline)) SENDING_NOTIFICATION__TASK_HAS_TOO_MANY_THREADS(task_t, int);
#endif /* DEVELOPMENT || DEBUG */

/*
 * The smallest interval over which we support limiting CPU consumption is 1ms
 */
#define MINIMUM_CPULIMIT_INTERVAL_MS 1

os_refgrp_decl(static, thread_refgrp, "thread", NULL);

static inline void
init_thread_from_template(thread_t thread)
{
	/*
	 * In general, struct thread isn't trivially-copyable, since it may
	 * contain pointers to thread-specific state. This may be enforced at
	 * compile time on architectures that store authed + diversified
	 * pointers in machine_thread.
	 *
	 * In this specific case, where we're initializing a new thread from a
	 * thread_template, we know all diversified pointers are NULL; these are
	 * safe to bitwise copy.
	 */
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wnontrivial-memaccess"
	memcpy(thread, &thread_template, sizeof(*thread));
#pragma clang diagnostic pop
}

static void
thread_ro_create(task_t parent_task, thread_t th, thread_ro_t tro_tpl)
{
#if __x86_64__ || __arm__
	th->t_task = parent_task;
#endif
	tro_tpl->tro_owner = th;
	tro_tpl->tro_task = parent_task;
	th->t_tro = zalloc_ro(ZONE_ID_THREAD_RO, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	zalloc_ro_update_elem(ZONE_ID_THREAD_RO, th->t_tro, tro_tpl);
}

static void
thread_ro_destroy(thread_t th)
{
	thread_ro_t tro = get_thread_ro(th);
#if MACH_BSD
	struct ucred *cred = tro->tro_cred;
#endif

	zfree_ro(ZONE_ID_THREAD_RO, tro);
#if MACH_BSD
	if (cred) {
		uthread_cred_free(cred);
	}
#endif
}

#if MACH_BSD
extern void kauth_cred_set(struct ucred **, struct ucred *);

void
thread_ro_update_cred(thread_ro_t tro, struct ucred *ucred)
{
	struct ucred *my_cred = tro->tro_cred;
	if (my_cred != ucred) {
		kauth_cred_set(&my_cred, ucred);
		zalloc_ro_update_field(ZONE_ID_THREAD_RO, tro, tro_cred, &my_cred);
	}
}

void
thread_ro_update_flags(thread_ro_t tro, thread_ro_flags_t add, thread_ro_flags_t clr)
{
	thread_ro_flags_t flags = (tro->tro_flags & ~clr) | add;
	zalloc_ro_update_field(ZONE_ID_THREAD_RO, tro, tro_flags, &flags);
}
#endif
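
/*
 * Editor's note: thread_ro elements live in a read-only zone, so a plain
 * store to a tro field would fault. Every mutation has to funnel through
 * the zalloc_ro_update_* helpers, as in the two functions above, which
 * perform the store from a privileged context on behalf of the caller.
 */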

thread_t
thread_bootstrap(void)
{
	/*
	 * Fill in a template thread for fast initialization.
	 */
	timer_init(&thread_template.user_timer);
	timer_init(&thread_template.system_timer);
	timer_init(&thread_template.ptime);
	timer_init(&thread_template.runnable_timer);

	init_thread_from_template(&init_thread);
	/* fiddle with init thread to skip asserts in set_sched_pri */
	init_thread.sched_pri = MAXPRI_KERNEL;

	return &init_thread;
}

void
thread_machine_init_template(void)
{
	machine_thread_template_init(&thread_template);
}

void
thread_init(void)
{
	/*
	 * Initialize any machine-dependent
	 * per-thread structures necessary.
	 */
	machine_thread_init();

	init_thread_ledgers();
}

boolean_t
thread_is_active(thread_t thread)
{
	return thread->active;
}

void
thread_corpse_continue(void)
{
	thread_t thread = current_thread();

	thread_terminate_internal(thread);

	/*
	 * Handle the thread termination directly
	 * here instead of returning to userspace.
	 */
	assert(thread->active == FALSE);
	thread_ast_clear(thread, AST_APC);
	thread_apc_ast(thread);

	panic("thread_corpse_continue");
	/*NOTREACHED*/
}

__dead2
static void
thread_terminate_continue(void)
{
	panic("thread_terminate_continue");
	/*NOTREACHED*/
}

/*
 *	thread_terminate_self:
 */
void
thread_terminate_self(void)
{
	thread_t thread = current_thread();
	thread_ro_t tro = get_thread_ro(thread);
	task_t task = tro->tro_task;
	int threadcnt;

	pal_thread_terminate_self(thread);

	DTRACE_PROC(lwp__exit);

	thread_mtx_lock(thread);

	ipc_thread_disable(thread);

	thread_mtx_unlock(thread);

	thread_sched_call(thread, NULL);

	spl_t s = splsched();
	thread_lock(thread);

	thread_depress_abort_locked(thread);

	/*
	 * Before we take the thread_lock right above,
	 * act_set_ast_reset_pcs() might not yet observe
	 * that the thread is inactive, and could have
	 * requested an IPI Ack.
	 *
	 * Once we unlock the thread, we know that
	 * act_set_ast_reset_pcs() can't fail to notice
	 * that thread->active is false,
	 * and won't set new ones.
	 */
	thread_reset_pcs_ack_IPI(thread);

	thread_unlock(thread);

	splx(s);

#if CONFIG_TASKWATCH
	thead_remove_taskwatch(thread);
#endif /* CONFIG_TASKWATCH */

	work_interval_thread_terminate(thread);

	thread_mtx_lock(thread);

	thread_policy_reset(thread);

	thread_mtx_unlock(thread);

	assert(thread->th_work_interval == NULL);

	bank_swap_thread_bank_ledger(thread, NULL);

	if (kdebug_enable && bsd_hasthreadname(get_bsdthread_info(thread))) {
		char threadname[MAXTHREADNAMESIZE];
		bsd_getthreadname(get_bsdthread_info(thread), threadname);
		kernel_debug_string_simple(TRACE_STRING_THREADNAME_PREV, threadname);
	}

	uthread_cleanup(get_bsdthread_info(thread), tro);

	if (kdebug_enable && task->bsd_info && !task_is_exec_copy(task)) {
		/* trace out pid before we sign off */
		long dbg_arg1 = 0;
		long dbg_arg2 = 0;

		kdbg_trace_data(task->bsd_info, &dbg_arg1, &dbg_arg2);
#if MONOTONIC
		if (kdebug_debugid_enabled(DBG_MT_INSTRS_CYCLES_THR_EXIT)) {
			uint64_t counts[MT_CORE_NFIXED];
			uint64_t thread_user_time;
			uint64_t thread_system_time;
			thread_user_time = timer_grab(&thread->user_timer);
			thread_system_time = timer_grab(&thread->system_timer);
			mt_fixed_thread_counts(thread, counts);
			KDBG_RELEASE(DBG_MT_INSTRS_CYCLES_THR_EXIT,
#ifdef MT_CORE_INSTRS
			    counts[MT_CORE_INSTRS],
#else /* defined(MT_CORE_INSTRS) */
			    0,
#endif /* !defined(MT_CORE_INSTRS) */
			    counts[MT_CORE_CYCLES],
			    thread_system_time, thread_user_time);
		}
#endif /* MONOTONIC */
		KDBG_RELEASE(TRACE_DATA_THREAD_TERMINATE_PID, dbg_arg1, dbg_arg2);
	}

	/*
	 * After this subtraction, this thread should never access
	 * task->bsd_info unless it got 0 back from the os_atomic_dec. It
	 * could be racing with other threads to be the last thread in the
	 * process, and the last thread in the process will tear down the proc
	 * structure and zero-out task->bsd_info.
	 */
	threadcnt = os_atomic_dec(&task->active_thread_count, relaxed);

#if CONFIG_COALITIONS
	/*
	 * Leave the coalitions when last thread of task is exiting and the
	 * task is not a corpse.
	 */
	if (threadcnt == 0 && !task->corpse_info) {
		coalitions_remove_task(task);
	}
#endif

	/*
	 * If we are the last thread to terminate and the task is
	 * associated with a BSD process, perform BSD process exit.
	 */
	if (threadcnt == 0 && task->bsd_info != NULL && !task_is_exec_copy(task)) {
		mach_exception_data_type_t subcode = 0;
		if (kdebug_enable) {
			/* since we're the last thread in this process, trace out the command name too */
			long args[4] = {};
			kdbg_trace_string(task->bsd_info, &args[0], &args[1], &args[2], &args[3]);
#if MONOTONIC
			if (kdebug_debugid_enabled(DBG_MT_INSTRS_CYCLES_PROC_EXIT)) {
				uint64_t counts[MT_CORE_NFIXED];
				uint64_t task_user_time;
				uint64_t task_system_time;
				mt_fixed_task_counts(task, counts);
				/* since the thread time is not yet added to the task */
				task_user_time = task->total_user_time + timer_grab(&thread->user_timer);
				task_system_time = task->total_system_time + timer_grab(&thread->system_timer);
				KDBG_RELEASE((DBG_MT_INSTRS_CYCLES_PROC_EXIT),
#ifdef MT_CORE_INSTRS
				    counts[MT_CORE_INSTRS],
#else /* defined(MT_CORE_INSTRS) */
				    0,
#endif /* !defined(MT_CORE_INSTRS) */
				    counts[MT_CORE_CYCLES],
				    task_system_time, task_user_time);
			}
#endif /* MONOTONIC */
			KDBG_RELEASE(TRACE_STRING_PROC_EXIT, args[0], args[1], args[2], args[3]);
		}

		/* Get the exit reason before proc_exit */
		subcode = proc_encode_exit_exception_code(task->bsd_info);
		proc_exit(task->bsd_info);
		/*
		 * if there is crash info in task
		 * then do the deliver action since this is
		 * last thread for this task.
		 */
		if (task->corpse_info) {
			/* reset all except task name port */
			ipc_task_reset(task);
			/* enable all task ports (name port unchanged) */
			ipc_task_enable(task);
			exception_type_t etype = get_exception_from_corpse_crashinfo(task->corpse_info);
			task_deliver_crash_notification(task, current_thread(), etype, subcode);
		}
	}

	if (threadcnt == 0) {
		task_lock(task);
		if (task_is_a_corpse_fork(task)) {
			thread_wakeup((event_t)&task->active_thread_count);
		}
		task_unlock(task);
	}

	s = splsched();
	thread_lock(thread);

	/*
	 * Ensure that the depress timer is no longer enqueued,
	 * so the timer can be safely deallocated
	 *
	 * TODO: build timer_call_cancel_wait
	 */

	assert((thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) == 0);

	uint32_t delay_us = 1;

	while (thread->depress_timer_active > 0) {
		thread_unlock(thread);
		splx(s);

		delay(delay_us++);

		if (delay_us > USEC_PER_SEC) {
			panic("depress timer failed to inactivate! "
			    "thread: %p depress_timer_active: %d",
			    thread, thread->depress_timer_active);
		}

		s = splsched();
		thread_lock(thread);
	}

	/*
	 * Cancel wait timer, and wait for
	 * concurrent expirations.
	 */
	if (thread->wait_timer_is_set) {
		thread->wait_timer_is_set = FALSE;

		if (timer_call_cancel(thread->wait_timer)) {
			thread->wait_timer_active--;
		}
	}

	delay_us = 1;

	while (thread->wait_timer_active > 0) {
		thread_unlock(thread);
		splx(s);

		delay(delay_us++);

		if (delay_us > USEC_PER_SEC) {
			panic("wait timer failed to inactivate! "
			    "thread: %p wait_timer_active: %d",
			    thread, thread->wait_timer_active);
		}

		s = splsched();
		thread_lock(thread);
	}

	/*
	 * If there is a reserved stack, release it.
	 */
	if (thread->reserved_stack != 0) {
		stack_free_reserved(thread);
		thread->reserved_stack = 0;
	}

	/*
	 * Mark thread as terminating, and block.
	 */
	thread->state |= TH_TERMINATE;
	thread_mark_wait_locked(thread, THREAD_UNINT);

	assert(thread->th_work_interval_flags == TH_WORK_INTERVAL_FLAGS_NONE);
	assert(thread->kern_promotion_schedpri == 0);
	assert(thread->rwlock_count == 0);
	assert(thread->priority_floor_count == 0);
	assert(thread->handoff_thread == THREAD_NULL);
	assert(thread->th_work_interval == NULL);
	assert(thread->t_rr_state.trr_value == 0);

	assert3u(0, ==, thread->sched_flags &
	    (TH_SFLAG_WAITQ_PROMOTED |
	    TH_SFLAG_RW_PROMOTED |
	    TH_SFLAG_EXEC_PROMOTED |
	    TH_SFLAG_FLOOR_PROMOTED |
	    TH_SFLAG_PROMOTED |
	    TH_SFLAG_DEPRESS));

	thread_unlock(thread);
	/* splsched */

	thread_block((thread_continue_t)thread_terminate_continue);
	/*NOTREACHED*/
}

static bool
thread_ref_release(thread_t thread)
{
	if (thread == THREAD_NULL) {
		return false;
	}

	assert_thread_magic(thread);

	return os_ref_release_raw(&thread->ref_count, &thread_refgrp) == 0;
}

/* Drop a thread refcount safely without triggering a zfree */
void
thread_deallocate_safe(thread_t thread)
{
	if (__improbable(thread_ref_release(thread))) {
		/* enqueue the thread for the thread deallocate daemon to call thread_deallocate_complete */
		thread_deallocate_enqueue(thread);
	}
}

void
thread_deallocate(thread_t thread)
{
	if (__improbable(thread_ref_release(thread))) {
		thread_deallocate_complete(thread);
	}
}
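
/*
 * Editor's sketch of the usual caller-side pairing (assuming `thread` came
 * from thread_reference() or a create call that returns a +1 reference):
 *
 *	thread_reference(thread);
 *	// ... use thread ...
 *	thread_deallocate(thread);	// may free; don't touch thread after
 *
 * Contexts that must not block on the final zfree() (e.g. scheduler or
 * interrupt paths) should use thread_deallocate_safe(), which defers the
 * teardown to the deallocate daemon queue below.
 */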

void
thread_deallocate_complete(
	thread_t                thread)
{
	task_t task;

	assert_thread_magic(thread);

	assert(os_ref_get_count_raw(&thread->ref_count) == 0);

	if (!(thread->state & TH_TERMINATE2)) {
		panic("thread_deallocate: thread not properly terminated");
	}

	assert(thread->runq == PROCESSOR_NULL);

#if KPC
	kpc_thread_destroy(thread);
#endif

	ipc_thread_terminate(thread);

	proc_thread_qos_deallocate(thread);

	task = get_threadtask(thread);

#ifdef MACH_BSD
	uthread_destroy(get_bsdthread_info(thread));
#endif /* MACH_BSD */

	if (thread->t_ledger) {
		ledger_dereference(thread->t_ledger);
	}
	if (thread->t_threadledger) {
		ledger_dereference(thread->t_threadledger);
	}

	assert(thread->turnstile != TURNSTILE_NULL);
	if (thread->turnstile) {
		turnstile_deallocate(thread->turnstile);
	}

	if (IPC_VOUCHER_NULL != thread->ith_voucher) {
		ipc_voucher_release(thread->ith_voucher);
	}

	kfree_data(thread->thread_io_stats, sizeof(struct io_stat_info));
#if CONFIG_PREADOPT_TG
	if (thread->old_preadopt_thread_group) {
		thread_group_release(thread->old_preadopt_thread_group);
	}

	if (thread->preadopt_thread_group) {
		thread_group_release(thread->preadopt_thread_group);
	}
#endif

	if (thread->kernel_stack != 0) {
		stack_free(thread);
	}

	lck_mtx_destroy(&thread->mutex, &thread_lck_grp);
	machine_thread_destroy(thread);

	task_deallocate_grp(task, TASK_GRP_INTERNAL);

#if MACH_ASSERT
	assert_thread_magic(thread);
	thread->thread_magic = 0;
#endif /* MACH_ASSERT */

	lck_mtx_lock(&tasks_threads_lock);
	assert(terminated_threads_count > 0);
	queue_remove(&terminated_threads, thread, thread_t, threads);
	terminated_threads_count--;
	lck_mtx_unlock(&tasks_threads_lock);

	timer_call_free(thread->depress_timer);
	timer_call_free(thread->wait_timer);

	thread_ro_destroy(thread);
	zfree(thread_zone, thread);
}

/*
 *	thread_inspect_deallocate:
 *
 *	Drop a thread inspection reference.
 */
void
thread_inspect_deallocate(
	thread_inspect_t        thread_inspect)
{
	return thread_deallocate((thread_t)thread_inspect);
}

/*
 *	thread_read_deallocate:
 *
 *	Drop a reference on a thread read port.
 */
void
thread_read_deallocate(
	thread_read_t           thread_read)
{
	return thread_deallocate((thread_t)thread_read);
}


/*
 *	thread_exception_queue_invoke:
 *
 *	Deliver EXC_{RESOURCE,GUARD} exceptions.
 */
static void
thread_exception_queue_invoke(mpsc_queue_chain_t elm,
    __assert_only mpsc_daemon_queue_t dq)
{
	struct thread_exception_elt *elt;
	task_t task;
	thread_t thread;
	exception_type_t etype;

	assert(dq == &thread_exception_queue);
	elt = mpsc_queue_element(elm, struct thread_exception_elt, link);

	etype = elt->exception_type;
	task = elt->exception_task;
	thread = elt->exception_thread;
	assert_thread_magic(thread);

	kfree_type(struct thread_exception_elt, elt);

	/* wait for all the threads in the task to terminate */
	task_lock(task);
	task_wait_till_threads_terminate_locked(task);
	task_unlock(task);

	/* Consumes the task ref returned by task_generate_corpse_internal */
	task_deallocate(task);
	/* Consumes the thread ref returned by task_generate_corpse_internal */
	thread_deallocate(thread);

	/* Deliver the notification, also clears the corpse. */
	task_deliver_crash_notification(task, thread, etype, 0);
}

/*
 *	thread_exception_enqueue:
 *
 *	Enqueue a corpse port to be delivered an EXC_{RESOURCE,GUARD}.
 */
void
thread_exception_enqueue(
	task_t          task,
	thread_t        thread,
	exception_type_t etype)
{
	assert(EXC_RESOURCE == etype || EXC_GUARD == etype);
	struct thread_exception_elt *elt = kalloc_type(struct thread_exception_elt, Z_WAITOK);
	elt->exception_type = etype;
	elt->exception_task = task;
	elt->exception_thread = thread;

	mpsc_daemon_enqueue(&thread_exception_queue, &elt->link,
	    MPSC_QUEUE_DISABLE_PREEMPTION);
}

/*
 *	thread_copy_resource_info
 *
 *	Copy the resource info counters from source
 *	thread to destination thread.
 */
void
thread_copy_resource_info(
	thread_t dst_thread,
	thread_t src_thread)
{
	dst_thread->c_switch = src_thread->c_switch;
	dst_thread->p_switch = src_thread->p_switch;
	dst_thread->ps_switch = src_thread->ps_switch;
	dst_thread->precise_user_kernel_time = src_thread->precise_user_kernel_time;
	dst_thread->user_timer = src_thread->user_timer;
	dst_thread->user_timer_save = src_thread->user_timer_save;
	dst_thread->system_timer = src_thread->system_timer;
	dst_thread->system_timer_save = src_thread->system_timer_save;
	dst_thread->runnable_timer = src_thread->runnable_timer;
	dst_thread->vtimer_user_save = src_thread->vtimer_user_save;
	dst_thread->vtimer_prof_save = src_thread->vtimer_prof_save;
	dst_thread->vtimer_rlim_save = src_thread->vtimer_rlim_save;
	dst_thread->vtimer_qos_save = src_thread->vtimer_qos_save;
	dst_thread->syscalls_unix = src_thread->syscalls_unix;
	dst_thread->syscalls_mach = src_thread->syscalls_mach;
	ledger_rollup(dst_thread->t_threadledger, src_thread->t_threadledger);
	*dst_thread->thread_io_stats = *src_thread->thread_io_stats;
}

static void
thread_terminate_queue_invoke(mpsc_queue_chain_t e,
    __assert_only mpsc_daemon_queue_t dq)
{
	thread_t thread = mpsc_queue_element(e, struct thread, mpsc_links);
	task_t task = get_threadtask(thread);

	assert(dq == &thread_terminate_queue);

	task_lock(task);

	/*
	 * if marked for crash reporting, skip reaping.
	 * The corpse delivery thread will clear bit and enqueue
	 * for reaping when done
	 *
	 * Note: the inspection field is set under the task lock
	 *
	 * FIXME[mad]: why enqueue for termination before `inspection` is false ?
	 */
	if (__improbable(thread->inspection)) {
		simple_lock(&crashed_threads_lock, &thread_lck_grp);
		task_unlock(task);

		enqueue_tail(&crashed_threads_queue, &thread->runq_links);
		simple_unlock(&crashed_threads_lock);
		return;
	}


	task->total_user_time += timer_grab(&thread->user_timer);
	task->total_ptime += timer_grab(&thread->ptime);
	task->total_runnable_time += timer_grab(&thread->runnable_timer);
	if (thread->precise_user_kernel_time) {
		task->total_system_time += timer_grab(&thread->system_timer);
	} else {
		task->total_user_time += timer_grab(&thread->system_timer);
	}

	task->c_switch += thread->c_switch;
	task->p_switch += thread->p_switch;
	task->ps_switch += thread->ps_switch;

	task->syscalls_unix += thread->syscalls_unix;
	task->syscalls_mach += thread->syscalls_mach;

	task->task_timer_wakeups_bin_1 += thread->thread_timer_wakeups_bin_1;
	task->task_timer_wakeups_bin_2 += thread->thread_timer_wakeups_bin_2;
	task->task_gpu_ns += ml_gpu_stat(thread);
	task->task_energy += ml_energy_stat(thread);
	task->decompressions += thread->decompressions;

#if MONOTONIC
	mt_terminate_update(task, thread);
#endif /* MONOTONIC */

	thread_update_qos_cpu_time(thread);

	queue_remove(&task->threads, thread, thread_t, task_threads);
	task->thread_count--;

	/*
	 * If the task is being halted, and there is only one thread
	 * left in the task after this one, then wakeup that thread.
	 */
	if (task->thread_count == 1 && task->halting) {
		thread_wakeup((event_t)&task->halting);
	}

	task_unlock(task);

	lck_mtx_lock(&tasks_threads_lock);
	queue_remove(&threads, thread, thread_t, threads);
	threads_count--;
	queue_enter(&terminated_threads, thread, thread_t, threads);
	terminated_threads_count++;
	lck_mtx_unlock(&tasks_threads_lock);

#if MACH_BSD
	/*
	 * The thread no longer counts against the task's thread count,
	 * we can now wake up any pending joiner.
	 *
	 * Note that the inheritor will be set to `thread` which is
	 * incorrect once it is on the termination queue, however
	 * the termination queue runs at MINPRI_KERNEL which is higher
	 * than any user thread, so this isn't a priority inversion.
	 */
	if (thread_get_tag(thread) & THREAD_TAG_USER_JOIN) {
		struct uthread *uth = get_bsdthread_info(thread);
		mach_port_name_t kport = uthread_joiner_port(uth);

		/*
		 * Clear the port's low two bits to tell pthread that the thread is gone.
		 */
#ifndef NO_PORT_GEN
		kport &= ~MACH_PORT_MAKE(0, IE_BITS_GEN_MASK + IE_BITS_GEN_ONE);
#else
		kport |= MACH_PORT_MAKE(0, ~(IE_BITS_GEN_MASK + IE_BITS_GEN_ONE));
#endif
		(void)copyoutmap_atomic32(task->map, kport,
		    uthread_joiner_address(uth));
		uthread_joiner_wake(task, uth);
	}
#endif

	thread_deallocate(thread);
}

static void
thread_deallocate_queue_invoke(mpsc_queue_chain_t e,
    __assert_only mpsc_daemon_queue_t dq)
{
	thread_t thread = mpsc_queue_element(e, struct thread, mpsc_links);

	assert(dq == &thread_deallocate_queue);

	thread_deallocate_complete(thread);
}

/*
 *	thread_terminate_enqueue:
 *
 *	Enqueue a terminating thread for final disposition.
 *
 *	Called at splsched.
 */
void
thread_terminate_enqueue(
	thread_t                thread)
{
	KDBG_RELEASE(TRACE_DATA_THREAD_TERMINATE, thread->thread_id);

	mpsc_daemon_enqueue(&thread_terminate_queue, &thread->mpsc_links,
	    MPSC_QUEUE_DISABLE_PREEMPTION);
}

/*
 *	thread_deallocate_enqueue:
 *
 *	Enqueue a thread for final deallocation.
 */
static void
thread_deallocate_enqueue(
	thread_t                thread)
{
	mpsc_daemon_enqueue(&thread_deallocate_queue, &thread->mpsc_links,
	    MPSC_QUEUE_DISABLE_PREEMPTION);
}

/*
 *	thread_terminate_crashed_threads:
 *	Walk the list of crashed threads and re-enqueue for termination
 *	those that are no longer being inspected.
 */
void
thread_terminate_crashed_threads(void)
{
	thread_t th_remove;

	simple_lock(&crashed_threads_lock, &thread_lck_grp);
	/*
	 * loop through the crashed threads queue and re-queue
	 * any threads that are no longer being inspected
	 */

	qe_foreach_element_safe(th_remove, &crashed_threads_queue, runq_links) {
		/* make sure current_thread is never in crashed queue */
		assert(th_remove != current_thread());

		if (th_remove->inspection == FALSE) {
			remqueue(&th_remove->runq_links);
			mpsc_daemon_enqueue(&thread_terminate_queue, &th_remove->mpsc_links,
			    MPSC_QUEUE_NONE);
		}
	}

	simple_unlock(&crashed_threads_lock);
}

/*
 *	thread_stack_queue_invoke:
 *
 *	Perform stack allocation as required due to
 *	invoke failures.
 */
static void
thread_stack_queue_invoke(mpsc_queue_chain_t elm,
    __assert_only mpsc_daemon_queue_t dq)
{
	thread_t thread = mpsc_queue_element(elm, struct thread, mpsc_links);

	assert(dq == &thread_stack_queue);

	/* allocate stack with interrupts enabled so that we can call into VM */
	stack_alloc(thread);

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_WAIT) | DBG_FUNC_END, thread_tid(thread), 0, 0, 0, 0);

	spl_t s = splsched();
	thread_lock(thread);
	thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
	thread_unlock(thread);
	splx(s);
}

/*
 *	thread_stack_enqueue:
 *
 *	Enqueue a thread for stack allocation.
 *
 *	Called at splsched.
 */
void
thread_stack_enqueue(
	thread_t                thread)
{
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_WAIT) | DBG_FUNC_START, thread_tid(thread), 0, 0, 0, 0);
	assert_thread_magic(thread);

	mpsc_daemon_enqueue(&thread_stack_queue, &thread->mpsc_links,
	    MPSC_QUEUE_DISABLE_PREEMPTION);
}

void
thread_daemon_init(void)
{
	kern_return_t result;

	thread_deallocate_daemon_init();

	thread_deallocate_daemon_register_queue(&thread_terminate_queue,
	    thread_terminate_queue_invoke);

	thread_deallocate_daemon_register_queue(&thread_deallocate_queue,
	    thread_deallocate_queue_invoke);

	hazard_register_mpsc_queue();

	ipc_object_deallocate_register_queue();

	simple_lock_init(&crashed_threads_lock, 0);
	queue_init(&crashed_threads_queue);

	result = mpsc_daemon_queue_init_with_thread(&thread_stack_queue,
	    thread_stack_queue_invoke, BASEPRI_PREEMPT_HIGH,
	    "daemon.thread-stack", MPSC_DAEMON_INIT_NONE);
	if (result != KERN_SUCCESS) {
		panic("thread_daemon_init: thread_stack_daemon");
	}

	result = mpsc_daemon_queue_init_with_thread(&thread_exception_queue,
	    thread_exception_queue_invoke, MINPRI_KERNEL,
	    "daemon.thread-exception", MPSC_DAEMON_INIT_NONE);
	if (result != KERN_SUCCESS) {
		panic("thread_daemon_init: thread_exception_daemon");
	}
}

__options_decl(thread_create_internal_options_t, uint32_t, {
	TH_OPTION_NONE          = 0x00,
	TH_OPTION_NOSUSP        = 0x02,
	TH_OPTION_WORKQ         = 0x04,
	TH_OPTION_MAINTHREAD    = 0x08,
});

void
main_thread_set_immovable_pinned(thread_t thread)
{
	ipc_main_thread_set_immovable_pinned(thread);
}

/*
 * Create a new thread.
 * Doesn't start the thread running.
 *
 * Task and tasks_threads_lock are returned locked on success.
 */
static kern_return_t
thread_create_internal(
	task_t                                  parent_task,
	integer_t                               priority,
	thread_continue_t                       continuation,
	void                                    *parameter,
	thread_create_internal_options_t        options,
	thread_t                                *out_thread)
{
	thread_t new_thread;
	ipc_thread_init_options_t init_options = IPC_THREAD_INIT_NONE;
	struct thread_ro tro_tpl = { };
	bool first_thread = false;

	/*
	 * Allocate a thread and initialize static fields
	 */
	new_thread = zalloc_flags(thread_zone, Z_WAITOK | Z_NOFAIL);

	if (__improbable(current_thread() == &init_thread)) {
		/*
		 * The first thread ever is a global, but because we want to be
		 * able to zone_id_require() threads, we have to stop using the
		 * global piece of memory we used to bootstrap the kernel and
		 * jump to a proper thread from a zone.
		 *
		 * This is why that one thread will inherit its original
		 * state differently.
		 *
		 * Also remember this thread in `vm_pageout_scan_thread`
		 * as this is what the first thread ever becomes.
		 *
		 * Also pre-warm the depress timer since the VM pageout scan
		 * daemon might need to use it.
		 */
		assert(vm_pageout_scan_thread == THREAD_NULL);
		vm_pageout_scan_thread = new_thread;

		first_thread = true;
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wnontrivial-memaccess"
		/* work around 74481146 */
		memcpy(new_thread, &init_thread, sizeof(*new_thread));
#pragma clang diagnostic pop
	} else {
		init_thread_from_template(new_thread);
	}

	if (options & TH_OPTION_MAINTHREAD) {
		init_options |= IPC_THREAD_INIT_MAINTHREAD;
	}

	os_ref_init_count_raw(&new_thread->ref_count, &thread_refgrp, 2);
	machine_thread_create(new_thread, parent_task, first_thread);

#ifdef MACH_BSD
	uthread_init(parent_task, get_bsdthread_info(new_thread),
	    &tro_tpl, (options & TH_OPTION_WORKQ) != 0);
	if (!is_corpsetask(parent_task)) {
		/*
		 * uthread_init will set tro_cred (with a +1)
		 * and tro_proc for live tasks.
		 */
		assert(tro_tpl.tro_cred && tro_tpl.tro_proc);
	}
#endif /* MACH_BSD */

	thread_lock_init(new_thread);
	wake_lock_init(new_thread);

	lck_mtx_init(&new_thread->mutex, &thread_lck_grp, LCK_ATTR_NULL);

	ipc_thread_init(parent_task, new_thread, &tro_tpl, init_options);

	thread_ro_create(parent_task, new_thread, &tro_tpl);

	new_thread->continuation = continuation;
	new_thread->parameter = parameter;
	new_thread->inheritor_flags = TURNSTILE_UPDATE_FLAGS_NONE;
	new_thread->requested_policy = default_thread_requested_policy;
	priority_queue_init(&new_thread->sched_inheritor_queue);
	priority_queue_init(&new_thread->base_inheritor_queue);
#if CONFIG_SCHED_CLUTCH
	priority_queue_entry_init(&new_thread->th_clutch_runq_link);
	priority_queue_entry_init(&new_thread->th_clutch_pri_link);
#endif /* CONFIG_SCHED_CLUTCH */

#if CONFIG_SCHED_EDGE
	new_thread->th_bound_cluster_enqueued = false;
	for (cluster_shared_rsrc_type_t shared_rsrc_type = CLUSTER_SHARED_RSRC_TYPE_MIN; shared_rsrc_type < CLUSTER_SHARED_RSRC_TYPE_COUNT; shared_rsrc_type++) {
		new_thread->th_shared_rsrc_enqueued[shared_rsrc_type] = false;
		new_thread->th_shared_rsrc_heavy_user[shared_rsrc_type] = false;
		new_thread->th_shared_rsrc_heavy_perf_control[shared_rsrc_type] = false;
	}
#endif /* CONFIG_SCHED_EDGE */
	new_thread->th_bound_cluster_id = THREAD_BOUND_CLUSTER_NONE;

	/* Allocate I/O Statistics structure */
	new_thread->thread_io_stats = kalloc_data(sizeof(struct io_stat_info),
	    Z_WAITOK | Z_ZERO | Z_NOFAIL);

#if KASAN
	kasan_init_thread(&new_thread->kasan_data);
#endif

#if CONFIG_KCOV
	kcov_init_thread(&new_thread->kcov_data);
#endif

#if CONFIG_IOSCHED
	/* Clear out the I/O Scheduling info for AppleFSCompression */
	new_thread->decmp_upl = NULL;
#endif /* CONFIG_IOSCHED */

	new_thread->thread_region_page_shift = 0;

#if DEVELOPMENT || DEBUG
	task_lock(parent_task);
	uint16_t thread_limit = parent_task->task_thread_limit;
	if (exc_resource_threads_enabled &&
	    thread_limit > 0 &&
	    parent_task->thread_count >= thread_limit &&
	    !parent_task->task_has_crossed_thread_limit &&
	    !(parent_task->t_flags & TF_CORPSE)) {
		int thread_count = parent_task->thread_count;
		parent_task->task_has_crossed_thread_limit = TRUE;
		task_unlock(parent_task);
		SENDING_NOTIFICATION__TASK_HAS_TOO_MANY_THREADS(parent_task, thread_count);
	} else {
		task_unlock(parent_task);
	}
#endif

	lck_mtx_lock(&tasks_threads_lock);
	task_lock(parent_task);

	/*
	 * Fail thread creation if parent task is being torn down or has too many threads
	 * If the caller asked for TH_OPTION_NOSUSP, also fail if the parent task is suspended
	 */
	if (parent_task->active == 0 || parent_task->halting ||
	    (parent_task->suspend_count > 0 && (options & TH_OPTION_NOSUSP) != 0) ||
	    (parent_task->thread_count >= task_threadmax && parent_task != kernel_task)) {
		task_unlock(parent_task);
		lck_mtx_unlock(&tasks_threads_lock);

#ifdef MACH_BSD
		{
			struct uthread *ut = get_bsdthread_info(new_thread);

			uthread_cleanup(ut, &tro_tpl);
			uthread_destroy(ut);
		}
#endif /* MACH_BSD */
		ipc_thread_disable(new_thread);
		ipc_thread_terminate(new_thread);
		kfree_data(new_thread->thread_io_stats,
		    sizeof(struct io_stat_info));
		lck_mtx_destroy(&new_thread->mutex, &thread_lck_grp);
		machine_thread_destroy(new_thread);
		thread_ro_destroy(new_thread);
		zfree(thread_zone, new_thread);
		return KERN_FAILURE;
	}

	/* Protected by the tasks_threads_lock */
	new_thread->thread_id = ++thread_unique_id;

	/* New threads inherit any default state on the task */
	machine_thread_inherit_taskwide(new_thread, parent_task);

	task_reference_grp(parent_task, TASK_GRP_INTERNAL);

	if (parent_task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
		/*
		 * This task has a per-thread CPU limit; make sure this new thread
		 * gets its limit set too, before it gets out of the kernel.
		 */
		act_set_astledger(new_thread);
	}

	/* Instantiate a thread ledger. Do not fail thread creation if ledger creation fails. */
	if ((new_thread->t_threadledger = ledger_instantiate(thread_ledger_template,
	    LEDGER_CREATE_INACTIVE_ENTRIES)) != LEDGER_NULL) {
		ledger_entry_setactive(new_thread->t_threadledger, thread_ledgers.cpu_time);
	}

	new_thread->t_bankledger = LEDGER_NULL;
	new_thread->t_deduct_bank_ledger_time = 0;
	new_thread->t_deduct_bank_ledger_energy = 0;

	new_thread->t_ledger = parent_task->ledger;
	if (new_thread->t_ledger) {
		ledger_reference(new_thread->t_ledger);
	}

#if defined(CONFIG_SCHED_MULTIQ)
	/* Cache the task's sched_group */
	new_thread->sched_group = parent_task->sched_group;
#endif /* defined(CONFIG_SCHED_MULTIQ) */

	/* Cache the task's map */
	new_thread->map = parent_task->map;

	new_thread->depress_timer = timer_call_alloc(thread_depress_expire, new_thread);
	new_thread->wait_timer = timer_call_alloc(thread_timer_expire, new_thread);

#if KPC
	kpc_thread_create(new_thread);
#endif

	/* Set the thread's scheduling parameters */
	new_thread->sched_mode = SCHED(initial_thread_sched_mode)(parent_task);
	new_thread->max_priority = parent_task->max_priority;
	new_thread->task_priority = parent_task->priority;

#if CONFIG_THREAD_GROUPS
	thread_group_init_thread(new_thread, parent_task);
#endif /* CONFIG_THREAD_GROUPS */

	int new_priority = (priority < 0) ? parent_task->priority : priority;
1433 if (new_priority > new_thread->max_priority) {
1434 new_priority = new_thread->max_priority;
1435 }
1436 #if !defined(XNU_TARGET_OS_OSX)
1437 if (new_priority < MAXPRI_THROTTLE) {
1438 new_priority = MAXPRI_THROTTLE;
1439 }
1440 #endif /* !defined(XNU_TARGET_OS_OSX) */
1441
1442 new_thread->importance = new_priority - new_thread->task_priority;
1443
1444 sched_set_thread_base_priority(new_thread, new_priority);
1445
1446 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
1447 new_thread->sched_stamp = sched_tick;
1448 #if CONFIG_SCHED_CLUTCH
1449 new_thread->pri_shift = sched_clutch_thread_pri_shift(new_thread, new_thread->th_sched_bucket);
1450 #else /* CONFIG_SCHED_CLUTCH */
1451 new_thread->pri_shift = sched_pri_shifts[new_thread->th_sched_bucket];
1452 #endif /* CONFIG_SCHED_CLUTCH */
1453 #endif /* defined(CONFIG_SCHED_TIMESHARE_CORE) */
1454
1455 if (parent_task->max_priority <= MAXPRI_THROTTLE) {
1456 sched_thread_mode_demote(new_thread, TH_SFLAG_THROTTLED);
1457 }
1458
1459 thread_policy_create(new_thread);
1460
1461 /* Chain the thread onto the task's list */
1462 queue_enter(&parent_task->threads, new_thread, thread_t, task_threads);
1463 parent_task->thread_count++;
1464
1465 /* So terminating threads don't need to take the task lock to decrement */
1466 os_atomic_inc(&parent_task->active_thread_count, relaxed);
1467
1468 queue_enter(&threads, new_thread, thread_t, threads);
1469 threads_count++;
1470
1471 new_thread->active = TRUE;
1472 if (task_is_a_corpse_fork(parent_task)) {
1473 /* Set the inspection bit if the task is a corpse fork */
1474 new_thread->inspection = TRUE;
1475 } else {
1476 new_thread->inspection = FALSE;
1477 }
1478 new_thread->corpse_dup = FALSE;
1479 new_thread->turnstile = turnstile_alloc();
1480
1481
1482 *out_thread = new_thread;
1483
1484 if (kdebug_enable) {
1485 long args[4] = {};
1486
1487 kdbg_trace_data(parent_task->bsd_info, &args[1], &args[3]);
1488
1489 /*
1490 * Starting with 26604425, exec'ing creates a new task/thread.
1491 *
1492 * NEWTHREAD in the current process has two possible meanings:
1493 *
1494 * 1) Create a new thread for this process.
1495 * 2) Create a new thread for the future process this will become in an
1496 * exec.
1497 *
1498 * To disambiguate these, arg3 will be set to TRUE for case #2.
1499 *
1500 * The value we need to find (TPF_EXEC_COPY) is stable in the case of a
1501 * task exec'ing. The read of t_procflags does not take the proc_lock.
1502 */
1503 args[2] = task_is_exec_copy(parent_task) ? 1 : 0;
1504
1505 KDBG_RELEASE(TRACE_DATA_NEWTHREAD, (uintptr_t)thread_tid(new_thread),
1506 args[1], args[2], args[3]);
1507
1508 kdbg_trace_string(parent_task->bsd_info, &args[0], &args[1],
1509 &args[2], &args[3]);
1510 KDBG_RELEASE(TRACE_STRING_NEWTHREAD, args[0], args[1], args[2],
1511 args[3]);
1512 }
1513
1514 DTRACE_PROC1(lwp__create, thread_t, *out_thread);
1515
1516 return KERN_SUCCESS;
1517 }
1518
1519 static kern_return_t
thread_create_with_options_internal(task_t task,thread_t * new_thread,boolean_t from_user,thread_create_internal_options_t options,thread_continue_t continuation)1520 thread_create_with_options_internal(
1521 task_t task,
1522 thread_t *new_thread,
1523 boolean_t from_user,
1524 thread_create_internal_options_t options,
1525 thread_continue_t continuation)
1526 {
1527 kern_return_t result;
1528 thread_t thread;
1529
1530 if (task == TASK_NULL || task == kernel_task) {
1531 return KERN_INVALID_ARGUMENT;
1532 }
1533
1534 #if CONFIG_MACF
1535 if (from_user && current_task() != task &&
1536 mac_proc_check_remote_thread_create(task, -1, NULL, 0) != 0) {
1537 return KERN_DENIED;
1538 }
1539 #endif
1540
1541 result = thread_create_internal(task, -1, continuation, NULL, options, &thread);
1542 if (result != KERN_SUCCESS) {
1543 return result;
1544 }
1545
1546 thread->user_stop_count = 1;
1547 thread_hold(thread);
1548 if (task->suspend_count > 0) {
1549 thread_hold(thread);
1550 }
1551
1552 if (from_user) {
1553 extmod_statistics_incr_thread_create(task);
1554 }
1555
1556 task_unlock(task);
1557 lck_mtx_unlock(&tasks_threads_lock);
1558
1559 *new_thread = thread;
1560
1561 return KERN_SUCCESS;
1562 }
1563
1564 kern_return_t
thread_create_immovable(task_t task,thread_t * new_thread)1565 thread_create_immovable(
1566 task_t task,
1567 thread_t *new_thread)
1568 {
1569 return thread_create_with_options_internal(task, new_thread, FALSE,
1570 TH_OPTION_NONE, (thread_continue_t)thread_bootstrap_return);
1571 }
1572
1573 kern_return_t
thread_create_from_user(task_t task,thread_t * new_thread)1574 thread_create_from_user(
1575 task_t task,
1576 thread_t *new_thread)
1577 {
1578 /* All thread ports are created immovable by default */
1579 return thread_create_with_options_internal(task, new_thread, TRUE, TH_OPTION_NONE,
1580 (thread_continue_t)thread_bootstrap_return);
1581 }
1582
1583 kern_return_t
thread_create_with_continuation(task_t task,thread_t * new_thread,thread_continue_t continuation)1584 thread_create_with_continuation(
1585 task_t task,
1586 thread_t *new_thread,
1587 thread_continue_t continuation)
1588 {
1589 return thread_create_with_options_internal(task, new_thread, FALSE, TH_OPTION_NONE, continuation);
1590 }
1591
1592 /*
1593 * Create a thread that is already started, but is waiting on an event
1594 */
1595 static kern_return_t
thread_create_waiting_internal(task_t task,thread_continue_t continuation,event_t event,block_hint_t block_hint,thread_create_internal_options_t options,thread_t * new_thread)1596 thread_create_waiting_internal(
1597 task_t task,
1598 thread_continue_t continuation,
1599 event_t event,
1600 block_hint_t block_hint,
1601 thread_create_internal_options_t options,
1602 thread_t *new_thread)
1603 {
1604 kern_return_t result;
1605 thread_t thread;
1606
1607 if (task == TASK_NULL || task == kernel_task) {
1608 return KERN_INVALID_ARGUMENT;
1609 }
1610
1611 result = thread_create_internal(task, -1, continuation, NULL,
1612 options, &thread);
1613 if (result != KERN_SUCCESS) {
1614 return result;
1615 }
1616
1617 /* note no user_stop_count or thread_hold here */
1618
1619 if (task->suspend_count > 0) {
1620 thread_hold(thread);
1621 }
1622
1623 thread_mtx_lock(thread);
1624 thread_set_pending_block_hint(thread, block_hint);
1625 if (options & TH_OPTION_WORKQ) {
1626 thread->static_param = true;
1627 event = workq_thread_init_and_wq_lock(task, thread);
1628 }
1629 thread_start_in_assert_wait(thread, event, THREAD_INTERRUPTIBLE);
1630 thread_mtx_unlock(thread);
1631
1632 task_unlock(task);
1633 lck_mtx_unlock(&tasks_threads_lock);
1634
1635 *new_thread = thread;
1636
1637 return KERN_SUCCESS;
1638 }
1639
1640 kern_return_t
main_thread_create_waiting(task_t task,thread_continue_t continuation,event_t event,thread_t * new_thread)1641 main_thread_create_waiting(
1642 task_t task,
1643 thread_continue_t continuation,
1644 event_t event,
1645 thread_t *new_thread)
1646 {
1647 return thread_create_waiting_internal(task, continuation, event,
1648 kThreadWaitNone, TH_OPTION_MAINTHREAD, new_thread);
1649 }
1650
1651
1652 static kern_return_t
thread_create_running_internal2(task_t task,int flavor,thread_state_t new_state,mach_msg_type_number_t new_state_count,thread_t * new_thread,boolean_t from_user)1653 thread_create_running_internal2(
1654 task_t task,
1655 int flavor,
1656 thread_state_t new_state,
1657 mach_msg_type_number_t new_state_count,
1658 thread_t *new_thread,
1659 boolean_t from_user)
1660 {
1661 kern_return_t result;
1662 thread_t thread;
1663
1664 if (task == TASK_NULL || task == kernel_task) {
1665 return KERN_INVALID_ARGUMENT;
1666 }
1667
1668 #if CONFIG_MACF
1669 if (from_user && current_task() != task &&
1670 mac_proc_check_remote_thread_create(task, flavor, new_state, new_state_count) != 0) {
1671 return KERN_DENIED;
1672 }
1673 #endif
1674
1675 result = thread_create_internal(task, -1,
1676 (thread_continue_t)thread_bootstrap_return, NULL,
1677 TH_OPTION_NONE, &thread);
1678 if (result != KERN_SUCCESS) {
1679 return result;
1680 }
1681
1682 if (task->suspend_count > 0) {
1683 thread_hold(thread);
1684 }
1685
1686 if (from_user) {
1687 result = machine_thread_state_convert_from_user(thread, flavor,
1688 new_state, new_state_count, NULL, 0, TSSF_FLAGS_NONE);
1689 }
1690 if (result == KERN_SUCCESS) {
1691 result = machine_thread_set_state(thread, flavor, new_state,
1692 new_state_count);
1693 }
1694 if (result != KERN_SUCCESS) {
1695 task_unlock(task);
1696 lck_mtx_unlock(&tasks_threads_lock);
1697
1698 thread_terminate(thread);
1699 thread_deallocate(thread);
1700 return result;
1701 }
1702
1703 thread_mtx_lock(thread);
1704 thread_start(thread);
1705 thread_mtx_unlock(thread);
1706
1707 if (from_user) {
1708 extmod_statistics_incr_thread_create(task);
1709 }
1710
1711 task_unlock(task);
1712 lck_mtx_unlock(&tasks_threads_lock);
1713
1714 *new_thread = thread;
1715
1716 return result;
1717 }
1718
1719 /* Prototype, see justification above */
1720 kern_return_t
1721 thread_create_running(
1722 task_t task,
1723 int flavor,
1724 thread_state_t new_state,
1725 mach_msg_type_number_t new_state_count,
1726 thread_t *new_thread);
1727
1728 kern_return_t
thread_create_running(task_t task,int flavor,thread_state_t new_state,mach_msg_type_number_t new_state_count,thread_t * new_thread)1729 thread_create_running(
1730 task_t task,
1731 int flavor,
1732 thread_state_t new_state,
1733 mach_msg_type_number_t new_state_count,
1734 thread_t *new_thread)
1735 {
1736 return thread_create_running_internal2(
1737 task, flavor, new_state, new_state_count,
1738 new_thread, FALSE);
1739 }
1740
1741 kern_return_t
thread_create_running_from_user(task_t task,int flavor,thread_state_t new_state,mach_msg_type_number_t new_state_count,thread_t * new_thread)1742 thread_create_running_from_user(
1743 task_t task,
1744 int flavor,
1745 thread_state_t new_state,
1746 mach_msg_type_number_t new_state_count,
1747 thread_t *new_thread)
1748 {
1749 return thread_create_running_internal2(
1750 task, flavor, new_state, new_state_count,
1751 new_thread, TRUE);
1752 }
1753
1754 kern_return_t
thread_create_workq_waiting(task_t task,thread_continue_t continuation,thread_t * new_thread)1755 thread_create_workq_waiting(
1756 task_t task,
1757 thread_continue_t continuation,
1758 thread_t *new_thread)
1759 {
1760 /*
1761 * Create thread, but don't pin control port just yet, in case someone calls
1762 * task_threads() and deallocates pinned port before kernel copyout happens,
1763 * which will result in pinned port guard exception. Instead, pin and copyout
1764 * atomically during workq_setup_and_run().
1765 */
	int options = TH_OPTION_NOSUSP | TH_OPTION_WORKQ;
	return thread_create_waiting_internal(task, continuation, NULL,
	           kThreadWaitParkedWorkQueue, options, new_thread);
}

/*
 * kernel_thread_create:
 *
 * Create a thread in the kernel task
 * to execute in kernel context.
 */
kern_return_t
kernel_thread_create(
	thread_continue_t continuation,
	void *parameter,
	integer_t priority,
	thread_t *new_thread)
{
	kern_return_t result;
	thread_t thread;
	task_t task = kernel_task;

	result = thread_create_internal(task, priority, continuation, parameter,
	    TH_OPTION_NONE, &thread);
	if (result != KERN_SUCCESS) {
		return result;
	}

	task_unlock(task);
	lck_mtx_unlock(&tasks_threads_lock);

	stack_alloc(thread);
	assert(thread->kernel_stack != 0);
#if !defined(XNU_TARGET_OS_OSX)
	if (priority > BASEPRI_KERNEL)
#endif
	thread->reserved_stack = thread->kernel_stack;

	if (debug_task & 1) {
		kprintf("kernel_thread_create: thread = %p continuation = %p\n", thread, continuation);
	}
	*new_thread = thread;

	return result;
}

kern_return_t
kernel_thread_start_priority(
	thread_continue_t continuation,
	void *parameter,
	integer_t priority,
	thread_t *new_thread)
{
	kern_return_t result;
	thread_t thread;

	result = kernel_thread_create(continuation, parameter, priority, &thread);
	if (result != KERN_SUCCESS) {
		return result;
	}

	*new_thread = thread;

	thread_mtx_lock(thread);
	thread_start(thread);
	thread_mtx_unlock(thread);

	return result;
}

kern_return_t
kernel_thread_start(
	thread_continue_t continuation,
	void *parameter,
	thread_t *new_thread)
{
	return kernel_thread_start_priority(continuation, parameter, -1, new_thread);
}
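
/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * the common usage pattern for kernel_thread_start(). The continuation
 * `example_worker` and its body are hypothetical. kernel_thread_start()
 * returns a reference on the new thread, so a caller that does not keep the
 * thread_t around should drop that reference with thread_deallocate().
 */
#if 0 /* example only */
static void
example_worker(void *parameter, wait_result_t wresult)
{
	/* ... do work ... */
	thread_terminate(current_thread());
}

static kern_return_t
example_spawn_worker(void)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(example_worker, NULL, &thread);
	if (kr == KERN_SUCCESS) {
		thread_deallocate(thread); /* drop the creation reference */
	}
	return kr;
}
#endif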

/* Separated into helper function so it can be used by THREAD_BASIC_INFO and THREAD_EXTENDED_INFO */
/* it is assumed that the thread is locked by the caller */
static void
retrieve_thread_basic_info(thread_t thread, thread_basic_info_t basic_info)
{
	int state, flags;

	/* fill in info */

	thread_read_times(thread, &basic_info->user_time,
	    &basic_info->system_time, NULL);

	/*
	 * Update lazy-evaluated scheduler info because someone wants it.
	 */
	if (SCHED(can_update_priority)(thread)) {
		SCHED(update_priority)(thread);
	}

	basic_info->sleep_time = 0;

	/*
	 * To calculate cpu_usage, first correct for timer rate,
	 * then for 5/8 ageing. The correction factor [3/5] is
	 * (1/(5/8) - 1).
	 */
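	/*
	 * Worked example (editorial note, assuming TH_USAGE_SCALE is 1000): a
	 * thread whose aged cpu_usage equals half the sched_tick_interval first
	 * scales to 500 below, and the 3/5 correction then yields
	 * (500 * 3) / 5 = 300, i.e. a reported usage of 30% of scale.
	 */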
	basic_info->cpu_usage = 0;
#if defined(CONFIG_SCHED_TIMESHARE_CORE)
	if (sched_tick_interval) {
		basic_info->cpu_usage = (integer_t)(((uint64_t)thread->cpu_usage
		    * TH_USAGE_SCALE) / sched_tick_interval);
		basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
	}
#endif

	if (basic_info->cpu_usage > TH_USAGE_SCALE) {
		basic_info->cpu_usage = TH_USAGE_SCALE;
	}

	basic_info->policy = ((thread->sched_mode == TH_MODE_TIMESHARE)?
	    POLICY_TIMESHARE: POLICY_RR);

	flags = 0;
	if (thread->options & TH_OPT_IDLE_THREAD) {
		flags |= TH_FLAGS_IDLE;
	}

	if (thread->options & TH_OPT_GLOBAL_FORCED_IDLE) {
		flags |= TH_FLAGS_GLOBAL_FORCED_IDLE;
	}

	if (!thread->kernel_stack) {
		flags |= TH_FLAGS_SWAPPED;
	}

	state = 0;
	if (thread->state & TH_TERMINATE) {
		state = TH_STATE_HALTED;
	} else if (thread->state & TH_RUN) {
		state = TH_STATE_RUNNING;
	} else if (thread->state & TH_UNINT) {
		state = TH_STATE_UNINTERRUPTIBLE;
	} else if (thread->state & TH_SUSP) {
		state = TH_STATE_STOPPED;
	} else if (thread->state & TH_WAIT) {
		state = TH_STATE_WAITING;
	}

	basic_info->run_state = state;
	basic_info->flags = flags;

	basic_info->suspend_count = thread->user_stop_count;

	return;
}

kern_return_t
thread_info_internal(
	thread_t thread,
	thread_flavor_t flavor,
	thread_info_t thread_info_out, /* ptr to OUT array */
	mach_msg_type_number_t *thread_info_count) /*IN/OUT*/
{
	spl_t s;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (flavor == THREAD_BASIC_INFO) {
		if (*thread_info_count < THREAD_BASIC_INFO_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		s = splsched();
		thread_lock(thread);

		retrieve_thread_basic_info(thread, (thread_basic_info_t) thread_info_out);

		thread_unlock(thread);
		splx(s);

		*thread_info_count = THREAD_BASIC_INFO_COUNT;

		return KERN_SUCCESS;
	} else if (flavor == THREAD_IDENTIFIER_INFO) {
		thread_identifier_info_t identifier_info;

		if (*thread_info_count < THREAD_IDENTIFIER_INFO_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		identifier_info = __IGNORE_WCASTALIGN((thread_identifier_info_t)thread_info_out);

		s = splsched();
		thread_lock(thread);

		identifier_info->thread_id = thread->thread_id;
		identifier_info->thread_handle = thread->machine.cthread_self;
		identifier_info->dispatch_qaddr = thread_dispatchqaddr(thread);

		thread_unlock(thread);
		splx(s);
		return KERN_SUCCESS;
	} else if (flavor == THREAD_SCHED_TIMESHARE_INFO) {
		policy_timeshare_info_t ts_info;

		if (*thread_info_count < POLICY_TIMESHARE_INFO_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		ts_info = (policy_timeshare_info_t)thread_info_out;

		s = splsched();
		thread_lock(thread);

		if (thread->sched_mode != TH_MODE_TIMESHARE) {
			thread_unlock(thread);
			splx(s);
			return KERN_INVALID_POLICY;
		}

		ts_info->depressed = (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) != 0;
		if (ts_info->depressed) {
			ts_info->base_priority = DEPRESSPRI;
			ts_info->depress_priority = thread->base_pri;
		} else {
			ts_info->base_priority = thread->base_pri;
			ts_info->depress_priority = -1;
		}

		ts_info->cur_priority = thread->sched_pri;
		ts_info->max_priority = thread->max_priority;

		thread_unlock(thread);
		splx(s);

		*thread_info_count = POLICY_TIMESHARE_INFO_COUNT;

		return KERN_SUCCESS;
	} else if (flavor == THREAD_SCHED_FIFO_INFO) {
		if (*thread_info_count < POLICY_FIFO_INFO_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		return KERN_INVALID_POLICY;
	} else if (flavor == THREAD_SCHED_RR_INFO) {
		policy_rr_info_t rr_info;
		uint32_t quantum_time;
		uint64_t quantum_ns;

		if (*thread_info_count < POLICY_RR_INFO_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		rr_info = (policy_rr_info_t) thread_info_out;

		s = splsched();
		thread_lock(thread);

		if (thread->sched_mode == TH_MODE_TIMESHARE) {
			thread_unlock(thread);
			splx(s);

			return KERN_INVALID_POLICY;
		}

		rr_info->depressed = (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) != 0;
		if (rr_info->depressed) {
			rr_info->base_priority = DEPRESSPRI;
			rr_info->depress_priority = thread->base_pri;
		} else {
			rr_info->base_priority = thread->base_pri;
			rr_info->depress_priority = -1;
		}

		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);

		rr_info->max_priority = thread->max_priority;
		rr_info->quantum = (uint32_t)(quantum_ns / 1000 / 1000);

		thread_unlock(thread);
		splx(s);

		*thread_info_count = POLICY_RR_INFO_COUNT;

		return KERN_SUCCESS;
	} else if (flavor == THREAD_EXTENDED_INFO) {
		thread_basic_info_data_t basic_info;
		thread_extended_info_t extended_info = __IGNORE_WCASTALIGN((thread_extended_info_t)thread_info_out);

		if (*thread_info_count < THREAD_EXTENDED_INFO_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		s = splsched();
		thread_lock(thread);

		/* NOTE: This mimics fill_taskthreadinfo(), which is the function used by proc_pidinfo() for
		 * the PROC_PIDTHREADINFO flavor (which can't be used on corpses)
		 */
		retrieve_thread_basic_info(thread, &basic_info);
		extended_info->pth_user_time = (((uint64_t)basic_info.user_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.user_time.microseconds * NSEC_PER_USEC));
		extended_info->pth_system_time = (((uint64_t)basic_info.system_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.system_time.microseconds * NSEC_PER_USEC));

		extended_info->pth_cpu_usage = basic_info.cpu_usage;
		extended_info->pth_policy = basic_info.policy;
		extended_info->pth_run_state = basic_info.run_state;
		extended_info->pth_flags = basic_info.flags;
		extended_info->pth_sleep_time = basic_info.sleep_time;
		extended_info->pth_curpri = thread->sched_pri;
		extended_info->pth_priority = thread->base_pri;
		extended_info->pth_maxpriority = thread->max_priority;

		bsd_getthreadname(get_bsdthread_info(thread), extended_info->pth_name);

		thread_unlock(thread);
		splx(s);

		*thread_info_count = THREAD_EXTENDED_INFO_COUNT;

		return KERN_SUCCESS;
	} else if (flavor == THREAD_DEBUG_INFO_INTERNAL) {
#if DEVELOPMENT || DEBUG
		thread_debug_info_internal_t dbg_info;
		if (*thread_info_count < THREAD_DEBUG_INFO_INTERNAL_COUNT) {
			return KERN_NOT_SUPPORTED;
		}

		if (thread_info_out == NULL) {
			return KERN_INVALID_ARGUMENT;
		}

		dbg_info = __IGNORE_WCASTALIGN((thread_debug_info_internal_t)thread_info_out);
		dbg_info->page_creation_count = thread->t_page_creation_count;

		*thread_info_count = THREAD_DEBUG_INFO_INTERNAL_COUNT;
		return KERN_SUCCESS;
#endif /* DEVELOPMENT || DEBUG */
		return KERN_NOT_SUPPORTED;
	}

	return KERN_INVALID_ARGUMENT;
}

void
thread_read_times(
	thread_t thread,
	time_value_t *user_time,
	time_value_t *system_time,
	time_value_t *runnable_time)
{
	clock_sec_t secs;
	clock_usec_t usecs;
	uint64_t tval_user, tval_system;

	tval_user = timer_grab(&thread->user_timer);
	tval_system = timer_grab(&thread->system_timer);

	if (thread->precise_user_kernel_time) {
		absolutetime_to_microtime(tval_user, &secs, &usecs);
		user_time->seconds = (typeof(user_time->seconds))secs;
		user_time->microseconds = usecs;

		absolutetime_to_microtime(tval_system, &secs, &usecs);
		system_time->seconds = (typeof(system_time->seconds))secs;
		system_time->microseconds = usecs;
	} else {
		/* system_timer may represent either sys or user */
		tval_user += tval_system;
		absolutetime_to_microtime(tval_user, &secs, &usecs);
		user_time->seconds = (typeof(user_time->seconds))secs;
		user_time->microseconds = usecs;

		system_time->seconds = 0;
		system_time->microseconds = 0;
	}

	if (runnable_time) {
		uint64_t tval_runnable = timer_grab(&thread->runnable_timer);
		absolutetime_to_microtime(tval_runnable, &secs, &usecs);
		runnable_time->seconds = (typeof(runnable_time->seconds))secs;
		runnable_time->microseconds = usecs;
	}
}
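
/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * querying the current thread's accumulated times. Callers must hold a
 * reference on the thread they pass in; using current_thread() keeps the
 * example simple. Note that when precise_user_kernel_time is off, all time
 * is reported as user time, per the function above.
 */
#if 0 /* example only */
static void
example_log_thread_times(void)
{
	time_value_t user, sys, runnable;

	thread_read_times(current_thread(), &user, &sys, &runnable);
	kprintf("user %d.%06ds sys %d.%06ds runnable %d.%06ds\n",
	    user.seconds, user.microseconds,
	    sys.seconds, sys.microseconds,
	    runnable.seconds, runnable.microseconds);
}
#endif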

uint64_t
thread_get_runtime_self(void)
{
	boolean_t interrupt_state;
	uint64_t runtime;
	thread_t thread = NULL;
	processor_t processor = NULL;

	thread = current_thread();

	/* Not interrupt safe, as the scheduler may otherwise update timer values underneath us */
	interrupt_state = ml_set_interrupts_enabled(FALSE);
	processor = current_processor();
	timer_update(processor->thread_timer, mach_absolute_time());
	runtime = (timer_grab(&thread->user_timer) + timer_grab(&thread->system_timer));
	ml_set_interrupts_enabled(interrupt_state);

	return runtime;
}

kern_return_t
thread_assign(
	__unused thread_t thread,
	__unused processor_set_t new_pset)
{
	return KERN_FAILURE;
}

/*
 * thread_assign_default:
 *
 * Special version of thread_assign for assigning threads to default
 * processor set.
 */
kern_return_t
thread_assign_default(
	thread_t thread)
{
	return thread_assign(thread, &pset0);
}

/*
 * thread_get_assignment
 *
 * Return current assignment for this thread.
 */
kern_return_t
thread_get_assignment(
	thread_t thread,
	processor_set_t *pset)
{
	if (thread == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	*pset = &pset0;

	return KERN_SUCCESS;
}

/*
 * thread_wire_internal:
 *
 * Specify that the target thread must always be able
 * to run and to allocate memory.
 */
kern_return_t
thread_wire_internal(
	host_priv_t host_priv,
	thread_t thread,
	boolean_t wired,
	boolean_t *prev_state)
{
	if (host_priv == NULL || thread != current_thread()) {
		return KERN_INVALID_ARGUMENT;
	}

	if (prev_state) {
		*prev_state = (thread->options & TH_OPT_VMPRIV) != 0;
	}

	if (wired) {
		if (!(thread->options & TH_OPT_VMPRIV)) {
			vm_page_free_reserve(1); /* XXX */
		}
		thread->options |= TH_OPT_VMPRIV;
	} else {
		if (thread->options & TH_OPT_VMPRIV) {
			vm_page_free_reserve(-1); /* XXX */
		}
		thread->options &= ~TH_OPT_VMPRIV;
	}

	return KERN_SUCCESS;
}


/*
 * thread_wire:
 *
 * User-api wrapper for thread_wire_internal()
 */
kern_return_t
thread_wire(
	host_priv_t host_priv,
	thread_t thread,
	boolean_t wired)
{
	return thread_wire_internal(host_priv, thread, wired, NULL);
}

boolean_t
is_external_pageout_thread(void)
{
	return current_thread() == vm_pageout_state.vm_pageout_external_iothread;
}

boolean_t
is_vm_privileged(void)
{
	return current_thread()->options & TH_OPT_VMPRIV ? TRUE : FALSE;
}

boolean_t
set_vm_privilege(boolean_t privileged)
{
	boolean_t was_vmpriv;

	if (current_thread()->options & TH_OPT_VMPRIV) {
		was_vmpriv = TRUE;
	} else {
		was_vmpriv = FALSE;
	}

	if (privileged != FALSE) {
		current_thread()->options |= TH_OPT_VMPRIV;
	} else {
		current_thread()->options &= ~TH_OPT_VMPRIV;
	}

	return was_vmpriv;
}

void
thread_floor_boost_set_promotion_locked(thread_t thread)
{
	assert(thread->priority_floor_count > 0);

	if (!(thread->sched_flags & TH_SFLAG_FLOOR_PROMOTED)) {
		sched_thread_promote_reason(thread, TH_SFLAG_FLOOR_PROMOTED, 0);
	}
}

/*! @function thread_priority_floor_start
 * @abstract Boost the current thread's priority to the floor.
 * @discussion Increase the priority of the current thread to at least MINPRI_FLOOR.
 * The boost will be maintained until a corresponding thread_priority_floor_end()
 * is called. Every call to thread_priority_floor_start() must be paired with a
 * corresponding call to thread_priority_floor_end() from the same thread.
 * No thread can return to userspace before calling thread_priority_floor_end().
 *
 * NOTE: avoid using this function. Prefer gate_t or sleep_with_inheritor()
 * instead.
 * @result a token to be given to the corresponding thread_priority_floor_end()
 */
thread_pri_floor_t
thread_priority_floor_start(void)
{
	thread_pri_floor_t ret;
	thread_t thread = current_thread();
	__assert_only uint16_t prev_priority_floor_count;

	assert(thread->priority_floor_count < UINT16_MAX);
	prev_priority_floor_count = thread->priority_floor_count++;
#if MACH_ASSERT
	/*
	 * Set the ast to check that the
	 * priority_floor_count is going to be set to zero when
	 * going back to userspace.
	 * Set it only once when we increment it for the first time.
	 */
	if (prev_priority_floor_count == 0) {
		act_set_debug_assert();
	}
#endif

	ret.thread = thread;
	return ret;
}

/*! @function thread_priority_floor_end
 * @abstract ends the floor boost.
 * @param token the token obtained from thread_priority_floor_start()
 * @discussion ends the priority floor boost started with thread_priority_floor_start()
 */
void
thread_priority_floor_end(thread_pri_floor_t *token)
{
	thread_t thread = current_thread();

	assert(thread->priority_floor_count > 0);
	assertf(token->thread == thread, "thread_priority_floor_end called from a different thread from thread_priority_floor_start %p %p", thread, token->thread);

	if ((thread->priority_floor_count-- == 1) && (thread->sched_flags & TH_SFLAG_FLOOR_PROMOTED)) {
		spl_t s = splsched();
		thread_lock(thread);

		if (thread->sched_flags & TH_SFLAG_FLOOR_PROMOTED) {
			sched_thread_unpromote_reason(thread, TH_SFLAG_FLOOR_PROMOTED, 0);
		}

		thread_unlock(thread);
		splx(s);
	}

	token->thread = NULL;
}
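
/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * the required pairing of thread_priority_floor_start() and
 * thread_priority_floor_end() around a critical section, per the contract
 * documented above. `example_do_work` is a hypothetical stand-in for work
 * that must not run below MINPRI_FLOOR.
 */
#if 0 /* example only */
static void
example_with_priority_floor(void)
{
	thread_pri_floor_t token = thread_priority_floor_start();
	example_do_work();
	thread_priority_floor_end(&token);
}
#endif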

/*
 * XXX assuming current thread only, for now...
 */
void
thread_guard_violation(thread_t thread,
    mach_exception_data_type_t code, mach_exception_data_type_t subcode, boolean_t fatal)
{
	assert(thread == current_thread());

	/* Don't set up the AST for kernel threads; this check is needed to ensure
	 * that the guard_exc_* fields in the thread structure are set only by the
	 * current thread and therefore, don't require a lock.
	 */
	if (get_threadtask(thread) == kernel_task) {
		return;
	}

	assert(EXC_GUARD_DECODE_GUARD_TYPE(code));

	/*
	 * Use the saved state area of the thread structure
	 * to store all info required to handle the AST when
	 * returning to userspace. It's possible that there is
	 * already a pending guard exception. If it's non-fatal,
	 * it can only be over-written by a fatal exception code.
	 */
	if (thread->guard_exc_info.code && (thread->guard_exc_fatal || !fatal)) {
		return;
	}

	thread->guard_exc_info.code = code;
	thread->guard_exc_info.subcode = subcode;
	thread->guard_exc_fatal = fatal ? 1 : 0;

	spl_t s = splsched();
	thread_ast_set(thread, AST_GUARD);
	ast_propagate(thread);
	splx(s);
}

#if CONFIG_DEBUG_SYSCALL_REJECTION
extern void rejected_syscall_guard_ast(thread_t __unused t, mach_exception_data_type_t code, mach_exception_data_type_t subcode);
#endif /* CONFIG_DEBUG_SYSCALL_REJECTION */

/*
 * guard_ast:
 *
 * Handle AST_GUARD for a thread. This routine looks at the
 * state saved in the thread structure to determine the cause
 * of this exception. Based on this value, it invokes the
 * appropriate routine which determines other exception related
 * info and raises the exception.
 */
void
guard_ast(thread_t t)
{
	const mach_exception_data_type_t
	    code = t->guard_exc_info.code,
	    subcode = t->guard_exc_info.subcode;

	t->guard_exc_info.code = 0;
	t->guard_exc_info.subcode = 0;
	t->guard_exc_fatal = 0;

	switch (EXC_GUARD_DECODE_GUARD_TYPE(code)) {
	case GUARD_TYPE_NONE:
		/* lingering AST_GUARD on the processor? */
		break;
	case GUARD_TYPE_MACH_PORT:
		mach_port_guard_ast(t, code, subcode);
		break;
	case GUARD_TYPE_FD:
		fd_guard_ast(t, code, subcode);
		break;
#if CONFIG_VNGUARD
	case GUARD_TYPE_VN:
		vn_guard_ast(t, code, subcode);
		break;
#endif
	case GUARD_TYPE_VIRT_MEMORY:
		virt_memory_guard_ast(t, code, subcode);
		break;
#if CONFIG_DEBUG_SYSCALL_REJECTION
	case GUARD_TYPE_REJECTED_SC:
		rejected_syscall_guard_ast(t, code, subcode);
		break;
#endif /* CONFIG_DEBUG_SYSCALL_REJECTION */
	default:
		panic("guard_exc_info %llx %llx", code, subcode);
	}
}

static void
thread_cputime_callback(int warning, __unused const void *arg0, __unused const void *arg1)
{
	if (warning == LEDGER_WARNING_ROSE_ABOVE) {
#if CONFIG_TELEMETRY
		/*
		 * This thread is in danger of violating the CPU usage monitor. Enable telemetry
		 * on the entire task so there are micro-stackshots available if and when
		 * EXC_RESOURCE is triggered. We could have chosen to enable micro-stackshots
		 * for this thread only; but now that this task is suspect, knowing what all of
		 * its threads are up to will be useful.
		 */
		telemetry_task_ctl(current_task(), TF_CPUMON_WARNING, 1);
#endif
		return;
	}

#if CONFIG_TELEMETRY
	/*
	 * If the balance has dipped below the warning level (LEDGER_WARNING_DIPPED_BELOW) or
	 * exceeded the limit, turn telemetry off for the task.
	 */
	telemetry_task_ctl(current_task(), TF_CPUMON_WARNING, 0);
#endif

	if (warning == 0) {
		SENDING_NOTIFICATION__THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU();
	}
}

void __attribute__((noinline))
SENDING_NOTIFICATION__THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU(void)
{
	int pid = 0;
	task_t task = current_task();
	thread_t thread = current_thread();
	uint64_t tid = thread->thread_id;
	const char *procname = "unknown";
	time_value_t thread_total_time = {0, 0};
	time_value_t thread_system_time;
	time_value_t thread_user_time;
	int action;
	uint8_t percentage;
	uint32_t usage_percent = 0;
	uint32_t interval_sec;
	uint64_t interval_ns;
	uint64_t balance_ns;
	boolean_t fatal = FALSE;
	boolean_t send_exc_resource = TRUE; /* in addition to RESOURCE_NOTIFY */
	kern_return_t kr;

#ifdef EXC_RESOURCE_MONITORS
	mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
#endif /* EXC_RESOURCE_MONITORS */
	struct ledger_entry_info lei;

	assert(thread->t_threadledger != LEDGER_NULL);

	/*
	 * Extract the fatal bit and suspend the monitor (which clears the bit).
	 */
	task_lock(task);
	if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_CPUMON) {
		fatal = TRUE;
		send_exc_resource = TRUE;
	}
	/* Only one thread can be here at a time. Whichever makes it through
	 * first will successfully suspend the monitor and proceed to send the
	 * notification. Other threads will get an error trying to suspend the
	 * monitor and give up on sending the notification. In the first release,
	 * the monitor won't be resumed for a number of seconds, but we may
	 * eventually need to handle low-latency resume.
	 */
	kr = task_suspend_cpumon(task);
	task_unlock(task);
	if (kr == KERN_INVALID_ARGUMENT) {
		return;
	}

#ifdef MACH_BSD
	pid = proc_selfpid();
	if (task->bsd_info != NULL) {
		procname = proc_name_address(task->bsd_info);
	}
#endif

	thread_get_cpulimit(&action, &percentage, &interval_ns);

	interval_sec = (uint32_t)(interval_ns / NSEC_PER_SEC);

	thread_read_times(thread, &thread_user_time, &thread_system_time, NULL);
	time_value_add(&thread_total_time, &thread_user_time);
	time_value_add(&thread_total_time, &thread_system_time);
	ledger_get_entry_info(thread->t_threadledger, thread_ledgers.cpu_time, &lei);

	/* credit/debit/balance/limit are in absolute time units;
	 * the refill info is in nanoseconds. */
	absolutetime_to_nanoseconds(lei.lei_balance, &balance_ns);
	if (lei.lei_last_refill > 0) {
		usage_percent = (uint32_t)((balance_ns * 100ULL) / lei.lei_last_refill);
	}

	/* TODO: show task total runtime (via TASK_ABSOLUTETIME_INFO)? */
	printf("process %s[%d] thread %llu caught burning CPU! It used more than %d%% CPU over %u seconds\n",
	    procname, pid, tid, percentage, interval_sec);
	printf(" (actual recent usage: %d%% over ~%llu seconds)\n",
	    usage_percent, (lei.lei_last_refill + NSEC_PER_SEC / 2) / NSEC_PER_SEC);
	printf(" Thread lifetime cpu usage %d.%06ds, (%d.%06d user, %d.%06d sys)\n",
	    thread_total_time.seconds, thread_total_time.microseconds,
	    thread_user_time.seconds, thread_user_time.microseconds,
	    thread_system_time.seconds, thread_system_time.microseconds);
	printf(" Ledger balance: %lld; mabs credit: %lld; mabs debit: %lld\n",
	    lei.lei_balance, lei.lei_credit, lei.lei_debit);
	printf(" mabs limit: %llu; mabs period: %llu ns; last refill: %llu ns%s.\n",
	    lei.lei_limit, lei.lei_refill_period, lei.lei_last_refill,
	    (fatal ? " [fatal violation]" : ""));

	/*
	 * For now, send RESOURCE_NOTIFY in parallel with EXC_RESOURCE. Once
	 * we have logging parity, we will stop sending EXC_RESOURCE (24508922).
	 */

	/* RESOURCE_NOTIFY MIG specifies nanoseconds of CPU time */
	lei.lei_balance = balance_ns;
	absolutetime_to_nanoseconds(lei.lei_limit, &lei.lei_limit);
	trace_resource_violation(RMON_CPUUSAGE_VIOLATED, &lei);
	kr = send_resource_violation(send_cpu_usage_violation, task, &lei,
	    fatal ? kRNFatalLimitFlag : 0);
	if (kr) {
		printf("send_resource_violation(CPU usage, ...): error %#x\n", kr);
	}

#ifdef EXC_RESOURCE_MONITORS
	if (send_exc_resource) {
		if (disable_exc_resource) {
			printf("process %s[%d] thread %llu caught burning CPU! "
			    "EXC_RESOURCE%s suppressed by a boot-arg\n",
			    procname, pid, tid, fatal ? " (and termination)" : "");
			return;
		}

		if (audio_active) {
			printf("process %s[%d] thread %llu caught burning CPU! "
			    "EXC_RESOURCE & termination suppressed due to audio playback\n",
			    procname, pid, tid);
			return;
		}
	}


	if (send_exc_resource) {
		code[0] = code[1] = 0;
		EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_CPU);
		if (fatal) {
			EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_CPU_MONITOR_FATAL);
		} else {
			EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_CPU_MONITOR);
		}
		EXC_RESOURCE_CPUMONITOR_ENCODE_INTERVAL(code[0], interval_sec);
		EXC_RESOURCE_CPUMONITOR_ENCODE_PERCENTAGE(code[0], percentage);
		EXC_RESOURCE_CPUMONITOR_ENCODE_PERCENTAGE(code[1], usage_percent);
		exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
	}
#endif /* EXC_RESOURCE_MONITORS */

	if (fatal) {
#if CONFIG_JETSAM
		jetsam_on_ledger_cpulimit_exceeded();
#else
		task_terminate_internal(task);
#endif
	}
}

#if DEVELOPMENT || DEBUG
void __attribute__((noinline))
SENDING_NOTIFICATION__TASK_HAS_TOO_MANY_THREADS(task_t task, int thread_count)
{
	mach_exception_data_type_t code[EXCEPTION_CODE_MAX] = {0};
	int pid = task_pid(task);
	char procname[MAXCOMLEN + 1] = "unknown";

	if (pid == 1) {
		/*
		 * Cannot suspend launchd
		 */
		return;
	}

	proc_name(pid, procname, sizeof(procname));

	if (disable_exc_resource) {
		printf("process %s[%d] crossed thread count high watermark (%d), EXC_RESOURCE "
		    "suppressed by a boot-arg.\n", procname, pid, thread_count);
		return;
	}

	if (audio_active) {
		printf("process %s[%d] crossed thread count high watermark (%d), EXC_RESOURCE "
		    "suppressed due to audio playback.\n", procname, pid, thread_count);
		return;
	}

	if (!exc_via_corpse_forking) {
		printf("process %s[%d] crossed thread count high watermark (%d), EXC_RESOURCE "
		    "suppressed due to corpse forking being disabled.\n", procname, pid,
		    thread_count);
		return;
	}

	printf("process %s[%d] crossed thread count high watermark (%d), sending "
	    "EXC_RESOURCE\n", procname, pid, thread_count);

	EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_THREADS);
	EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_THREADS_HIGH_WATERMARK);
	EXC_RESOURCE_THREADS_ENCODE_THREADS(code[0], thread_count);

	task_enqueue_exception_with_corpse(task, EXC_RESOURCE, code, EXCEPTION_CODE_MAX, NULL);
}
#endif /* DEVELOPMENT || DEBUG */

void
thread_update_io_stats(thread_t thread, int size, int io_flags)
{
	task_t task = get_threadtask(thread);
	int io_tier;

	if (thread->thread_io_stats == NULL || task->task_io_stats == NULL) {
		return;
	}

	if (io_flags & DKIO_READ) {
		UPDATE_IO_STATS(thread->thread_io_stats->disk_reads, size);
		UPDATE_IO_STATS_ATOMIC(task->task_io_stats->disk_reads, size);
	}

	if (io_flags & DKIO_META) {
		UPDATE_IO_STATS(thread->thread_io_stats->metadata, size);
		UPDATE_IO_STATS_ATOMIC(task->task_io_stats->metadata, size);
	}

	if (io_flags & DKIO_PAGING) {
		UPDATE_IO_STATS(thread->thread_io_stats->paging, size);
		UPDATE_IO_STATS_ATOMIC(task->task_io_stats->paging, size);
	}

	io_tier = ((io_flags & DKIO_TIER_MASK) >> DKIO_TIER_SHIFT);
	assert(io_tier < IO_NUM_PRIORITIES);

	UPDATE_IO_STATS(thread->thread_io_stats->io_priority[io_tier], size);
	UPDATE_IO_STATS_ATOMIC(task->task_io_stats->io_priority[io_tier], size);

	/* Update Total I/O Counts */
	UPDATE_IO_STATS(thread->thread_io_stats->total_io, size);
	UPDATE_IO_STATS_ATOMIC(task->task_io_stats->total_io, size);

	if (!(io_flags & DKIO_READ)) {
		DTRACE_IO3(physical_writes, struct task *, task, uint32_t, size, int, io_flags);
		ledger_credit(task->ledger, task_ledgers.physical_writes, size);
	}
}

static void
init_thread_ledgers(void)
{
	ledger_template_t t;
	int idx;

	assert(thread_ledger_template == NULL);

	if ((t = ledger_template_create("Per-thread ledger")) == NULL) {
		panic("couldn't create thread ledger template");
	}

	if ((idx = ledger_entry_add(t, "cpu_time", "sched", "ns")) < 0) {
		panic("couldn't create cpu_time entry for thread ledger template");
	}

	if (ledger_set_callback(t, idx, thread_cputime_callback, NULL, NULL) < 0) {
		panic("couldn't set thread ledger callback for cpu_time entry");
	}

	thread_ledgers.cpu_time = idx;

	ledger_template_complete(t);
	thread_ledger_template = t;
}

/*
 * Returns currently applied CPU usage limit, or 0/0 if none is applied.
 */
int
thread_get_cpulimit(int *action, uint8_t *percentage, uint64_t *interval_ns)
{
	int64_t abstime = 0;
	uint64_t limittime = 0;
	thread_t thread = current_thread();

	*percentage = 0;
	*interval_ns = 0;
	*action = 0;

	if (thread->t_threadledger == LEDGER_NULL) {
		/*
		 * This thread has no per-thread ledger, so it can't possibly
		 * have a CPU limit applied.
		 */
		return KERN_SUCCESS;
	}

	ledger_get_period(thread->t_threadledger, thread_ledgers.cpu_time, interval_ns);
	ledger_get_limit(thread->t_threadledger, thread_ledgers.cpu_time, &abstime);

	if ((abstime == LEDGER_LIMIT_INFINITY) || (*interval_ns == 0)) {
		/*
		 * This thread's CPU time ledger has no period or limit; so it
		 * doesn't have a CPU limit applied.
		 */
		return KERN_SUCCESS;
	}

	/*
	 * This calculation is the converse to the one in thread_set_cpulimit().
	 */
	absolutetime_to_nanoseconds(abstime, &limittime);
	*percentage = (uint8_t)((limittime * 100ULL) / *interval_ns);
	assert(*percentage <= 100);

	if (thread->options & TH_OPT_PROC_CPULIMIT) {
		assert((thread->options & TH_OPT_PRVT_CPULIMIT) == 0);

		*action = THREAD_CPULIMIT_BLOCK;
	} else if (thread->options & TH_OPT_PRVT_CPULIMIT) {
		assert((thread->options & TH_OPT_PROC_CPULIMIT) == 0);

		*action = THREAD_CPULIMIT_EXCEPTION;
	} else {
		*action = THREAD_CPULIMIT_DISABLE;
	}

	return KERN_SUCCESS;
}
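
/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * reading back a limit installed by thread_set_cpulimit() on the current
 * thread. With a limit of 50% over a 1 s period, `percentage` comes back as
 * (500000000 * 100) / 1000000000 = 50, the converse of the calculation in
 * thread_set_cpulimit() below.
 */
#if 0 /* example only */
static void
example_query_cpulimit(void)
{
	int action;
	uint8_t percentage;
	uint64_t interval_ns;

	thread_get_cpulimit(&action, &percentage, &interval_ns);
	kprintf("cpulimit: action %d, %u%% over %llu ns\n",
	    action, percentage, interval_ns);
}
#endif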

/*
 * Set CPU usage limit on a thread.
 *
 * Calling with percentage of 0 will unset the limit for this thread.
 */
int
thread_set_cpulimit(int action, uint8_t percentage, uint64_t interval_ns)
{
	thread_t thread = current_thread();
	ledger_t l;
	uint64_t limittime = 0;
	uint64_t abstime = 0;

	assert(percentage <= 100);

	if (action == THREAD_CPULIMIT_DISABLE) {
		/*
		 * Remove CPU limit, if any exists.
		 */
		if (thread->t_threadledger != LEDGER_NULL) {
			l = thread->t_threadledger;
			ledger_set_limit(l, thread_ledgers.cpu_time, LEDGER_LIMIT_INFINITY, 0);
			ledger_set_action(l, thread_ledgers.cpu_time, LEDGER_ACTION_IGNORE);
			thread->options &= ~(TH_OPT_PROC_CPULIMIT | TH_OPT_PRVT_CPULIMIT);
		}

		return 0;
	}

	if (interval_ns < MINIMUM_CPULIMIT_INTERVAL_MS * NSEC_PER_MSEC) {
		return KERN_INVALID_ARGUMENT;
	}

	l = thread->t_threadledger;
	if (l == LEDGER_NULL) {
		/*
		 * This thread doesn't yet have a per-thread ledger; so create one with the CPU time entry active.
		 */
		if ((l = ledger_instantiate(thread_ledger_template, LEDGER_CREATE_INACTIVE_ENTRIES)) == LEDGER_NULL) {
			return KERN_RESOURCE_SHORTAGE;
		}

		/*
		 * We are the first to create this thread's ledger, so only activate our entry.
		 */
		ledger_entry_setactive(l, thread_ledgers.cpu_time);
		thread->t_threadledger = l;
	}

	/*
	 * The limit is specified as a percentage of CPU over an interval in nanoseconds.
	 * Calculate the amount of CPU time that the thread needs to consume in order to hit the limit.
	 */
	limittime = (interval_ns * percentage) / 100;
	nanoseconds_to_absolutetime(limittime, &abstime);
	ledger_set_limit(l, thread_ledgers.cpu_time, abstime, cpumon_ustackshots_trigger_pct);
	/*
	 * Refill the thread's allotted CPU time every interval_ns nanoseconds.
	 */
	ledger_set_period(l, thread_ledgers.cpu_time, interval_ns);

	if (action == THREAD_CPULIMIT_EXCEPTION) {
		/*
		 * We don't support programming the CPU usage monitor on a task if any of its
		 * threads have a per-thread blocking CPU limit configured.
		 */
		if (thread->options & TH_OPT_PRVT_CPULIMIT) {
			panic("CPU usage monitor activated, but blocking thread limit exists");
		}

		/*
		 * Make a note that this thread's CPU limit is being used for the task-wide CPU
		 * usage monitor. We don't have to arm the callback which will trigger the
		 * exception, because that was done for us in ledger_instantiate (because the
		 * ledger template used has a default callback).
		 */
		thread->options |= TH_OPT_PROC_CPULIMIT;
	} else {
		/*
		 * We deliberately override any CPU limit imposed by a task-wide limit
		 * (e.g. the CPU usage monitor).
		 */
		thread->options &= ~TH_OPT_PROC_CPULIMIT;

		thread->options |= TH_OPT_PRVT_CPULIMIT;
		/* The per-thread ledger template by default has a callback for CPU time */
		ledger_disable_callback(l, thread_ledgers.cpu_time);
		ledger_set_action(l, thread_ledgers.cpu_time, LEDGER_ACTION_BLOCK);
	}

	return 0;
}
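
/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * capping the current thread at 50% CPU over a 1 s window with the blocking
 * action. Per the calculation above, the ledger limit becomes
 * (1000000000 * 50) / 100 = 500000000 ns of CPU time per refill period.
 */
#if 0 /* example only */
static void
example_cap_current_thread(void)
{
	/* block the thread once it burns 500 ms of CPU in any 1 s period */
	thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, 50, 1 * NSEC_PER_SEC);

	/* ... later, remove the cap ... */
	thread_set_cpulimit(THREAD_CPULIMIT_DISABLE, 0, 0);
}
#endif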

void
thread_sched_call(
	thread_t thread,
	sched_call_t call)
{
	assert((thread->state & TH_WAIT_REPORT) == 0);
	thread->sched_call = call;
}

uint64_t
thread_tid(
	thread_t thread)
{
	return thread != THREAD_NULL ? thread->thread_id : 0;
}

uint64_t
uthread_tid(
	struct uthread *uth)
{
	if (uth) {
		return thread_tid(get_machthread(uth));
	}
	return 0;
}

uint16_t
thread_set_tag(thread_t th, uint16_t tag)
{
	return thread_set_tag_internal(th, tag);
}

uint16_t
thread_get_tag(thread_t th)
{
	return thread_get_tag_internal(th);
}

uint64_t
thread_last_run_time(thread_t th)
{
	return th->last_run_time;
}

/*
 * Shared resource contention management
 *
 * The scheduler attempts to load balance the shared resource intensive
 * workloads across clusters to ensure that the resource is not heavily
 * contended. The kernel relies on external agents (userspace or
 * performance controller) to identify shared resource heavy threads.
 * The load balancing is achieved based on the scheduler configuration
 * enabled on the platform.
 */


#if CONFIG_SCHED_EDGE

/*
 * On the Edge scheduler, load balancing is achieved by looking at
 * cluster-level shared resource loads and dynamically migrating
 * resource-heavy threads to under-utilized clusters. Therefore, when a
 * thread is indicated as a resource heavy thread, the policy set
 * routine simply adds a flag to the thread which is looked at by
 * the scheduler on thread migration decisions.
 */

boolean_t
thread_shared_rsrc_policy_get(thread_t thread, cluster_shared_rsrc_type_t type)
{
	return thread->th_shared_rsrc_heavy_user[type] || thread->th_shared_rsrc_heavy_perf_control[type];
}

__options_decl(sched_edge_rsrc_heavy_thread_state, uint32_t, {
	SCHED_EDGE_RSRC_HEAVY_THREAD_SET = 1,
	SCHED_EDGE_RSRC_HEAVY_THREAD_CLR = 2,
});

kern_return_t
thread_shared_rsrc_policy_set(thread_t thread, __unused uint32_t index, cluster_shared_rsrc_type_t type, shared_rsrc_policy_agent_t agent)
{
	spl_t s = splsched();
	thread_lock(thread);

	bool user = (agent == SHARED_RSRC_POLICY_AGENT_DISPATCH) || (agent == SHARED_RSRC_POLICY_AGENT_SYSCTL);
	bool *thread_flags = (user) ? thread->th_shared_rsrc_heavy_user : thread->th_shared_rsrc_heavy_perf_control;
	if (thread_flags[type]) {
		thread_unlock(thread);
		splx(s);
		return KERN_FAILURE;
	}

	thread_flags[type] = true;
	thread_unlock(thread);
	splx(s);

	KDBG(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_RSRC_HEAVY_THREAD) | DBG_FUNC_NONE, SCHED_EDGE_RSRC_HEAVY_THREAD_SET, thread_tid(thread), type, agent);
	if (thread == current_thread()) {
		if (agent == SHARED_RSRC_POLICY_AGENT_PERFCTL_QUANTUM) {
			ast_on(AST_PREEMPT);
		} else {
			assert(agent != SHARED_RSRC_POLICY_AGENT_PERFCTL_CSW);
			thread_block(THREAD_CONTINUE_NULL);
		}
	}
	return KERN_SUCCESS;
}

kern_return_t
thread_shared_rsrc_policy_clear(thread_t thread, cluster_shared_rsrc_type_t type, shared_rsrc_policy_agent_t agent)
{
	spl_t s = splsched();
	thread_lock(thread);

	bool user = (agent == SHARED_RSRC_POLICY_AGENT_DISPATCH) || (agent == SHARED_RSRC_POLICY_AGENT_SYSCTL);
	bool *thread_flags = (user) ? thread->th_shared_rsrc_heavy_user : thread->th_shared_rsrc_heavy_perf_control;
	if (!thread_flags[type]) {
		thread_unlock(thread);
		splx(s);
		return KERN_FAILURE;
	}

	thread_flags[type] = false;
	thread_unlock(thread);
	splx(s);

	KDBG(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_RSRC_HEAVY_THREAD) | DBG_FUNC_NONE, SCHED_EDGE_RSRC_HEAVY_THREAD_CLR, thread_tid(thread), type, agent);
	if (thread == current_thread()) {
		if (agent == SHARED_RSRC_POLICY_AGENT_PERFCTL_QUANTUM) {
			ast_on(AST_PREEMPT);
		} else {
			assert(agent != SHARED_RSRC_POLICY_AGENT_PERFCTL_CSW);
			thread_block(THREAD_CONTINUE_NULL);
		}
	}
	return KERN_SUCCESS;
}

#else /* CONFIG_SCHED_EDGE */

/*
 * On non-Edge schedulers, shared resource contention is managed by simply
 * binding threads to specific clusters based on the worker index passed by
 * the agents that mark threads as resource heavy. The thread binding
 * approach does not provide any rebalancing opportunities; it can also
 * suffer from scheduling delays if the cluster where the thread is bound
 * is contended.
 */

boolean_t
thread_shared_rsrc_policy_get(__unused thread_t thread, __unused cluster_shared_rsrc_type_t type)
{
	return false;
}

kern_return_t
thread_shared_rsrc_policy_set(thread_t thread, uint32_t index, __unused cluster_shared_rsrc_type_t type, __unused shared_rsrc_policy_agent_t agent)
{
	return thread_bind_cluster_id(thread, index, THREAD_BIND_SOFT | THREAD_BIND_ELIGIBLE_ONLY);
}

kern_return_t
thread_shared_rsrc_policy_clear(thread_t thread, __unused cluster_shared_rsrc_type_t type, __unused shared_rsrc_policy_agent_t agent)
{
	return thread_bind_cluster_id(thread, 0, THREAD_UNBIND);
}

#endif /* CONFIG_SCHED_EDGE */
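
/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * how an agent might mark the current thread as heavy on one shared resource
 * and later clear it. The type constant CLUSTER_SHARED_RSRC_TYPE_RR is an
 * assumption made for illustration; on non-Edge schedulers the worker index
 * (0 below) selects the cluster the thread gets soft-bound to.
 */
#if 0 /* example only */
static void
example_mark_rsrc_heavy(void)
{
	thread_t self = current_thread();

	thread_shared_rsrc_policy_set(self, 0, CLUSTER_SHARED_RSRC_TYPE_RR,
	    SHARED_RSRC_POLICY_AGENT_SYSCTL);
	/* ... resource-heavy phase ... */
	thread_shared_rsrc_policy_clear(self, CLUSTER_SHARED_RSRC_TYPE_RR,
	    SHARED_RSRC_POLICY_AGENT_SYSCTL);
}
#endif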

uint64_t
thread_dispatchqaddr(
	thread_t thread)
{
	uint64_t dispatchqueue_addr;
	uint64_t thread_handle;
	task_t task;

	if (thread == THREAD_NULL) {
		return 0;
	}

	thread_handle = thread->machine.cthread_self;
	if (thread_handle == 0) {
		return 0;
	}

	task = get_threadtask(thread);
	if (thread->inspection == TRUE) {
		dispatchqueue_addr = thread_handle + get_task_dispatchqueue_offset(task);
	} else if (task->bsd_info) {
		dispatchqueue_addr = thread_handle + get_dispatchqueue_offset_from_proc(task->bsd_info);
	} else {
		dispatchqueue_addr = 0;
	}

	return dispatchqueue_addr;
}


uint64_t
thread_wqquantum_addr(thread_t thread)
{
	uint64_t thread_handle;
	task_t task;

	if (thread == THREAD_NULL) {
		return 0;
	}

	thread_handle = thread->machine.cthread_self;
	if (thread_handle == 0) {
		return 0;
	}
	task = get_threadtask(thread);

	uint64_t wq_quantum_expiry_offset = get_wq_quantum_offset_from_proc(task->bsd_info);
	if (wq_quantum_expiry_offset == 0) {
		return 0;
	}

	return wq_quantum_expiry_offset + thread_handle;
}

uint64_t
thread_rettokern_addr(
	thread_t thread)
{
	uint64_t rettokern_addr;
	uint64_t rettokern_offset;
	uint64_t thread_handle;
	task_t task;

	if (thread == THREAD_NULL) {
		return 0;
	}

	thread_handle = thread->machine.cthread_self;
	if (thread_handle == 0) {
		return 0;
	}
	task = get_threadtask(thread);

	if (task->bsd_info) {
		rettokern_offset = get_return_to_kernel_offset_from_proc(task->bsd_info);

		/* Return 0 if return to kernel offset is not initialized. */
		if (rettokern_offset == 0) {
			rettokern_addr = 0;
		} else {
			rettokern_addr = thread_handle + rettokern_offset;
		}
	} else {
		rettokern_addr = 0;
	}

	return rettokern_addr;
}

/*
 * Export routines to other components for things that are done as macros
 * within the osfmk component.
 */

void
thread_mtx_lock(thread_t thread)
{
	lck_mtx_lock(&thread->mutex);
}

void
thread_mtx_unlock(thread_t thread)
{
	lck_mtx_unlock(&thread->mutex);
}

void
thread_reference(
	thread_t thread)
{
	if (thread != THREAD_NULL) {
		zone_id_require(ZONE_ID_THREAD, sizeof(struct thread), thread);
		os_ref_retain_raw(&thread->ref_count, &thread_refgrp);
	}
}

void
thread_require(thread_t thread)
{
	zone_id_require(ZONE_ID_THREAD, sizeof(struct thread), thread);
}

#undef thread_should_halt

boolean_t
thread_should_halt(
	thread_t th)
{
	return thread_should_halt_fast(th);
}

/*
 * thread_set_voucher_name - reset the voucher port name bound to this thread
 *
 * Conditions: nothing locked
 */

kern_return_t
thread_set_voucher_name(mach_port_name_t voucher_name)
{
	thread_t thread = current_thread();
	ipc_voucher_t new_voucher = IPC_VOUCHER_NULL;
	ipc_voucher_t voucher;
	ledger_t bankledger = NULL;
	struct thread_group *banktg = NULL;
	uint32_t persona_id = 0;

	if (MACH_PORT_DEAD == voucher_name) {
		return KERN_INVALID_RIGHT;
	}

	/*
	 * aggressively convert to voucher reference
	 */
	if (MACH_PORT_VALID(voucher_name)) {
		new_voucher = convert_port_name_to_voucher(voucher_name);
		if (IPC_VOUCHER_NULL == new_voucher) {
			return KERN_INVALID_ARGUMENT;
		}
	}
	bank_get_bank_ledger_thread_group_and_persona(new_voucher, &bankledger, &banktg, &persona_id);

	thread_mtx_lock(thread);
	voucher = thread->ith_voucher;
	thread->ith_voucher_name = voucher_name;
	thread->ith_voucher = new_voucher;
	thread_mtx_unlock(thread);

	bank_swap_thread_bank_ledger(thread, bankledger);
#if CONFIG_THREAD_GROUPS
	thread_group_set_bank(thread, banktg);
#endif /* CONFIG_THREAD_GROUPS */

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_IPC, MACH_THREAD_SET_VOUCHER) | DBG_FUNC_NONE,
	    (uintptr_t)thread_tid(thread),
	    (uintptr_t)voucher_name,
	    VM_KERNEL_ADDRPERM((uintptr_t)new_voucher),
	    persona_id, 0);

	if (IPC_VOUCHER_NULL != voucher) {
		ipc_voucher_release(voucher);
	}

	return KERN_SUCCESS;
}

/*
 * thread_get_mach_voucher - return a voucher reference for the specified thread voucher
 *
 * Conditions: nothing locked
 *
 * NOTE: At the moment, there is no distinction between the current and effective
 * vouchers because we only set them at the thread level currently.
 */
kern_return_t
thread_get_mach_voucher(
	thread_act_t thread,
	mach_voucher_selector_t __unused which,
	ipc_voucher_t *voucherp)
{
	ipc_voucher_t voucher;

	if (THREAD_NULL == thread) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);
	voucher = thread->ith_voucher;

	if (IPC_VOUCHER_NULL != voucher) {
		ipc_voucher_reference(voucher);
		thread_mtx_unlock(thread);
		*voucherp = voucher;
		return KERN_SUCCESS;
	}

	thread_mtx_unlock(thread);

	*voucherp = IPC_VOUCHER_NULL;
	return KERN_SUCCESS;
}

/*
 * thread_set_mach_voucher - set a voucher reference for the specified thread voucher
 *
 * Conditions: caller holds a reference on the voucher.
 * nothing locked.
 *
 * We grab another reference to the voucher and bind it to the thread.
 * The old voucher reference associated with the thread is
 * discarded.
 */
kern_return_t
thread_set_mach_voucher(
	thread_t thread,
	ipc_voucher_t voucher)
{
	ipc_voucher_t old_voucher;
	ledger_t bankledger = NULL;
	struct thread_group *banktg = NULL;
	uint32_t persona_id = 0;

	if (THREAD_NULL == thread) {
		return KERN_INVALID_ARGUMENT;
	}

	bank_get_bank_ledger_thread_group_and_persona(voucher, &bankledger, &banktg, &persona_id);

	thread_mtx_lock(thread);
	/*
	 * Once the thread is started, we will look at `ith_voucher` without
	 * holding any lock.
	 *
	 * Setting the voucher hence can only be done by current_thread() or
	 * before it started. "started" flips under the thread mutex and must be
	 * tested under it too.
	 */
	if (thread != current_thread() && thread->started) {
		thread_mtx_unlock(thread);
		return KERN_INVALID_ARGUMENT;
	}

	ipc_voucher_reference(voucher);
	old_voucher = thread->ith_voucher;
	thread->ith_voucher = voucher;
	thread->ith_voucher_name = MACH_PORT_NULL;
	thread_mtx_unlock(thread);

	bank_swap_thread_bank_ledger(thread, bankledger);
#if CONFIG_THREAD_GROUPS
	thread_group_set_bank(thread, banktg);
#endif /* CONFIG_THREAD_GROUPS */

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_IPC, MACH_THREAD_SET_VOUCHER) | DBG_FUNC_NONE,
	    (uintptr_t)thread_tid(thread),
	    (uintptr_t)MACH_PORT_NULL,
	    VM_KERNEL_ADDRPERM((uintptr_t)voucher),
	    persona_id, 0);

	ipc_voucher_release(old_voucher);

	return KERN_SUCCESS;
}

/*
 * thread_swap_mach_voucher - swap a voucher reference for the specified thread voucher
 *
 * Conditions: caller holds a reference on the new and presumed old voucher(s).
 * nothing locked.
 *
 * This function is no longer supported.
 */
kern_return_t
thread_swap_mach_voucher(
	__unused thread_t thread,
	__unused ipc_voucher_t new_voucher,
	ipc_voucher_t *in_out_old_voucher)
{
	/*
	 * Currently this function is only called from a MIG generated
	 * routine which doesn't release the reference on the voucher
	 * addressed by in_out_old_voucher. To avoid leaking this reference,
	 * a call to release it has been added here.
	 */
	ipc_voucher_release(*in_out_old_voucher);
	OS_ANALYZER_SUPPRESS("81787115") return KERN_NOT_SUPPORTED;
}

/*
 * thread_get_current_voucher_origin_pid - get the pid of the originator of the current voucher.
 */
kern_return_t
thread_get_current_voucher_origin_pid(
	int32_t *pid)
{
	uint32_t buf_size;
	kern_return_t kr;
	thread_t thread = current_thread();

	buf_size = sizeof(*pid);
	kr = mach_voucher_attr_command(thread->ith_voucher,
	    MACH_VOUCHER_ATTR_KEY_BANK,
	    BANK_ORIGINATOR_PID,
	    NULL,
	    0,
	    (mach_voucher_attr_content_t)pid,
	    &buf_size);

	return kr;
}
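
/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * querying the pid behind the current thread's voucher. The command can
 * fail (e.g. when no voucher is adopted) rather than returning a sentinel
 * pid, so the result must be checked before use.
 */
#if 0 /* example only */
static void
example_log_voucher_origin(void)
{
	int32_t pid;

	if (thread_get_current_voucher_origin_pid(&pid) == KERN_SUCCESS) {
		kprintf("current voucher originated from pid %d\n", pid);
	}
}
#endif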

#if CONFIG_THREAD_GROUPS
/*
 * Returns the current thread's voucher-carried thread group
 *
 * Reference is borrowed from this being the current voucher, so it does NOT
 * return a reference to the group.
 */
struct thread_group *
thread_get_current_voucher_thread_group(thread_t thread)
{
	assert(thread == current_thread());

	if (thread->ith_voucher == NULL) {
		return NULL;
	}

	ledger_t bankledger = NULL;
	struct thread_group *banktg = NULL;

	bank_get_bank_ledger_thread_group_and_persona(thread->ith_voucher, &bankledger, &banktg, NULL);

	return banktg;
}

#endif /* CONFIG_THREAD_GROUPS */

extern struct workqueue *
proc_get_wqptr(void *proc);

static bool
task_supports_cooperative_workqueue(task_t task)
{
	assert(task == current_task());
	if (task->bsd_info == NULL) {
		return false;
	}

	uint64_t wq_quantum_expiry_offset = get_wq_quantum_offset_from_proc(task->bsd_info);
	/* userspace may not have called workq_open yet */
	struct workqueue *wq = proc_get_wqptr(task->bsd_info);

	return (wq != NULL) && (wq_quantum_expiry_offset != 0);
}

/* Not safe to call from scheduler paths - should only be called on self */
bool
thread_supports_cooperative_workqueue(thread_t thread)
{
	struct uthread *uth = get_bsdthread_info(thread);
	task_t task = get_threadtask(thread);

	assert(thread == current_thread());

	return task_supports_cooperative_workqueue(task) &&
	       bsdthread_part_of_cooperative_workqueue(uth);
}

static inline bool
thread_has_armed_workqueue_quantum(thread_t thread)
{
	return thread->workq_quantum_deadline != 0;
}

/*
 * The workq quantum is a lazy timer that is evaluated at 2 specific times in
 * the scheduler:
 *
 * - context switch time
 * - scheduler quantum expiry time.
 *
 * We're currently expressing the workq quantum with a 0.5 scale factor of the
 * scheduler quantum. It is possible that if the workq quantum is rearmed
 * shortly after the scheduler quantum begins, we could have a large delay
 * between when the workq quantum next expires and when it actually is noticed.
 *
 * A potential future improvement for the wq quantum expiry logic is to compare
 * it to the next actual scheduler quantum deadline and expire it if it is
 * within a certain leeway.
 */
static inline uint64_t
thread_workq_quantum_size(thread_t thread)
{
	return (uint64_t) (SCHED(initial_quantum_size)(thread) / 2);
}
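
/*
 * Worked example (editorial note, assuming a typical 10 ms scheduler
 * quantum): thread_workq_quantum_size() yields 5 ms, so a quantum armed in
 * thread_arm_workqueue_quantum() below expires once the thread accumulates
 * another 5 ms of user+system runtime, and the expiry is only noticed at
 * the next context switch or scheduler quantum expiry, per the lazy-timer
 * comment above.
 */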

/*
 * Always called by a thread on itself - either at the AST boundary after
 * processing an existing quantum expiry, or when a new quantum is armed
 * before the thread goes out to userspace to handle a thread request.
 */
void
thread_arm_workqueue_quantum(thread_t thread)
{
	/*
	 * If the task is not opted into wq quantum notification, or if the
	 * thread is not part of the cooperative workqueue, don't even bother
	 * with tracking the quantum or calculating expiry.
	 */
	if (!thread_supports_cooperative_workqueue(thread)) {
		assert(thread->workq_quantum_deadline == 0);
		return;
	}

	assert(current_thread() == thread);
	assert(thread_get_tag(thread) & THREAD_TAG_WORKQUEUE);

	uint64_t current_runtime = thread_get_runtime_self();
	uint64_t deadline = thread_workq_quantum_size(thread) + current_runtime;

	/*
	 * The update of a workqueue quantum should always be followed by the
	 * update of the AST - see the explanation in kern/thread.h for the
	 * synchronization of this field.
	 */
	thread->workq_quantum_deadline = deadline;

	/* We're arming a new quantum, clear any previous expiry notification */
	act_clear_astkevent(thread, AST_KEVENT_WORKQ_QUANTUM_EXPIRED);

	WQ_TRACE(TRACE_wq_quantum_arm, current_runtime, deadline, 0, 0);

	WORKQ_QUANTUM_HISTORY_WRITE_ENTRY(thread, thread->workq_quantum_deadline, true);
}

/* Called by a thread on itself when it is about to park */
void
thread_disarm_workqueue_quantum(thread_t thread)
{
	/*
	 * The update of a workqueue quantum should always be followed by the
	 * update of the AST - see the explanation in kern/thread.h for the
	 * synchronization of this field.
	 */
	thread->workq_quantum_deadline = 0;
	act_clear_astkevent(thread, AST_KEVENT_WORKQ_QUANTUM_EXPIRED);

	WQ_TRACE(TRACE_wq_quantum_disarm, 0, 0, 0, 0);

	WORKQ_QUANTUM_HISTORY_WRITE_ENTRY(thread, thread->workq_quantum_deadline, false);
}
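
/*
 * Illustrative lifecycle sketch (the caller shown is hypothetical; the
 * pairing follows the comments above): a cooperative workqueue thread arms
 * its quantum before going out to userspace for a thread request, and
 * disarms it when it is about to park:
 *
 *	thread_arm_workqueue_quantum(current_thread());
 *	... return to userspace and run the work item; scheduler paths
 *	    may post AST_KEVENT_WORKQ_QUANTUM_EXPIRED once the quantum
 *	    is used up ...
 *	thread_disarm_workqueue_quantum(current_thread());
 */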

/*
 * This is called at context switch time on a thread that may not be self,
 * and at AST time.
 */
bool
thread_has_expired_workqueue_quantum(thread_t thread, bool should_trace)
{
	if (!thread_has_armed_workqueue_quantum(thread)) {
		return false;
	}
	/*
	 * We do not do a thread_get_runtime_self() here since this function is
	 * called at context switch time or during scheduler quantum expiry and
	 * therefore we may not be evaluating it on the current thread/self.
	 *
	 * In addition, the timers on the thread have just been updated
	 * recently, so we don't need to update them again.
	 */
	uint64_t runtime = (timer_grab(&thread->user_timer) + timer_grab(&thread->system_timer));
	bool expired = runtime > thread->workq_quantum_deadline;

	if (expired && should_trace) {
		WQ_TRACE(TRACE_wq_quantum_expired, runtime, thread->workq_quantum_deadline, 0, 0);
	}

	return expired;
}

/*
 * Called on a thread that is being context switched out or during quantum
 * expiry on self. Only called from scheduler paths.
 */
void
thread_evaluate_workqueue_quantum_expiry(thread_t thread)
{
	if (thread_has_expired_workqueue_quantum(thread, true)) {
		act_set_astkevent(thread, AST_KEVENT_WORKQ_QUANTUM_EXPIRED);
	}
}

boolean_t
thread_has_thread_name(thread_t th)
{
	if (th) {
		return bsd_hasthreadname(get_bsdthread_info(th));
	}

	/*
	 * This is an odd case; clients may set the thread name based on the
	 * lack of a name, but in this context there is no uthread to attach
	 * the name to.
	 */
	return FALSE;
}

void
thread_set_thread_name(thread_t th, const char* name)
{
	if (th && name) {
		bsd_setthreadname(get_bsdthread_info(th), name);
	}
}

void
thread_get_thread_name(thread_t th, char* name)
{
	if (!name) {
		return;
	}
	if (th) {
		bsd_getthreadname(get_bsdthread_info(th), name);
	} else {
		name[0] = '\0';
	}
}
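
/*
 * Usage sketch (illustrative only; the name string is hypothetical, and the
 * buffer size is an assumption - MAXTHREADNAMESIZE is the conventional BSD
 * thread name size). thread_get_thread_name() writes an empty string when no
 * thread is supplied:
 *
 *	char name[MAXTHREADNAMESIZE];
 *	thread_set_thread_name(current_thread(), "com.example.worker");
 *	thread_get_thread_name(current_thread(), name);
 */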

void
thread_set_honor_qlimit(thread_t thread)
{
	thread->options |= TH_OPT_HONOR_QLIMIT;
}

void
thread_clear_honor_qlimit(thread_t thread)
{
	thread->options &= (~TH_OPT_HONOR_QLIMIT);
}

/*
 * thread_enable_send_importance - set/clear the SEND_IMPORTANCE thread option bit.
 */
void
thread_enable_send_importance(thread_t thread, boolean_t enable)
{
	if (enable == TRUE) {
		thread->options |= TH_OPT_SEND_IMPORTANCE;
	} else {
		thread->options &= ~TH_OPT_SEND_IMPORTANCE;
	}
}

kern_return_t
thread_get_ipc_propagate_attr(thread_t thread, struct thread_attr_for_ipc_propagation *attr)
{
	int iotier;
	int qos;

	if (thread == NULL || attr == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	iotier = proc_get_effective_thread_policy(thread, TASK_POLICY_IO);
	qos = proc_get_effective_thread_policy(thread, TASK_POLICY_QOS);

	attr->tafip_iotier = iotier;
	attr->tafip_qos = qos;

	return KERN_SUCCESS;
}

/*
 * thread_set_allocation_name - set the current thread's allocation name,
 * returning the previously set name.
 */

kern_allocation_name_t
thread_set_allocation_name(kern_allocation_name_t new_name)
{
	kern_allocation_name_t ret;
	thread_kernel_state_t kstate = thread_get_kernel_state(current_thread());
	ret = kstate->allocation_name;
	/* fifo: keep the first name set; only install new_name when clearing or when unset */
	if (!new_name || !kstate->allocation_name) {
		kstate->allocation_name = new_name;
	}
	return ret;
}
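
/*
 * Usage sketch (illustrative only; "example_name" is a hypothetical
 * kern_allocation_name_t): because of the fifo behavior above, callers
 * typically save the previous name and restore it afterwards, so an outer
 * caller's attribution is never clobbered by a nested one:
 *
 *	kern_allocation_name_t prev = thread_set_allocation_name(example_name);
 *	... allocations here are attributed to the outermost name set ...
 *	thread_set_allocation_name(prev);
 */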

void *
thread_iokit_tls_get(uint32_t index)
{
	assert(index < THREAD_SAVE_IOKIT_TLS_COUNT);
	return current_thread()->saved.iokit.tls[index];
}

void
thread_iokit_tls_set(uint32_t index, void * data)
{
	assert(index < THREAD_SAVE_IOKIT_TLS_COUNT);
	current_thread()->saved.iokit.tls[index] = data;
}
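
/*
 * Usage sketch (illustrative only; slot 0 and the context type are
 * hypothetical): these accessors give IOKit a small per-thread scratch
 * area, always indexed on the current thread:
 *
 *	thread_iokit_tls_set(0, (void *)ctx);	- stash for this thread
 *	...
 *	ctx = (struct my_ctx *)thread_iokit_tls_get(0);	- retrieve later
 *	thread_iokit_tls_set(0, NULL);	- clear when done
 */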

uint64_t
thread_get_last_wait_duration(thread_t thread)
{
	return thread->last_made_runnable_time - thread->last_run_time;
}

integer_t
thread_kern_get_pri(thread_t thr)
{
	return thr->base_pri;
}

void
thread_kern_set_pri(thread_t thr, integer_t pri)
{
	sched_set_kernel_thread_priority(thr, pri);
}

integer_t
thread_kern_get_kernel_maxpri(void)
{
	return MAXPRI_KERNEL;
}

/*
 * thread_port_with_flavor_no_senders
 *
 * Called whenever the Mach port system detects no-senders on
 * the thread inspect or read port. These ports are allocated lazily and
 * should be deallocated here when there are no senders remaining.
 */
static void
thread_port_with_flavor_no_senders(
	ipc_port_t          port,
	mach_port_mscount_t mscount __unused)
{
	thread_ro_t tro;
	thread_t thread;
	mach_thread_flavor_t flavor;
	ipc_kobject_type_t kotype;

	ip_mq_lock(port);
	if (port->ip_srights > 0) {
		ip_mq_unlock(port);
		return;
	}
	kotype = ip_kotype(port);
	assert((IKOT_THREAD_READ == kotype) || (IKOT_THREAD_INSPECT == kotype));
	thread = ipc_kobject_get_locked(port, kotype);
	if (thread != THREAD_NULL) {
		thread_reference(thread);
	}
	ip_mq_unlock(port);

	if (thread == THREAD_NULL) {
		/* The thread is exiting or disabled; it will eventually deallocate the port */
		return;
	}

	if (kotype == IKOT_THREAD_READ) {
		flavor = THREAD_FLAVOR_READ;
	} else {
		flavor = THREAD_FLAVOR_INSPECT;
	}

	thread_mtx_lock(thread);
	ip_mq_lock(port);

	/*
	 * If the port is no longer active, then ipc_thread_terminate() ran
	 * and destroyed the kobject already. Just deallocate the thread
	 * ref we took and go away.
	 *
	 * It is also possible that several nsrequests are in flight,
	 * only one shall NULL-out the port entry, and this is the one
	 * that gets to dealloc the port.
	 *
	 * Check for a stale no-senders notification. A call to any function
	 * that vends out send rights to this port could resurrect it between
	 * this notification being generated and actually being handled here.
	 */
	tro = get_thread_ro(thread);
	if (!ip_active(port) ||
	    tro->tro_ports[flavor] != port ||
	    port->ip_srights > 0) {
		ip_mq_unlock(port);
		thread_mtx_unlock(thread);
		thread_deallocate(thread);
		return;
	}

	assert(tro->tro_ports[flavor] == port);
	zalloc_ro_clear_field(ZONE_ID_THREAD_RO, tro, tro_ports[flavor]);
	thread_mtx_unlock(thread);

	ipc_kobject_dealloc_port_and_unlock(port, 0, kotype);

	thread_deallocate(thread);
}

/*
 * The 'thread_region_page_shift' is used by footprint
 * to specify the page size that it will use to
 * accomplish its accounting work on the task being
 * inspected. Since footprint uses a thread for each
 * task that it works on, we need to keep the page_shift
 * on a per-thread basis.
 */

int
thread_self_region_page_shift(void)
{
	/*
	 * Return the page shift that this thread
	 * would like to use for its accounting work.
	 */
	return current_thread()->thread_region_page_shift;
}

void
thread_self_region_page_shift_set(
	int pgshift)
{
	/*
	 * Set the page shift that this thread
	 * would like to use for its accounting work
	 * when dealing with a task.
	 */
	current_thread()->thread_region_page_shift = pgshift;
}
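
/*
 * Usage sketch (illustrative only; the footprint-style caller is assumed):
 * an inspection thread could select 4KB-page accounting (a shift of 12,
 * since 1 << 12 == 4096) before walking a task, then restore its previous
 * setting:
 *
 *	int prev = thread_self_region_page_shift();
 *	thread_self_region_page_shift_set(12);
 *	... perform per-task accounting ...
 *	thread_self_region_page_shift_set(prev);
 */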

#if CONFIG_DTRACE
uint32_t
dtrace_get_thread_predcache(thread_t thread)
{
	if (thread != THREAD_NULL) {
		return thread->t_dtrace_predcache;
	} else {
		return 0;
	}
}

int64_t
dtrace_get_thread_vtime(thread_t thread)
{
	if (thread != THREAD_NULL) {
		return thread->t_dtrace_vtime;
	} else {
		return 0;
	}
}

int
dtrace_get_thread_last_cpu_id(thread_t thread)
{
	if ((thread != THREAD_NULL) && (thread->last_processor != PROCESSOR_NULL)) {
		return thread->last_processor->cpu_id;
	} else {
		return -1;
	}
}

int64_t
dtrace_get_thread_tracing(thread_t thread)
{
	if (thread != THREAD_NULL) {
		return thread->t_dtrace_tracing;
	} else {
		return 0;
	}
}

uint16_t
dtrace_get_thread_inprobe(thread_t thread)
{
	if (thread != THREAD_NULL) {
		return thread->t_dtrace_inprobe;
	} else {
		return 0;
	}
}

vm_offset_t
thread_get_kernel_stack(thread_t thread)
{
	if (thread != THREAD_NULL) {
		return thread->kernel_stack;
	} else {
		return 0;
	}
}

#if KASAN
struct kasan_thread_data *
kasan_get_thread_data(thread_t thread)
{
	return &thread->kasan_data;
}
#endif

#if CONFIG_KCOV
kcov_thread_data_t *
kcov_get_thread_data(thread_t thread)
{
	return &thread->kcov_data;
}
#endif

#if CONFIG_STKSZ
/*
 * Returns the base of a thread's kernel stack.
 *
 * The coverage sanitizer instruments every function, including those that
 * participate in stack handoff between threads. There is a window in which
 * the CPU still holds old values but the stack has already been handed over
 * to another thread. In this window kernel_stack is 0 but the CPU still uses
 * the original stack (until a context switch occurs). The original
 * kernel_stack value is preserved in ksancov_stack during this window.
 */
vm_offset_t
kcov_stksz_get_thread_stkbase(thread_t thread)
{
	if (thread != THREAD_NULL) {
		kcov_thread_data_t *data = kcov_get_thread_data(thread);
		if (data->ktd_stksz.kst_stack) {
			return data->ktd_stksz.kst_stack;
		} else {
			return thread->kernel_stack;
		}
	} else {
		return 0;
	}
}

vm_offset_t
kcov_stksz_get_thread_stksize(thread_t thread)
{
	if (thread != THREAD_NULL) {
		return kernel_stack_size;
	} else {
		return 0;
	}
}

void
kcov_stksz_set_thread_stack(thread_t thread, vm_offset_t stack)
{
	kcov_thread_data_t *data = kcov_get_thread_data(thread);
	data->ktd_stksz.kst_stack = stack;
}
#endif /* CONFIG_STKSZ */

int64_t
dtrace_calc_thread_recent_vtime(thread_t thread)
{
	if (thread != THREAD_NULL) {
		processor_t processor = current_processor();
		uint64_t abstime = mach_absolute_time();
		timer_t timer;

		timer = processor->thread_timer;

		return timer_grab(&(thread->system_timer)) + timer_grab(&(thread->user_timer)) +
		       (abstime - timer->tstamp); /* XXX need interrupts off to prevent missed time? */
	} else {
		return 0;
	}
}
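
/*
 * Summary of the computation above: the recent vtime is the thread's banked
 * user and system time plus the still-unbanked interval on the current
 * processor, i.e.
 *
 *	vtime = user_timer + system_timer + (mach_absolute_time() - tstamp)
 *
 * where tstamp is when the processor's active thread timer last started
 * accumulating.
 */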

void
dtrace_set_thread_predcache(thread_t thread, uint32_t predcache)
{
	if (thread != THREAD_NULL) {
		thread->t_dtrace_predcache = predcache;
	}
}

void
dtrace_set_thread_vtime(thread_t thread, int64_t vtime)
{
	if (thread != THREAD_NULL) {
		thread->t_dtrace_vtime = vtime;
	}
}

void
dtrace_set_thread_tracing(thread_t thread, int64_t accum)
{
	if (thread != THREAD_NULL) {
		thread->t_dtrace_tracing = accum;
	}
}

void
dtrace_set_thread_inprobe(thread_t thread, uint16_t inprobe)
{
	if (thread != THREAD_NULL) {
		thread->t_dtrace_inprobe = inprobe;
	}
}

void
dtrace_thread_bootstrap(void)
{
	task_t task = current_task();

	if (task->thread_count == 1) {
		thread_t thread = current_thread();
		if (thread->t_dtrace_flags & TH_DTRACE_EXECSUCCESS) {
			thread->t_dtrace_flags &= ~TH_DTRACE_EXECSUCCESS;
			DTRACE_PROC(exec__success);
			KDBG(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXEC),
			    task_pid(task));
		}
		DTRACE_PROC(start);
	}
	DTRACE_PROC(lwp__start);
}

void
dtrace_thread_didexec(thread_t thread)
{
	thread->t_dtrace_flags |= TH_DTRACE_EXECSUCCESS;
}
#endif /* CONFIG_DTRACE */