/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	kern/thread.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub
 *	Date:	1986
 *
 *	Thread management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to [email protected] any
 * improvements that they make and grant CSL redistribution rights.
 *
 */

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/policy.h>
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <mach/thread_act.h>
#include <mach/thread_status.h>
#include <mach/time_value.h>
#include <mach/vm_param.h>

#include <machine/thread.h>
#include <machine/pal_routines.h>
#include <machine/limits.h>

#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <kern/cpu_data.h>
#include <kern/extmod_statistics.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/mach_param.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_group.h>
#include <kern/coalition.h>
#include <kern/host.h>
#include <kern/zalloc.h>
#include <kern/assert.h>
#include <kern/exc_resource.h>
#include <kern/exc_guard.h>
#include <kern/telemetry.h>
#include <kern/policy_internal.h>
#include <kern/turnstile.h>
#include <kern/sched_clutch.h>
#include <kern/hazard.h>
#include <kern/ast.h>

#include <corpses/task_corpse.h>
#if KPC
#include <kern/kpc.h>
#endif

#if MONOTONIC
#include <kern/monotonic.h>
#include <machine/monotonic.h>
#endif /* MONOTONIC */

#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_port.h>
#include <bank/bank_types.h>

#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>

#include <sys/kdebug.h>
#include <sys/bsdtask_info.h>
#include <mach/sdt.h>
#include <san/kasan.h>
#include <san/kcov_stksz.h>

#include <stdatomic.h>

#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
#include <arm64/proc_reg.h>
#endif /* defined(HAS_APPLE_PAC) */

/*
 * Exported interfaces
 */
#include <mach/task_server.h>
#include <mach/thread_act_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
#include <mach/mach_voucher_server.h>
#include <kern/policy_internal.h>

#if CONFIG_MACF
#include <security/mac_mach_internal.h>
#endif

#include <pthread/workqueue_trace.h>

LCK_GRP_DECLARE(thread_lck_grp, "thread");

static SECURITY_READ_ONLY_LATE(zone_t) thread_zone;
SECURITY_READ_ONLY_LATE(zone_t) thread_ro_zone;

static void thread_port_with_flavor_no_senders(ipc_port_t, mach_port_mscount_t);

IPC_KOBJECT_DEFINE(IKOT_THREAD_CONTROL);
IPC_KOBJECT_DEFINE(IKOT_THREAD_READ,
    .iko_op_no_senders = thread_port_with_flavor_no_senders);
IPC_KOBJECT_DEFINE(IKOT_THREAD_INSPECT,
    .iko_op_no_senders = thread_port_with_flavor_no_senders);

static struct mpsc_daemon_queue thread_stack_queue;
static struct mpsc_daemon_queue thread_terminate_queue;
static struct mpsc_daemon_queue thread_deallocate_queue;
static struct mpsc_daemon_queue thread_exception_queue;

decl_simple_lock_data(static, crashed_threads_lock);
static queue_head_t crashed_threads_queue;

struct thread_exception_elt {
	struct mpsc_queue_chain link;
	exception_type_t exception_type;
	task_t exception_task;
	thread_t exception_thread;
};

static SECURITY_READ_ONLY_LATE(struct thread) thread_template = {
#if MACH_ASSERT
	.thread_magic = THREAD_MAGIC,
#endif /* MACH_ASSERT */
	.wait_result = THREAD_WAITING,
	.options = THREAD_ABORTSAFE,
	.state = TH_WAIT | TH_UNINT,
	.th_sched_bucket = TH_BUCKET_RUN,
	.base_pri = BASEPRI_DEFAULT,
	.realtime.deadline = UINT64_MAX,
	.last_made_runnable_time = THREAD_NOT_RUNNABLE,
	.last_basepri_change_time = THREAD_NOT_RUNNABLE,
#if defined(CONFIG_SCHED_TIMESHARE_CORE)
	.pri_shift = INT8_MAX,
#endif
	/* timers are initialized in thread_bootstrap */
};

__startup_func
static void
thread_zone_startup(void)
{
	size_t size = sizeof(struct thread);

#ifdef MACH_BSD
	size += uthread_size;
#endif
	thread_zone = zone_create_ext("threads", size,
	    ZC_ZFREE_CLEARMEM, ZONE_ID_THREAD, NULL);

	thread_ro_zone = zone_create_ext("threads_ro", sizeof(struct thread_ro),
	    ZC_READONLY, ZONE_ID_THREAD_RO, NULL);
}
STARTUP(ZALLOC, STARTUP_RANK_MIDDLE, thread_zone_startup);
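
/*
 * Note on the zone flags above: ZC_ZFREE_CLEARMEM zeroes a thread element
 * when it is freed, and ZC_READONLY places thread_ro elements in read-only
 * memory, so they can only be changed through the zalloc_ro_* update
 * helpers used later in this file.
 */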

__startup_data
static struct thread init_thread;
static void thread_deallocate_enqueue(thread_t thread);
static void thread_deallocate_complete(thread_t thread);

#ifdef MACH_BSD
extern void proc_exit(void *);
extern mach_exception_data_type_t proc_encode_exit_exception_code(void *);
extern uint64_t get_dispatchqueue_offset_from_proc(void *);
extern uint64_t get_return_to_kernel_offset_from_proc(void *p);
extern uint64_t get_wq_quantum_offset_from_proc(void *);
extern int proc_selfpid(void);
extern void proc_name(int, char*, int);
extern char * proc_name_address(void *p);
exception_type_t get_exception_from_corpse_crashinfo(kcdata_descriptor_t corpse_info);
#endif /* MACH_BSD */

extern bool bsdthread_part_of_cooperative_workqueue(struct uthread *uth);
extern int disable_exc_resource;
extern int audio_active;
extern int debug_task;
int thread_max = CONFIG_THREAD_MAX;     /* Max number of threads */
int task_threadmax = CONFIG_THREAD_MAX;

static uint64_t thread_unique_id = 100;

struct _thread_ledger_indices thread_ledgers = { .cpu_time = -1 };
static ledger_template_t thread_ledger_template = NULL;
static void init_thread_ledgers(void);

#if CONFIG_JETSAM
void jetsam_on_ledger_cpulimit_exceeded(void);
#endif

extern int task_thread_soft_limit;

#if DEVELOPMENT || DEBUG
extern int exc_resource_threads_enabled;
#endif /* DEVELOPMENT || DEBUG */

/*
 * Level (in terms of percentage of the limit) at which the CPU usage monitor triggers telemetry.
 *
 * (i.e. when any thread's CPU consumption exceeds 70% of the limit, start taking user
 * stacktraces, aka micro-stackshots)
 */
#define CPUMON_USTACKSHOTS_TRIGGER_DEFAULT_PCT 70
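
/*
 * For example, with a per-thread CPU limit of 50% of the monitored
 * interval, micro-stackshots begin once the thread has consumed
 * 70% * 50% = 35% of that interval.
 */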

/* Percentage. Level at which we start gathering telemetry. */
static TUNABLE(uint8_t, cpumon_ustackshots_trigger_pct,
    "cpumon_ustackshots_trigger_pct", CPUMON_USTACKSHOTS_TRIGGER_DEFAULT_PCT);
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU(void);
#if DEVELOPMENT || DEBUG
void __attribute__((noinline)) SENDING_NOTIFICATION__TASK_HAS_TOO_MANY_THREADS(task_t, int);
#endif /* DEVELOPMENT || DEBUG */

/*
 * The smallest interval over which we support limiting CPU consumption is 1ms
 */
#define MINIMUM_CPULIMIT_INTERVAL_MS 1

os_refgrp_decl(static, thread_refgrp, "thread", NULL);

static inline void
init_thread_from_template(thread_t thread)
{
	/*
	 * In general, struct thread isn't trivially-copyable, since it may
	 * contain pointers to thread-specific state. This may be enforced at
	 * compile time on architectures that store authed + diversified
	 * pointers in machine_thread.
	 *
	 * In this specific case, where we're initializing a new thread from a
	 * thread_template, we know all diversified pointers are NULL; these are
	 * safe to bitwise copy.
	 */
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wnontrivial-memaccess"
	memcpy(thread, &thread_template, sizeof(*thread));
#pragma clang diagnostic pop
}

static void
thread_ro_create(task_t parent_task, thread_t th, thread_ro_t tro_tpl)
{
#if __x86_64__ || __arm__
	th->t_task = parent_task;
#endif
	tro_tpl->tro_owner = th;
	tro_tpl->tro_task = parent_task;
	th->t_tro = zalloc_ro(ZONE_ID_THREAD_RO, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	zalloc_ro_update_elem(ZONE_ID_THREAD_RO, th->t_tro, tro_tpl);
}

static void
thread_ro_destroy(thread_t th)
{
	thread_ro_t tro = get_thread_ro(th);
#if MACH_BSD
	struct ucred *cred = tro->tro_cred;
#endif

	zfree_ro(ZONE_ID_THREAD_RO, tro);
#if MACH_BSD
	if (cred) {
		uthread_cred_free(cred);
	}
#endif
}

#if MACH_BSD
extern void kauth_cred_set(struct ucred **, struct ucred *);

void
thread_ro_update_cred(thread_ro_t tro, struct ucred *ucred)
{
	struct ucred *my_cred = tro->tro_cred;
	if (my_cred != ucred) {
		kauth_cred_set(&my_cred, ucred);
		zalloc_ro_update_field(ZONE_ID_THREAD_RO, tro, tro_cred, &my_cred);
	}
}

void
thread_ro_update_flags(thread_ro_t tro, thread_ro_flags_t add, thread_ro_flags_t clr)
{
	thread_ro_flags_t flags = (tro->tro_flags & ~clr) | add;
	zalloc_ro_update_field(ZONE_ID_THREAD_RO, tro, tro_flags, &flags);
}
#endif
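
/*
 * Minimal usage sketch (TRO_EXAMPLE_FLAG is a hypothetical flag name, not
 * one defined here): setting a flag without clearing any would be
 *
 *	thread_ro_update_flags(tro, TRO_EXAMPLE_FLAG, 0);
 *
 * the read-only element is only ever rewritten through zalloc_ro_update_*.
 */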

thread_t
thread_bootstrap(void)
{
	/*
	 * Fill in a template thread for fast initialization.
	 */
	timer_init(&thread_template.user_timer);
	timer_init(&thread_template.system_timer);
	timer_init(&thread_template.ptime);
	timer_init(&thread_template.runnable_timer);

	init_thread_from_template(&init_thread);
	/* fiddle with init thread to skip asserts in set_sched_pri */
	init_thread.sched_pri = MAXPRI_KERNEL;

	return &init_thread;
}

void
thread_machine_init_template(void)
{
	machine_thread_template_init(&thread_template);
}

void
thread_init(void)
{
	/*
	 * Initialize any machine-dependent
	 * per-thread structures necessary.
	 */
	machine_thread_init();

	init_thread_ledgers();
}

boolean_t
thread_is_active(thread_t thread)
{
	return thread->active;
}

void
thread_corpse_continue(void)
{
	thread_t thread = current_thread();

	thread_terminate_internal(thread);

	/*
	 * Handle the thread termination directly
	 * here instead of returning to userspace.
	 */
	assert(thread->active == FALSE);
	thread_ast_clear(thread, AST_APC);
	thread_apc_ast(thread);

	panic("thread_corpse_continue");
	/*NOTREACHED*/
}

__dead2
static void
thread_terminate_continue(void)
{
	panic("thread_terminate_continue");
	/*NOTREACHED*/
}

/*
 *	thread_terminate_self:
 */
void
thread_terminate_self(void)
{
	thread_t thread = current_thread();
	thread_ro_t tro = get_thread_ro(thread);
	task_t task = tro->tro_task;
	int threadcnt;

	pal_thread_terminate_self(thread);

	DTRACE_PROC(lwp__exit);

	thread_mtx_lock(thread);

	ipc_thread_disable(thread);

	thread_mtx_unlock(thread);

	thread_sched_call(thread, NULL);

	spl_t s = splsched();
	thread_lock(thread);

	thread_depress_abort_locked(thread);

	thread_unlock(thread);
	splx(s);

#if CONFIG_TASKWATCH
	thead_remove_taskwatch(thread);
#endif /* CONFIG_TASKWATCH */

	work_interval_thread_terminate(thread);

	thread_mtx_lock(thread);

	thread_policy_reset(thread);

	thread_mtx_unlock(thread);

	assert(thread->th_work_interval == NULL);

	bank_swap_thread_bank_ledger(thread, NULL);

	if (kdebug_enable && bsd_hasthreadname(get_bsdthread_info(thread))) {
		char threadname[MAXTHREADNAMESIZE];
		bsd_getthreadname(get_bsdthread_info(thread), threadname);
		kernel_debug_string_simple(TRACE_STRING_THREADNAME_PREV, threadname);
	}

	uthread_cleanup(get_bsdthread_info(thread), tro);

	if (kdebug_enable && task->bsd_info && !task_is_exec_copy(task)) {
		/* trace out pid before we sign off */
		long dbg_arg1 = 0;
		long dbg_arg2 = 0;

		kdbg_trace_data(task->bsd_info, &dbg_arg1, &dbg_arg2);
#if MONOTONIC
		if (kdebug_debugid_enabled(DBG_MT_INSTRS_CYCLES_THR_EXIT)) {
			uint64_t counts[MT_CORE_NFIXED];
			uint64_t thread_user_time;
			uint64_t thread_system_time;
			thread_user_time = timer_grab(&thread->user_timer);
			thread_system_time = timer_grab(&thread->system_timer);
			mt_fixed_thread_counts(thread, counts);
			KDBG_RELEASE(DBG_MT_INSTRS_CYCLES_THR_EXIT,
#ifdef MT_CORE_INSTRS
			    counts[MT_CORE_INSTRS],
#else /* defined(MT_CORE_INSTRS) */
			    0,
#endif /* !defined(MT_CORE_INSTRS) */
			    counts[MT_CORE_CYCLES],
			    thread_system_time, thread_user_time);
		}
#endif /* MONOTONIC */
		KDBG_RELEASE(TRACE_DATA_THREAD_TERMINATE_PID, dbg_arg1, dbg_arg2);
	}

	/*
	 * After this subtraction, this thread should never access
	 * task->bsd_info unless it got 0 back from the os_atomic_dec. It
	 * could be racing with other threads to be the last thread in the
	 * process, and the last thread in the process will tear down the proc
	 * structure and zero-out task->bsd_info.
	 */
	threadcnt = os_atomic_dec(&task->active_thread_count, relaxed);

#if CONFIG_COALITIONS
	/*
	 * Leave the coalitions when last thread of task is exiting and the
	 * task is not a corpse.
	 */
	if (threadcnt == 0 && !task->corpse_info) {
		coalitions_remove_task(task);
	}
#endif

	/*
	 * If we are the last thread to terminate and the task is
	 * associated with a BSD process, perform BSD process exit.
	 */
	if (threadcnt == 0 && task->bsd_info != NULL && !task_is_exec_copy(task)) {
		mach_exception_data_type_t subcode = 0;
		if (kdebug_enable) {
			/* since we're the last thread in this process, trace out the command name too */
			long args[4] = {};
			kdbg_trace_string(task->bsd_info, &args[0], &args[1], &args[2], &args[3]);
#if MONOTONIC
			if (kdebug_debugid_enabled(DBG_MT_INSTRS_CYCLES_PROC_EXIT)) {
				uint64_t counts[MT_CORE_NFIXED];
				uint64_t task_user_time;
				uint64_t task_system_time;
				mt_fixed_task_counts(task, counts);
				/* since the thread time is not yet added to the task */
				task_user_time = task->total_user_time + timer_grab(&thread->user_timer);
				task_system_time = task->total_system_time + timer_grab(&thread->system_timer);
				KDBG_RELEASE((DBG_MT_INSTRS_CYCLES_PROC_EXIT),
#ifdef MT_CORE_INSTRS
				    counts[MT_CORE_INSTRS],
#else /* defined(MT_CORE_INSTRS) */
				    0,
#endif /* !defined(MT_CORE_INSTRS) */
				    counts[MT_CORE_CYCLES],
				    task_system_time, task_user_time);
			}
#endif /* MONOTONIC */
			KDBG_RELEASE(TRACE_STRING_PROC_EXIT, args[0], args[1], args[2], args[3]);
		}

		/* Get the exit reason before proc_exit */
		subcode = proc_encode_exit_exception_code(task->bsd_info);
		proc_exit(task->bsd_info);
		/*
		 * if there is crash info in task
		 * then do the deliver action since this is
		 * last thread for this task.
		 */
		if (task->corpse_info) {
			/* reset all except task name port */
			ipc_task_reset(task);
			/* enable all task ports (name port unchanged) */
			ipc_task_enable(task);
			exception_type_t etype = get_exception_from_corpse_crashinfo(task->corpse_info);
			task_deliver_crash_notification(task, current_thread(), etype, subcode);
		}
	}

	if (threadcnt == 0) {
		task_lock(task);
		if (task_is_a_corpse_fork(task)) {
			thread_wakeup((event_t)&task->active_thread_count);
		}
		task_unlock(task);
	}

	s = splsched();
	thread_lock(thread);

	/*
	 * Ensure that the depress timer is no longer enqueued,
	 * so the timer can be safely deallocated
	 *
	 * TODO: build timer_call_cancel_wait
	 */

	assert((thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) == 0);

	uint32_t delay_us = 1;

	while (thread->depress_timer_active > 0) {
		thread_unlock(thread);
		splx(s);

		delay(delay_us++);

		if (delay_us > USEC_PER_SEC) {
617 panic("depress timer failed to inactivate!"
618 "thread: %p depress_timer_active: %d",
619 thread, thread->depress_timer_active);
		}

		s = splsched();
		thread_lock(thread);
	}

	/*
	 * Cancel wait timer, and wait for
	 * concurrent expirations.
	 */
	if (thread->wait_timer_is_set) {
		thread->wait_timer_is_set = FALSE;

		if (timer_call_cancel(thread->wait_timer)) {
			thread->wait_timer_active--;
		}
	}

	delay_us = 1;

	while (thread->wait_timer_active > 0) {
		thread_unlock(thread);
		splx(s);

		delay(delay_us++);

		if (delay_us > USEC_PER_SEC) {
647 panic("wait timer failed to inactivate!"
648 "thread: %p wait_timer_active: %d",
649 thread, thread->wait_timer_active);
		}

		s = splsched();
		thread_lock(thread);
	}

	/*
	 * If there is a reserved stack, release it.
	 */
	if (thread->reserved_stack != 0) {
		stack_free_reserved(thread);
		thread->reserved_stack = 0;
	}

	/*
	 * Mark thread as terminating, and block.
	 */
	thread->state |= TH_TERMINATE;
	thread_mark_wait_locked(thread, THREAD_UNINT);

	assert(thread->th_work_interval_flags == TH_WORK_INTERVAL_FLAGS_NONE);
	assert(thread->kern_promotion_schedpri == 0);
	assert(thread->rwlock_count == 0);
	assert(thread->priority_floor_count == 0);
	assert(thread->handoff_thread == THREAD_NULL);
	assert(thread->th_work_interval == NULL);

	assert((thread->sched_flags & TH_SFLAG_WAITQ_PROMOTED) == 0);
	assert((thread->sched_flags & TH_SFLAG_RW_PROMOTED) == 0);
	assert((thread->sched_flags & TH_SFLAG_FLOOR_PROMOTED) == 0);
	assert((thread->sched_flags & TH_SFLAG_EXEC_PROMOTED) == 0);
	assert((thread->sched_flags & TH_SFLAG_PROMOTED) == 0);
	assert((thread->sched_flags & TH_SFLAG_THREAD_GROUP_AUTO_JOIN) == 0);
	thread_unlock(thread);
	/* splsched */

	thread_block((thread_continue_t)thread_terminate_continue);
	/*NOTREACHED*/
}

static bool
thread_ref_release(thread_t thread)
{
	if (thread == THREAD_NULL) {
		return false;
	}

	assert_thread_magic(thread);

	return os_ref_release_raw(&thread->ref_count, &thread_refgrp) == 0;
}

/* Drop a thread refcount safely without triggering a zfree */
void
thread_deallocate_safe(thread_t thread)
{
	if (__improbable(thread_ref_release(thread))) {
		/* enqueue the thread for the thread deallocate daemon to call thread_deallocate_complete */
		thread_deallocate_enqueue(thread);
	}
}
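
/*
 * A minimal sketch of the difference between the two paths: from a context
 * where calling into zfree is unsafe (e.g. while holding a spinlock or from
 * scheduler context), drop the reference with
 *
 *	thread_deallocate_safe(thread);	// defers cleanup to the daemon
 *
 * otherwise thread_deallocate() below may complete the free inline.
 */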

void
thread_deallocate(thread_t thread)
{
	if (__improbable(thread_ref_release(thread))) {
		thread_deallocate_complete(thread);
	}
}

void
thread_deallocate_complete(
	thread_t thread)
{
	task_t task;

	assert_thread_magic(thread);

	assert(os_ref_get_count_raw(&thread->ref_count) == 0);

	if (!(thread->state & TH_TERMINATE2)) {
		panic("thread_deallocate: thread not properly terminated");
	}

	assert(thread->runq == PROCESSOR_NULL);

#if KPC
	kpc_thread_destroy(thread);
#endif

	ipc_thread_terminate(thread);

	proc_thread_qos_deallocate(thread);

	task = get_threadtask(thread);

#ifdef MACH_BSD
	uthread_destroy(get_bsdthread_info(thread));
#endif /* MACH_BSD */

	if (thread->t_ledger) {
		ledger_dereference(thread->t_ledger);
	}
	if (thread->t_threadledger) {
		ledger_dereference(thread->t_threadledger);
	}

	assert(thread->turnstile != TURNSTILE_NULL);
	if (thread->turnstile) {
		turnstile_deallocate(thread->turnstile);
	}

	if (IPC_VOUCHER_NULL != thread->ith_voucher) {
		ipc_voucher_release(thread->ith_voucher);
	}

	kfree_data(thread->thread_io_stats, sizeof(struct io_stat_info));
#if CONFIG_PREADOPT_TG
	if (thread->old_preadopt_thread_group) {
		thread_group_release(thread->old_preadopt_thread_group);
	}

	if (thread->preadopt_thread_group) {
		thread_group_release(thread->preadopt_thread_group);
	}
#endif

	if (thread->kernel_stack != 0) {
		stack_free(thread);
	}

	lck_mtx_destroy(&thread->mutex, &thread_lck_grp);
	machine_thread_destroy(thread);

	task_deallocate_grp(task, TASK_GRP_INTERNAL);

#if MACH_ASSERT
	assert_thread_magic(thread);
	thread->thread_magic = 0;
#endif /* MACH_ASSERT */

	lck_mtx_lock(&tasks_threads_lock);
	assert(terminated_threads_count > 0);
	queue_remove(&terminated_threads, thread, thread_t, threads);
	terminated_threads_count--;
	lck_mtx_unlock(&tasks_threads_lock);

	timer_call_free(thread->depress_timer);
	timer_call_free(thread->wait_timer);

	thread_ro_destroy(thread);
	zfree(thread_zone, thread);
}

/*
 *	thread_inspect_deallocate:
 *
 *	Drop a thread inspection reference.
 */
void
thread_inspect_deallocate(
	thread_inspect_t thread_inspect)
{
	return thread_deallocate((thread_t)thread_inspect);
}

/*
 *	thread_read_deallocate:
 *
 *	Drop a reference on thread read port.
 */
void
thread_read_deallocate(
	thread_read_t thread_read)
{
	return thread_deallocate((thread_t)thread_read);
}


/*
 *	thread_exception_queue_invoke:
 *
 *	Deliver EXC_{RESOURCE,GUARD} exception
 */
static void
thread_exception_queue_invoke(mpsc_queue_chain_t elm,
    __assert_only mpsc_daemon_queue_t dq)
{
	struct thread_exception_elt *elt;
	task_t task;
	thread_t thread;
	exception_type_t etype;

	assert(dq == &thread_exception_queue);
	elt = mpsc_queue_element(elm, struct thread_exception_elt, link);

	etype = elt->exception_type;
	task = elt->exception_task;
	thread = elt->exception_thread;
	assert_thread_magic(thread);

	kfree_type(struct thread_exception_elt, elt);

	/* wait for all the threads in the task to terminate */
	task_lock(task);
	task_wait_till_threads_terminate_locked(task);
	task_unlock(task);

	/* Consumes the task ref returned by task_generate_corpse_internal */
	task_deallocate(task);
	/* Consumes the thread ref returned by task_generate_corpse_internal */
	thread_deallocate(thread);

	/* Deliver the notification, also clears the corpse. */
	task_deliver_crash_notification(task, thread, etype, 0);
}

/*
 *	thread_exception_enqueue:
 *
 *	Enqueue a corpse port to be delivered an EXC_{RESOURCE,GUARD}.
 */
void
thread_exception_enqueue(
	task_t task,
	thread_t thread,
	exception_type_t etype)
{
	assert(EXC_RESOURCE == etype || EXC_GUARD == etype);
	struct thread_exception_elt *elt = kalloc_type(struct thread_exception_elt, Z_WAITOK);
	elt->exception_type = etype;
	elt->exception_task = task;
	elt->exception_thread = thread;

	mpsc_daemon_enqueue(&thread_exception_queue, &elt->link,
	    MPSC_QUEUE_DISABLE_PREEMPTION);
}
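
/*
 * Illustrative call site (a sketch; the corpse-generation step lives
 * elsewhere): a caller holding the task/thread references taken by
 * task_generate_corpse_internal would hand them off as
 *
 *	thread_exception_enqueue(new_task, th, EXC_RESOURCE);
 *
 * and thread_exception_queue_invoke() above consumes both references.
 */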

/*
 *	thread_copy_resource_info
 *
 *	Copy the resource info counters from source
 *	thread to destination thread.
 */
void
thread_copy_resource_info(
	thread_t dst_thread,
	thread_t src_thread)
{
	dst_thread->c_switch = src_thread->c_switch;
	dst_thread->p_switch = src_thread->p_switch;
	dst_thread->ps_switch = src_thread->ps_switch;
	dst_thread->precise_user_kernel_time = src_thread->precise_user_kernel_time;
	dst_thread->user_timer = src_thread->user_timer;
	dst_thread->user_timer_save = src_thread->user_timer_save;
	dst_thread->system_timer = src_thread->system_timer;
	dst_thread->system_timer_save = src_thread->system_timer_save;
	dst_thread->runnable_timer = src_thread->runnable_timer;
	dst_thread->vtimer_user_save = src_thread->vtimer_user_save;
	dst_thread->vtimer_prof_save = src_thread->vtimer_prof_save;
	dst_thread->vtimer_rlim_save = src_thread->vtimer_rlim_save;
	dst_thread->vtimer_qos_save = src_thread->vtimer_qos_save;
	dst_thread->syscalls_unix = src_thread->syscalls_unix;
	dst_thread->syscalls_mach = src_thread->syscalls_mach;
	ledger_rollup(dst_thread->t_threadledger, src_thread->t_threadledger);
	*dst_thread->thread_io_stats = *src_thread->thread_io_stats;
}

static void
thread_terminate_queue_invoke(mpsc_queue_chain_t e,
    __assert_only mpsc_daemon_queue_t dq)
{
	thread_t thread = mpsc_queue_element(e, struct thread, mpsc_links);
	task_t task = get_threadtask(thread);

	assert(dq == &thread_terminate_queue);

	task_lock(task);

	/*
	 * if marked for crash reporting, skip reaping.
	 * The corpse delivery thread will clear bit and enqueue
	 * for reaping when done
	 *
	 * Note: the inspection field is set under the task lock
	 *
	 * FIXME[mad]: why enqueue for termination before `inspection` is false?
	 */
	if (__improbable(thread->inspection)) {
		simple_lock(&crashed_threads_lock, &thread_lck_grp);
		task_unlock(task);

		enqueue_tail(&crashed_threads_queue, &thread->runq_links);
		simple_unlock(&crashed_threads_lock);
		return;
	}


	task->total_user_time += timer_grab(&thread->user_timer);
	task->total_ptime += timer_grab(&thread->ptime);
	task->total_runnable_time += timer_grab(&thread->runnable_timer);
	if (thread->precise_user_kernel_time) {
		task->total_system_time += timer_grab(&thread->system_timer);
	} else {
		task->total_user_time += timer_grab(&thread->system_timer);
	}

	task->c_switch += thread->c_switch;
	task->p_switch += thread->p_switch;
	task->ps_switch += thread->ps_switch;

	task->syscalls_unix += thread->syscalls_unix;
	task->syscalls_mach += thread->syscalls_mach;

	task->task_timer_wakeups_bin_1 += thread->thread_timer_wakeups_bin_1;
	task->task_timer_wakeups_bin_2 += thread->thread_timer_wakeups_bin_2;
	task->task_gpu_ns += ml_gpu_stat(thread);
	task->task_energy += ml_energy_stat(thread);
	task->decompressions += thread->decompressions;

#if MONOTONIC
	mt_terminate_update(task, thread);
#endif /* MONOTONIC */

	thread_update_qos_cpu_time(thread);

	queue_remove(&task->threads, thread, thread_t, task_threads);
	task->thread_count--;

	/*
	 * If the task is being halted, and there is only one thread
	 * left in the task after this one, then wakeup that thread.
	 */
	if (task->thread_count == 1 && task->halting) {
		thread_wakeup((event_t)&task->halting);
	}

	task_unlock(task);

	lck_mtx_lock(&tasks_threads_lock);
	queue_remove(&threads, thread, thread_t, threads);
	threads_count--;
	queue_enter(&terminated_threads, thread, thread_t, threads);
	terminated_threads_count++;
	lck_mtx_unlock(&tasks_threads_lock);

	thread_deallocate(thread);
}

static void
thread_deallocate_queue_invoke(mpsc_queue_chain_t e,
    __assert_only mpsc_daemon_queue_t dq)
{
	thread_t thread = mpsc_queue_element(e, struct thread, mpsc_links);

	assert(dq == &thread_deallocate_queue);

	thread_deallocate_complete(thread);
}

/*
 *	thread_terminate_enqueue:
 *
 *	Enqueue a terminating thread for final disposition.
 *
 *	Called at splsched.
 */
void
thread_terminate_enqueue(
	thread_t thread)
{
	KDBG_RELEASE(TRACE_DATA_THREAD_TERMINATE, thread->thread_id);

	mpsc_daemon_enqueue(&thread_terminate_queue, &thread->mpsc_links,
	    MPSC_QUEUE_DISABLE_PREEMPTION);
}

/*
 *	thread_deallocate_enqueue:
 *
 *	Enqueue a thread for final deallocation.
 */
static void
thread_deallocate_enqueue(
	thread_t thread)
{
	mpsc_daemon_enqueue(&thread_deallocate_queue, &thread->mpsc_links,
	    MPSC_QUEUE_DISABLE_PREEMPTION);
}

/*
 *	thread_terminate_crashed_threads:
 *	Walk the list of crashed threads and enqueue for termination any
 *	threads that are no longer being inspected.
 */
void
thread_terminate_crashed_threads(void)
{
	thread_t th_remove;

	simple_lock(&crashed_threads_lock, &thread_lck_grp);
	/*
	 * Loop through the crashed threads queue and hand any thread that is
	 * no longer being inspected back to the terminate queue.
	 */

	qe_foreach_element_safe(th_remove, &crashed_threads_queue, runq_links) {
		/* make sure current_thread is never in crashed queue */
		assert(th_remove != current_thread());

		if (th_remove->inspection == FALSE) {
			remqueue(&th_remove->runq_links);
			mpsc_daemon_enqueue(&thread_terminate_queue, &th_remove->mpsc_links,
			    MPSC_QUEUE_NONE);
		}
	}

	simple_unlock(&crashed_threads_lock);
}

/*
 *	thread_stack_queue_invoke:
 *
 *	Perform stack allocation as required due to
 *	invoke failures.
 */
static void
thread_stack_queue_invoke(mpsc_queue_chain_t elm,
    __assert_only mpsc_daemon_queue_t dq)
{
	thread_t thread = mpsc_queue_element(elm, struct thread, mpsc_links);

	assert(dq == &thread_stack_queue);

	/* allocate stack with interrupts enabled so that we can call into VM */
	stack_alloc(thread);

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_WAIT) | DBG_FUNC_END, thread_tid(thread), 0, 0, 0, 0);

	spl_t s = splsched();
	thread_lock(thread);
	thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
	thread_unlock(thread);
	splx(s);
}

/*
 *	thread_stack_enqueue:
 *
 *	Enqueue a thread for stack allocation.
 *
 *	Called at splsched.
 */
void
thread_stack_enqueue(
	thread_t thread)
{
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_WAIT) | DBG_FUNC_START, thread_tid(thread), 0, 0, 0, 0);
	assert_thread_magic(thread);

	mpsc_daemon_enqueue(&thread_stack_queue, &thread->mpsc_links,
	    MPSC_QUEUE_DISABLE_PREEMPTION);
}

void
thread_daemon_init(void)
{
	kern_return_t result;

	thread_deallocate_daemon_init();

	thread_deallocate_daemon_register_queue(&thread_terminate_queue,
	    thread_terminate_queue_invoke);

	thread_deallocate_daemon_register_queue(&thread_deallocate_queue,
	    thread_deallocate_queue_invoke);

	hazard_register_mpsc_queue();

	ipc_object_deallocate_register_queue();

	simple_lock_init(&crashed_threads_lock, 0);
	queue_init(&crashed_threads_queue);

	result = mpsc_daemon_queue_init_with_thread(&thread_stack_queue,
	    thread_stack_queue_invoke, BASEPRI_PREEMPT_HIGH,
	    "daemon.thread-stack");
	if (result != KERN_SUCCESS) {
		panic("thread_daemon_init: thread_stack_daemon");
	}

	result = mpsc_daemon_queue_init_with_thread(&thread_exception_queue,
	    thread_exception_queue_invoke, MINPRI_KERNEL,
	    "daemon.thread-exception");
	if (result != KERN_SUCCESS) {
		panic("thread_daemon_init: thread_exception_daemon");
	}
}

__options_decl(thread_create_internal_options_t, uint32_t, {
	TH_OPTION_NONE      = 0x00,
	TH_OPTION_NOSUSP    = 0x02,
	TH_OPTION_WORKQ     = 0x04,
	TH_OPTION_IMMOVABLE = 0x08,
	TH_OPTION_PINNED    = 0x10,
});
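
/*
 * These options combine; e.g. workqueue threads are created with
 * TH_OPTION_NOSUSP | TH_OPTION_WORKQ | TH_OPTION_IMMOVABLE
 * (see thread_create_workq_waiting() below).
 */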

void
main_thread_set_immovable_pinned(thread_t thread)
{
	ipc_main_thread_set_immovable_pinned(thread);
}

/*
 * Create a new thread.
 * Doesn't start the thread running.
 *
 * Task and tasks_threads_lock are returned locked on success.
 */
static kern_return_t
thread_create_internal(
	task_t parent_task,
	integer_t priority,
	thread_continue_t continuation,
	void *parameter,
	thread_create_internal_options_t options,
	thread_t *out_thread)
{
	thread_t new_thread;
	ipc_thread_init_options_t init_options = IPC_THREAD_INIT_NONE;
	struct thread_ro tro_tpl = { };
	bool first_thread = false;

	/*
	 * Allocate a thread and initialize static fields
	 */
	new_thread = zalloc_flags(thread_zone, Z_WAITOK | Z_NOFAIL);

	if (__improbable(current_thread() == &init_thread)) {
		/*
		 * The first thread ever is a global, but because we want to be
		 * able to zone_id_require() threads, we have to stop using the
		 * global piece of memory we used to bootstrap the kernel and
		 * jump to a proper thread from a zone.
		 *
		 * This is why that one thread will inherit its original
		 * state differently.
		 *
		 * Also remember this thread in `vm_pageout_scan_thread`
		 * as this is what the first thread ever becomes.
		 *
		 * Also pre-warm the depress timer since the VM pageout scan
		 * daemon might need to use it.
		 */
		assert(vm_pageout_scan_thread == THREAD_NULL);
		vm_pageout_scan_thread = new_thread;

		first_thread = true;
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wnontrivial-memaccess"
		/* work around 74481146 */
		memcpy(new_thread, &init_thread, sizeof(*new_thread));
#pragma clang diagnostic pop
	} else {
		init_thread_from_template(new_thread);
	}

	if (options & TH_OPTION_PINNED) {
		init_options |= IPC_THREAD_INIT_PINNED;
	}

	if (options & TH_OPTION_IMMOVABLE) {
		init_options |= IPC_THREAD_INIT_IMMOVABLE;
	}

	os_ref_init_count_raw(&new_thread->ref_count, &thread_refgrp, 2);
	machine_thread_create(new_thread, parent_task, first_thread);

#ifdef MACH_BSD
	uthread_init(parent_task, get_bsdthread_info(new_thread),
	    &tro_tpl, (options & TH_OPTION_WORKQ) != 0);
	if (!is_corpsetask(parent_task)) {
		/*
		 * uthread_init will set tro_cred (with a +1)
		 * and tro_proc for live tasks.
		 */
		assert(tro_tpl.tro_cred && tro_tpl.tro_proc);
	}
#endif /* MACH_BSD */

	thread_lock_init(new_thread);
	wake_lock_init(new_thread);

	lck_mtx_init(&new_thread->mutex, &thread_lck_grp, LCK_ATTR_NULL);

	ipc_thread_init(parent_task, new_thread, &tro_tpl, init_options);

	thread_ro_create(parent_task, new_thread, &tro_tpl);

	new_thread->continuation = continuation;
	new_thread->parameter = parameter;
	new_thread->inheritor_flags = TURNSTILE_UPDATE_FLAGS_NONE;
	priority_queue_init(&new_thread->sched_inheritor_queue);
	priority_queue_init(&new_thread->base_inheritor_queue);
#if CONFIG_SCHED_CLUTCH
	priority_queue_entry_init(&new_thread->th_clutch_runq_link);
	priority_queue_entry_init(&new_thread->th_clutch_pri_link);
#endif /* CONFIG_SCHED_CLUTCH */

#if CONFIG_SCHED_EDGE
	new_thread->th_bound_cluster_enqueued = false;
	for (cluster_shared_rsrc_type_t shared_rsrc_type = CLUSTER_SHARED_RSRC_TYPE_MIN; shared_rsrc_type < CLUSTER_SHARED_RSRC_TYPE_COUNT; shared_rsrc_type++) {
		new_thread->th_shared_rsrc_enqueued[shared_rsrc_type] = false;
		new_thread->th_shared_rsrc_heavy_user[shared_rsrc_type] = false;
		new_thread->th_shared_rsrc_heavy_perf_control[shared_rsrc_type] = false;
	}
#endif /* CONFIG_SCHED_EDGE */
	new_thread->th_bound_cluster_id = THREAD_BOUND_CLUSTER_NONE;

	/* Allocate I/O Statistics structure */
	new_thread->thread_io_stats = kalloc_data(sizeof(struct io_stat_info),
	    Z_WAITOK | Z_ZERO | Z_NOFAIL);

#if KASAN
	kasan_init_thread(&new_thread->kasan_data);
#endif

#if CONFIG_KCOV
	kcov_init_thread(&new_thread->kcov_data);
#endif

#if CONFIG_IOSCHED
	/* Clear out the I/O Scheduling info for AppleFSCompression */
	new_thread->decmp_upl = NULL;
#endif /* CONFIG_IOSCHED */

	new_thread->thread_region_page_shift = 0;

#if DEVELOPMENT || DEBUG
	task_lock(parent_task);
	uint16_t thread_limit = parent_task->task_thread_limit;
	if (exc_resource_threads_enabled &&
	    thread_limit > 0 &&
	    parent_task->thread_count >= thread_limit &&
	    !parent_task->task_has_crossed_thread_limit &&
	    !(parent_task->t_flags & TF_CORPSE)) {
		int thread_count = parent_task->thread_count;
		parent_task->task_has_crossed_thread_limit = TRUE;
		task_unlock(parent_task);
		SENDING_NOTIFICATION__TASK_HAS_TOO_MANY_THREADS(parent_task, thread_count);
	} else {
		task_unlock(parent_task);
	}
#endif

	lck_mtx_lock(&tasks_threads_lock);
	task_lock(parent_task);

	/*
	 * Fail thread creation if parent task is being torn down or has too many threads
	 * If the caller asked for TH_OPTION_NOSUSP, also fail if the parent task is suspended
	 */
	if (parent_task->active == 0 || parent_task->halting ||
	    (parent_task->suspend_count > 0 && (options & TH_OPTION_NOSUSP) != 0) ||
	    (parent_task->thread_count >= task_threadmax && parent_task != kernel_task)) {
		task_unlock(parent_task);
		lck_mtx_unlock(&tasks_threads_lock);

#ifdef MACH_BSD
		{
			struct uthread *ut = get_bsdthread_info(new_thread);

			uthread_cleanup(ut, &tro_tpl);
			uthread_destroy(ut);
		}
#endif /* MACH_BSD */
		ipc_thread_disable(new_thread);
		ipc_thread_terminate(new_thread);
		kfree_data(new_thread->thread_io_stats,
		    sizeof(struct io_stat_info));
		lck_mtx_destroy(&new_thread->mutex, &thread_lck_grp);
		machine_thread_destroy(new_thread);
		thread_ro_destroy(new_thread);
		zfree(thread_zone, new_thread);
		return KERN_FAILURE;
	}

	/* Protected by the tasks_threads_lock */
	new_thread->thread_id = ++thread_unique_id;

	/* New threads inherit any default state on the task */
	machine_thread_inherit_taskwide(new_thread, parent_task);

	task_reference_grp(parent_task, TASK_GRP_INTERNAL);

	if (parent_task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
		/*
		 * This task has a per-thread CPU limit; make sure this new thread
		 * gets its limit set too, before it gets out of the kernel.
		 */
		act_set_astledger(new_thread);
	}

	/* Instantiate a thread ledger. Do not fail thread creation if ledger creation fails. */
	if ((new_thread->t_threadledger = ledger_instantiate(thread_ledger_template,
	    LEDGER_CREATE_INACTIVE_ENTRIES)) != LEDGER_NULL) {
		ledger_entry_setactive(new_thread->t_threadledger, thread_ledgers.cpu_time);
	}

	new_thread->t_bankledger = LEDGER_NULL;
	new_thread->t_deduct_bank_ledger_time = 0;
	new_thread->t_deduct_bank_ledger_energy = 0;

	new_thread->t_ledger = parent_task->ledger;
	if (new_thread->t_ledger) {
		ledger_reference(new_thread->t_ledger);
	}

#if defined(CONFIG_SCHED_MULTIQ)
	/* Cache the task's sched_group */
	new_thread->sched_group = parent_task->sched_group;
#endif /* defined(CONFIG_SCHED_MULTIQ) */

	/* Cache the task's map */
	new_thread->map = parent_task->map;

	new_thread->depress_timer = timer_call_alloc(thread_depress_expire, new_thread);
	new_thread->wait_timer = timer_call_alloc(thread_timer_expire, new_thread);

#if KPC
	kpc_thread_create(new_thread);
#endif

	/* Set the thread's scheduling parameters */
	new_thread->sched_mode = SCHED(initial_thread_sched_mode)(parent_task);
	new_thread->max_priority = parent_task->max_priority;
	new_thread->task_priority = parent_task->priority;

#if CONFIG_THREAD_GROUPS
	thread_group_init_thread(new_thread, parent_task);
#endif /* CONFIG_THREAD_GROUPS */

	int new_priority = (priority < 0) ? parent_task->priority : priority;
1394 if (new_priority > new_thread->max_priority) {
1395 new_priority = new_thread->max_priority;
1396 }
1397 #if !defined(XNU_TARGET_OS_OSX)
1398 if (new_priority < MAXPRI_THROTTLE) {
1399 new_priority = MAXPRI_THROTTLE;
1400 }
1401 #endif /* !defined(XNU_TARGET_OS_OSX) */
1402
1403 new_thread->importance = new_priority - new_thread->task_priority;
1404
1405 sched_set_thread_base_priority(new_thread, new_priority);
1406
1407 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
1408 new_thread->sched_stamp = sched_tick;
1409 #if CONFIG_SCHED_CLUTCH
1410 new_thread->pri_shift = sched_clutch_thread_pri_shift(new_thread, new_thread->th_sched_bucket);
1411 #else /* CONFIG_SCHED_CLUTCH */
1412 new_thread->pri_shift = sched_pri_shifts[new_thread->th_sched_bucket];
1413 #endif /* CONFIG_SCHED_CLUTCH */
1414 #endif /* defined(CONFIG_SCHED_TIMESHARE_CORE) */
1415
1416 if (parent_task->max_priority <= MAXPRI_THROTTLE) {
1417 sched_thread_mode_demote(new_thread, TH_SFLAG_THROTTLED);
1418 }
1419
1420 thread_policy_create(new_thread);
1421
1422 /* Chain the thread onto the task's list */
1423 queue_enter(&parent_task->threads, new_thread, thread_t, task_threads);
1424 parent_task->thread_count++;
1425
1426 /* So terminating threads don't need to take the task lock to decrement */
1427 os_atomic_inc(&parent_task->active_thread_count, relaxed);
1428
1429 queue_enter(&threads, new_thread, thread_t, threads);
1430 threads_count++;
1431
1432 new_thread->active = TRUE;
1433 if (task_is_a_corpse_fork(parent_task)) {
1434 /* Set the inspection bit if the task is a corpse fork */
1435 new_thread->inspection = TRUE;
1436 } else {
1437 new_thread->inspection = FALSE;
1438 }
1439 new_thread->corpse_dup = FALSE;
1440 new_thread->turnstile = turnstile_alloc();
1441
1442
1443 *out_thread = new_thread;
1444
1445 if (kdebug_enable) {
1446 long args[4] = {};
1447
1448 kdbg_trace_data(parent_task->bsd_info, &args[1], &args[3]);
1449
1450 /*
1451 * Starting with 26604425, exec'ing creates a new task/thread.
1452 *
1453 * NEWTHREAD in the current process has two possible meanings:
1454 *
1455 * 1) Create a new thread for this process.
1456 * 2) Create a new thread for the future process this will become in an
1457 * exec.
1458 *
1459 * To disambiguate these, arg3 will be set to TRUE for case #2.
1460 *
1461 * The value we need to find (TPF_EXEC_COPY) is stable in the case of a
1462 * task exec'ing. The read of t_procflags does not take the proc_lock.
1463 */
1464 args[2] = task_is_exec_copy(parent_task) ? 1 : 0;
1465
1466 KDBG_RELEASE(TRACE_DATA_NEWTHREAD, (uintptr_t)thread_tid(new_thread),
1467 args[1], args[2], args[3]);
1468
1469 kdbg_trace_string(parent_task->bsd_info, &args[0], &args[1],
1470 &args[2], &args[3]);
1471 KDBG_RELEASE(TRACE_STRING_NEWTHREAD, args[0], args[1], args[2],
1472 args[3]);
1473 }
1474
1475 DTRACE_PROC1(lwp__create, thread_t, *out_thread);
1476
1477 return KERN_SUCCESS;
1478 }
1479
1480 static kern_return_t
thread_create_with_options_internal(task_t task,thread_t * new_thread,boolean_t from_user,thread_create_internal_options_t options,thread_continue_t continuation)1481 thread_create_with_options_internal(
1482 task_t task,
1483 thread_t *new_thread,
1484 boolean_t from_user,
1485 thread_create_internal_options_t options,
1486 thread_continue_t continuation)
1487 {
1488 kern_return_t result;
1489 thread_t thread;
1490
1491 if (task == TASK_NULL || task == kernel_task) {
1492 return KERN_INVALID_ARGUMENT;
1493 }
1494
1495 #if CONFIG_MACF
1496 if (from_user && current_task() != task &&
1497 mac_proc_check_remote_thread_create(task, -1, NULL, 0) != 0) {
1498 return KERN_DENIED;
1499 }
1500 #endif
1501
1502 result = thread_create_internal(task, -1, continuation, NULL, options, &thread);
1503 if (result != KERN_SUCCESS) {
1504 return result;
1505 }
1506
1507 thread->user_stop_count = 1;
1508 thread_hold(thread);
1509 if (task->suspend_count > 0) {
1510 thread_hold(thread);
1511 }
1512
1513 if (from_user) {
1514 extmod_statistics_incr_thread_create(task);
1515 }
1516
1517 task_unlock(task);
1518 lck_mtx_unlock(&tasks_threads_lock);
1519
1520 *new_thread = thread;
1521
1522 return KERN_SUCCESS;
1523 }
1524
1525 /* No prototype, since task_server.h has the _from_user version if KERNEL_SERVER */
1526 kern_return_t
1527 thread_create(
1528 task_t task,
1529 thread_t *new_thread);
1530
1531 kern_return_t
thread_create(task_t task,thread_t * new_thread)1532 thread_create(
1533 task_t task,
1534 thread_t *new_thread)
1535 {
1536 return thread_create_with_options_internal(task, new_thread, FALSE, TH_OPTION_NONE,
1537 (thread_continue_t)thread_bootstrap_return);
1538 }
1539
1540 kern_return_t
thread_create_immovable(task_t task,thread_t * new_thread)1541 thread_create_immovable(
1542 task_t task,
1543 thread_t *new_thread)
1544 {
1545 return thread_create_with_options_internal(task, new_thread, FALSE,
1546 TH_OPTION_IMMOVABLE, (thread_continue_t)thread_bootstrap_return);
1547 }
1548
1549 kern_return_t
thread_create_from_user(task_t task,thread_t * new_thread)1550 thread_create_from_user(
1551 task_t task,
1552 thread_t *new_thread)
1553 {
1554 return thread_create_with_options_internal(task, new_thread, TRUE, TH_OPTION_NONE,
1555 (thread_continue_t)thread_bootstrap_return);
1556 }
1557
1558 kern_return_t
thread_create_with_continuation(task_t task,thread_t * new_thread,thread_continue_t continuation)1559 thread_create_with_continuation(
1560 task_t task,
1561 thread_t *new_thread,
1562 thread_continue_t continuation)
1563 {
1564 return thread_create_with_options_internal(task, new_thread, FALSE, TH_OPTION_NONE, continuation);
1565 }
1566
1567 /*
1568 * Create a thread that is already started, but is waiting on an event
1569 */
1570 static kern_return_t
thread_create_waiting_internal(task_t task,thread_continue_t continuation,event_t event,block_hint_t block_hint,int options,thread_t * new_thread)1571 thread_create_waiting_internal(
1572 task_t task,
1573 thread_continue_t continuation,
1574 event_t event,
1575 block_hint_t block_hint,
1576 int options,
1577 thread_t *new_thread)
1578 {
1579 kern_return_t result;
1580 thread_t thread;
1581
1582 if (task == TASK_NULL || task == kernel_task) {
1583 return KERN_INVALID_ARGUMENT;
1584 }
1585
1586 result = thread_create_internal(task, -1, continuation, NULL,
1587 options, &thread);
1588 if (result != KERN_SUCCESS) {
1589 return result;
1590 }
1591
1592 /* note no user_stop_count or thread_hold here */
1593
1594 if (task->suspend_count > 0) {
1595 thread_hold(thread);
1596 }
1597
1598 thread_mtx_lock(thread);
1599 thread_set_pending_block_hint(thread, block_hint);
1600 if (options & TH_OPTION_WORKQ) {
1601 thread->static_param = true;
1602 event = workq_thread_init_and_wq_lock(task, thread);
1603 }
1604 thread_start_in_assert_wait(thread, event, THREAD_INTERRUPTIBLE);
1605 thread_mtx_unlock(thread);
1606
1607 task_unlock(task);
1608 lck_mtx_unlock(&tasks_threads_lock);
1609
1610 *new_thread = thread;
1611
1612 return KERN_SUCCESS;
1613 }
1614
1615 kern_return_t
thread_create_waiting(task_t task,thread_continue_t continuation,event_t event,th_create_waiting_options_t options,thread_t * new_thread)1616 thread_create_waiting(
1617 task_t task,
1618 thread_continue_t continuation,
1619 event_t event,
1620 th_create_waiting_options_t options,
1621 thread_t *new_thread)
1622 {
1623 thread_create_internal_options_t ci_options = TH_OPTION_NONE;
1624
1625 assert((options & ~TH_CREATE_WAITING_OPTION_MASK) == 0);
1626 if (options & TH_CREATE_WAITING_OPTION_PINNED) {
1627 ci_options |= TH_OPTION_PINNED;
1628 }
1629 if (options & TH_CREATE_WAITING_OPTION_IMMOVABLE) {
1630 ci_options |= TH_OPTION_IMMOVABLE;
1631 }
1632
1633 return thread_create_waiting_internal(task, continuation, event,
1634 kThreadWaitNone, ci_options, new_thread);
1635 }
1636
1637
1638 static kern_return_t
thread_create_running_internal2(task_t task,int flavor,thread_state_t new_state,mach_msg_type_number_t new_state_count,thread_t * new_thread,boolean_t from_user)1639 thread_create_running_internal2(
1640 task_t task,
1641 int flavor,
1642 thread_state_t new_state,
1643 mach_msg_type_number_t new_state_count,
1644 thread_t *new_thread,
1645 boolean_t from_user)
1646 {
1647 kern_return_t result;
1648 thread_t thread;
1649
1650 if (task == TASK_NULL || task == kernel_task) {
1651 return KERN_INVALID_ARGUMENT;
1652 }
1653
1654 #if CONFIG_MACF
1655 if (from_user && current_task() != task &&
1656 mac_proc_check_remote_thread_create(task, flavor, new_state, new_state_count) != 0) {
1657 return KERN_DENIED;
1658 }
1659 #endif
1660
1661 result = thread_create_internal(task, -1,
1662 (thread_continue_t)thread_bootstrap_return, NULL,
1663 TH_OPTION_NONE, &thread);
1664 if (result != KERN_SUCCESS) {
1665 return result;
1666 }
1667
1668 if (task->suspend_count > 0) {
1669 thread_hold(thread);
1670 }
1671
1672 if (from_user) {
1673 result = machine_thread_state_convert_from_user(thread, flavor,
1674 new_state, new_state_count);
1675 }
1676 if (result == KERN_SUCCESS) {
1677 result = machine_thread_set_state(thread, flavor, new_state,
1678 new_state_count);
1679 }
1680 if (result != KERN_SUCCESS) {
1681 task_unlock(task);
1682 lck_mtx_unlock(&tasks_threads_lock);
1683
1684 thread_terminate(thread);
1685 thread_deallocate(thread);
1686 return result;
1687 }
1688
1689 thread_mtx_lock(thread);
1690 thread_start(thread);
1691 thread_mtx_unlock(thread);
1692
1693 if (from_user) {
1694 extmod_statistics_incr_thread_create(task);
1695 }
1696
1697 task_unlock(task);
1698 lck_mtx_unlock(&tasks_threads_lock);
1699
1700 *new_thread = thread;
1701
1702 return result;
1703 }
1704
1705 /* Prototype, see justification above */
1706 kern_return_t
1707 thread_create_running(
1708 task_t task,
1709 int flavor,
1710 thread_state_t new_state,
1711 mach_msg_type_number_t new_state_count,
1712 thread_t *new_thread);
1713
1714 kern_return_t
thread_create_running(task_t task,int flavor,thread_state_t new_state,mach_msg_type_number_t new_state_count,thread_t * new_thread)1715 thread_create_running(
1716 task_t task,
1717 int flavor,
1718 thread_state_t new_state,
1719 mach_msg_type_number_t new_state_count,
1720 thread_t *new_thread)
1721 {
1722 return thread_create_running_internal2(
1723 task, flavor, new_state, new_state_count,
1724 new_thread, FALSE);
1725 }
1726
1727 kern_return_t
thread_create_running_from_user(task_t task,int flavor,thread_state_t new_state,mach_msg_type_number_t new_state_count,thread_t * new_thread)1728 thread_create_running_from_user(
1729 task_t task,
1730 int flavor,
1731 thread_state_t new_state,
1732 mach_msg_type_number_t new_state_count,
1733 thread_t *new_thread)
1734 {
1735 return thread_create_running_internal2(
1736 task, flavor, new_state, new_state_count,
1737 new_thread, TRUE);
1738 }
1739
1740 kern_return_t
thread_create_workq_waiting(task_t task,thread_continue_t continuation,thread_t * new_thread)1741 thread_create_workq_waiting(
1742 task_t task,
1743 thread_continue_t continuation,
1744 thread_t *new_thread)
1745 {
1746 /*
1747 * Create thread, but don't pin control port just yet, in case someone calls
1748 * task_threads() and deallocates pinned port before kernel copyout happens,
1749 * which will result in pinned port guard exception. Instead, pin and make
1750 * it immovable atomically at copyout during workq_setup_and_run().
1751 */
1752 int options = TH_OPTION_NOSUSP | TH_OPTION_WORKQ | TH_OPTION_IMMOVABLE;
1753 return thread_create_waiting_internal(task, continuation, NULL,
1754 kThreadWaitParkedWorkQueue, options, new_thread);
1755 }
1756
1757 /*
1758 * kernel_thread_create:
1759 *
1760 * Create a thread in the kernel task
1761 * to execute in kernel context.
1762 */
1763 kern_return_t
kernel_thread_create(thread_continue_t continuation,void * parameter,integer_t priority,thread_t * new_thread)1764 kernel_thread_create(
1765 thread_continue_t continuation,
1766 void *parameter,
1767 integer_t priority,
1768 thread_t *new_thread)
1769 {
1770 kern_return_t result;
1771 thread_t thread;
1772 task_t task = kernel_task;
1773
1774 result = thread_create_internal(task, priority, continuation, parameter,
1775 TH_OPTION_NONE, &thread);
1776 if (result != KERN_SUCCESS) {
1777 return result;
1778 }
1779
1780 task_unlock(task);
1781 lck_mtx_unlock(&tasks_threads_lock);
1782
1783 stack_alloc(thread);
1784 assert(thread->kernel_stack != 0);
1785 #if !defined(XNU_TARGET_OS_OSX)
1786 if (priority > BASEPRI_KERNEL)
1787 #endif
1788 thread->reserved_stack = thread->kernel_stack;
1789
1790 if (debug_task & 1) {
1791 kprintf("kernel_thread_create: thread = %p continuation = %p\n", thread, continuation);
1792 }
1793 *new_thread = thread;
1794
1795 return result;
1796 }
1797
1798 kern_return_t
1799 kernel_thread_start_priority(
1800 thread_continue_t continuation,
1801 void *parameter,
1802 integer_t priority,
1803 thread_t *new_thread)
1804 {
1805 kern_return_t result;
1806 thread_t thread;
1807
1808 result = kernel_thread_create(continuation, parameter, priority, &thread);
1809 if (result != KERN_SUCCESS) {
1810 return result;
1811 }
1812
1813 *new_thread = thread;
1814
1815 thread_mtx_lock(thread);
1816 thread_start(thread);
1817 thread_mtx_unlock(thread);
1818
1819 return result;
1820 }
1821
1822 kern_return_t
1823 kernel_thread_start(
1824 thread_continue_t continuation,
1825 void *parameter,
1826 thread_t *new_thread)
1827 {
1828 return kernel_thread_start_priority(continuation, parameter, -1, new_thread);
1829 }
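
/*
 * Illustrative sketch (disabled, not part of the build): the common pattern
 * for spawning a kernel thread with kernel_thread_start() above. The routine
 * hands back a +1 reference on the new thread, which callers that keep no
 * handle must drop with thread_deallocate(). The example_* names below are
 * hypothetical.
 */
#if 0
static void
example_worker_continuation(void *parameter, wait_result_t wresult)
{
	(void)parameter;
	(void)wresult;
	/*
	 * Runs in kernel context on the new thread. A continuation never
	 * returns; it typically loops, or blocks with a further continuation.
	 */
}

static kern_return_t
example_spawn_worker(void)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(example_worker_continuation, NULL, &thread);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* drop the reference returned by kernel_thread_start() */
	thread_deallocate(thread);
	return KERN_SUCCESS;
}
#endif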
1830
1831 /* Separated into helper function so it can be used by THREAD_BASIC_INFO and THREAD_EXTENDED_INFO */
1832 /* it is assumed that the thread is locked by the caller */
1833 static void
1834 retrieve_thread_basic_info(thread_t thread, thread_basic_info_t basic_info)
1835 {
1836 int state, flags;
1837
1838 /* fill in info */
1839
1840 thread_read_times(thread, &basic_info->user_time,
1841 &basic_info->system_time, NULL);
1842
1843 /*
1844 * Update lazy-evaluated scheduler info because someone wants it.
1845 */
1846 if (SCHED(can_update_priority)(thread)) {
1847 SCHED(update_priority)(thread);
1848 }
1849
1850 basic_info->sleep_time = 0;
1851
1852 /*
1853 * To calculate cpu_usage, first correct for timer rate,
1854 * then for 5/8 ageing. The correction factor [3/5] is
1855 * (1/(5/8) - 1) = (8/5 - 1) = 3/5.
1856 */
1857 basic_info->cpu_usage = 0;
1858 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
1859 if (sched_tick_interval) {
1860 basic_info->cpu_usage = (integer_t)(((uint64_t)thread->cpu_usage
1861 * TH_USAGE_SCALE) / sched_tick_interval);
1862 basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
1863 }
1864 #endif
1865
1866 if (basic_info->cpu_usage > TH_USAGE_SCALE) {
1867 basic_info->cpu_usage = TH_USAGE_SCALE;
1868 }
1869
1870 basic_info->policy = ((thread->sched_mode == TH_MODE_TIMESHARE)?
1871 POLICY_TIMESHARE: POLICY_RR);
1872
1873 flags = 0;
1874 if (thread->options & TH_OPT_IDLE_THREAD) {
1875 flags |= TH_FLAGS_IDLE;
1876 }
1877
1878 if (thread->options & TH_OPT_GLOBAL_FORCED_IDLE) {
1879 flags |= TH_FLAGS_GLOBAL_FORCED_IDLE;
1880 }
1881
1882 if (!thread->kernel_stack) {
1883 flags |= TH_FLAGS_SWAPPED;
1884 }
1885
1886 state = 0;
1887 if (thread->state & TH_TERMINATE) {
1888 state = TH_STATE_HALTED;
1889 } else if (thread->state & TH_RUN) {
1890 state = TH_STATE_RUNNING;
1891 } else if (thread->state & TH_UNINT) {
1892 state = TH_STATE_UNINTERRUPTIBLE;
1893 } else if (thread->state & TH_SUSP) {
1894 state = TH_STATE_STOPPED;
1895 } else if (thread->state & TH_WAIT) {
1896 state = TH_STATE_WAITING;
1897 }
1898
1899 basic_info->run_state = state;
1900 basic_info->flags = flags;
1901
1902 basic_info->suspend_count = thread->user_stop_count;
1903
1904 return;
1905 }
1906
1907 kern_return_t
1908 thread_info_internal(
1909 thread_t thread,
1910 thread_flavor_t flavor,
1911 thread_info_t thread_info_out, /* ptr to OUT array */
1912 mach_msg_type_number_t *thread_info_count) /*IN/OUT*/
1913 {
1914 spl_t s;
1915
1916 if (thread == THREAD_NULL) {
1917 return KERN_INVALID_ARGUMENT;
1918 }
1919
1920 if (flavor == THREAD_BASIC_INFO) {
1921 if (*thread_info_count < THREAD_BASIC_INFO_COUNT) {
1922 return KERN_INVALID_ARGUMENT;
1923 }
1924
1925 s = splsched();
1926 thread_lock(thread);
1927
1928 retrieve_thread_basic_info(thread, (thread_basic_info_t) thread_info_out);
1929
1930 thread_unlock(thread);
1931 splx(s);
1932
1933 *thread_info_count = THREAD_BASIC_INFO_COUNT;
1934
1935 return KERN_SUCCESS;
1936 } else if (flavor == THREAD_IDENTIFIER_INFO) {
1937 thread_identifier_info_t identifier_info;
1938
1939 if (*thread_info_count < THREAD_IDENTIFIER_INFO_COUNT) {
1940 return KERN_INVALID_ARGUMENT;
1941 }
1942
1943 identifier_info = __IGNORE_WCASTALIGN((thread_identifier_info_t)thread_info_out);
1944
1945 s = splsched();
1946 thread_lock(thread);
1947
1948 identifier_info->thread_id = thread->thread_id;
1949 identifier_info->thread_handle = thread->machine.cthread_self;
1950 identifier_info->dispatch_qaddr = thread_dispatchqaddr(thread);
1951
1952 thread_unlock(thread);
1953 splx(s);
1954 return KERN_SUCCESS;
1955 } else if (flavor == THREAD_SCHED_TIMESHARE_INFO) {
1956 policy_timeshare_info_t ts_info;
1957
1958 if (*thread_info_count < POLICY_TIMESHARE_INFO_COUNT) {
1959 return KERN_INVALID_ARGUMENT;
1960 }
1961
1962 ts_info = (policy_timeshare_info_t)thread_info_out;
1963
1964 s = splsched();
1965 thread_lock(thread);
1966
1967 if (thread->sched_mode != TH_MODE_TIMESHARE) {
1968 thread_unlock(thread);
1969 splx(s);
1970 return KERN_INVALID_POLICY;
1971 }
1972
1973 ts_info->depressed = (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) != 0;
1974 if (ts_info->depressed) {
1975 ts_info->base_priority = DEPRESSPRI;
1976 ts_info->depress_priority = thread->base_pri;
1977 } else {
1978 ts_info->base_priority = thread->base_pri;
1979 ts_info->depress_priority = -1;
1980 }
1981
1982 ts_info->cur_priority = thread->sched_pri;
1983 ts_info->max_priority = thread->max_priority;
1984
1985 thread_unlock(thread);
1986 splx(s);
1987
1988 *thread_info_count = POLICY_TIMESHARE_INFO_COUNT;
1989
1990 return KERN_SUCCESS;
1991 } else if (flavor == THREAD_SCHED_FIFO_INFO) {
1992 if (*thread_info_count < POLICY_FIFO_INFO_COUNT) {
1993 return KERN_INVALID_ARGUMENT;
1994 }
1995
1996 return KERN_INVALID_POLICY;
1997 } else if (flavor == THREAD_SCHED_RR_INFO) {
1998 policy_rr_info_t rr_info;
1999 uint32_t quantum_time;
2000 uint64_t quantum_ns;
2001
2002 if (*thread_info_count < POLICY_RR_INFO_COUNT) {
2003 return KERN_INVALID_ARGUMENT;
2004 }
2005
2006 rr_info = (policy_rr_info_t) thread_info_out;
2007
2008 s = splsched();
2009 thread_lock(thread);
2010
2011 if (thread->sched_mode == TH_MODE_TIMESHARE) {
2012 thread_unlock(thread);
2013 splx(s);
2014
2015 return KERN_INVALID_POLICY;
2016 }
2017
2018 rr_info->depressed = (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) != 0;
2019 if (rr_info->depressed) {
2020 rr_info->base_priority = DEPRESSPRI;
2021 rr_info->depress_priority = thread->base_pri;
2022 } else {
2023 rr_info->base_priority = thread->base_pri;
2024 rr_info->depress_priority = -1;
2025 }
2026
2027 quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
2028 absolutetime_to_nanoseconds(quantum_time, &quantum_ns);
2029
2030 rr_info->max_priority = thread->max_priority;
2031 rr_info->quantum = (uint32_t)(quantum_ns / 1000 / 1000);
2032
2033 thread_unlock(thread);
2034 splx(s);
2035
2036 *thread_info_count = POLICY_RR_INFO_COUNT;
2037
2038 return KERN_SUCCESS;
2039 } else if (flavor == THREAD_EXTENDED_INFO) {
2040 thread_basic_info_data_t basic_info;
2041 thread_extended_info_t extended_info = __IGNORE_WCASTALIGN((thread_extended_info_t)thread_info_out);
2042
2043 if (*thread_info_count < THREAD_EXTENDED_INFO_COUNT) {
2044 return KERN_INVALID_ARGUMENT;
2045 }
2046
2047 s = splsched();
2048 thread_lock(thread);
2049
2050 /* NOTE: This mimics fill_taskthreadinfo(), which is the function used by proc_pidinfo() for
2051 * the PROC_PIDTHREADINFO flavor (which can't be used on corpses)
2052 */
2053 retrieve_thread_basic_info(thread, &basic_info);
2054 extended_info->pth_user_time = (((uint64_t)basic_info.user_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.user_time.microseconds * NSEC_PER_USEC));
2055 extended_info->pth_system_time = (((uint64_t)basic_info.system_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.system_time.microseconds * NSEC_PER_USEC));
2056
2057 extended_info->pth_cpu_usage = basic_info.cpu_usage;
2058 extended_info->pth_policy = basic_info.policy;
2059 extended_info->pth_run_state = basic_info.run_state;
2060 extended_info->pth_flags = basic_info.flags;
2061 extended_info->pth_sleep_time = basic_info.sleep_time;
2062 extended_info->pth_curpri = thread->sched_pri;
2063 extended_info->pth_priority = thread->base_pri;
2064 extended_info->pth_maxpriority = thread->max_priority;
2065
2066 bsd_getthreadname(get_bsdthread_info(thread), extended_info->pth_name);
2067
2068 thread_unlock(thread);
2069 splx(s);
2070
2071 *thread_info_count = THREAD_EXTENDED_INFO_COUNT;
2072
2073 return KERN_SUCCESS;
2074 } else if (flavor == THREAD_DEBUG_INFO_INTERNAL) {
2075 #if DEVELOPMENT || DEBUG
2076 thread_debug_info_internal_t dbg_info;
2077 if (*thread_info_count < THREAD_DEBUG_INFO_INTERNAL_COUNT) {
2078 return KERN_NOT_SUPPORTED;
2079 }
2080
2081 if (thread_info_out == NULL) {
2082 return KERN_INVALID_ARGUMENT;
2083 }
2084
2085 dbg_info = __IGNORE_WCASTALIGN((thread_debug_info_internal_t)thread_info_out);
2086 dbg_info->page_creation_count = thread->t_page_creation_count;
2087
2088 *thread_info_count = THREAD_DEBUG_INFO_INTERNAL_COUNT;
2089 return KERN_SUCCESS;
2090 #endif /* DEVELOPMENT || DEBUG */
2091 return KERN_NOT_SUPPORTED;
2092 }
2093
2094 return KERN_INVALID_ARGUMENT;
2095 }
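
/*
 * Illustrative sketch (disabled; a hypothetical user-space caller, shown here
 * for context only): thread_info_internal() above backs the Mach thread_info()
 * call, so a typical consumer of the THREAD_BASIC_INFO flavor looks like the
 * following. The count argument is IN/OUT: callers pass the buffer capacity
 * and get back the number of words actually filled in.
 */
#if 0
#include <mach/mach.h>

static kern_return_t
example_read_basic_info(thread_act_t thread, thread_basic_info_data_t *info)
{
	mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;

	return thread_info(thread, THREAD_BASIC_INFO,
	    (thread_info_t)info, &count);
}
#endif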
2096
2097 void
2098 thread_read_times(
2099 thread_t thread,
2100 time_value_t *user_time,
2101 time_value_t *system_time,
2102 time_value_t *runnable_time)
2103 {
2104 clock_sec_t secs;
2105 clock_usec_t usecs;
2106 uint64_t tval_user, tval_system;
2107
2108 tval_user = timer_grab(&thread->user_timer);
2109 tval_system = timer_grab(&thread->system_timer);
2110
2111 if (thread->precise_user_kernel_time) {
2112 absolutetime_to_microtime(tval_user, &secs, &usecs);
2113 user_time->seconds = (typeof(user_time->seconds))secs;
2114 user_time->microseconds = usecs;
2115
2116 absolutetime_to_microtime(tval_system, &secs, &usecs);
2117 system_time->seconds = (typeof(system_time->seconds))secs;
2118 system_time->microseconds = usecs;
2119 } else {
2120 /* system_timer may represent either sys or user */
2121 tval_user += tval_system;
2122 absolutetime_to_microtime(tval_user, &secs, &usecs);
2123 user_time->seconds = (typeof(user_time->seconds))secs;
2124 user_time->microseconds = usecs;
2125
2126 system_time->seconds = 0;
2127 system_time->microseconds = 0;
2128 }
2129
2130 if (runnable_time) {
2131 uint64_t tval_runnable = timer_grab(&thread->runnable_timer);
2132 absolutetime_to_microtime(tval_runnable, &secs, &usecs);
2133 runnable_time->seconds = (typeof(runnable_time->seconds))secs;
2134 runnable_time->microseconds = usecs;
2135 }
2136 }
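
/*
 * Illustrative sketch (disabled, hypothetical helper): summing the user and
 * system components reported by thread_read_times() into a single total, the
 * same way SENDING_NOTIFICATION__THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU() does
 * later in this file. When precise_user_kernel_time is off, the combined time
 * is reported entirely as user time, so the sum remains correct.
 */
#if 0
static void
example_thread_total_time(thread_t thread, time_value_t *total)
{
	time_value_t user_time, system_time;

	thread_read_times(thread, &user_time, &system_time, NULL);
	total->seconds = 0;
	total->microseconds = 0;
	time_value_add(total, &user_time);
	time_value_add(total, &system_time);
}
#endif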
2137
2138 uint64_t
2139 thread_get_runtime_self(void)
2140 {
2141 boolean_t interrupt_state;
2142 uint64_t runtime;
2143 thread_t thread = NULL;
2144 processor_t processor = NULL;
2145
2146 thread = current_thread();
2147
2148 /* Not interrupt safe, as the scheduler may otherwise update timer values underneath us */
2149 interrupt_state = ml_set_interrupts_enabled(FALSE);
2150 processor = current_processor();
2151 timer_update(processor->thread_timer, mach_absolute_time());
2152 runtime = (timer_grab(&thread->user_timer) + timer_grab(&thread->system_timer));
2153 ml_set_interrupts_enabled(interrupt_state);
2154
2155 return runtime;
2156 }
2157
2158 kern_return_t
2159 thread_assign(
2160 __unused thread_t thread,
2161 __unused processor_set_t new_pset)
2162 {
2163 return KERN_FAILURE;
2164 }
2165
2166 /*
2167 * thread_assign_default:
2168 *
2169 * Special version of thread_assign for assigning threads to default
2170 * processor set.
2171 */
2172 kern_return_t
2173 thread_assign_default(
2174 thread_t thread)
2175 {
2176 return thread_assign(thread, &pset0);
2177 }
2178
2179 /*
2180 * thread_get_assignment
2181 *
2182 * Return current assignment for this thread.
2183 */
2184 kern_return_t
2185 thread_get_assignment(
2186 thread_t thread,
2187 processor_set_t *pset)
2188 {
2189 if (thread == NULL) {
2190 return KERN_INVALID_ARGUMENT;
2191 }
2192
2193 *pset = &pset0;
2194
2195 return KERN_SUCCESS;
2196 }
2197
2198 /*
2199 * thread_wire_internal:
2200 *
2201 * Specify that the target thread must always be able
2202 * to run and to allocate memory.
2203 */
2204 kern_return_t
2205 thread_wire_internal(
2206 host_priv_t host_priv,
2207 thread_t thread,
2208 boolean_t wired,
2209 boolean_t *prev_state)
2210 {
2211 if (host_priv == NULL || thread != current_thread()) {
2212 return KERN_INVALID_ARGUMENT;
2213 }
2214
2215 if (prev_state) {
2216 *prev_state = (thread->options & TH_OPT_VMPRIV) != 0;
2217 }
2218
2219 if (wired) {
2220 if (!(thread->options & TH_OPT_VMPRIV)) {
2221 vm_page_free_reserve(1); /* XXX */
2222 }
2223 thread->options |= TH_OPT_VMPRIV;
2224 } else {
2225 if (thread->options & TH_OPT_VMPRIV) {
2226 vm_page_free_reserve(-1); /* XXX */
2227 }
2228 thread->options &= ~TH_OPT_VMPRIV;
2229 }
2230
2231 return KERN_SUCCESS;
2232 }
2233
2234
2235 /*
2236 * thread_wire:
2237 *
2238 * User-api wrapper for thread_wire_internal()
2239 */
2240 kern_return_t
2241 thread_wire(
2242 host_priv_t host_priv,
2243 thread_t thread,
2244 boolean_t wired)
2245 {
2246 return thread_wire_internal(host_priv, thread, wired, NULL);
2247 }
2248
2249
2250 boolean_t
2251 is_vm_privileged(void)
2252 {
2253 return current_thread()->options & TH_OPT_VMPRIV ? TRUE : FALSE;
2254 }
2255
2256 boolean_t
2257 set_vm_privilege(boolean_t privileged)
2258 {
2259 boolean_t was_vmpriv;
2260
2261 if (current_thread()->options & TH_OPT_VMPRIV) {
2262 was_vmpriv = TRUE;
2263 } else {
2264 was_vmpriv = FALSE;
2265 }
2266
2267 if (privileged != FALSE) {
2268 current_thread()->options |= TH_OPT_VMPRIV;
2269 } else {
2270 current_thread()->options &= ~TH_OPT_VMPRIV;
2271 }
2272
2273 return was_vmpriv;
2274 }
2275
2276 void
2277 thread_floor_boost_set_promotion_locked(thread_t thread)
2278 {
2279 assert(thread->priority_floor_count > 0);
2280
2281 if (!(thread->sched_flags & TH_SFLAG_FLOOR_PROMOTED)) {
2282 sched_thread_promote_reason(thread, TH_SFLAG_FLOOR_PROMOTED, 0);
2283 }
2284 }
2285
2286 /*! @function thread_priority_floor_start
2287 * @abstract boost the current thread priority to floor.
2288 * @discussion Increase the priority of the current thread to at least MINPRI_FLOOR.
2289 * The boost will be maintained until a corresponding thread_priority_floor_end()
2290 * is called. Every call of thread_priority_floor_start() needs to have a corresponding
2291 * call to thread_priority_floor_end() from the same thread.
2292 * No thread can return to userspace before calling thread_priority_floor_end().
2293 *
2294 * NOTE: Avoid using this function. Try to use gate_t or sleep_with_inheritor()
2295 * instead.
2296 * @result a token to be given to the corresponding thread_priority_floor_end()
2297 */
2298 thread_pri_floor_t
2299 thread_priority_floor_start(void)
2300 {
2301 thread_pri_floor_t ret;
2302 thread_t thread = current_thread();
2303 __assert_only uint16_t prev_priority_floor_count;
2304
2305 assert(thread->priority_floor_count < UINT16_MAX);
2306 prev_priority_floor_count = thread->priority_floor_count++;
2307 #if MACH_ASSERT
2308 /*
2309 * Set the AST to check that the
2310 * priority_floor_count is back to zero when
2311 * going back to userspace.
2312 * Set it only once, when we increment the count for the first time.
2313 */
2314 if (prev_priority_floor_count == 0) {
2315 act_set_debug_assert();
2316 }
2317 #endif
2318
2319 ret.thread = thread;
2320 return ret;
2321 }
2322
2323 /*! @function thread_priority_floor_end
2324 * @abstract ends the floor boost.
2325 * @param token the token obtained from thread_priority_floor_start()
2326 * @discussion ends the priority floor boost started with thread_priority_floor_start()
2327 */
2328 void
2329 thread_priority_floor_end(thread_pri_floor_t *token)
2330 {
2331 thread_t thread = current_thread();
2332
2333 assert(thread->priority_floor_count > 0);
2334 assertf(token->thread == thread, "thread_priority_floor_end called from a different thread from thread_priority_floor_start %p %p", thread, token->thread);
2335
2336 if ((thread->priority_floor_count-- == 1) && (thread->sched_flags & TH_SFLAG_FLOOR_PROMOTED)) {
2337 spl_t s = splsched();
2338 thread_lock(thread);
2339
2340 if (thread->sched_flags & TH_SFLAG_FLOOR_PROMOTED) {
2341 sched_thread_unpromote_reason(thread, TH_SFLAG_FLOOR_PROMOTED, 0);
2342 }
2343
2344 thread_unlock(thread);
2345 splx(s);
2346 }
2347
2348 token->thread = NULL;
2349 }
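
/*
 * Illustrative sketch (disabled, hypothetical caller): the required pairing of
 * thread_priority_floor_start() and thread_priority_floor_end() documented
 * above. The token returned by the start routine must be handed back to the
 * end routine from the same thread, before that thread returns to userspace.
 */
#if 0
static void
example_do_work_with_floor_boost(void)
{
	thread_pri_floor_t token = thread_priority_floor_start();

	/* ... critical work that must not run below MINPRI_FLOOR ... */

	thread_priority_floor_end(&token);
}
#endif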
2350
2351 /*
2352 * XXX assuming current thread only, for now...
2353 */
2354 void
2355 thread_guard_violation(thread_t thread,
2356 mach_exception_data_type_t code, mach_exception_data_type_t subcode, boolean_t fatal)
2357 {
2358 assert(thread == current_thread());
2359
2360 /* Don't set up the AST for kernel threads; this check is needed to ensure
2361 * that the guard_exc_* fields in the thread structure are set only by the
2362 * current thread and therefore don't require a lock.
2363 */
2364 if (get_threadtask(thread) == kernel_task) {
2365 return;
2366 }
2367
2368 assert(EXC_GUARD_DECODE_GUARD_TYPE(code));
2369
2370 /*
2371 * Use the saved state area of the thread structure
2372 * to store all info required to handle the AST when
2373 * returning to userspace. It's possible that there is
2374 * already a pending guard exception. If it's non-fatal,
2375 * it can only be over-written by a fatal exception code.
2376 */
2377 if (thread->guard_exc_info.code && (thread->guard_exc_fatal || !fatal)) {
2378 return;
2379 }
2380
2381 thread->guard_exc_info.code = code;
2382 thread->guard_exc_info.subcode = subcode;
2383 thread->guard_exc_fatal = fatal ? 1 : 0;
2384
2385 spl_t s = splsched();
2386 thread_ast_set(thread, AST_GUARD);
2387 ast_propagate(thread);
2388 splx(s);
2389 }
2390
2391 #if CONFIG_DEBUG_SYSCALL_REJECTION
2392 extern void rejected_syscall_guard_ast(thread_t __unused t, mach_exception_data_type_t code, mach_exception_data_type_t subcode);
2393 #endif /* CONFIG_DEBUG_SYSCALL_REJECTION */
2394
2395 /*
2396 * guard_ast:
2397 *
2398 * Handle AST_GUARD for a thread. This routine looks at the
2399 * state saved in the thread structure to determine the cause
2400 * of this exception. Based on this value, it invokes the
2401 * appropriate routine which determines other exception related
2402 * info and raises the exception.
2403 */
2404 void
2405 guard_ast(thread_t t)
2406 {
2407 const mach_exception_data_type_t
2408 code = t->guard_exc_info.code,
2409 subcode = t->guard_exc_info.subcode;
2410
2411 t->guard_exc_info.code = 0;
2412 t->guard_exc_info.subcode = 0;
2413 t->guard_exc_fatal = 0;
2414
2415 switch (EXC_GUARD_DECODE_GUARD_TYPE(code)) {
2416 case GUARD_TYPE_NONE:
2417 /* lingering AST_GUARD on the processor? */
2418 break;
2419 case GUARD_TYPE_MACH_PORT:
2420 mach_port_guard_ast(t, code, subcode);
2421 break;
2422 case GUARD_TYPE_FD:
2423 fd_guard_ast(t, code, subcode);
2424 break;
2425 #if CONFIG_VNGUARD
2426 case GUARD_TYPE_VN:
2427 vn_guard_ast(t, code, subcode);
2428 break;
2429 #endif
2430 case GUARD_TYPE_VIRT_MEMORY:
2431 virt_memory_guard_ast(t, code, subcode);
2432 break;
2433 #if CONFIG_DEBUG_SYSCALL_REJECTION
2434 case GUARD_TYPE_REJECTED_SC:
2435 rejected_syscall_guard_ast(t, code, subcode);
2436 break;
2437 #endif /* CONFIG_DEBUG_SYSCALL_REJECTION */
2438 default:
2439 panic("guard_exc_info %llx %llx", code, subcode);
2440 }
2441 }
2442
2443 static void
2444 thread_cputime_callback(int warning, __unused const void *arg0, __unused const void *arg1)
2445 {
2446 if (warning == LEDGER_WARNING_ROSE_ABOVE) {
2447 #if CONFIG_TELEMETRY
2448 /*
2449 * This thread is in danger of violating the CPU usage monitor. Enable telemetry
2450 * on the entire task so there are micro-stackshots available if and when
2451 * EXC_RESOURCE is triggered. We could have chosen to enable micro-stackshots
2452 * for this thread only; but now that this task is suspect, knowing what all of
2453 * its threads are up to will be useful.
2454 */
2455 telemetry_task_ctl(current_task(), TF_CPUMON_WARNING, 1);
2456 #endif
2457 return;
2458 }
2459
2460 #if CONFIG_TELEMETRY
2461 /*
2462 * If the balance has dipped below the warning level (LEDGER_WARNING_DIPPED_BELOW) or
2463 * exceeded the limit, turn telemetry off for the task.
2464 */
2465 telemetry_task_ctl(current_task(), TF_CPUMON_WARNING, 0);
2466 #endif
2467
2468 if (warning == 0) {
2469 SENDING_NOTIFICATION__THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU();
2470 }
2471 }
2472
2473 void __attribute__((noinline))
2474 SENDING_NOTIFICATION__THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU(void)
2475 {
2476 int pid = 0;
2477 task_t task = current_task();
2478 thread_t thread = current_thread();
2479 uint64_t tid = thread->thread_id;
2480 const char *procname = "unknown";
2481 time_value_t thread_total_time = {0, 0};
2482 time_value_t thread_system_time;
2483 time_value_t thread_user_time;
2484 int action;
2485 uint8_t percentage;
2486 uint32_t usage_percent = 0;
2487 uint32_t interval_sec;
2488 uint64_t interval_ns;
2489 uint64_t balance_ns;
2490 boolean_t fatal = FALSE;
2491 boolean_t send_exc_resource = TRUE; /* in addition to RESOURCE_NOTIFY */
2492 kern_return_t kr;
2493
2494 #ifdef EXC_RESOURCE_MONITORS
2495 mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
2496 #endif /* EXC_RESOURCE_MONITORS */
2497 struct ledger_entry_info lei;
2498
2499 assert(thread->t_threadledger != LEDGER_NULL);
2500
2501 /*
2502 * Extract the fatal bit and suspend the monitor (which clears the bit).
2503 */
2504 task_lock(task);
2505 if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_CPUMON) {
2506 fatal = TRUE;
2507 send_exc_resource = TRUE;
2508 }
2509 /* Only one thread can be here at a time. Whichever makes it through
2510 * first will successfully suspend the monitor and proceed to send the
2511 * notification. Other threads will get an error trying to suspend the
2512 * monitor and give up on sending the notification. In the first release,
2513 * the monitor won't be resumed for a number of seconds, but we may
2514 * eventually need to handle low-latency resume.
2515 */
2516 kr = task_suspend_cpumon(task);
2517 task_unlock(task);
2518 if (kr == KERN_INVALID_ARGUMENT) {
2519 return;
2520 }
2521
2522 #ifdef MACH_BSD
2523 pid = proc_selfpid();
2524 if (task->bsd_info != NULL) {
2525 procname = proc_name_address(task->bsd_info);
2526 }
2527 #endif
2528
2529 thread_get_cpulimit(&action, &percentage, &interval_ns);
2530
2531 interval_sec = (uint32_t)(interval_ns / NSEC_PER_SEC);
2532
2533 thread_read_times(thread, &thread_user_time, &thread_system_time, NULL);
2534 time_value_add(&thread_total_time, &thread_user_time);
2535 time_value_add(&thread_total_time, &thread_system_time);
2536 ledger_get_entry_info(thread->t_threadledger, thread_ledgers.cpu_time, &lei);
2537
2538 /* credit/debit/balance/limit are in absolute time units;
2539 * the refill info is in nanoseconds. */
2540 absolutetime_to_nanoseconds(lei.lei_balance, &balance_ns);
2541 if (lei.lei_last_refill > 0) {
2542 usage_percent = (uint32_t)((balance_ns * 100ULL) / lei.lei_last_refill);
2543 }
2544
2545 /* TODO: show task total runtime (via TASK_ABSOLUTETIME_INFO)? */
2546 printf("process %s[%d] thread %llu caught burning CPU! It used more than %d%% CPU over %u seconds\n",
2547 procname, pid, tid, percentage, interval_sec);
2548 printf(" (actual recent usage: %d%% over ~%llu seconds)\n",
2549 usage_percent, (lei.lei_last_refill + NSEC_PER_SEC / 2) / NSEC_PER_SEC);
2550 printf(" Thread lifetime cpu usage %d.%06ds, (%d.%06d user, %d.%06d sys)\n",
2551 thread_total_time.seconds, thread_total_time.microseconds,
2552 thread_user_time.seconds, thread_user_time.microseconds,
2553 thread_system_time.seconds, thread_system_time.microseconds);
2554 printf(" Ledger balance: %lld; mabs credit: %lld; mabs debit: %lld\n",
2555 lei.lei_balance, lei.lei_credit, lei.lei_debit);
2556 printf(" mabs limit: %llu; mabs period: %llu ns; last refill: %llu ns%s.\n",
2557 lei.lei_limit, lei.lei_refill_period, lei.lei_last_refill,
2558 (fatal ? " [fatal violation]" : ""));
2559
2560 /*
2561 * For now, send RESOURCE_NOTIFY in parallel with EXC_RESOURCE. Once
2562 * we have logging parity, we will stop sending EXC_RESOURCE (24508922).
2563 */
2564
2565 /* RESOURCE_NOTIFY MIG specifies nanoseconds of CPU time */
2566 lei.lei_balance = balance_ns;
2567 absolutetime_to_nanoseconds(lei.lei_limit, &lei.lei_limit);
2568 trace_resource_violation(RMON_CPUUSAGE_VIOLATED, &lei);
2569 kr = send_resource_violation(send_cpu_usage_violation, task, &lei,
2570 fatal ? kRNFatalLimitFlag : 0);
2571 if (kr) {
2572 printf("send_resource_violation(CPU usage, ...): error %#x\n", kr);
2573 }
2574
2575 #ifdef EXC_RESOURCE_MONITORS
2576 if (send_exc_resource) {
2577 if (disable_exc_resource) {
2578 printf("process %s[%d] thread %llu caught burning CPU! "
2579 "EXC_RESOURCE%s supressed by a boot-arg\n",
2580 procname, pid, tid, fatal ? " (and termination)" : "");
2581 return;
2582 }
2583
2584 if (audio_active) {
2585 printf("process %s[%d] thread %llu caught burning CPU! "
2586 "EXC_RESOURCE & termination supressed due to audio playback\n",
2587 procname, pid, tid);
2588 return;
2589 }
2590 }
2591
2592
2593 if (send_exc_resource) {
2594 code[0] = code[1] = 0;
2595 EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_CPU);
2596 if (fatal) {
2597 EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_CPU_MONITOR_FATAL);
2598 } else {
2599 EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_CPU_MONITOR);
2600 }
2601 EXC_RESOURCE_CPUMONITOR_ENCODE_INTERVAL(code[0], interval_sec);
2602 EXC_RESOURCE_CPUMONITOR_ENCODE_PERCENTAGE(code[0], percentage);
2603 EXC_RESOURCE_CPUMONITOR_ENCODE_PERCENTAGE(code[1], usage_percent);
2604 exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
2605 }
2606 #endif /* EXC_RESOURCE_MONITORS */
2607
2608 if (fatal) {
2609 #if CONFIG_JETSAM
2610 jetsam_on_ledger_cpulimit_exceeded();
2611 #else
2612 task_terminate_internal(task);
2613 #endif
2614 }
2615 }
2616
2617 #if DEVELOPMENT || DEBUG
2618 void __attribute__((noinline))
2619 SENDING_NOTIFICATION__TASK_HAS_TOO_MANY_THREADS(task_t task, int thread_count)
2620 {
2621 mach_exception_data_type_t code[EXCEPTION_CODE_MAX] = {0};
2622 int pid = task_pid(task);
2623 char procname[MAXCOMLEN + 1] = "unknown";
2624
2625 if (pid == 1) {
2626 /*
2627 * Cannot suspend launchd
2628 */
2629 return;
2630 }
2631
2632 proc_name(pid, procname, sizeof(procname));
2633
2634 if (disable_exc_resource) {
2635 printf("process %s[%d] crossed thread count high watermark (%d), EXC_RESOURCE "
2636 "supressed by a boot-arg. \n", procname, pid, thread_count);
2637 return;
2638 }
2639
2640 if (audio_active) {
2641 printf("process %s[%d] crossed thread count high watermark (%d), EXC_RESOURCE "
2642 "supressed due to audio playback.\n", procname, pid, thread_count);
2643 return;
2644 }
2645
2646 if (!exc_via_corpse_forking) {
2647 printf("process %s[%d] crossed thread count high watermark (%d), EXC_RESOURCE "
2648 "supressed due to corpse forking being disabled.\n", procname, pid,
2649 thread_count);
2650 return;
2651 }
2652
2653 printf("process %s[%d] crossed thread count high watermark (%d), sending "
2654 "EXC_RESOURCE\n", procname, pid, thread_count);
2655
2656 EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_THREADS);
2657 EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_THREADS_HIGH_WATERMARK);
2658 EXC_RESOURCE_THREADS_ENCODE_THREADS(code[0], thread_count);
2659
2660 task_enqueue_exception_with_corpse(task, EXC_RESOURCE, code, EXCEPTION_CODE_MAX, NULL);
2661 }
2662 #endif /* DEVELOPMENT || DEBUG */
2663
2664 void
2665 thread_update_io_stats(thread_t thread, int size, int io_flags)
2666 {
2667 task_t task = get_threadtask(thread);
2668 int io_tier;
2669
2670 if (thread->thread_io_stats == NULL || task->task_io_stats == NULL) {
2671 return;
2672 }
2673
2674 if (io_flags & DKIO_READ) {
2675 UPDATE_IO_STATS(thread->thread_io_stats->disk_reads, size);
2676 UPDATE_IO_STATS_ATOMIC(task->task_io_stats->disk_reads, size);
2677 }
2678
2679 if (io_flags & DKIO_META) {
2680 UPDATE_IO_STATS(thread->thread_io_stats->metadata, size);
2681 UPDATE_IO_STATS_ATOMIC(task->task_io_stats->metadata, size);
2682 }
2683
2684 if (io_flags & DKIO_PAGING) {
2685 UPDATE_IO_STATS(thread->thread_io_stats->paging, size);
2686 UPDATE_IO_STATS_ATOMIC(task->task_io_stats->paging, size);
2687 }
2688
2689 io_tier = ((io_flags & DKIO_TIER_MASK) >> DKIO_TIER_SHIFT);
2690 assert(io_tier < IO_NUM_PRIORITIES);
2691
2692 UPDATE_IO_STATS(thread->thread_io_stats->io_priority[io_tier], size);
2693 UPDATE_IO_STATS_ATOMIC(task->task_io_stats->io_priority[io_tier], size);
2694
2695 /* Update Total I/O Counts */
2696 UPDATE_IO_STATS(thread->thread_io_stats->total_io, size);
2697 UPDATE_IO_STATS_ATOMIC(task->task_io_stats->total_io, size);
2698
2699 if (!(io_flags & DKIO_READ)) {
2700 DTRACE_IO3(physical_writes, struct task *, task, uint32_t, size, int, io_flags);
2701 ledger_credit(task->ledger, task_ledgers.physical_writes, size);
2702 }
2703 }
2704
2705 static void
2706 init_thread_ledgers(void)
2707 {
2708 ledger_template_t t;
2709 int idx;
2710
2711 assert(thread_ledger_template == NULL);
2712
2713 if ((t = ledger_template_create("Per-thread ledger")) == NULL) {
2714 panic("couldn't create thread ledger template");
2715 }
2716
2717 if ((idx = ledger_entry_add(t, "cpu_time", "sched", "ns")) < 0) {
2718 panic("couldn't create cpu_time entry for thread ledger template");
2719 }
2720
2721 if (ledger_set_callback(t, idx, thread_cputime_callback, NULL, NULL) < 0) {
2722 panic("couldn't set thread ledger callback for cpu_time entry");
2723 }
2724
2725 thread_ledgers.cpu_time = idx;
2726
2727 ledger_template_complete(t);
2728 thread_ledger_template = t;
2729 }
2730
2731 /*
2732 * Returns the currently applied CPU usage limit, or 0/0 if none is applied.
2733 */
2734 int
2735 thread_get_cpulimit(int *action, uint8_t *percentage, uint64_t *interval_ns)
2736 {
2737 int64_t abstime = 0;
2738 uint64_t limittime = 0;
2739 thread_t thread = current_thread();
2740
2741 *percentage = 0;
2742 *interval_ns = 0;
2743 *action = 0;
2744
2745 if (thread->t_threadledger == LEDGER_NULL) {
2746 /*
2747 * This thread has no per-thread ledger, so it can't possibly
2748 * have a CPU limit applied.
2749 */
2750 return KERN_SUCCESS;
2751 }
2752
2753 ledger_get_period(thread->t_threadledger, thread_ledgers.cpu_time, interval_ns);
2754 ledger_get_limit(thread->t_threadledger, thread_ledgers.cpu_time, &abstime);
2755
2756 if ((abstime == LEDGER_LIMIT_INFINITY) || (*interval_ns == 0)) {
2757 /*
2758 * This thread's CPU time ledger has no period or limit; so it
2759 * doesn't have a CPU limit applied.
2760 */
2761 return KERN_SUCCESS;
2762 }
2763
2764 /*
2765 * This calculation is the converse to the one in thread_set_cpulimit().
2766 */
2767 absolutetime_to_nanoseconds(abstime, &limittime);
2768 *percentage = (uint8_t)((limittime * 100ULL) / *interval_ns);
2769 assert(*percentage <= 100);
2770
2771 if (thread->options & TH_OPT_PROC_CPULIMIT) {
2772 assert((thread->options & TH_OPT_PRVT_CPULIMIT) == 0);
2773
2774 *action = THREAD_CPULIMIT_BLOCK;
2775 } else if (thread->options & TH_OPT_PRVT_CPULIMIT) {
2776 assert((thread->options & TH_OPT_PROC_CPULIMIT) == 0);
2777
2778 *action = THREAD_CPULIMIT_EXCEPTION;
2779 } else {
2780 *action = THREAD_CPULIMIT_DISABLE;
2781 }
2782
2783 return KERN_SUCCESS;
2784 }
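
/*
 * Illustrative sketch (disabled, hypothetical caller): reading back a limit
 * applied by thread_set_cpulimit() below. For a 50% limit over a 1-second
 * interval, this reports percentage == 50 and interval_ns == NSEC_PER_SEC;
 * 0/0 means no limit is applied to the current thread.
 */
#if 0
static void
example_query_cpulimit(void)
{
	int action;
	uint8_t percentage;
	uint64_t interval_ns;

	thread_get_cpulimit(&action, &percentage, &interval_ns);
	if (percentage == 0 && interval_ns == 0) {
		/* no CPU limit is applied to the current thread */
	}
}
#endif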
2785
2786 /*
2787 * Set CPU usage limit on a thread.
2788 *
2789 * Calling with percentage of 0 will unset the limit for this thread.
2790 */
2791 int
2792 thread_set_cpulimit(int action, uint8_t percentage, uint64_t interval_ns)
2793 {
2794 thread_t thread = current_thread();
2795 ledger_t l;
2796 uint64_t limittime = 0;
2797 uint64_t abstime = 0;
2798
2799 assert(percentage <= 100);
2800
2801 if (action == THREAD_CPULIMIT_DISABLE) {
2802 /*
2803 * Remove CPU limit, if any exists.
2804 */
2805 if (thread->t_threadledger != LEDGER_NULL) {
2806 l = thread->t_threadledger;
2807 ledger_set_limit(l, thread_ledgers.cpu_time, LEDGER_LIMIT_INFINITY, 0);
2808 ledger_set_action(l, thread_ledgers.cpu_time, LEDGER_ACTION_IGNORE);
2809 thread->options &= ~(TH_OPT_PROC_CPULIMIT | TH_OPT_PRVT_CPULIMIT);
2810 }
2811
2812 return 0;
2813 }
2814
2815 if (interval_ns < MINIMUM_CPULIMIT_INTERVAL_MS * NSEC_PER_MSEC) {
2816 return KERN_INVALID_ARGUMENT;
2817 }
2818
2819 l = thread->t_threadledger;
2820 if (l == LEDGER_NULL) {
2821 /*
2822 * This thread doesn't yet have a per-thread ledger; so create one with the CPU time entry active.
2823 */
2824 if ((l = ledger_instantiate(thread_ledger_template, LEDGER_CREATE_INACTIVE_ENTRIES)) == LEDGER_NULL) {
2825 return KERN_RESOURCE_SHORTAGE;
2826 }
2827
2828 /*
2829 * We are the first to create this thread's ledger, so only activate our entry.
2830 */
2831 ledger_entry_setactive(l, thread_ledgers.cpu_time);
2832 thread->t_threadledger = l;
2833 }
2834
2835 /*
2836 * The limit is specified as a percentage of CPU over an interval in nanoseconds.
2837 * Calculate the amount of CPU time that the thread needs to consume in order to hit the limit.
2838 */
2839 limittime = (interval_ns * percentage) / 100;
2840 nanoseconds_to_absolutetime(limittime, &abstime);
2841 ledger_set_limit(l, thread_ledgers.cpu_time, abstime, cpumon_ustackshots_trigger_pct);
2842 /*
2843 * Refill the thread's allotted CPU time every interval_ns nanoseconds.
2844 */
2845 ledger_set_period(l, thread_ledgers.cpu_time, interval_ns);
2846
2847 if (action == THREAD_CPULIMIT_EXCEPTION) {
2848 /*
2849 * We don't support programming the CPU usage monitor on a task if any of its
2850 * threads have a per-thread blocking CPU limit configured.
2851 */
2852 if (thread->options & TH_OPT_PRVT_CPULIMIT) {
2853 panic("CPU usage monitor activated, but blocking thread limit exists");
2854 }
2855
2856 /*
2857 * Make a note that this thread's CPU limit is being used for the task-wide CPU
2858 * usage monitor. We don't have to arm the callback which will trigger the
2859 * exception, because that was done for us in ledger_instantiate (because the
2860 * ledger template used has a default callback).
2861 */
2862 thread->options |= TH_OPT_PROC_CPULIMIT;
2863 } else {
2864 /*
2865 * We deliberately override any CPU limit imposed by a task-wide limit (eg
2866 * CPU usage monitor).
2867 */
2868 thread->options &= ~TH_OPT_PROC_CPULIMIT;
2869
2870 thread->options |= TH_OPT_PRVT_CPULIMIT;
2871 /* The per-thread ledger template by default has a callback for CPU time */
2872 ledger_disable_callback(l, thread_ledgers.cpu_time);
2873 ledger_set_action(l, thread_ledgers.cpu_time, LEDGER_ACTION_BLOCK);
2874 }
2875
2876 return 0;
2877 }
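
/*
 * Illustrative sketch (disabled, hypothetical caller): applying a blocking 50%
 * limit over a 1-second interval using the routine above. Internally this
 * yields limittime = (NSEC_PER_SEC * 50) / 100, i.e. 500ms of CPU time per
 * refill period; with THREAD_CPULIMIT_BLOCK the ledger blocks the thread once
 * that balance is consumed within the interval.
 */
#if 0
static int
example_apply_blocking_limit(void)
{
	return thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, 50, 1 * NSEC_PER_SEC);
}
#endif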
2878
2879 void
2880 thread_sched_call(
2881 thread_t thread,
2882 sched_call_t call)
2883 {
2884 assert((thread->state & TH_WAIT_REPORT) == 0);
2885 thread->sched_call = call;
2886 }
2887
2888 uint64_t
2889 thread_tid(
2890 thread_t thread)
2891 {
2892 return thread != THREAD_NULL? thread->thread_id: 0;
2893 }
2894
2895 uint64_t
2896 uthread_tid(
2897 struct uthread *uth)
2898 {
2899 if (uth) {
2900 return thread_tid(get_machthread(uth));
2901 }
2902 return 0;
2903 }
2904
2905 uint16_t
2906 thread_set_tag(thread_t th, uint16_t tag)
2907 {
2908 return thread_set_tag_internal(th, tag);
2909 }
2910
2911 uint16_t
2912 thread_get_tag(thread_t th)
2913 {
2914 return thread_get_tag_internal(th);
2915 }
2916
2917 uint64_t
2918 thread_last_run_time(thread_t th)
2919 {
2920 return th->last_run_time;
2921 }
2922
2923 /*
2924 * Shared resource contention management
2925 *
2926 * The scheduler attempts to load balance the shared resource intensive
2927 * workloads across clusters to ensure that the resource is not heavily
2928 * contended. The kernel relies on external agents (userspace or
2929 * performance controller) to identify shared resource heavy threads.
2930 * The load balancing is achieved based on the scheduler configuration
2931 * enabled on the platform.
2932 */
2933
2934
2935 #if CONFIG_SCHED_EDGE
2936
2937 /*
2938 * On the Edge scheduler, the load balancing is achieved by looking
2939 * at cluster level shared resource loads and migrating resource heavy
2940 * threads dynamically to under utilized cluster. Therefore, when a
2941 * thread is indicated as a resource heavy thread, the policy set
2942 * routine simply adds a flag to the thread which is looked at by
2943 * the scheduler on thread migration decisions.
2944 */
2945
2946 boolean_t
2947 thread_shared_rsrc_policy_get(thread_t thread, cluster_shared_rsrc_type_t type)
2948 {
2949 return thread->th_shared_rsrc_heavy_user[type] || thread->th_shared_rsrc_heavy_perf_control[type];
2950 }
2951
2952 __options_decl(sched_edge_rsrc_heavy_thread_state, uint32_t, {
2953 SCHED_EDGE_RSRC_HEAVY_THREAD_SET = 1,
2954 SCHED_EDGE_RSRC_HEAVY_THREAD_CLR = 2,
2955 });
2956
2957 kern_return_t
2958 thread_shared_rsrc_policy_set(thread_t thread, __unused uint32_t index, cluster_shared_rsrc_type_t type, shared_rsrc_policy_agent_t agent)
2959 {
2960 spl_t s = splsched();
2961 thread_lock(thread);
2962
2963 bool user = (agent == SHARED_RSRC_POLICY_AGENT_DISPATCH) || (agent == SHARED_RSRC_POLICY_AGENT_SYSCTL);
2964 bool *thread_flags = (user) ? thread->th_shared_rsrc_heavy_user : thread->th_shared_rsrc_heavy_perf_control;
2965 if (thread_flags[type]) {
2966 thread_unlock(thread);
2967 splx(s);
2968 return KERN_FAILURE;
2969 }
2970
2971 thread_flags[type] = true;
2972 thread_unlock(thread);
2973 splx(s);
2974
2975 KDBG(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_RSRC_HEAVY_THREAD) | DBG_FUNC_NONE, SCHED_EDGE_RSRC_HEAVY_THREAD_SET, thread_tid(thread), type, agent);
2976 if (thread == current_thread()) {
2977 if (agent == SHARED_RSRC_POLICY_AGENT_PERFCTL_QUANTUM) {
2978 ast_on(AST_PREEMPT);
2979 } else {
2980 assert(agent != SHARED_RSRC_POLICY_AGENT_PERFCTL_CSW);
2981 thread_block(THREAD_CONTINUE_NULL);
2982 }
2983 }
2984 return KERN_SUCCESS;
2985 }
2986
2987 kern_return_t
2988 thread_shared_rsrc_policy_clear(thread_t thread, cluster_shared_rsrc_type_t type, shared_rsrc_policy_agent_t agent)
2989 {
2990 spl_t s = splsched();
2991 thread_lock(thread);
2992
2993 bool user = (agent == SHARED_RSRC_POLICY_AGENT_DISPATCH) || (agent == SHARED_RSRC_POLICY_AGENT_SYSCTL);
2994 bool *thread_flags = (user) ? thread->th_shared_rsrc_heavy_user : thread->th_shared_rsrc_heavy_perf_control;
2995 if (!thread_flags[type]) {
2996 thread_unlock(thread);
2997 splx(s);
2998 return KERN_FAILURE;
2999 }
3000
3001 thread_flags[type] = false;
3002 thread_unlock(thread);
3003 splx(s);
3004
3005 KDBG(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_RSRC_HEAVY_THREAD) | DBG_FUNC_NONE, SCHED_EDGE_RSRC_HEAVY_THREAD_CLR, thread_tid(thread), type, agent);
3006 if (thread == current_thread()) {
3007 if (agent == SHARED_RSRC_POLICY_AGENT_PERFCTL_QUANTUM) {
3008 ast_on(AST_PREEMPT);
3009 } else {
3010 assert(agent != SHARED_RSRC_POLICY_AGENT_PERFCTL_CSW);
3011 thread_block(THREAD_CONTINUE_NULL);
3012 }
3013 }
3014 return KERN_SUCCESS;
3015 }
3016
3017 #else /* CONFIG_SCHED_EDGE */
3018
3019 /*
3020 * On non-Edge schedulers, the shared resource contention
3021 * is managed by simply binding threads to specific clusters
3022 * based on the worker index passed by the agents that mark
3023 * threads as resource heavy. The thread binding
3024 * approach does not provide any rebalancing opportunities;
3025 * it can also suffer from scheduling delays if the cluster
3026 * where the thread is bound is contended.
3027 */
3028
3029 boolean_t
3030 thread_shared_rsrc_policy_get(__unused thread_t thread, __unused cluster_shared_rsrc_type_t type)
3031 {
3032 return false;
3033 }
3034
3035 kern_return_t
3036 thread_shared_rsrc_policy_set(thread_t thread, uint32_t index, __unused cluster_shared_rsrc_type_t type, __unused shared_rsrc_policy_agent_t agent)
3037 {
3038 return thread_bind_cluster_id(thread, index, THREAD_BIND_SOFT | THREAD_BIND_ELIGIBLE_ONLY);
3039 }
3040
3041 kern_return_t
3042 thread_shared_rsrc_policy_clear(thread_t thread, __unused cluster_shared_rsrc_type_t type, __unused shared_rsrc_policy_agent_t agent)
3043 {
3044 return thread_bind_cluster_id(thread, 0, THREAD_UNBIND);
3045 }
3046
3047 #endif /* CONFIG_SCHED_EDGE */
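
/*
 * Illustrative sketch (disabled, hypothetical caller): marking the current
 * thread as a heavy user of a shared cluster resource on behalf of userspace,
 * then clearing the designation when the phase of work completes. On Edge
 * scheduler configurations this only tags the thread for migration decisions;
 * elsewhere it soft-binds the thread to the cluster named by the worker index.
 */
#if 0
static void
example_mark_rsrc_heavy_phase(uint32_t worker_index, cluster_shared_rsrc_type_t type)
{
	thread_t thread = current_thread();

	if (thread_shared_rsrc_policy_set(thread, worker_index, type,
	    SHARED_RSRC_POLICY_AGENT_SYSCTL) == KERN_SUCCESS) {
		/* ... resource-heavy work ... */
		thread_shared_rsrc_policy_clear(thread, type,
		    SHARED_RSRC_POLICY_AGENT_SYSCTL);
	}
}
#endif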
3048
3049 uint64_t
3050 thread_dispatchqaddr(
3051 thread_t thread)
3052 {
3053 uint64_t dispatchqueue_addr;
3054 uint64_t thread_handle;
3055 task_t task;
3056
3057 if (thread == THREAD_NULL) {
3058 return 0;
3059 }
3060
3061 thread_handle = thread->machine.cthread_self;
3062 if (thread_handle == 0) {
3063 return 0;
3064 }
3065
3066 task = get_threadtask(thread);
3067 if (thread->inspection == TRUE) {
3068 dispatchqueue_addr = thread_handle + get_task_dispatchqueue_offset(task);
3069 } else if (task->bsd_info) {
3070 dispatchqueue_addr = thread_handle + get_dispatchqueue_offset_from_proc(task->bsd_info);
3071 } else {
3072 dispatchqueue_addr = 0;
3073 }
3074
3075 return dispatchqueue_addr;
3076 }
3077
3078
3079 uint64_t
3080 thread_wqquantum_addr(thread_t thread)
3081 {
3082 uint64_t thread_handle;
3083 task_t task;
3084
3085 if (thread == THREAD_NULL) {
3086 return 0;
3087 }
3088
3089 thread_handle = thread->machine.cthread_self;
3090 if (thread_handle == 0) {
3091 return 0;
3092 }
3093 task = get_threadtask(thread);
3094
3095 uint64_t wq_quantum_expiry_offset = get_wq_quantum_offset_from_proc(task->bsd_info);
3096 if (wq_quantum_expiry_offset == 0) {
3097 return 0;
3098 }
3099
3100 return wq_quantum_expiry_offset + thread_handle;
3101 }
3102
3103 uint64_t
3104 thread_rettokern_addr(
3105 thread_t thread)
3106 {
3107 uint64_t rettokern_addr;
3108 uint64_t rettokern_offset;
3109 uint64_t thread_handle;
3110 task_t task;
3111
3112 if (thread == THREAD_NULL) {
3113 return 0;
3114 }
3115
3116 thread_handle = thread->machine.cthread_self;
3117 if (thread_handle == 0) {
3118 return 0;
3119 }
3120 task = get_threadtask(thread);
3121
3122 if (task->bsd_info) {
3123 rettokern_offset = get_return_to_kernel_offset_from_proc(task->bsd_info);
3124
3125 /* Return 0 if return to kernel offset is not initialized. */
3126 if (rettokern_offset == 0) {
3127 rettokern_addr = 0;
3128 } else {
3129 rettokern_addr = thread_handle + rettokern_offset;
3130 }
3131 } else {
3132 rettokern_addr = 0;
3133 }
3134
3135 return rettokern_addr;
3136 }
3137
3138 /*
3139 * Export routines to other components for things that are done as macros
3140 * within the osfmk component.
3141 */
3142
3143 void
3144 thread_mtx_lock(thread_t thread)
3145 {
3146 lck_mtx_lock(&thread->mutex);
3147 }
3148
3149 void
3150 thread_mtx_unlock(thread_t thread)
3151 {
3152 lck_mtx_unlock(&thread->mutex);
3153 }
3154
3155 void
3156 thread_reference(
3157 thread_t thread)
3158 {
3159 if (thread != THREAD_NULL) {
3160 zone_id_require(ZONE_ID_THREAD, sizeof(struct thread), thread);
3161 os_ref_retain_raw(&thread->ref_count, &thread_refgrp);
3162 }
3163 }
3164
3165 void
3166 thread_require(thread_t thread)
3167 {
3168 zone_id_require(ZONE_ID_THREAD, sizeof(struct thread), thread);
3169 }
3170
3171 #undef thread_should_halt
3172
3173 boolean_t
3174 thread_should_halt(
3175 thread_t th)
3176 {
3177 return thread_should_halt_fast(th);
3178 }
3179
3180 /*
3181 * thread_set_voucher_name - reset the voucher port name bound to this thread
3182 *
3183 * Conditions: nothing locked
3184 */
3185
3186 kern_return_t
3187 thread_set_voucher_name(mach_port_name_t voucher_name)
3188 {
3189 thread_t thread = current_thread();
3190 ipc_voucher_t new_voucher = IPC_VOUCHER_NULL;
3191 ipc_voucher_t voucher;
3192 ledger_t bankledger = NULL;
3193 struct thread_group *banktg = NULL;
3194 uint32_t persona_id = 0;
3195
3196 if (MACH_PORT_DEAD == voucher_name) {
3197 return KERN_INVALID_RIGHT;
3198 }
3199
3200 /*
3201 * aggressively convert to voucher reference
3202 */
3203 if (MACH_PORT_VALID(voucher_name)) {
3204 new_voucher = convert_port_name_to_voucher(voucher_name);
3205 if (IPC_VOUCHER_NULL == new_voucher) {
3206 return KERN_INVALID_ARGUMENT;
3207 }
3208 }
3209 bank_get_bank_ledger_thread_group_and_persona(new_voucher, &bankledger, &banktg, &persona_id);
3210
3211 thread_mtx_lock(thread);
3212 voucher = thread->ith_voucher;
3213 thread->ith_voucher_name = voucher_name;
3214 thread->ith_voucher = new_voucher;
3215 thread_mtx_unlock(thread);
3216
3217 bank_swap_thread_bank_ledger(thread, bankledger);
3218 #if CONFIG_THREAD_GROUPS
3219 thread_group_set_bank(thread, banktg);
3220 #endif /* CONFIG_THREAD_GROUPS */
3221
3222 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3223 MACHDBG_CODE(DBG_MACH_IPC, MACH_THREAD_SET_VOUCHER) | DBG_FUNC_NONE,
3224 (uintptr_t)thread_tid(thread),
3225 (uintptr_t)voucher_name,
3226 VM_KERNEL_ADDRPERM((uintptr_t)new_voucher),
3227 persona_id, 0);
3228
3229 if (IPC_VOUCHER_NULL != voucher) {
3230 ipc_voucher_release(voucher);
3231 }
3232
3233 return KERN_SUCCESS;
3234 }
3235
3236 /*
3237 * thread_get_mach_voucher - return a voucher reference for the specified thread voucher
3238 *
3239 * Conditions: nothing locked
3240 *
3241 * NOTE: At the moment, there is no distinction between the current and effective
3242 * vouchers because we only set them at the thread level currently.
3243 */
3244 kern_return_t
3245 thread_get_mach_voucher(
3246 thread_act_t thread,
3247 mach_voucher_selector_t __unused which,
3248 ipc_voucher_t *voucherp)
3249 {
3250 ipc_voucher_t voucher;
3251
3252 if (THREAD_NULL == thread) {
3253 return KERN_INVALID_ARGUMENT;
3254 }
3255
3256 thread_mtx_lock(thread);
3257 voucher = thread->ith_voucher;
3258
3259 if (IPC_VOUCHER_NULL != voucher) {
3260 ipc_voucher_reference(voucher);
3261 thread_mtx_unlock(thread);
3262 *voucherp = voucher;
3263 return KERN_SUCCESS;
3264 }
3265
3266 thread_mtx_unlock(thread);
3267
3268 *voucherp = IPC_VOUCHER_NULL;
3269 return KERN_SUCCESS;
3270 }
3271
3272 /*
3273 * thread_set_mach_voucher - set a voucher reference for the specified thread voucher
3274 *
3275 * Conditions: caller holds a reference on the voucher.
3276 * nothing locked.
3277 *
3278 * We grab another reference to the voucher and bind it to the thread.
3279 * The old voucher reference associated with the thread is
3280 * discarded.
3281 */
3282 kern_return_t
3283 thread_set_mach_voucher(
3284 thread_t thread,
3285 ipc_voucher_t voucher)
3286 {
3287 ipc_voucher_t old_voucher;
3288 ledger_t bankledger = NULL;
3289 struct thread_group *banktg = NULL;
3290 uint32_t persona_id = 0;
3291
3292 if (THREAD_NULL == thread) {
3293 return KERN_INVALID_ARGUMENT;
3294 }
3295
3296 bank_get_bank_ledger_thread_group_and_persona(voucher, &bankledger, &banktg, &persona_id);
3297
3298 thread_mtx_lock(thread);
3299 /*
3300 * Once the thread is started, we will look at `ith_voucher` without
3301 * holding any lock.
3302 *
3303 * Setting the voucher hence can only be done by current_thread() or
3304 * before it started. "started" flips under the thread mutex and must be
3305 * tested under it too.
3306 */
3307 if (thread != current_thread() && thread->started) {
3308 thread_mtx_unlock(thread);
3309 return KERN_INVALID_ARGUMENT;
3310 }
3311
3312 ipc_voucher_reference(voucher);
3313 old_voucher = thread->ith_voucher;
3314 thread->ith_voucher = voucher;
3315 thread->ith_voucher_name = MACH_PORT_NULL;
3316 thread_mtx_unlock(thread);
3317
3318 bank_swap_thread_bank_ledger(thread, bankledger);
3319 #if CONFIG_THREAD_GROUPS
3320 thread_group_set_bank(thread, banktg);
3321 #endif /* CONFIG_THREAD_GROUPS */
3322
3323 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3324 MACHDBG_CODE(DBG_MACH_IPC, MACH_THREAD_SET_VOUCHER) | DBG_FUNC_NONE,
3325 (uintptr_t)thread_tid(thread),
3326 (uintptr_t)MACH_PORT_NULL,
3327 VM_KERNEL_ADDRPERM((uintptr_t)voucher),
3328 persona_id, 0);
3329
3330 ipc_voucher_release(old_voucher);
3331
3332 return KERN_SUCCESS;
3333 }
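
/*
 * Illustrative sketch (disabled, hypothetical helper): the reference
 * discipline for the two routines above. thread_get_mach_voucher() returns a
 * +1 voucher reference that the caller must release; thread_set_mach_voucher()
 * takes its own reference, so the caller keeps, and remains responsible for,
 * the reference it passed in.
 */
#if 0
static void
example_copy_voucher(thread_t from, thread_t to)
{
	ipc_voucher_t voucher = IPC_VOUCHER_NULL;

	/* selector is currently ignored, see the note above */
	if (thread_get_mach_voucher(from, 0, &voucher) == KERN_SUCCESS &&
	    voucher != IPC_VOUCHER_NULL) {
		/* only legal if `to` is current_thread() or not yet started */
		(void)thread_set_mach_voucher(to, voucher);
		ipc_voucher_release(voucher);
	}
}
#endif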
3334
3335 /*
3336 * thread_swap_mach_voucher - swap a voucher reference for the specified thread voucher
3337 *
3338 * Conditions: caller holds a reference on the new and presumed old voucher(s).
3339 * nothing locked.
3340 *
3341 * This function is no longer supported.
3342 */
3343 kern_return_t
3344 thread_swap_mach_voucher(
3345 __unused thread_t thread,
3346 __unused ipc_voucher_t new_voucher,
3347 ipc_voucher_t *in_out_old_voucher)
3348 {
3349 /*
3350 * Currently this function is only called from a MIG generated
3351 * routine which doesn't release the reference on the voucher
3352 * addressed by in_out_old_voucher. To avoid leaking this reference,
3353 * a call to release it has been added here.
3354 */
3355 ipc_voucher_release(*in_out_old_voucher);
3356 return KERN_NOT_SUPPORTED;
3357 }
3358
3359 /*
3360 * thread_get_current_voucher_origin_pid - get the pid of the originator of the current voucher.
3361 */
3362 kern_return_t
3363 thread_get_current_voucher_origin_pid(
3364 int32_t *pid)
3365 {
3366 uint32_t buf_size;
3367 kern_return_t kr;
3368 thread_t thread = current_thread();
3369
3370 buf_size = sizeof(*pid);
3371 kr = mach_voucher_attr_command(thread->ith_voucher,
3372 MACH_VOUCHER_ATTR_KEY_BANK,
3373 BANK_ORIGINATOR_PID,
3374 NULL,
3375 0,
3376 (mach_voucher_attr_content_t)pid,
3377 &buf_size);
3378
3379 return kr;
3380 }
3381
3382 #if CONFIG_THREAD_GROUPS
3383 /*
3384 * Returns the current thread's voucher-carried thread group
3385 *
3386 * Reference is borrowed from this being the current voucher, so it does NOT
3387 * return a reference to the group.
3388 */
3389 struct thread_group *
3390 thread_get_current_voucher_thread_group(thread_t thread)
3391 {
3392 assert(thread == current_thread());
3393
3394 if (thread->ith_voucher == NULL) {
3395 return NULL;
3396 }
3397
3398 ledger_t bankledger = NULL;
3399 struct thread_group *banktg = NULL;
3400
3401 bank_get_bank_ledger_thread_group_and_persona(thread->ith_voucher, &bankledger, &banktg, NULL);
3402
3403 return banktg;
3404 }
3405
3406 #endif /* CONFIG_THREAD_GROUPS */
3407
3408 extern struct workqueue *
3409 proc_get_wqptr(void *proc);
3410
3411 static bool
3412 task_supports_cooperative_workqueue(task_t task)
3413 {
3414 assert(task == current_task());
3415 if (task->bsd_info == NULL) {
3416 return false;
3417 }
3418
3419 uint64_t wq_quantum_expiry_offset = get_wq_quantum_offset_from_proc(task->bsd_info);
3420 /* userspace may not have called workq_open yet */
3421 struct workqueue *wq = proc_get_wqptr(task->bsd_info);
3422
3423 return (wq != NULL) && (wq_quantum_expiry_offset != 0);
3424 }
3425
3426 /* Not safe to call from scheduler paths - should only be called on self */
3427 bool
3428 thread_supports_cooperative_workqueue(thread_t thread)
3429 {
3430 struct uthread *uth = get_bsdthread_info(thread);
3431 task_t task = get_threadtask(thread);
3432
3433 assert(thread == current_thread());
3434
3435 return task_supports_cooperative_workqueue(task) &&
3436 bsdthread_part_of_cooperative_workqueue(uth);
3437 }
3438
3439 static inline bool
3440 thread_has_armed_workqueue_quantum(thread_t thread)
3441 {
3442 return thread->workq_quantum_deadline != 0;
3443 }
3444
3445 /*
3446 * The workq quantum is a lazy timer that is evaluated at 2 specific times in
3447 * the scheduler:
3448 *
3449 * - context switch time
3450 * - scheduler quantum expiry time.
3451 *
3452 * We're currently expressing the workq quantum with a 0.5 scale factor of the
3453 * scheduler quantum. It is possible that if the workq quantum is rearmed
3454 * shortly after the scheduler quantum begins, we could have a large delay
3455 * between when the workq quantum next expires and when it actually is noticed.
3456 *
3457 * A potential future improvement for the wq quantum expiry logic is to compare
3458 * it to the next actual scheduler quantum deadline and expire it if it is
3459 * within a certain leeway.
3460 */
3461 static inline uint64_t
3462 thread_workq_quantum_size(thread_t thread)
3463 {
3464 return (uint64_t) (SCHED(initial_quantum_size)(thread) / 2);
3465 }
3466
3467 /*
3468 * Always called by thread on itself - either at AST boundary after processing
3469 * an existing quantum expiry, or when a new quantum is armed before the thread
3470 * goes out to userspace to handle a thread request
3471 */
3472 void
3473 thread_arm_workqueue_quantum(thread_t thread)
3474 {
3475 /*
3476 * If the task is not opted into wq quantum notification, or if the thread
3477 * is not part of the cooperative workqueue, don't even bother with tracking
3478 * the quantum or calculating expiry
3479 */
3480 if (!thread_supports_cooperative_workqueue(thread)) {
3481 assert(thread->workq_quantum_deadline == 0);
3482 return;
3483 }
3484
3485 assert(current_thread() == thread);
3486 assert(thread_get_tag(thread) & THREAD_TAG_WORKQUEUE);
3487
3488 uint64_t current_runtime = thread_get_runtime_self();
3489 uint64_t deadline = thread_workq_quantum_size(thread) + current_runtime;
3490
3491 /*
3492 * The update of a workqueue quantum should always be followed by the update
3493 * of the AST - see explanation in kern/thread.h for synchronization of this
3494 * field
3495 */
3496 thread->workq_quantum_deadline = deadline;
3497
3498 /* We're arming a new quantum, clear any previous expiry notification */
3499 act_clear_astkevent(thread, AST_KEVENT_WORKQ_QUANTUM_EXPIRED);
3500
3501 WQ_TRACE(TRACE_wq_quantum_arm, current_runtime, deadline, 0, 0);
3502
3503 WORKQ_QUANTUM_HISTORY_WRITE_ENTRY(thread, thread->workq_quantum_deadline, true);
3504 }
3505
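
/*
 * Illustrative lifecycle of the workq quantum on a cooperative workqueue
 * thread (a sketch assembled from the comments in this file; the userspace
 * steps are paraphrased, not actual calls):
 *
 *	thread_arm_workqueue_quantum(current_thread());
 *	    -> return to userspace and run a thread request
 *	    -> the scheduler calls thread_evaluate_workqueue_quantum_expiry()
 *	       at context switch / quantum expiry; on expiry it sets
 *	       AST_KEVENT_WORKQ_QUANTUM_EXPIRED
 *	    -> at the AST boundary, either rearm via
 *	       thread_arm_workqueue_quantum() or, when about to park,
 *	       call thread_disarm_workqueue_quantum()
 */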

/* Called by a thread on itself when it is about to park */
void
thread_disarm_workqueue_quantum(thread_t thread)
{
	/*
	 * The update of a workqueue quantum should always be followed by the update
	 * of the AST - see explanation in kern/thread.h for synchronization of this
	 * field
	 */
	thread->workq_quantum_deadline = 0;
	act_clear_astkevent(thread, AST_KEVENT_WORKQ_QUANTUM_EXPIRED);

	WQ_TRACE(TRACE_wq_quantum_disarm, 0, 0, 0, 0);

	WORKQ_QUANTUM_HISTORY_WRITE_ENTRY(thread, thread->workq_quantum_deadline, false);
}

/*
 * This is called at context switch time on a thread that may not be self,
 * and at AST time.
 */
bool
thread_has_expired_workqueue_quantum(thread_t thread, bool should_trace)
{
	if (!thread_has_armed_workqueue_quantum(thread)) {
		return false;
	}
	/*
	 * We do not do a thread_get_runtime_self() here since this function is
	 * called from context switch time or during scheduler quantum expiry and
	 * therefore, we may not be evaluating it on the current thread/self.
	 *
	 * In addition, the timers on the thread have just been updated recently so
	 * we don't need to update them again.
	 */
	uint64_t runtime = (timer_grab(&thread->user_timer) + timer_grab(&thread->system_timer));
	bool expired = runtime > thread->workq_quantum_deadline;

	if (expired && should_trace) {
		WQ_TRACE(TRACE_wq_quantum_expired, runtime, thread->workq_quantum_deadline, 0, 0);
	}

	return expired;
}

/*
 * Called on a thread that is being context switched out or during quantum
 * expiry on self. Only called from scheduler paths.
 */
void
thread_evaluate_workqueue_quantum_expiry(thread_t thread)
{
	if (thread_has_expired_workqueue_quantum(thread, true)) {
		act_set_astkevent(thread, AST_KEVENT_WORKQ_QUANTUM_EXPIRED);
	}
}

boolean_t
thread_has_thread_name(thread_t th)
{
	if (th) {
		return bsd_hasthreadname(get_bsdthread_info(th));
	}

	/*
	 * This is an odd case; clients may set the thread name based on the lack of
	 * a name, but in this context there is no uthread to attach the name to.
	 */
	return FALSE;
}

void
thread_set_thread_name(thread_t th, const char* name)
{
	if (th && name) {
		bsd_setthreadname(get_bsdthread_info(th), name);
	}
}

void
thread_get_thread_name(thread_t th, char* name)
{
	if (!name) {
		return;
	}
	if (th) {
		bsd_getthreadname(get_bsdthread_info(th), name);
	} else {
		name[0] = '\0';
	}
}
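
/*
 * Usage sketch (the buffer size is an assumption; the BSD layer sizes thread
 * names with MAXTHREADNAMESIZE, and bsd_getthreadname expects a buffer at
 * least that large; the name string is illustrative):
 *
 *	char name[MAXTHREADNAMESIZE];
 *	thread_get_thread_name(current_thread(), name);
 *	if (name[0] == '\0') {
 *		thread_set_thread_name(current_thread(), "my.worker");
 *	}
 */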

void
thread_set_honor_qlimit(thread_t thread)
{
	thread->options |= TH_OPT_HONOR_QLIMIT;
}

void
thread_clear_honor_qlimit(thread_t thread)
{
	thread->options &= (~TH_OPT_HONOR_QLIMIT);
}

/*
 * thread_enable_send_importance - set/clear the SEND_IMPORTANCE thread option bit.
 */
void
thread_enable_send_importance(thread_t thread, boolean_t enable)
{
	if (enable == TRUE) {
		thread->options |= TH_OPT_SEND_IMPORTANCE;
	} else {
		thread->options &= ~TH_OPT_SEND_IMPORTANCE;
	}
}

/*
 * thread_set_allocation_name - set the allocation name used to attribute the
 * current thread's kernel allocations, returning the previous name.
 */
kern_allocation_name_t
thread_set_allocation_name(kern_allocation_name_t new_name)
{
	kern_allocation_name_t ret;
	thread_kernel_state_t kstate = thread_get_kernel_state(current_thread());
	ret = kstate->allocation_name;
	/* first-in wins: only overwrite when clearing or when no name is set */
	if (!new_name || !kstate->allocation_name) {
		kstate->allocation_name = new_name;
	}
	return ret;
}
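
/*
 * Typical save/restore pattern implied by the first-in-wins rule above
 * (a sketch, not a call site from this file):
 *
 *	kern_allocation_name_t prev = thread_set_allocation_name(name);
 *	// ... allocations here are attributed to `name` (unless an outer
 *	// caller had already installed a name, in which case that one wins
 *	// and the restore below is a harmless no-op) ...
 *	thread_set_allocation_name(prev);
 */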

void *
thread_iokit_tls_get(uint32_t index)
{
	assert(index < THREAD_SAVE_IOKIT_TLS_COUNT);
	return current_thread()->saved.iokit.tls[index];
}

void
thread_iokit_tls_set(uint32_t index, void * data)
{
	assert(index < THREAD_SAVE_IOKIT_TLS_COUNT);
	current_thread()->saved.iokit.tls[index] = data;
}
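
/*
 * These expose a small per-thread slot array to IOKit. Usage sketch (the
 * slot index and the io_ctx type are hypothetical, for illustration only):
 *
 *	struct io_ctx *ctx = ...;
 *	thread_iokit_tls_set(0, ctx);
 *	// ... later, on the same thread ...
 *	struct io_ctx *saved = thread_iokit_tls_get(0);
 */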

uint64_t
thread_get_last_wait_duration(thread_t thread)
{
	return thread->last_made_runnable_time - thread->last_run_time;
}

integer_t
thread_kern_get_pri(thread_t thr)
{
	return thr->base_pri;
}

void
thread_kern_set_pri(thread_t thr, integer_t pri)
{
	sched_set_kernel_thread_priority(thr, pri);
}

integer_t
thread_kern_get_kernel_maxpri(void)
{
	return MAXPRI_KERNEL;
}

/*
 * thread_port_with_flavor_no_senders
 *
 * Called whenever the Mach port system detects no-senders on
 * the thread inspect or read port. These ports are allocated lazily and
 * should be deallocated here when there are no senders remaining.
 */
static void
thread_port_with_flavor_no_senders(
	ipc_port_t          port,
	mach_port_mscount_t mscount __unused)
{
	thread_ro_t tro;
	thread_t thread;
	mach_thread_flavor_t flavor;
	ipc_kobject_type_t kotype;

	ip_mq_lock(port);
	if (port->ip_srights > 0) {
		ip_mq_unlock(port);
		return;
	}
	kotype = ip_kotype(port);
	assert((IKOT_THREAD_READ == kotype) || (IKOT_THREAD_INSPECT == kotype));
	thread = ipc_kobject_get_locked(port, kotype);
	if (thread != THREAD_NULL) {
		thread_reference(thread);
	}
	ip_mq_unlock(port);

	if (thread == THREAD_NULL) {
		/* The thread is exiting or disabled; it will eventually deallocate the port */
		return;
	}

	if (kotype == IKOT_THREAD_READ) {
		flavor = THREAD_FLAVOR_READ;
	} else {
		flavor = THREAD_FLAVOR_INSPECT;
	}

	thread_mtx_lock(thread);
	ip_mq_lock(port);

	/*
	 * If the port is no longer active, then ipc_thread_terminate() ran
	 * and destroyed the kobject already. Just deallocate the thread
	 * ref we took and go away.
	 *
	 * It is also possible that several nsrequests are in flight,
	 * only one shall NULL-out the port entry, and this is the one
	 * that gets to dealloc the port.
	 *
	 * Check for a stale no-senders notification. A call to any function
	 * that vends out send rights to this port could resurrect it between
	 * this notification being generated and actually being handled here.
	 */
	tro = get_thread_ro(thread);
	if (!ip_active(port) ||
	    tro->tro_ports[flavor] != port ||
	    port->ip_srights > 0) {
		ip_mq_unlock(port);
		thread_mtx_unlock(thread);
		thread_deallocate(thread);
		return;
	}

	assert(tro->tro_ports[flavor] == port);
	zalloc_ro_clear_field(ZONE_ID_THREAD_RO, tro, tro_ports[flavor]);
	thread_mtx_unlock(thread);

	ipc_kobject_dealloc_port_and_unlock(port, 0, kotype);

	thread_deallocate(thread);
}
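
/*
 * Timeline of the stale-notification race the re-check above guards against
 * (a sketch):
 *
 *	1. the port's send right count drops to zero and a no-senders
 *	   notification is queued
 *	2. before this handler runs, some caller (e.g. a routine that vends
 *	   out send rights to the read/inspect port) resurrects the port
 *	3. this handler finally runs, observes ip_srights > 0 or a replaced
 *	   tro_ports[flavor] entry, and bails out instead of destroying a
 *	   port that is live again
 */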

/*
 * The 'thread_region_page_shift' is used by footprint
 * to specify the page size that it will use to
 * accomplish its accounting work on the task being
 * inspected. Since footprint uses a thread for each
 * task that it works on, we need to keep the page_shift
 * on a per-thread basis.
 */

int
thread_self_region_page_shift(void)
{
	/*
	 * Return the page shift that this thread
	 * would like to use for its accounting work.
	 */
	return current_thread()->thread_region_page_shift;
}

void
thread_self_region_page_shift_set(
	int pgshift)
{
	/*
	 * Set the page shift that this thread
	 * would like to use for its accounting work
	 * when dealing with a task.
	 */
	current_thread()->thread_region_page_shift = pgshift;
}
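
/*
 * Usage sketch (the shift values are illustrative: 12 corresponds to 4 KiB
 * pages, 14 to 16 KiB; treating 0 as "unset/default" is an assumption):
 *
 *	int prev = thread_self_region_page_shift();
 *	thread_self_region_page_shift_set(12);
 *	// ... footprint-style accounting on the inspected task now uses
 *	// 4 KiB page units ...
 *	thread_self_region_page_shift_set(prev);
 */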

#if CONFIG_DTRACE
uint32_t
dtrace_get_thread_predcache(thread_t thread)
{
	if (thread != THREAD_NULL) {
		return thread->t_dtrace_predcache;
	} else {
		return 0;
	}
}

int64_t
dtrace_get_thread_vtime(thread_t thread)
{
	if (thread != THREAD_NULL) {
		return thread->t_dtrace_vtime;
	} else {
		return 0;
	}
}

int
dtrace_get_thread_last_cpu_id(thread_t thread)
{
	if ((thread != THREAD_NULL) && (thread->last_processor != PROCESSOR_NULL)) {
		return thread->last_processor->cpu_id;
	} else {
		return -1;
	}
}

int64_t
dtrace_get_thread_tracing(thread_t thread)
{
	if (thread != THREAD_NULL) {
		return thread->t_dtrace_tracing;
	} else {
		return 0;
	}
}

uint16_t
dtrace_get_thread_inprobe(thread_t thread)
{
	if (thread != THREAD_NULL) {
		return thread->t_dtrace_inprobe;
	} else {
		return 0;
	}
}

vm_offset_t
thread_get_kernel_stack(thread_t thread)
{
	if (thread != THREAD_NULL) {
		return thread->kernel_stack;
	} else {
		return 0;
	}
}

#if KASAN
struct kasan_thread_data *
kasan_get_thread_data(thread_t thread)
{
	return &thread->kasan_data;
}
#endif /* KASAN */

#if CONFIG_KCOV
kcov_thread_data_t *
kcov_get_thread_data(thread_t thread)
{
	return &thread->kcov_data;
}
#endif /* CONFIG_KCOV */

#if CONFIG_STKSZ
/*
 * Returns the base of a thread's kernel stack.
 *
 * The coverage sanitizer instruments every function, including those that
 * participate in stack handoff between threads. There is a window in which the
 * CPU still holds old values but the stack has already been handed over to
 * another thread. In this window kernel_stack is 0 but the CPU still uses the
 * original stack (until a context switch occurs). The original kernel_stack
 * value is preserved in kst_stack during this window.
 */
vm_offset_t
kcov_stksz_get_thread_stkbase(thread_t thread)
{
	if (thread != THREAD_NULL) {
		kcov_thread_data_t *data = kcov_get_thread_data(thread);
		if (data->ktd_stksz.kst_stack) {
			return data->ktd_stksz.kst_stack;
		} else {
			return thread->kernel_stack;
		}
	} else {
		return 0;
	}
}

vm_offset_t
kcov_stksz_get_thread_stksize(thread_t thread)
{
	if (thread != THREAD_NULL) {
		return kernel_stack_size;
	} else {
		return 0;
	}
}

void
kcov_stksz_set_thread_stack(thread_t thread, vm_offset_t stack)
{
	kcov_thread_data_t *data = kcov_get_thread_data(thread);
	data->ktd_stksz.kst_stack = stack;
}
#endif /* CONFIG_STKSZ */

int64_t
dtrace_calc_thread_recent_vtime(thread_t thread)
{
	if (thread != THREAD_NULL) {
		processor_t processor = current_processor();
		uint64_t abstime = mach_absolute_time();
		timer_t timer;

		timer = processor->thread_timer;

		return timer_grab(&(thread->system_timer)) + timer_grab(&(thread->user_timer)) +
		       (abstime - timer->tstamp); /* XXX need interrupts off to prevent missed time? */
	} else {
		return 0;
	}
}
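
/*
 * In other words (a restatement of the computation above):
 *
 *	vtime = user_timer + system_timer + (mach_absolute_time() - tstamp)
 *
 * where tstamp is when the processor's current thread timer last started, so
 * the still-accumulating interval is credited without waiting for the next
 * timer checkpoint.
 */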

void
dtrace_set_thread_predcache(thread_t thread, uint32_t predcache)
{
	if (thread != THREAD_NULL) {
		thread->t_dtrace_predcache = predcache;
	}
}

void
dtrace_set_thread_vtime(thread_t thread, int64_t vtime)
{
	if (thread != THREAD_NULL) {
		thread->t_dtrace_vtime = vtime;
	}
}

void
dtrace_set_thread_tracing(thread_t thread, int64_t accum)
{
	if (thread != THREAD_NULL) {
		thread->t_dtrace_tracing = accum;
	}
}

void
dtrace_set_thread_inprobe(thread_t thread, uint16_t inprobe)
{
	if (thread != THREAD_NULL) {
		thread->t_dtrace_inprobe = inprobe;
	}
}

vm_offset_t
dtrace_set_thread_recover(thread_t thread, vm_offset_t recover)
{
	vm_offset_t prev = 0;

	if (thread != THREAD_NULL) {
		prev = thread->recover;
		thread->recover = recover;
	}
	return prev;
}

vm_offset_t
dtrace_sign_and_set_thread_recover(thread_t thread, vm_offset_t recover)
{
#if defined(HAS_APPLE_PAC)
	return dtrace_set_thread_recover(thread,
	           (vm_address_t)ptrauth_sign_unauthenticated((void *)recover,
	           ptrauth_key_function_pointer,
	           ptrauth_blend_discriminator(&thread->recover, PAC_DISCRIMINATOR_RECOVER)));
#else /* defined(HAS_APPLE_PAC) */
	return dtrace_set_thread_recover(thread, recover);
#endif /* defined(HAS_APPLE_PAC) */
}

void
dtrace_thread_bootstrap(void)
{
	task_t task = current_task();

	if (task->thread_count == 1) {
		thread_t thread = current_thread();
		if (thread->t_dtrace_flags & TH_DTRACE_EXECSUCCESS) {
			thread->t_dtrace_flags &= ~TH_DTRACE_EXECSUCCESS;
			DTRACE_PROC(exec__success);
			KDBG(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXEC),
			    task_pid(task));
		}
		DTRACE_PROC(start);
	}
	DTRACE_PROC(lwp__start);
}

void
dtrace_thread_didexec(thread_t thread)
{
	thread->t_dtrace_flags |= TH_DTRACE_EXECSUCCESS;
}
#endif /* CONFIG_DTRACE */