1 /*
2 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: kern/thread.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub
61 * Date: 1986
62 *
63 * Thread management primitives implementation.
64 */
65 /*
66 * Copyright (c) 1993 The University of Utah and
67 * the Computer Systems Laboratory (CSL). All rights reserved.
68 *
69 * Permission to use, copy, modify and distribute this software and its
70 * documentation is hereby granted, provided that both the copyright
71 * notice and this permission notice appear in all copies of the
72 * software, derivative works or modified versions, and any portions
73 * thereof, and that both notices appear in supporting documentation.
74 *
75 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
76 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
77 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
78 *
79 * CSL requests users of this software to return to [email protected] any
80 * improvements that they make and grant CSL redistribution rights.
81 *
82 */
83
84 #include <mach/mach_types.h>
85 #include <mach/boolean.h>
86 #include <mach/policy.h>
87 #include <mach/thread_info.h>
88 #include <mach/thread_special_ports.h>
89 #include <mach/thread_act.h>
90 #include <mach/thread_status.h>
91 #include <mach/time_value.h>
92 #include <mach/vm_param.h>
93
94 #include <machine/thread.h>
95 #include <machine/pal_routines.h>
96 #include <machine/limits.h>
97
98 #include <kern/kern_types.h>
99 #include <kern/kalloc.h>
100 #include <kern/cpu_data.h>
101 #include <kern/extmod_statistics.h>
102 #include <kern/ipc_mig.h>
103 #include <kern/ipc_tt.h>
104 #include <kern/mach_param.h>
105 #include <kern/machine.h>
106 #include <kern/misc_protos.h>
107 #include <kern/processor.h>
108 #include <kern/queue.h>
109 #include <kern/restartable.h>
110 #include <kern/sched.h>
111 #include <kern/sched_prim.h>
112 #include <kern/syscall_subr.h>
113 #include <kern/task.h>
114 #include <kern/thread.h>
115 #include <kern/thread_group.h>
116 #include <kern/coalition.h>
117 #include <kern/host.h>
118 #include <kern/zalloc.h>
119 #include <kern/assert.h>
120 #include <kern/exc_resource.h>
121 #include <kern/exc_guard.h>
122 #include <kern/telemetry.h>
123 #include <kern/policy_internal.h>
124 #include <kern/turnstile.h>
125 #include <kern/sched_clutch.h>
126 #include <kern/recount.h>
127 #include <kern/smr.h>
128 #include <kern/ast.h>
129 #include <kern/compact_id.h>
130
131 #include <corpses/task_corpse.h>
132 #include <kern/kpc.h>
133 #include <vm/vm_map_xnu.h>
134
135 #if CONFIG_PERVASIVE_CPI
136 #include <kern/monotonic.h>
137 #include <machine/monotonic.h>
138 #endif /* CONFIG_PERVASIVE_CPI */
139
140 #include <ipc/ipc_kmsg.h>
141 #include <ipc/ipc_port.h>
142 #include <bank/bank_types.h>
143
144 #include <vm/vm_kern_xnu.h>
145 #include <vm/vm_pageout_xnu.h>
146
147 #include <sys/kdebug.h>
148 #include <sys/bsdtask_info.h>
149 #include <sys/reason.h>
150 #include <mach/sdt.h>
151 #include <san/kasan.h>
152 #include <san/kcov_stksz.h>
153
154 #include <stdatomic.h>
155
156 #if defined(HAS_APPLE_PAC)
157 #include <ptrauth.h>
158 #include <arm64/proc_reg.h>
159 #endif /* defined(HAS_APPLE_PAC) */
160
161 /*
162 * Exported interfaces
163 */
164 #include <mach/task_server.h>
165 #include <mach/thread_act_server.h>
166 #include <mach/mach_host_server.h>
167 #include <mach/host_priv_server.h>
168 #include <mach/mach_voucher_server.h>
169 #include <kern/policy_internal.h>
170
171 #if CONFIG_MACF
172 #include <security/mac_mach_internal.h>
173 #endif
174
175 #include <pthread/workqueue_trace.h>
176
177 #if CONFIG_EXCLAVES
178 #include <mach/exclaves.h>
179 #endif
180
181 LCK_GRP_DECLARE(thread_lck_grp, "thread");
182
183 static SECURITY_READ_ONLY_LATE(zone_t) thread_zone;
184 ZONE_DEFINE_ID(ZONE_ID_THREAD_RO, "threads_ro", struct thread_ro, ZC_READONLY);
185
186 static void thread_port_with_flavor_no_senders(ipc_port_t, mach_port_mscount_t);
187
188 IPC_KOBJECT_DEFINE(IKOT_THREAD_CONTROL);
189 IPC_KOBJECT_DEFINE(IKOT_THREAD_READ,
190 .iko_op_no_senders = thread_port_with_flavor_no_senders);
191 IPC_KOBJECT_DEFINE(IKOT_THREAD_INSPECT,
192 .iko_op_no_senders = thread_port_with_flavor_no_senders);
193
194 static struct mpsc_daemon_queue thread_stack_queue;
195 static struct mpsc_daemon_queue thread_terminate_queue;
196 static struct mpsc_daemon_queue thread_deallocate_queue;
197 static struct mpsc_daemon_queue thread_exception_queue;
198 static struct mpsc_daemon_queue thread_backtrace_queue;
199
200 decl_simple_lock_data(static, crashed_threads_lock);
201 static queue_head_t crashed_threads_queue;
202
203 struct thread_exception_elt {
204 struct mpsc_queue_chain link;
205 exception_type_t exception_type;
206 task_t exception_task;
207 thread_t exception_thread;
208 };
209
210 struct thread_backtrace_elt {
211 struct mpsc_queue_chain link;
212 exception_type_t exception_type;
213 kcdata_object_t obj;
214 exception_port_t exc_ports[BT_EXC_PORTS_COUNT]; /* send rights */
215 };
216
217 static SECURITY_READ_ONLY_LATE(struct thread) thread_template = {
218 #if MACH_ASSERT
219 .thread_magic = THREAD_MAGIC,
220 #endif /* MACH_ASSERT */
221 .wait_result = THREAD_WAITING,
222 .options = THREAD_ABORTSAFE,
223 .state = TH_WAIT | TH_UNINT,
224 .th_sched_bucket = TH_BUCKET_RUN,
225 .base_pri = BASEPRI_DEFAULT,
226 .realtime.deadline = UINT64_MAX,
227 .last_made_runnable_time = THREAD_NOT_RUNNABLE,
228 .last_basepri_change_time = THREAD_NOT_RUNNABLE,
229 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
230 .pri_shift = INT8_MAX,
231 #endif
232 /* timers are initialized in thread_bootstrap */
233 };
234
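/*
 * Compact thread IDs (ctids): a thread identity packed into 20 bits, small
 * enough to embed in space-constrained kernel structures. The low bits of a
 * uint32_t index the compact_id table below; ctid_nonce is presumably mixed
 * in so that raw table indices are not handed out directly.
 */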
235 #define CTID_SIZE_BIT 20
236 #define CTID_MASK ((1u << CTID_SIZE_BIT) - 1)
237 #define CTID_MAX_THREAD_NUMBER (CTID_MASK - 1)
238 static_assert(CTID_MAX_THREAD_NUMBER <= COMPACT_ID_MAX);
239
240 #ifndef __LITTLE_ENDIAN__
241 #error "ctid relies on the ls bits of uint32_t to be populated"
242 #endif
243
244 __startup_data
245 static struct thread init_thread;
246 static SECURITY_READ_ONLY_LATE(uint32_t) ctid_nonce;
247 COMPACT_ID_TABLE_DEFINE(static, ctid_table);
248
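/*
 * The thread zone element holds struct thread immediately followed (when
 * MACH_BSD is configured) by the BSD uthread, so one allocation backs both.
 */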
249 __startup_func
250 static void
thread_zone_startup(void)
252 {
253 size_t size = sizeof(struct thread);
254
255 #ifdef MACH_BSD
256 size += roundup(uthread_size, _Alignof(struct thread));
257 #endif
258 thread_zone = zone_create_ext("threads", size,
259 ZC_SEQUESTER | ZC_ZFREE_CLEARMEM, ZONE_ID_THREAD, NULL);
260 }
261 STARTUP(ZALLOC, STARTUP_RANK_FOURTH, thread_zone_startup);
262
263 static void thread_deallocate_enqueue(thread_t thread);
264 static void thread_deallocate_complete(thread_t thread);
265
266 static void ctid_table_remove(thread_t thread);
267 static void ctid_table_add(thread_t thread);
268 static void ctid_table_init(void);
269
270 #ifdef MACH_BSD
271 extern void proc_exit(void *);
272 extern mach_exception_data_type_t proc_encode_exit_exception_code(void *);
273 extern uint64_t get_dispatchqueue_offset_from_proc(void *);
274 extern uint64_t get_return_to_kernel_offset_from_proc(void *p);
275 extern uint64_t get_wq_quantum_offset_from_proc(void *);
276 extern int proc_selfpid(void);
277 extern void proc_name(int, char*, int);
278 extern char * proc_name_address(void *p);
279 exception_type_t get_exception_from_corpse_crashinfo(kcdata_descriptor_t corpse_info);
280 extern void kdebug_proc_name_args(struct proc *proc, long args[static 4]);
281 #endif /* MACH_BSD */
282
283 extern bool bsdthread_part_of_cooperative_workqueue(struct uthread *uth);
284 extern bool disable_exc_resource;
285 extern bool disable_exc_resource_during_audio;
286 extern int audio_active;
287 extern int debug_task;
288 int thread_max = CONFIG_THREAD_MAX; /* Max number of threads */
289 int task_threadmax = CONFIG_THREAD_MAX;
290
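/* Monotonically increasing thread ID source, protected by tasks_threads_lock. */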
291 static uint64_t thread_unique_id = 100;
292
293 struct _thread_ledger_indices thread_ledgers = { .cpu_time = -1 };
294 static ledger_template_t thread_ledger_template = NULL;
295 static void init_thread_ledgers(void);
296
297 #if CONFIG_JETSAM
298 void jetsam_on_ledger_cpulimit_exceeded(void);
299 #endif
300
301 extern int task_thread_soft_limit;
302
303
304 /*
305 * Level (in terms of percentage of the limit) at which the CPU usage monitor triggers telemetry.
306 *
 * (i.e., when any thread's CPU consumption exceeds 70% of the limit, start taking user
308 * stacktraces, aka micro-stackshots)
309 */
310 #define CPUMON_USTACKSHOTS_TRIGGER_DEFAULT_PCT 70
311
312 /* Percentage. Level at which we start gathering telemetry. */
313 static TUNABLE(uint8_t, cpumon_ustackshots_trigger_pct,
314 "cpumon_ustackshots_trigger_pct", CPUMON_USTACKSHOTS_TRIGGER_DEFAULT_PCT);
315 void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU(void);
316
317 #if DEVELOPMENT || DEBUG
318 TUNABLE_WRITEABLE(int, exc_resource_threads_enabled, "exc_resource_threads_enabled", 1);
319
320 void __attribute__((noinline)) SENDING_NOTIFICATION__TASK_HAS_TOO_MANY_THREADS(task_t, int);
321 #endif /* DEVELOPMENT || DEBUG */
322
323 /*
324 * The smallest interval over which we support limiting CPU consumption is 1ms
325 */
326 #define MINIMUM_CPULIMIT_INTERVAL_MS 1
327
328 os_refgrp_decl(static, thread_refgrp, "thread", NULL);
329
330 static inline void
init_thread_from_template(thread_t thread)
332 {
333 /*
334 * In general, struct thread isn't trivially-copyable, since it may
335 * contain pointers to thread-specific state. This may be enforced at
336 * compile time on architectures that store authed + diversified
337 * pointers in machine_thread.
338 *
339 * In this specific case, where we're initializing a new thread from a
340 * thread_template, we know all diversified pointers are NULL; these are
341 * safe to bitwise copy.
342 */
343 #pragma clang diagnostic push
344 #pragma clang diagnostic ignored "-Wnontrivial-memaccess"
345 memcpy(thread, &thread_template, sizeof(*thread));
346 #pragma clang diagnostic pop
347 }
348
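/*
 * Thread read-only (thread_ro) data lives in a read-only zone and can only be
 * updated through zalloc_ro_update_elem(), protecting fields such as the
 * owning task and credentials from stray kernel writes.
 */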
349 static void
thread_ro_create(task_t parent_task, thread_t th, thread_ro_t tro_tpl)
351 {
352 #if __x86_64__
353 th->t_task = parent_task;
354 #endif
355 tro_tpl->tro_owner = th;
356 tro_tpl->tro_task = parent_task;
357 th->t_tro = zalloc_ro(ZONE_ID_THREAD_RO, Z_WAITOK | Z_ZERO | Z_NOFAIL);
358 zalloc_ro_update_elem(ZONE_ID_THREAD_RO, th->t_tro, tro_tpl);
359 }
360
361 static void
thread_ro_destroy(thread_t th)
363 {
364 thread_ro_t tro = get_thread_ro(th);
365 #if MACH_BSD
366 struct ucred *cred = tro->tro_cred;
367 struct ucred *rcred = tro->tro_realcred;
368 #endif
369 zfree_ro(ZONE_ID_THREAD_RO, tro);
370 #if MACH_BSD
371 uthread_cred_free(cred);
372 uthread_cred_free(rcred);
373 #endif
374 }
375
376 __startup_func
377 thread_t
thread_bootstrap(void)
379 {
380 /*
381 * Fill in a template thread for fast initialization.
382 */
383 timer_init(&thread_template.runnable_timer);
384
385 init_thread_from_template(&init_thread);
386 /* fiddle with init thread to skip asserts in set_sched_pri */
387 init_thread.sched_pri = MAXPRI_KERNEL;
388
389 /*
390 * We can't quite use ctid yet, on ARM thread_bootstrap() is called
391 * before we can call random or anything,
392 * so we just make it barely work and it will get fixed up
393 * when the first thread is actually made.
394 */
395 *compact_id_resolve(&ctid_table, 0) = &init_thread;
396 init_thread.ctid = CTID_MASK;
397
398 return &init_thread;
399 }
400
401 void
thread_machine_init_template(void)
403 {
404 machine_thread_template_init(&thread_template);
405 }
406
407 void
thread_init(void)
409 {
410 /*
411 * Initialize any machine-dependent
412 * per-thread structures necessary.
413 */
414 machine_thread_init();
415
416 init_thread_ledgers();
417 }
418
419 boolean_t
thread_is_active(thread_t thread)
421 {
422 return thread->active;
423 }
424
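/*
 * Continuation used by corpse threads: terminate the calling thread and
 * handle the termination APC in place rather than returning to userspace.
 */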
425 void
thread_corpse_continue(void)
427 {
428 thread_t thread = current_thread();
429
430 thread_terminate_internal(thread);
431
432 /*
433 * Handle the thread termination directly
434 * here instead of returning to userspace.
435 */
436 assert(thread->active == FALSE);
437 thread_ast_clear(thread, AST_APC);
438 thread_apc_ast(thread);
439
440 panic("thread_corpse_continue");
441 /*NOTREACHED*/
442 }
443
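/*
 * Continuation installed when a terminating thread blocks for the last time;
 * it should never actually run.
 */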
444 __dead2
445 static void
thread_terminate_continue(void)
447 {
448 panic("thread_terminate_continue");
449 /*NOTREACHED*/
450 }
451
452 /*
453 * thread_terminate_self:
454 */
455 void
thread_terminate_self(void)
457 {
458 thread_t thread = current_thread();
459 thread_ro_t tro = get_thread_ro(thread);
460 task_t task = tro->tro_task;
461 void *bsd_info = get_bsdtask_info(task);
462 int threadcnt;
463
464 pal_thread_terminate_self(thread);
465
466 DTRACE_PROC(lwp__exit);
467
468 thread_mtx_lock(thread);
469
470 ipc_thread_disable(thread);
471
472 thread_mtx_unlock(thread);
473
474 thread_sched_call(thread, NULL);
475
476 spl_t s = splsched();
477 thread_lock(thread);
478
479 thread_depress_abort_locked(thread);
480
481 /*
482 * Before we take the thread_lock right above,
483 * act_set_ast_reset_pcs() might not yet observe
484 * that the thread is inactive, and could have
485 * requested an IPI Ack.
486 *
487 * Once we unlock the thread, we know that
488 * act_set_ast_reset_pcs() can't fail to notice
489 * that thread->active is false,
490 * and won't set new ones.
491 */
492 thread_reset_pcs_ack_IPI(thread);
493
494 thread_unlock(thread);
495
496 splx(s);
497
498 #if CONFIG_TASKWATCH
499 thead_remove_taskwatch(thread);
500 #endif /* CONFIG_TASKWATCH */
501
502 work_interval_thread_terminate(thread);
503
504 thread_mtx_lock(thread);
505
506 thread_policy_reset(thread);
507
508 thread_mtx_unlock(thread);
509
510 assert(thread->th_work_interval == NULL);
511
512 bank_swap_thread_bank_ledger(thread, NULL);
513
514 if (kdebug_enable && bsd_hasthreadname(get_bsdthread_info(thread))) {
515 char threadname[MAXTHREADNAMESIZE];
516 bsd_getthreadname(get_bsdthread_info(thread), threadname);
517 kernel_debug_string_simple(TRACE_STRING_THREADNAME_PREV, threadname);
518 }
519
520 uthread_cleanup(get_bsdthread_info(thread), tro);
521
522 if (kdebug_enable && bsd_info && !task_is_exec_copy(task)) {
523 /* trace out pid before we sign off */
524 long dbg_arg1 = 0;
525 long dbg_arg2 = 0;
526
527 kdbg_trace_data(get_bsdtask_info(task), &dbg_arg1, &dbg_arg2);
528 #if CONFIG_PERVASIVE_CPI
529 if (kdebug_debugid_enabled(DBG_MT_INSTRS_CYCLES_THR_EXIT)) {
530 struct recount_usage usage = { 0 };
531 struct recount_usage perf_only = { 0 };
532 boolean_t intrs_end = ml_set_interrupts_enabled(FALSE);
533 recount_current_thread_usage_perf_only(&usage, &perf_only);
534 ml_set_interrupts_enabled(intrs_end);
535 KDBG_RELEASE(DBG_MT_INSTRS_CYCLES_THR_EXIT,
536 recount_usage_instructions(&usage),
537 recount_usage_cycles(&usage),
538 recount_usage_system_time_mach(&usage),
539 usage.ru_metrics[RCT_LVL_USER].rm_time_mach);
540 #if __AMP__
541 KDBG_RELEASE(DBG_MT_P_INSTRS_CYCLES_THR_EXIT,
542 recount_usage_instructions(&perf_only),
543 recount_usage_cycles(&perf_only),
544 recount_usage_system_time_mach(&perf_only),
545 perf_only.ru_metrics[RCT_LVL_USER].rm_time_mach);
546 #endif // __AMP__
547 }
548 #endif/* CONFIG_PERVASIVE_CPI */
549 KDBG_RELEASE(TRACE_DATA_THREAD_TERMINATE_PID, dbg_arg1, dbg_arg2);
550 }
551
552 /*
553 * After this subtraction, this thread should never access
554 * task->bsd_info unless it got 0 back from the os_atomic_dec. It
555 * could be racing with other threads to be the last thread in the
556 * process, and the last thread in the process will tear down the proc
557 * structure and zero-out task->bsd_info.
558 */
559 threadcnt = os_atomic_dec(&task->active_thread_count, relaxed);
560
561 #if CONFIG_COALITIONS
562 /*
563 * Leave the coalitions when last thread of task is exiting and the
564 * task is not a corpse.
565 */
566 if (threadcnt == 0 && !task->corpse_info) {
567 coalitions_remove_task(task);
568 }
569 #endif
570
571 /*
572 * If we are the last thread to terminate and the task is
573 * associated with a BSD process, perform BSD process exit.
574 */
575 if (threadcnt == 0 && bsd_info != NULL) {
576 mach_exception_data_type_t subcode = 0;
577 if (kdebug_enable) {
578 /* since we're the last thread in this process, trace out the command name too */
579 long args[4] = { 0 };
580 kdebug_proc_name_args(bsd_info, args);
581 #if CONFIG_PERVASIVE_CPI
582 if (kdebug_debugid_enabled(DBG_MT_INSTRS_CYCLES_PROC_EXIT)) {
583 struct recount_usage usage = { 0 };
584 struct recount_usage perf_only = { 0 };
585 recount_current_task_usage_perf_only(&usage, &perf_only);
586 KDBG_RELEASE(DBG_MT_INSTRS_CYCLES_PROC_EXIT,
587 recount_usage_instructions(&usage),
588 recount_usage_cycles(&usage),
589 recount_usage_system_time_mach(&usage),
590 usage.ru_metrics[RCT_LVL_USER].rm_time_mach);
591 #if __AMP__
592 KDBG_RELEASE(DBG_MT_P_INSTRS_CYCLES_PROC_EXIT,
593 recount_usage_instructions(&perf_only),
594 recount_usage_cycles(&perf_only),
595 recount_usage_system_time_mach(&perf_only),
596 perf_only.ru_metrics[RCT_LVL_USER].rm_time_mach);
597 #endif // __AMP__
598 }
599 #endif/* CONFIG_PERVASIVE_CPI */
600 KDBG_RELEASE(TRACE_STRING_PROC_EXIT, args[0], args[1], args[2], args[3]);
601 }
602
603 /* Get the exit reason before proc_exit */
604 subcode = proc_encode_exit_exception_code(bsd_info);
605 proc_exit(bsd_info);
606 bsd_info = NULL;
607 #if CONFIG_EXCLAVES
608 task_clear_conclave(task);
609 #endif
610 /*
611 * if there is crash info in task
612 * then do the deliver action since this is
613 * last thread for this task.
614 */
615 if (task->corpse_info) {
616 /* reset all except task name port */
617 ipc_task_reset(task);
618 /* enable all task ports (name port unchanged) */
619 ipc_task_enable(task);
620 exception_type_t etype = get_exception_from_corpse_crashinfo(task->corpse_info);
621 task_deliver_crash_notification(task, current_thread(), etype, subcode);
622 }
623 }
624
625 if (threadcnt == 0) {
626 task_lock(task);
627 if (task_is_a_corpse_fork(task)) {
628 thread_wakeup((event_t)&task->active_thread_count);
629 }
630 task_unlock(task);
631 }
632
633 #if CONFIG_EXCLAVES
634 exclaves_thread_terminate(thread);
635 #endif
636
637 if (thread->th_vm_faults_disabled) {
638 panic("Thread %p terminating with vm_faults disabled.", thread);
639 }
640
641 s = splsched();
642 thread_lock(thread);
643
644 /*
645 * Ensure that the depress timer is no longer enqueued,
646 * so the timer can be safely deallocated
647 *
648 * TODO: build timer_call_cancel_wait
649 */
650
651 assert((thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) == 0);
652
653 uint32_t delay_us = 1;
654
655 while (thread->depress_timer_active > 0) {
656 thread_unlock(thread);
657 splx(s);
658
659 delay(delay_us++);
660
661 if (delay_us > USEC_PER_SEC) {
			panic("depress timer failed to inactivate! "
663 "thread: %p depress_timer_active: %d",
664 thread, thread->depress_timer_active);
665 }
666
667 s = splsched();
668 thread_lock(thread);
669 }
670
671 /*
672 * Cancel wait timer, and wait for
673 * concurrent expirations.
674 */
675 if (thread->wait_timer_armed) {
676 thread->wait_timer_armed = false;
677
678 if (timer_call_cancel(thread->wait_timer)) {
679 thread->wait_timer_active--;
680 }
681 }
682
683 delay_us = 1;
684
685 while (thread->wait_timer_active > 0) {
686 thread_unlock(thread);
687 splx(s);
688
689 delay(delay_us++);
690
691 if (delay_us > USEC_PER_SEC) {
			panic("wait timer failed to inactivate! "
693 "thread: %p, wait_timer_active: %d, "
694 "wait_timer_armed: %d",
695 thread, thread->wait_timer_active,
696 thread->wait_timer_armed);
697 }
698
699 s = splsched();
700 thread_lock(thread);
701 }
702
703 /*
704 * If there is a reserved stack, release it.
705 */
706 if (thread->reserved_stack != 0) {
707 stack_free_reserved(thread);
708 thread->reserved_stack = 0;
709 }
710
711 /*
712 * Mark thread as terminating, and block.
713 */
714 thread->state |= TH_TERMINATE;
715 thread_mark_wait_locked(thread, THREAD_UNINT);
716
717 #if CONFIG_EXCLAVES
718 assert(thread->th_exclaves_ipc_ctx.ipcb == NULL);
719 assert(thread->th_exclaves_ipc_ctx.scid == 0);
720 assert(thread->th_exclaves_intstate == 0);
721 assert(thread->th_exclaves_state == 0);
722 #endif
723 assert(thread->th_work_interval_flags == TH_WORK_INTERVAL_FLAGS_NONE);
724 assert(thread->kern_promotion_schedpri == 0);
725 if (thread->rwlock_count > 0) {
726 panic("rwlock_count is %d for thread %p, possibly it still holds a rwlock", thread->rwlock_count, thread);
727 }
728 assert(thread->priority_floor_count == 0);
729 assert(thread->handoff_thread == THREAD_NULL);
730 assert(thread->th_work_interval == NULL);
731 assert(thread->t_rr_state.trr_value == 0);
732 #if DEBUG || DEVELOPMENT
733 assert(thread->th_test_ctx == NULL);
734 #endif
735
736 assert3u(0, ==, thread->sched_flags &
737 (TH_SFLAG_WAITQ_PROMOTED |
738 TH_SFLAG_RW_PROMOTED |
739 TH_SFLAG_EXEC_PROMOTED |
740 TH_SFLAG_FLOOR_PROMOTED |
741 TH_SFLAG_DEPRESS));
742
743 thread_unlock(thread);
744 /* splsched */
745
746 thread_block((thread_continue_t)thread_terminate_continue);
747 /*NOTREACHED*/
748 }
749
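/*
 * Drop one reference on the thread. Returns true when this was the last
 * reference, in which case the caller must complete (or enqueue) the
 * deallocation.
 */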
750 static bool
thread_ref_release(thread_t thread)
752 {
753 if (thread == THREAD_NULL) {
754 return false;
755 }
756
757 assert_thread_magic(thread);
758
759 return os_ref_release_raw(&thread->ref_count, &thread_refgrp) == 0;
760 }
761
762 /* Drop a thread refcount safely without triggering a zfree */
763 void
thread_deallocate_safe(thread_t thread)
765 {
766 if (__improbable(thread_ref_release(thread))) {
		/* enqueue the thread for the thread deallocate daemon to call thread_deallocate_complete */
768 thread_deallocate_enqueue(thread);
769 }
770 }
771
772 void
thread_deallocate(thread_t thread)
774 {
775 if (__improbable(thread_ref_release(thread))) {
776 thread_deallocate_complete(thread);
777 }
778 }
779
780 void
thread_deallocate_complete(
	thread_t thread)
783 {
784 task_t task;
785
786 assert_thread_magic(thread);
787
788 assert(os_ref_get_count_raw(&thread->ref_count) == 0);
789
790 if (!(thread->state & TH_TERMINATE2)) {
791 panic("thread_deallocate: thread not properly terminated");
792 }
793
794 thread_assert_runq_null(thread);
795 assert(!(thread->state & TH_WAKING));
796
797 #if CONFIG_CPU_COUNTERS
798 kpc_thread_destroy(thread);
799 #endif /* CONFIG_CPU_COUNTERS */
800
801 ipc_thread_terminate(thread);
802
803 proc_thread_qos_deallocate(thread);
804
805 task = get_threadtask(thread);
806
807 #ifdef MACH_BSD
808 uthread_destroy(get_bsdthread_info(thread));
809 #endif /* MACH_BSD */
810
811 if (thread->t_ledger) {
812 ledger_dereference(thread->t_ledger);
813 }
814 if (thread->t_threadledger) {
815 ledger_dereference(thread->t_threadledger);
816 }
817
818 assert(thread->turnstile != TURNSTILE_NULL);
819 if (thread->turnstile) {
820 turnstile_deallocate(thread->turnstile);
821 }
822 turnstile_compact_id_put(thread->ctsid);
823
824 if (IPC_VOUCHER_NULL != thread->ith_voucher) {
825 ipc_voucher_release(thread->ith_voucher);
826 }
827
828 kfree_data(thread->thread_io_stats, sizeof(struct io_stat_info));
829 #if CONFIG_PREADOPT_TG
830 if (thread->old_preadopt_thread_group) {
831 thread_group_release(thread->old_preadopt_thread_group);
832 }
833
834 if (thread->preadopt_thread_group) {
835 thread_group_release(thread->preadopt_thread_group);
836 }
837 #endif /* CONFIG_PREADOPT_TG */
838
839 if (thread->kernel_stack != 0) {
840 stack_free(thread);
841 }
842
843 recount_thread_deinit(&thread->th_recount);
844
845 lck_mtx_destroy(&thread->mutex, &thread_lck_grp);
846 machine_thread_destroy(thread);
847
848 task_deallocate_grp(task, TASK_GRP_INTERNAL);
849
850 #if MACH_ASSERT
851 assert_thread_magic(thread);
852 thread->thread_magic = 0;
853 #endif /* MACH_ASSERT */
854
855 lck_mtx_lock(&tasks_threads_lock);
856 assert(terminated_threads_count > 0);
857 queue_remove(&terminated_threads, thread, thread_t, threads);
858 terminated_threads_count--;
859 lck_mtx_unlock(&tasks_threads_lock);
860
861 timer_call_free(thread->depress_timer);
862 timer_call_free(thread->wait_timer);
863
864 ctid_table_remove(thread);
865
866 thread_ro_destroy(thread);
867 zfree(thread_zone, thread);
868 }
869
870 /*
871 * thread_inspect_deallocate:
872 *
873 * Drop a thread inspection reference.
874 */
875 void
thread_inspect_deallocate(
	thread_inspect_t thread_inspect)
878 {
879 return thread_deallocate((thread_t)thread_inspect);
880 }
881
882 /*
883 * thread_read_deallocate:
884 *
885 * Drop a reference on thread read port.
886 */
887 void
thread_read_deallocate(
	thread_read_t thread_read)
890 {
891 return thread_deallocate((thread_t)thread_read);
892 }
893
894
895 /*
896 * thread_exception_queue_invoke:
897 *
898 * Deliver EXC_{RESOURCE,GUARD} exception
899 */
900 static void
thread_exception_queue_invoke(mpsc_queue_chain_t elm,
    __assert_only mpsc_daemon_queue_t dq)
903 {
904 struct thread_exception_elt *elt;
905 task_t task;
906 thread_t thread;
907 exception_type_t etype;
908
909 assert(dq == &thread_exception_queue);
910 elt = mpsc_queue_element(elm, struct thread_exception_elt, link);
911
912 etype = elt->exception_type;
913 task = elt->exception_task;
914 thread = elt->exception_thread;
915 assert_thread_magic(thread);
916
917 kfree_type(struct thread_exception_elt, elt);
918
919 /* wait for all the threads in the task to terminate */
920 task_lock(task);
921 task_wait_till_threads_terminate_locked(task);
922 task_unlock(task);
923
	/* Deliver the notification, also clears the corpse. */
	task_deliver_crash_notification(task, thread, etype, 0);

	/* Consumes the task ref returned by task_generate_corpse_internal */
	task_deallocate(task);
	/* Consumes the thread ref returned by task_generate_corpse_internal */
	thread_deallocate(thread);
931 }
932
933 static void
thread_backtrace_queue_invoke(mpsc_queue_chain_t elm,
    __assert_only mpsc_daemon_queue_t dq)
936 {
937 struct thread_backtrace_elt *elt;
938 kcdata_object_t obj;
939 exception_port_t exc_ports[BT_EXC_PORTS_COUNT]; /* send rights */
940 exception_type_t etype;
941
942 assert(dq == &thread_backtrace_queue);
943 elt = mpsc_queue_element(elm, struct thread_backtrace_elt, link);
944
945 obj = elt->obj;
946 memcpy(exc_ports, elt->exc_ports, sizeof(ipc_port_t) * BT_EXC_PORTS_COUNT);
947 etype = elt->exception_type;
948
949 kfree_type(struct thread_backtrace_elt, elt);
950
951 /* Deliver to backtrace exception ports */
952 exception_deliver_backtrace(obj, exc_ports, etype);
953
954 /*
955 * Release port right and kcdata object refs given by
956 * task_enqueue_exception_with_corpse()
957 */
958
959 for (unsigned int i = 0; i < BT_EXC_PORTS_COUNT; i++) {
960 ipc_port_release_send(exc_ports[i]);
961 }
962
963 kcdata_object_release(obj);
964 }
965
966 /*
967 * thread_exception_enqueue:
968 *
969 * Enqueue a corpse port to be delivered an EXC_{RESOURCE,GUARD}.
970 */
971 void
thread_exception_enqueue(
	task_t task,
	thread_t thread,
	exception_type_t etype)
976 {
977 assert(EXC_RESOURCE == etype || EXC_GUARD == etype);
978 struct thread_exception_elt *elt = kalloc_type(struct thread_exception_elt, Z_WAITOK | Z_NOFAIL);
979 elt->exception_type = etype;
980 elt->exception_task = task;
981 elt->exception_thread = thread;
982
983 mpsc_daemon_enqueue(&thread_exception_queue, &elt->link,
984 MPSC_QUEUE_DISABLE_PREEMPTION);
985 }
986
987 void
thread_backtrace_enqueue(
	kcdata_object_t obj,
	exception_port_t ports[static BT_EXC_PORTS_COUNT],
	exception_type_t etype)
992 {
993 struct thread_backtrace_elt *elt = kalloc_type(struct thread_backtrace_elt, Z_WAITOK | Z_NOFAIL);
994 elt->obj = obj;
995 elt->exception_type = etype;
996
997 memcpy(elt->exc_ports, ports, sizeof(ipc_port_t) * BT_EXC_PORTS_COUNT);
998
999 mpsc_daemon_enqueue(&thread_backtrace_queue, &elt->link,
1000 MPSC_QUEUE_DISABLE_PREEMPTION);
1001 }
1002
1003 /*
1004 * thread_copy_resource_info
1005 *
1006 * Copy the resource info counters from source
1007 * thread to destination thread.
1008 */
1009 void
thread_copy_resource_info(
	thread_t dst_thread,
	thread_t src_thread)
1013 {
1014 dst_thread->c_switch = src_thread->c_switch;
1015 dst_thread->p_switch = src_thread->p_switch;
1016 dst_thread->ps_switch = src_thread->ps_switch;
1017 dst_thread->sched_time_save = src_thread->sched_time_save;
1018 dst_thread->runnable_timer = src_thread->runnable_timer;
1019 dst_thread->vtimer_user_save = src_thread->vtimer_user_save;
1020 dst_thread->vtimer_prof_save = src_thread->vtimer_prof_save;
1021 dst_thread->vtimer_rlim_save = src_thread->vtimer_rlim_save;
1022 dst_thread->vtimer_qos_save = src_thread->vtimer_qos_save;
1023 dst_thread->syscalls_unix = src_thread->syscalls_unix;
1024 dst_thread->syscalls_mach = src_thread->syscalls_mach;
1025 ledger_rollup(dst_thread->t_threadledger, src_thread->t_threadledger);
1026 recount_thread_copy(&dst_thread->th_recount, &src_thread->th_recount);
1027 *dst_thread->thread_io_stats = *src_thread->thread_io_stats;
1028 }
1029
1030 static void
thread_terminate_queue_invoke(mpsc_queue_chain_t e,
    __assert_only mpsc_daemon_queue_t dq)
1033 {
1034 thread_t thread = mpsc_queue_element(e, struct thread, mpsc_links);
1035 task_t task = get_threadtask(thread);
1036
1037 assert(dq == &thread_terminate_queue);
1038
1039 task_lock(task);
1040
1041 /*
1042 * if marked for crash reporting, skip reaping.
	 * The corpse delivery thread will clear the bit and enqueue
1044 * for reaping when done
1045 *
1046 * Note: the inspection field is set under the task lock
1047 *
1048 * FIXME[mad]: why enqueue for termination before `inspection` is false ?
1049 */
1050 if (__improbable(thread->inspection)) {
1051 simple_lock(&crashed_threads_lock, &thread_lck_grp);
1052 task_unlock(task);
1053
1054 enqueue_tail(&crashed_threads_queue, &thread->runq_links);
1055 simple_unlock(&crashed_threads_lock);
1056 return;
1057 }
1058
1059 recount_task_rollup_thread(&task->tk_recount, &thread->th_recount);
1060
1061 task->total_runnable_time += timer_grab(&thread->runnable_timer);
1062 task->c_switch += thread->c_switch;
1063 task->p_switch += thread->p_switch;
1064 task->ps_switch += thread->ps_switch;
1065
1066 task->syscalls_unix += thread->syscalls_unix;
1067 task->syscalls_mach += thread->syscalls_mach;
1068
1069 task->task_timer_wakeups_bin_1 += thread->thread_timer_wakeups_bin_1;
1070 task->task_timer_wakeups_bin_2 += thread->thread_timer_wakeups_bin_2;
1071 task->task_gpu_ns += ml_gpu_stat(thread);
1072 task->decompressions += thread->decompressions;
1073
1074 thread_update_qos_cpu_time(thread);
1075
1076 queue_remove(&task->threads, thread, thread_t, task_threads);
1077 task->thread_count--;
1078
1079 /*
1080 * If the task is being halted, and there is only one thread
1081 * left in the task after this one, then wakeup that thread.
1082 */
1083 if (task->thread_count == 1 && task->halting) {
1084 thread_wakeup((event_t)&task->halting);
1085 }
1086
1087 task_unlock(task);
1088
1089 lck_mtx_lock(&tasks_threads_lock);
1090 queue_remove(&threads, thread, thread_t, threads);
1091 threads_count--;
1092 queue_enter(&terminated_threads, thread, thread_t, threads);
1093 terminated_threads_count++;
1094 lck_mtx_unlock(&tasks_threads_lock);
1095
1096 #if MACH_BSD
1097 /*
1098 * The thread no longer counts against the task's thread count,
1099 * we can now wake up any pending joiner.
1100 *
1101 * Note that the inheritor will be set to `thread` which is
1102 * incorrect once it is on the termination queue, however
1103 * the termination queue runs at MINPRI_KERNEL which is higher
1104 * than any user thread, so this isn't a priority inversion.
1105 */
1106 if (thread_get_tag(thread) & THREAD_TAG_USER_JOIN) {
1107 struct uthread *uth = get_bsdthread_info(thread);
1108 mach_port_name_t kport = uthread_joiner_port(uth);
1109
1110 /*
1111 * Clear the port low two bits to tell pthread that thread is gone.
1112 */
1113 #ifndef NO_PORT_GEN
1114 kport &= ~MACH_PORT_MAKE(0, IE_BITS_GEN_MASK + IE_BITS_GEN_ONE);
1115 #else
1116 kport |= MACH_PORT_MAKE(0, ~(IE_BITS_GEN_MASK + IE_BITS_GEN_ONE));
1117 #endif
1118 (void)copyoutmap_atomic32(task->map, kport,
1119 uthread_joiner_address(uth));
1120 uthread_joiner_wake(task, uth);
1121 }
1122 #endif
1123
1124 thread_deallocate(thread);
1125 }
1126
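/*
 * thread_deallocate_queue_invoke:
 *
 * Run by the deallocate daemon to finish tearing down threads whose last
 * reference was dropped via thread_deallocate_safe().
 */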
1127 static void
thread_deallocate_queue_invoke(mpsc_queue_chain_t e,
    __assert_only mpsc_daemon_queue_t dq)
1130 {
1131 thread_t thread = mpsc_queue_element(e, struct thread, mpsc_links);
1132
1133 assert(dq == &thread_deallocate_queue);
1134
1135 thread_deallocate_complete(thread);
1136 }
1137
1138 /*
1139 * thread_terminate_enqueue:
1140 *
1141 * Enqueue a terminating thread for final disposition.
1142 *
1143 * Called at splsched.
1144 */
1145 void
thread_terminate_enqueue(
	thread_t thread)
1148 {
1149 KDBG_RELEASE(TRACE_DATA_THREAD_TERMINATE, thread->thread_id);
1150
1151 mpsc_daemon_enqueue(&thread_terminate_queue, &thread->mpsc_links,
1152 MPSC_QUEUE_DISABLE_PREEMPTION);
1153 }
1154
1155 /*
1156 * thread_deallocate_enqueue:
1157 *
1158 * Enqueue a thread for final deallocation.
1159 */
1160 static void
thread_deallocate_enqueue(
	thread_t thread)
1163 {
1164 mpsc_daemon_enqueue(&thread_deallocate_queue, &thread->mpsc_links,
1165 MPSC_QUEUE_DISABLE_PREEMPTION);
1166 }
1167
1168 /*
1169 * thread_terminate_crashed_threads:
 * walk the list of crashed threads and re-enqueue for termination
 * any threads that are no longer being inspected.
1172 */
1173 void
thread_terminate_crashed_threads(void)
1175 {
1176 thread_t th_remove;
1177
1178 simple_lock(&crashed_threads_lock, &thread_lck_grp);
1179 /*
	 * loop through the crashed threads queue and hand any threads that are
	 * no longer being inspected back to the termination queue
1182 */
1183
1184 qe_foreach_element_safe(th_remove, &crashed_threads_queue, runq_links) {
1185 /* make sure current_thread is never in crashed queue */
1186 assert(th_remove != current_thread());
1187
1188 if (th_remove->inspection == FALSE) {
1189 remqueue(&th_remove->runq_links);
1190 mpsc_daemon_enqueue(&thread_terminate_queue, &th_remove->mpsc_links,
1191 MPSC_QUEUE_NONE);
1192 }
1193 }
1194
1195 simple_unlock(&crashed_threads_lock);
1196 }
1197
1198 /*
1199 * thread_stack_queue_invoke:
1200 *
1201 * Perform stack allocation as required due to
1202 * invoke failures.
1203 */
1204 static void
thread_stack_queue_invoke(mpsc_queue_chain_t elm,
    __assert_only mpsc_daemon_queue_t dq)
1207 {
1208 thread_t thread = mpsc_queue_element(elm, struct thread, mpsc_links);
1209
1210 assert(dq == &thread_stack_queue);
1211
1212 /* allocate stack with interrupts enabled so that we can call into VM */
1213 stack_alloc(thread);
1214
1215 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_WAIT) | DBG_FUNC_END, thread_tid(thread), 0, 0, 0, 0);
1216
1217 spl_t s = splsched();
1218 thread_lock(thread);
1219 thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
1220 thread_unlock(thread);
1221 splx(s);
1222 }
1223
1224 /*
1225 * thread_stack_enqueue:
1226 *
1227 * Enqueue a thread for stack allocation.
1228 *
1229 * Called at splsched.
1230 */
1231 void
thread_stack_enqueue(
	thread_t thread)
1234 {
1235 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_WAIT) | DBG_FUNC_START, thread_tid(thread), 0, 0, 0, 0);
1236 assert_thread_magic(thread);
1237
1238 mpsc_daemon_enqueue(&thread_stack_queue, &thread->mpsc_links,
1239 MPSC_QUEUE_DISABLE_PREEMPTION);
1240 }
1241
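/*
 * thread_daemon_init:
 *
 * Set up the MPSC daemon queues and threads that handle thread termination,
 * final deallocation, kernel stack allocation, and EXC_{RESOURCE,GUARD} and
 * backtrace exception delivery.
 */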
1242 void
thread_daemon_init(void)
1244 {
1245 kern_return_t result;
1246
1247 thread_deallocate_daemon_init();
1248
1249 thread_deallocate_daemon_register_queue(&thread_terminate_queue,
1250 thread_terminate_queue_invoke);
1251
1252 thread_deallocate_daemon_register_queue(&thread_deallocate_queue,
1253 thread_deallocate_queue_invoke);
1254
1255 ipc_object_deallocate_register_queue();
1256
1257 simple_lock_init(&crashed_threads_lock, 0);
1258 queue_init(&crashed_threads_queue);
1259
1260 result = mpsc_daemon_queue_init_with_thread(&thread_stack_queue,
1261 thread_stack_queue_invoke, BASEPRI_PREEMPT_HIGH,
1262 "daemon.thread-stack", MPSC_DAEMON_INIT_NONE);
1263 if (result != KERN_SUCCESS) {
1264 panic("thread_daemon_init: thread_stack_daemon");
1265 }
1266
1267 result = mpsc_daemon_queue_init_with_thread(&thread_exception_queue,
1268 thread_exception_queue_invoke, MINPRI_KERNEL,
1269 "daemon.thread-exception", MPSC_DAEMON_INIT_NONE);
1270
1271 if (result != KERN_SUCCESS) {
1272 panic("thread_daemon_init: thread_exception_daemon");
1273 }
1274
1275 result = mpsc_daemon_queue_init_with_thread(&thread_backtrace_queue,
1276 thread_backtrace_queue_invoke, MINPRI_KERNEL,
1277 "daemon.thread-backtrace", MPSC_DAEMON_INIT_NONE);
1278
1279 if (result != KERN_SUCCESS) {
1280 panic("thread_daemon_init: thread_backtrace_daemon");
1281 }
1282 }
1283
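/*
 * Options for thread_create_internal(): TH_OPTION_NOSUSP fails creation while
 * the parent task is suspended, TH_OPTION_WORKQ creates a workqueue thread,
 * and TH_OPTION_MAINTHREAD creates a task's main thread (which waits
 * uninterruptibly and whose control port is made immovable/pinned).
 */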
1284 __options_decl(thread_create_internal_options_t, uint32_t, {
1285 TH_OPTION_NONE = 0x00,
1286 TH_OPTION_NOSUSP = 0x02,
1287 TH_OPTION_WORKQ = 0x04,
1288 TH_OPTION_MAINTHREAD = 0x08,
1289 });
1290
1291 void
main_thread_set_immovable_pinned(thread_t thread)
1293 {
1294 ipc_main_thread_set_immovable_pinned(thread);
1295 }
1296
1297 /*
1298 * Create a new thread.
1299 * Doesn't start the thread running.
1300 *
1301 * Task and tasks_threads_lock are returned locked on success.
1302 */
1303 static kern_return_t
thread_create_internal(
	task_t parent_task,
	integer_t priority,
	thread_continue_t continuation,
	void *parameter,
	thread_create_internal_options_t options,
	thread_t *out_thread)
1311 {
1312 thread_t new_thread;
1313 ipc_thread_init_options_t init_options = IPC_THREAD_INIT_NONE;
1314 struct thread_ro tro_tpl = { };
1315 bool first_thread = false;
1316 kern_return_t kr = KERN_FAILURE;
1317
1318 /*
1319 * Allocate a thread and initialize static fields
1320 */
1321 new_thread = zalloc_flags(thread_zone, Z_WAITOK | Z_NOFAIL);
1322
1323 if (__improbable(current_thread() == &init_thread)) {
1324 /*
1325 * The first thread ever is a global, but because we want to be
1326 * able to zone_id_require() threads, we have to stop using the
		 * global piece of memory we used to bootstrap the kernel and
1328 * jump to a proper thread from a zone.
1329 *
1330 * This is why that one thread will inherit its original
1331 * state differently.
1332 *
1333 * Also remember this thread in `vm_pageout_scan_thread`
1334 * as this is what the first thread ever becomes.
1335 *
1336 * Also pre-warm the depress timer since the VM pageout scan
1337 * daemon might need to use it.
1338 */
1339 assert(vm_pageout_scan_thread == THREAD_NULL);
1340 vm_pageout_scan_thread = new_thread;
1341
1342 first_thread = true;
1343 #pragma clang diagnostic push
1344 #pragma clang diagnostic ignored "-Wnontrivial-memaccess"
1345 /* work around 74481146 */
1346 memcpy(new_thread, &init_thread, sizeof(*new_thread));
1347 #pragma clang diagnostic pop
1348
1349 /*
1350 * Make the ctid table functional
1351 */
1352 ctid_table_init();
1353 new_thread->ctid = 0;
1354 } else {
1355 init_thread_from_template(new_thread);
1356 }
1357
1358 if (options & TH_OPTION_MAINTHREAD) {
1359 init_options |= IPC_THREAD_INIT_MAINTHREAD;
1360 }
1361
1362 os_ref_init_count_raw(&new_thread->ref_count, &thread_refgrp, 2);
1363 machine_thread_create(new_thread, parent_task, first_thread);
1364
1365 machine_thread_process_signature(new_thread, parent_task);
1366
1367 #ifdef MACH_BSD
1368 uthread_init(parent_task, get_bsdthread_info(new_thread),
1369 &tro_tpl, (options & TH_OPTION_WORKQ) != 0);
1370 if (!task_is_a_corpse(parent_task)) {
1371 /*
1372 * uthread_init will set tro_cred (with a +1)
1373 * and tro_proc for live tasks.
1374 */
1375 assert(tro_tpl.tro_cred && tro_tpl.tro_proc);
1376 }
1377 #endif /* MACH_BSD */
1378
1379 thread_lock_init(new_thread);
1380 wake_lock_init(new_thread);
1381
1382 lck_mtx_init(&new_thread->mutex, &thread_lck_grp, LCK_ATTR_NULL);
1383
1384 ipc_thread_init(parent_task, new_thread, &tro_tpl, init_options);
1385
1386 thread_ro_create(parent_task, new_thread, &tro_tpl);
1387
1388 new_thread->continuation = continuation;
1389 new_thread->parameter = parameter;
1390 new_thread->inheritor_flags = TURNSTILE_UPDATE_FLAGS_NONE;
1391 new_thread->requested_policy = default_thread_requested_policy;
1392 new_thread->__runq.runq = PROCESSOR_NULL;
1393 priority_queue_init(&new_thread->sched_inheritor_queue);
1394 priority_queue_init(&new_thread->base_inheritor_queue);
1395 #if CONFIG_SCHED_CLUTCH
1396 priority_queue_entry_init(&new_thread->th_clutch_runq_link);
1397 priority_queue_entry_init(&new_thread->th_clutch_pri_link);
1398 #endif /* CONFIG_SCHED_CLUTCH */
1399
1400 #if CONFIG_SCHED_EDGE
1401 new_thread->th_bound_cluster_enqueued = false;
1402 for (cluster_shared_rsrc_type_t shared_rsrc_type = CLUSTER_SHARED_RSRC_TYPE_MIN; shared_rsrc_type < CLUSTER_SHARED_RSRC_TYPE_COUNT; shared_rsrc_type++) {
1403 new_thread->th_shared_rsrc_enqueued[shared_rsrc_type] = false;
1404 new_thread->th_shared_rsrc_heavy_user[shared_rsrc_type] = false;
1405 new_thread->th_shared_rsrc_heavy_perf_control[shared_rsrc_type] = false;
1406 }
1407 #endif /* CONFIG_SCHED_EDGE */
1408 new_thread->th_bound_cluster_id = THREAD_BOUND_CLUSTER_NONE;
1409
1410 /* Allocate I/O Statistics structure */
1411 new_thread->thread_io_stats = kalloc_data(sizeof(struct io_stat_info),
1412 Z_WAITOK | Z_ZERO | Z_NOFAIL);
1413
1414 #if KASAN_CLASSIC
1415 kasan_init_thread(&new_thread->kasan_data);
1416 #endif /* KASAN_CLASSIC */
1417
1418 #if CONFIG_KCOV
1419 kcov_init_thread(&new_thread->kcov_data);
1420 #endif
1421
1422 #if CONFIG_IOSCHED
1423 /* Clear out the I/O Scheduling info for AppleFSCompression */
1424 new_thread->decmp_upl = NULL;
1425 #endif /* CONFIG_IOSCHED */
1426
1427 new_thread->thread_region_page_shift = 0;
1428
1429 #if DEVELOPMENT || DEBUG
1430 task_lock(parent_task);
1431 uint16_t thread_limit = parent_task->task_thread_limit;
1432 if (exc_resource_threads_enabled &&
1433 thread_limit > 0 &&
1434 parent_task->thread_count >= thread_limit &&
1435 !parent_task->task_has_crossed_thread_limit &&
1436 !(task_is_a_corpse(parent_task))) {
1437 int thread_count = parent_task->thread_count;
1438 parent_task->task_has_crossed_thread_limit = TRUE;
1439 task_unlock(parent_task);
1440 SENDING_NOTIFICATION__TASK_HAS_TOO_MANY_THREADS(parent_task, thread_count);
1441 } else {
1442 task_unlock(parent_task);
1443 }
1444 #endif
1445
1446 lck_mtx_lock(&tasks_threads_lock);
1447 task_lock(parent_task);
1448
1449 /*
1450 * Fail thread creation if parent task is being torn down or has too many threads
1451 * If the caller asked for TH_OPTION_NOSUSP, also fail if the parent task is suspended
1452 */
1453 if (parent_task->active == 0 || parent_task->halting ||
1454 (parent_task->suspend_count > 0 && (options & TH_OPTION_NOSUSP) != 0) ||
1455 (parent_task->thread_count >= task_threadmax && parent_task != kernel_task)) {
1456 task_unlock(parent_task);
1457 lck_mtx_unlock(&tasks_threads_lock);
1458
1459 ipc_thread_disable(new_thread);
1460 ipc_thread_terminate(new_thread);
1461 kfree_data(new_thread->thread_io_stats,
1462 sizeof(struct io_stat_info));
1463 lck_mtx_destroy(&new_thread->mutex, &thread_lck_grp);
1464 kr = KERN_FAILURE;
1465 goto out_thread_cleanup;
1466 }
1467
1468 /* Protected by the tasks_threads_lock */
1469 new_thread->thread_id = ++thread_unique_id;
1470
1471 ctid_table_add(new_thread);
1472
1473 /* New threads inherit any default state on the task */
1474 machine_thread_inherit_taskwide(new_thread, parent_task);
1475
1476 task_reference_grp(parent_task, TASK_GRP_INTERNAL);
1477
1478 if (parent_task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
1479 /*
1480 * This task has a per-thread CPU limit; make sure this new thread
1481 * gets its limit set too, before it gets out of the kernel.
1482 */
1483 act_set_astledger(new_thread);
1484 }
1485
1486 /* Instantiate a thread ledger. Do not fail thread creation if ledger creation fails. */
1487 if ((new_thread->t_threadledger = ledger_instantiate(thread_ledger_template,
1488 LEDGER_CREATE_INACTIVE_ENTRIES)) != LEDGER_NULL) {
1489 ledger_entry_setactive(new_thread->t_threadledger, thread_ledgers.cpu_time);
1490 }
1491
1492 new_thread->t_bankledger = LEDGER_NULL;
1493 new_thread->t_deduct_bank_ledger_time = 0;
1494 new_thread->t_deduct_bank_ledger_energy = 0;
1495
1496 new_thread->t_ledger = parent_task->ledger;
1497 if (new_thread->t_ledger) {
1498 ledger_reference(new_thread->t_ledger);
1499 }
1500
1501 recount_thread_init(&new_thread->th_recount);
1502
1503 /* Cache the task's map */
1504 new_thread->map = parent_task->map;
1505
1506 new_thread->depress_timer = timer_call_alloc(thread_depress_expire, new_thread);
1507 new_thread->wait_timer = timer_call_alloc(thread_timer_expire, new_thread);
1508
1509 #if CONFIG_CPU_COUNTERS
1510 kpc_thread_create(new_thread);
1511 #endif /* CONFIG_CPU_COUNTERS */
1512
1513 /* Set the thread's scheduling parameters */
1514 new_thread->sched_mode = SCHED(initial_thread_sched_mode)(parent_task);
1515 new_thread->max_priority = parent_task->max_priority;
1516 new_thread->task_priority = parent_task->priority;
1517
1518 #if CONFIG_THREAD_GROUPS
1519 thread_group_init_thread(new_thread, parent_task);
1520 #endif /* CONFIG_THREAD_GROUPS */
1521
	int new_priority = (priority < 0) ? parent_task->priority : priority;
1524 if (new_priority > new_thread->max_priority) {
1525 new_priority = new_thread->max_priority;
1526 }
1527 #if !defined(XNU_TARGET_OS_OSX)
1528 if (new_priority < MAXPRI_THROTTLE) {
1529 new_priority = MAXPRI_THROTTLE;
1530 }
1531 #endif /* !defined(XNU_TARGET_OS_OSX) */
1532
1533 new_thread->importance = new_priority - new_thread->task_priority;
1534
1535 sched_set_thread_base_priority(new_thread, new_priority);
1536
1537 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
1538 new_thread->sched_stamp = sched_tick;
1539 #if CONFIG_SCHED_CLUTCH
1540 new_thread->pri_shift = sched_clutch_thread_pri_shift(new_thread, new_thread->th_sched_bucket);
1541 #else /* CONFIG_SCHED_CLUTCH */
1542 new_thread->pri_shift = sched_pri_shifts[new_thread->th_sched_bucket];
1543 #endif /* CONFIG_SCHED_CLUTCH */
1544 #endif /* defined(CONFIG_SCHED_TIMESHARE_CORE) */
1545
1546 if (parent_task->max_priority <= MAXPRI_THROTTLE) {
1547 sched_thread_mode_demote(new_thread, TH_SFLAG_THROTTLED);
1548 }
1549
1550 thread_policy_create(new_thread);
1551
1552 /* Chain the thread onto the task's list */
1553 queue_enter(&parent_task->threads, new_thread, thread_t, task_threads);
1554 parent_task->thread_count++;
1555
1556 /* So terminating threads don't need to take the task lock to decrement */
1557 os_atomic_inc(&parent_task->active_thread_count, relaxed);
1558
1559 queue_enter(&threads, new_thread, thread_t, threads);
1560 threads_count++;
1561
1562 new_thread->active = TRUE;
1563 if (task_is_a_corpse_fork(parent_task)) {
1564 /* Set the inspection bit if the task is a corpse fork */
1565 new_thread->inspection = TRUE;
1566 } else {
1567 new_thread->inspection = FALSE;
1568 }
1569 new_thread->corpse_dup = FALSE;
1570 new_thread->turnstile = turnstile_alloc();
1571 new_thread->ctsid = turnstile_compact_id_get();
1572
1573
1574 *out_thread = new_thread;
1575
1576 if (kdebug_enable) {
1577 long args[4] = {};
1578
1579 kdbg_trace_data(get_bsdtask_info(parent_task), &args[1], &args[3]);
1580
1581 /*
1582 * Starting with 26604425, exec'ing creates a new task/thread.
1583 *
1584 * NEWTHREAD in the current process has two possible meanings:
1585 *
1586 * 1) Create a new thread for this process.
1587 * 2) Create a new thread for the future process this will become in an
1588 * exec.
1589 *
1590 * To disambiguate these, arg3 will be set to TRUE for case #2.
1591 *
1592 * The value we need to find (TPF_EXEC_COPY) is stable in the case of a
1593 * task exec'ing. The read of t_procflags does not take the proc_lock.
1594 */
1595 args[2] = task_is_exec_copy(parent_task) ? 1 : 0;
1596
1597 KDBG_RELEASE(TRACE_DATA_NEWTHREAD, (uintptr_t)thread_tid(new_thread),
1598 args[1], args[2], args[3]);
1599
1600 kdebug_proc_name_args(get_bsdtask_info(parent_task), args);
1601 KDBG_RELEASE(TRACE_STRING_NEWTHREAD, args[0], args[1], args[2],
1602 args[3]);
1603 }
1604
1605 DTRACE_PROC1(lwp__create, thread_t, *out_thread);
1606
1607 kr = KERN_SUCCESS;
1608 goto done;
1609
1610 out_thread_cleanup:
1611 #ifdef MACH_BSD
1612 {
1613 struct uthread *ut = get_bsdthread_info(new_thread);
1614
1615 uthread_cleanup(ut, &tro_tpl);
1616 uthread_destroy(ut);
1617 }
1618 #endif /* MACH_BSD */
1619
1620 machine_thread_destroy(new_thread);
1621
1622 thread_ro_destroy(new_thread);
1623 zfree(thread_zone, new_thread);
1624
1625 done:
1626 return kr;
1627 }
1628
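/*
 * Common path for the thread_create() variants: creates a held (suspended)
 * thread in the task and hands it back without starting it.
 */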
1629 static kern_return_t
thread_create_with_options_internal(
	task_t task,
	thread_t *new_thread,
	boolean_t from_user,
	thread_create_internal_options_t options,
	thread_continue_t continuation)
1636 {
1637 kern_return_t result;
1638 thread_t thread;
1639
1640 if (task == TASK_NULL || task == kernel_task) {
1641 return KERN_INVALID_ARGUMENT;
1642 }
1643
1644 #if CONFIG_MACF
1645 if (from_user && current_task() != task &&
1646 mac_proc_check_remote_thread_create(task, -1, NULL, 0) != 0) {
1647 return KERN_DENIED;
1648 }
1649 #endif
1650
1651 result = thread_create_internal(task, -1, continuation, NULL, options, &thread);
1652 if (result != KERN_SUCCESS) {
1653 return result;
1654 }
1655
1656 thread->user_stop_count = 1;
1657 thread_hold(thread);
1658 if (task->suspend_count > 0) {
1659 thread_hold(thread);
1660 }
1661
1662 if (from_user) {
1663 extmod_statistics_incr_thread_create(task);
1664 }
1665
1666 task_unlock(task);
1667 lck_mtx_unlock(&tasks_threads_lock);
1668
1669 *new_thread = thread;
1670
1671 return KERN_SUCCESS;
1672 }
1673
1674 kern_return_t
thread_create_immovable(
	task_t task,
	thread_t *new_thread)
1678 {
1679 return thread_create_with_options_internal(task, new_thread, FALSE,
1680 TH_OPTION_NONE, (thread_continue_t)thread_bootstrap_return);
1681 }
1682
1683 kern_return_t
thread_create_from_user(
	task_t task,
	thread_t *new_thread)
1687 {
1688 /* All thread ports are created immovable by default */
1689 return thread_create_with_options_internal(task, new_thread, TRUE, TH_OPTION_NONE,
1690 (thread_continue_t)thread_bootstrap_return);
1691 }
1692
1693 kern_return_t
thread_create_with_continuation(
	task_t task,
	thread_t *new_thread,
	thread_continue_t continuation)
1698 {
1699 return thread_create_with_options_internal(task, new_thread, FALSE, TH_OPTION_NONE, continuation);
1700 }
1701
1702 /*
1703 * Create a thread that is already started, but is waiting on an event
1704 */
1705 static kern_return_t
thread_create_waiting_internal(
	task_t task,
	thread_continue_t continuation,
	event_t event,
	block_hint_t block_hint,
	thread_create_internal_options_t options,
	thread_t *new_thread)
1713 {
1714 kern_return_t result;
1715 thread_t thread;
1716 wait_interrupt_t wait_interrupt = THREAD_INTERRUPTIBLE;
1717
1718 if (task == TASK_NULL || task == kernel_task) {
1719 return KERN_INVALID_ARGUMENT;
1720 }
1721
1722 result = thread_create_internal(task, -1, continuation, NULL,
1723 options, &thread);
1724 if (result != KERN_SUCCESS) {
1725 return result;
1726 }
1727
1728 /* note no user_stop_count or thread_hold here */
1729
1730 if (task->suspend_count > 0) {
1731 thread_hold(thread);
1732 }
1733
1734 thread_mtx_lock(thread);
1735 thread_set_pending_block_hint(thread, block_hint);
1736 if (options & TH_OPTION_WORKQ) {
1737 thread->static_param = true;
1738 event = workq_thread_init_and_wq_lock(task, thread);
1739 } else if (options & TH_OPTION_MAINTHREAD) {
1740 wait_interrupt = THREAD_UNINT;
1741 }
1742 thread_start_in_assert_wait(thread,
1743 assert_wait_queue(event), CAST_EVENT64_T(event),
1744 wait_interrupt);
1745 thread_mtx_unlock(thread);
1746
1747 task_unlock(task);
1748 lck_mtx_unlock(&tasks_threads_lock);
1749
1750 *new_thread = thread;
1751
1752 return KERN_SUCCESS;
1753 }
1754
1755 kern_return_t
main_thread_create_waiting(
	task_t task,
	thread_continue_t continuation,
	event_t event,
	thread_t *new_thread)
1761 {
1762 return thread_create_waiting_internal(task, continuation, event,
1763 kThreadWaitNone, TH_OPTION_MAINTHREAD, new_thread);
1764 }
1765
1766
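/*
 * Create a thread in the target task, seed it with the supplied machine
 * state, and start it running. Shared by the kernel-internal and
 * user-originated entry points below.
 */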
1767 static kern_return_t
1768 thread_create_running_internal2(
1769 task_t task,
1770 int flavor,
1771 thread_state_t new_state,
1772 mach_msg_type_number_t new_state_count,
1773 thread_t *new_thread,
1774 boolean_t from_user)
1775 {
1776 kern_return_t result;
1777 thread_t thread;
1778
1779 if (task == TASK_NULL || task == kernel_task) {
1780 return KERN_INVALID_ARGUMENT;
1781 }
1782
1783 #if CONFIG_MACF
1784 if (from_user && current_task() != task &&
1785 mac_proc_check_remote_thread_create(task, flavor, new_state, new_state_count) != 0) {
1786 return KERN_DENIED;
1787 }
1788 #endif
1789
1790 result = thread_create_internal(task, -1,
1791 (thread_continue_t)thread_bootstrap_return, NULL,
1792 TH_OPTION_NONE, &thread);
1793 if (result != KERN_SUCCESS) {
1794 return result;
1795 }
1796
1797 if (task->suspend_count > 0) {
1798 thread_hold(thread);
1799 }
1800
1801 if (from_user) {
1802 result = machine_thread_state_convert_from_user(thread, flavor,
1803 new_state, new_state_count, NULL, 0, TSSF_FLAGS_NONE);
1804 }
1805 if (result == KERN_SUCCESS) {
1806 result = machine_thread_set_state(thread, flavor, new_state,
1807 new_state_count);
1808 }
1809 if (result != KERN_SUCCESS) {
1810 task_unlock(task);
1811 lck_mtx_unlock(&tasks_threads_lock);
1812
1813 thread_terminate(thread);
1814 thread_deallocate(thread);
1815 return result;
1816 }
1817
1818 thread_mtx_lock(thread);
1819 thread_start(thread);
1820 thread_mtx_unlock(thread);
1821
1822 if (from_user) {
1823 extmod_statistics_incr_thread_create(task);
1824 }
1825
1826 task_unlock(task);
1827 lck_mtx_unlock(&tasks_threads_lock);
1828
1829 *new_thread = thread;
1830
1831 return result;
1832 }
1833
1834 /* Prototype, see justification above */
1835 kern_return_t
1836 thread_create_running(
1837 task_t task,
1838 int flavor,
1839 thread_state_t new_state,
1840 mach_msg_type_number_t new_state_count,
1841 thread_t *new_thread);
1842
1843 kern_return_t
1844 thread_create_running(
1845 task_t task,
1846 int flavor,
1847 thread_state_t new_state,
1848 mach_msg_type_number_t new_state_count,
1849 thread_t *new_thread)
1850 {
1851 return thread_create_running_internal2(
1852 task, flavor, new_state, new_state_count,
1853 new_thread, FALSE);
1854 }
1855
1856 kern_return_t
1857 thread_create_running_from_user(
1858 task_t task,
1859 int flavor,
1860 thread_state_t new_state,
1861 mach_msg_type_number_t new_state_count,
1862 thread_t *new_thread)
1863 {
1864 return thread_create_running_internal2(
1865 task, flavor, new_state, new_state_count,
1866 new_thread, TRUE);
1867 }
1868
1869 kern_return_t
1870 thread_create_workq_waiting(
1871 task_t task,
1872 thread_continue_t continuation,
1873 thread_t *new_thread,
1874 bool is_permanently_bound)
1875 {
1876 /*
1877	 * Create the thread, but don't pin its control port just yet, in case someone
1878	 * calls task_threads() and deallocates the pinned port before the kernel
1879	 * copyout happens, which would result in a pinned port guard exception.
1880	 * Instead, pin and copy out atomically during workq_setup_and_run().
1881 */
1882 int options = TH_OPTION_WORKQ;
1883
1884 /*
1885	 * Until we add support for delayed thread creation for permanently
1886 * bound workqueue threads, we do not pass TH_OPTION_NOSUSP for their
1887 * creation.
1888 */
1889 if (!is_permanently_bound) {
1890 options |= TH_OPTION_NOSUSP;
1891 }
1892
1893 return thread_create_waiting_internal(task, continuation, NULL,
1894 is_permanently_bound ? kThreadWaitParkedBoundWorkQueue : kThreadWaitParkedWorkQueue,
1895 options, new_thread);
1896 }
1897
1898 /*
1899 * kernel_thread_create:
1900 *
1901 * Create a thread in the kernel task
1902 * to execute in kernel context.
1903 */
1904 kern_return_t
1905 kernel_thread_create(
1906 thread_continue_t continuation,
1907 void *parameter,
1908 integer_t priority,
1909 thread_t *new_thread)
1910 {
1911 kern_return_t result;
1912 thread_t thread;
1913 task_t task = kernel_task;
1914
1915 result = thread_create_internal(task, priority, continuation, parameter,
1916 TH_OPTION_NONE, &thread);
1917 if (result != KERN_SUCCESS) {
1918 return result;
1919 }
1920
1921 task_unlock(task);
1922 lck_mtx_unlock(&tasks_threads_lock);
1923
1924 stack_alloc(thread);
1925 assert(thread->kernel_stack != 0);
1926 #if !defined(XNU_TARGET_OS_OSX)
1927 if (priority > BASEPRI_KERNEL)
1928 #endif
1929 thread->reserved_stack = thread->kernel_stack;
1930
1931 if (debug_task & 1) {
1932 kprintf("kernel_thread_create: thread = %p continuation = %p\n", thread, continuation);
1933 }
1934 *new_thread = thread;
1935
1936 return result;
1937 }
1938
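/*
 * kernel_thread_start_priority:
 *
 * Create a kernel thread at the given priority and immediately start it.
 * The new thread is returned to the caller in *new_thread.
 */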
1939 kern_return_t
1940 kernel_thread_start_priority(
1941 thread_continue_t continuation,
1942 void *parameter,
1943 integer_t priority,
1944 thread_t *new_thread)
1945 {
1946 kern_return_t result;
1947 thread_t thread;
1948
1949 result = kernel_thread_create(continuation, parameter, priority, &thread);
1950 if (result != KERN_SUCCESS) {
1951 return result;
1952 }
1953
1954 *new_thread = thread;
1955
1956 thread_mtx_lock(thread);
1957 thread_start(thread);
1958 thread_mtx_unlock(thread);
1959
1960 return result;
1961 }
1962
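/*
 * kernel_thread_start:
 *
 * Convenience wrapper that creates and starts a kernel thread at the
 * default priority (-1). Typical usage, as a sketch only (my_continuation
 * and my_arg are placeholder names supplied by the caller):
 *
 *	thread_t thread;
 *	if (kernel_thread_start(my_continuation, my_arg, &thread) == KERN_SUCCESS) {
 *		thread_deallocate(thread); // drop the creation reference when no longer needed
 *	}
 */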
1963 kern_return_t
1964 kernel_thread_start(
1965 thread_continue_t continuation,
1966 void *parameter,
1967 thread_t *new_thread)
1968 {
1969 return kernel_thread_start_priority(continuation, parameter, -1, new_thread);
1970 }
1971
1972 /* Separated into helper function so it can be used by THREAD_BASIC_INFO and THREAD_EXTENDED_INFO */
1973 /* it is assumed that the thread is locked by the caller */
1974 static void
1975 retrieve_thread_basic_info(thread_t thread, thread_basic_info_t basic_info)
1976 {
1977 int state, flags;
1978
1979 /* fill in info */
1980
1981 thread_read_times(thread, &basic_info->user_time,
1982 &basic_info->system_time, NULL);
1983
1984 /*
1985 * Update lazy-evaluated scheduler info because someone wants it.
1986 */
1987 if (SCHED(can_update_priority)(thread)) {
1988 SCHED(update_priority)(thread);
1989 }
1990
1991 basic_info->sleep_time = 0;
1992
1993 /*
1994 * To calculate cpu_usage, first correct for timer rate,
1995 * then for 5/8 ageing. The correction factor [3/5] is
1996 * (1/(5/8) - 1).
1997 */
1998 basic_info->cpu_usage = 0;
1999 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
2000 if (sched_tick_interval) {
2001 basic_info->cpu_usage = (integer_t)(((uint64_t)thread->cpu_usage
2002 * TH_USAGE_SCALE) / sched_tick_interval);
2003 basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
2004 }
2005 #endif
2006
2007 if (basic_info->cpu_usage > TH_USAGE_SCALE) {
2008 basic_info->cpu_usage = TH_USAGE_SCALE;
2009 }
2010
2011 basic_info->policy = ((thread->sched_mode == TH_MODE_TIMESHARE)?
2012 POLICY_TIMESHARE: POLICY_RR);
2013
2014 flags = 0;
2015 if (thread->options & TH_OPT_IDLE_THREAD) {
2016 flags |= TH_FLAGS_IDLE;
2017 }
2018
2019 if (thread->options & TH_OPT_GLOBAL_FORCED_IDLE) {
2020 flags |= TH_FLAGS_GLOBAL_FORCED_IDLE;
2021 }
2022
2023 if (!thread->kernel_stack) {
2024 flags |= TH_FLAGS_SWAPPED;
2025 }
2026
2027 state = 0;
2028 if (thread->state & TH_TERMINATE) {
2029 state = TH_STATE_HALTED;
2030 } else if (thread->state & TH_RUN) {
2031 state = TH_STATE_RUNNING;
2032 } else if (thread->state & TH_UNINT) {
2033 state = TH_STATE_UNINTERRUPTIBLE;
2034 } else if (thread->state & TH_SUSP) {
2035 state = TH_STATE_STOPPED;
2036 } else if (thread->state & TH_WAIT) {
2037 state = TH_STATE_WAITING;
2038 }
2039
2040 basic_info->run_state = state;
2041 basic_info->flags = flags;
2042
2043 basic_info->suspend_count = thread->user_stop_count;
2044
2045 return;
2046 }
2047
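/*
 * thread_info_internal:
 *
 * Return information about the target thread for the requested flavor
 * (basic, identifier, scheduling policy, extended, or debug info),
 * validating and updating the caller-supplied count.
 */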
2048 kern_return_t
2049 thread_info_internal(
2050 thread_t thread,
2051 thread_flavor_t flavor,
2052 thread_info_t thread_info_out, /* ptr to OUT array */
2053 mach_msg_type_number_t *thread_info_count) /*IN/OUT*/
2054 {
2055 spl_t s;
2056
2057 if (thread == THREAD_NULL) {
2058 return KERN_INVALID_ARGUMENT;
2059 }
2060
2061 if (flavor == THREAD_BASIC_INFO) {
2062 if (*thread_info_count < THREAD_BASIC_INFO_COUNT) {
2063 return KERN_INVALID_ARGUMENT;
2064 }
2065
2066 s = splsched();
2067 thread_lock(thread);
2068
2069 retrieve_thread_basic_info(thread, (thread_basic_info_t) thread_info_out);
2070
2071 thread_unlock(thread);
2072 splx(s);
2073
2074 *thread_info_count = THREAD_BASIC_INFO_COUNT;
2075
2076 return KERN_SUCCESS;
2077 } else if (flavor == THREAD_IDENTIFIER_INFO) {
2078 thread_identifier_info_t identifier_info;
2079
2080 if (*thread_info_count < THREAD_IDENTIFIER_INFO_COUNT) {
2081 return KERN_INVALID_ARGUMENT;
2082 }
2083
2084 identifier_info = __IGNORE_WCASTALIGN((thread_identifier_info_t)thread_info_out);
2085
2086 s = splsched();
2087 thread_lock(thread);
2088
2089 identifier_info->thread_id = thread->thread_id;
2090 identifier_info->thread_handle = thread->machine.cthread_self;
2091 identifier_info->dispatch_qaddr = thread_dispatchqaddr(thread);
2092
2093 thread_unlock(thread);
2094 splx(s);
2095 return KERN_SUCCESS;
2096 } else if (flavor == THREAD_SCHED_TIMESHARE_INFO) {
2097 policy_timeshare_info_t ts_info;
2098
2099 if (*thread_info_count < POLICY_TIMESHARE_INFO_COUNT) {
2100 return KERN_INVALID_ARGUMENT;
2101 }
2102
2103 ts_info = (policy_timeshare_info_t)thread_info_out;
2104
2105 s = splsched();
2106 thread_lock(thread);
2107
2108 if (thread->sched_mode != TH_MODE_TIMESHARE) {
2109 thread_unlock(thread);
2110 splx(s);
2111 return KERN_INVALID_POLICY;
2112 }
2113
2114 ts_info->depressed = (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) != 0;
2115 if (ts_info->depressed) {
2116 ts_info->base_priority = DEPRESSPRI;
2117 ts_info->depress_priority = thread->base_pri;
2118 } else {
2119 ts_info->base_priority = thread->base_pri;
2120 ts_info->depress_priority = -1;
2121 }
2122
2123 ts_info->cur_priority = thread->sched_pri;
2124 ts_info->max_priority = thread->max_priority;
2125
2126 thread_unlock(thread);
2127 splx(s);
2128
2129 *thread_info_count = POLICY_TIMESHARE_INFO_COUNT;
2130
2131 return KERN_SUCCESS;
2132 } else if (flavor == THREAD_SCHED_FIFO_INFO) {
2133 if (*thread_info_count < POLICY_FIFO_INFO_COUNT) {
2134 return KERN_INVALID_ARGUMENT;
2135 }
2136
2137 return KERN_INVALID_POLICY;
2138 } else if (flavor == THREAD_SCHED_RR_INFO) {
2139 policy_rr_info_t rr_info;
2140 uint32_t quantum_time;
2141 uint64_t quantum_ns;
2142
2143 if (*thread_info_count < POLICY_RR_INFO_COUNT) {
2144 return KERN_INVALID_ARGUMENT;
2145 }
2146
2147 rr_info = (policy_rr_info_t) thread_info_out;
2148
2149 s = splsched();
2150 thread_lock(thread);
2151
2152 if (thread->sched_mode == TH_MODE_TIMESHARE) {
2153 thread_unlock(thread);
2154 splx(s);
2155
2156 return KERN_INVALID_POLICY;
2157 }
2158
2159 rr_info->depressed = (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) != 0;
2160 if (rr_info->depressed) {
2161 rr_info->base_priority = DEPRESSPRI;
2162 rr_info->depress_priority = thread->base_pri;
2163 } else {
2164 rr_info->base_priority = thread->base_pri;
2165 rr_info->depress_priority = -1;
2166 }
2167
2168 quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
2169 absolutetime_to_nanoseconds(quantum_time, &quantum_ns);
2170
2171 rr_info->max_priority = thread->max_priority;
2172 rr_info->quantum = (uint32_t)(quantum_ns / 1000 / 1000);
2173
2174 thread_unlock(thread);
2175 splx(s);
2176
2177 *thread_info_count = POLICY_RR_INFO_COUNT;
2178
2179 return KERN_SUCCESS;
2180 } else if (flavor == THREAD_EXTENDED_INFO) {
2181 thread_basic_info_data_t basic_info;
2182 thread_extended_info_t extended_info = __IGNORE_WCASTALIGN((thread_extended_info_t)thread_info_out);
2183
2184 if (*thread_info_count < THREAD_EXTENDED_INFO_COUNT) {
2185 return KERN_INVALID_ARGUMENT;
2186 }
2187
2188 s = splsched();
2189 thread_lock(thread);
2190
2191 /* NOTE: This mimics fill_taskthreadinfo(), which is the function used by proc_pidinfo() for
2192 * the PROC_PIDTHREADINFO flavor (which can't be used on corpses)
2193 */
2194 retrieve_thread_basic_info(thread, &basic_info);
2195 extended_info->pth_user_time = (((uint64_t)basic_info.user_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.user_time.microseconds * NSEC_PER_USEC));
2196 extended_info->pth_system_time = (((uint64_t)basic_info.system_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.system_time.microseconds * NSEC_PER_USEC));
2197
2198 extended_info->pth_cpu_usage = basic_info.cpu_usage;
2199 extended_info->pth_policy = basic_info.policy;
2200 extended_info->pth_run_state = basic_info.run_state;
2201 extended_info->pth_flags = basic_info.flags;
2202 extended_info->pth_sleep_time = basic_info.sleep_time;
2203 extended_info->pth_curpri = thread->sched_pri;
2204 extended_info->pth_priority = thread->base_pri;
2205 extended_info->pth_maxpriority = thread->max_priority;
2206
2207 bsd_getthreadname(get_bsdthread_info(thread), extended_info->pth_name);
2208
2209 thread_unlock(thread);
2210 splx(s);
2211
2212 *thread_info_count = THREAD_EXTENDED_INFO_COUNT;
2213
2214 return KERN_SUCCESS;
2215 } else if (flavor == THREAD_DEBUG_INFO_INTERNAL) {
2216 #if DEVELOPMENT || DEBUG
2217 thread_debug_info_internal_t dbg_info;
2218 if (*thread_info_count < THREAD_DEBUG_INFO_INTERNAL_COUNT) {
2219 return KERN_NOT_SUPPORTED;
2220 }
2221
2222 if (thread_info_out == NULL) {
2223 return KERN_INVALID_ARGUMENT;
2224 }
2225
2226 dbg_info = __IGNORE_WCASTALIGN((thread_debug_info_internal_t)thread_info_out);
2227 dbg_info->page_creation_count = thread->t_page_creation_count;
2228
2229 *thread_info_count = THREAD_DEBUG_INFO_INTERNAL_COUNT;
2230 return KERN_SUCCESS;
2231 #endif /* DEVELOPMENT || DEBUG */
2232 return KERN_NOT_SUPPORTED;
2233 }
2234
2235 return KERN_INVALID_ARGUMENT;
2236 }
2237
2238 static void
2239 _convert_mach_to_time_value(uint64_t time_mach, time_value_t *time)
2240 {
2241 clock_sec_t secs;
2242 clock_usec_t usecs;
2243 absolutetime_to_microtime(time_mach, &secs, &usecs);
2244 time->seconds = (typeof(time->seconds))secs;
2245 time->microseconds = usecs;
2246 }
2247
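/*
 * thread_read_times:
 *
 * Report accumulated user, system and runnable time for the thread,
 * converted from Mach absolute time to time_value_t. user_time and
 * system_time are filled only when both pointers are supplied;
 * runnable_time is optional.
 */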
2248 void
2249 thread_read_times(
2250 thread_t thread,
2251 time_value_t *user_time,
2252 time_value_t *system_time,
2253 time_value_t *runnable_time)
2254 {
2255 if (user_time && system_time) {
2256 struct recount_times_mach times = recount_thread_times(thread);
2257 _convert_mach_to_time_value(times.rtm_user, user_time);
2258 _convert_mach_to_time_value(times.rtm_system, system_time);
2259 }
2260
2261 if (runnable_time) {
2262 uint64_t runnable_time_mach = timer_grab(&thread->runnable_timer);
2263 _convert_mach_to_time_value(runnable_time_mach, runnable_time);
2264 }
2265 }
2266
2267 uint64_t
2268 thread_get_runtime_self(void)
2269 {
2270 /*
2271 * Must be guaranteed to stay on the same CPU and not be updated by the
2272 * scheduler.
2273 */
2274 boolean_t interrupt_state = ml_set_interrupts_enabled(FALSE);
2275 uint64_t time_mach = recount_current_thread_time_mach();
2276 ml_set_interrupts_enabled(interrupt_state);
2277 return time_mach;
2278 }
2279
2280 /*
2281 * thread_wire_internal:
2282 *
2283 * Specify that the target thread must always be able
2284 * to run and to allocate memory.
2285 */
2286 kern_return_t
2287 thread_wire_internal(
2288 host_priv_t host_priv,
2289 thread_t thread,
2290 boolean_t wired,
2291 boolean_t *prev_state)
2292 {
2293 if (host_priv == NULL || thread != current_thread()) {
2294 return KERN_INVALID_ARGUMENT;
2295 }
2296
2297 if (prev_state) {
2298 *prev_state = (thread->options & TH_OPT_VMPRIV) != 0;
2299 }
2300
2301 if (wired) {
2302 if (!(thread->options & TH_OPT_VMPRIV)) {
2303 vm_page_free_reserve(1); /* XXX */
2304 }
2305 thread->options |= TH_OPT_VMPRIV;
2306 } else {
2307 if (thread->options & TH_OPT_VMPRIV) {
2308 vm_page_free_reserve(-1); /* XXX */
2309 }
2310 thread->options &= ~TH_OPT_VMPRIV;
2311 }
2312
2313 return KERN_SUCCESS;
2314 }
2315
2316
2317 /*
2318 * thread_wire:
2319 *
2320 * User-api wrapper for thread_wire_internal()
2321 */
2322 kern_return_t
2323 thread_wire(
2324 host_priv_t host_priv __unused,
2325 thread_t thread __unused,
2326 boolean_t wired __unused)
2327 {
2328 return KERN_NOT_SUPPORTED;
2329 }
2330
2331 boolean_t
2332 is_external_pageout_thread(void)
2333 {
2334 return current_thread() == pgo_iothread_external_state.pgo_iothread;
2335 }
2336
2337 boolean_t
2338 is_vm_privileged(void)
2339 {
2340 return current_thread()->options & TH_OPT_VMPRIV ? TRUE : FALSE;
2341 }
2342
2343 boolean_t
2344 set_vm_privilege(boolean_t privileged)
2345 {
2346 boolean_t was_vmpriv;
2347
2348 if (current_thread()->options & TH_OPT_VMPRIV) {
2349 was_vmpriv = TRUE;
2350 } else {
2351 was_vmpriv = FALSE;
2352 }
2353
2354 if (privileged != FALSE) {
2355 current_thread()->options |= TH_OPT_VMPRIV;
2356 } else {
2357 current_thread()->options &= ~TH_OPT_VMPRIV;
2358 }
2359
2360 return was_vmpriv;
2361 }
2362
2363 void
2364 thread_floor_boost_set_promotion_locked(thread_t thread)
2365 {
2366 assert(thread->priority_floor_count > 0);
2367
2368 if (!(thread->sched_flags & TH_SFLAG_FLOOR_PROMOTED)) {
2369 sched_thread_promote_reason(thread, TH_SFLAG_FLOOR_PROMOTED, 0);
2370 }
2371 }
2372
2373 /*! @function thread_priority_floor_start
2374 * @abstract boost the current thread priority to floor.
2375 * @discussion Increase the priority of the current thread to at least MINPRI_FLOOR.
2376 * The boost will be maintained until a corresponding thread_priority_floor_end()
2377 * is called. Every call of thread_priority_floor_start() needs to have a corresponding
2378 * call to thread_priority_floor_end() from the same thread.
2379 * No thread can return to userspace before calling thread_priority_floor_end().
2380 *
2381 * NOTE: avoid using this function. Try to use gate_t or sleep_with_inheritor()
2382 * instead.
2383 * @result a token to be given to the corresponding thread_priority_floor_end()
2384 */
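/*
 * Typical pairing (sketch):
 *
 *	thread_pri_floor_t token = thread_priority_floor_start();
 *	... work that must not run below MINPRI_FLOOR ...
 *	thread_priority_floor_end(&token);
 */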
2385 thread_pri_floor_t
2386 thread_priority_floor_start(void)
2387 {
2388 thread_pri_floor_t ret;
2389 thread_t thread = current_thread();
2390 __assert_only uint16_t prev_priority_floor_count;
2391
2392 assert(thread->priority_floor_count < UINT16_MAX);
2393 prev_priority_floor_count = thread->priority_floor_count++;
2394 #if MACH_ASSERT
2395 /*
2396	 * Set the AST to verify that priority_floor_count
2397	 * has dropped back to zero by the time the thread
2398	 * returns to userspace.
2399	 * Set it only once, when we increment the count for the first time.
2400 */
2401 if (prev_priority_floor_count == 0) {
2402 act_set_debug_assert();
2403 }
2404 #endif
2405
2406 ret.thread = thread;
2407 return ret;
2408 }
2409
2410 /*! @function thread_priority_floor_end
2411 * @abstract ends the floor boost.
2412 * @param token the token obtained from thread_priority_floor_start()
2413 * @discussion ends the priority floor boost started with thread_priority_floor_start()
2414 */
2415 void
2416 thread_priority_floor_end(thread_pri_floor_t *token)
2417 {
2418 thread_t thread = current_thread();
2419
2420 assert(thread->priority_floor_count > 0);
2421 assertf(token->thread == thread, "thread_priority_floor_end called from a different thread from thread_priority_floor_start %p %p", thread, token->thread);
2422
2423 if ((thread->priority_floor_count-- == 1) && (thread->sched_flags & TH_SFLAG_FLOOR_PROMOTED)) {
2424 spl_t s = splsched();
2425 thread_lock(thread);
2426
2427 if (thread->sched_flags & TH_SFLAG_FLOOR_PROMOTED) {
2428 sched_thread_unpromote_reason(thread, TH_SFLAG_FLOOR_PROMOTED, 0);
2429 }
2430
2431 thread_unlock(thread);
2432 splx(s);
2433 }
2434
2435 token->thread = NULL;
2436 }
2437
2438 /*
2439 * XXX assuming current thread only, for now...
2440 */
2441 void
2442 thread_ast_mach_exception(
2443 thread_t thread,
2444 int os_reason,
2445 exception_type_t exception_type,
2446 mach_exception_data_type_t code,
2447 mach_exception_data_type_t subcode,
2448 bool fatal,
2449 bool ktriage)
2450 {
2451 assert(thread == current_thread());
2452
2453 /*
2454 * Don't set up the AST for kernel threads; this check is needed to ensure
2455 * that the guard_exc_* fields in the thread structure are set only by the
2456	 * current thread and therefore don't require a lock.
2457 */
2458 if (get_threadtask(thread) == kernel_task) {
2459 return;
2460 }
2461
2462 /*
2463 * Use the saved state area of the thread structure
2464 * to store all info required to handle the AST when
2465 * returning to userspace. It's possible that there is
2466 * already a pending guard exception.
2467 *
2468 * Fatal guard exceptions cannot be overwritten; non-fatal
2469 * guards can be overwritten by fatal guards.
2470 */
2471 if (thread->mach_exc_info.code && (thread->mach_exc_fatal || !fatal)) {
2472 return;
2473 }
2474
2475 thread->mach_exc_info.os_reason = os_reason;
2476 thread->mach_exc_info.exception_type = exception_type;
2477 thread->mach_exc_info.code = code;
2478 thread->mach_exc_info.subcode = subcode;
2479 thread->mach_exc_fatal = fatal;
2480 thread->mach_exc_ktriage = ktriage;
2481
2482 spl_t s = splsched();
2483 thread_ast_set(thread, AST_MACH_EXCEPTION);
2484 ast_propagate(thread);
2485 splx(s);
2486 }
2487
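/*
 * thread_guard_violation:
 *
 * Convenience wrapper that records a guard-type mach exception
 * (OS_REASON_GUARD / EXC_GUARD) against the thread via
 * thread_ast_mach_exception().
 */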
2488 void
2489 thread_guard_violation(
2490 thread_t thread,
2491 mach_exception_data_type_t code,
2492 mach_exception_data_type_t subcode,
2493 bool fatal)
2494 {
2495 assert(EXC_GUARD_DECODE_GUARD_TYPE(code));
2496 thread_ast_mach_exception(thread, OS_REASON_GUARD, EXC_GUARD, code, subcode, fatal, false);
2497 }
2498
2499 #if CONFIG_DEBUG_SYSCALL_REJECTION
2500 extern void rejected_syscall_guard_ast(thread_t __unused t, mach_exception_data_type_t code, mach_exception_data_type_t subcode);
2501 #endif /* CONFIG_DEBUG_SYSCALL_REJECTION */
2502
2503 /*
2504 * guard_ast:
2505 *
2506 * Handle AST_MACH_EXCEPTION with reason OS_REASON_GUARD for a thread. This
2507 * routine looks at the state saved in the thread structure to determine
2508 * the cause of this exception. Based on this value, it invokes the
2509 * appropriate routine which determines other exception related info and
2510 * raises the exception.
2511 */
2512 static void
2513 guard_ast(thread_t t,
2514 mach_exception_data_type_t code,
2515 mach_exception_data_type_t subcode)
2516 {
2517 switch (EXC_GUARD_DECODE_GUARD_TYPE(code)) {
2518 case GUARD_TYPE_MACH_PORT:
2519 mach_port_guard_ast(t, code, subcode);
2520 break;
2521 case GUARD_TYPE_FD:
2522 fd_guard_ast(t, code, subcode);
2523 break;
2524 case GUARD_TYPE_VN:
2525 vn_guard_ast(t, code, subcode);
2526 break;
2527 case GUARD_TYPE_VIRT_MEMORY:
2528 virt_memory_guard_ast(t, code, subcode);
2529 break;
2530 #if CONFIG_DEBUG_SYSCALL_REJECTION
2531 case GUARD_TYPE_REJECTED_SC:
2532 rejected_syscall_guard_ast(t, code, subcode);
2533 break;
2534 #endif /* CONFIG_DEBUG_SYSCALL_REJECTION */
2535 default:
2536 panic("guard_exc_info %llx %llx", code, subcode);
2537 }
2538 }
2539
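/*
 * mach_exception_ast:
 *
 * Handle AST_MACH_EXCEPTION: consume the exception info saved on the
 * thread, then either run the guard handler (OS_REASON_GUARD), exit the
 * task with a mach exception, or do nothing for a stale AST.
 */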
2540 void
2541 mach_exception_ast(thread_t t)
2542 {
2543 const int os_reason = t->mach_exc_info.os_reason;
2544 const exception_type_t exception_type = t->mach_exc_info.exception_type;
2545 const mach_exception_data_type_t
2546 code = t->mach_exc_info.code,
2547 subcode = t->mach_exc_info.subcode;
2548 const bool
2549 ktriage = t->mach_exc_ktriage;
2550
2551 bzero(&t->mach_exc_info, sizeof(t->mach_exc_info));
2552 t->mach_exc_fatal = 0;
2553 t->mach_exc_ktriage = 0;
2554
2555 if (os_reason == OS_REASON_INVALID) {
2556 /* lingering AST_MACH_EXCEPTION on the processor? */
2557 } else if (os_reason == OS_REASON_GUARD) {
2558 guard_ast(t, code, subcode);
2559 } else {
2560 task_t task = get_threadtask(t);
2561 void *bsd_info = get_bsdtask_info(task);
2562 uint32_t flags = PX_FLAGS_NONE;
2563 if (ktriage) {
2564 flags |= PX_KTRIAGE;
2565 }
2566
2567 exception_info_t info = {
2568 .os_reason = os_reason,
2569 .exception_type = exception_type,
2570 .mx_code = code,
2571 .mx_subcode = subcode,
2572 };
2573 exit_with_mach_exception(bsd_info, info, flags);
2574 }
2575
2576 }
2577
2578 static void
2579 thread_cputime_callback(int warning, __unused const void *arg0, __unused const void *arg1)
2580 {
2581 if (warning == 0) {
2582 SENDING_NOTIFICATION__THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU();
2583 }
2584 }
2585
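/*
 * Invoked (via the per-thread ledger callback above) when a thread trips its
 * CPU usage monitor: suspend the monitor, log the offender, and send
 * RESOURCE_NOTIFY and, where configured, EXC_RESOURCE; a fatal limit
 * terminates the task (or invokes jetsam).
 */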
2586 void __attribute__((noinline))
2587 SENDING_NOTIFICATION__THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU(void)
2588 {
2589 int pid = 0;
2590 task_t task = current_task();
2591 thread_t thread = current_thread();
2592 uint64_t tid = thread->thread_id;
2593 const char *procname = "unknown";
2594 time_value_t thread_total_time = {0, 0};
2595 time_value_t thread_system_time;
2596 time_value_t thread_user_time;
2597 int action;
2598 uint8_t percentage;
2599 uint32_t usage_percent = 0;
2600 uint32_t interval_sec;
2601 uint64_t interval_ns;
2602 uint64_t balance_ns;
2603 boolean_t fatal = FALSE;
2604 boolean_t send_exc_resource = TRUE; /* in addition to RESOURCE_NOTIFY */
2605 kern_return_t kr;
2606
2607 #ifdef EXC_RESOURCE_MONITORS
2608 mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
2609 #endif /* EXC_RESOURCE_MONITORS */
2610 struct ledger_entry_info lei;
2611
2612 assert(thread->t_threadledger != LEDGER_NULL);
2613
2614 /*
2615 * Extract the fatal bit and suspend the monitor (which clears the bit).
2616 */
2617 task_lock(task);
2618 if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_CPUMON) {
2619 fatal = TRUE;
2620 send_exc_resource = TRUE;
2621 }
2622 /* Only one thread can be here at a time. Whichever makes it through
2623 * first will successfully suspend the monitor and proceed to send the
2624 * notification. Other threads will get an error trying to suspend the
2625 * monitor and give up on sending the notification. In the first release,
2626 * the monitor won't be resumed for a number of seconds, but we may
2627 * eventually need to handle low-latency resume.
2628 */
2629 kr = task_suspend_cpumon(task);
2630 task_unlock(task);
2631 if (kr == KERN_INVALID_ARGUMENT) {
2632 return;
2633 }
2634
2635 #ifdef MACH_BSD
2636 pid = proc_selfpid();
2637 void *bsd_info = get_bsdtask_info(task);
2638 if (bsd_info != NULL) {
2639 procname = proc_name_address(bsd_info);
2640 }
2641 #endif
2642
2643 thread_get_cpulimit(&action, &percentage, &interval_ns);
2644
2645 interval_sec = (uint32_t)(interval_ns / NSEC_PER_SEC);
2646
2647 thread_read_times(thread, &thread_user_time, &thread_system_time, NULL);
2648 time_value_add(&thread_total_time, &thread_user_time);
2649 time_value_add(&thread_total_time, &thread_system_time);
2650 ledger_get_entry_info(thread->t_threadledger, thread_ledgers.cpu_time, &lei);
2651
2652 /* credit/debit/balance/limit are in absolute time units;
2653 * the refill info is in nanoseconds. */
2654 absolutetime_to_nanoseconds(lei.lei_balance, &balance_ns);
2655 if (lei.lei_last_refill > 0) {
2656 usage_percent = (uint32_t)((balance_ns * 100ULL) / lei.lei_last_refill);
2657 }
2658
2659 /* TODO: show task total runtime (via TASK_ABSOLUTETIME_INFO)? */
2660 printf("process %s[%d] thread %llu caught burning CPU! It used more than %d%% CPU over %u seconds\n",
2661 procname, pid, tid, percentage, interval_sec);
2662 printf(" (actual recent usage: %d%% over ~%llu seconds)\n",
2663 usage_percent, (lei.lei_last_refill + NSEC_PER_SEC / 2) / NSEC_PER_SEC);
2664 printf(" Thread lifetime cpu usage %d.%06ds, (%d.%06d user, %d.%06d sys)\n",
2665 thread_total_time.seconds, thread_total_time.microseconds,
2666 thread_user_time.seconds, thread_user_time.microseconds,
2667 thread_system_time.seconds, thread_system_time.microseconds);
2668 printf(" Ledger balance: %lld; mabs credit: %lld; mabs debit: %lld\n",
2669 lei.lei_balance, lei.lei_credit, lei.lei_debit);
2670 printf(" mabs limit: %llu; mabs period: %llu ns; last refill: %llu ns%s.\n",
2671 lei.lei_limit, lei.lei_refill_period, lei.lei_last_refill,
2672 (fatal ? " [fatal violation]" : ""));
2673
2674 /*
2675 * For now, send RESOURCE_NOTIFY in parallel with EXC_RESOURCE. Once
2676 * we have logging parity, we will stop sending EXC_RESOURCE (24508922).
2677 */
2678
2679 /* RESOURCE_NOTIFY MIG specifies nanoseconds of CPU time */
2680 lei.lei_balance = balance_ns;
2681 absolutetime_to_nanoseconds(lei.lei_limit, &lei.lei_limit);
2682 trace_resource_violation(RMON_CPUUSAGE_VIOLATED, &lei);
2683 kr = send_resource_violation(send_cpu_usage_violation, task, &lei,
2684 fatal ? kRNFatalLimitFlag : 0);
2685 if (kr) {
2686 printf("send_resource_violation(CPU usage, ...): error %#x\n", kr);
2687 }
2688
2689 #ifdef EXC_RESOURCE_MONITORS
2690 if (send_exc_resource) {
2691 if (disable_exc_resource) {
2692 printf("process %s[%d] thread %llu caught burning CPU! "
2693 "EXC_RESOURCE%s suppressed by a boot-arg\n",
2694 procname, pid, tid, fatal ? " (and termination)" : "");
2695 return;
2696 }
2697
2698 if (disable_exc_resource_during_audio && audio_active) {
2699 printf("process %s[%d] thread %llu caught burning CPU! "
2700 "EXC_RESOURCE & termination suppressed due to audio playback\n",
2701 procname, pid, tid);
2702 return;
2703 }
2704 }
2705
2706
2707 if (send_exc_resource) {
2708 code[0] = code[1] = 0;
2709 EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_CPU);
2710 if (fatal) {
2711 EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_CPU_MONITOR_FATAL);
2712 } else {
2713 EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_CPU_MONITOR);
2714 }
2715 EXC_RESOURCE_CPUMONITOR_ENCODE_INTERVAL(code[0], interval_sec);
2716 EXC_RESOURCE_CPUMONITOR_ENCODE_PERCENTAGE(code[0], percentage);
2717 EXC_RESOURCE_CPUMONITOR_ENCODE_PERCENTAGE(code[1], usage_percent);
2718 exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
2719 }
2720 #endif /* EXC_RESOURCE_MONITORS */
2721
2722 if (fatal) {
2723 #if CONFIG_JETSAM
2724 jetsam_on_ledger_cpulimit_exceeded();
2725 #else
2726 task_terminate_internal(task);
2727 #endif
2728 }
2729 }
2730
2731 bool os_variant_has_internal_diagnostics(const char *subsystem);
2732
2733 #if DEVELOPMENT || DEBUG
2734
2735 void __attribute__((noinline))
2736 SENDING_NOTIFICATION__TASK_HAS_TOO_MANY_THREADS(task_t task, int thread_count)
2737 {
2738 mach_exception_data_type_t code[EXCEPTION_CODE_MAX] = {0};
2739 int pid = task_pid(task);
2740 char procname[MAXCOMLEN + 1] = "unknown";
2741
2742 if (pid == 1) {
2743 /*
2744 * Cannot suspend launchd
2745 */
2746 return;
2747 }
2748
2749 proc_name(pid, procname, sizeof(procname));
2750
2751 /*
2752	 * Skip all checks for testing when exc_resource_threads_enabled is overridden
2753 */
2754 if (exc_resource_threads_enabled == 2) {
2755 goto skip_checks;
2756 }
2757
2758 if (disable_exc_resource) {
2759 printf("process %s[%d] crossed thread count high watermark (%d), EXC_RESOURCE "
2760 "suppressed by a boot-arg.\n", procname, pid, thread_count);
2761 return;
2762 }
2763
2764 if (!os_variant_has_internal_diagnostics("com.apple.xnu")) {
2765 printf("process %s[%d] crossed thread count high watermark (%d), EXC_RESOURCE "
2766 "suppressed, internal diagnostics disabled.\n", procname, pid, thread_count);
2767 return;
2768 }
2769
2770 if (disable_exc_resource_during_audio && audio_active) {
2771 printf("process %s[%d] crossed thread count high watermark (%d), EXC_RESOURCE "
2772 "suppressed due to audio playback.\n", procname, pid, thread_count);
2773 return;
2774 }
2775
2776 if (!exc_via_corpse_forking) {
2777 printf("process %s[%d] crossed thread count high watermark (%d), EXC_RESOURCE "
2778 "suppressed due to corpse forking being disabled.\n", procname, pid,
2779 thread_count);
2780 return;
2781 }
2782
2783 skip_checks:
2784 printf("process %s[%d] crossed thread count high watermark (%d), sending "
2785 "EXC_RESOURCE\n", procname, pid, thread_count);
2786
2787 EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_THREADS);
2788 EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_THREADS_HIGH_WATERMARK);
2789 EXC_RESOURCE_THREADS_ENCODE_THREADS(code[0], thread_count);
2790
2791 task_enqueue_exception_with_corpse(task, EXC_RESOURCE, code, EXCEPTION_CODE_MAX, NULL, FALSE);
2792 }
2793 #endif /* DEVELOPMENT || DEBUG */
2794
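/*
 * thread_update_io_stats:
 *
 * Attribute a completed I/O of `size` bytes to both the thread's and the
 * task's I/O statistics, bucketed by read/metadata/paging and I/O tier;
 * writes are additionally credited to the task's physical_writes ledger.
 */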
2795 void
2796 thread_update_io_stats(thread_t thread, int size, int io_flags)
2797 {
2798 task_t task = get_threadtask(thread);
2799 int io_tier;
2800
2801 if (thread->thread_io_stats == NULL || task->task_io_stats == NULL) {
2802 return;
2803 }
2804
2805 if (io_flags & DKIO_READ) {
2806 UPDATE_IO_STATS(thread->thread_io_stats->disk_reads, size);
2807 UPDATE_IO_STATS_ATOMIC(task->task_io_stats->disk_reads, size);
2808 }
2809
2810 if (io_flags & DKIO_META) {
2811 UPDATE_IO_STATS(thread->thread_io_stats->metadata, size);
2812 UPDATE_IO_STATS_ATOMIC(task->task_io_stats->metadata, size);
2813 }
2814
2815 if (io_flags & DKIO_PAGING) {
2816 UPDATE_IO_STATS(thread->thread_io_stats->paging, size);
2817 UPDATE_IO_STATS_ATOMIC(task->task_io_stats->paging, size);
2818 }
2819
2820 io_tier = ((io_flags & DKIO_TIER_MASK) >> DKIO_TIER_SHIFT);
2821 assert(io_tier < IO_NUM_PRIORITIES);
2822
2823 UPDATE_IO_STATS(thread->thread_io_stats->io_priority[io_tier], size);
2824 UPDATE_IO_STATS_ATOMIC(task->task_io_stats->io_priority[io_tier], size);
2825
2826 /* Update Total I/O Counts */
2827 UPDATE_IO_STATS(thread->thread_io_stats->total_io, size);
2828 UPDATE_IO_STATS_ATOMIC(task->task_io_stats->total_io, size);
2829
2830 if (!(io_flags & DKIO_READ)) {
2831 DTRACE_IO3(physical_writes, struct task *, task, uint32_t, size, int, io_flags);
2832 ledger_credit(task->ledger, task_ledgers.physical_writes, size);
2833 }
2834 }
2835
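/*
 * init_thread_ledgers:
 *
 * Build the per-thread ledger template with a single "cpu_time" entry and
 * register thread_cputime_callback() as its limit callback.
 */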
2836 static void
2837 init_thread_ledgers(void)
2838 {
2839 ledger_template_t t;
2840 int idx;
2841
2842 assert(thread_ledger_template == NULL);
2843
2844 if ((t = ledger_template_create("Per-thread ledger")) == NULL) {
2845 panic("couldn't create thread ledger template");
2846 }
2847
2848 if ((idx = ledger_entry_add(t, "cpu_time", "sched", "ns")) < 0) {
2849 panic("couldn't create cpu_time entry for thread ledger template");
2850 }
2851
2852 if (ledger_set_callback(t, idx, thread_cputime_callback, NULL, NULL) < 0) {
2853 panic("couldn't set thread ledger callback for cpu_time entry");
2854 }
2855
2856 thread_ledgers.cpu_time = idx;
2857
2858 ledger_template_complete(t);
2859 thread_ledger_template = t;
2860 }
2861
2862 /*
2863 * Returns the amount of (abs) CPU time that remains before the limit would be
2864 * hit or the amount of time left in the current interval, whichever is smaller.
2865 * This value changes as CPU time is consumed and the ledgers refilled.
2866 * Used to limit the quantum of a thread.
2867 */
2868 uint64_t
2869 thread_cpulimit_remaining(uint64_t now)
2870 {
2871 thread_t thread = current_thread();
2872
2873 if ((thread->options &
2874 (TH_OPT_PROC_CPULIMIT | TH_OPT_PRVT_CPULIMIT)) == 0) {
2875 return UINT64_MAX;
2876 }
2877
2878 /* Amount of time left in the current interval. */
2879 const uint64_t interval_remaining =
2880 ledger_get_interval_remaining(thread->t_threadledger, thread_ledgers.cpu_time, now);
2881
2882 /* Amount that can be spent until the limit is hit. */
2883 const uint64_t remaining =
2884 ledger_get_remaining(thread->t_threadledger, thread_ledgers.cpu_time);
2885
2886 return MIN(interval_remaining, remaining);
2887 }
2888
2889 /*
2890 * Returns true if a new interval should be started.
2891 */
2892 bool
2893 thread_cpulimit_interval_has_expired(uint64_t now)
2894 {
2895 thread_t thread = current_thread();
2896
2897 if ((thread->options &
2898 (TH_OPT_PROC_CPULIMIT | TH_OPT_PRVT_CPULIMIT)) == 0) {
2899 return false;
2900 }
2901
2902 return ledger_get_interval_remaining(thread->t_threadledger,
2903 thread_ledgers.cpu_time, now) == 0;
2904 }
2905
2906 /*
2907 * Balances the ledger and sets the last refill time to `now`.
2908 */
2909 void
2910 thread_cpulimit_restart(uint64_t now)
2911 {
2912 thread_t thread = current_thread();
2913
2914 assert3u(thread->options & (TH_OPT_PROC_CPULIMIT | TH_OPT_PRVT_CPULIMIT), !=, 0);
2915
2916 ledger_restart(thread->t_threadledger, thread_ledgers.cpu_time, now);
2917 }
2918
2919 /*
2920 * Returns currently applied CPU usage limit, or 0/0 if none is applied.
2921 */
2922 int
2923 thread_get_cpulimit(int *action, uint8_t *percentage, uint64_t *interval_ns)
2924 {
2925 int64_t abstime = 0;
2926 uint64_t limittime = 0;
2927 thread_t thread = current_thread();
2928
2929 *percentage = 0;
2930 *interval_ns = 0;
2931 *action = 0;
2932
2933 if (thread->t_threadledger == LEDGER_NULL) {
2934 /*
2935 * This thread has no per-thread ledger, so it can't possibly
2936 * have a CPU limit applied.
2937 */
2938 return KERN_SUCCESS;
2939 }
2940
2941 ledger_get_period(thread->t_threadledger, thread_ledgers.cpu_time, interval_ns);
2942 ledger_get_limit(thread->t_threadledger, thread_ledgers.cpu_time, &abstime);
2943
2944 if ((abstime == LEDGER_LIMIT_INFINITY) || (*interval_ns == 0)) {
2945 /*
2946 * This thread's CPU time ledger has no period or limit; so it
2947 * doesn't have a CPU limit applied.
2948 */
2949 return KERN_SUCCESS;
2950 }
2951
2952 /*
2953 * This calculation is the converse to the one in thread_set_cpulimit().
2954 */
2955 absolutetime_to_nanoseconds(abstime, &limittime);
2956 *percentage = (uint8_t)((limittime * 100ULL) / *interval_ns);
2957 assert(*percentage <= 100);
2958
2959 if (thread->options & TH_OPT_PROC_CPULIMIT) {
2960 assert((thread->options & TH_OPT_PRVT_CPULIMIT) == 0);
2961
2962 *action = THREAD_CPULIMIT_BLOCK;
2963 } else if (thread->options & TH_OPT_PRVT_CPULIMIT) {
2964 assert((thread->options & TH_OPT_PROC_CPULIMIT) == 0);
2965
2966 *action = THREAD_CPULIMIT_EXCEPTION;
2967 } else {
2968 *action = THREAD_CPULIMIT_DISABLE;
2969 }
2970
2971 return KERN_SUCCESS;
2972 }
2973
2974 /*
2975 * Set CPU usage limit on a thread.
2976 */
2977 int
2978 thread_set_cpulimit(int action, uint8_t percentage, uint64_t interval_ns)
2979 {
2980 thread_t thread = current_thread();
2981 ledger_t l;
2982 uint64_t limittime = 0;
2983 uint64_t abstime = 0;
2984
2985 assert(percentage <= 100);
2986 assert(percentage > 0 || action == THREAD_CPULIMIT_DISABLE);
2987
2988 /*
2989 * Disallow any change to the CPU limit if the TH_OPT_FORCED_LEDGER
2990 * flag is set.
2991 */
2992 if ((thread->options & TH_OPT_FORCED_LEDGER) != 0) {
2993 return KERN_FAILURE;
2994 }
2995
2996 if (action == THREAD_CPULIMIT_DISABLE) {
2997 /*
2998 * Remove CPU limit, if any exists.
2999 */
3000 if (thread->t_threadledger != LEDGER_NULL) {
3001 l = thread->t_threadledger;
3002 ledger_set_limit(l, thread_ledgers.cpu_time, LEDGER_LIMIT_INFINITY, 0);
3003 ledger_set_action(l, thread_ledgers.cpu_time, LEDGER_ACTION_IGNORE);
3004 thread->options &= ~(TH_OPT_PROC_CPULIMIT | TH_OPT_PRVT_CPULIMIT);
3005 }
3006
3007 return 0;
3008 }
3009
3010 if (interval_ns < MINIMUM_CPULIMIT_INTERVAL_MS * NSEC_PER_MSEC) {
3011 return KERN_INVALID_ARGUMENT;
3012 }
3013
3014 l = thread->t_threadledger;
3015 if (l == LEDGER_NULL) {
3016 /*
3017		 * This thread doesn't yet have a per-thread ledger, so create one with only the CPU time entry active.
3018 */
3019 if ((l = ledger_instantiate(thread_ledger_template, LEDGER_CREATE_INACTIVE_ENTRIES)) == LEDGER_NULL) {
3020 return KERN_RESOURCE_SHORTAGE;
3021 }
3022
3023 /*
3024 * We are the first to create this thread's ledger, so only activate our entry.
3025 */
3026 ledger_entry_setactive(l, thread_ledgers.cpu_time);
3027 thread->t_threadledger = l;
3028 }
3029
3030 /*
3031 * The limit is specified as a percentage of CPU over an interval in nanoseconds.
3032 * Calculate the amount of CPU time that the thread needs to consume in order to hit the limit.
3033 */
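	/*
	 * For example (illustrative values only): percentage = 50 and
	 * interval_ns = 100 * NSEC_PER_MSEC yield limittime = 50 ms of CPU
	 * time allowed per 100 ms refill period.
	 */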
3034 limittime = (interval_ns * percentage) / 100;
3035 nanoseconds_to_absolutetime(limittime, &abstime);
3036 ledger_set_limit(l, thread_ledgers.cpu_time, abstime, cpumon_ustackshots_trigger_pct);
3037 /*
3038 * Refill the thread's allotted CPU time every interval_ns nanoseconds.
3039 */
3040 ledger_set_period(l, thread_ledgers.cpu_time, interval_ns);
3041
3042 if (action == THREAD_CPULIMIT_EXCEPTION) {
3043 /*
3044 * We don't support programming the CPU usage monitor on a task if any of its
3045 * threads have a per-thread blocking CPU limit configured.
3046 */
3047 if (thread->options & TH_OPT_PRVT_CPULIMIT) {
3048 panic("CPU usage monitor activated, but blocking thread limit exists");
3049 }
3050
3051 /*
3052 * Make a note that this thread's CPU limit is being used for the task-wide CPU
3053 * usage monitor. We don't have to arm the callback which will trigger the
3054 * exception, because that was done for us in ledger_instantiate (because the
3055 * ledger template used has a default callback).
3056 */
3057 thread->options |= TH_OPT_PROC_CPULIMIT;
3058 } else {
3059 /*
3060		 * We deliberately override any CPU limit imposed by a task-wide limit (e.g.
3061 * CPU usage monitor).
3062 */
3063 thread->options &= ~TH_OPT_PROC_CPULIMIT;
3064
3065 thread->options |= TH_OPT_PRVT_CPULIMIT;
3066 /* The per-thread ledger template by default has a callback for CPU time */
3067 ledger_disable_callback(l, thread_ledgers.cpu_time);
3068 ledger_set_action(l, thread_ledgers.cpu_time, LEDGER_ACTION_BLOCK);
3069 }
3070
3071 return 0;
3072 }
3073
3074 void
3075 thread_sched_call(
3076 thread_t thread,
3077 sched_call_t call)
3078 {
3079 assert((thread->state & TH_WAIT_REPORT) == 0);
3080 thread->sched_call = call;
3081 }
3082
3083
3084 uint64_t
3085 thread_tid(
3086 thread_t thread)
3087 {
3088 return thread != THREAD_NULL? thread->thread_id: 0;
3089 }
3090
3091 uint64_t
3092 uthread_tid(
3093 struct uthread *uth)
3094 {
3095 if (uth) {
3096 return thread_tid(get_machthread(uth));
3097 }
3098 return 0;
3099 }
3100
3101 uint16_t
3102 thread_set_tag(thread_t th, uint16_t tag)
3103 {
3104 return thread_set_tag_internal(th, tag);
3105 }
3106
3107 uint16_t
3108 thread_get_tag(thread_t th)
3109 {
3110 return thread_get_tag_internal(th);
3111 }
3112
3113 uint64_t
3114 thread_last_run_time(thread_t th)
3115 {
3116 return th->last_run_time;
3117 }
3118
3119 /*
3120 * Shared resource contention management
3121 *
3122 * The scheduler attempts to load balance the shared resource intensive
3123 * workloads across clusters to ensure that the resource is not heavily
3124 * contended. The kernel relies on external agents (userspace or
3125 * performance controller) to identify shared resource heavy threads.
3126 * The load balancing is achieved based on the scheduler configuration
3127 * enabled on the platform.
3128 */
3129
3130
3131 #if CONFIG_SCHED_EDGE
3132
3133 /*
3134 * On the Edge scheduler, the load balancing is achieved by looking
3135 * at cluster level shared resource loads and migrating resource heavy
3136 * threads dynamically to under utilized cluster. Therefore, when a
3137 * thread is indicated as a resource heavy thread, the policy set
3138 * routine simply adds a flag to the thread which is looked at by
3139 * the scheduler on thread migration decisions.
3140 */
3141
3142 boolean_t
3143 thread_shared_rsrc_policy_get(thread_t thread, cluster_shared_rsrc_type_t type)
3144 {
3145 return thread->th_shared_rsrc_heavy_user[type] || thread->th_shared_rsrc_heavy_perf_control[type];
3146 }
3147
3148 __options_decl(sched_edge_rsrc_heavy_thread_state, uint32_t, {
3149 SCHED_EDGE_RSRC_HEAVY_THREAD_SET = 1,
3150 SCHED_EDGE_RSRC_HEAVY_THREAD_CLR = 2,
3151 });
3152
3153 kern_return_t
3154 thread_shared_rsrc_policy_set(thread_t thread, __unused uint32_t index, cluster_shared_rsrc_type_t type, shared_rsrc_policy_agent_t agent)
3155 {
3156 spl_t s = splsched();
3157 thread_lock(thread);
3158
3159 bool user = (agent == SHARED_RSRC_POLICY_AGENT_DISPATCH) || (agent == SHARED_RSRC_POLICY_AGENT_SYSCTL);
3160 bool *thread_flags = (user) ? thread->th_shared_rsrc_heavy_user : thread->th_shared_rsrc_heavy_perf_control;
3161 if (thread_flags[type]) {
3162 thread_unlock(thread);
3163 splx(s);
3164 return KERN_FAILURE;
3165 }
3166
3167 thread_flags[type] = true;
3168 thread_unlock(thread);
3169 splx(s);
3170
3171 KDBG(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_RSRC_HEAVY_THREAD) | DBG_FUNC_NONE, SCHED_EDGE_RSRC_HEAVY_THREAD_SET, thread_tid(thread), type, agent);
3172 if (thread == current_thread()) {
3173 if (agent == SHARED_RSRC_POLICY_AGENT_PERFCTL_QUANTUM) {
3174 ast_on(AST_PREEMPT);
3175 } else {
3176 assert(agent != SHARED_RSRC_POLICY_AGENT_PERFCTL_CSW);
3177 thread_block(THREAD_CONTINUE_NULL);
3178 }
3179 }
3180 return KERN_SUCCESS;
3181 }
3182
3183 kern_return_t
3184 thread_shared_rsrc_policy_clear(thread_t thread, cluster_shared_rsrc_type_t type, shared_rsrc_policy_agent_t agent)
3185 {
3186 spl_t s = splsched();
3187 thread_lock(thread);
3188
3189 bool user = (agent == SHARED_RSRC_POLICY_AGENT_DISPATCH) || (agent == SHARED_RSRC_POLICY_AGENT_SYSCTL);
3190 bool *thread_flags = (user) ? thread->th_shared_rsrc_heavy_user : thread->th_shared_rsrc_heavy_perf_control;
3191 if (!thread_flags[type]) {
3192 thread_unlock(thread);
3193 splx(s);
3194 return KERN_FAILURE;
3195 }
3196
3197 thread_flags[type] = false;
3198 thread_unlock(thread);
3199 splx(s);
3200
3201 KDBG(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_RSRC_HEAVY_THREAD) | DBG_FUNC_NONE, SCHED_EDGE_RSRC_HEAVY_THREAD_CLR, thread_tid(thread), type, agent);
3202 if (thread == current_thread()) {
3203 if (agent == SHARED_RSRC_POLICY_AGENT_PERFCTL_QUANTUM) {
3204 ast_on(AST_PREEMPT);
3205 } else {
3206 assert(agent != SHARED_RSRC_POLICY_AGENT_PERFCTL_CSW);
3207 thread_block(THREAD_CONTINUE_NULL);
3208 }
3209 }
3210 return KERN_SUCCESS;
3211 }
3212
3213 #else /* CONFIG_SCHED_EDGE */
3214
3215 /*
3216 * On non-Edge schedulers, the shared resource contention
3217 * is managed by simply binding threads to specific clusters
3218 * based on the worker index passed by the agents marking
3219 * this thread as resource heavy threads. The thread binding
3220 * approach does not provide any rebalancing opportunities;
3221 * it can also suffer from scheduling delays if the cluster
3222 * where the thread is bound is contended.
3223 */
3224
3225 boolean_t
3226 thread_shared_rsrc_policy_get(__unused thread_t thread, __unused cluster_shared_rsrc_type_t type)
3227 {
3228 return false;
3229 }
3230
3231 kern_return_t
3232 thread_shared_rsrc_policy_set(thread_t thread, uint32_t index, __unused cluster_shared_rsrc_type_t type, __unused shared_rsrc_policy_agent_t agent)
3233 {
3234 return thread_soft_bind_cluster_id(thread, index, THREAD_BIND_ELIGIBLE_ONLY);
3235 }
3236
3237 kern_return_t
3238 thread_shared_rsrc_policy_clear(thread_t thread, __unused cluster_shared_rsrc_type_t type, __unused shared_rsrc_policy_agent_t agent)
3239 {
3240 return thread_soft_bind_cluster_id(thread, 0, THREAD_UNBIND);
3241 }
3242
3243 #endif /* CONFIG_SCHED_EDGE */
3244
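/*
 * thread_dispatchqaddr:
 *
 * Return the user address of the thread's dispatch queue pointer, computed
 * as the thread's TSD base (cthread_self) plus the per-process dispatch
 * queue offset; 0 if it cannot be determined.
 */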
3245 uint64_t
3246 thread_dispatchqaddr(
3247 thread_t thread)
3248 {
3249 uint64_t dispatchqueue_addr;
3250 uint64_t thread_handle;
3251 task_t task;
3252
3253 if (thread == THREAD_NULL) {
3254 return 0;
3255 }
3256
3257 thread_handle = thread->machine.cthread_self;
3258 if (thread_handle == 0) {
3259 return 0;
3260 }
3261
3262 task = get_threadtask(thread);
3263 void *bsd_info = get_bsdtask_info(task);
3264 if (thread->inspection == TRUE) {
3265 dispatchqueue_addr = thread_handle + get_task_dispatchqueue_offset(task);
3266 } else if (bsd_info) {
3267 dispatchqueue_addr = thread_handle + get_dispatchqueue_offset_from_proc(bsd_info);
3268 } else {
3269 dispatchqueue_addr = 0;
3270 }
3271
3272 return dispatchqueue_addr;
3273 }
3274
3275
3276 uint64_t
3277 thread_wqquantum_addr(thread_t thread)
3278 {
3279 uint64_t thread_handle;
3280 task_t task;
3281
3282 if (thread == THREAD_NULL) {
3283 return 0;
3284 }
3285
3286 thread_handle = thread->machine.cthread_self;
3287 if (thread_handle == 0) {
3288 return 0;
3289 }
3290 task = get_threadtask(thread);
3291
3292 uint64_t wq_quantum_expiry_offset = get_wq_quantum_offset_from_proc(get_bsdtask_info(task));
3293 if (wq_quantum_expiry_offset == 0) {
3294 return 0;
3295 }
3296
3297 return wq_quantum_expiry_offset + thread_handle;
3298 }
3299
3300 uint64_t
3301 thread_rettokern_addr(
3302 thread_t thread)
3303 {
3304 uint64_t rettokern_addr;
3305 uint64_t rettokern_offset;
3306 uint64_t thread_handle;
3307 task_t task;
3308 void *bsd_info;
3309
3310 if (thread == THREAD_NULL) {
3311 return 0;
3312 }
3313
3314 thread_handle = thread->machine.cthread_self;
3315 if (thread_handle == 0) {
3316 return 0;
3317 }
3318 task = get_threadtask(thread);
3319 bsd_info = get_bsdtask_info(task);
3320
3321 if (bsd_info) {
3322 rettokern_offset = get_return_to_kernel_offset_from_proc(bsd_info);
3323
3324 /* Return 0 if return to kernel offset is not initialized. */
3325 if (rettokern_offset == 0) {
3326 rettokern_addr = 0;
3327 } else {
3328 rettokern_addr = thread_handle + rettokern_offset;
3329 }
3330 } else {
3331 rettokern_addr = 0;
3332 }
3333
3334 return rettokern_addr;
3335 }
3336
3337 /*
3338 * Export routines to other components for things that are done as macros
3339 * within the osfmk component.
3340 */
3341
3342 void
3343 thread_mtx_lock(thread_t thread)
3344 {
3345 lck_mtx_lock(&thread->mutex);
3346 }
3347
3348 void
3349 thread_mtx_unlock(thread_t thread)
3350 {
3351 lck_mtx_unlock(&thread->mutex);
3352 }
3353
3354 void
3355 thread_reference(
3356 thread_t thread)
3357 {
3358 if (thread != THREAD_NULL) {
3359 zone_id_require(ZONE_ID_THREAD, sizeof(struct thread), thread);
3360 os_ref_retain_raw(&thread->ref_count, &thread_refgrp);
3361 }
3362 }
3363
3364 void
3365 thread_require(thread_t thread)
3366 {
3367 zone_id_require(ZONE_ID_THREAD, sizeof(struct thread), thread);
3368 }
3369
3370 #undef thread_should_halt
3371
3372 boolean_t
3373 thread_should_halt(
3374 thread_t th)
3375 {
3376 return thread_should_halt_fast(th);
3377 }
3378
3379 /*
3380 * thread_set_voucher_name - reset the voucher port name bound to this thread
3381 *
3382 * Conditions: nothing locked
3383 */
3384
3385 kern_return_t
3386 thread_set_voucher_name(mach_port_name_t voucher_name)
3387 {
3388 thread_t thread = current_thread();
3389 ipc_voucher_t new_voucher = IPC_VOUCHER_NULL;
3390 ipc_voucher_t voucher;
3391 ledger_t bankledger = NULL;
3392 struct thread_group *banktg = NULL;
3393 uint32_t persona_id = 0;
3394
3395 if (MACH_PORT_DEAD == voucher_name) {
3396 return KERN_INVALID_RIGHT;
3397 }
3398
3399 /*
3400	 * aggressively convert to a voucher reference
3401 */
3402 if (MACH_PORT_VALID(voucher_name)) {
3403 new_voucher = convert_port_name_to_voucher(voucher_name);
3404 if (IPC_VOUCHER_NULL == new_voucher) {
3405 return KERN_INVALID_ARGUMENT;
3406 }
3407 }
3408 bank_get_bank_ledger_thread_group_and_persona(new_voucher, &bankledger, &banktg, &persona_id);
3409
3410 thread_mtx_lock(thread);
3411 voucher = thread->ith_voucher;
3412 thread->ith_voucher_name = voucher_name;
3413 thread->ith_voucher = new_voucher;
3414 thread_mtx_unlock(thread);
3415
3416 bank_swap_thread_bank_ledger(thread, bankledger);
3417 #if CONFIG_THREAD_GROUPS
3418 thread_group_set_bank(thread, banktg);
3419 #endif /* CONFIG_THREAD_GROUPS */
3420
3421 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3422 MACHDBG_CODE(DBG_MACH_IPC, MACH_THREAD_SET_VOUCHER) | DBG_FUNC_NONE,
3423 (uintptr_t)thread_tid(thread),
3424 (uintptr_t)voucher_name,
3425 VM_KERNEL_ADDRPERM((uintptr_t)new_voucher),
3426 persona_id, 0);
3427
3428 if (IPC_VOUCHER_NULL != voucher) {
3429 ipc_voucher_release(voucher);
3430 }
3431
3432 return KERN_SUCCESS;
3433 }
3434
3435 /*
3436 * thread_get_mach_voucher - return a voucher reference for the specified thread voucher
3437 *
3438 * Conditions: nothing locked
3439 *
3440 * NOTE: At the moment, there is no distinction between the current and effective
3441 * vouchers because we only set them at the thread level currently.
3442 */
3443 kern_return_t
3444 thread_get_mach_voucher(
3445 thread_act_t thread,
3446 mach_voucher_selector_t __unused which,
3447 ipc_voucher_t *voucherp)
3448 {
3449 ipc_voucher_t voucher;
3450
3451 if (THREAD_NULL == thread) {
3452 return KERN_INVALID_ARGUMENT;
3453 }
3454
3455 thread_mtx_lock(thread);
3456 voucher = thread->ith_voucher;
3457
3458 if (IPC_VOUCHER_NULL != voucher) {
3459 ipc_voucher_reference(voucher);
3460 thread_mtx_unlock(thread);
3461 *voucherp = voucher;
3462 return KERN_SUCCESS;
3463 }
3464
3465 thread_mtx_unlock(thread);
3466
3467 *voucherp = IPC_VOUCHER_NULL;
3468 return KERN_SUCCESS;
3469 }
3470
3471 /*
3472 * thread_set_mach_voucher - set a voucher reference for the specified thread voucher
3473 *
3474 * Conditions: caller holds a reference on the voucher.
3475 * nothing locked.
3476 *
3477 * We grab another reference to the voucher and bind it to the thread.
3478 * The old voucher reference associated with the thread is
3479 * discarded.
3480 */
3481 kern_return_t
3482 thread_set_mach_voucher(
3483 thread_t thread,
3484 ipc_voucher_t voucher)
3485 {
3486 ipc_voucher_t old_voucher;
3487 ledger_t bankledger = NULL;
3488 struct thread_group *banktg = NULL;
3489 uint32_t persona_id = 0;
3490
3491 if (THREAD_NULL == thread) {
3492 return KERN_INVALID_ARGUMENT;
3493 }
3494
3495 bank_get_bank_ledger_thread_group_and_persona(voucher, &bankledger, &banktg, &persona_id);
3496
3497 thread_mtx_lock(thread);
3498 /*
3499 * Once the thread is started, we will look at `ith_voucher` without
3500 * holding any lock.
3501 *
3502 * Setting the voucher hence can only be done by current_thread() or
3503 * before it started. "started" flips under the thread mutex and must be
3504 * tested under it too.
3505 */
3506 if (thread != current_thread() && thread->started) {
3507 thread_mtx_unlock(thread);
3508 return KERN_INVALID_ARGUMENT;
3509 }
3510
3511 ipc_voucher_reference(voucher);
3512 old_voucher = thread->ith_voucher;
3513 thread->ith_voucher = voucher;
3514 thread->ith_voucher_name = MACH_PORT_NULL;
3515 thread_mtx_unlock(thread);
3516
3517 bank_swap_thread_bank_ledger(thread, bankledger);
3518 #if CONFIG_THREAD_GROUPS
3519 thread_group_set_bank(thread, banktg);
3520 #endif /* CONFIG_THREAD_GROUPS */
3521
3522 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3523 MACHDBG_CODE(DBG_MACH_IPC, MACH_THREAD_SET_VOUCHER) | DBG_FUNC_NONE,
3524 (uintptr_t)thread_tid(thread),
3525 (uintptr_t)MACH_PORT_NULL,
3526 VM_KERNEL_ADDRPERM((uintptr_t)voucher),
3527 persona_id, 0);
3528
3529 ipc_voucher_release(old_voucher);
3530
3531 return KERN_SUCCESS;
3532 }
3533
3534 /*
3535 * thread_swap_mach_voucher - swap a voucher reference for the specified thread voucher
3536 *
 * Conditions: caller holds a reference on the new and presumed old voucher(s).
3538 * nothing locked.
3539 *
3540 * This function is no longer supported.
3541 */
3542 kern_return_t
thread_swap_mach_voucher(
3544 __unused thread_t thread,
3545 __unused ipc_voucher_t new_voucher,
3546 ipc_voucher_t *in_out_old_voucher)
3547 {
3548 /*
3549 * Currently this function is only called from a MIG generated
3550 * routine which doesn't release the reference on the voucher
3551 * addressed by in_out_old_voucher. To avoid leaking this reference,
3552 * a call to release it has been added here.
3553 */
3554 ipc_voucher_release(*in_out_old_voucher);
3555 OS_ANALYZER_SUPPRESS("81787115") return KERN_NOT_SUPPORTED;
3556 }
3557
3558 /*
3559 * thread_get_current_voucher_origin_pid - get the pid of the originator of the current voucher.
3560 */
3561 kern_return_t
thread_get_current_voucher_origin_pid(
3563 int32_t *pid)
3564 {
3565 return thread_get_voucher_origin_pid(current_thread(), pid);
3566 }
3567
3568 /*
 * thread_get_voucher_origin_pid - get the pid of the originator of the specified thread's voucher.
3570 */
3571 kern_return_t
thread_get_voucher_origin_pid(thread_t thread, int32_t *pid)
3573 {
3574 uint32_t buf_size = sizeof(*pid);
3575 return mach_voucher_attr_command(thread->ith_voucher,
3576 MACH_VOUCHER_ATTR_KEY_BANK,
3577 BANK_ORIGINATOR_PID,
3578 NULL,
3579 0,
3580 (mach_voucher_attr_content_t)pid,
3581 &buf_size);
3582 }
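
/*
 * For illustration only: a userspace holder of a voucher send right can ask
 * the bank attribute manager the same question directly. This is a hedged,
 * minimal sketch; it assumes the caller already has the voucher port in
 * `voucher_port`, that the bank constants are visible to it, and it omits
 * error handling.
 *
 *	int32_t origin_pid = -1;
 *	mach_msg_type_number_t size = sizeof(origin_pid);
 *	kern_return_t kr = mach_voucher_attr_command(voucher_port,
 *	    MACH_VOUCHER_ATTR_KEY_BANK, BANK_ORIGINATOR_PID,
 *	    NULL, 0,
 *	    (mach_voucher_attr_content_t)&origin_pid, &size);
 */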
3583
3584 /*
 * thread_get_voucher_origin_proximate_pid - get the pids of the originator and proximate process of the specified thread's voucher.
3586 */
3587 kern_return_t
thread_get_voucher_origin_proximate_pid(thread_t thread, int32_t *origin_pid, int32_t *proximate_pid)
3589 {
3590 int32_t origin_proximate_pids[2] = { };
3591 uint32_t buf_size = sizeof(origin_proximate_pids);
3592 kern_return_t kr = mach_voucher_attr_command(thread->ith_voucher,
3593 MACH_VOUCHER_ATTR_KEY_BANK,
3594 BANK_ORIGINATOR_PROXIMATE_PID,
3595 NULL,
3596 0,
3597 (mach_voucher_attr_content_t)origin_proximate_pids,
3598 &buf_size);
3599 if (kr == KERN_SUCCESS) {
3600 *origin_pid = origin_proximate_pids[0];
3601 *proximate_pid = origin_proximate_pids[1];
3602 }
3603 return kr;
3604 }
3605
3606 #if CONFIG_THREAD_GROUPS
3607 /*
3608 * Returns the current thread's voucher-carried thread group
3609 *
 * The reference is borrowed from the thread's current voucher, so this does
 * NOT return a new reference to the group.
3612 */
3613 struct thread_group *
thread_get_current_voucher_thread_group(thread_t thread)
3615 {
3616 assert(thread == current_thread());
3617
3618 if (thread->ith_voucher == NULL) {
3619 return NULL;
3620 }
3621
3622 ledger_t bankledger = NULL;
3623 struct thread_group *banktg = NULL;
3624
3625 bank_get_bank_ledger_thread_group_and_persona(thread->ith_voucher, &bankledger, &banktg, NULL);
3626
3627 return banktg;
3628 }
3629
3630 #endif /* CONFIG_THREAD_GROUPS */
3631
3632 #if CONFIG_COALITIONS
3633
3634 uint64_t
thread_get_current_voucher_resource_coalition_id(thread_t thread)
3636 {
3637 uint64_t id = 0;
3638 assert(thread == current_thread());
3639 if (thread->ith_voucher != NULL) {
3640 id = bank_get_bank_ledger_resource_coalition_id(thread->ith_voucher);
3641 }
3642 return id;
3643 }
3644
3645 #endif /* CONFIG_COALITIONS */
3646
3647 extern struct workqueue *
3648 proc_get_wqptr(void *proc);
3649
3650 static bool
task_supports_cooperative_workqueue(task_t task)
3652 {
3653 void *bsd_info = get_bsdtask_info(task);
3654
3655 assert(task == current_task());
3656 if (bsd_info == NULL) {
3657 return false;
3658 }
3659
3660 uint64_t wq_quantum_expiry_offset = get_wq_quantum_offset_from_proc(bsd_info);
	/* userspace may not have called workq_open yet */
3662 struct workqueue *wq = proc_get_wqptr(bsd_info);
3663
3664 return (wq != NULL) && (wq_quantum_expiry_offset != 0);
3665 }
3666
3667 /* Not safe to call from scheduler paths - should only be called on self */
3668 bool
thread_supports_cooperative_workqueue(thread_t thread)
3670 {
3671 struct uthread *uth = get_bsdthread_info(thread);
3672 task_t task = get_threadtask(thread);
3673
3674 assert(thread == current_thread());
3675
3676 return task_supports_cooperative_workqueue(task) &&
3677 bsdthread_part_of_cooperative_workqueue(uth);
3678 }
3679
3680 static inline bool
thread_has_armed_workqueue_quantum(thread_t thread)
3682 {
3683 return thread->workq_quantum_deadline != 0;
3684 }
3685
3686 /*
3687 * The workq quantum is a lazy timer that is evaluated at 2 specific times in
3688 * the scheduler:
3689 *
3690 * - context switch time
3691 * - scheduler quantum expiry time.
3692 *
3693 * We're currently expressing the workq quantum with a 0.5 scale factor of the
3694 * scheduler quantum. It is possible that if the workq quantum is rearmed
3695 * shortly after the scheduler quantum begins, we could have a large delay
3696 * between when the workq quantum next expires and when it actually is noticed.
3697 *
3698 * A potential future improvement for the wq quantum expiry logic is to compare
3699 * it to the next actual scheduler quantum deadline and expire it if it is
3700 * within a certain leeway.
3701 */
3702 static inline uint64_t
thread_workq_quantum_size(thread_t thread)
3704 {
3705 return (uint64_t) (SCHED(initial_quantum_size)(thread) / 2);
3706 }
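
/*
 * Worked example (assuming a hypothetical 10ms scheduler quantum): the workq
 * quantum computed above is 5ms of on-core runtime, so arming sets
 * workq_quantum_deadline = thread_get_runtime_self() + 5ms. Because expiry is
 * only evaluated at context switch or scheduler quantum expiry, a quantum
 * armed just after a scheduler quantum begins may not be noticed until that
 * scheduler quantum ends, i.e. well after the nominal deadline; this is the
 * "lazy" behavior described above.
 */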
3707
3708 /*
3709 * Always called by thread on itself - either at AST boundary after processing
3710 * an existing quantum expiry, or when a new quantum is armed before the thread
3711 * goes out to userspace to handle a thread request
3712 */
3713 void
thread_arm_workqueue_quantum(thread_t thread)
3715 {
3716 /*
3717 * If the task is not opted into wq quantum notification, or if the thread
3718 * is not part of the cooperative workqueue, don't even bother with tracking
3719 * the quantum or calculating expiry
3720 */
3721 if (!thread_supports_cooperative_workqueue(thread)) {
3722 assert(thread->workq_quantum_deadline == 0);
3723 return;
3724 }
3725
3726 assert(current_thread() == thread);
3727 assert(thread_get_tag(thread) & THREAD_TAG_WORKQUEUE);
3728
3729 uint64_t current_runtime = thread_get_runtime_self();
3730 uint64_t deadline = thread_workq_quantum_size(thread) + current_runtime;
3731
3732 /*
3733 * The update of a workqueue quantum should always be followed by the update
3734 * of the AST - see explanation in kern/thread.h for synchronization of this
3735 * field
3736 */
3737 thread->workq_quantum_deadline = deadline;
3738
3739 /* We're arming a new quantum, clear any previous expiry notification */
3740 act_clear_astkevent(thread, AST_KEVENT_WORKQ_QUANTUM_EXPIRED);
3741
3742 WQ_TRACE(TRACE_wq_quantum_arm, current_runtime, deadline, 0, 0);
3743
3744 WORKQ_QUANTUM_HISTORY_WRITE_ENTRY(thread, thread->workq_quantum_deadline, true);
3745 }
3746
3747 /* Called by a thread on itself when it is about to park */
3748 void
thread_disarm_workqueue_quantum(thread_t thread)
3750 {
3751 /* The update of a workqueue quantum should always be followed by the update
3752 * of the AST - see explanation in kern/thread.h for synchronization of this
3753 * field */
3754 thread->workq_quantum_deadline = 0;
3755 act_clear_astkevent(thread, AST_KEVENT_WORKQ_QUANTUM_EXPIRED);
3756
3757 WQ_TRACE(TRACE_wq_quantum_disarm, 0, 0, 0, 0);
3758
3759 WORKQ_QUANTUM_HISTORY_WRITE_ENTRY(thread, thread->workq_quantum_deadline, false);
3760 }
3761
3762 /* This is called at context switch time on a thread that may not be self,
3763 * and at AST time
3764 */
3765 bool
thread_has_expired_workqueue_quantum(thread_t thread, bool should_trace)
3767 {
3768 if (!thread_has_armed_workqueue_quantum(thread)) {
3769 return false;
3770 }
3771 /* We do not do a thread_get_runtime_self() here since this function is
3772 * called from context switch time or during scheduler quantum expiry and
3773 * therefore, we may not be evaluating it on the current thread/self.
3774 *
3775 * In addition, the timers on the thread have just been updated recently so
3776 * we don't need to update them again.
3777 */
3778 uint64_t runtime = recount_thread_time_mach(thread);
3779 bool expired = runtime > thread->workq_quantum_deadline;
3780
3781 if (expired && should_trace) {
3782 WQ_TRACE(TRACE_wq_quantum_expired, runtime, thread->workq_quantum_deadline, 0, 0);
3783 }
3784
3785 return expired;
3786 }
3787
3788 /*
3789 * Called on a thread that is being context switched out or during quantum
3790 * expiry on self. Only called from scheduler paths.
3791 */
3792 void
thread_evaluate_workqueue_quantum_expiry(thread_t thread)
3794 {
3795 if (thread_has_expired_workqueue_quantum(thread, true)) {
3796 act_set_astkevent(thread, AST_KEVENT_WORKQ_QUANTUM_EXPIRED);
3797 }
3798 }
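
/*
 * Putting the pieces above together, the typical workq quantum lifecycle is:
 *
 *  - thread_arm_workqueue_quantum() is called by the thread on itself before
 *    returning to userspace to handle a thread request;
 *  - the scheduler calls thread_evaluate_workqueue_quantum_expiry() at
 *    context switch time or scheduler quantum expiry, pending
 *    AST_KEVENT_WORKQ_QUANTUM_EXPIRED once the armed deadline has passed;
 *  - the thread observes the AST and either re-arms a fresh quantum or, when
 *    it is about to park, calls thread_disarm_workqueue_quantum().
 */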
3799
3800 boolean_t
thread_has_thread_name(thread_t th)
3802 {
3803 if (th) {
3804 return bsd_hasthreadname(get_bsdthread_info(th));
3805 }
3806
3807 /*
3808 * This is an odd case; clients may set the thread name based on the lack of
3809 * a name, but in this context there is no uthread to attach the name to.
3810 */
3811 return FALSE;
3812 }
3813
3814 void
thread_set_thread_name(thread_t th, const char* name)
3816 {
3817 if (th && name) {
3818 bsd_setthreadname(get_bsdthread_info(th), thread_tid(th), name);
3819 }
3820 }
3821
3822 void
thread_get_thread_name(thread_t th, char* name)
3824 {
3825 if (!name) {
3826 return;
3827 }
3828 if (th) {
3829 bsd_getthreadname(get_bsdthread_info(th), name);
3830 } else {
3831 name[0] = '\0';
3832 }
3833 }
3834
3835 processor_t
thread_get_runq(thread_t thread)
3837 {
3838 thread_lock_assert(thread, LCK_ASSERT_OWNED);
3839 processor_t runq = thread->__runq.runq;
3840 os_atomic_thread_fence(acquire);
3841 return runq;
3842 }
3843
3844 processor_t
thread_get_runq_locked(thread_t thread)
3846 {
3847 thread_lock_assert(thread, LCK_ASSERT_OWNED);
3848 processor_t runq = thread->__runq.runq;
3849 if (runq != PROCESSOR_NULL) {
3850 pset_assert_locked(runq->processor_set);
3851 }
3852 return runq;
3853 }
3854
3855 void
thread_set_runq_locked(thread_t thread, processor_t new_runq)
3857 {
3858 thread_lock_assert(thread, LCK_ASSERT_OWNED);
3859 pset_assert_locked(new_runq->processor_set);
3860 thread_assert_runq_null(thread);
3861 thread->__runq.runq = new_runq;
3862 }
3863
3864 void
thread_clear_runq(thread_t thread)
3866 {
3867 thread_assert_runq_nonnull(thread);
3868 os_atomic_thread_fence(release);
3869 thread->__runq.runq = PROCESSOR_NULL;
3870 }
3871
3872 void
thread_clear_runq_locked(thread_t thread)
3874 {
3875 thread_lock_assert(thread, LCK_ASSERT_OWNED);
3876 thread_assert_runq_nonnull(thread);
3877 thread->__runq.runq = PROCESSOR_NULL;
3878 }
3879
3880 void
thread_assert_runq_null(__assert_only thread_t thread)
3882 {
3883 assert(thread->__runq.runq == PROCESSOR_NULL);
3884 }
3885
3886 void
thread_assert_runq_nonnull(thread_t thread)
3888 {
3889 pset_assert_locked(thread->__runq.runq->processor_set);
3890 assert(thread->__runq.runq != PROCESSOR_NULL);
3891 }
3892
3893 void
thread_set_honor_qlimit(thread_t thread)
3895 {
3896 thread->options |= TH_OPT_HONOR_QLIMIT;
3897 }
3898
3899 void
thread_clear_honor_qlimit(thread_t thread)
3901 {
3902 thread->options &= (~TH_OPT_HONOR_QLIMIT);
3903 }
3904
3905 /*
3906 * thread_enable_send_importance - set/clear the SEND_IMPORTANCE thread option bit.
3907 */
3908 void
thread_enable_send_importance(thread_t thread, boolean_t enable)
3910 {
3911 if (enable == TRUE) {
3912 thread->options |= TH_OPT_SEND_IMPORTANCE;
3913 } else {
3914 thread->options &= ~TH_OPT_SEND_IMPORTANCE;
3915 }
3916 }
3917
3918 kern_return_t
thread_get_ipc_propagate_attr(thread_t thread, struct thread_attr_for_ipc_propagation *attr)
3920 {
3921 int iotier;
3922 int qos;
3923
3924 if (thread == NULL || attr == NULL) {
3925 return KERN_INVALID_ARGUMENT;
3926 }
3927
3928 iotier = proc_get_effective_thread_policy(thread, TASK_POLICY_IO);
3929 qos = proc_get_effective_thread_policy(thread, TASK_POLICY_QOS);
3930
3931 if (!qos) {
3932 qos = thread_user_promotion_qos_for_pri(thread->base_pri);
3933 }
3934
3935 attr->tafip_iotier = iotier;
3936 attr->tafip_qos = qos;
3937
3938 return KERN_SUCCESS;
3939 }
3940
3941 /*
 * thread_set_allocation_name - bind an allocation name to the current thread and return the previous one.
3943 */
3944
3945 kern_allocation_name_t
thread_set_allocation_name(kern_allocation_name_t new_name)
3947 {
3948 kern_allocation_name_t ret;
3949 thread_kernel_state_t kstate = thread_get_kernel_state(current_thread());
3950 ret = kstate->allocation_name;
	/* first-in wins: only install a new name when none is currently set, or when clearing */
3952 if (!new_name || !kstate->allocation_name) {
3953 kstate->allocation_name = new_name;
3954 }
3955 return ret;
3956 }
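
/*
 * A typical save/restore usage sketch (hypothetical caller), relying on the
 * first-in-wins behavior above:
 *
 *	kern_allocation_name_t prior = thread_set_allocation_name(name);
 *	... allocations performed here are attributed to `name` ...
 *	thread_set_allocation_name(prior);
 */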
3957
3958 void *
thread_iokit_tls_get(uint32_t index)
3960 {
3961 assert(index < THREAD_SAVE_IOKIT_TLS_COUNT);
3962 return current_thread()->saved.iokit.tls[index];
3963 }
3964
3965 void
thread_iokit_tls_set(uint32_t index, void * data)
3967 {
3968 assert(index < THREAD_SAVE_IOKIT_TLS_COUNT);
3969 current_thread()->saved.iokit.tls[index] = data;
3970 }
3971
3972 uint64_t
thread_get_last_wait_duration(thread_t thread)
3974 {
3975 return thread->last_made_runnable_time - thread->last_run_time;
3976 }
3977
3978 integer_t
thread_kern_get_pri(thread_t thr)
3980 {
3981 return thr->base_pri;
3982 }
3983
3984 void
thread_kern_set_pri(thread_t thr, integer_t pri)
3986 {
3987 sched_set_kernel_thread_priority(thr, pri);
3988 }
3989
3990 integer_t
thread_kern_get_kernel_maxpri(void)
3992 {
3993 return MAXPRI_KERNEL;
3994 }
3995 /*
3996 * thread_port_with_flavor_no_senders
3997 *
3998 * Called whenever the Mach port system detects no-senders on
3999 * the thread inspect or read port. These ports are allocated lazily and
4000 * should be deallocated here when there are no senders remaining.
4001 */
4002 static void
thread_port_with_flavor_no_senders(
4004 ipc_port_t port,
4005 mach_port_mscount_t mscount __unused)
4006 {
4007 thread_ro_t tro;
4008 thread_t thread;
4009 mach_thread_flavor_t flavor;
4010 ipc_kobject_type_t kotype;
4011
4012 ip_mq_lock(port);
4013 if (port->ip_srights > 0) {
4014 ip_mq_unlock(port);
4015 return;
4016 }
4017 kotype = ip_kotype(port);
4018 assert((IKOT_THREAD_READ == kotype) || (IKOT_THREAD_INSPECT == kotype));
4019 thread = ipc_kobject_get_locked(port, kotype);
4020 if (thread != THREAD_NULL) {
4021 thread_reference(thread);
4022 }
4023 ip_mq_unlock(port);
4024
4025 if (thread == THREAD_NULL) {
4026 /* The thread is exiting or disabled; it will eventually deallocate the port */
4027 return;
4028 }
4029
4030 if (kotype == IKOT_THREAD_READ) {
4031 flavor = THREAD_FLAVOR_READ;
4032 } else {
4033 flavor = THREAD_FLAVOR_INSPECT;
4034 }
4035
4036 thread_mtx_lock(thread);
4037 ip_mq_lock(port);
4038
4039 /*
4040 * If the port is no longer active, then ipc_thread_terminate() ran
 * and destroyed the kobject already. Just deallocate the thread
 * ref we took and go away.
4043 *
4044 * It is also possible that several nsrequests are in flight,
4045 * only one shall NULL-out the port entry, and this is the one
4046 * that gets to dealloc the port.
4047 *
4048 * Check for a stale no-senders notification. A call to any function
4049 * that vends out send rights to this port could resurrect it between
4050 * this notification being generated and actually being handled here.
4051 */
4052 tro = get_thread_ro(thread);
4053 if (!ip_active(port) ||
4054 tro->tro_ports[flavor] != port ||
4055 port->ip_srights > 0) {
4056 ip_mq_unlock(port);
4057 thread_mtx_unlock(thread);
4058 thread_deallocate(thread);
4059 return;
4060 }
4061
4062 assert(tro->tro_ports[flavor] == port);
4063 zalloc_ro_clear_field(ZONE_ID_THREAD_RO, tro, tro_ports[flavor]);
4064 thread_mtx_unlock(thread);
4065
4066 ipc_kobject_dealloc_port_and_unlock(port, 0, kotype);
4067
4068 thread_deallocate(thread);
4069 }
4070
4071 /*
4072 * The 'thread_region_page_shift' is used by footprint
4073 * to specify the page size that it will use to
4074 * accomplish its accounting work on the task being
4075 * inspected. Since footprint uses a thread for each
4076 * task that it works on, we need to keep the page_shift
4077 * on a per-thread basis.
4078 */
4079
4080 int
thread_self_region_page_shift(void)
4082 {
4083 /*
4084 * Return the page shift that this thread
4085 * would like to use for its accounting work.
4086 */
4087 return current_thread()->thread_region_page_shift;
4088 }
4089
4090 void
thread_self_region_page_shift_set(
4092 int pgshift)
4093 {
4094 /*
4095 * Set the page shift that this thread
4096 * would like to use for its accounting work
4097 * when dealing with a task.
4098 */
4099 current_thread()->thread_region_page_shift = pgshift;
4100 }
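
/*
 * A footprint-style caller would typically bracket its inspection of another
 * task with these accessors. A hedged sketch (hypothetical usage):
 *
 *	int prev = thread_self_region_page_shift();
 *	thread_self_region_page_shift_set(target_task_page_shift);
 *	... walk the target task's VM regions using that page size ...
 *	thread_self_region_page_shift_set(prev);
 */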
4101
4102 __startup_func
4103 static void
ctid_table_init(void)
4105 {
4106 /*
4107 * Pretend the early boot setup didn't exist,
4108 * and pick a mangling nonce.
4109 */
4110 *compact_id_resolve(&ctid_table, 0) = THREAD_NULL;
4111 ctid_nonce = (uint32_t)early_random() & CTID_MASK;
4112 }
4113
4114
4115 /*
4116 * This maps the [0, CTID_MAX_THREAD_NUMBER] range
4117 * to [1, CTID_MAX_THREAD_NUMBER + 1 == CTID_MASK]
4118 * so that in mangled form, '0' is an invalid CTID.
4119 */
4120 static ctid_t
ctid_mangle(compact_id_t cid)
4122 {
4123 return (cid == ctid_nonce ? CTID_MASK : cid) ^ ctid_nonce;
4124 }
4125
4126 static compact_id_t
ctid_unmangle(ctid_t ctid)
4128 {
4129 ctid ^= ctid_nonce;
4130 return ctid == CTID_MASK ? ctid_nonce : ctid;
4131 }
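
/*
 * Worked example, assuming a hypothetical nonce of 0x5: a compact id of 0x7
 * mangles to 0x7 ^ 0x5 == 0x2 and unmangles back to 0x7; a compact id equal
 * to the nonce (0x5) is first remapped to CTID_MASK, so it mangles to
 * CTID_MASK ^ 0x5 and unmangles back to 0x5. An xor result of 0 would require
 * cid == ctid_nonce, and that case is remapped, so 0 is never a valid mangled
 * ctid.
 */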
4132
4133 void
ctid_table_add(thread_t thread)
4135 {
4136 compact_id_t cid;
4137
4138 cid = compact_id_get(&ctid_table, CTID_MAX_THREAD_NUMBER, thread);
4139 thread->ctid = ctid_mangle(cid);
4140 }
4141
4142 void
ctid_table_remove(thread_t thread)
4144 {
4145 __assert_only thread_t value;
4146
4147 value = compact_id_put(&ctid_table, ctid_unmangle(thread->ctid));
4148 assert3p(value, ==, thread);
4149 thread->ctid = 0;
4150 }
4151
4152 thread_t
ctid_get_thread_unsafe(ctid_t ctid)
4154 {
4155 if (ctid && compact_id_slab_valid(&ctid_table, ctid_unmangle(ctid))) {
4156 return *compact_id_resolve(&ctid_table, ctid_unmangle(ctid));
4157 }
4158 return THREAD_NULL;
4159 }
4160
4161 thread_t
ctid_get_thread(ctid_t ctid)
4163 {
4164 thread_t thread = THREAD_NULL;
4165
4166 if (ctid) {
4167 thread = *compact_id_resolve(&ctid_table, ctid_unmangle(ctid));
4168 assert(thread && thread->ctid == ctid);
4169 }
4170 return thread;
4171 }
4172
4173 ctid_t
thread_get_ctid(thread_t thread)
4175 {
4176 return thread->ctid;
4177 }
4178
4179 /*
4180 * Adjust code signature dependent thread state.
4181 *
4182 * Called to allow code signature dependent adjustments to the thread
4183 * state. Note that this is usually called twice for the main thread:
4184 * Once at thread creation by thread_create, when the signature is
4185 * potentially not attached yet (which is usually the case for the
4186 * first/main thread of a task), and once after the task's signature
4187 * has actually been attached.
4188 *
4189 */
4190 kern_return_t
thread_process_signature(thread_t thread, task_t task)
4192 {
4193 return machine_thread_process_signature(thread, task);
4194 }
4195
4196 #if CONFIG_SPTM
4197
4198 void
thread_associate_txm_thread_stack(uintptr_t thread_stack)
4200 {
4201 thread_t self = current_thread();
4202
4203 if (self->txm_thread_stack != 0) {
4204 panic("attempted multiple TXM thread associations: %lu | %lu",
4205 self->txm_thread_stack, thread_stack);
4206 }
4207
4208 self->txm_thread_stack = thread_stack;
4209 }
4210
4211 void
thread_disassociate_txm_thread_stack(uintptr_t thread_stack)
4213 {
4214 thread_t self = current_thread();
4215
4216 if (self->txm_thread_stack == 0) {
4217 panic("attempted to disassociate non-existent TXM thread");
4218 } else if (self->txm_thread_stack != thread_stack) {
4219 panic("invalid disassociation for TXM thread: %lu | %lu",
4220 self->txm_thread_stack, thread_stack);
4221 }
4222
4223 self->txm_thread_stack = 0;
4224 }
4225
4226 uintptr_t
thread_get_txm_thread_stack(void)
4228 {
4229 return current_thread()->txm_thread_stack;
4230 }
4231
4232 #endif
4233
4234 #if CONFIG_DTRACE
4235 uint32_t
dtrace_get_thread_predcache(thread_t thread)
4237 {
4238 if (thread != THREAD_NULL) {
4239 return thread->t_dtrace_predcache;
4240 } else {
4241 return 0;
4242 }
4243 }
4244
4245 int64_t
dtrace_get_thread_vtime(thread_t thread)
4247 {
4248 if (thread != THREAD_NULL) {
4249 return thread->t_dtrace_vtime;
4250 } else {
4251 return 0;
4252 }
4253 }
4254
4255 int
dtrace_get_thread_last_cpu_id(thread_t thread)
4257 {
4258 if ((thread != THREAD_NULL) && (thread->last_processor != PROCESSOR_NULL)) {
4259 return thread->last_processor->cpu_id;
4260 } else {
4261 return -1;
4262 }
4263 }
4264
4265 int64_t
dtrace_get_thread_tracing(thread_t thread)
4267 {
4268 if (thread != THREAD_NULL) {
4269 return thread->t_dtrace_tracing;
4270 } else {
4271 return 0;
4272 }
4273 }
4274
4275 uint16_t
dtrace_get_thread_inprobe(thread_t thread)
4277 {
4278 if (thread != THREAD_NULL) {
4279 return thread->t_dtrace_inprobe;
4280 } else {
4281 return 0;
4282 }
4283 }
4284
4285 vm_offset_t
thread_get_kernel_stack(thread_t thread)
4287 {
4288 if (thread != THREAD_NULL) {
4289 return thread->kernel_stack;
4290 } else {
4291 return 0;
4292 }
4293 }
4294
4295 #if KASAN
4296 struct kasan_thread_data *
kasan_get_thread_data(thread_t thread)
4298 {
4299 return &thread->kasan_data;
4300 }
4301 #endif
4302
4303 #if CONFIG_KCOV
4304 kcov_thread_data_t *
kcov_get_thread_data(thread_t thread)
4306 {
4307 return &thread->kcov_data;
4308 }
4309 #endif
4310
4311 #if CONFIG_STKSZ
4312 /*
4313 * Returns base of a thread's kernel stack.
4314 *
 * The coverage sanitizer instruments every function, including those that participate in stack handoff between threads.
 * There is a window in which the CPU still holds old values but the stack has already been handed over to another thread.
 * In this window kernel_stack is 0 but the CPU still uses the original stack (until a context switch occurs). The original
 * kernel_stack value is preserved in ksancov_stack during this window.
4319 */
4320 vm_offset_t
kcov_stksz_get_thread_stkbase(thread_t thread)
4322 {
4323 if (thread != THREAD_NULL) {
4324 kcov_thread_data_t *data = kcov_get_thread_data(thread);
4325 if (data->ktd_stksz.kst_stack) {
4326 return data->ktd_stksz.kst_stack;
4327 } else {
4328 return thread->kernel_stack;
4329 }
4330 } else {
4331 return 0;
4332 }
4333 }
4334
4335 vm_offset_t
kcov_stksz_get_thread_stksize(thread_t thread)
4337 {
4338 if (thread != THREAD_NULL) {
4339 return kernel_stack_size;
4340 } else {
4341 return 0;
4342 }
4343 }
4344
4345 void
kcov_stksz_set_thread_stack(thread_t thread, vm_offset_t stack)
4347 {
4348 kcov_thread_data_t *data = kcov_get_thread_data(thread);
4349 data->ktd_stksz.kst_stack = stack;
4350 }
4351 #endif /* CONFIG_STKSZ */
4352
4353 int64_t
dtrace_calc_thread_recent_vtime(thread_t thread)
4355 {
4356 if (thread == THREAD_NULL) {
4357 return 0;
4358 }
4359
4360 struct recount_usage usage = { 0 };
4361 recount_current_thread_usage(&usage);
4362 return (int64_t)(recount_usage_time_mach(&usage));
4363 }
4364
4365 void
dtrace_set_thread_predcache(thread_t thread, uint32_t predcache)
4367 {
4368 if (thread != THREAD_NULL) {
4369 thread->t_dtrace_predcache = predcache;
4370 }
4371 }
4372
4373 void
dtrace_set_thread_vtime(thread_t thread, int64_t vtime)
4375 {
4376 if (thread != THREAD_NULL) {
4377 thread->t_dtrace_vtime = vtime;
4378 }
4379 }
4380
4381 void
dtrace_set_thread_tracing(thread_t thread, int64_t accum)
4383 {
4384 if (thread != THREAD_NULL) {
4385 thread->t_dtrace_tracing = accum;
4386 }
4387 }
4388
4389 void
dtrace_set_thread_inprobe(thread_t thread, uint16_t inprobe)
4391 {
4392 if (thread != THREAD_NULL) {
4393 thread->t_dtrace_inprobe = inprobe;
4394 }
4395 }
4396
4397 void
dtrace_thread_bootstrap(void)
4399 {
4400 task_t task = current_task();
4401
4402 if (task->thread_count == 1) {
4403 thread_t thread = current_thread();
4404 if (thread->t_dtrace_flags & TH_DTRACE_EXECSUCCESS) {
4405 thread->t_dtrace_flags &= ~TH_DTRACE_EXECSUCCESS;
4406 DTRACE_PROC(exec__success);
4407 KDBG(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXEC),
4408 task_pid(task));
4409 }
4410 DTRACE_PROC(start);
4411 }
4412 DTRACE_PROC(lwp__start);
4413 }
4414
4415 void
dtrace_thread_didexec(thread_t thread)
4417 {
4418 thread->t_dtrace_flags |= TH_DTRACE_EXECSUCCESS;
4419 }
4420 #endif /* CONFIG_DTRACE */
4421