1 /*
2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Copyright (c) 1993 The University of Utah and
33 * the Center for Software Science (CSS). All rights reserved.
34 *
35 * Permission to use, copy, modify and distribute this software and its
36 * documentation is hereby granted, provided that both the copyright
37 * notice and this permission notice appear in all copies of the
38 * software, derivative works or modified versions, and any portions
39 * thereof, and that both notices appear in supporting documentation.
40 *
41 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
42 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
43 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
44 *
45 * CSS requests users of this software to return to [email protected] any
46 * improvements that they make and grant CSS redistribution rights.
47 *
48 * Author: Bryan Ford, University of Utah CSS
49 *
50 * Thread management routines
51 */
52
53 #include <sys/kdebug.h>
54 #include <mach/mach_types.h>
55 #include <mach/kern_return.h>
56 #include <mach/thread_act_server.h>
57 #include <mach/thread_act.h>
58
59 #include <kern/kern_types.h>
60 #include <kern/ast.h>
61 #include <kern/mach_param.h>
62 #include <kern/zalloc.h>
63 #include <kern/extmod_statistics.h>
64 #include <kern/thread.h>
65 #include <kern/task.h>
66 #include <kern/sched_prim.h>
67 #include <kern/misc_protos.h>
68 #include <kern/assert.h>
69 #include <kern/exception.h>
70 #include <kern/ipc_mig.h>
71 #include <kern/ipc_tt.h>
72 #include <kern/machine.h>
73 #include <kern/spl.h>
74 #include <kern/syscall_subr.h>
75 #include <kern/processor.h>
76 #include <kern/restartable.h>
77 #include <kern/timer.h>
78 #include <kern/affinity.h>
79 #include <kern/host.h>
80 #include <kern/exc_guard.h>
81 #include <ipc/ipc_policy.h>
82 #include <mach/arm/thread_status.h>
83
84 #include <sys/code_signing.h>
85
86 #include <stdatomic.h>
87
88 #include <security/mac_mach_internal.h>
89 #include <libkern/coreanalytics/coreanalytics.h>
90
/* Forward declarations for routines defined later in this file. */
static void act_abort(thread_t thread);

static void thread_suspended(void *arg, wait_result_t result);
static void thread_set_apc_ast(thread_t thread);
static void thread_set_apc_ast_locked(thread_t thread);

/* BSD/IOKit helpers without a kernel-private header visible here. */
extern void proc_name(int pid, char * buf, int size);
extern boolean_t IOTaskHasEntitlement(task_t task, const char *entitlement);

/*
 * CoreAnalytics event used to report denied thread_set_state attempts;
 * carries only the name of the offending process.
 */
CA_EVENT(thread_set_state,
    CA_STATIC_STRING(CA_PROCNAME_LEN), current_proc);
102
/*
 * Allocate, fill, and send the thread_set_state CoreAnalytics event,
 * recording the current process's name.  Called on the denial paths of
 * the thread_set_state policy checks below.
 */
static void
send_thread_set_state_telemetry(void)
{
	ca_event_t ca_event = CA_EVENT_ALLOCATE(thread_set_state);
	CA_EVENT_TYPE(thread_set_state) * event = ca_event->data;

	/* Fill in the caller's process name, truncated to CA_PROCNAME_LEN. */
	proc_name(task_pid(current_task()), (char *) &event->current_proc, CA_PROCNAME_LEN);

	CA_EVENT_SEND(ca_event);
}
113
/* bootarg to create lightweight corpse for thread set state lockdown */
TUNABLE(bool, tss_should_crash, "tss_should_crash", true);

/*
 * thread_set_state_allowed:
 *
 * Security policy check for setting a thread's machine state.
 *
 * Returns TRUE when the operation may proceed.  Returns FALSE — after
 * raising a fatal kGUARD_EXC_THREAD_SET_STATE guard exception and
 * sending telemetry — when either:
 *   1) the target task is a hardened binary and the flavor modifies
 *      core CPU registers (PC/LR), or
 *   2) (ptrauth targets) the target task has fatal PAC exceptions
 *      enabled and the flavor is a debug-state flavor,
 * and in both cases the caller lacks the
 * "com.apple.private.thread-set-state" entitlement.
 *
 * NOTE(review): curr_task is passed through from
 * thread_set_state_internal and appears to remain NULL on OSX/bridge
 * configurations — presumably IOTaskHasEntitlement tolerates a NULL
 * task; confirm against its implementation.
 */
static inline boolean_t
thread_set_state_allowed(thread_t thread, int flavor, task_t curr_task)
{
	task_t target_task = get_threadtask(thread);

#if DEVELOPMENT || DEBUG
	/* disable the feature if the boot-arg is disabled. */
	if (!tss_should_crash) {
		return TRUE;
	}
#endif /* DEVELOPMENT || DEBUG */

	/* hardened binaries must have entitlement - all others ok */
	if (task_is_hardened_binary(target_task)
	    && !(thread->options & TH_IN_MACH_EXCEPTION) /* Allowed for now - rdar://103085786 */
	    && FLAVOR_MODIFIES_CORE_CPU_REGISTERS(flavor) /* only care about locking down PC/LR */
#if XNU_TARGET_OS_OSX
	    && !task_opted_out_mach_hardening(target_task)
#endif /* XNU_TARGET_OS_OSX */
#if CONFIG_ROSETTA
	    && !task_is_translated(target_task) /* Ignore translated tasks */
#endif /* CONFIG_ROSETTA */
	    && !IOTaskHasEntitlement(curr_task, "com.apple.private.thread-set-state")
	    ) {
		/* fatal crash */
		mach_port_guard_exception(MACH_PORT_NULL, 0, kGUARD_EXC_THREAD_SET_STATE);
		send_thread_set_state_telemetry();
		return FALSE;
	}

#if __has_feature(ptrauth_calls)
	/* Do not allow Fatal PAC exception binaries to set Debug state */
	if (task_is_pac_exception_fatal(target_task)
	    && machine_thread_state_is_debug_flavor(flavor)
#if XNU_TARGET_OS_OSX
	    && !task_opted_out_mach_hardening(target_task)
#endif /* XNU_TARGET_OS_OSX */
#if CONFIG_ROSETTA
	    && !task_is_translated(target_task) /* Ignore translated tasks */
#endif /* CONFIG_ROSETTA */
	    && !IOTaskHasEntitlement(curr_task, "com.apple.private.thread-set-state")
	    ) {
		/* fatal crash */
		mach_port_guard_exception(MACH_PORT_NULL, 0, kGUARD_EXC_THREAD_SET_STATE);
		send_thread_set_state_telemetry();
		return FALSE;
	}
#endif /* __has_feature(ptrauth_calls) */

	return TRUE;
}
168
169 /*
170 * Internal routine to mark a thread as started.
171 * Always called with the thread mutex locked.
172 */
173 void
thread_start(thread_t thread)174 thread_start(
175 thread_t thread)
176 {
177 clear_wait(thread, THREAD_AWAKENED);
178 thread->started = TRUE;
179 }
180
/*
 * Internal routine to mark a thread as waiting
 * right after it has been created. The caller
 * is responsible to call wakeup()/thread_wakeup()
 * or thread_terminate() to get it going.
 *
 * Always called with the thread mutex locked.
 *
 * Task and task_threads mutexes also held
 * (so nobody can set the thread running before
 * this point)
 *
 * Converts TH_UNINT wait to THREAD_INTERRUPTIBLE
 * to allow termination from this point forward.
 */
void
thread_start_in_assert_wait(
	thread_t                        thread,
	struct waitq            *waitq,
	event64_t               event,
	wait_interrupt_t        interruptible)
{
	wait_result_t wait_result;
	spl_t spl;

	/* waitq lock must be taken at splsched before the thread lock */
	spl = splsched();
	waitq_lock(waitq);

	/* clear out startup condition (safe because thread not started yet) */
	thread_lock(thread);
	assert(!thread->started);
	assert((thread->state & (TH_WAIT | TH_UNINT)) == (TH_WAIT | TH_UNINT));
	thread->state &= ~(TH_WAIT | TH_UNINT);
	thread_unlock(thread);

	/* assert wait interruptibly forever */
	wait_result = waitq_assert_wait64_locked(waitq, event,
	    interruptible,
	    TIMEOUT_URGENCY_SYS_NORMAL,
	    TIMEOUT_WAIT_FOREVER,
	    TIMEOUT_NO_LEEWAY,
	    thread);
	assert(wait_result == THREAD_WAITING);

	/* mark thread started while we still hold the waitq lock */
	thread_lock(thread);
	thread->started = TRUE;
	thread_unlock(thread);

	waitq_unlock(waitq);
	splx(spl);
}
233
/*
 * Internal routine to terminate a thread.
 * Sometimes called with task already locked.
 *
 * If thread is on core, cause AST check immediately;
 * Otherwise, let the thread continue running in kernel
 * until it hits AST.
 *
 * Returns KERN_SUCCESS, or KERN_TERMINATED if the thread
 * was already inactive.
 */
kern_return_t
thread_terminate_internal(
	thread_t                        thread)
{
	kern_return_t           result = KERN_SUCCESS;

	thread_mtx_lock(thread);

	if (thread->active) {
		thread->active = FALSE;

		/* request the AST_APC abort that drives termination */
		act_abort(thread);

		if (thread->started) {
			clear_wait(thread, THREAD_INTERRUPTED);
		} else {
			/* never-started threads must be started so they can terminate */
			thread_start(thread);
		}
	} else {
		result = KERN_TERMINATED;
	}

	if (thread->affinity_set != NULL) {
		thread_affinity_terminate(thread);
	}

	/* unconditionally unpin the thread in internal termination */
	ipc_thread_port_unpin(get_thread_ro(thread)->tro_self_port);

	thread_mtx_unlock(thread);

	/* wait for the target to stop running before returning */
	if (thread != current_thread() && result == KERN_SUCCESS) {
		thread_wait(thread, FALSE);
	}

	return result;
}
279
/*
 * Terminate the specified thread.
 *
 * Kernel threads may only terminate themselves; a kernel thread
 * terminating itself never returns (finishes in thread_apc_ast).
 */
kern_return_t
thread_terminate(
	thread_t                thread)
{
	task_t task;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (thread->state & TH_IDLE) {
		panic("idle thread calling thread_terminate!");
	}

	task = get_threadtask(thread);

	/* Kernel threads can't be terminated without their own cooperation */
	if (task == kernel_task && thread != current_thread()) {
		return KERN_FAILURE;
	}

	kern_return_t result = thread_terminate_internal(thread);

	/*
	 * If a kernel thread is terminating itself, force handle the APC_AST here.
	 * Kernel threads don't pass through the return-to-user AST checking code,
	 * but all threads must finish their own termination in thread_apc_ast.
	 */
	if (task == kernel_task) {
		assert(thread->active == FALSE);
		thread_ast_clear(thread, AST_APC);
		thread_apc_ast(thread);

		/* thread_apc_ast must not return for a terminating thread */
		panic("thread_terminate");
		/* NOTREACHED */
	}

	return result;
}
319
320 /*
321 * [MIG Call] Terminate a thread.
322 *
323 * Cannot be used on threads managed by pthread.
324 */
325 kern_return_t
thread_terminate_from_user(thread_t thread)326 thread_terminate_from_user(
327 thread_t thread)
328 {
329 if (thread == THREAD_NULL) {
330 return KERN_INVALID_ARGUMENT;
331 }
332
333 if (thread_get_tag(thread) & THREAD_TAG_PTHREAD) {
334 return KERN_DENIED;
335 }
336
337 return thread_terminate(thread);
338 }
339
/*
 * Terminate a thread with pinned control port.
 *
 * Can only be used on threads managed by pthread. Exported in pthread_kern.
 */
kern_return_t
thread_terminate_pinned(
	thread_t                thread)
{
	task_t task;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	task = get_threadtask(thread);


	assert(task != kernel_task);
	assert(thread_get_tag(thread) & (THREAD_TAG_PTHREAD | THREAD_TAG_MAINTHREAD));

	/* sanity-check: an active thread in a pinned task must hold a pinned self port */
	thread_mtx_lock(thread);
	if (task_is_pinned(task) && thread->active) {
		assert(get_thread_ro(thread)->tro_self_port->ip_pinned == 1);
	}
	thread_mtx_unlock(thread);

	kern_return_t result = thread_terminate_internal(thread);
	return result;
}
370
/*
 * Suspend execution of the specified thread.
 * This is a recursive-style suspension of the thread, a count of
 * suspends is maintained.
 *
 * Only the 0 -> 1 transition installs the AST_APC handler that
 * actually parks the thread.
 *
 * Called with thread mutex held.
 */
void
thread_hold(thread_t thread)
{
	if (thread->suspend_count++ == 0) {
		task_t task = get_threadtask(thread);
		thread_set_apc_ast(thread);
		assert(thread->suspend_parked == FALSE);

		KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_SUSPENSION, MACH_THREAD_SUSPEND) | DBG_FUNC_NONE,
		    thread->thread_id, thread->user_stop_count, task->pidsuspended);
	}
}
390
/*
 * Decrement internal suspension count, setting thread
 * runnable when count falls to zero.
 *
 * Because the wait is abortsafe, we can't be guaranteed that the thread
 * is currently actually waiting even if suspend_parked is set.
 *
 * Called with thread mutex held.
 */
void
thread_release(thread_t thread)
{
	assertf(thread->suspend_count > 0, "thread %p over-resumed", thread);

	/* fail-safe on non-assert builds */
	if (thread->suspend_count == 0) {
		return;
	}

	if (--thread->suspend_count == 0) {
		if (!thread->started) {
			thread_start(thread);
		} else if (thread->suspend_parked) {
			/* wake the thread parked in thread_suspended() */
			thread->suspend_parked = FALSE;
			thread_wakeup_thread(&thread->suspend_count, thread);
		}
		KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_SUSPENSION, MACH_THREAD_RESUME) | DBG_FUNC_NONE, thread->thread_id);
	}
}
420
421 kern_return_t
thread_suspend(thread_t thread)422 thread_suspend(thread_t thread)
423 {
424 kern_return_t result = KERN_SUCCESS;
425
426 if (thread == THREAD_NULL || get_threadtask(thread) == kernel_task) {
427 return KERN_INVALID_ARGUMENT;
428 }
429
430 thread_mtx_lock(thread);
431
432 if (thread->active) {
433 if (thread->user_stop_count++ == 0) {
434 thread_hold(thread);
435 }
436 } else {
437 result = KERN_TERMINATED;
438 }
439
440 thread_mtx_unlock(thread);
441
442 if (thread != current_thread() && result == KERN_SUCCESS) {
443 thread_wait(thread, FALSE);
444 }
445
446 return result;
447 }
448
449 kern_return_t
thread_resume(thread_t thread)450 thread_resume(thread_t thread)
451 {
452 kern_return_t result = KERN_SUCCESS;
453
454 if (thread == THREAD_NULL || get_threadtask(thread) == kernel_task) {
455 return KERN_INVALID_ARGUMENT;
456 }
457
458 thread_mtx_lock(thread);
459
460 if (thread->active) {
461 if (thread->user_stop_count > 0) {
462 if (--thread->user_stop_count == 0) {
463 thread_release(thread);
464 }
465 } else {
466 result = KERN_FAILURE;
467 }
468 } else {
469 result = KERN_TERMINATED;
470 }
471
472 thread_mtx_unlock(thread);
473
474 return result;
475 }
476
477 /*
478 * thread_depress_abort_from_user:
479 *
480 * Prematurely abort priority depression if there is one.
481 */
482 kern_return_t
thread_depress_abort_from_user(thread_t thread)483 thread_depress_abort_from_user(thread_t thread)
484 {
485 kern_return_t result;
486
487 if (thread == THREAD_NULL) {
488 return KERN_INVALID_ARGUMENT;
489 }
490
491 thread_mtx_lock(thread);
492
493 if (thread->active) {
494 result = thread_depress_abort(thread);
495 } else {
496 result = KERN_TERMINATED;
497 }
498
499 thread_mtx_unlock(thread);
500
501 return result;
502 }
503
504
/*
 * Indicate that the thread should run the AST_APC callback
 * to detect an abort condition.
 *
 * If an abort is already pending, only clear the "safely" bit so the
 * pending abort is upgraded to an unconditional one.
 *
 * Called with thread mutex held.
 */
static void
act_abort(
	thread_t        thread)
{
	spl_t           s = splsched();

	thread_lock(thread);

	if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
		thread->sched_flags |= TH_SFLAG_ABORT;
		thread_set_apc_ast_locked(thread);
		thread_depress_abort_locked(thread);
	} else {
		/* upgrade a pending safe abort to an unconditional abort */
		thread->sched_flags &= ~TH_SFLAG_ABORTSAFELY;
	}

	thread_unlock(thread);
	splx(s);
}
530
531 kern_return_t
thread_abort(thread_t thread)532 thread_abort(
533 thread_t thread)
534 {
535 kern_return_t result = KERN_SUCCESS;
536
537 if (thread == THREAD_NULL) {
538 return KERN_INVALID_ARGUMENT;
539 }
540
541 thread_mtx_lock(thread);
542
543 if (thread->active) {
544 act_abort(thread);
545 clear_wait(thread, THREAD_INTERRUPTED);
546 } else {
547 result = KERN_TERMINATED;
548 }
549
550 thread_mtx_unlock(thread);
551
552 return result;
553 }
554
/*
 * Abort the specified thread only if it is at a safe point
 * (or set up a pending abort to be delivered at the next safe
 * point / AST_APC if it is not).
 */
kern_return_t
thread_abort_safely(
	thread_t                thread)
{
	kern_return_t   result = KERN_SUCCESS;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		spl_t           s = splsched();

		thread_lock(thread);
		/*
		 * Try to interrupt the wait only when the thread is at a safe
		 * point; otherwise leave a pending "abort safely" for the APC.
		 */
		if (!thread->at_safe_point ||
		    clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
			if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
				thread->sched_flags |= TH_SFLAG_ABORTED_MASK;
				thread_set_apc_ast_locked(thread);
				thread_depress_abort_locked(thread);
			}
		}
		thread_unlock(thread);
		splx(s);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
589
590 /*** backward compatibility hacks ***/
591 #include <mach/thread_info.h>
592 #include <mach/thread_special_ports.h>
593 #include <ipc/ipc_port.h>
594
595 kern_return_t
thread_info(thread_t thread,thread_flavor_t flavor,thread_info_t thread_info_out,mach_msg_type_number_t * thread_info_count)596 thread_info(
597 thread_t thread,
598 thread_flavor_t flavor,
599 thread_info_t thread_info_out,
600 mach_msg_type_number_t *thread_info_count)
601 {
602 kern_return_t result;
603
604 if (thread == THREAD_NULL) {
605 return KERN_INVALID_ARGUMENT;
606 }
607
608 thread_mtx_lock(thread);
609
610 if (thread->active || thread->inspection) {
611 result = thread_info_internal(
612 thread, flavor, thread_info_out, thread_info_count);
613 } else {
614 result = KERN_TERMINATED;
615 }
616
617 thread_mtx_unlock(thread);
618
619 return result;
620 }
621
/*
 * Common implementation for fetching a thread's machine state.
 *
 * A remote target is held and stopped around the machine-level read so
 * a consistent snapshot is taken; the current thread reads its own
 * state directly.  With TSSF_TRANSLATE_TO_USER, the state is converted
 * to its userspace representation before returning.
 */
static inline kern_return_t
thread_get_state_internal(
	thread_t                thread,
	int                                             flavor,
	thread_state_t                  state,                  /* pointer to OUT array */
	mach_msg_type_number_t  *state_count,   /*IN/OUT*/
	thread_set_status_flags_t flags)
{
	kern_return_t           result = KERN_SUCCESS;
	boolean_t               to_user = !!(flags & TSSF_TRANSLATE_TO_USER);

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			/* drop mutex: thread_stop may block */
			thread_mtx_unlock(thread);

			if (thread_stop(thread, FALSE)) {
				thread_mtx_lock(thread);
				result = machine_thread_get_state(
					thread, flavor, state, state_count);
				thread_unstop(thread);
			} else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		} else {
			result = machine_thread_get_state(
				thread, flavor, state, state_count);
		}
	} else if (thread->inspection) {
		/* inactive but inspectable (e.g. corpse) */
		result = machine_thread_get_state(
			thread, flavor, state, state_count);
	} else {
		result = KERN_TERMINATED;
	}

	if (to_user && result == KERN_SUCCESS) {
		result = machine_thread_state_convert_to_user(thread, flavor, state,
		    state_count, flags);
	}

	thread_mtx_unlock(thread);

	return result;
}
676
/* No prototype, since thread_act_server.h has the _to_user version if KERNEL_SERVER */

kern_return_t
thread_get_state(
	thread_t                thread,
	int                     flavor,
	thread_state_t          state,
	mach_msg_type_number_t  *state_count);

/*
 * Kernel-internal variant: fetch machine state without converting it
 * to the userspace representation.
 */
kern_return_t
thread_get_state(
	thread_t                thread,
	int                                             flavor,
	thread_state_t                  state,                  /* pointer to OUT array */
	mach_msg_type_number_t  *state_count)   /*IN/OUT*/
{
	return thread_get_state_internal(thread, flavor, state, state_count, TSSF_FLAGS_NONE);
}
695
/*
 * [MIG-facing] Fetch machine state converted to the userspace
 * representation (e.g. ptrauth-signed register values).
 */
kern_return_t
thread_get_state_to_user(
	thread_t                thread,
	int                                             flavor,
	thread_state_t                  state,                  /* pointer to OUT array */
	mach_msg_type_number_t  *state_count)   /*IN/OUT*/
{
	return thread_get_state_internal(thread, flavor, state, state_count, TSSF_TRANSLATE_TO_USER);
}
705
/*
 * Change thread's machine-dependent state. Called with nothing
 * locked. Returns same way.
 *
 * Security checks run before any state is applied:
 *  - on embedded targets, a userspace exception handler may only set
 *    state cross-task when the target is not hardened, is being
 *    debugged, or the caller is entitled;
 *  - with TSSF_CHECK_ENTITLEMENT, thread_set_state_allowed() enforces
 *    the hardened-binary / PAC lockdown policy.
 *
 * A remote target is held and stopped around the machine-level write;
 * the current thread writes its own state directly.
 */
static inline kern_return_t
thread_set_state_internal(
	thread_t                thread,
	int                                             flavor,
	thread_state_t                  state,
	mach_msg_type_number_t  state_count,
	thread_state_t                  old_state,
	mach_msg_type_number_t  old_state_count,
	thread_set_status_flags_t flags)
{
	kern_return_t           result = KERN_SUCCESS;
	boolean_t               from_user = !!(flags & TSSF_TRANSLATE_TO_USER);
	task_t                  curr_task = NULL;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

#if !(XNU_TARGET_OS_OSX || XNU_TARGET_OS_BRIDGE)
	/*
	 * Setting the thread state from a userspace mach exception handler is
	 * allowed iff it comes from the same process, or if the process is
	 * being debugged (in dev mode), regardless of TSSF_CHECK_ENTITLEMENT
	 */
	if (thread->options & TH_IN_MACH_EXCEPTION) {
		task_t target_task = get_threadtask(thread);
		proc_t target_proc = get_bsdtask_info(target_task);
		curr_task = current_task();
		if (target_task != curr_task &&
		    task_is_hardened_binary(target_task) &&
		    (address_space_debugged(target_proc) != KERN_SUCCESS) &&
		    !IOTaskHasEntitlement(curr_task, "com.apple.private.thread-set-state")) {
			mach_port_guard_exception(MACH_PORT_NULL, 0, kGUARD_EXC_THREAD_SET_STATE);
			send_thread_set_state_telemetry();
			return KERN_NO_ACCESS;
		}
	}
#endif /* !(XNU_TARGET_OS_OSX || XNU_TARGET_OS_BRIDGE) */

	/*
	 * NOTE(review): on OSX/bridge builds curr_task is still NULL here —
	 * presumably IOTaskHasEntitlement(NULL, ...) is benign; confirm.
	 */
	if ((flags & TSSF_CHECK_ENTITLEMENT) &&
	    !thread_set_state_allowed(thread, flavor, curr_task)) {
		return KERN_NO_ACCESS;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (from_user) {
			/* authenticate/convert incoming user state before applying it */
			result = machine_thread_state_convert_from_user(thread, flavor,
			    state, state_count, old_state, old_state_count, flags);
			if (result != KERN_SUCCESS) {
				goto out;
			}
		}
		if (thread != current_thread()) {
			thread_hold(thread);

			/* drop mutex: thread_stop may block */
			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_state(
					thread, flavor, state, state_count);
				thread_unstop(thread);
			} else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		} else {
			result = machine_thread_set_state(
				thread, flavor, state, state_count);
		}
	} else {
		result = KERN_TERMINATED;
	}

	if ((result == KERN_SUCCESS) && from_user) {
		/* account external modification for telemetry/statistics */
		extmod_statistics_incr_thread_set_state(thread);
	}

out:
	thread_mtx_unlock(thread);

	return result;
}
797
/* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */
kern_return_t
thread_set_state(
	thread_t                thread,
	int                     flavor,
	thread_state_t          state,
	mach_msg_type_number_t  state_count);

/*
 * Kernel-internal variant: apply machine state directly, without the
 * user-representation conversion or entitlement checks.
 */
kern_return_t
thread_set_state(
	thread_t                thread,
	int                                             flavor,
	thread_state_t                  state,
	mach_msg_type_number_t  state_count)
{
	return thread_set_state_internal(thread, flavor, state, state_count, NULL, 0, TSSF_FLAGS_NONE);
}
815
/*
 * [MIG-facing] Apply machine state supplied by userspace: the state is
 * converted/authenticated from its user representation and subject to
 * the TSSF_CHECK_ENTITLEMENT security policy.
 */
kern_return_t
thread_set_state_from_user(
	thread_t                thread,
	int                                             flavor,
	thread_state_t                  state,
	mach_msg_type_number_t  state_count)
{
	return thread_set_state_internal(thread, flavor, state, state_count, NULL,
	           0, TSSF_TRANSLATE_TO_USER | TSSF_CHECK_ENTITLEMENT);
}
826
/*
 * Convert a thread-state buffer between the user representations of
 * two threads (current thread and the target), in the direction given
 * by THREAD_CONVERT_THREAD_STATE_TO_SELF / _FROM_SELF.
 *
 * The in_state buffer is converted in place to the kernel
 * representation and then to the destination thread's user
 * representation before being copied to out_state.
 */
kern_return_t
thread_convert_thread_state(
	thread_t                thread,
	int                                             direction,
	thread_state_flavor_t   flavor,
	thread_state_t                  in_state,          /* pointer to IN array */
	mach_msg_type_number_t  in_state_count,
	thread_state_t                  out_state,         /* pointer to OUT array */
	mach_msg_type_number_t  *out_state_count)          /*IN/OUT*/
{
	kern_return_t kr;
	thread_t to_thread = THREAD_NULL;
	thread_t from_thread = THREAD_NULL;
	mach_msg_type_number_t state_count = in_state_count;

	if (direction != THREAD_CONVERT_THREAD_STATE_TO_SELF &&
	    direction != THREAD_CONVERT_THREAD_STATE_FROM_SELF) {
		return KERN_INVALID_ARGUMENT;
	}

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (state_count > *out_state_count) {
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}

	/* pick source/destination threads based on conversion direction */
	if (direction == THREAD_CONVERT_THREAD_STATE_FROM_SELF) {
		to_thread = thread;
		from_thread = current_thread();
	} else {
		to_thread = current_thread();
		from_thread = thread;
	}

	/* Authenticate and convert thread state to kernel representation */
	kr = machine_thread_state_convert_from_user(from_thread, flavor,
	    in_state, state_count, NULL, 0, TSSF_FLAGS_NONE);

	/* Return early if one of the thread was jop disabled while other wasn't */
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* Convert thread state to target thread user representation */
	kr = machine_thread_state_convert_to_user(to_thread, flavor,
	    in_state, &state_count, TSSF_PRESERVE_FLAGS);

	if (kr == KERN_SUCCESS) {
		/* conversion may have changed state_count; re-check the bound */
		if (state_count <= *out_state_count) {
			memcpy(out_state, in_state, state_count * sizeof(uint32_t));
			*out_state_count = state_count;
		} else {
			kr = KERN_INSUFFICIENT_BUFFER_SIZE;
		}
	}

	return kr;
}
887
888 /*
889 * Kernel-internal "thread" interfaces used outside this file:
890 */
891
892 /* Initialize (or re-initialize) a thread state. Called from execve
893 * with nothing locked, returns same way.
894 */
895 kern_return_t
thread_state_initialize(thread_t thread)896 thread_state_initialize(
897 thread_t thread)
898 {
899 kern_return_t result = KERN_SUCCESS;
900
901 if (thread == THREAD_NULL) {
902 return KERN_INVALID_ARGUMENT;
903 }
904
905 thread_mtx_lock(thread);
906
907 if (thread->active) {
908 if (thread != current_thread()) {
909 /* Thread created in exec should be blocked in UNINT wait */
910 assert(!(thread->state & TH_RUN));
911 }
912 machine_thread_state_initialize( thread );
913 } else {
914 result = KERN_TERMINATED;
915 }
916
917 thread_mtx_unlock(thread);
918
919 return result;
920 }
921
/*
 * Copy the current thread's machine state (and affinity, if any)
 * into the target thread.  The target is held and stopped for the
 * duration of the copy.
 */
kern_return_t
thread_dup(
	thread_t        target)
{
	thread_t                        self = current_thread();
	kern_return_t           result = KERN_SUCCESS;

	if (target == THREAD_NULL || target == self) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(target);

	if (target->active) {
		thread_hold(target);

		/* drop mutex: thread_stop may block */
		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(self, target, FALSE);

			if (self->affinity_set != AFFINITY_SET_NULL) {
				thread_affinity_dup(self, target);
			}
			thread_unstop(target);
		} else {
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(target);

	return result;
}
962
963
/*
 * Copy machine state (and affinity) from an arbitrary source thread
 * into the target thread.  Unlike thread_dup(), the source need not be
 * the current thread, and inspection-only targets are permitted.
 */
kern_return_t
thread_dup2(
	thread_t        source,
	thread_t        target)
{
	kern_return_t           result = KERN_SUCCESS;
	uint32_t                active = 0;

	if (source == THREAD_NULL || target == THREAD_NULL || target == source) {
		return KERN_INVALID_ARGUMENT;
	}

	/* snapshot source activity; it can change after we unlock */
	thread_mtx_lock(source);
	active = source->active;
	thread_mtx_unlock(source);

	if (!active) {
		return KERN_TERMINATED;
	}

	thread_mtx_lock(target);

	if (target->active || target->inspection) {
		thread_hold(target);

		/* drop mutex: thread_stop may block */
		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(source, target, TRUE);
			if (source->affinity_set != AFFINITY_SET_NULL) {
				thread_affinity_dup(source, target);
			}
			thread_unstop(target);
		} else {
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(target);

	return result;
}
1012
/*
 * thread_setstatus:
 *
 * Set the status of the specified thread.
 * Called with (and returns with) no locks held.
 *
 * Thin kernel-internal alias for thread_set_state().
 */
kern_return_t
thread_setstatus(
	thread_t                thread,
	int                                             flavor,
	thread_state_t                  tstate,
	mach_msg_type_number_t  count)
{
	return thread_set_state(thread, flavor, tstate, count);
}
1028
/*
 * Set the status of the specified thread from a userspace-supplied
 * representation; the caller's flags are augmented with
 * TSSF_TRANSLATE_TO_USER so the state is authenticated/converted.
 */
kern_return_t
thread_setstatus_from_user(
	thread_t                thread,
	int                                             flavor,
	thread_state_t                  tstate,
	mach_msg_type_number_t  count,
	thread_state_t                  old_tstate,
	mach_msg_type_number_t  old_count,
	thread_set_status_flags_t flags)
{
	return thread_set_state_internal(thread, flavor, tstate, count, old_tstate,
	           old_count, flags | TSSF_TRANSLATE_TO_USER);
}
1042
/*
 * thread_getstatus:
 *
 * Get the status of the specified thread.
 *
 * Thin kernel-internal alias for thread_get_state().
 */
kern_return_t
thread_getstatus(
	thread_t                thread,
	int                                             flavor,
	thread_state_t                  tstate,
	mach_msg_type_number_t  *count)
{
	return thread_get_state(thread, flavor, tstate, count);
}
1057
/*
 * Get the status of the specified thread, converted to the userspace
 * representation (caller flags augmented with TSSF_TRANSLATE_TO_USER).
 */
kern_return_t
thread_getstatus_to_user(
	thread_t                thread,
	int                                             flavor,
	thread_state_t                  tstate,
	mach_msg_type_number_t  *count,
	thread_set_status_flags_t flags)
{
	return thread_get_state_internal(thread, flavor, tstate, count, flags | TSSF_TRANSLATE_TO_USER);
}
1068
/*
 * Change thread's machine-dependent userspace TSD base.
 * Called with nothing locked. Returns same way.
 *
 * A remote target is held and stopped around the update, mirroring
 * the thread_set_state_internal() protocol.
 */
kern_return_t
thread_set_tsd_base(
	thread_t                        thread,
	mach_vm_offset_t        tsd_base)
{
	kern_return_t           result = KERN_SUCCESS;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			/* drop mutex: thread_stop may block */
			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_tsd_base(thread, tsd_base);
				thread_unstop(thread);
			} else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		} else {
			result = machine_thread_set_tsd_base(thread, tsd_base);
		}
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
1113
1114 /*
1115 * thread_set_apc_ast:
1116 *
1117 * Register the AST_APC callback that handles suspension and
1118 * termination, if it hasn't been installed already.
1119 *
1120 * Called with the thread mutex held.
1121 */
1122 static void
thread_set_apc_ast(thread_t thread)1123 thread_set_apc_ast(thread_t thread)
1124 {
1125 spl_t s = splsched();
1126
1127 thread_lock(thread);
1128 thread_set_apc_ast_locked(thread);
1129 thread_unlock(thread);
1130
1131 splx(s);
1132 }
1133
1134 /*
1135 * thread_set_apc_ast_locked:
1136 *
1137 * Do the work of registering for the AST_APC callback.
1138 *
1139 * Called with the thread mutex and scheduling lock held.
1140 */
1141 static void
thread_set_apc_ast_locked(thread_t thread)1142 thread_set_apc_ast_locked(thread_t thread)
1143 {
1144 thread_ast_set(thread, AST_APC);
1145
1146 if (thread == current_thread()) {
1147 ast_propagate(thread);
1148 } else {
1149 processor_t processor = thread->last_processor;
1150
1151 if (processor != PROCESSOR_NULL &&
1152 processor->state == PROCESSOR_RUNNING &&
1153 processor->active_thread == thread) {
1154 cause_ast_check(processor);
1155 }
1156 }
1157 }
1158
/*
 * Activation control support routines internal to this file.
 */
1163
1164 /*
1165 * thread_suspended
1166 *
1167 * Continuation routine for thread suspension. It checks
1168 * to see whether there has been any new suspensions. If so, it
1169 * installs the AST_APC handler again.
1170 */
1171 __attribute__((noreturn))
1172 static void
thread_suspended(__unused void * parameter,wait_result_t result)1173 thread_suspended(__unused void *parameter, wait_result_t result)
1174 {
1175 thread_t thread = current_thread();
1176
1177 thread_mtx_lock(thread);
1178
1179 if (result == THREAD_INTERRUPTED) {
1180 thread->suspend_parked = FALSE;
1181 } else {
1182 assert(thread->suspend_parked == FALSE);
1183 }
1184
1185 if (thread->suspend_count > 0) {
1186 thread_set_apc_ast(thread);
1187 }
1188
1189 thread_mtx_unlock(thread);
1190
1191 thread_exception_return();
1192 /*NOTREACHED*/
1193 }
1194
1195 /*
1196 * thread_apc_ast - handles AST_APC and drives thread suspension and termination.
1197 * Called with nothing locked. Returns (if it returns) the same way.
1198 */
1199 void
thread_apc_ast(thread_t thread)1200 thread_apc_ast(thread_t thread)
1201 {
1202 thread_mtx_lock(thread);
1203
1204 assert(thread->suspend_parked == FALSE);
1205
1206 spl_t s = splsched();
1207 thread_lock(thread);
1208
1209 /* TH_SFLAG_POLLDEPRESS is OK to have here */
1210 assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0);
1211
1212 thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
1213 thread_unlock(thread);
1214 splx(s);
1215
1216 if (!thread->active) {
1217 /* Thread is ready to terminate, time to tear it down */
1218 thread_mtx_unlock(thread);
1219
1220 thread_terminate_self();
1221 /*NOTREACHED*/
1222 }
1223
1224 /* If we're suspended, go to sleep and wait for someone to wake us up. */
1225 if (thread->suspend_count > 0) {
1226 thread->suspend_parked = TRUE;
1227 assert_wait(&thread->suspend_count,
1228 THREAD_ABORTSAFE | THREAD_WAIT_NOREPORT_USER);
1229 thread_mtx_unlock(thread);
1230
1231 thread_block(thread_suspended);
1232 /*NOTREACHED*/
1233 }
1234
1235 thread_mtx_unlock(thread);
1236 }
1237
1238 #if CONFIG_ROSETTA
1239 extern kern_return_t
1240 exception_deliver(
1241 thread_t thread,
1242 exception_type_t exception,
1243 mach_exception_data_t code,
1244 mach_msg_type_number_t codeCnt,
1245 struct exception_action *excp,
1246 lck_mtx_t *mutex);
1247
1248 kern_return_t
thread_raise_exception(thread_t thread,exception_type_t exception,natural_t code_count,int64_t code,int64_t sub_code)1249 thread_raise_exception(
1250 thread_t thread,
1251 exception_type_t exception,
1252 natural_t code_count,
1253 int64_t code,
1254 int64_t sub_code)
1255 {
1256 task_t task;
1257
1258 if (thread == THREAD_NULL) {
1259 return KERN_INVALID_ARGUMENT;
1260 }
1261
1262 task = get_threadtask(thread);
1263
1264 if (task != current_task()) {
1265 return KERN_FAILURE;
1266 }
1267
1268 if (!task_is_translated(task)) {
1269 return KERN_FAILURE;
1270 }
1271
1272 if (exception == EXC_CRASH) {
1273 return KERN_INVALID_ARGUMENT;
1274 }
1275
1276 int64_t codes[] = { code, sub_code };
1277 host_priv_t host_priv = host_priv_self();
1278 kern_return_t kr = exception_deliver(thread, exception, codes, code_count, host_priv->exc_actions, &host_priv->lock);
1279 if (kr != KERN_SUCCESS) {
1280 return kr;
1281 }
1282
1283 return thread_resume(thread);
1284 }
1285 #endif
1286
1287 void
thread_debug_return_to_user_ast(thread_t thread)1288 thread_debug_return_to_user_ast(
1289 thread_t thread)
1290 {
1291 #pragma unused(thread)
1292 #if MACH_ASSERT
1293 if ((thread->sched_flags & TH_SFLAG_RW_PROMOTED) ||
1294 thread->rwlock_count > 0) {
1295 panic("Returning to userspace with rw lock held, thread %p sched_flag %u rwlock_count %d", thread, thread->sched_flags, thread->rwlock_count);
1296 }
1297
1298 if ((thread->sched_flags & TH_SFLAG_FLOOR_PROMOTED) ||
1299 thread->priority_floor_count > 0) {
1300 panic("Returning to userspace with floor boost set, thread %p sched_flag %u priority_floor_count %d", thread, thread->sched_flags, thread->priority_floor_count);
1301 }
1302
1303 if (thread->th_vm_faults_disabled) {
1304 panic("Returning to userspace with vm faults disabled, thread %p", thread);
1305 }
1306
1307 #if CONFIG_EXCLAVES
1308 assert3u(thread->th_exclaves_state & TH_EXCLAVES_STATE_ANY, ==, 0);
1309 #endif /* CONFIG_EXCLAVES */
1310
1311 #endif /* MACH_ASSERT */
1312 }
1313
1314
1315 /* Prototype, see justification above */
1316 kern_return_t
1317 act_set_state(
1318 thread_t thread,
1319 int flavor,
1320 thread_state_t state,
1321 mach_msg_type_number_t count);
1322
1323 kern_return_t
act_set_state(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t count)1324 act_set_state(
1325 thread_t thread,
1326 int flavor,
1327 thread_state_t state,
1328 mach_msg_type_number_t count)
1329 {
1330 if (thread == current_thread()) {
1331 return KERN_INVALID_ARGUMENT;
1332 }
1333
1334 return thread_set_state(thread, flavor, state, count);
1335 }
1336
1337 kern_return_t
act_set_state_from_user(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t count)1338 act_set_state_from_user(
1339 thread_t thread,
1340 int flavor,
1341 thread_state_t state,
1342 mach_msg_type_number_t count)
1343 {
1344 if (thread == current_thread()) {
1345 return KERN_INVALID_ARGUMENT;
1346 }
1347
1348 return thread_set_state_from_user(thread, flavor, state, count);
1349 }
1350
1351 /* Prototype, see justification above */
1352 kern_return_t
1353 act_get_state(
1354 thread_t thread,
1355 int flavor,
1356 thread_state_t state,
1357 mach_msg_type_number_t *count);
1358
1359 kern_return_t
act_get_state(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t * count)1360 act_get_state(
1361 thread_t thread,
1362 int flavor,
1363 thread_state_t state,
1364 mach_msg_type_number_t *count)
1365 {
1366 if (thread == current_thread()) {
1367 return KERN_INVALID_ARGUMENT;
1368 }
1369
1370 return thread_get_state(thread, flavor, state, count);
1371 }
1372
1373 kern_return_t
act_get_state_to_user(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t * count)1374 act_get_state_to_user(
1375 thread_t thread,
1376 int flavor,
1377 thread_state_t state,
1378 mach_msg_type_number_t *count)
1379 {
1380 if (thread == current_thread()) {
1381 return KERN_INVALID_ARGUMENT;
1382 }
1383
1384 return thread_get_state_to_user(thread, flavor, state, count);
1385 }
1386
1387 static void
act_set_ast(thread_t thread,ast_t ast)1388 act_set_ast(
1389 thread_t thread,
1390 ast_t ast)
1391 {
1392 spl_t s = splsched();
1393
1394 if (thread == current_thread()) {
1395 thread_ast_set(thread, ast);
1396 ast_propagate(thread);
1397 } else {
1398 processor_t processor;
1399
1400 thread_lock(thread);
1401 thread_ast_set(thread, ast);
1402 processor = thread->last_processor;
1403 if (processor != PROCESSOR_NULL &&
1404 processor->state == PROCESSOR_RUNNING &&
1405 processor->active_thread == thread) {
1406 cause_ast_check(processor);
1407 }
1408 thread_unlock(thread);
1409 }
1410
1411 splx(s);
1412 }
1413
1414 /*
1415 * set AST on thread without causing an AST check
1416 * and without taking the thread lock
1417 *
1418 * If thread is not the current thread, then it may take
1419 * up until the next context switch or quantum expiration
1420 * on that thread for it to notice the AST.
1421 */
1422 static void
act_set_ast_async(thread_t thread,ast_t ast)1423 act_set_ast_async(thread_t thread,
1424 ast_t ast)
1425 {
1426 thread_ast_set(thread, ast);
1427
1428 if (thread == current_thread()) {
1429 spl_t s = splsched();
1430 ast_propagate(thread);
1431 splx(s);
1432 }
1433 }
1434
1435 void
act_set_debug_assert(void)1436 act_set_debug_assert(void)
1437 {
1438 thread_t thread = current_thread();
1439 if (thread_ast_peek(thread, AST_DEBUG_ASSERT) != AST_DEBUG_ASSERT) {
1440 thread_ast_set(thread, AST_DEBUG_ASSERT);
1441 }
1442 if (ast_peek(AST_DEBUG_ASSERT) != AST_DEBUG_ASSERT) {
1443 spl_t s = splsched();
1444 ast_propagate(thread);
1445 splx(s);
1446 }
1447 }
1448
1449 void
act_set_astbsd(thread_t thread)1450 act_set_astbsd(thread_t thread)
1451 {
1452 act_set_ast(thread, AST_BSD);
1453 }
1454
1455 void
act_set_astkevent(thread_t thread,uint16_t bits)1456 act_set_astkevent(thread_t thread, uint16_t bits)
1457 {
1458 os_atomic_or(&thread->kevent_ast_bits, bits, relaxed);
1459
1460 /* kevent AST shouldn't send immediate IPIs */
1461 act_set_ast_async(thread, AST_KEVENT);
1462 }
1463
1464 uint16_t
act_clear_astkevent(thread_t thread,uint16_t bits)1465 act_clear_astkevent(thread_t thread, uint16_t bits)
1466 {
1467 /*
1468 * avoid the atomic operation if none of the bits is set,
1469 * which will be the common case.
1470 */
1471 uint16_t cur = os_atomic_load(&thread->kevent_ast_bits, relaxed);
1472 if (cur & bits) {
1473 cur = os_atomic_andnot_orig(&thread->kevent_ast_bits, bits, relaxed);
1474 }
1475 return cur & bits;
1476 }
1477
1478 bool
act_set_ast_reset_pcs(task_t task,thread_t thread)1479 act_set_ast_reset_pcs(task_t task, thread_t thread)
1480 {
1481 processor_t processor;
1482 bool needs_wait = false;
1483 spl_t s;
1484
1485 s = splsched();
1486
1487 if (thread == current_thread()) {
1488 /*
1489 * this is called from the signal code,
1490 * just set the AST and move on
1491 */
1492 thread_ast_set(thread, AST_RESET_PCS);
1493 ast_propagate(thread);
1494 } else {
1495 thread_lock(thread);
1496
1497 assert(thread->t_rr_state.trr_ipi_ack_pending == 0);
1498 assert(thread->t_rr_state.trr_sync_waiting == 0);
1499
1500 processor = thread->last_processor;
1501 if (!thread->active) {
1502 /*
1503 * ->active is being set before the thread is added
1504 * to the thread list (under the task lock which
1505 * the caller holds), and is reset before the thread
1506 * lock is being taken by thread_terminate_self().
1507 *
1508 * The result is that this will never fail to
1509 * set the AST on an thread that is active,
1510 * but will not set it past thread_terminate_self().
1511 */
1512 } else if (processor != PROCESSOR_NULL &&
1513 processor->state == PROCESSOR_RUNNING &&
1514 processor->active_thread == thread) {
1515 thread->t_rr_state.trr_ipi_ack_pending = true;
1516 needs_wait = true;
1517 thread_ast_set(thread, AST_RESET_PCS);
1518 cause_ast_check(processor);
1519 } else if (thread_reset_pcs_in_range(task, thread)) {
1520 if (thread->t_rr_state.trr_fault_state) {
1521 thread->t_rr_state.trr_fault_state =
1522 TRR_FAULT_OBSERVED;
1523 needs_wait = true;
1524 }
1525 thread_ast_set(thread, AST_RESET_PCS);
1526 }
1527 thread_unlock(thread);
1528 }
1529
1530 splx(s);
1531
1532 return needs_wait;
1533 }
1534
1535 void
act_set_kperf(thread_t thread)1536 act_set_kperf(thread_t thread)
1537 {
1538 /* safety check */
1539 if (thread != current_thread()) {
1540 if (!ml_get_interrupts_enabled()) {
1541 panic("unsafe act_set_kperf operation");
1542 }
1543 }
1544
1545 act_set_ast(thread, AST_KPERF);
1546 }
1547
1548 #if CONFIG_MACF
1549 void
act_set_astmacf(thread_t thread)1550 act_set_astmacf(
1551 thread_t thread)
1552 {
1553 act_set_ast( thread, AST_MACF);
1554 }
1555 #endif
1556
1557 void
act_set_astledger(thread_t thread)1558 act_set_astledger(thread_t thread)
1559 {
1560 act_set_ast(thread, AST_LEDGER);
1561 }
1562
1563 /*
1564 * The ledger AST may need to be set while already holding
1565 * the thread lock. This routine skips sending the IPI,
1566 * allowing us to avoid the lock hold.
1567 *
1568 * However, it means the targeted thread must context switch
1569 * to recognize the ledger AST.
1570 */
1571 void
act_set_astledger_async(thread_t thread)1572 act_set_astledger_async(thread_t thread)
1573 {
1574 act_set_ast_async(thread, AST_LEDGER);
1575 }
1576
1577 void
act_set_io_telemetry_ast(thread_t thread)1578 act_set_io_telemetry_ast(thread_t thread)
1579 {
1580 act_set_ast(thread, AST_TELEMETRY_IO);
1581 }
1582
1583 void
act_set_macf_telemetry_ast(thread_t thread)1584 act_set_macf_telemetry_ast(thread_t thread)
1585 {
1586 act_set_ast(thread, AST_TELEMETRY_MACF);
1587 }
1588
1589 void
act_set_astproc_resource(thread_t thread)1590 act_set_astproc_resource(thread_t thread)
1591 {
1592 act_set_ast(thread, AST_PROC_RESOURCE);
1593 }
1594